/*	$NetBSD: i915_perf.c,v 1.7 2021/12/19 12:32:15 riastradh Exp $	*/

/*
 * Copyright 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
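 *
 * For example, opening a periodic system-wide OA stream from userspace
 * boils down to filling in such an array and issuing
 * DRM_IOCTL_I915_PERF_OPEN (a minimal sketch using the uapi definitions
 * from <drm/i915_drm.h>; the metrics set ID 1 and exponent 16 are
 * illustrative placeholders only):
 *
 *	u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(u64)),
 *		.properties_ptr = (u64)(uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);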
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say; we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 * - Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 * - The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.
 *   Events opened with a pid will be automatically enabled/disabled according
 *   to the scheduling of that process - so not appropriate for us. When an
 *   event is related to a cpu id, perf ensures pmu methods will be invoked
 *   via an inter-processor interrupt on that core. To avoid invasive changes
 *   our userspace opened OA perf events for a specific cpu. This was workable
 *   but it meant the majority of the OA driver ran in atomic context,
 *   including all OA report forwarding, which wasn't really necessary in our
 *   case and seemed to make our locking requirements somewhat complex as we
 *   handled the interaction with the rest of the i915 driver.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_perf.c,v 1.7 2021/12/19 12:32:15 riastradh Exp $");

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_perf.h"
#include "oa/i915_oa_hsw.h"
#include "oa/i915_oa_bdw.h"
#include "oa/i915_oa_chv.h"
#include "oa/i915_oa_sklgt2.h"
#include "oa/i915_oa_sklgt3.h"
#include "oa/i915_oa_sklgt4.h"
#include "oa/i915_oa_bxt.h"
#include "oa/i915_oa_kblgt2.h"
#include "oa/i915_oa_kblgt3.h"
#include "oa/i915_oa_glk.h"
#include "oa/i915_oa_cflgt2.h"
#include "oa/i915_oa_cflgt3.h"
#include "oa/i915_oa_cnl.h"
#include "oa/i915_oa_icl.h"
#include "oa/i915_oa_tgl.h"

#ifdef __NetBSD__
#include <sys/filedesc.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <linux/nbsd-namespace.h>
#endif

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
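 *
 * As a rough illustrative timeline of the mechanism (a sketch, not taken
 * from the PRM):
 *
 *	hrtimer cb N:	hw_tail has advanced by at least one report, so
 *			tails[!aged_idx].offset = hw_tail and an aging
 *			timestamp is taken;
 *	hrtimer cb N+k:	now - aging_timestamp > OA_TAIL_MARGIN_NSEC, so
 *			aged_tail_idx flips: the aged tail becomes safe
 *			for read() and a new tail can start aging.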
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering EPOLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
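 * (E.g. with the 256 byte A45_B8_C8 format below, hw_tail &= ~(256 - 1)
 * rounds the 64 byte granular tail pointer down to a whole report
 * boundary; a 192 byte format would break that masking trick.)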
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether preemption is disabled for the filtered context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
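 *
 * For example (an illustrative summary; read_properties_unlocked() is
 * authoritative): a DRM_I915_PERF_PROP_OA_EXPONENT property fills in
 * @oa_period_exponent and sets @oa_periodic, while
 * DRM_I915_PERF_PROP_CTX_HANDLE fills in @ctx_handle and sets
 * @single_context.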
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

#ifndef __NetBSD__		/* XXX i915 perf sysctl */
static struct ctl_table_header *sysctl_header;
#endif

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(__UNCONST(oa_config->flex_regs));
	kfree(__UNCONST(oa_config->b_counter_regs));
	kfree(__UNCONST(oa_config->mux_regs));

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	if (metrics_set == 1)
		oa_config = &perf->test_config;
	else
		oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and
 * determine whether there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check(struct i915_perf_stream *stream)
{
	int report_size = stream->oa_buffer.format_size;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset if there is currently
	 * a read() in progress).
	 */
	head = stream->oa_buffer.head;

	aged_idx = stream->oa_buffer.aged_tail_idx;
	aged_tail = stream->oa_buffer.tails[aged_idx].offset;
	aging_tail = stream->oa_buffer.tails[!aged_idx].offset;

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - stream->oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		stream->oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = stream->oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			stream->oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			stream->oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
				  hw_tail);
		}
	}

	return aged_tail == INVALID_TAIL_PTR ?
	       false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
#ifdef __NetBSD__
			    struct uio *buf,
			    kauth_cred_t count, /* XXX dummy */
			    int offset, /* XXX dummy */
#else
			    char __user *buf,
			    size_t count,
			    size_t *offset,
#endif
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

#ifdef __NetBSD__
	/* XXX errno NetBSD->Linux */
	return -uiomove(&header, sizeof(header), buf);
#else
	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
#endif
}

/**
 * append_oa_sample - Copies a single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
#ifdef __NetBSD__
			    struct uio *buf,
			    kauth_cred_t count, /* XXX dummy */
			    int offset, /* XXX dummy */
#else
			    char __user *buf,
			    size_t count,
			    size_t *offset,
#endif
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

#ifdef __NetBSD__
	/* XXX errno NetBSD->Linux */
	int ret = -uiomove(&header, sizeof(header), buf);
	if (ret)
		return ret;
#else
	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);
#endif

	if (sample_flags & SAMPLE_OA_REPORT) {
#ifdef __NetBSD__
		ret = -uiomove(__UNCONST(report), report_size, buf);
		if (ret)
			return ret;
#else
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
#endif
	}

#ifndef __NetBSD__		/* done by uiomove */
	(*offset) += header.size;
#endif

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
#ifdef __NetBSD__
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  struct uio *buf,
				  kauth_cred_t count, /* XXX dummy */
				  int offset) /* XXX dummy */
#else
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
#endif
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
#ifdef __NetBSD__
	size_t start_offset = buf->uio_offset;
#else
	size_t start_offset = *offset;
#endif
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	aged_tail_idx = stream->oa_buffer.aged_tail_idx;
	tail = stream->oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (IS_GEN(stream->perf->i915, 12) ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));
		if (reason == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    INTEL_GEN(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

#ifdef __NetBSD__
	if (start_offset != buf->uio_offset)
#else
	if (start_offset != *offset)
#endif
	{
		i915_reg_t oaheadptr;

		oaheadptr = IS_GEN(stream->perf->i915, 12) ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
#ifdef __NetBSD__
static int gen8_oa_read(struct i915_perf_stream *stream,
			struct uio *buf,
			kauth_cred_t count, /* XXX dummy */
			int offset) /* XXX dummy */
#else
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
#endif
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (WARN_ON(!stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow anyway likely indicates that
	 * something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		intel_uncore_write(uncore, oastatus_reg,
				   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
#ifdef __NetBSD__
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  struct uio *buf,
				  kauth_cred_t count, /* XXX dummy */
				  int offset) /* XXX dummy */
#else
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
#endif
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
#ifdef __NetBSD__
	size_t start_offset = buf->uio_offset;
#else
	size_t start_offset = *offset;
#endif
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	aged_tail_idx = stream->oa_buffer.aged_tail_idx;
	tail = stream->oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

#ifdef __NetBSD__
	if (start_offset != buf->uio_offset)
#else
	if (start_offset != *offset)
#endif
	{
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
#ifdef __NetBSD__
static int gen7_oa_read(struct i915_perf_stream *stream,
			struct uio *buf,
			kauth_cred_t count, /* XXX dummy */
			int offset) /* XXX dummy */
#else
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
#endif
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
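 *
 * From userspace's side a blocking read() on the stream fd returns a batch
 * of records, each prefixed with a struct drm_i915_perf_record_header (a
 * hedged sketch; buffer sizing and record handling are up to the
 * application):
 *
 *	char buf[64 * 1024];
 *	ssize_t n = read(stream_fd, buf, sizeof(buf));
 *	const char *p = buf;
 *
 *	while (p < buf + n) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)p;
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			;	/* OA report payload follows the header */
 *		p += hdr->size;
 *	}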
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	unsigned long flags;
	int ret;

	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
	DRM_SPIN_WAIT_UNTIL(ret, &stream->poll_wq, &stream->oa_buffer.ptr_lock,
	    oa_buffer_check(stream));
	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return ret;
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
#ifndef __NetBSD__
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}
#endif

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
#ifdef __NetBSD__
static int i915_oa_read(struct i915_perf_stream *stream,
			struct uio *buf,
			kauth_cred_t count, /* XXX dummy */
			int offset) /* XXX dummy */
#else
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
#endif
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	int err;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 */
		err = intel_context_pin(ce);
		if (err == 0) {
			stream->pinned_ctx = ce;
			break;
		}
	}
	i915_gem_context_unlock_engines(ctx);

	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (INTEL_GEN(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
	case 10:
		if (intel_engine_in_execlists_submission_mode(ce->engine)) {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		} else {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id =
				lower_32_bits(ce->lrc_desc) >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		}
		break;

	case 11:
	case 12: {
		stream->specific_ctx_id_mask =
			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		stream->specific_ctx_id = stream->specific_ctx_id_mask;
		break;
	}

	default:
		MISSING_CASE(INTEL_GEN(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id_mask;

	DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
			 stream->specific_ctx_id,
			 stream->specific_ctx_id_mask);

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;

	BUG_ON(stream != perf->exclusive_stream);

	spin_lock_destroy(&stream->oa_buffer.ptr_lock);
	seldestroy(&stream->poll_selq);
	DRM_DESTROY_WAITQUEUE(&stream->poll_wq);
	hrtimer_cancel(&stream->poll_check_timer);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 */
	perf->exclusive_stream = NULL;
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 perf->spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);

	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
			   gtt_offset | OABUFFER_SIZE_16M);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	stream->perf->gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
1560 * 1561 * The reason we clear the buffer for each re-init is for the 1562 * sanity check in gen7_append_oa_reports() that looks at the 1563 * report-id field to make sure it's non-zero which relies on 1564 * the assumption that new reports are being written to zeroed 1565 * memory... 1566 */ 1567 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1568 1569 stream->pollin = false; 1570 } 1571 1572 static void gen8_init_oa_buffer(struct i915_perf_stream *stream) 1573 { 1574 struct intel_uncore *uncore = stream->uncore; 1575 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1576 unsigned long flags; 1577 1578 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1579 1580 intel_uncore_write(uncore, GEN8_OASTATUS, 0); 1581 intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset); 1582 stream->oa_buffer.head = gtt_offset; 1583 1584 intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0); 1585 1586 /* 1587 * PRM says: 1588 * 1589 * "This MMIO must be set before the OATAILPTR 1590 * register and after the OAHEADPTR register. This is 1591 * to enable proper functionality of the overflow 1592 * bit." 1593 */ 1594 intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset | 1595 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); 1596 intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); 1597 1598 /* Mark that we need updated tail pointers to read from... */ 1599 stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; 1600 stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR; 1601 1602 /* 1603 * Reset state used to recognise context switches, affecting which 1604 * reports we will forward to userspace while filtering for a single 1605 * context. 1606 */ 1607 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; 1608 1609 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1610 1611 /* 1612 * NB: although the OA buffer will initially be allocated 1613 * zeroed via shmfs (and so this memset is redundant when 1614 * first allocating), we may re-init the OA buffer, either 1615 * when re-enabling a stream or in error/reset paths. 1616 * 1617 * The reason we clear the buffer for each re-init is for the 1618 * sanity check in gen8_append_oa_reports() that looks at the 1619 * reason field to make sure it's non-zero which relies on 1620 * the assumption that new reports are being written to zeroed 1621 * memory... 1622 */ 1623 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1624 1625 stream->pollin = false; 1626 } 1627 1628 static void gen12_init_oa_buffer(struct i915_perf_stream *stream) 1629 { 1630 struct intel_uncore *uncore = stream->uncore; 1631 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1632 unsigned long flags; 1633 1634 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1635 1636 intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0); 1637 intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR, 1638 gtt_offset & GEN12_OAG_OAHEADPTR_MASK); 1639 stream->oa_buffer.head = gtt_offset; 1640 1641 /* 1642 * PRM says: 1643 * 1644 * "This MMIO must be set before the OATAILPTR 1645 * register and after the OAHEADPTR register. This is 1646 * to enable proper functionality of the overflow 1647 * bit." 1648 */ 1649 intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset | 1650 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); 1651 intel_uncore_write(uncore, GEN12_OAG_OATAILPTR, 1652 gtt_offset & GEN12_OAG_OATAILPTR_MASK); 1653 1654 /* Mark that we need updated tail pointers to read from... 
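 * (both tail slots are reset to INVALID_TAIL_PTR below, forcing the
 * tail-pointer checking logic to re-derive a safe tail before any
 * reports are forwarded)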
*/ 1655 stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; 1656 stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR; 1657 1658 /* 1659 * Reset state used to recognise context switches, affecting which 1660 * reports we will forward to userspace while filtering for a single 1661 * context. 1662 */ 1663 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; 1664 1665 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1666 1667 /* 1668 * NB: although the OA buffer will initially be allocated 1669 * zeroed via shmfs (and so this memset is redundant when 1670 * first allocating), we may re-init the OA buffer, either 1671 * when re-enabling a stream or in error/reset paths. 1672 * 1673 * The reason we clear the buffer for each re-init is for the 1674 * sanity check in gen8_append_oa_reports() that looks at the 1675 * reason field to make sure it's non-zero which relies on 1676 * the assumption that new reports are being written to zeroed 1677 * memory... 1678 */ 1679 memset(stream->oa_buffer.vaddr, 0, 1680 stream->oa_buffer.vma->size); 1681 1682 stream->pollin = false; 1683 } 1684 1685 static int alloc_oa_buffer(struct i915_perf_stream *stream) 1686 { 1687 struct drm_i915_gem_object *bo; 1688 struct i915_vma *vma; 1689 int ret; 1690 1691 if (WARN_ON(stream->oa_buffer.vma)) 1692 return -ENODEV; 1693 1694 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); 1695 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); 1696 1697 bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE); 1698 if (IS_ERR(bo)) { 1699 DRM_ERROR("Failed to allocate OA buffer\n"); 1700 return PTR_ERR(bo); 1701 } 1702 1703 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); 1704 1705 /* PreHSW required 512K alignment, HSW requires 16M */ 1706 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); 1707 if (IS_ERR(vma)) { 1708 ret = PTR_ERR(vma); 1709 goto err_unref; 1710 } 1711 stream->oa_buffer.vma = vma; 1712 1713 stream->oa_buffer.vaddr = 1714 i915_gem_object_pin_map(bo, I915_MAP_WB); 1715 if (IS_ERR(stream->oa_buffer.vaddr)) { 1716 ret = PTR_ERR(stream->oa_buffer.vaddr); 1717 goto err_unpin; 1718 } 1719 1720 return 0; 1721 1722 err_unpin: 1723 __i915_vma_unpin(vma); 1724 1725 err_unref: 1726 i915_gem_object_put(bo); 1727 1728 stream->oa_buffer.vaddr = NULL; 1729 stream->oa_buffer.vma = NULL; 1730 1731 return ret; 1732 } 1733 1734 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, 1735 bool save, i915_reg_t reg, u32 offset, 1736 u32 dword_count) 1737 { 1738 u32 cmd; 1739 u32 d; 1740 1741 cmd = save ? 
MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1742 if (INTEL_GEN(stream->perf->i915) >= 8)
1743 cmd++;
1744
1745 for (d = 0; d < dword_count; d++) {
1746 *cs++ = cmd;
1747 *cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1748 *cs++ = intel_gt_scratch_offset(stream->engine->gt,
1749 offset) + 4 * d;
1750 *cs++ = 0;
1751 }
1752
1753 return cs;
1754 }
1755
1756 static int alloc_noa_wait(struct i915_perf_stream *stream)
1757 {
1758 struct drm_i915_private *i915 = stream->perf->i915;
1759 struct drm_i915_gem_object *bo;
1760 struct i915_vma *vma;
1761 const u64 delay_ticks = 0xffffffffffffffff -
1762 DIV64_U64_ROUND_UP(
1763 atomic64_read(&stream->perf->noa_programming_delay) *
1764 RUNTIME_INFO(i915)->cs_timestamp_frequency_khz,
1765 1000000ull);
1766 const u32 base = stream->engine->mmio_base;
1767 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1768 u32 *batch, *ts0, *cs, *jump;
1769 int ret, i;
1770 enum {
1771 START_TS,
1772 NOW_TS,
1773 DELTA_TS,
1774 JUMP_PREDICATE,
1775 DELTA_TARGET,
1776 N_CS_GPR
1777 };
1778
1779 bo = i915_gem_object_create_internal(i915, 4096);
1780 if (IS_ERR(bo)) {
1781 DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
1782 return PTR_ERR(bo);
1783 }
1784
1785 /*
1786 * We pin in GGTT because multiple OA config BOs will have a jump to
1787 * this address and it needs to stay fixed for the lifetime of the
1788 * i915/perf stream.
1789 */
1790 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
1791 if (IS_ERR(vma)) {
1792 ret = PTR_ERR(vma);
1793 goto err_unref;
1794 }
1795
1796 batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1797 if (IS_ERR(batch)) {
1798 ret = PTR_ERR(batch);
1799 goto err_unpin;
1800 }
1801
1802 /* Save registers. */
1803 for (i = 0; i < N_CS_GPR; i++)
1804 cs = save_restore_register(
1805 stream, cs, true /* save */, CS_GPR(i),
1806 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1807 cs = save_restore_register(
1808 stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
1809 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1810
1811 /* First timestamp snapshot location. */
1812 ts0 = cs;
1813
1814 /*
1815 * Initial snapshot of the timestamp register to implement the wait.
1816 * We work with 32-bit values, so clear out the top 32 bits of the
1817 * register because the ALU works on 64 bits.
1818 */
1819 *cs++ = MI_LOAD_REGISTER_IMM(1);
1820 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1821 *cs++ = 0;
1822 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1823 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1824 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1825
1826 /*
1827 * This is the location we're going to jump back into until the
1828 * required amount of time has passed.
1829 */
1830 jump = cs;
1831
1832 /*
1833 * Take another snapshot of the timestamp register. Take care to clear
1834 * out the top 32 bits of CS_GPR(NOW_TS) as we're using it for other
1835 * operations below.
1836 */
1837 *cs++ = MI_LOAD_REGISTER_IMM(1);
1838 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1839 *cs++ = 0;
1840 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1841 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1842 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1843
1844 /*
1845 * Do a diff between the 2 timestamps and store the result in
1846 * CS_GPR(DELTA_TS).
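 * In MI_MATH terms the block below loads SRCA = NOW_TS and
 * SRCB = START_TS, computes SRCA - SRCB into the accumulator, then
 * stores the low 64 bits to DELTA_TS and the borrow (carry flag) to
 * JUMP_PREDICATE.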
1847 */
1848 *cs++ = MI_MATH(5);
1849 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1850 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1851 *cs++ = MI_MATH_SUB;
1852 *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1853 *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1854
1855 /*
1856 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1857 * 32-bit timestamp has rolled over) into the predicate register
1858 * to be used for the predicated jump.
1859 */
1860 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1861 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1862 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1863
1864 /* Restart from the beginning if we had timestamps roll over. */
1865 *cs++ = (INTEL_GEN(i915) < 8 ?
1866 MI_BATCH_BUFFER_START :
1867 MI_BATCH_BUFFER_START_GEN8) |
1868 MI_BATCH_PREDICATE;
1869 *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1870 *cs++ = 0;
1871
1872 /*
1873 * Now take the diff between the two previous timestamps and add it to:
1874 * ((1 << 64) - 1) - delay (in CS timestamp ticks)
1875 *
1876 * When the Carry Flag contains 1 this means the elapsed time is
1877 * longer than the expected delay, and we can exit the wait loop.
1878 */
1879 *cs++ = MI_LOAD_REGISTER_IMM(2);
1880 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1881 *cs++ = lower_32_bits(delay_ticks);
1882 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
1883 *cs++ = upper_32_bits(delay_ticks);
1884
1885 *cs++ = MI_MATH(4);
1886 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
1887 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
1888 *cs++ = MI_MATH_ADD;
1889 *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1890
1891 *cs++ = MI_ARB_CHECK;
1892
1893 /*
1894 * Transfer the result into the predicate register to be used for the
1895 * predicated jump.
1896 */
1897 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1898 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1899 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1900
1901 /* Predicate the jump. */
1902 *cs++ = (INTEL_GEN(i915) < 8 ?
1903 MI_BATCH_BUFFER_START :
1904 MI_BATCH_BUFFER_START_GEN8) |
1905 MI_BATCH_PREDICATE;
1906 *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1907 *cs++ = 0;
1908
1909 /* Restore registers. */
1910 for (i = 0; i < N_CS_GPR; i++)
1911 cs = save_restore_register(
1912 stream, cs, false /* restore */, CS_GPR(i),
1913 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1914 cs = save_restore_register(
1915 stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
1916 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1917
1918 /* And return to the ring.
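 * (MI_BATCH_BUFFER_END below terminates this second-level batch; the
 * GEM_BUG_ON that follows asserts the whole wait loop fit within the
 * single 4KiB object allocated above)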
*/ 1919 *cs++ = MI_BATCH_BUFFER_END; 1920 1921 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch)); 1922 1923 i915_gem_object_flush_map(bo); 1924 i915_gem_object_unpin_map(bo); 1925 1926 stream->noa_wait = vma; 1927 return 0; 1928 1929 err_unpin: 1930 i915_vma_unpin_and_release(&vma, 0); 1931 err_unref: 1932 i915_gem_object_put(bo); 1933 return ret; 1934 } 1935 1936 static u32 *write_cs_mi_lri(u32 *cs, 1937 const struct i915_oa_reg *reg_data, 1938 u32 n_regs) 1939 { 1940 u32 i; 1941 1942 for (i = 0; i < n_regs; i++) { 1943 if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) { 1944 u32 n_lri = min_t(u32, 1945 n_regs - i, 1946 MI_LOAD_REGISTER_IMM_MAX_REGS); 1947 1948 *cs++ = MI_LOAD_REGISTER_IMM(n_lri); 1949 } 1950 *cs++ = i915_mmio_reg_offset(reg_data[i].addr); 1951 *cs++ = reg_data[i].value; 1952 } 1953 1954 return cs; 1955 } 1956 1957 static int num_lri_dwords(int num_regs) 1958 { 1959 int count = 0; 1960 1961 if (num_regs > 0) { 1962 count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS); 1963 count += num_regs * 2; 1964 } 1965 1966 return count; 1967 } 1968 1969 static struct i915_oa_config_bo * 1970 alloc_oa_config_buffer(struct i915_perf_stream *stream, 1971 struct i915_oa_config *oa_config) 1972 { 1973 struct drm_i915_gem_object *obj; 1974 struct i915_oa_config_bo *oa_bo; 1975 size_t config_length = 0; 1976 u32 *cs; 1977 int err; 1978 1979 oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL); 1980 if (!oa_bo) 1981 return ERR_PTR(-ENOMEM); 1982 1983 config_length += num_lri_dwords(oa_config->mux_regs_len); 1984 config_length += num_lri_dwords(oa_config->b_counter_regs_len); 1985 config_length += num_lri_dwords(oa_config->flex_regs_len); 1986 config_length += 3; /* MI_BATCH_BUFFER_START */ 1987 config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE); 1988 1989 obj = i915_gem_object_create_shmem(stream->perf->i915, config_length); 1990 if (IS_ERR(obj)) { 1991 err = PTR_ERR(obj); 1992 goto err_free; 1993 } 1994 1995 cs = i915_gem_object_pin_map(obj, I915_MAP_WB); 1996 if (IS_ERR(cs)) { 1997 err = PTR_ERR(cs); 1998 goto err_oa_bo; 1999 } 2000 2001 cs = write_cs_mi_lri(cs, 2002 oa_config->mux_regs, 2003 oa_config->mux_regs_len); 2004 cs = write_cs_mi_lri(cs, 2005 oa_config->b_counter_regs, 2006 oa_config->b_counter_regs_len); 2007 cs = write_cs_mi_lri(cs, 2008 oa_config->flex_regs, 2009 oa_config->flex_regs_len); 2010 2011 /* Jump into the active wait. */ 2012 *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ? 2013 MI_BATCH_BUFFER_START : 2014 MI_BATCH_BUFFER_START_GEN8); 2015 *cs++ = i915_ggtt_offset(stream->noa_wait); 2016 *cs++ = 0; 2017 2018 i915_gem_object_flush_map(obj); 2019 i915_gem_object_unpin_map(obj); 2020 2021 oa_bo->vma = i915_vma_instance(obj, 2022 &stream->engine->gt->ggtt->vm, 2023 NULL); 2024 if (IS_ERR(oa_bo->vma)) { 2025 err = PTR_ERR(oa_bo->vma); 2026 goto err_oa_bo; 2027 } 2028 2029 oa_bo->oa_config = i915_oa_config_get(oa_config); 2030 llist_add(&oa_bo->node, &stream->oa_config_bos); 2031 2032 return oa_bo; 2033 2034 err_oa_bo: 2035 i915_gem_object_put(obj); 2036 err_free: 2037 kfree(oa_bo); 2038 return ERR_PTR(err); 2039 } 2040 2041 static struct i915_vma * 2042 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config) 2043 { 2044 struct i915_oa_config_bo *oa_bo; 2045 2046 /* 2047 * Look for the buffer in the already allocated BOs attached 2048 * to the stream. 
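 * (A config matches when both the pointer and, defensively, the UUID
 * agree; reusing the BO avoids rebuilding an identical LRI batch.)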
2049 */ 2050 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { 2051 if (oa_bo->oa_config == oa_config && 2052 memcmp(oa_bo->oa_config->uuid, 2053 oa_config->uuid, 2054 sizeof(oa_config->uuid)) == 0) 2055 goto out; 2056 } 2057 2058 oa_bo = alloc_oa_config_buffer(stream, oa_config); 2059 if (IS_ERR(oa_bo)) 2060 return ERR_CAST(oa_bo); 2061 2062 out: 2063 return i915_vma_get(oa_bo->vma); 2064 } 2065 2066 static int emit_oa_config(struct i915_perf_stream *stream, 2067 struct i915_oa_config *oa_config, 2068 struct intel_context *ce) 2069 { 2070 struct i915_request *rq; 2071 struct i915_vma *vma; 2072 int err; 2073 2074 vma = get_oa_vma(stream, oa_config); 2075 if (IS_ERR(vma)) 2076 return PTR_ERR(vma); 2077 2078 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 2079 if (err) 2080 goto err_vma_put; 2081 2082 intel_engine_pm_get(ce->engine); 2083 rq = i915_request_create(ce); 2084 intel_engine_pm_put(ce->engine); 2085 if (IS_ERR(rq)) { 2086 err = PTR_ERR(rq); 2087 goto err_vma_unpin; 2088 } 2089 2090 i915_vma_lock(vma); 2091 err = i915_request_await_object(rq, vma->obj, 0); 2092 if (!err) 2093 err = i915_vma_move_to_active(vma, rq, 0); 2094 i915_vma_unlock(vma); 2095 if (err) 2096 goto err_add_request; 2097 2098 err = rq->engine->emit_bb_start(rq, 2099 vma->node.start, 0, 2100 I915_DISPATCH_SECURE); 2101 err_add_request: 2102 i915_request_add(rq); 2103 err_vma_unpin: 2104 i915_vma_unpin(vma); 2105 err_vma_put: 2106 i915_vma_put(vma); 2107 return err; 2108 } 2109 2110 static struct intel_context *oa_context(struct i915_perf_stream *stream) 2111 { 2112 return stream->pinned_ctx ?: stream->engine->kernel_context; 2113 } 2114 2115 static int hsw_enable_metric_set(struct i915_perf_stream *stream) 2116 { 2117 struct intel_uncore *uncore = stream->uncore; 2118 2119 /* 2120 * PRM: 2121 * 2122 * OA unit is using crclk for its functionality. When trunk 2123 * level clock gating takes place, OA clock would be gated, 2124 * unable to count the events from non-render clock domain. 2125 * Render clock gating must be disabled when OA is enabled to 2126 * count the events from non-render domain. Unit level clock 2127 * gating for RCS should also be disabled. 2128 */ 2129 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2130 GEN7_DOP_CLOCK_GATE_ENABLE, 0); 2131 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2132 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); 2133 2134 return emit_oa_config(stream, stream->oa_config, oa_context(stream)); 2135 } 2136 2137 static void hsw_disable_metric_set(struct i915_perf_stream *stream) 2138 { 2139 struct intel_uncore *uncore = stream->uncore; 2140 2141 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2142 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0); 2143 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2144 0, GEN7_DOP_CLOCK_GATE_ENABLE); 2145 2146 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2147 } 2148 2149 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, 2150 i915_reg_t reg) 2151 { 2152 u32 mmio = i915_mmio_reg_offset(reg); 2153 int i; 2154 2155 /* 2156 * This arbitrary default will select the 'EU FPU0 Pipeline 2157 * Active' event. In the future it's anticipated that there 2158 * will be an explicit 'No Event' we can select, but not yet... 
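 * (Returning 0 when oa_config is NULL also serves the
 * *_disable_metric_set() paths, which pass a NULL config via
 * lrc_configure_all_contexts() to reset the flex EU registers.)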
2159 */ 2160 if (!oa_config) 2161 return 0; 2162 2163 for (i = 0; i < oa_config->flex_regs_len; i++) { 2164 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio) 2165 return oa_config->flex_regs[i].value; 2166 } 2167 2168 return 0; 2169 } 2170 /* 2171 * NB: It must always remain pointer safe to run this even if the OA unit 2172 * has been disabled. 2173 * 2174 * It's fine to put out-of-date values into these per-context registers 2175 * in the case that the OA unit has been disabled. 2176 */ 2177 static void 2178 gen8_update_reg_state_unlocked(const struct intel_context *ce, 2179 const struct i915_perf_stream *stream) 2180 { 2181 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; 2182 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2183 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2184 i915_reg_t flex_regs[] = { 2185 EU_PERF_CNTL0, 2186 EU_PERF_CNTL1, 2187 EU_PERF_CNTL2, 2188 EU_PERF_CNTL3, 2189 EU_PERF_CNTL4, 2190 EU_PERF_CNTL5, 2191 EU_PERF_CNTL6, 2192 }; 2193 u32 *reg_state = ce->lrc_reg_state; 2194 int i; 2195 2196 reg_state[ctx_oactxctrl + 1] = 2197 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2198 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | 2199 GEN8_OA_COUNTER_RESUME; 2200 2201 for (i = 0; i < ARRAY_SIZE(flex_regs); i++) 2202 reg_state[ctx_flexeu0 + i * 2 + 1] = 2203 oa_config_flex_reg(stream->oa_config, flex_regs[i]); 2204 2205 reg_state[CTX_R_PWR_CLK_STATE] = 2206 intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu); 2207 } 2208 2209 struct flex { 2210 i915_reg_t reg; 2211 u32 offset; 2212 u32 value; 2213 }; 2214 2215 static int 2216 gen8_store_flex(struct i915_request *rq, 2217 struct intel_context *ce, 2218 const struct flex *flex, unsigned int count) 2219 { 2220 u32 offset; 2221 u32 *cs; 2222 2223 cs = intel_ring_begin(rq, 4 * count); 2224 if (IS_ERR(cs)) 2225 return PTR_ERR(cs); 2226 2227 offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; 2228 do { 2229 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 2230 *cs++ = offset + flex->offset * sizeof(u32); 2231 *cs++ = 0; 2232 *cs++ = flex->value; 2233 } while (flex++, --count); 2234 2235 intel_ring_advance(rq, cs); 2236 2237 return 0; 2238 } 2239 2240 static int 2241 gen8_load_flex(struct i915_request *rq, 2242 struct intel_context *ce, 2243 const struct flex *flex, unsigned int count) 2244 { 2245 u32 *cs; 2246 2247 GEM_BUG_ON(!count || count > 63); 2248 2249 cs = intel_ring_begin(rq, 2 * count + 2); 2250 if (IS_ERR(cs)) 2251 return PTR_ERR(cs); 2252 2253 *cs++ = MI_LOAD_REGISTER_IMM(count); 2254 do { 2255 *cs++ = i915_mmio_reg_offset(flex->reg); 2256 *cs++ = flex->value; 2257 } while (flex++, --count); 2258 *cs++ = MI_NOOP; 2259 2260 intel_ring_advance(rq, cs); 2261 2262 return 0; 2263 } 2264 2265 static int gen8_modify_context(struct intel_context *ce, 2266 const struct flex *flex, unsigned int count) 2267 { 2268 struct i915_request *rq; 2269 int err; 2270 2271 rq = intel_engine_create_kernel_request(ce->engine); 2272 if (IS_ERR(rq)) 2273 return PTR_ERR(rq); 2274 2275 /* Serialise with the remote context */ 2276 err = intel_context_prepare_remote_request(ce, rq); 2277 if (err == 0) 2278 err = gen8_store_flex(rq, ce, flex, count); 2279 2280 i915_request_add(rq); 2281 return err; 2282 } 2283 2284 static int gen8_modify_self(struct intel_context *ce, 2285 const struct flex *flex, unsigned int count) 2286 { 2287 struct i915_request *rq; 2288 int err; 2289 2290 rq = i915_request_create(ce); 2291 if (IS_ERR(rq)) 2292 return PTR_ERR(rq); 2293 2294 err = gen8_load_flex(rq, 
ce, flex, count); 2295 2296 i915_request_add(rq); 2297 return err; 2298 } 2299 2300 static int gen8_configure_context(struct i915_gem_context *ctx, 2301 struct flex *flex, unsigned int count) 2302 { 2303 struct i915_gem_engines_iter it; 2304 struct intel_context *ce; 2305 int err = 0; 2306 2307 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 2308 GEM_BUG_ON(ce == ce->engine->kernel_context); 2309 2310 if (ce->engine->class != RENDER_CLASS) 2311 continue; 2312 2313 /* Otherwise OA settings will be set upon first use */ 2314 if (!intel_context_pin_if_active(ce)) 2315 continue; 2316 2317 flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu); 2318 err = gen8_modify_context(ce, flex, count); 2319 2320 intel_context_unpin(ce); 2321 if (err) 2322 break; 2323 } 2324 i915_gem_context_unlock_engines(ctx); 2325 2326 return err; 2327 } 2328 2329 static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable) 2330 { 2331 int err; 2332 struct intel_context *ce = stream->pinned_ctx; 2333 u32 format = stream->oa_buffer.format; 2334 struct flex regs_context[] = { 2335 { 2336 GEN8_OACTXCONTROL, 2337 stream->perf->ctx_oactxctrl_offset + 1, 2338 enable ? GEN8_OA_COUNTER_RESUME : 0, 2339 }, 2340 }; 2341 /* Offsets in regs_lri are not used since this configuration is only 2342 * applied using LRI. Initialize the correct offsets for posterity. 2343 */ 2344 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0 2345 struct flex regs_lri[] = { 2346 { 2347 GEN12_OAR_OACONTROL, 2348 GEN12_OAR_OACONTROL_OFFSET + 1, 2349 (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | 2350 (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0) 2351 }, 2352 { 2353 RING_CONTEXT_CONTROL(ce->engine->mmio_base), 2354 CTX_CONTEXT_CONTROL, 2355 _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE, 2356 enable ? 2357 GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 2358 0) 2359 }, 2360 }; 2361 2362 /* Modify the context image of pinned context with regs_context*/ 2363 err = intel_context_lock_pinned(ce); 2364 if (err) 2365 return err; 2366 2367 err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context)); 2368 intel_context_unlock_pinned(ce); 2369 if (err) 2370 return err; 2371 2372 /* Apply regs_lri using LRI with pinned context */ 2373 return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri)); 2374 } 2375 2376 /* 2377 * Manages updating the per-context aspects of the OA stream 2378 * configuration across all contexts. 2379 * 2380 * The awkward consideration here is that OACTXCONTROL controls the 2381 * exponent for periodic sampling which is primarily used for system 2382 * wide profiling where we'd like a consistent sampling period even in 2383 * the face of context switches. 2384 * 2385 * Our approach of updating the register state context (as opposed to 2386 * say using a workaround batch buffer) ensures that the hardware 2387 * won't automatically reload an out-of-date timer exponent even 2388 * transiently before a WA BB could be parsed. 2389 * 2390 * This function needs to: 2391 * - Ensure the currently running context's per-context OA state is 2392 * updated 2393 * - Ensure that all existing contexts will have the correct per-context 2394 * OA state if they are scheduled for use. 2395 * - Ensure any new contexts will be initialized with the correct 2396 * per-context OA state. 2397 * 2398 * Note: it's only the RCS/Render context that has any OA state. 
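 * (In practice: existing pinned contexts are rewritten via
 * gen8_modify_context(), the kernel context reprograms itself via
 * gen8_modify_self(), and brand new contexts pick the state up from
 * i915_oa_init_reg_state().)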
2399 * Note: the first flex register passed must always be R_PWR_CLK_STATE 2400 */ 2401 static int oa_configure_all_contexts(struct i915_perf_stream *stream, 2402 struct flex *regs, 2403 size_t num_regs) 2404 { 2405 struct drm_i915_private *i915 = stream->perf->i915; 2406 struct intel_engine_cs *engine; 2407 struct i915_gem_context *ctx, *cn; 2408 int err; 2409 2410 lockdep_assert_held(&stream->perf->lock); 2411 2412 /* 2413 * The OA register config is setup through the context image. This image 2414 * might be written to by the GPU on context switch (in particular on 2415 * lite-restore). This means we can't safely update a context's image, 2416 * if this context is scheduled/submitted to run on the GPU. 2417 * 2418 * We could emit the OA register config through the batch buffer but 2419 * this might leave small interval of time where the OA unit is 2420 * configured at an invalid sampling period. 2421 * 2422 * Note that since we emit all requests from a single ring, there 2423 * is still an implicit global barrier here that may cause a high 2424 * priority context to wait for an otherwise independent low priority 2425 * context. Contexts idle at the time of reconfiguration are not 2426 * trapped behind the barrier. 2427 */ 2428 spin_lock(&i915->gem.contexts.lock); 2429 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 2430 if (!kref_get_unless_zero(&ctx->ref)) 2431 continue; 2432 2433 spin_unlock(&i915->gem.contexts.lock); 2434 2435 err = gen8_configure_context(ctx, regs, num_regs); 2436 if (err) { 2437 i915_gem_context_put(ctx); 2438 return err; 2439 } 2440 2441 spin_lock(&i915->gem.contexts.lock); 2442 list_safe_reset_next(ctx, cn, link); 2443 i915_gem_context_put(ctx); 2444 } 2445 spin_unlock(&i915->gem.contexts.lock); 2446 2447 /* 2448 * After updating all other contexts, we need to modify ourselves. 2449 * If we don't modify the kernel_context, we do not get events while 2450 * idle. 2451 */ 2452 for_each_uabi_engine(engine, i915) { 2453 struct intel_context *ce = engine->kernel_context; 2454 2455 if (engine->class != RENDER_CLASS) 2456 continue; 2457 2458 regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); 2459 2460 err = gen8_modify_self(ce, regs, num_regs); 2461 if (err) 2462 return err; 2463 } 2464 2465 return 0; 2466 } 2467 2468 static int gen12_configure_all_contexts(struct i915_perf_stream *stream, 2469 const struct i915_oa_config *oa_config) 2470 { 2471 struct flex regs[] = { 2472 { 2473 GEN8_R_PWR_CLK_STATE, 2474 CTX_R_PWR_CLK_STATE, 2475 }, 2476 }; 2477 2478 return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); 2479 } 2480 2481 static int lrc_configure_all_contexts(struct i915_perf_stream *stream, 2482 const struct i915_oa_config *oa_config) 2483 { 2484 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2485 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2486 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) 2487 struct flex regs[] = { 2488 { 2489 GEN8_R_PWR_CLK_STATE, 2490 CTX_R_PWR_CLK_STATE, 2491 }, 2492 { 2493 GEN8_OACTXCONTROL, 2494 stream->perf->ctx_oactxctrl_offset + 1, 2495 }, 2496 { EU_PERF_CNTL0, ctx_flexeuN(0) }, 2497 { EU_PERF_CNTL1, ctx_flexeuN(1) }, 2498 { EU_PERF_CNTL2, ctx_flexeuN(2) }, 2499 { EU_PERF_CNTL3, ctx_flexeuN(3) }, 2500 { EU_PERF_CNTL4, ctx_flexeuN(4) }, 2501 { EU_PERF_CNTL5, ctx_flexeuN(5) }, 2502 { EU_PERF_CNTL6, ctx_flexeuN(6) }, 2503 }; 2504 #undef ctx_flexeuN 2505 int i; 2506 2507 regs[1].value = 2508 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2509 (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) |
2510 GEN8_OA_COUNTER_RESUME;
2511
2512 for (i = 2; i < ARRAY_SIZE(regs); i++)
2513 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2514
2515 return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
2516 }
2517
2518 static int gen8_enable_metric_set(struct i915_perf_stream *stream)
2519 {
2520 struct intel_uncore *uncore = stream->uncore;
2521 struct i915_oa_config *oa_config = stream->oa_config;
2522 int ret;
2523
2524 /*
2525 * We disable slice/unslice clock ratio change reports on SKL since
2526 * they are too noisy. The HW generates a lot of redundant reports
2527 * where the ratio hasn't really changed, causing a lot of redundant
2528 * work for the processes reading them and increasing the chances we'll
2529 * hit buffer overruns.
2530 *
2531 * Although we don't currently use the 'disable overrun' OABUFFER
2532 * feature, it's worth noting that clock ratio reports have to be
2533 * disabled before that feature can be used, since the HW doesn't
2534 * correctly block these reports.
2535 *
2536 * Currently none of the high-level metrics we have depend on knowing
2537 * this ratio to normalize.
2538 *
2539 * Note: This register is not power context saved and restored, but
2540 * that's OK considering that we disable RC6 while the OA unit is
2541 * enabled.
2542 *
2543 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2544 * be read back from automatically triggered reports, as part of the
2545 * RPT_ID field.
2546 */
2547 if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
2548 intel_uncore_write(uncore, GEN8_OA_DEBUG,
2549 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2550 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2551 }
2552
2553 /*
2554 * Update all contexts prior to writing the mux configurations, as we
2555 * need to make sure all slices/subslices are ON before writing to NOA
2556 * registers.
2557 */
2558 ret = lrc_configure_all_contexts(stream, oa_config);
2559 if (ret)
2560 return ret;
2561
2562 return emit_oa_config(stream, oa_config, oa_context(stream));
2563 }
2564
2565 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2566 {
2567 return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2568 (stream->sample_flags & SAMPLE_OA_REPORT) ?
2569 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2570 }
2571
2572 static int gen12_enable_metric_set(struct i915_perf_stream *stream)
2573 {
2574 struct intel_uncore *uncore = stream->uncore;
2575 struct i915_oa_config *oa_config = stream->oa_config;
2576 bool periodic = stream->periodic;
2577 u32 period_exponent = stream->period_exponent;
2578 int ret;
2579
2580 intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2581 /* Disable clk ratio reports, like previous Gens. */
2582 _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2583 GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2584 /*
2585 * If the user didn't require OA reports, instruct
2586 * the hardware not to emit ctx switch reports.
2587 */
2588 oag_report_ctx_switches(stream));
2589
2590 intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2591 (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2592 GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2593 (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2594 : 0);
2595
2596 /*
2597 * Update all contexts prior to writing the mux configurations, as we
2598 * need to make sure all slices/subslices are ON before writing to NOA
2599 * registers.
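 * (On Gen12 only R_PWR_CLK_STATE needs to be touched per context here;
 * the per-context OAR enable is applied separately below via
 * gen12_configure_oar_context().)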
2600 */ 2601 ret = gen12_configure_all_contexts(stream, oa_config); 2602 if (ret) 2603 return ret; 2604 2605 /* 2606 * For Gen12, performance counters are context 2607 * saved/restored. Only enable it for the context that 2608 * requested this. 2609 */ 2610 if (stream->ctx) { 2611 ret = gen12_configure_oar_context(stream, true); 2612 if (ret) 2613 return ret; 2614 } 2615 2616 return emit_oa_config(stream, oa_config, oa_context(stream)); 2617 } 2618 2619 static void gen8_disable_metric_set(struct i915_perf_stream *stream) 2620 { 2621 struct intel_uncore *uncore = stream->uncore; 2622 2623 /* Reset all contexts' slices/subslices configurations. */ 2624 lrc_configure_all_contexts(stream, NULL); 2625 2626 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2627 } 2628 2629 static void gen10_disable_metric_set(struct i915_perf_stream *stream) 2630 { 2631 struct intel_uncore *uncore = stream->uncore; 2632 2633 /* Reset all contexts' slices/subslices configurations. */ 2634 lrc_configure_all_contexts(stream, NULL); 2635 2636 /* Make sure we disable noa to save power. */ 2637 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); 2638 } 2639 2640 static void gen12_disable_metric_set(struct i915_perf_stream *stream) 2641 { 2642 struct intel_uncore *uncore = stream->uncore; 2643 2644 /* Reset all contexts' slices/subslices configurations. */ 2645 gen12_configure_all_contexts(stream, NULL); 2646 2647 /* disable the context save/restore or OAR counters */ 2648 if (stream->ctx) 2649 gen12_configure_oar_context(stream, false); 2650 2651 /* Make sure we disable noa to save power. */ 2652 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); 2653 } 2654 2655 static void gen7_oa_enable(struct i915_perf_stream *stream) 2656 { 2657 struct intel_uncore *uncore = stream->uncore; 2658 struct i915_gem_context *ctx = stream->ctx; 2659 u32 ctx_id = stream->specific_ctx_id; 2660 bool periodic = stream->periodic; 2661 u32 period_exponent = stream->period_exponent; 2662 u32 report_format = stream->oa_buffer.format; 2663 2664 /* 2665 * Reset buf pointers so we don't forward reports from before now. 2666 * 2667 * Think carefully if considering trying to avoid this, since it 2668 * also ensures status flags and the buffer itself are cleared 2669 * in error paths, and we have checks for invalid reports based 2670 * on the assumption that certain fields are written to zeroed 2671 * memory which this helps maintains. 2672 */ 2673 gen7_init_oa_buffer(stream); 2674 2675 intel_uncore_write(uncore, GEN7_OACONTROL, 2676 (ctx_id & GEN7_OACONTROL_CTX_MASK) | 2677 (period_exponent << 2678 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | 2679 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | 2680 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | 2681 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | 2682 GEN7_OACONTROL_ENABLE); 2683 } 2684 2685 static void gen8_oa_enable(struct i915_perf_stream *stream) 2686 { 2687 struct intel_uncore *uncore = stream->uncore; 2688 u32 report_format = stream->oa_buffer.format; 2689 2690 /* 2691 * Reset buf pointers so we don't forward reports from before now. 2692 * 2693 * Think carefully if considering trying to avoid this, since it 2694 * also ensures status flags and the buffer itself are cleared 2695 * in error paths, and we have checks for invalid reports based 2696 * on the assumption that certain fields are written to zeroed 2697 * memory which this helps maintains. 
2698 */ 2699 gen8_init_oa_buffer(stream); 2700 2701 /* 2702 * Note: we don't rely on the hardware to perform single context 2703 * filtering and instead filter on the cpu based on the context-id 2704 * field of reports 2705 */ 2706 intel_uncore_write(uncore, GEN8_OACONTROL, 2707 (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) | 2708 GEN8_OA_COUNTER_ENABLE); 2709 } 2710 2711 static void gen12_oa_enable(struct i915_perf_stream *stream) 2712 { 2713 struct intel_uncore *uncore = stream->uncore; 2714 u32 report_format = stream->oa_buffer.format; 2715 2716 /* 2717 * If we don't want OA reports from the OA buffer, then we don't even 2718 * need to program the OAG unit. 2719 */ 2720 if (!(stream->sample_flags & SAMPLE_OA_REPORT)) 2721 return; 2722 2723 gen12_init_oa_buffer(stream); 2724 2725 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 2726 (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) | 2727 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE); 2728 } 2729 2730 /** 2731 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream 2732 * @stream: An i915 perf stream opened for OA metrics 2733 * 2734 * [Re]enables hardware periodic sampling according to the period configured 2735 * when opening the stream. This also starts a hrtimer that will periodically 2736 * check for data in the circular OA buffer for notifying userspace (e.g. 2737 * during a read() or poll()). 2738 */ 2739 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 2740 { 2741 stream->perf->ops.oa_enable(stream); 2742 2743 if (stream->periodic) 2744 hrtimer_start(&stream->poll_check_timer, 2745 ns_to_ktime(POLL_PERIOD), 2746 HRTIMER_MODE_REL_PINNED); 2747 } 2748 2749 static void gen7_oa_disable(struct i915_perf_stream *stream) 2750 { 2751 struct intel_uncore *uncore = stream->uncore; 2752 2753 intel_uncore_write(uncore, GEN7_OACONTROL, 0); 2754 if (intel_wait_for_register(uncore, 2755 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 2756 50)) 2757 DRM_ERROR("wait for OA to be disabled timed out\n"); 2758 } 2759 2760 static void gen8_oa_disable(struct i915_perf_stream *stream) 2761 { 2762 struct intel_uncore *uncore = stream->uncore; 2763 2764 intel_uncore_write(uncore, GEN8_OACONTROL, 0); 2765 if (intel_wait_for_register(uncore, 2766 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 2767 50)) 2768 DRM_ERROR("wait for OA to be disabled timed out\n"); 2769 } 2770 2771 static void gen12_oa_disable(struct i915_perf_stream *stream) 2772 { 2773 struct intel_uncore *uncore = stream->uncore; 2774 2775 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0); 2776 if (intel_wait_for_register(uncore, 2777 GEN12_OAG_OACONTROL, 2778 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 2779 50)) 2780 DRM_ERROR("wait for OA to be disabled timed out\n"); 2781 } 2782 2783 /** 2784 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 2785 * @stream: An i915 perf stream opened for OA metrics 2786 * 2787 * Stops the OA unit from periodically writing counter reports into the 2788 * circular OA buffer. This also stops the hrtimer that periodically checks for 2789 * data in the circular OA buffer, for notifying userspace. 
2790 */ 2791 static void i915_oa_stream_disable(struct i915_perf_stream *stream) 2792 { 2793 stream->perf->ops.oa_disable(stream); 2794 2795 if (stream->periodic) 2796 hrtimer_cancel(&stream->poll_check_timer); 2797 } 2798 2799 static const struct i915_perf_stream_ops i915_oa_stream_ops = { 2800 .destroy = i915_oa_stream_destroy, 2801 .enable = i915_oa_stream_enable, 2802 .disable = i915_oa_stream_disable, 2803 .wait_unlocked = i915_oa_wait_unlocked, 2804 #ifndef __NetBSD__ 2805 .poll_wait = i915_oa_poll_wait, 2806 #endif 2807 .read = i915_oa_read, 2808 }; 2809 2810 /** 2811 * i915_oa_stream_init - validate combined props for OA stream and init 2812 * @stream: An i915 perf stream 2813 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 2814 * @props: The property state that configures stream (individually validated) 2815 * 2816 * While read_properties_unlocked() validates properties in isolation it 2817 * doesn't ensure that the combination necessarily makes sense. 2818 * 2819 * At this point it has been determined that userspace wants a stream of 2820 * OA metrics, but still we need to further validate the combined 2821 * properties are OK. 2822 * 2823 * If the configuration makes sense then we can allocate memory for 2824 * a circular OA buffer and apply the requested metric set configuration. 2825 * 2826 * Returns: zero on success or a negative error code. 2827 */ 2828 static int i915_oa_stream_init(struct i915_perf_stream *stream, 2829 struct drm_i915_perf_open_param *param, 2830 struct perf_open_properties *props) 2831 { 2832 struct i915_perf *perf = stream->perf; 2833 int format_size; 2834 int ret; 2835 2836 if (!props->engine) { 2837 DRM_DEBUG("OA engine not specified\n"); 2838 return -EINVAL; 2839 } 2840 2841 /* 2842 * If the sysfs metrics/ directory wasn't registered for some 2843 * reason then don't let userspace try their luck with config 2844 * IDs 2845 */ 2846 if (!perf->metrics_kobj) { 2847 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 2848 return -EINVAL; 2849 } 2850 2851 if (!(props->sample_flags & SAMPLE_OA_REPORT) && 2852 (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) { 2853 DRM_DEBUG("Only OA report sampling supported\n"); 2854 return -EINVAL; 2855 } 2856 2857 if (!perf->ops.enable_metric_set) { 2858 DRM_DEBUG("OA unit not supported\n"); 2859 return -ENODEV; 2860 } 2861 2862 /* 2863 * To avoid the complexity of having to accurately filter 2864 * counter reports and marshal to the appropriate client 2865 * we currently only allow exclusive access 2866 */ 2867 if (perf->exclusive_stream) { 2868 DRM_DEBUG("OA unit already in use\n"); 2869 return -EBUSY; 2870 } 2871 2872 if (!props->oa_format) { 2873 DRM_DEBUG("OA report format not specified\n"); 2874 return -EINVAL; 2875 } 2876 2877 stream->engine = props->engine; 2878 stream->uncore = stream->engine->gt->uncore; 2879 2880 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 2881 2882 format_size = perf->oa_formats[props->oa_format].size; 2883 2884 stream->sample_flags = props->sample_flags; 2885 stream->sample_size += format_size; 2886 2887 stream->oa_buffer.format_size = format_size; 2888 if (WARN_ON(stream->oa_buffer.format_size == 0)) 2889 return -EINVAL; 2890 2891 stream->hold_preemption = props->hold_preemption; 2892 2893 stream->oa_buffer.format = 2894 perf->oa_formats[props->oa_format].format; 2895 2896 stream->periodic = props->oa_periodic; 2897 if (stream->periodic) 2898 stream->period_exponent = props->oa_period_exponent; 2899 2900 if (stream->ctx) { 2901 ret = 
oa_get_render_ctx_id(stream); 2902 if (ret) { 2903 DRM_DEBUG("Invalid context id to filter with\n"); 2904 return ret; 2905 } 2906 } 2907 2908 ret = alloc_noa_wait(stream); 2909 if (ret) { 2910 DRM_DEBUG("Unable to allocate NOA wait batch buffer\n"); 2911 goto err_noa_wait_alloc; 2912 } 2913 2914 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set); 2915 if (!stream->oa_config) { 2916 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set); 2917 ret = -EINVAL; 2918 goto err_config; 2919 } 2920 2921 /* PRM - observability performance counters: 2922 * 2923 * OACONTROL, performance counter enable, note: 2924 * 2925 * "When this bit is set, in order to have coherent counts, 2926 * RC6 power state and trunk clock gating must be disabled. 2927 * This can be achieved by programming MMIO registers as 2928 * 0xA094=0 and 0xA090[31]=1" 2929 * 2930 * In our case we are expecting that taking pm + FORCEWAKE 2931 * references will effectively disable RC6. 2932 */ 2933 intel_engine_pm_get(stream->engine); 2934 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); 2935 2936 ret = alloc_oa_buffer(stream); 2937 if (ret) 2938 goto err_oa_buf_alloc; 2939 2940 stream->ops = &i915_oa_stream_ops; 2941 perf->exclusive_stream = stream; 2942 2943 ret = perf->ops.enable_metric_set(stream); 2944 if (ret) { 2945 DRM_DEBUG("Unable to enable metric set\n"); 2946 goto err_enable; 2947 } 2948 2949 DRM_DEBUG("opening stream oa config uuid=%s\n", 2950 stream->oa_config->uuid); 2951 2952 hrtimer_init(&stream->poll_check_timer, 2953 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2954 stream->poll_check_timer.function = oa_poll_check_timer_cb; 2955 DRM_INIT_WAITQUEUE(&stream->poll_wq, "i915perf"); 2956 selinit(&stream->poll_selq); 2957 spin_lock_init(&stream->oa_buffer.ptr_lock); 2958 2959 return 0; 2960 2961 err_enable: 2962 perf->exclusive_stream = NULL; 2963 perf->ops.disable_metric_set(stream); 2964 2965 free_oa_buffer(stream); 2966 2967 err_oa_buf_alloc: 2968 free_oa_configs(stream); 2969 2970 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); 2971 intel_engine_pm_put(stream->engine); 2972 2973 err_config: 2974 free_noa_wait(stream); 2975 2976 err_noa_wait_alloc: 2977 if (stream->ctx) 2978 oa_put_render_ctx_id(stream); 2979 2980 return ret; 2981 } 2982 2983 void i915_oa_init_reg_state(const struct intel_context *ce, 2984 const struct intel_engine_cs *engine) 2985 { 2986 struct i915_perf_stream *stream; 2987 2988 /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ 2989 2990 if (engine->class != RENDER_CLASS) 2991 return; 2992 2993 stream = engine->i915->perf.exclusive_stream; 2994 /* 2995 * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller 2996 * is already doing that, so nothing to be done for gen12 here. 2997 */ 2998 if (stream && INTEL_GEN(stream->perf->i915) < 12) 2999 gen8_update_reg_state_unlocked(ce, stream); 3000 } 3001 3002 /** 3003 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation 3004 * @stream: An i915 perf stream 3005 * @file: An i915 perf stream file 3006 * @buf: destination buffer given by userspace 3007 * @count: the number of bytes userspace wants to read 3008 * @ppos: (inout) file seek position (unused) 3009 * 3010 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to 3011 * ensure that if we've successfully copied any data then reporting that takes 3012 * precedence over any internal error status, so the data isn't lost. 
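 *
 * Concretely, the non-NetBSD variant below returns
 * offset ?: (ret ?: -EAGAIN): any bytes copied win, then any error,
 * and finally -EAGAIN when there was simply nothing to read yet.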
3013 * 3014 * For example ret will be -ENOSPC whenever there is more buffered data than 3015 * can be copied to userspace, but that's only interesting if we weren't able 3016 * to copy some data because it implies the userspace buffer is too small to 3017 * receive a single record (and we never split records). 3018 * 3019 * Another case with ret == -EFAULT is more of a grey area since it would seem 3020 * like bad form for userspace to ask us to overrun its buffer, but the user 3021 * knows best: 3022 * 3023 * http://yarchive.net/comp/linux/partial_reads_writes.html 3024 * 3025 * Returns: The number of bytes copied or a negative error code on failure. 3026 */ 3027 #ifdef __NetBSD__ 3028 static int i915_perf_read_locked(struct i915_perf_stream *stream, 3029 struct file *file, 3030 struct uio *buf, 3031 kauth_cred_t count, /* XXX dummy */ 3032 int ppos) /* XXX dummy */ 3033 { 3034 return stream->ops->read(stream, buf, count, ppos); 3035 } 3036 #else 3037 static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, 3038 struct file *file, 3039 char __user *buf, 3040 size_t count, 3041 loff_t *ppos) 3042 { 3043 /* Note we keep the offset (aka bytes read) separate from any 3044 * error status so that the final check for whether we return 3045 * the bytes read with a higher precedence than any error (see 3046 * comment below) doesn't need to be handled/duplicated in 3047 * stream->ops->read() implementations. 3048 */ 3049 size_t offset = 0; 3050 int ret = stream->ops->read(stream, buf, count, &offset); 3051 3052 return offset ?: (ret ?: -EAGAIN); 3053 } 3054 #endif 3055 3056 /** 3057 * i915_perf_read - handles read() FOP for i915 perf stream FDs 3058 * @file: An i915 perf stream file 3059 * @buf: destination buffer given by userspace 3060 * @count: the number of bytes userspace wants to read 3061 * @ppos: (inout) file seek position (unused) 3062 * 3063 * The entry point for handling a read() on a stream file descriptor from 3064 * userspace. Most of the work is left to the i915_perf_read_locked() and 3065 * &i915_perf_stream_ops->read but to save having stream implementations (of 3066 * which we might have multiple later) we handle blocking read here. 3067 * 3068 * We can also consistently treat trying to read from a disabled stream 3069 * as an IO error so implementations can assume the stream is enabled 3070 * while reading. 3071 * 3072 * Returns: The number of bytes copied or a negative error code on failure. 3073 */ 3074 #ifdef __NetBSD__ 3075 static int i915_perf_read(struct file *file, 3076 off_t *offset, 3077 struct uio *buf, 3078 kauth_cred_t count, /* XXX dummy */ 3079 int ppos) /* XXX dummy */ 3080 #else 3081 static ssize_t i915_perf_read(struct file *file, 3082 char __user *buf, 3083 size_t count, 3084 loff_t *ppos) 3085 #endif 3086 { 3087 #ifdef __NetBSD__ 3088 struct i915_perf_stream *stream = file->f_data; 3089 #else 3090 struct i915_perf_stream *stream = file->private_data; 3091 #endif 3092 struct i915_perf *perf = stream->perf; 3093 ssize_t ret; 3094 3095 /* To ensure it's handled consistently we simply treat all reads of a 3096 * disabled stream as an error. In particular it might otherwise lead 3097 * to a deadlock for blocking file descriptors... 3098 */ 3099 if (!stream->enabled) 3100 return -EIO; 3101 3102 #ifdef __NetBSD__ 3103 buf->uio_offset = *offset; 3104 if (!(file->f_flag & FNONBLOCK)) 3105 #else 3106 if (!(file->f_flags & O_NONBLOCK)) 3107 #endif 3108 { 3109 /* There's the small chance of false positives from 3110 * stream->ops->wait_unlocked. 
3111 * 3112 * E.g. with single context filtering since we only wait until 3113 * oabuffer has >= 1 report we don't immediately know whether 3114 * any reports really belong to the current context 3115 */ 3116 do { 3117 ret = stream->ops->wait_unlocked(stream); 3118 if (ret) 3119 return ret; 3120 3121 mutex_lock(&perf->lock); 3122 ret = i915_perf_read_locked(stream, file, 3123 buf, count, ppos); 3124 mutex_unlock(&perf->lock); 3125 } while (ret == -EAGAIN); 3126 } else { 3127 mutex_lock(&perf->lock); 3128 ret = i915_perf_read_locked(stream, file, buf, count, ppos); 3129 mutex_unlock(&perf->lock); 3130 } 3131 3132 /* We allow the poll checking to sometimes report false positive EPOLLIN 3133 * events where we might actually report EAGAIN on read() if there's 3134 * not really any data available. In this situation though we don't 3135 * want to enter a busy loop between poll() reporting a EPOLLIN event 3136 * and read() returning -EAGAIN. Clearing the oa.pollin state here 3137 * effectively ensures we back off until the next hrtimer callback 3138 * before reporting another EPOLLIN event. 3139 */ 3140 if (ret >= 0 || ret == -EAGAIN) { 3141 /* Maybe make ->pollin per-stream state if we support multiple 3142 * concurrent streams in the future. 3143 */ 3144 stream->pollin = false; 3145 } 3146 3147 return ret; 3148 } 3149 3150 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 3151 { 3152 struct i915_perf_stream *stream = 3153 container_of(hrtimer, typeof(*stream), poll_check_timer); 3154 unsigned long flags; 3155 3156 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 3157 if (oa_buffer_check(stream)) { 3158 stream->pollin = true; 3159 DRM_SPIN_WAKEUP_ONE(&stream->poll_wq, 3160 &stream->oa_buffer.ptr_lock); 3161 selnotify(&stream->poll_selq, POLLIN|POLLRDNORM, NOTE_SUBMIT); 3162 } 3163 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 3164 3165 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); 3166 3167 return HRTIMER_RESTART; 3168 } 3169 3170 #ifdef __NetBSD__ 3171 3172 static int 3173 i915_perf_poll(struct file *fp, int events) 3174 { 3175 struct i915_perf_stream *stream = fp->f_data; 3176 unsigned long flags; 3177 int revents = 0; 3178 3179 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 3180 if (stream->pollin) 3181 revents |= events & (POLLIN|POLLRDNORM); 3182 else 3183 selrecord(curlwp, &stream->poll_selq); 3184 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 3185 3186 return revents; 3187 } 3188 3189 #else 3190 3191 /** 3192 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 3193 * @stream: An i915 perf stream 3194 * @file: An i915 perf stream file 3195 * @wait: poll() state table 3196 * 3197 * For handling userspace polling on an i915 perf stream, this calls through to 3198 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 3199 * will be woken for new stream data. 3200 * 3201 * Note: The &perf->lock mutex has been taken to serialize 3202 * with any non-file-operation driver hooks. 3203 * 3204 * Returns: any poll events that are ready without sleeping 3205 */ 3206 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream, 3207 struct file *file, 3208 poll_table *wait) 3209 { 3210 __poll_t events = 0; 3211 3212 stream->ops->poll_wait(stream, file, wait); 3213 3214 /* Note: we don't explicitly check whether there's something to read 3215 * here since this path may be very hot depending on what else 3216 * userspace is polling, or on the timeout in use. 
We rely solely on
3217 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3218 * samples to read.
3219 */
3220 if (stream->pollin)
3221 events |= EPOLLIN;
3222
3223 return events;
3224 }
3225
3226 /**
3227 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3228 * @file: An i915 perf stream file
3229 * @wait: poll() state table
3230 *
3231 * For handling userspace polling on an i915 perf stream, this ensures
3232 * poll_wait() gets called with a wait queue that will be woken for new stream
3233 * data.
3234 *
3235 * Note: Implementation deferred to i915_perf_poll_locked()
3236 *
3237 * Returns: any poll events that are ready without sleeping
3238 */
3239 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3240 {
3241 struct i915_perf_stream *stream = file->private_data;
3242 struct i915_perf *perf = stream->perf;
3243 __poll_t ret;
3244
3245 mutex_lock(&perf->lock);
3246 ret = i915_perf_poll_locked(stream, file, wait);
3247 mutex_unlock(&perf->lock);
3248
3249 return ret;
3250 }
3251
3252 #endif /* __NetBSD__ */
3253
3254 /**
3255 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3256 * @stream: A disabled i915 perf stream
3257 *
3258 * [Re]enables the associated capture of data for this stream.
3259 *
3260 * If a stream was previously enabled then there's currently no intention
3261 * to provide userspace any guarantee about the preservation of previously
3262 * buffered data.
3263 */
3264 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3265 {
3266 if (stream->enabled)
3267 return;
3268
3269 /* Allow stream->ops->enable() to refer to this */
3270 stream->enabled = true;
3271
3272 if (stream->ops->enable)
3273 stream->ops->enable(stream);
3274
3275 if (stream->hold_preemption)
3276 intel_context_set_nopreempt(stream->pinned_ctx);
3277 }
3278
3279 /**
3280 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3281 * @stream: An enabled i915 perf stream
3282 *
3283 * Disables the associated capture of data for this stream.
3284 *
3285 * The intention is that disabling and re-enabling a stream will ideally be
3286 * cheaper than destroying and re-opening a stream with the same configuration,
3287 * though there are no formal guarantees about what state or buffered data
3288 * must be retained between disabling and re-enabling a stream.
3289 *
3290 * Note: while a stream is disabled it's considered an error for userspace
3291 * to attempt to read from the stream (-EIO).
3292 */
3293 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3294 {
3295 if (!stream->enabled)
3296 return;
3297
3298 /* Allow stream->ops->disable() to refer to this */
3299 stream->enabled = false;
3300
3301 if (stream->hold_preemption)
3302 intel_context_clear_nopreempt(stream->pinned_ctx);
3303
3304 if (stream->ops->disable)
3305 stream->ops->disable(stream);
3306 }
3307
3308 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3309 unsigned long metrics_set)
3310 {
3311 struct i915_oa_config *config;
3312 long ret = stream->oa_config->id;
3313
3314 config = i915_perf_get_oa_config(stream->perf, metrics_set);
3315 if (!config)
3316 return -EINVAL;
3317
3318 if (config != stream->oa_config) {
3319 int err;
3320
3321 /*
3322 * If OA is bound to a specific context, emit the
3323 * reconfiguration inline from that context. The update
3324 * will then be ordered with respect to submission on that
3325 * context.
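 * (On success the new config is swapped into stream->oa_config with
 * xchg() and the reference to the displaced config is dropped below.)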
3326 * 3327 * When set globally, we use a low priority kernel context, 3328 * so it will effectively take effect when idle. 3329 */ 3330 err = emit_oa_config(stream, config, oa_context(stream)); 3331 if (err == 0) 3332 config = xchg(&stream->oa_config, config); 3333 else 3334 ret = err; 3335 } 3336 3337 i915_oa_config_put(config); 3338 3339 return ret; 3340 } 3341 3342 /** 3343 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 3344 * @stream: An i915 perf stream 3345 * @cmd: the ioctl request 3346 * @arg: the ioctl data 3347 * 3348 * Note: The &perf->lock mutex has been taken to serialize 3349 * with any non-file-operation driver hooks. 3350 * 3351 * Returns: zero on success or a negative error code. Returns -EINVAL for 3352 * an unknown ioctl request. 3353 */ 3354 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, 3355 unsigned int cmd, 3356 unsigned long arg) 3357 { 3358 switch (cmd) { 3359 case I915_PERF_IOCTL_ENABLE: 3360 i915_perf_enable_locked(stream); 3361 return 0; 3362 case I915_PERF_IOCTL_DISABLE: 3363 i915_perf_disable_locked(stream); 3364 return 0; 3365 case I915_PERF_IOCTL_CONFIG: 3366 return i915_perf_config_locked(stream, arg); 3367 } 3368 3369 return -EINVAL; 3370 } 3371 3372 /** 3373 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 3374 * @file: An i915 perf stream file 3375 * @cmd: the ioctl request 3376 * @arg: the ioctl data 3377 * 3378 * Implementation deferred to i915_perf_ioctl_locked(). 3379 * 3380 * Returns: zero on success or a negative error code. Returns -EINVAL for 3381 * an unknown ioctl request. 3382 */ 3383 #ifdef __NetBSD__ 3384 static int i915_perf_ioctl(struct file *file, 3385 unsigned long cmd, 3386 void *cookie) 3387 #else 3388 static long i915_perf_ioctl(struct file *file, 3389 unsigned int cmd, 3390 unsigned long arg) 3391 #endif 3392 { 3393 #ifdef __NetBSD__ 3394 unsigned long arg = (unsigned long)(uintptr_t)cookie; 3395 struct i915_perf_stream *stream = file->f_data; 3396 #else 3397 struct i915_perf_stream *stream = file->private_data; 3398 #endif 3399 struct i915_perf *perf = stream->perf; 3400 long ret; 3401 3402 mutex_lock(&perf->lock); 3403 ret = i915_perf_ioctl_locked(stream, cmd, arg); 3404 mutex_unlock(&perf->lock); 3405 3406 return ret; 3407 } 3408 3409 /** 3410 * i915_perf_destroy_locked - destroy an i915 perf stream 3411 * @stream: An i915 perf stream 3412 * 3413 * Frees all resources associated with the given i915 perf @stream, disabling 3414 * any associated data capture in the process. 3415 * 3416 * Note: The &perf->lock mutex has been taken to serialize 3417 * with any non-file-operation driver hooks. 3418 */ 3419 static void i915_perf_destroy_locked(struct i915_perf_stream *stream) 3420 { 3421 if (stream->enabled) 3422 i915_perf_disable_locked(stream); 3423 3424 if (stream->ops->destroy) 3425 stream->ops->destroy(stream); 3426 3427 if (stream->ctx) 3428 i915_gem_context_put(stream->ctx); 3429 3430 kfree(stream); 3431 } 3432 3433 /** 3434 * i915_perf_release - handles userspace close() of a stream file 3435 * @inode: anonymous inode associated with file 3436 * @file: An i915 perf stream file 3437 * 3438 * Cleans up any resources associated with an open i915 perf stream file. 3439 * 3440 * NB: close() can't really fail from the userspace point of view. 3441 * 3442 * Returns: zero on success or a negative error code. 
3443 */
3444 #ifdef __NetBSD__
3445 static int i915_perf_close(struct file *fp)
3446 {
3447 struct i915_perf_stream *stream = fp->f_data;
3448 struct i915_perf *perf = stream->perf;
3449
3450 mutex_lock(&perf->lock);
3451 i915_perf_destroy_locked(stream);
3452 mutex_unlock(&perf->lock);
3453
3454 /* Release the reference the perf stream kept on the driver. */
3455 drm_dev_put(&perf->i915->drm);
3456
3457 return 0;
3458 }
3459 #else
3460 static int i915_perf_release(struct inode *inode, struct file *file)
3461 {
3462 struct i915_perf_stream *stream = file->private_data;
3463 struct i915_perf *perf = stream->perf;
3464
3465 mutex_lock(&perf->lock);
3466 i915_perf_destroy_locked(stream);
3467 mutex_unlock(&perf->lock);
3468
3469 /* Release the reference the perf stream kept on the driver. */
3470 drm_dev_put(&perf->i915->drm);
3471
3472 return 0;
3473 }
3474 #endif
3475
3476
3477 #ifdef __NetBSD__
3478 static int
3479 i915_perf_stat(struct file *fp, struct stat *st)
3480 {
3481 const dev_t devno = 0; /* XXX */
3482
3483 memset(st, 0, sizeof(*st));
3484
3485 st->st_dev = devno; /* XXX */
3486 st->st_ino = 0; /* XXX */
3487 st->st_uid = kauth_cred_geteuid(fp->f_cred);
3488 st->st_gid = kauth_cred_getegid(fp->f_cred);
3489 st->st_mode = S_IFCHR;
3490 st->st_rdev = devno;
3491
3492 return 0;
3493 }
3494
3495 static const struct fileops fops = {
3496 .fo_name = "i915perf",
3497 .fo_read = i915_perf_read,
3498 .fo_write = fbadop_write,
3499 .fo_ioctl = i915_perf_ioctl,
3500 .fo_fcntl = fnullop_fcntl,
3501 .fo_poll = i915_perf_poll,
3502 .fo_stat = i915_perf_stat,
3503 .fo_close = i915_perf_close,
3504 .fo_kqfilter = fnullop_kqfilter, /* XXX */
3505 .fo_restart = fnullop_restart,
3506 .fo_mmap = NULL,
3507 };
3508 #else
3509 static const struct file_operations fops = {
3510 .owner = THIS_MODULE,
3511 .llseek = no_llseek,
3512 .release = i915_perf_release,
3513 .poll = i915_perf_poll,
3514 .read = i915_perf_read,
3515 .unlocked_ioctl = i915_perf_ioctl,
3516 /* Our ioctls have no arguments, so it's safe to use the same function
3517 * to handle 32-bit compatibility.
3518 */
3519 .compat_ioctl = i915_perf_ioctl,
3520 };
3521 #endif
3522
3523
3524 /**
3525 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3526 * @perf: i915 perf instance
3527 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3528 * @props: individually validated u64 property value pairs
3529 * @file: drm file
3530 *
3531 * See i915_perf_open_ioctl() for interface details.
3532 *
3533 * Implements further stream config validation and stream initialization on
3534 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3535 * taken to serialize with any non-file-operation driver hooks.
3536 *
3537 * Note: at this point the @props have only been validated in isolation and
3538 * it's still necessary to validate that the combination of properties makes
3539 * sense.
3540 *
3541 * In the case where userspace is interested in OA unit metrics then further
3542 * config validation and stream initialization details will be handled by
3543 * i915_oa_stream_init(). The code here should only validate config state that
3544 * will be relevant to all stream types / backends.
3545 *
3546 * Returns: a newly opened i915 perf stream file descriptor or a negative error code on failure.
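 *
 * For orientation, a userspace open call might look like the following
 * sketch (illustrative only; drmIoctl() is libdrm's ioctl wrapper and the
 * uapi names come from i915_drm.h):
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = n_pairs,
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);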
3547 */
3548 static int
3549 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3550 struct drm_i915_perf_open_param *param,
3551 struct perf_open_properties *props,
3552 struct drm_file *file)
3553 {
3554 struct i915_gem_context *specific_ctx = NULL;
3555 struct i915_perf_stream *stream = NULL;
3556 unsigned long f_flags = 0;
3557 bool privileged_op = true;
3558 int stream_fd;
3559 int ret;
3560
3561 if (props->single_context) {
3562 u32 ctx_handle = props->ctx_handle;
3563 struct drm_i915_file_private *file_priv = file->driver_priv;
3564
3565 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3566 if (!specific_ctx) {
3567 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3568 ctx_handle);
3569 ret = -ENOENT;
3570 goto err;
3571 }
3572 }
3573
3574 /*
3575 * On Haswell the OA unit supports clock gating off for a specific
3576 * context and in this mode there's no visibility of metrics for the
3577 * rest of the system, which we consider acceptable for a
3578 * non-privileged client.
3579 *
3580 * For Gen8->11 the OA unit no longer supports clock gating off for a
3581 * specific context and the kernel can't securely stop the counters
3582 * from updating as system-wide / global values. Even though we can
3583 * filter reports based on the included context ID we can't block
3584 * clients from seeing the raw / global counter values via
3585 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3586 * enable the OA unit by default.
3587 *
3588 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3589 * per context basis. So we can relax requirements there if the user
3590 * doesn't request global stream access (i.e. query based sampling
3591 * using MI_REPORT_PERF_COUNT).
3592 */
3593 if (IS_HASWELL(perf->i915) && specific_ctx)
3594 privileged_op = false;
3595 else if (IS_GEN(perf->i915, 12) && specific_ctx &&
3596 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3597 privileged_op = false;
3598
3599 if (props->hold_preemption) {
3600 if (!props->single_context) {
3601 DRM_DEBUG("preemption disable with no context\n");
3602 ret = -EINVAL;
3603 goto err;
3604 }
3605 privileged_op = true;
3606 }
3607
3608 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3609 * we check a dev.i915.perf_stream_paranoid sysctl option
3610 * to determine if it's ok to access system wide OA counters
3611 * without CAP_SYS_ADMIN privileges.
3612 */
3613 if (privileged_op &&
3614 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
3615 DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
3616 ret = -EACCES;
3617 goto err_ctx;
3618 }
3619
3620 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3621 if (!stream) {
3622 ret = -ENOMEM;
3623 goto err_ctx;
3624 }
3625
3626 stream->perf = perf;
3627 stream->ctx = specific_ctx;
3628
3629 ret = i915_oa_stream_init(stream, param, props);
3630 if (ret)
3631 goto err_alloc;
3632
3633 /* we avoid simply assigning stream->sample_flags = props->sample_flags
3634 * to have _stream_init check the combination of sample flags more
3635 * thoroughly, but still this is the expected result at this point.
3636 */
3637 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3638 ret = -ENODEV;
3639 goto err_flags;
3640 }
3641
3642 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3643 f_flags |= O_CLOEXEC;
3644 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3645 f_flags |= O_NONBLOCK;
3646
3647 #ifdef __NetBSD__
3648 struct file *fp;
3649
3650 /* XXX errno NetBSD->Linux */
3651 ret = -fd_allocfile(&fp, &stream_fd);
3652 if (ret)
3653 goto err_flags;
3654
3655 fp->f_type = DTYPE_MISC;
3656 fp->f_flag = FREAD;
3657 if (f_flags & O_NONBLOCK)
3658 fp->f_flag |= FNONBLOCK;
3659 if (f_flags & O_CLOEXEC)
3660 fd_set_exclose(curlwp, stream_fd, true);
3661 fp->f_ops = &fops;
3662
3663 fd_affix(curproc, fp, stream_fd);
3664 #else
3665 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3666 if (stream_fd < 0) {
3667 ret = stream_fd;
3668 goto err_flags;
3669 }
3670 #endif
3671
3672 if (!(param->flags & I915_PERF_FLAG_DISABLED))
3673 i915_perf_enable_locked(stream);
3674
3675 /* Take a reference on the driver that will be kept with stream_fd
3676 * until its release.
3677 */
3678 drm_dev_get(&perf->i915->drm);
3679
3680 return stream_fd;
3681
3682 err_flags:
3683 if (stream->ops->destroy)
3684 stream->ops->destroy(stream);
3685 err_alloc:
3686 kfree(stream);
3687 err_ctx:
3688 if (specific_ctx)
3689 i915_gem_context_put(specific_ctx);
3690 err:
3691 return ret;
3692 }
3693
3694 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3695 {
3696 return div64_u64(1000000000ULL * (2ULL << exponent),
3697 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
3698 }
3699
3700 /**
3701 * read_properties_unlocked - validate + copy userspace stream open properties
3702 * @perf: i915 perf instance
3703 * @uprops: The array of u64 key value pairs given by userspace
3704 * @n_props: The number of key value pairs expected in @uprops
3705 * @props: The stream configuration built up while validating properties
3706 *
3707 * Note this function only validates properties in isolation; it doesn't
3708 * validate that the combination of properties makes sense or that all
3709 * properties necessary for a particular kind of stream have been set.
3710 *
3711 * Note that there currently aren't any ordering requirements for properties so
3712 * we shouldn't validate or assume anything about ordering here. This doesn't
3713 * rule out defining new properties with ordering requirements in the future.
3714 */
3715 static int read_properties_unlocked(struct i915_perf *perf,
3716 u64 __user *uprops,
3717 u32 n_props,
3718 struct perf_open_properties *props)
3719 {
3720 u64 __user *uprop = uprops;
3721 u32 i;
3722
3723 memset(props, 0, sizeof(struct perf_open_properties));
3724
3725 if (!n_props) {
3726 DRM_DEBUG("No i915 perf properties given\n");
3727 return -EINVAL;
3728 }
3729
3730 /* At the moment we only support using i915-perf on the RCS. */
3731 props->engine = intel_engine_lookup_user(perf->i915,
3732 I915_ENGINE_CLASS_RENDER,
3733 0);
3734 if (!props->engine) {
3735 DRM_DEBUG("No RENDER-capable engines\n");
3736 return -EINVAL;
3737 }
3738
3739 /* Considering that ID = 0 is reserved and assuming that we don't
3740 * (currently) expect any configurations to ever specify duplicate
3741 * values for a particular property ID then the last _PROP_MAX value is
3742 * one greater than the maximum number of properties we expect to get
3743 * from userspace.
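 *
 * For example (illustrative only), a periodic OA stream could be
 * described by the pairs:
 *
 *	{ DRM_I915_PERF_PROP_SAMPLE_OA,		1,
 *	  DRM_I915_PERF_PROP_OA_METRICS_SET,	<config id>,
 *	  DRM_I915_PERF_PROP_OA_FORMAT,		I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *	  DRM_I915_PERF_PROP_OA_EXPONENT,	<exponent> }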
3744 */ 3745 if (n_props >= DRM_I915_PERF_PROP_MAX) { 3746 DRM_DEBUG("More i915 perf properties specified than exist\n"); 3747 return -EINVAL; 3748 } 3749 3750 for (i = 0; i < n_props; i++) { 3751 u64 oa_period, oa_freq_hz; 3752 u64 id, value; 3753 int ret; 3754 3755 ret = get_user(id, uprop); 3756 if (ret) 3757 return ret; 3758 3759 ret = get_user(value, uprop + 1); 3760 if (ret) 3761 return ret; 3762 3763 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { 3764 DRM_DEBUG("Unknown i915 perf property ID\n"); 3765 return -EINVAL; 3766 } 3767 3768 switch ((enum drm_i915_perf_property_id)id) { 3769 case DRM_I915_PERF_PROP_CTX_HANDLE: 3770 props->single_context = 1; 3771 props->ctx_handle = value; 3772 break; 3773 case DRM_I915_PERF_PROP_SAMPLE_OA: 3774 if (value) 3775 props->sample_flags |= SAMPLE_OA_REPORT; 3776 break; 3777 case DRM_I915_PERF_PROP_OA_METRICS_SET: 3778 if (value == 0) { 3779 DRM_DEBUG("Unknown OA metric set ID\n"); 3780 return -EINVAL; 3781 } 3782 props->metrics_set = value; 3783 break; 3784 case DRM_I915_PERF_PROP_OA_FORMAT: 3785 if (value == 0 || value >= I915_OA_FORMAT_MAX) { 3786 DRM_DEBUG("Out-of-range OA report format %"PRIu64"\n", 3787 value); 3788 return -EINVAL; 3789 } 3790 if (!perf->oa_formats[value].size) { 3791 DRM_DEBUG("Unsupported OA report format %"PRIu64"\n", 3792 value); 3793 return -EINVAL; 3794 } 3795 props->oa_format = value; 3796 break; 3797 case DRM_I915_PERF_PROP_OA_EXPONENT: 3798 if (value > OA_EXPONENT_MAX) { 3799 DRM_DEBUG("OA timer exponent too high (> %u)\n", 3800 OA_EXPONENT_MAX); 3801 return -EINVAL; 3802 } 3803 3804 /* Theoretically we can program the OA unit to sample 3805 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns 3806 * for BXT. We don't allow such high sampling 3807 * frequencies by default unless root. 3808 */ 3809 3810 BUILD_BUG_ON(sizeof(oa_period) != 8); 3811 oa_period = oa_exponent_to_ns(perf, value); 3812 3813 /* This check is primarily to ensure that oa_period <= 3814 * UINT32_MAX (before passing to do_div which only 3815 * accepts a u32 denominator), but we can also skip 3816 * checking anything < 1Hz which implicitly can't be 3817 * limited via an integer oa_max_sample_rate. 3818 */ 3819 if (oa_period <= NSEC_PER_SEC) { 3820 u64 tmp = NSEC_PER_SEC; 3821 do_div(tmp, oa_period); 3822 oa_freq_hz = tmp; 3823 } else 3824 oa_freq_hz = 0; 3825 3826 if (oa_freq_hz > i915_oa_max_sample_rate && 3827 !capable(CAP_SYS_ADMIN)) { 3828 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl hw.drm2.i915.oa_max_sample_rate) %uHz without root privileges\n", 3829 i915_oa_max_sample_rate); 3830 return -EACCES; 3831 } 3832 3833 props->oa_periodic = true; 3834 props->oa_period_exponent = value; 3835 break; 3836 case DRM_I915_PERF_PROP_HOLD_PREEMPTION: 3837 props->hold_preemption = !!value; 3838 break; 3839 case DRM_I915_PERF_PROP_MAX: 3840 MISSING_CASE(id); 3841 return -EINVAL; 3842 } 3843 3844 uprop += 2; 3845 } 3846 3847 return 0; 3848 } 3849 3850 /** 3851 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD 3852 * @dev: drm device 3853 * @data: ioctl data copied from userspace (unvalidated) 3854 * @file: drm file 3855 * 3856 * Validates the stream open parameters given by userspace including flags 3857 * and an array of u64 key, value pair properties. 3858 * 3859 * Very little is assumed up front about the nature of the stream being 3860 * opened (for instance we don't assume it's for periodic OA unit metrics). 
An 3861 * i915-perf stream is expected to be a suitable interface for other forms of 3862 * buffered data written by the GPU besides periodic OA metrics. 3863 * 3864 * Note we copy the properties from userspace outside of the i915 perf 3865 * mutex to avoid an awkward lockdep with mmap_sem. 3866 * 3867 * Most of the implementation details are handled by 3868 * i915_perf_open_ioctl_locked() after taking the &perf->lock 3869 * mutex for serializing with any non-file-operation driver hooks. 3870 * 3871 * Return: A newly opened i915 Perf stream file descriptor or negative 3872 * error code on failure. 3873 */ 3874 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 3875 struct drm_file *file) 3876 { 3877 struct i915_perf *perf = &to_i915(dev)->perf; 3878 struct drm_i915_perf_open_param *param = data; 3879 struct perf_open_properties props; 3880 u32 known_open_flags; 3881 int ret; 3882 3883 if (!perf->i915) { 3884 DRM_DEBUG("i915 perf interface not available for this system\n"); 3885 return -ENOTSUPP; 3886 } 3887 3888 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | 3889 I915_PERF_FLAG_FD_NONBLOCK | 3890 I915_PERF_FLAG_DISABLED; 3891 if (param->flags & ~known_open_flags) { 3892 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n"); 3893 return -EINVAL; 3894 } 3895 3896 ret = read_properties_unlocked(perf, 3897 u64_to_user_ptr(param->properties_ptr), 3898 param->num_properties, 3899 &props); 3900 if (ret) 3901 return ret; 3902 3903 mutex_lock(&perf->lock); 3904 ret = i915_perf_open_ioctl_locked(perf, param, &props, file); 3905 mutex_unlock(&perf->lock); 3906 3907 return ret; 3908 } 3909 3910 /** 3911 * i915_perf_register - exposes i915-perf to userspace 3912 * @i915: i915 device instance 3913 * 3914 * In particular OA metric sets are advertised under a sysfs metrics/ 3915 * directory allowing userspace to enumerate valid IDs that can be 3916 * used to open an i915-perf stream. 3917 */ 3918 void i915_perf_register(struct drm_i915_private *i915) 3919 { 3920 #ifndef __NetBSD__ 3921 struct i915_perf *perf = &i915->perf; 3922 int ret; 3923 3924 if (!perf->i915) 3925 return; 3926 3927 /* To be sure we're synchronized with an attempted 3928 * i915_perf_open_ioctl(); considering that we register after 3929 * being exposed to userspace. 
3930 */ 3931 mutex_lock(&perf->lock); 3932 3933 #ifndef __NetBSD__ 3934 perf->metrics_kobj = 3935 kobject_create_and_add("metrics", 3936 &i915->drm.primary->kdev->kobj); 3937 if (!perf->metrics_kobj) 3938 goto exit; 3939 3940 sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr); 3941 #endif 3942 3943 if (IS_TIGERLAKE(i915)) { 3944 i915_perf_load_test_config_tgl(i915); 3945 } else if (INTEL_GEN(i915) >= 11) { 3946 i915_perf_load_test_config_icl(i915); 3947 } else if (IS_CANNONLAKE(i915)) { 3948 i915_perf_load_test_config_cnl(i915); 3949 } else if (IS_COFFEELAKE(i915)) { 3950 if (IS_CFL_GT2(i915)) 3951 i915_perf_load_test_config_cflgt2(i915); 3952 if (IS_CFL_GT3(i915)) 3953 i915_perf_load_test_config_cflgt3(i915); 3954 } else if (IS_GEMINILAKE(i915)) { 3955 i915_perf_load_test_config_glk(i915); 3956 } else if (IS_KABYLAKE(i915)) { 3957 if (IS_KBL_GT2(i915)) 3958 i915_perf_load_test_config_kblgt2(i915); 3959 else if (IS_KBL_GT3(i915)) 3960 i915_perf_load_test_config_kblgt3(i915); 3961 } else if (IS_BROXTON(i915)) { 3962 i915_perf_load_test_config_bxt(i915); 3963 } else if (IS_SKYLAKE(i915)) { 3964 if (IS_SKL_GT2(i915)) 3965 i915_perf_load_test_config_sklgt2(i915); 3966 else if (IS_SKL_GT3(i915)) 3967 i915_perf_load_test_config_sklgt3(i915); 3968 else if (IS_SKL_GT4(i915)) 3969 i915_perf_load_test_config_sklgt4(i915); 3970 } else if (IS_CHERRYVIEW(i915)) { 3971 i915_perf_load_test_config_chv(i915); 3972 } else if (IS_BROADWELL(i915)) { 3973 i915_perf_load_test_config_bdw(i915); 3974 } else if (IS_HASWELL(i915)) { 3975 i915_perf_load_test_config_hsw(i915); 3976 } 3977 3978 if (perf->test_config.id == 0) 3979 goto sysfs_error; 3980 3981 #ifdef __NetBSD__ /* XXX i915 sysfs */ 3982 __USE(ret); 3983 #else 3984 ret = sysfs_create_group(perf->metrics_kobj, 3985 &perf->test_config.sysfs_metric); 3986 if (ret) 3987 goto sysfs_error; 3988 #endif 3989 3990 perf->test_config.perf = perf; 3991 kref_init(&perf->test_config.ref); 3992 3993 goto exit; 3994 3995 sysfs_error: 3996 #ifndef __NetBSD__ 3997 kobject_put(perf->metrics_kobj); 3998 perf->metrics_kobj = NULL; 3999 #endif 4000 4001 exit: 4002 mutex_unlock(&perf->lock); 4003 #endif 4004 } 4005 4006 /** 4007 * i915_perf_unregister - hide i915-perf from userspace 4008 * @i915: i915 device instance 4009 * 4010 * i915-perf state cleanup is split up into an 'unregister' and 4011 * 'deinit' phase where the interface is first hidden from 4012 * userspace by i915_perf_unregister() before cleaning up 4013 * remaining state in i915_perf_fini(). 
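 *
 * i.e. the expected teardown ordering is i915_perf_unregister() first,
 * then i915_perf_fini().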
4014 */ 4015 void i915_perf_unregister(struct drm_i915_private *i915) 4016 { 4017 #ifndef __NetBSD__ 4018 struct i915_perf *perf = &i915->perf; 4019 4020 if (!perf->metrics_kobj) 4021 return; 4022 4023 sysfs_remove_group(perf->metrics_kobj, 4024 &perf->test_config.sysfs_metric); 4025 4026 kobject_put(perf->metrics_kobj); 4027 perf->metrics_kobj = NULL; 4028 #endif 4029 } 4030 4031 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr) 4032 { 4033 static const i915_reg_t flex_eu_regs[] = { 4034 EU_PERF_CNTL0, 4035 EU_PERF_CNTL1, 4036 EU_PERF_CNTL2, 4037 EU_PERF_CNTL3, 4038 EU_PERF_CNTL4, 4039 EU_PERF_CNTL5, 4040 EU_PERF_CNTL6, 4041 }; 4042 int i; 4043 4044 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) { 4045 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr) 4046 return true; 4047 } 4048 return false; 4049 } 4050 4051 #define ADDR_IN_RANGE(addr, start, end) \ 4052 ((addr) >= (start) && \ 4053 (addr) <= (end)) 4054 4055 #define REG_IN_RANGE(addr, start, end) \ 4056 ((addr) >= i915_mmio_reg_offset(start) && \ 4057 (addr) <= i915_mmio_reg_offset(end)) 4058 4059 #define REG_EQUAL(addr, mmio) \ 4060 ((addr) == i915_mmio_reg_offset(mmio)) 4061 4062 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) 4063 { 4064 return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) || 4065 REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) || 4066 REG_IN_RANGE(addr, OACEC0_0, OACEC7_1); 4067 } 4068 4069 static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 4070 { 4071 return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) || 4072 REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) || 4073 REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) || 4074 REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI); 4075 } 4076 4077 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 4078 { 4079 return gen7_is_valid_mux_addr(perf, addr) || 4080 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || 4081 REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8)); 4082 } 4083 4084 static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 4085 { 4086 return gen8_is_valid_mux_addr(perf, addr) || 4087 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || 4088 REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI); 4089 } 4090 4091 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 4092 { 4093 return gen7_is_valid_mux_addr(perf, addr) || 4094 ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) || 4095 REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) || 4096 REG_EQUAL(addr, HSW_MBVID2_MISR0); 4097 } 4098 4099 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 4100 { 4101 return gen7_is_valid_mux_addr(perf, addr) || 4102 ADDR_IN_RANGE(addr, 0x182300, 0x1823A4); 4103 } 4104 4105 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) 4106 { 4107 return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) || 4108 REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) || 4109 REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) || 4110 REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) || 4111 REG_EQUAL(addr, GEN12_OAA_DBG_REG) || 4112 REG_EQUAL(addr, GEN12_OAG_OA_PESS) || 4113 REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF); 4114 } 4115 4116 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 4117 { 4118 return REG_EQUAL(addr, NOA_WRITE) || 4119 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || 4120 REG_EQUAL(addr, GDT_CHICKEN_BITS) || 4121 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || 4122 REG_EQUAL(addr, RPM_CONFIG0) || 4123 REG_EQUAL(addr, RPM_CONFIG1) 
||
4124 REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
4125 }
4126
4127 static u32 mask_reg_value(u32 reg, u32 val)
4128 {
4129 /* HALF_SLICE_CHICKEN2 is programmed with the
4130 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
4131 * programmed by userspace doesn't change this.
4132 */
4133 if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
4134 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
4135
4136 /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
4137 * indicated by its name and a bunch of selection fields used by OA
4138 * configs.
4139 */
4140 if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
4141 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
4142
4143 return val;
4144 }
4145
4146 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
4147 bool (*is_valid)(struct i915_perf *perf, u32 addr),
4148 u32 __user *regs,
4149 u32 n_regs)
4150 {
4151 struct i915_oa_reg *oa_regs;
4152 int err;
4153 u32 i;
4154
4155 if (!n_regs)
4156 return NULL;
4157
4158 if (!access_ok(regs, n_regs * sizeof(u32) * 2))
4159 return ERR_PTR(-EFAULT);
4160
4161 /* No is_valid function means we're not allowing any register to be programmed. */
4162 GEM_BUG_ON(!is_valid);
4163 if (!is_valid)
4164 return ERR_PTR(-EINVAL);
4165
4166 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4167 if (!oa_regs)
4168 return ERR_PTR(-ENOMEM);
4169
4170 for (i = 0; i < n_regs; i++) {
4171 u32 addr, value;
4172
4173 err = get_user(addr, regs);
4174 if (err)
4175 goto addr_err;
4176
4177 if (!is_valid(perf, addr)) {
4178 DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
4179 err = -EINVAL;
4180 goto addr_err;
4181 }
4182
4183 err = get_user(value, regs + 1);
4184 if (err)
4185 goto addr_err;
4186
4187 oa_regs[i].addr = _MMIO(addr);
4188 oa_regs[i].value = mask_reg_value(addr, value);
4189
4190 regs += 2;
4191 }
4192
4193 return oa_regs;
4194
4195 addr_err:
4196 kfree(oa_regs);
4197 return ERR_PTR(err);
4198 }
4199
4200 #ifndef __NetBSD__ /* XXX i915 sysfs */
4201 static ssize_t show_dynamic_id(struct device *dev,
4202 struct device_attribute *attr,
4203 char *buf)
4204 {
4205 struct i915_oa_config *oa_config =
4206 container_of(attr, typeof(*oa_config), sysfs_metric_id);
4207
4208 return sprintf(buf, "%d\n", oa_config->id);
4209 }
4210 #endif
4211
4212 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4213 struct i915_oa_config *oa_config)
4214 {
4215 #ifdef __NetBSD__ /* XXX i915 sysfs */
4216 return 0;
4217 #else
4218 sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4219 oa_config->sysfs_metric_id.attr.name = "id";
4220 oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4221 oa_config->sysfs_metric_id.show = show_dynamic_id;
4222 oa_config->sysfs_metric_id.store = NULL;
4223
4224 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4225 oa_config->attrs[1] = NULL;
4226
4227 oa_config->sysfs_metric.name = oa_config->uuid;
4228 oa_config->sysfs_metric.attrs = oa_config->attrs;
4229
4230 return sysfs_create_group(perf->metrics_kobj,
4231 &oa_config->sysfs_metric);
4232 #endif
4233 }
4234
4235 /**
4236 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4237 * @dev: drm device
4238 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4239 * userspace (unvalidated)
4240 * @file: drm file
4241 *
4242 * Validates the submitted OA registers to be saved into a new OA config that
4243 * can then be used for programming the OA unit and its NOA network.
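 *
 * A userspace sketch (illustrative only; the uapi struct and ioctl come
 * from i915_drm.h, and the register pairs shown are hypothetical):
 *
 *	struct drm_i915_perf_oa_config cfg = {
 *		.uuid = "01234567-89ab-cdef-0123-456789abcdef",
 *		.n_mux_regs = n_mux,
 *		.mux_regs_ptr = (uintptr_t)mux_pairs, /* u32 (addr, value) pairs */
 *	};
 *	int id = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);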
4244 *
4245 * Returns: A newly allocated config number to be used with the perf open ioctl
4246 * or a negative error code on failure.
4247 */
4248 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4249 struct drm_file *file)
4250 {
4251 struct i915_perf *perf = &to_i915(dev)->perf;
4252 struct drm_i915_perf_oa_config *args = data;
4253 struct i915_oa_config *oa_config, *tmp;
4254 struct i915_oa_reg *regs;
4255 int err, id;
4256
4257 if (!perf->i915) {
4258 DRM_DEBUG("i915 perf interface not available for this system\n");
4259 return -ENOTSUPP;
4260 }
4261
4262 if (!perf->metrics_kobj) {
4263 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
4264 return -EINVAL;
4265 }
4266
4267 if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
4268 DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
4269 return -EACCES;
4270 }
4271
4272 if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4273 (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4274 (!args->flex_regs_ptr || !args->n_flex_regs)) {
4275 DRM_DEBUG("No OA registers given\n");
4276 return -EINVAL;
4277 }
4278
4279 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4280 if (!oa_config) {
4281 DRM_DEBUG("Failed to allocate memory for the OA config\n");
4282 return -ENOMEM;
4283 }
4284
4285 oa_config->perf = perf;
4286 kref_init(&oa_config->ref);
4287
4288 if (!uuid_is_valid(args->uuid)) {
4289 DRM_DEBUG("Invalid uuid format for OA config\n");
4290 err = -EINVAL;
4291 goto reg_err;
4292 }
4293
4294 /* Last character in oa_config->uuid will be 0 because oa_config was
4295 * kzalloc'ed.
4296 */
4297 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4298
4299 oa_config->mux_regs_len = args->n_mux_regs;
4300 regs = alloc_oa_regs(perf,
4301 perf->ops.is_valid_mux_reg,
4302 u64_to_user_ptr(args->mux_regs_ptr),
4303 args->n_mux_regs);
4304
4305 if (IS_ERR(regs)) {
4306 DRM_DEBUG("Failed to create OA config for mux_regs\n");
4307 err = PTR_ERR(regs);
4308 goto reg_err;
4309 }
4310 oa_config->mux_regs = regs;
4311
4312 oa_config->b_counter_regs_len = args->n_boolean_regs;
4313 regs = alloc_oa_regs(perf,
4314 perf->ops.is_valid_b_counter_reg,
4315 u64_to_user_ptr(args->boolean_regs_ptr),
4316 args->n_boolean_regs);
4317
4318 if (IS_ERR(regs)) {
4319 DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
4320 err = PTR_ERR(regs);
4321 goto reg_err;
4322 }
4323 oa_config->b_counter_regs = regs;
4324
4325 if (INTEL_GEN(perf->i915) < 8) {
4326 if (args->n_flex_regs != 0) {
4327 err = -EINVAL;
4328 goto reg_err;
4329 }
4330 } else {
4331 oa_config->flex_regs_len = args->n_flex_regs;
4332 regs = alloc_oa_regs(perf,
4333 perf->ops.is_valid_flex_reg,
4334 u64_to_user_ptr(args->flex_regs_ptr),
4335 args->n_flex_regs);
4336
4337 if (IS_ERR(regs)) {
4338 DRM_DEBUG("Failed to create OA config for flex_regs\n");
4339 err = PTR_ERR(regs);
4340 goto reg_err;
4341 }
4342 oa_config->flex_regs = regs;
4343 }
4344
4345 idr_preload(GFP_KERNEL);
4346 err = mutex_lock_interruptible(&perf->metrics_lock);
4347 if (err)
4348 goto reg_err;
4349
4350 /* We shouldn't have too many configs, so this iteration shouldn't be
4351 * too costly.
4352 */
4353 idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4354 if (!strcmp(tmp->uuid, oa_config->uuid)) {
4355 DRM_DEBUG("OA config already exists with this uuid\n");
4356 err = -EADDRINUSE;
4357 goto sysfs_err;
4358 }
4359 }
4360
4361 err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4362 if (err) {
4363 DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4364 goto sysfs_err;
4365 }
4366
4367 /* Config id 0 is invalid, id 1 for kernel stored test config. */
4368 oa_config->id = idr_alloc(&perf->metrics_idr,
4369 oa_config, 2,
4370 0, GFP_KERNEL);
4371 if (oa_config->id < 0) {
4372 DRM_DEBUG("Failed to allocate id for OA config\n");
4373 err = oa_config->id;
4374 goto sysfs_err;
4375 }
4376
4377 mutex_unlock(&perf->metrics_lock);
4378 idr_preload_end();
4379
4380 DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4381
4382 return oa_config->id;
4383
4384 sysfs_err:
4385 mutex_unlock(&perf->metrics_lock);
4386 idr_preload_end();
4387 reg_err:
4388 i915_oa_config_put(oa_config);
4389 DRM_DEBUG("Failed to add new OA config\n");
4390 return err;
4391 }
4392
4393 /**
4394 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4395 * @dev: drm device
4396 * @data: ioctl data (pointer to u64 integer) copied from userspace
4397 * @file: drm file
4398 *
4399 * Configs can be removed while being used; they will stop appearing in sysfs
4400 * and their content will be freed when the stream using the config is closed.
4401 *
4402 * Returns: 0 on success or a negative error code on failure.
4403 */
4404 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4405 struct drm_file *file)
4406 {
4407 struct i915_perf *perf = &to_i915(dev)->perf;
4408 u64 *arg = data;
4409 struct i915_oa_config *oa_config;
4410 int ret;
4411
4412 if (!perf->i915) {
4413 DRM_DEBUG("i915 perf interface not available for this system\n");
4414 return -ENOTSUPP;
4415 }
4416
4417 if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
4418 DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
4419 return -EACCES;
4420 }
4421
4422 ret = mutex_lock_interruptible(&perf->metrics_lock);
4423 if (ret)
4424 return ret;
4425
4426 oa_config = idr_find(&perf->metrics_idr, *arg);
4427 if (!oa_config) {
4428 DRM_DEBUG("Failed to remove unknown OA config\n");
4429 ret = -ENOENT;
4430 goto err_unlock;
4431 }
4432
4433 GEM_BUG_ON(*arg != oa_config->id);
4434
4435 #ifndef __NetBSD__
4436 sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4437 #endif
4438
4439 idr_remove(&perf->metrics_idr, *arg);
4440
4441 mutex_unlock(&perf->metrics_lock);
4442
4443 DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4444
4445 i915_oa_config_put(oa_config);
4446
4447 return 0;
4448
4449 err_unlock:
4450 mutex_unlock(&perf->metrics_lock);
4451 return ret;
4452 }
4453
4454 #ifndef __NetBSD__ /* XXX i915 perf sysctl */
4455
4456 static struct ctl_table oa_table[] = {
4457 {
4458 .procname = "perf_stream_paranoid",
4459 .data = &i915_perf_stream_paranoid,
4460 .maxlen = sizeof(i915_perf_stream_paranoid),
4461 .mode = 0644,
4462 .proc_handler = proc_dointvec_minmax,
4463 .extra1 = SYSCTL_ZERO,
4464 .extra2 = SYSCTL_ONE,
4465 },
4466 {
4467 .procname = "oa_max_sample_rate",
4468 .data = &i915_oa_max_sample_rate,
4469 .maxlen = sizeof(i915_oa_max_sample_rate),
4470 .mode = 0644,
4471 .proc_handler = proc_dointvec_minmax,
4472 .extra1 = SYSCTL_ZERO,
4473 .extra2 = &oa_sample_rate_hard_limit,
4474 },
4475 {}
4476 };
4477
4478 static
struct ctl_table i915_root[] = {
4479 {
4480 .procname = "i915",
4481 .maxlen = 0,
4482 .mode = 0555,
4483 .child = oa_table,
4484 },
4485 {}
4486 };
4487
4488 static struct ctl_table dev_root[] = {
4489 {
4490 .procname = "dev",
4491 .maxlen = 0,
4492 .mode = 0555,
4493 .child = i915_root,
4494 },
4495 {}
4496 };
4497
4498 #endif /* !__NetBSD__ */
4499
4500 /**
4501 * i915_perf_init - initialize i915-perf state on module bind
4502 * @i915: i915 device instance
4503 *
4504 * Initializes i915-perf state without exposing anything to userspace.
4505 *
4506 * Note: i915-perf initialization is split into an 'init' and 'register'
4507 * phase with the i915_perf_register() exposing state to userspace.
4508 */
4509 void i915_perf_init(struct drm_i915_private *i915)
4510 {
4511 struct i915_perf *perf = &i915->perf;
4512
4513 /* XXX const struct i915_perf_ops! */
4514
4515 if (IS_HASWELL(i915)) {
4516 perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4517 perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4518 perf->ops.is_valid_flex_reg = NULL;
4519 perf->ops.enable_metric_set = hsw_enable_metric_set;
4520 perf->ops.disable_metric_set = hsw_disable_metric_set;
4521 perf->ops.oa_enable = gen7_oa_enable;
4522 perf->ops.oa_disable = gen7_oa_disable;
4523 perf->ops.read = gen7_oa_read;
4524 perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4525
4526 perf->oa_formats = hsw_oa_formats;
4527 } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4528 /* Note that although we could theoretically also support the
4529 * legacy ringbuffer mode on BDW (and earlier iterations of
4530 * this driver, before upstreaming, did this) it didn't seem
4531 * worth the complexity to maintain now that BDW+ enables
4532 * execlist mode by default.
4533 */
4534 perf->ops.read = gen8_oa_read;
4535
4536 if (IS_GEN_RANGE(i915, 8, 9)) {
4537 perf->oa_formats = gen8_plus_oa_formats;
4538
4539 perf->ops.is_valid_b_counter_reg =
4540 gen7_is_valid_b_counter_addr;
4541 perf->ops.is_valid_mux_reg =
4542 gen8_is_valid_mux_addr;
4543 perf->ops.is_valid_flex_reg =
4544 gen8_is_valid_flex_addr;
4545
4546 if (IS_CHERRYVIEW(i915)) {
4547 perf->ops.is_valid_mux_reg =
4548 chv_is_valid_mux_addr;
4549 }
4550
4551 perf->ops.oa_enable = gen8_oa_enable;
4552 perf->ops.oa_disable = gen8_oa_disable;
4553 perf->ops.enable_metric_set = gen8_enable_metric_set;
4554 perf->ops.disable_metric_set = gen8_disable_metric_set;
4555 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4556
4557 if (IS_GEN(i915, 8)) {
4558 perf->ctx_oactxctrl_offset = 0x120;
4559 perf->ctx_flexeu0_offset = 0x2ce;
4560
4561 perf->gen8_valid_ctx_bit = BIT(25);
4562 } else {
4563 perf->ctx_oactxctrl_offset = 0x128;
4564 perf->ctx_flexeu0_offset = 0x3de;
4565
4566 perf->gen8_valid_ctx_bit = BIT(16);
4567 }
4568 } else if (IS_GEN_RANGE(i915, 10, 11)) {
4569 perf->oa_formats = gen8_plus_oa_formats;
4570
4571 perf->ops.is_valid_b_counter_reg =
4572 gen7_is_valid_b_counter_addr;
4573 perf->ops.is_valid_mux_reg =
4574 gen10_is_valid_mux_addr;
4575 perf->ops.is_valid_flex_reg =
4576 gen8_is_valid_flex_addr;
4577
4578 perf->ops.oa_enable = gen8_oa_enable;
4579 perf->ops.oa_disable = gen8_oa_disable;
4580 perf->ops.enable_metric_set = gen8_enable_metric_set;
4581 perf->ops.disable_metric_set = gen10_disable_metric_set;
4582 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4583
4584 if (IS_GEN(i915, 10)) {
4585 perf->ctx_oactxctrl_offset = 0x128;
4586 perf->ctx_flexeu0_offset = 0x3de;
4587 } else {
4588 perf->ctx_oactxctrl_offset = 0x124;
4589 perf->ctx_flexeu0_offset = 0x78e;
4590 }
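			/*
			 * gen8_valid_ctx_bit is the bit in an OA report
			 * header that the HW sets when the report's context
			 * ID field is valid; the OA read path earlier in
			 * this file tests it when filtering reports.
			 */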
4591 perf->gen8_valid_ctx_bit = BIT(16); 4592 } else if (IS_GEN(i915, 12)) { 4593 perf->oa_formats = gen12_oa_formats; 4594 4595 perf->ops.is_valid_b_counter_reg = 4596 gen12_is_valid_b_counter_addr; 4597 perf->ops.is_valid_mux_reg = 4598 gen12_is_valid_mux_addr; 4599 perf->ops.is_valid_flex_reg = 4600 gen8_is_valid_flex_addr; 4601 4602 perf->ops.oa_enable = gen12_oa_enable; 4603 perf->ops.oa_disable = gen12_oa_disable; 4604 perf->ops.enable_metric_set = gen12_enable_metric_set; 4605 perf->ops.disable_metric_set = gen12_disable_metric_set; 4606 perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read; 4607 4608 perf->ctx_flexeu0_offset = 0; 4609 perf->ctx_oactxctrl_offset = 0x144; 4610 } 4611 } 4612 4613 if (perf->ops.enable_metric_set) { 4614 mutex_init(&perf->lock); 4615 4616 oa_sample_rate_hard_limit = 1000 * 4617 (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2); 4618 4619 mutex_init(&perf->metrics_lock); 4620 idr_init(&perf->metrics_idr); 4621 4622 /* We set up some ratelimit state to potentially throttle any 4623 * _NOTES about spurious, invalid OA reports which we don't 4624 * forward to userspace. 4625 * 4626 * We print a _NOTE about any throttling when closing the 4627 * stream instead of waiting until driver _fini which no one 4628 * would ever see. 4629 * 4630 * Using the same limiting factors as printk_ratelimit() 4631 */ 4632 ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10); 4633 /* Since we use a DRM_NOTE for spurious reports it would be 4634 * inconsistent to let __ratelimit() automatically print a 4635 * warning for throttling. 4636 */ 4637 ratelimit_set_flags(&perf->spurious_report_rs, 4638 RATELIMIT_MSG_ON_RELEASE); 4639 4640 atomic64_set(&perf->noa_programming_delay, 4641 500 * 1000 /* 500us */); 4642 4643 perf->i915 = i915; 4644 } 4645 } 4646 4647 static int destroy_config(int id, void *p, void *data) 4648 { 4649 i915_oa_config_put(p); 4650 return 0; 4651 } 4652 4653 void i915_perf_sysctl_register(void) 4654 { 4655 #ifndef __NetBSD__ /* XXX i915 perf sysctl */ 4656 sysctl_header = register_sysctl_table(dev_root); 4657 #endif 4658 } 4659 4660 void i915_perf_sysctl_unregister(void) 4661 { 4662 #ifndef __NetBSD__ /* XXX i915 perf sysctl */ 4663 unregister_sysctl_table(sysctl_header); 4664 #endif 4665 } 4666 4667 /** 4668 * i915_perf_fini - Counter part to i915_perf_init() 4669 * @i915: i915 device instance 4670 */ 4671 void i915_perf_fini(struct drm_i915_private *i915) 4672 { 4673 struct i915_perf *perf = &i915->perf; 4674 4675 if (!perf->i915) 4676 return; 4677 4678 if (perf->ops.enable_metric_set) { 4679 mutex_destroy(&perf->metrics_lock); 4680 mutex_destroy(&perf->lock); 4681 } 4682 4683 idr_for_each(&perf->metrics_idr, destroy_config, perf); 4684 idr_destroy(&perf->metrics_idr); 4685 4686 memset(&perf->ops, 0, sizeof(perf->ops)); 4687 perf->i915 = NULL; 4688 } 4689 4690 /** 4691 * i915_perf_ioctl_version - Version of the i915-perf subsystem 4692 * 4693 * This version number is used by userspace to detect available features. 4694 */ 4695 int i915_perf_ioctl_version(void) 4696 { 4697 /* 4698 * 1: Initial version 4699 * I915_PERF_IOCTL_ENABLE 4700 * I915_PERF_IOCTL_DISABLE 4701 * 4702 * 2: Added runtime modification of OA config. 4703 * I915_PERF_IOCTL_CONFIG 4704 * 4705 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold 4706 * preemption on a particular context so that performance data is 4707 * accessible from a delta of MI_RPC reports without looking at the 4708 * OA buffer. 
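 *
 * A userspace sketch for querying this revision (illustrative only; the
 * getparam names come from i915_drm.h):
 *
 *	int revision = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &revision,
 *	};
 *	drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);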
4709 */ 4710 return 3; 4711 } 4712 4713 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 4714 #include "selftests/i915_perf.c" 4715 #endif 4716