1 /*
2 * Copyright © 2015-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Robert Bragg <robert@sixbynine.org>
25 */
26
27
28 /**
29 * DOC: i915 Perf Overview
30 *
31 * Gen graphics supports a large number of performance counters that can help
32 * driver and application developers understand and optimize their use of the
33 * GPU.
34 *
35 * This i915 perf interface enables userspace to configure and open a file
36 * descriptor representing a stream of GPU metrics which can then be read() as
37 * a stream of sample records.
38 *
39 * The interface is particularly suited to exposing buffered metrics that are
40 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
41 *
42 * Streams representing a single context are accessible to applications with a
43 * corresponding drm file descriptor, such that OpenGL can use the interface
44 * without special privileges. Access to system-wide metrics requires root
45 * privileges by default, unless changed via the dev.i915.perf_event_paranoid
46 * sysctl option.
47 *
48 */
49
50 /**
51 * DOC: i915 Perf History and Comparison with Core Perf
52 *
53 * The interface was initially inspired by the core Perf infrastructure but
54 * some notable differences are:
55 *
56 * i915 perf file descriptors represent a "stream" instead of an "event"; where
57 * a perf event primarily corresponds to a single 64bit value, while a stream
58 * might sample sets of tightly-coupled counters, depending on the
59 * configuration. For example the Gen OA unit isn't designed to support
60 * orthogonal configurations of individual counters; it's configured for a set
61 * of related counters. Samples for an i915 perf stream capturing OA metrics
62 * will include a set of counter values packed in a compact HW specific format.
63 * The OA unit supports a number of different packing formats which can be
64 * selected by the user opening the stream. Perf has support for grouping
65 * events, but each event in the group is configured, validated and
66 * authenticated individually with separate system calls.
67 *
68 * i915 perf stream configurations are provided as an array of u64 (key,value)
69 * pairs, instead of a fixed struct with multiple miscellaneous config members,
70 * interleaved with event-type specific members.
71 *
72 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
73 * The supported metrics are being written to memory by the GPU unsynchronized
74 * with the CPU, using HW specific packing formats for counter sets. Sometimes
75 * the constraints on HW configuration require reports to be filtered before it
76 * would be acceptable to expose them to unprivileged applications - to hide
77 * the metrics of other processes/contexts. For these use cases a read() based
78 * interface is a good fit, and provides an opportunity to filter data as it
79 * gets copied from the GPU mapped buffers to userspace buffers.
80 *
81 *
82 * Issues hit with first prototype based on Core Perf
83 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
84 *
85 * The first prototype of this driver was based on the core perf
86 * infrastructure, and while we did make that mostly work, with some changes to
87 * perf, we found we were breaking or working around too many assumptions baked
88 * into perf's currently cpu centric design.
89 *
90 * In the end we didn't see a clear benefit to making perf's implementation and
91 * interface more complex by changing design assumptions while we knew we still
92 * wouldn't be able to use any existing perf based userspace tools.
93 *
94 * Also considering the Gen specific nature of the Observability hardware and
95 * how userspace will sometimes need to combine i915 perf OA metrics with
96 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
97 * expecting the interface to be used by a platform specific userspace such as
98 * OpenGL or tools. This is to say; we aren't inherently missing out on having
99 * a standard vendor/architecture agnostic interface by not using perf.
100 *
101 *
102 * For posterity, in case we might re-visit trying to adapt core perf to be
103 * better suited to exposing i915 metrics these were the main pain points we
104 * hit:
105 *
106 * - The perf based OA PMU driver broke some significant design assumptions:
107 *
108 * Existing perf pmus are used for profiling work on a cpu and we were
109 * introducing the idea of _IS_DEVICE pmus with different security
110 * implications, the need to fake cpu-related data (such as user/kernel
111 * registers) to fit with perf's current design, and adding _DEVICE records
112 * as a way to forward device-specific status records.
113 *
114 * The OA unit writes reports of counters into a circular buffer, without
115 * involvement from the CPU, making our PMU driver the first of a kind.
116 *
117 * Given the way we were periodically forward data from the GPU-mapped, OA
118 * buffer to perf's buffer, those bursts of sample writes looked to perf like
119 * we were sampling too fast and so we had to subvert its throttling checks.
120 *
121 * Perf supports groups of counters and allows those to be read via
122 * transactions internally but transactions currently seem designed to be
123 * explicitly initiated from the cpu (say in response to a userspace read())
124 * and while we could pull a report out of the OA buffer we can't
125 * trigger a report from the cpu on demand.
126 *
127 * Related to being report based; the OA counters are configured in HW as a
128 * set while perf generally expects counter configurations to be orthogonal.
129 * Although counters can be associated with a group leader as they are
130 * opened, there's no clear precedent for being able to provide group-wide
131 * configuration attributes (for example we want to let userspace choose the
132 * OA unit report format used to capture all counters in a set, or specify a
133 * GPU context to filter metrics on). We avoided using perf's grouping
134 * feature and forwarded OA reports to userspace via perf's 'raw' sample
135 * field. This suited our userspace well considering how coupled the counters
136 * are when dealing with normalizing. It would be inconvenient to split
137 * counters up into separate events, only to require userspace to recombine
138 * them. For Mesa it's also convenient to be forwarded raw, periodic reports
139 * for combining with the side-band raw reports it captures using
140 * MI_REPORT_PERF_COUNT commands.
141 *
142 * - As a side note on perf's grouping feature; there was also some concern
143 * that using PERF_FORMAT_GROUP as a way to pack together counter values
144 * would quite drastically inflate our sample sizes, which would likely
145 * lower the effective sampling resolutions we could use when the available
146 * memory bandwidth is limited.
147 *
148 * With the OA unit's report formats, counters are packed together as 32
149 * or 40bit values, with the largest report size being 256 bytes.
150 *
151 * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
152 * documented ordering to the values, implying PERF_FORMAT_ID must also be
153 * used to add a 64bit ID before each value; giving 16 bytes per counter.
154 *
155 * Related to counter orthogonality; we can't time share the OA unit, while
156 * event scheduling is a central design idea within perf for allowing
157 * userspace to open + enable more events than can be configured in HW at any
158 * one time. The OA unit is not designed to allow re-configuration while in
159 * use. We can't reconfigure the OA unit without losing internal OA unit
160 * state which we can't access explicitly to save and restore. Reconfiguring
161 * the OA unit is also relatively slow, involving ~100 register writes. From
162 * userspace Mesa also depends on a stable OA configuration when emitting
163 * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
164 * disabled while there are outstanding MI_RPC commands lest we hang the
165 * command streamer.
166 *
167 * The contents of sample records aren't extensible by device drivers (i.e.
168 * the sample_type bits). As an example; Sourab Gupta had been looking to
169 * attach GPU timestamps to our OA samples. We were shoehorning OA reports
170 * into sample records by using the 'raw' field, but it's tricky to pack more
171 * than one thing into this field because events/core.c currently only lets a
172 * pmu give a single raw data pointer plus len which will be copied into the
173 * ring buffer. To include more than the OA report we'd have to copy the
174 * report into an intermediate larger buffer. I'd been considering allowing a
175 * vector of data+len values to be specified for copying the raw data, but
176 * it felt like a kludge to being using the raw field for this purpose.
177 *
178 * - It felt like our perf based PMU was making some technical compromises
179 * just for the sake of using perf:
180 *
181 * perf_event_open() requires events to either relate to a pid or a specific
182 * cpu core, while our device pmu related to neither. Events opened with a
183 * pid will be automatically enabled/disabled according to the scheduling of
184 * that process - so not appropriate for us. When an event is related to a
185 * cpu id, perf ensures pmu methods will be invoked via an inter process
186 * interrupt on that core. To avoid invasive changes our userspace opened OA
187 * perf events for a specific cpu. This was workable but it meant the
188 * majority of the OA driver ran in atomic context, including all OA report
189 * forwarding, which wasn't really necessary in our case and seems to make
190 * our locking requirements somewhat complex as we handled the interaction
191 * with the rest of the i915 driver.
192 */
193
194 #include <linux/anon_inodes.h>
195 #include <linux/sizes.h>
196 #include <linux/uuid.h>
197
198 #include "gem/i915_gem_context.h"
199 #include "gem/i915_gem_internal.h"
200 #include "gt/intel_engine_pm.h"
201 #include "gt/intel_engine_regs.h"
202 #include "gt/intel_engine_user.h"
203 #include "gt/intel_execlists_submission.h"
204 #include "gt/intel_gpu_commands.h"
205 #include "gt/intel_gt.h"
206 #include "gt/intel_gt_clock_utils.h"
207 #include "gt/intel_gt_regs.h"
208 #include "gt/intel_lrc.h"
209 #include "gt/intel_lrc_reg.h"
210 #include "gt/intel_ring.h"
211
212 #include "i915_drv.h"
213 #include "i915_file_private.h"
214 #include "i915_perf.h"
215 #include "i915_perf_oa_regs.h"
216
217 /* HW requires this to be a power of two, between 128k and 16M, though driver
218 * is currently generally designed assuming the largest 16M size is used such
219 * that the overflow cases are unlikely in normal operation.
220 */
221 #define OA_BUFFER_SIZE SZ_16M
222
223 #define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1))
224
225 /**
226 * DOC: OA Tail Pointer Race
227 *
228 * There's a HW race condition between OA unit tail pointer register updates and
229 * writes to memory whereby the tail pointer can sometimes get ahead of what's
230 * been written out to the OA buffer so far (in terms of what's visible to the
231 * CPU).
232 *
233 * Although this can be observed explicitly while copying reports to userspace
234 * by checking for a zeroed report-id field in tail reports, we want to account
235 * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of
236 * redundant read() attempts.
237 *
238 * We workaround this issue in oa_buffer_check_unlocked() by reading the reports
239 * in the OA buffer, starting from the tail reported by the HW until we find a
240 * report with its first 2 dwords not 0 meaning its previous report is
241 * completely in memory and ready to be read. Those dwords are also set to 0
242 * once read and the whole buffer is cleared upon OA buffer initialization. The
243 * first dword is the reason for this report while the second is the timestamp,
244 * making the chances of having those 2 fields at 0 fairly unlikely. A more
245 * detailed explanation is available in oa_buffer_check_unlocked().
246 *
247 * Most of the implementation details for this workaround are in
248 * oa_buffer_check_unlocked() and _append_oa_reports()
249 *
250 * Note for posterity: previously the driver used to define an effective tail
251 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
252 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
253 * This was flawed considering that the OA unit may also automatically generate
254 * non-periodic reports (such as on context switch) or the OA unit may be
255 * enabled without any periodic sampling.
256 */
257 #define OA_TAIL_MARGIN_NSEC 100000ULL
258 #define INVALID_TAIL_PTR 0xffffffff
259
260 /* The default frequency for checking whether the OA unit has written new
261 * reports to the circular OA buffer...
262 */
263 #define DEFAULT_POLL_FREQUENCY_HZ 200
264 #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
265
266 /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
267 static u32 i915_perf_stream_paranoid = true;
268
269 /* The maximum exponent the hardware accepts is 63 (essentially it selects one
270 * of the 64bit timestamp bits to trigger reports from) but there's currently
271 * no known use case for sampling as infrequently as once per 47 thousand years.
272 *
273 * Since the timestamps included in OA reports are only 32bits it seems
274 * reasonable to limit the OA exponent where it's still possible to account for
275 * overflow in OA report timestamps.
276 */
277 #define OA_EXPONENT_MAX 31
278
279 #define INVALID_CTX_ID 0xffffffff
280
281 /* On Gen8+ automatically triggered OA reports include a 'reason' field... */
282 #define OAREPORT_REASON_MASK 0x3f
283 #define OAREPORT_REASON_MASK_EXTENDED 0x7f
284 #define OAREPORT_REASON_SHIFT 19
285 #define OAREPORT_REASON_TIMER (1<<0)
286 #define OAREPORT_REASON_CTX_SWITCH (1<<3)
287 #define OAREPORT_REASON_CLK_RATIO (1<<5)
288
289
290 /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
291 *
292 * The highest sampling frequency we can theoretically program the OA unit
293 * with is always half the timestamp frequency: E.g. 6.25Mhz for Haswell.
294 *
295 * Initialized just before we register the sysctl parameter.
296 */
297 static int oa_sample_rate_hard_limit;
298
299 /* Theoretically we can program the OA unit to sample every 160ns but don't
300 * allow that by default unless root...
301 *
302 * The default threshold of 100000Hz is based on perf's similar
303 * kernel.perf_event_max_sample_rate sysctl parameter.
304 */
305 static u32 i915_oa_max_sample_rate = 100000;
306
307 /* XXX: beware if future OA HW adds new report formats that the current
308 * code assumes all reports have a power-of-two size and ~(size - 1) can
309 * be used as a mask to align the OA tail pointer.
310 */
311 static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
312 [I915_OA_FORMAT_A13] = { 0, 64 },
313 [I915_OA_FORMAT_A29] = { 1, 128 },
314 [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
315 /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
316 [I915_OA_FORMAT_B4_C8] = { 4, 64 },
317 [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 },
318 [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 },
319 [I915_OA_FORMAT_C4_B8] = { 7, 64 },
320 [I915_OA_FORMAT_A12] = { 0, 64 },
321 [I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
322 [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
323 };
324
325 #define SAMPLE_OA_REPORT (1<<0)
326
327 /**
328 * struct perf_open_properties - for validated properties given to open a stream
329 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
330 * @single_context: Whether a single or all gpu contexts should be monitored
331 * @hold_preemption: Whether the preemption is disabled for the filtered
332 * context
333 * @ctx_handle: A gem ctx handle for use with @single_context
334 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
335 * @oa_format: An OA unit HW report format
336 * @oa_periodic: Whether to enable periodic OA unit sampling
337 * @oa_period_exponent: The OA unit sampling period is derived from this
338 * @engine: The engine (typically rcs0) being monitored by the OA unit
339 * @has_sseu: Whether @sseu was specified by userspace
340 * @sseu: internal SSEU configuration computed either from the userspace
341 * specified configuration in the opening parameters or a default value
342 * (see get_default_sseu_config())
343 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
344 * data availability
345 *
346 * As read_properties_unlocked() enumerates and validates the properties given
347 * to open a stream of metrics the configuration is built up in the structure
348 * which starts out zero initialized.
349 */
350 struct perf_open_properties {
351 u32 sample_flags;
352
353 u64 single_context:1;
354 u64 hold_preemption:1;
355 u64 ctx_handle;
356
357 /* OA sampling state */
358 int metrics_set;
359 int oa_format;
360 bool oa_periodic;
361 int oa_period_exponent;
362
363 struct intel_engine_cs *engine;
364
365 bool has_sseu;
366 struct intel_sseu sseu;
367
368 u64 poll_oa_period;
369 };
370
371 struct i915_oa_config_bo {
372 struct llist_node node;
373
374 struct i915_oa_config *oa_config;
375 struct i915_vma *vma;
376 };
377
378 static struct ctl_table_header *sysctl_header;
379
380 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
381
i915_oa_config_release(struct kref * ref)382 void i915_oa_config_release(struct kref *ref)
383 {
384 struct i915_oa_config *oa_config =
385 container_of(ref, typeof(*oa_config), ref);
386
387 kfree(oa_config->flex_regs);
388 kfree(oa_config->b_counter_regs);
389 kfree(oa_config->mux_regs);
390
391 kfree_rcu(oa_config, rcu);
392 }
393
394 struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf * perf,int metrics_set)395 i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
396 {
397 struct i915_oa_config *oa_config;
398
399 rcu_read_lock();
400 oa_config = idr_find(&perf->metrics_idr, metrics_set);
401 if (oa_config)
402 oa_config = i915_oa_config_get(oa_config);
403 rcu_read_unlock();
404
405 return oa_config;
406 }
407
free_oa_config_bo(struct i915_oa_config_bo * oa_bo)408 static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
409 {
410 i915_oa_config_put(oa_bo->oa_config);
411 i915_vma_put(oa_bo->vma);
412 kfree(oa_bo);
413 }
414
gen12_oa_hw_tail_read(struct i915_perf_stream * stream)415 static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
416 {
417 struct intel_uncore *uncore = stream->uncore;
418
419 return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
420 GEN12_OAG_OATAILPTR_MASK;
421 }
422
gen8_oa_hw_tail_read(struct i915_perf_stream * stream)423 static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
424 {
425 struct intel_uncore *uncore = stream->uncore;
426
427 return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
428 }
429
gen7_oa_hw_tail_read(struct i915_perf_stream * stream)430 static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
431 {
432 struct intel_uncore *uncore = stream->uncore;
433 u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
434
435 return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
436 }
437
438 /**
439 * oa_buffer_check_unlocked - check for data and update tail ptr state
440 * @stream: i915 stream instance
441 *
442 * This is either called via fops (for blocking reads in user ctx) or the poll
443 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
444 * if there is data available for userspace to read.
445 *
446 * This function is central to providing a workaround for the OA unit tail
447 * pointer having a race with respect to what data is visible to the CPU.
448 * It is responsible for reading tail pointers from the hardware and giving
449 * the pointers time to 'age' before they are made available for reading.
450 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
451 *
452 * Besides returning true when there is data available to read() this function
453 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
454 * object.
455 *
456 * Note: It's safe to read OA config state here unlocked, assuming that this is
457 * only called while the stream is enabled, while the global OA configuration
458 * can't be modified.
459 *
460 * Returns: %true if the OA buffer contains data, else %false
461 */
oa_buffer_check_unlocked(struct i915_perf_stream * stream)462 static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
463 {
464 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
465 int report_size = stream->oa_buffer.format_size;
466 unsigned long flags;
467 bool pollin;
468 u32 hw_tail;
469 u64 now;
470
471 /* We have to consider the (unlikely) possibility that read() errors
472 * could result in an OA buffer reset which might reset the head and
473 * tail state.
474 */
475 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
476
477 hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
478
479 /* The tail pointer increases in 64 byte increments,
480 * not in report_size steps...
481 */
482 hw_tail &= ~(report_size - 1);
483
484 now = ktime_get_mono_fast_ns();
485
486 if (hw_tail == stream->oa_buffer.aging_tail &&
487 (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
488 /* If the HW tail hasn't move since the last check and the HW
489 * tail has been aging for long enough, declare it the new
490 * tail.
491 */
492 stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
493 } else {
494 u32 head, tail, aged_tail;
495
496 /* NB: The head we observe here might effectively be a little
497 * out of date. If a read() is in progress, the head could be
498 * anywhere between this head and stream->oa_buffer.tail.
499 */
500 head = stream->oa_buffer.head - gtt_offset;
501 aged_tail = stream->oa_buffer.tail - gtt_offset;
502
503 hw_tail -= gtt_offset;
504 tail = hw_tail;
505
506 /* Walk the stream backward until we find a report with dword 0
507 * & 1 not at 0. Since the circular buffer pointers progress by
508 * increments of 64 bytes and that reports can be up to 256
509 * bytes long, we can't tell whether a report has fully landed
510 * in memory before the first 2 dwords of the following report
511 * have effectively landed.
512 *
513 * This is assuming that the writes of the OA unit land in
514 * memory in the order they were written to.
515 * If not : (╯°□°)╯︵ ┻━┻
516 */
517 while (OA_TAKEN(tail, aged_tail) >= report_size) {
518 u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);
519
520 if (report32[0] != 0 || report32[1] != 0)
521 break;
522
523 tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
524 }
525
526 if (OA_TAKEN(hw_tail, tail) > report_size &&
527 __ratelimit(&stream->perf->tail_pointer_race))
528 DRM_NOTE("unlanded report(s) head=0x%x "
529 "tail=0x%x hw_tail=0x%x\n",
530 head, tail, hw_tail);
531
532 stream->oa_buffer.tail = gtt_offset + tail;
533 stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
534 stream->oa_buffer.aging_timestamp = now;
535 }
536
537 pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
538 stream->oa_buffer.head - gtt_offset) >= report_size;
539
540 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
541
542 return pollin;
543 }
544
545 /**
546 * append_oa_status - Appends a status record to a userspace read() buffer.
547 * @stream: An i915-perf stream opened for OA metrics
548 * @buf: destination buffer given by userspace
549 * @count: the number of bytes userspace wants to read
550 * @offset: (inout): the current position for writing into @buf
551 * @type: The kind of status to report to userspace
552 *
553 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
554 * into the userspace read() buffer.
555 *
556 * The @buf @offset will only be updated on success.
557 *
558 * Returns: 0 on success, negative error code on failure.
559 */
append_oa_status(struct i915_perf_stream * stream,char __user * buf,size_t count,size_t * offset,enum drm_i915_perf_record_type type)560 static int append_oa_status(struct i915_perf_stream *stream,
561 char __user *buf,
562 size_t count,
563 size_t *offset,
564 enum drm_i915_perf_record_type type)
565 {
566 struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };
567
568 if ((count - *offset) < header.size)
569 return -ENOSPC;
570
571 if (copy_to_user(buf + *offset, &header, sizeof(header)))
572 return -EFAULT;
573
574 (*offset) += header.size;
575
576 return 0;
577 }
578
579 /**
580 * append_oa_sample - Copies single OA report into userspace read() buffer.
581 * @stream: An i915-perf stream opened for OA metrics
582 * @buf: destination buffer given by userspace
583 * @count: the number of bytes userspace wants to read
584 * @offset: (inout): the current position for writing into @buf
585 * @report: A single OA report to (optionally) include as part of the sample
586 *
587 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
588 * properties when opening a stream, tracked as `stream->sample_flags`. This
589 * function copies the requested components of a single sample to the given
590 * read() @buf.
591 *
592 * The @buf @offset will only be updated on success.
593 *
594 * Returns: 0 on success, negative error code on failure.
595 */
append_oa_sample(struct i915_perf_stream * stream,char __user * buf,size_t count,size_t * offset,const u8 * report)596 static int append_oa_sample(struct i915_perf_stream *stream,
597 char __user *buf,
598 size_t count,
599 size_t *offset,
600 const u8 *report)
601 {
602 int report_size = stream->oa_buffer.format_size;
603 struct drm_i915_perf_record_header header;
604
605 header.type = DRM_I915_PERF_RECORD_SAMPLE;
606 header.pad = 0;
607 header.size = stream->sample_size;
608
609 if ((count - *offset) < header.size)
610 return -ENOSPC;
611
612 buf += *offset;
613 if (copy_to_user(buf, &header, sizeof(header)))
614 return -EFAULT;
615 buf += sizeof(header);
616
617 if (copy_to_user(buf, report, report_size))
618 return -EFAULT;
619
620 (*offset) += header.size;
621
622 return 0;
623 }
624
625 /**
626 * gen8_append_oa_reports - Copies all buffered OA reports into
627 * userspace read() buffer.
628 * @stream: An i915-perf stream opened for OA metrics
629 * @buf: destination buffer given by userspace
630 * @count: the number of bytes userspace wants to read
631 * @offset: (inout): the current position for writing into @buf
632 *
633 * Notably any error condition resulting in a short read (-%ENOSPC or
634 * -%EFAULT) will be returned even though one or more records may
635 * have been successfully copied. In this case it's up to the caller
636 * to decide if the error should be squashed before returning to
637 * userspace.
638 *
639 * Note: reports are consumed from the head, and appended to the
640 * tail, so the tail chases the head?... If you think that's mad
641 * and back-to-front you're not alone, but this follows the
642 * Gen PRM naming convention.
643 *
644 * Returns: 0 on success, negative error code on failure.
645 */
gen8_append_oa_reports(struct i915_perf_stream * stream,char __user * buf,size_t count,size_t * offset)646 static int gen8_append_oa_reports(struct i915_perf_stream *stream,
647 char __user *buf,
648 size_t count,
649 size_t *offset)
650 {
651 struct intel_uncore *uncore = stream->uncore;
652 int report_size = stream->oa_buffer.format_size;
653 u8 *oa_buf_base = stream->oa_buffer.vaddr;
654 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
655 u32 mask = (OA_BUFFER_SIZE - 1);
656 size_t start_offset = *offset;
657 unsigned long flags;
658 u32 head, tail;
659 u32 taken;
660 int ret = 0;
661
662 if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
663 return -EIO;
664
665 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
666
667 head = stream->oa_buffer.head;
668 tail = stream->oa_buffer.tail;
669
670 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
671
672 /*
673 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
674 * while indexing relative to oa_buf_base.
675 */
676 head -= gtt_offset;
677 tail -= gtt_offset;
678
679 /*
680 * An out of bounds or misaligned head or tail pointer implies a driver
681 * bug since we validate + align the tail pointers we read from the
682 * hardware and we are in full control of the head pointer which should
683 * only be incremented by multiples of the report size (notably also
684 * all a power of two).
685 */
686 if (drm_WARN_ONCE(&uncore->i915->drm,
687 head > OA_BUFFER_SIZE || head % report_size ||
688 tail > OA_BUFFER_SIZE || tail % report_size,
689 "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
690 head, tail))
691 return -EIO;
692
693
694 for (/* none */;
695 (taken = OA_TAKEN(tail, head));
696 head = (head + report_size) & mask) {
697 u8 *report = oa_buf_base + head;
698 u32 *report32 = (void *)report;
699 u32 ctx_id;
700 u32 reason;
701
702 /*
703 * All the report sizes factor neatly into the buffer
704 * size so we never expect to see a report split
705 * between the beginning and end of the buffer.
706 *
707 * Given the initial alignment check a misalignment
708 * here would imply a driver bug that would result
709 * in an overrun.
710 */
711 if (drm_WARN_ON(&uncore->i915->drm,
712 (OA_BUFFER_SIZE - head) < report_size)) {
713 drm_err(&uncore->i915->drm,
714 "Spurious OA head ptr: non-integral report offset\n");
715 break;
716 }
717
718 /*
719 * The reason field includes flags identifying what
720 * triggered this specific report (mostly timer
721 * triggered or e.g. due to a context switch).
722 *
723 * This field is never expected to be zero so we can
724 * check that the report isn't invalid before copying
725 * it to userspace...
726 */
727 reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
728 (GRAPHICS_VER(stream->perf->i915) == 12 ?
729 OAREPORT_REASON_MASK_EXTENDED :
730 OAREPORT_REASON_MASK));
731
732 ctx_id = report32[2] & stream->specific_ctx_id_mask;
733
734 /*
735 * Squash whatever is in the CTX_ID field if it's marked as
736 * invalid to be sure we avoid false-positive, single-context
737 * filtering below...
738 *
739 * Note: that we don't clear the valid_ctx_bit so userspace can
740 * understand that the ID has been squashed by the kernel.
741 */
742 if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
743 GRAPHICS_VER(stream->perf->i915) <= 11)
744 ctx_id = report32[2] = INVALID_CTX_ID;
745
746 /*
747 * NB: For Gen 8 the OA unit no longer supports clock gating
748 * off for a specific context and the kernel can't securely
749 * stop the counters from updating as system-wide / global
750 * values.
751 *
752 * Automatic reports now include a context ID so reports can be
753 * filtered on the cpu but it's not worth trying to
754 * automatically subtract/hide counter progress for other
755 * contexts while filtering since we can't stop userspace
756 * issuing MI_REPORT_PERF_COUNT commands which would still
757 * provide a side-band view of the real values.
758 *
759 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
760 * to normalize counters for a single filtered context then it
761 * needs be forwarded bookend context-switch reports so that it
762 * can track switches in between MI_REPORT_PERF_COUNT commands
763 * and can itself subtract/ignore the progress of counters
764 * associated with other contexts. Note that the hardware
765 * automatically triggers reports when switching to a new
766 * context which are tagged with the ID of the newly active
767 * context. To avoid the complexity (and likely fragility) of
768 * reading ahead while parsing reports to try and minimize
769 * forwarding redundant context switch reports (i.e. between
770 * other, unrelated contexts) we simply elect to forward them
771 * all.
772 *
773 * We don't rely solely on the reason field to identify context
774 * switches since it's not-uncommon for periodic samples to
775 * identify a switch before any 'context switch' report.
776 */
777 if (!stream->perf->exclusive_stream->ctx ||
778 stream->specific_ctx_id == ctx_id ||
779 stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
780 reason & OAREPORT_REASON_CTX_SWITCH) {
781
782 /*
783 * While filtering for a single context we avoid
784 * leaking the IDs of other contexts.
785 */
786 if (stream->perf->exclusive_stream->ctx &&
787 stream->specific_ctx_id != ctx_id) {
788 report32[2] = INVALID_CTX_ID;
789 }
790
791 ret = append_oa_sample(stream, buf, count, offset,
792 report);
793 if (ret)
794 break;
795
796 stream->oa_buffer.last_ctx_id = ctx_id;
797 }
798
799 /*
800 * Clear out the first 2 dword as a mean to detect unlanded
801 * reports.
802 */
803 report32[0] = 0;
804 report32[1] = 0;
805 }
806
807 if (start_offset != *offset) {
808 i915_reg_t oaheadptr;
809
810 oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
811 GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;
812
813 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
814
815 /*
816 * We removed the gtt_offset for the copy loop above, indexing
817 * relative to oa_buf_base so put back here...
818 */
819 head += gtt_offset;
820 intel_uncore_write(uncore, oaheadptr,
821 head & GEN12_OAG_OAHEADPTR_MASK);
822 stream->oa_buffer.head = head;
823
824 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
825 }
826
827 return ret;
828 }
829
830 /**
831 * gen8_oa_read - copy status records then buffered OA reports
832 * @stream: An i915-perf stream opened for OA metrics
833 * @buf: destination buffer given by userspace
834 * @count: the number of bytes userspace wants to read
835 * @offset: (inout): the current position for writing into @buf
836 *
837 * Checks OA unit status registers and if necessary appends corresponding
838 * status records for userspace (such as for a buffer full condition) and then
839 * initiate appending any buffered OA reports.
840 *
841 * Updates @offset according to the number of bytes successfully copied into
842 * the userspace buffer.
843 *
844 * NB: some data may be successfully copied to the userspace buffer
845 * even if an error is returned, and this is reflected in the
846 * updated @offset.
847 *
848 * Returns: zero on success or a negative error code
849 */
gen8_oa_read(struct i915_perf_stream * stream,char __user * buf,size_t count,size_t * offset)850 static int gen8_oa_read(struct i915_perf_stream *stream,
851 char __user *buf,
852 size_t count,
853 size_t *offset)
854 {
855 struct intel_uncore *uncore = stream->uncore;
856 u32 oastatus;
857 i915_reg_t oastatus_reg;
858 int ret;
859
860 if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
861 return -EIO;
862
863 oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
864 GEN12_OAG_OASTATUS : GEN8_OASTATUS;
865
866 oastatus = intel_uncore_read(uncore, oastatus_reg);
867
868 /*
869 * We treat OABUFFER_OVERFLOW as a significant error:
870 *
871 * Although theoretically we could handle this more gracefully
872 * sometimes, some Gens don't correctly suppress certain
873 * automatically triggered reports in this condition and so we
874 * have to assume that old reports are now being trampled
875 * over.
876 *
877 * Considering how we don't currently give userspace control
878 * over the OA buffer size and always configure a large 16MB
879 * buffer, then a buffer overflow does anyway likely indicate
880 * that something has gone quite badly wrong.
881 */
882 if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
883 ret = append_oa_status(stream, buf, count, offset,
884 DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
885 if (ret)
886 return ret;
887
888 DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
889 stream->period_exponent);
890
891 stream->perf->ops.oa_disable(stream);
892 stream->perf->ops.oa_enable(stream);
893
894 /*
895 * Note: .oa_enable() is expected to re-init the oabuffer and
896 * reset GEN8_OASTATUS for us
897 */
898 oastatus = intel_uncore_read(uncore, oastatus_reg);
899 }
900
901 if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
902 ret = append_oa_status(stream, buf, count, offset,
903 DRM_I915_PERF_RECORD_OA_REPORT_LOST);
904 if (ret)
905 return ret;
906
907 intel_uncore_rmw(uncore, oastatus_reg,
908 GEN8_OASTATUS_COUNTER_OVERFLOW |
909 GEN8_OASTATUS_REPORT_LOST,
910 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
911 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
912 GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
913 }
914
915 return gen8_append_oa_reports(stream, buf, count, offset);
916 }
917
918 /**
919 * gen7_append_oa_reports - Copies all buffered OA reports into
920 * userspace read() buffer.
921 * @stream: An i915-perf stream opened for OA metrics
922 * @buf: destination buffer given by userspace
923 * @count: the number of bytes userspace wants to read
924 * @offset: (inout): the current position for writing into @buf
925 *
926 * Notably any error condition resulting in a short read (-%ENOSPC or
927 * -%EFAULT) will be returned even though one or more records may
928 * have been successfully copied. In this case it's up to the caller
929 * to decide if the error should be squashed before returning to
930 * userspace.
931 *
932 * Note: reports are consumed from the head, and appended to the
933 * tail, so the tail chases the head?... If you think that's mad
934 * and back-to-front you're not alone, but this follows the
935 * Gen PRM naming convention.
936 *
937 * Returns: 0 on success, negative error code on failure.
938 */
gen7_append_oa_reports(struct i915_perf_stream * stream,char __user * buf,size_t count,size_t * offset)939 static int gen7_append_oa_reports(struct i915_perf_stream *stream,
940 char __user *buf,
941 size_t count,
942 size_t *offset)
943 {
944 struct intel_uncore *uncore = stream->uncore;
945 int report_size = stream->oa_buffer.format_size;
946 u8 *oa_buf_base = stream->oa_buffer.vaddr;
947 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
948 u32 mask = (OA_BUFFER_SIZE - 1);
949 size_t start_offset = *offset;
950 unsigned long flags;
951 u32 head, tail;
952 u32 taken;
953 int ret = 0;
954
955 if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
956 return -EIO;
957
958 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
959
960 head = stream->oa_buffer.head;
961 tail = stream->oa_buffer.tail;
962
963 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
964
965 /* NB: oa_buffer.head/tail include the gtt_offset which we don't want
966 * while indexing relative to oa_buf_base.
967 */
968 head -= gtt_offset;
969 tail -= gtt_offset;
970
971 /* An out of bounds or misaligned head or tail pointer implies a driver
972 * bug since we validate + align the tail pointers we read from the
973 * hardware and we are in full control of the head pointer which should
974 * only be incremented by multiples of the report size (notably also
975 * all a power of two).
976 */
977 if (drm_WARN_ONCE(&uncore->i915->drm,
978 head > OA_BUFFER_SIZE || head % report_size ||
979 tail > OA_BUFFER_SIZE || tail % report_size,
980 "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
981 head, tail))
982 return -EIO;
983
984
985 for (/* none */;
986 (taken = OA_TAKEN(tail, head));
987 head = (head + report_size) & mask) {
988 u8 *report = oa_buf_base + head;
989 u32 *report32 = (void *)report;
990
991 /* All the report sizes factor neatly into the buffer
992 * size so we never expect to see a report split
993 * between the beginning and end of the buffer.
994 *
995 * Given the initial alignment check a misalignment
996 * here would imply a driver bug that would result
997 * in an overrun.
998 */
999 if (drm_WARN_ON(&uncore->i915->drm,
1000 (OA_BUFFER_SIZE - head) < report_size)) {
1001 drm_err(&uncore->i915->drm,
1002 "Spurious OA head ptr: non-integral report offset\n");
1003 break;
1004 }
1005
1006 /* The report-ID field for periodic samples includes
1007 * some undocumented flags related to what triggered
1008 * the report and is never expected to be zero so we
1009 * can check that the report isn't invalid before
1010 * copying it to userspace...
1011 */
1012 if (report32[0] == 0) {
1013 if (__ratelimit(&stream->perf->spurious_report_rs))
1014 DRM_NOTE("Skipping spurious, invalid OA report\n");
1015 continue;
1016 }
1017
1018 ret = append_oa_sample(stream, buf, count, offset, report);
1019 if (ret)
1020 break;
1021
1022 /* Clear out the first 2 dwords as a mean to detect unlanded
1023 * reports.
1024 */
1025 report32[0] = 0;
1026 report32[1] = 0;
1027 }
1028
1029 if (start_offset != *offset) {
1030 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1031
1032 /* We removed the gtt_offset for the copy loop above, indexing
1033 * relative to oa_buf_base so put back here...
1034 */
1035 head += gtt_offset;
1036
1037 intel_uncore_write(uncore, GEN7_OASTATUS2,
1038 (head & GEN7_OASTATUS2_HEAD_MASK) |
1039 GEN7_OASTATUS2_MEM_SELECT_GGTT);
1040 stream->oa_buffer.head = head;
1041
1042 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1043 }
1044
1045 return ret;
1046 }
1047
1048 /**
1049 * gen7_oa_read - copy status records then buffered OA reports
1050 * @stream: An i915-perf stream opened for OA metrics
1051 * @buf: destination buffer given by userspace
1052 * @count: the number of bytes userspace wants to read
1053 * @offset: (inout): the current position for writing into @buf
1054 *
1055 * Checks Gen 7 specific OA unit status registers and if necessary appends
1056 * corresponding status records for userspace (such as for a buffer full
1057 * condition) and then initiate appending any buffered OA reports.
1058 *
1059 * Updates @offset according to the number of bytes successfully copied into
1060 * the userspace buffer.
1061 *
1062 * Returns: zero on success or a negative error code
1063 */
gen7_oa_read(struct i915_perf_stream * stream,char __user * buf,size_t count,size_t * offset)1064 static int gen7_oa_read(struct i915_perf_stream *stream,
1065 char __user *buf,
1066 size_t count,
1067 size_t *offset)
1068 {
1069 struct intel_uncore *uncore = stream->uncore;
1070 u32 oastatus1;
1071 int ret;
1072
1073 if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
1074 return -EIO;
1075
1076 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
1077
1078 /* XXX: On Haswell we don't have a safe way to clear oastatus1
1079 * bits while the OA unit is enabled (while the tail pointer
1080 * may be updated asynchronously) so we ignore status bits
1081 * that have already been reported to userspace.
1082 */
1083 oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
1084
1085 /* We treat OABUFFER_OVERFLOW as a significant error:
1086 *
1087 * - The status can be interpreted to mean that the buffer is
1088 * currently full (with a higher precedence than OA_TAKEN()
1089 * which will start to report a near-empty buffer after an
1090 * overflow) but it's awkward that we can't clear the status
1091 * on Haswell, so without a reset we won't be able to catch
1092 * the state again.
1093 *
1094 * - Since it also implies the HW has started overwriting old
1095 * reports it may also affect our sanity checks for invalid
1096 * reports when copying to userspace that assume new reports
1097 * are being written to cleared memory.
1098 *
1099 * - In the future we may want to introduce a flight recorder
1100 * mode where the driver will automatically maintain a safe
1101 * guard band between head/tail, avoiding this overflow
1102 * condition, but we avoid the added driver complexity for
1103 * now.
1104 */
1105 if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
1106 ret = append_oa_status(stream, buf, count, offset,
1107 DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
1108 if (ret)
1109 return ret;
1110
1111 DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
1112 stream->period_exponent);
1113
1114 stream->perf->ops.oa_disable(stream);
1115 stream->perf->ops.oa_enable(stream);
1116
1117 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
1118 }
1119
1120 if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
1121 ret = append_oa_status(stream, buf, count, offset,
1122 DRM_I915_PERF_RECORD_OA_REPORT_LOST);
1123 if (ret)
1124 return ret;
1125 stream->perf->gen7_latched_oastatus1 |=
1126 GEN7_OASTATUS1_REPORT_LOST;
1127 }
1128
1129 return gen7_append_oa_reports(stream, buf, count, offset);
1130 }
1131
1132 /**
1133 * i915_oa_wait_unlocked - handles blocking IO until OA data available
1134 * @stream: An i915-perf stream opened for OA metrics
1135 *
1136 * Called when userspace tries to read() from a blocking stream FD opened
1137 * for OA metrics. It waits until the hrtimer callback finds a non-empty
1138 * OA buffer and wakes us.
1139 *
1140 * Note: it's acceptable to have this return with some false positives
1141 * since any subsequent read handling will return -EAGAIN if there isn't
1142 * really data ready for userspace yet.
1143 *
1144 * Returns: zero on success or a negative error code
1145 */
i915_oa_wait_unlocked(struct i915_perf_stream * stream)1146 static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
1147 {
1148 /* We would wait indefinitely if periodic sampling is not enabled */
1149 if (!stream->periodic)
1150 return -EIO;
1151
1152 return wait_event_interruptible(stream->poll_wq,
1153 oa_buffer_check_unlocked(stream));
1154 }
1155
1156 /**
1157 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
1158 * @stream: An i915-perf stream opened for OA metrics
1159 * @file: An i915 perf stream file
1160 * @wait: poll() state table
1161 *
1162 * For handling userspace polling on an i915 perf stream opened for OA metrics,
1163 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
1164 * when it sees data ready to read in the circular OA buffer.
1165 */
i915_oa_poll_wait(struct i915_perf_stream * stream,struct file * file,poll_table * wait)1166 static void i915_oa_poll_wait(struct i915_perf_stream *stream,
1167 struct file *file,
1168 poll_table *wait)
1169 {
1170 poll_wait(file, &stream->poll_wq, wait);
1171 }
1172
1173 /**
1174 * i915_oa_read - just calls through to &i915_oa_ops->read
1175 * @stream: An i915-perf stream opened for OA metrics
1176 * @buf: destination buffer given by userspace
1177 * @count: the number of bytes userspace wants to read
1178 * @offset: (inout): the current position for writing into @buf
1179 *
1180 * Updates @offset according to the number of bytes successfully copied into
1181 * the userspace buffer.
1182 *
1183 * Returns: zero on success or a negative error code
1184 */
i915_oa_read(struct i915_perf_stream * stream,char __user * buf,size_t count,size_t * offset)1185 static int i915_oa_read(struct i915_perf_stream *stream,
1186 char __user *buf,
1187 size_t count,
1188 size_t *offset)
1189 {
1190 return stream->perf->ops.read(stream, buf, count, offset);
1191 }
1192
oa_pin_context(struct i915_perf_stream * stream)1193 static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
1194 {
1195 struct i915_gem_engines_iter it;
1196 struct i915_gem_context *ctx = stream->ctx;
1197 struct intel_context *ce;
1198 struct i915_gem_ww_ctx ww;
1199 int err = -ENODEV;
1200
1201 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1202 if (ce->engine != stream->engine) /* first match! */
1203 continue;
1204
1205 err = 0;
1206 break;
1207 }
1208 i915_gem_context_unlock_engines(ctx);
1209
1210 if (err)
1211 return ERR_PTR(err);
1212
1213 i915_gem_ww_ctx_init(&ww, true);
1214 retry:
1215 /*
1216 * As the ID is the gtt offset of the context's vma we
1217 * pin the vma to ensure the ID remains fixed.
1218 */
1219 err = intel_context_pin_ww(ce, &ww);
1220 if (err == -EDEADLK) {
1221 err = i915_gem_ww_ctx_backoff(&ww);
1222 if (!err)
1223 goto retry;
1224 }
1225 i915_gem_ww_ctx_fini(&ww);
1226
1227 if (err)
1228 return ERR_PTR(err);
1229
1230 stream->pinned_ctx = ce;
1231 return stream->pinned_ctx;
1232 }
1233
1234 /**
1235 * oa_get_render_ctx_id - determine and hold ctx hw id
1236 * @stream: An i915-perf stream opened for OA metrics
1237 *
1238 * Determine the render context hw id, and ensure it remains fixed for the
1239 * lifetime of the stream. This ensures that we don't have to worry about
1240 * updating the context ID in OACONTROL on the fly.
1241 *
1242 * Returns: zero on success or a negative error code
1243 */
oa_get_render_ctx_id(struct i915_perf_stream * stream)1244 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1245 {
1246 struct intel_context *ce;
1247
1248 ce = oa_pin_context(stream);
1249 if (IS_ERR(ce))
1250 return PTR_ERR(ce);
1251
1252 switch (GRAPHICS_VER(ce->engine->i915)) {
1253 case 7: {
1254 /*
1255 * On Haswell we don't do any post processing of the reports
1256 * and don't need to use the mask.
1257 */
1258 stream->specific_ctx_id = i915_ggtt_offset(ce->state);
1259 stream->specific_ctx_id_mask = 0;
1260 break;
1261 }
1262
1263 case 8:
1264 case 9:
1265 if (intel_engine_uses_guc(ce->engine)) {
1266 /*
1267 * When using GuC, the context descriptor we write in
1268 * i915 is read by GuC and rewritten before it's
1269 * actually written into the hardware. The LRCA is
1270 * what is put into the context id field of the
1271 * context descriptor by GuC. Because it's aligned to
1272 * a page, the lower 12bits are always at 0 and
1273 * dropped by GuC. They won't be part of the context
1274 * ID in the OA reports, so squash those lower bits.
1275 */
1276 stream->specific_ctx_id = ce->lrc.lrca >> 12;
1277
1278 /*
1279 * GuC uses the top bit to signal proxy submission, so
1280 * ignore that bit.
1281 */
1282 stream->specific_ctx_id_mask =
1283 (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
1284 } else {
1285 stream->specific_ctx_id_mask =
1286 (1U << GEN8_CTX_ID_WIDTH) - 1;
1287 stream->specific_ctx_id = stream->specific_ctx_id_mask;
1288 }
1289 break;
1290
1291 case 11:
1292 case 12:
1293 if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 50)) {
1294 stream->specific_ctx_id_mask =
1295 ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
1296 (XEHP_SW_CTX_ID_SHIFT - 32);
1297 stream->specific_ctx_id =
1298 (XEHP_MAX_CONTEXT_HW_ID - 1) <<
1299 (XEHP_SW_CTX_ID_SHIFT - 32);
1300 } else {
1301 stream->specific_ctx_id_mask =
1302 ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
1303 /*
1304 * Pick an unused context id
1305 * 0 - BITS_PER_LONG are used by other contexts
1306 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
1307 */
1308 stream->specific_ctx_id =
1309 (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
1310 }
1311 break;
1312
1313 default:
1314 MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
1315 }
1316
1317 ce->tag = stream->specific_ctx_id;
1318
1319 drm_dbg(&stream->perf->i915->drm,
1320 "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
1321 stream->specific_ctx_id,
1322 stream->specific_ctx_id_mask);
1323
1324 return 0;
1325 }
1326
1327 /**
1328 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
1329 * @stream: An i915-perf stream opened for OA metrics
1330 *
1331 * In case anything needed doing to ensure the context HW ID would remain valid
1332 * for the lifetime of the stream, then that can be undone here.
1333 */
oa_put_render_ctx_id(struct i915_perf_stream * stream)1334 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1335 {
1336 struct intel_context *ce;
1337
1338 ce = fetch_and_zero(&stream->pinned_ctx);
1339 if (ce) {
1340 ce->tag = 0; /* recomputed on next submission after parking */
1341 intel_context_unpin(ce);
1342 }
1343
1344 stream->specific_ctx_id = INVALID_CTX_ID;
1345 stream->specific_ctx_id_mask = 0;
1346 }
1347
1348 static void
free_oa_buffer(struct i915_perf_stream * stream)1349 free_oa_buffer(struct i915_perf_stream *stream)
1350 {
1351 i915_vma_unpin_and_release(&stream->oa_buffer.vma,
1352 I915_VMA_RELEASE_MAP);
1353
1354 stream->oa_buffer.vaddr = NULL;
1355 }
1356
1357 static void
free_oa_configs(struct i915_perf_stream * stream)1358 free_oa_configs(struct i915_perf_stream *stream)
1359 {
1360 struct i915_oa_config_bo *oa_bo, *tmp;
1361
1362 i915_oa_config_put(stream->oa_config);
1363 llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
1364 free_oa_config_bo(oa_bo);
1365 }
1366
1367 static void
free_noa_wait(struct i915_perf_stream * stream)1368 free_noa_wait(struct i915_perf_stream *stream)
1369 {
1370 i915_vma_unpin_and_release(&stream->noa_wait, 0);
1371 }
1372
i915_oa_stream_destroy(struct i915_perf_stream * stream)1373 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1374 {
1375 struct i915_perf *perf = stream->perf;
1376
1377 BUG_ON(stream != perf->exclusive_stream);
1378
1379 /*
1380 * Unset exclusive_stream first, it will be checked while disabling
1381 * the metric set on gen8+.
1382 *
1383 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
1384 */
1385 WRITE_ONCE(perf->exclusive_stream, NULL);
1386 perf->ops.disable_metric_set(stream);
1387
1388 free_oa_buffer(stream);
1389
1390 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
1391 intel_engine_pm_put(stream->engine);
1392
1393 if (stream->ctx)
1394 oa_put_render_ctx_id(stream);
1395
1396 free_oa_configs(stream);
1397 free_noa_wait(stream);
1398
1399 if (perf->spurious_report_rs.missed) {
1400 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
1401 perf->spurious_report_rs.missed);
1402 }
1403 }
1404
gen7_init_oa_buffer(struct i915_perf_stream * stream)1405 static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1406 {
1407 struct intel_uncore *uncore = stream->uncore;
1408 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1409 unsigned long flags;
1410
1411 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1412
1413 /* Pre-DevBDW: OABUFFER must be set with counters off,
1414 * before OASTATUS1, but after OASTATUS2
1415 */
1416 intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1417 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1418 stream->oa_buffer.head = gtt_offset;
1419
1420 intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1421
1422 intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1423 gtt_offset | OABUFFER_SIZE_16M);
1424
1425 /* Mark that we need updated tail pointers to read from... */
1426 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1427 stream->oa_buffer.tail = gtt_offset;
1428
1429 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1430
1431 /* On Haswell we have to track which OASTATUS1 flags we've
1432 * already seen since they can't be cleared while periodic
1433 * sampling is enabled.
1434 */
1435 stream->perf->gen7_latched_oastatus1 = 0;
1436
1437 /* NB: although the OA buffer will initially be allocated
1438 * zeroed via shmfs (and so this memset is redundant when
1439 * first allocating), we may re-init the OA buffer, either
1440 * when re-enabling a stream or in error/reset paths.
1441 *
1442 * The reason we clear the buffer for each re-init is for the
1443 * sanity check in gen7_append_oa_reports() that looks at the
1444 * report-id field to make sure it's non-zero which relies on
1445 * the assumption that new reports are being written to zeroed
1446 * memory...
1447 */
1448 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1449 }
1450
gen8_init_oa_buffer(struct i915_perf_stream * stream)1451 static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1452 {
1453 struct intel_uncore *uncore = stream->uncore;
1454 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1455 unsigned long flags;
1456
1457 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1458
1459 intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1460 intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1461 stream->oa_buffer.head = gtt_offset;
1462
1463 intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1464
1465 /*
1466 * PRM says:
1467 *
1468 * "This MMIO must be set before the OATAILPTR
1469 * register and after the OAHEADPTR register. This is
1470 * to enable proper functionality of the overflow
1471 * bit."
1472 */
1473 intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1474 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1475 intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1476
1477 /* Mark that we need updated tail pointers to read from... */
1478 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1479 stream->oa_buffer.tail = gtt_offset;
1480
1481 /*
1482 * Reset state used to recognise context switches, affecting which
1483 * reports we will forward to userspace while filtering for a single
1484 * context.
1485 */
1486 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1487
1488 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1489
1490 /*
1491 * NB: although the OA buffer will initially be allocated
1492 * zeroed via shmfs (and so this memset is redundant when
1493 * first allocating), we may re-init the OA buffer, either
1494 * when re-enabling a stream or in error/reset paths.
1495 *
1496 * The reason we clear the buffer for each re-init is for the
1497 * sanity check in gen8_append_oa_reports() that looks at the
1498 * reason field to make sure it's non-zero which relies on
1499 * the assumption that new reports are being written to zeroed
1500 * memory...
1501 */
1502 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1503 }
1504
gen12_init_oa_buffer(struct i915_perf_stream * stream)1505 static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1506 {
1507 struct intel_uncore *uncore = stream->uncore;
1508 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1509 unsigned long flags;
1510
1511 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1512
1513 intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
1514 intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
1515 gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1516 stream->oa_buffer.head = gtt_offset;
1517
1518 /*
1519 * PRM says:
1520 *
1521 * "This MMIO must be set before the OATAILPTR
1522 * register and after the OAHEADPTR register. This is
1523 * to enable proper functionality of the overflow
1524 * bit."
1525 */
1526 intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
1527 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1528 intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
1529 gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1530
1531 /* Mark that we need updated tail pointers to read from... */
1532 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1533 stream->oa_buffer.tail = gtt_offset;
1534
1535 /*
1536 * Reset state used to recognise context switches, affecting which
1537 * reports we will forward to userspace while filtering for a single
1538 * context.
1539 */
1540 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1541
1542 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1543
1544 /*
1545 * NB: although the OA buffer will initially be allocated
1546 * zeroed via shmfs (and so this memset is redundant when
1547 * first allocating), we may re-init the OA buffer, either
1548 * when re-enabling a stream or in error/reset paths.
1549 *
1550 * The reason we clear the buffer for each re-init is for the
1551 * sanity check in gen8_append_oa_reports() that looks at the
1552 * reason field to make sure it's non-zero which relies on
1553 * the assumption that new reports are being written to zeroed
1554 * memory...
1555 */
1556 memset(stream->oa_buffer.vaddr, 0,
1557 stream->oa_buffer.vma->size);
1558 }
1559
alloc_oa_buffer(struct i915_perf_stream * stream)1560 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1561 {
1562 struct drm_i915_private *i915 = stream->perf->i915;
1563 struct drm_i915_gem_object *bo;
1564 struct i915_vma *vma;
1565 int ret;
1566
1567 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1568 return -ENODEV;
1569
1570 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1571 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1572
1573 bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1574 if (IS_ERR(bo)) {
1575 drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1576 return PTR_ERR(bo);
1577 }
1578
1579 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1580
1581 /* PreHSW required 512K alignment, HSW requires 16M */
1582 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1583 if (IS_ERR(vma)) {
1584 ret = PTR_ERR(vma);
1585 goto err_unref;
1586 }
1587 stream->oa_buffer.vma = vma;
1588
1589 stream->oa_buffer.vaddr =
1590 i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
1591 if (IS_ERR(stream->oa_buffer.vaddr)) {
1592 ret = PTR_ERR(stream->oa_buffer.vaddr);
1593 goto err_unpin;
1594 }
1595
1596 return 0;
1597
1598 err_unpin:
1599 __i915_vma_unpin(vma);
1600
1601 err_unref:
1602 i915_gem_object_put(bo);
1603
1604 stream->oa_buffer.vaddr = NULL;
1605 stream->oa_buffer.vma = NULL;
1606
1607 return ret;
1608 }
1609
save_restore_register(struct i915_perf_stream * stream,u32 * cs,bool save,i915_reg_t reg,u32 offset,u32 dword_count)1610 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1611 bool save, i915_reg_t reg, u32 offset,
1612 u32 dword_count)
1613 {
1614 u32 cmd;
1615 u32 d;
1616
1617 cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1618 cmd |= MI_SRM_LRM_GLOBAL_GTT;
1619 if (GRAPHICS_VER(stream->perf->i915) >= 8)
1620 cmd++;
1621
1622 for (d = 0; d < dword_count; d++) {
1623 *cs++ = cmd;
1624 *cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1625 *cs++ = intel_gt_scratch_offset(stream->engine->gt,
1626 offset) + 4 * d;
1627 *cs++ = 0;
1628 }
1629
1630 return cs;
1631 }
1632
alloc_noa_wait(struct i915_perf_stream * stream)1633 static int alloc_noa_wait(struct i915_perf_stream *stream)
1634 {
1635 struct drm_i915_private *i915 = stream->perf->i915;
1636 struct drm_i915_gem_object *bo;
1637 struct i915_vma *vma;
1638 const u64 delay_ticks = 0xffffffffffffffff -
1639 intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
1640 atomic64_read(&stream->perf->noa_programming_delay));
1641 const u32 base = stream->engine->mmio_base;
1642 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1643 u32 *batch, *ts0, *cs, *jump;
1644 struct i915_gem_ww_ctx ww;
1645 int ret, i;
1646 enum {
1647 START_TS,
1648 NOW_TS,
1649 DELTA_TS,
1650 JUMP_PREDICATE,
1651 DELTA_TARGET,
1652 N_CS_GPR
1653 };
1654
1655 bo = i915_gem_object_create_internal(i915, 4096);
1656 if (IS_ERR(bo)) {
1657 drm_err(&i915->drm,
1658 "Failed to allocate NOA wait batchbuffer\n");
1659 return PTR_ERR(bo);
1660 }
1661
1662 i915_gem_ww_ctx_init(&ww, true);
1663 retry:
1664 ret = i915_gem_object_lock(bo, &ww);
1665 if (ret)
1666 goto out_ww;
1667
1668 /*
1669 * We pin in GGTT because we jump into this buffer now because
1670 * multiple OA config BOs will have a jump to this address and it
1671 * needs to be fixed during the lifetime of the i915/perf stream.
1672 */
1673 vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH);
1674 if (IS_ERR(vma)) {
1675 ret = PTR_ERR(vma);
1676 goto out_ww;
1677 }
1678
1679 batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1680 if (IS_ERR(batch)) {
1681 ret = PTR_ERR(batch);
1682 goto err_unpin;
1683 }
1684
1685 /* Save registers. */
1686 for (i = 0; i < N_CS_GPR; i++)
1687 cs = save_restore_register(
1688 stream, cs, true /* save */, CS_GPR(i),
1689 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1690 cs = save_restore_register(
1691 stream, cs, true /* save */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE),
1692 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1693
1694 /* First timestamp snapshot location. */
1695 ts0 = cs;
1696
1697 /*
1698 * Initial snapshot of the timestamp register to implement the wait.
1699 * We work with 32b values, so clear out the top 32b bits of the
1700 * register because the ALU works 64bits.
1701 */
1702 *cs++ = MI_LOAD_REGISTER_IMM(1);
1703 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1704 *cs++ = 0;
1705 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1706 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1707 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1708
1709 /*
1710 * This is the location we're going to jump back into until the
1711 * required amount of time has passed.
1712 */
1713 jump = cs;
1714
1715 /*
1716 * Take another snapshot of the timestamp register. Take care to clear
1717 * up the top 32bits of CS_GPR(1) as we're using it for other
1718 * operations below.
1719 */
1720 *cs++ = MI_LOAD_REGISTER_IMM(1);
1721 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1722 *cs++ = 0;
1723 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1724 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1725 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1726
1727 /*
1728 * Do a diff between the 2 timestamps and store the result back into
1729 * CS_GPR(1).
1730 */
1731 *cs++ = MI_MATH(5);
1732 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1733 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1734 *cs++ = MI_MATH_SUB;
1735 *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1736 *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1737
1738 /*
1739 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1740 * timestamp have rolled over the 32bits) into the predicate register
1741 * to be used for the predicated jump.
1742 */
1743 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1744 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1745 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE));
1746
1747 /* Restart from the beginning if we had timestamps roll over. */
1748 *cs++ = (GRAPHICS_VER(i915) < 8 ?
1749 MI_BATCH_BUFFER_START :
1750 MI_BATCH_BUFFER_START_GEN8) |
1751 MI_BATCH_PREDICATE;
1752 *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1753 *cs++ = 0;
1754
1755 /*
1756 * Now add the diff between to previous timestamps and add it to :
1757 * (((1 * << 64) - 1) - delay_ns)
1758 *
1759 * When the Carry Flag contains 1 this means the elapsed time is
1760 * longer than the expected delay, and we can exit the wait loop.
1761 */
1762 *cs++ = MI_LOAD_REGISTER_IMM(2);
1763 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1764 *cs++ = lower_32_bits(delay_ticks);
1765 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
1766 *cs++ = upper_32_bits(delay_ticks);
1767
1768 *cs++ = MI_MATH(4);
1769 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
1770 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
1771 *cs++ = MI_MATH_ADD;
1772 *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1773
1774 *cs++ = MI_ARB_CHECK;
1775
1776 /*
1777 * Transfer the result into the predicate register to be used for the
1778 * predicated jump.
1779 */
1780 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1781 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1782 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE));
1783
1784 /* Predicate the jump. */
1785 *cs++ = (GRAPHICS_VER(i915) < 8 ?
1786 MI_BATCH_BUFFER_START :
1787 MI_BATCH_BUFFER_START_GEN8) |
1788 MI_BATCH_PREDICATE;
1789 *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1790 *cs++ = 0;
1791
1792 /* Restore registers. */
1793 for (i = 0; i < N_CS_GPR; i++)
1794 cs = save_restore_register(
1795 stream, cs, false /* restore */, CS_GPR(i),
1796 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1797 cs = save_restore_register(
1798 stream, cs, false /* restore */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE),
1799 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1800
1801 /* And return to the ring. */
1802 *cs++ = MI_BATCH_BUFFER_END;
1803
1804 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
1805
1806 i915_gem_object_flush_map(bo);
1807 __i915_gem_object_release_map(bo);
1808
1809 stream->noa_wait = vma;
1810 goto out_ww;
1811
1812 err_unpin:
1813 i915_vma_unpin_and_release(&vma, 0);
1814 out_ww:
1815 if (ret == -EDEADLK) {
1816 ret = i915_gem_ww_ctx_backoff(&ww);
1817 if (!ret)
1818 goto retry;
1819 }
1820 i915_gem_ww_ctx_fini(&ww);
1821 if (ret)
1822 i915_gem_object_put(bo);
1823 return ret;
1824 }
1825
write_cs_mi_lri(u32 * cs,const struct i915_oa_reg * reg_data,u32 n_regs)1826 static u32 *write_cs_mi_lri(u32 *cs,
1827 const struct i915_oa_reg *reg_data,
1828 u32 n_regs)
1829 {
1830 u32 i;
1831
1832 for (i = 0; i < n_regs; i++) {
1833 if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
1834 u32 n_lri = min_t(u32,
1835 n_regs - i,
1836 MI_LOAD_REGISTER_IMM_MAX_REGS);
1837
1838 *cs++ = MI_LOAD_REGISTER_IMM(n_lri);
1839 }
1840 *cs++ = i915_mmio_reg_offset(reg_data[i].addr);
1841 *cs++ = reg_data[i].value;
1842 }
1843
1844 return cs;
1845 }
1846
num_lri_dwords(int num_regs)1847 static int num_lri_dwords(int num_regs)
1848 {
1849 int count = 0;
1850
1851 if (num_regs > 0) {
1852 count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
1853 count += num_regs * 2;
1854 }
1855
1856 return count;
1857 }
1858
1859 static struct i915_oa_config_bo *
alloc_oa_config_buffer(struct i915_perf_stream * stream,struct i915_oa_config * oa_config)1860 alloc_oa_config_buffer(struct i915_perf_stream *stream,
1861 struct i915_oa_config *oa_config)
1862 {
1863 struct drm_i915_gem_object *obj;
1864 struct i915_oa_config_bo *oa_bo;
1865 struct i915_gem_ww_ctx ww;
1866 size_t config_length = 0;
1867 u32 *cs;
1868 int err;
1869
1870 oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
1871 if (!oa_bo)
1872 return ERR_PTR(-ENOMEM);
1873
1874 config_length += num_lri_dwords(oa_config->mux_regs_len);
1875 config_length += num_lri_dwords(oa_config->b_counter_regs_len);
1876 config_length += num_lri_dwords(oa_config->flex_regs_len);
1877 config_length += 3; /* MI_BATCH_BUFFER_START */
1878 config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
1879
1880 obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
1881 if (IS_ERR(obj)) {
1882 err = PTR_ERR(obj);
1883 goto err_free;
1884 }
1885
1886 i915_gem_ww_ctx_init(&ww, true);
1887 retry:
1888 err = i915_gem_object_lock(obj, &ww);
1889 if (err)
1890 goto out_ww;
1891
1892 cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
1893 if (IS_ERR(cs)) {
1894 err = PTR_ERR(cs);
1895 goto out_ww;
1896 }
1897
1898 cs = write_cs_mi_lri(cs,
1899 oa_config->mux_regs,
1900 oa_config->mux_regs_len);
1901 cs = write_cs_mi_lri(cs,
1902 oa_config->b_counter_regs,
1903 oa_config->b_counter_regs_len);
1904 cs = write_cs_mi_lri(cs,
1905 oa_config->flex_regs,
1906 oa_config->flex_regs_len);
1907
1908 /* Jump into the active wait. */
1909 *cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
1910 MI_BATCH_BUFFER_START :
1911 MI_BATCH_BUFFER_START_GEN8);
1912 *cs++ = i915_ggtt_offset(stream->noa_wait);
1913 *cs++ = 0;
1914
1915 i915_gem_object_flush_map(obj);
1916 __i915_gem_object_release_map(obj);
1917
1918 oa_bo->vma = i915_vma_instance(obj,
1919 &stream->engine->gt->ggtt->vm,
1920 NULL);
1921 if (IS_ERR(oa_bo->vma)) {
1922 err = PTR_ERR(oa_bo->vma);
1923 goto out_ww;
1924 }
1925
1926 oa_bo->oa_config = i915_oa_config_get(oa_config);
1927 llist_add(&oa_bo->node, &stream->oa_config_bos);
1928
1929 out_ww:
1930 if (err == -EDEADLK) {
1931 err = i915_gem_ww_ctx_backoff(&ww);
1932 if (!err)
1933 goto retry;
1934 }
1935 i915_gem_ww_ctx_fini(&ww);
1936
1937 if (err)
1938 i915_gem_object_put(obj);
1939 err_free:
1940 if (err) {
1941 kfree(oa_bo);
1942 return ERR_PTR(err);
1943 }
1944 return oa_bo;
1945 }
1946
1947 static struct i915_vma *
get_oa_vma(struct i915_perf_stream * stream,struct i915_oa_config * oa_config)1948 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
1949 {
1950 struct i915_oa_config_bo *oa_bo;
1951
1952 /*
1953 * Look for the buffer in the already allocated BOs attached
1954 * to the stream.
1955 */
1956 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
1957 if (oa_bo->oa_config == oa_config &&
1958 memcmp(oa_bo->oa_config->uuid,
1959 oa_config->uuid,
1960 sizeof(oa_config->uuid)) == 0)
1961 goto out;
1962 }
1963
1964 oa_bo = alloc_oa_config_buffer(stream, oa_config);
1965 if (IS_ERR(oa_bo))
1966 return ERR_CAST(oa_bo);
1967
1968 out:
1969 return i915_vma_get(oa_bo->vma);
1970 }
1971
1972 static int
emit_oa_config(struct i915_perf_stream * stream,struct i915_oa_config * oa_config,struct intel_context * ce,struct i915_active * active)1973 emit_oa_config(struct i915_perf_stream *stream,
1974 struct i915_oa_config *oa_config,
1975 struct intel_context *ce,
1976 struct i915_active *active)
1977 {
1978 struct i915_request *rq;
1979 struct i915_vma *vma;
1980 struct i915_gem_ww_ctx ww;
1981 int err;
1982
1983 vma = get_oa_vma(stream, oa_config);
1984 if (IS_ERR(vma))
1985 return PTR_ERR(vma);
1986
1987 i915_gem_ww_ctx_init(&ww, true);
1988 retry:
1989 err = i915_gem_object_lock(vma->obj, &ww);
1990 if (err)
1991 goto err;
1992
1993 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
1994 if (err)
1995 goto err;
1996
1997 intel_engine_pm_get(ce->engine);
1998 rq = i915_request_create(ce);
1999 intel_engine_pm_put(ce->engine);
2000 if (IS_ERR(rq)) {
2001 err = PTR_ERR(rq);
2002 goto err_vma_unpin;
2003 }
2004
2005 if (!IS_ERR_OR_NULL(active)) {
2006 /* After all individual context modifications */
2007 err = i915_request_await_active(rq, active,
2008 I915_ACTIVE_AWAIT_ACTIVE);
2009 if (err)
2010 goto err_add_request;
2011
2012 err = i915_active_add_request(active, rq);
2013 if (err)
2014 goto err_add_request;
2015 }
2016
2017 err = i915_request_await_object(rq, vma->obj, 0);
2018 if (!err)
2019 err = i915_vma_move_to_active(vma, rq, 0);
2020 if (err)
2021 goto err_add_request;
2022
2023 err = rq->engine->emit_bb_start(rq,
2024 vma->node.start, 0,
2025 I915_DISPATCH_SECURE);
2026 if (err)
2027 goto err_add_request;
2028
2029 err_add_request:
2030 i915_request_add(rq);
2031 err_vma_unpin:
2032 i915_vma_unpin(vma);
2033 err:
2034 if (err == -EDEADLK) {
2035 err = i915_gem_ww_ctx_backoff(&ww);
2036 if (!err)
2037 goto retry;
2038 }
2039
2040 i915_gem_ww_ctx_fini(&ww);
2041 i915_vma_put(vma);
2042 return err;
2043 }
2044
oa_context(struct i915_perf_stream * stream)2045 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2046 {
2047 return stream->pinned_ctx ?: stream->engine->kernel_context;
2048 }
2049
2050 static int
hsw_enable_metric_set(struct i915_perf_stream * stream,struct i915_active * active)2051 hsw_enable_metric_set(struct i915_perf_stream *stream,
2052 struct i915_active *active)
2053 {
2054 struct intel_uncore *uncore = stream->uncore;
2055
2056 /*
2057 * PRM:
2058 *
2059 * OA unit is using “crclk” for its functionality. When trunk
2060 * level clock gating takes place, OA clock would be gated,
2061 * unable to count the events from non-render clock domain.
2062 * Render clock gating must be disabled when OA is enabled to
2063 * count the events from non-render domain. Unit level clock
2064 * gating for RCS should also be disabled.
2065 */
2066 intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2067 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2068 intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2069 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2070
2071 return emit_oa_config(stream,
2072 stream->oa_config, oa_context(stream),
2073 active);
2074 }
2075
hsw_disable_metric_set(struct i915_perf_stream * stream)2076 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2077 {
2078 struct intel_uncore *uncore = stream->uncore;
2079
2080 intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2081 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2082 intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2083 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2084
2085 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2086 }
2087
oa_config_flex_reg(const struct i915_oa_config * oa_config,i915_reg_t reg)2088 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2089 i915_reg_t reg)
2090 {
2091 u32 mmio = i915_mmio_reg_offset(reg);
2092 int i;
2093
2094 /*
2095 * This arbitrary default will select the 'EU FPU0 Pipeline
2096 * Active' event. In the future it's anticipated that there
2097 * will be an explicit 'No Event' we can select, but not yet...
2098 */
2099 if (!oa_config)
2100 return 0;
2101
2102 for (i = 0; i < oa_config->flex_regs_len; i++) {
2103 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2104 return oa_config->flex_regs[i].value;
2105 }
2106
2107 return 0;
2108 }
2109 /*
2110 * NB: It must always remain pointer safe to run this even if the OA unit
2111 * has been disabled.
2112 *
2113 * It's fine to put out-of-date values into these per-context registers
2114 * in the case that the OA unit has been disabled.
2115 */
2116 static void
gen8_update_reg_state_unlocked(const struct intel_context * ce,const struct i915_perf_stream * stream)2117 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2118 const struct i915_perf_stream *stream)
2119 {
2120 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2121 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2122 /* The MMIO offsets for Flex EU registers aren't contiguous */
2123 static const i915_reg_t flex_regs[] = {
2124 EU_PERF_CNTL0,
2125 EU_PERF_CNTL1,
2126 EU_PERF_CNTL2,
2127 EU_PERF_CNTL3,
2128 EU_PERF_CNTL4,
2129 EU_PERF_CNTL5,
2130 EU_PERF_CNTL6,
2131 };
2132 u32 *reg_state = ce->lrc_reg_state;
2133 int i;
2134
2135 reg_state[ctx_oactxctrl + 1] =
2136 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2137 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2138 GEN8_OA_COUNTER_RESUME;
2139
2140 for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2141 reg_state[ctx_flexeu0 + i * 2 + 1] =
2142 oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2143 }
2144
2145 struct flex {
2146 i915_reg_t reg;
2147 u32 offset;
2148 u32 value;
2149 };
2150
2151 static int
gen8_store_flex(struct i915_request * rq,struct intel_context * ce,const struct flex * flex,unsigned int count)2152 gen8_store_flex(struct i915_request *rq,
2153 struct intel_context *ce,
2154 const struct flex *flex, unsigned int count)
2155 {
2156 u32 offset;
2157 u32 *cs;
2158
2159 cs = intel_ring_begin(rq, 4 * count);
2160 if (IS_ERR(cs))
2161 return PTR_ERR(cs);
2162
2163 offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2164 do {
2165 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2166 *cs++ = offset + flex->offset * sizeof(u32);
2167 *cs++ = 0;
2168 *cs++ = flex->value;
2169 } while (flex++, --count);
2170
2171 intel_ring_advance(rq, cs);
2172
2173 return 0;
2174 }
2175
2176 static int
gen8_load_flex(struct i915_request * rq,struct intel_context * ce,const struct flex * flex,unsigned int count)2177 gen8_load_flex(struct i915_request *rq,
2178 struct intel_context *ce,
2179 const struct flex *flex, unsigned int count)
2180 {
2181 u32 *cs;
2182
2183 GEM_BUG_ON(!count || count > 63);
2184
2185 cs = intel_ring_begin(rq, 2 * count + 2);
2186 if (IS_ERR(cs))
2187 return PTR_ERR(cs);
2188
2189 *cs++ = MI_LOAD_REGISTER_IMM(count);
2190 do {
2191 *cs++ = i915_mmio_reg_offset(flex->reg);
2192 *cs++ = flex->value;
2193 } while (flex++, --count);
2194 *cs++ = MI_NOOP;
2195
2196 intel_ring_advance(rq, cs);
2197
2198 return 0;
2199 }
2200
gen8_modify_context(struct intel_context * ce,const struct flex * flex,unsigned int count)2201 static int gen8_modify_context(struct intel_context *ce,
2202 const struct flex *flex, unsigned int count)
2203 {
2204 struct i915_request *rq;
2205 int err;
2206
2207 rq = intel_engine_create_kernel_request(ce->engine);
2208 if (IS_ERR(rq))
2209 return PTR_ERR(rq);
2210
2211 /* Serialise with the remote context */
2212 err = intel_context_prepare_remote_request(ce, rq);
2213 if (err == 0)
2214 err = gen8_store_flex(rq, ce, flex, count);
2215
2216 i915_request_add(rq);
2217 return err;
2218 }
2219
2220 static int
gen8_modify_self(struct intel_context * ce,const struct flex * flex,unsigned int count,struct i915_active * active)2221 gen8_modify_self(struct intel_context *ce,
2222 const struct flex *flex, unsigned int count,
2223 struct i915_active *active)
2224 {
2225 struct i915_request *rq;
2226 int err;
2227
2228 intel_engine_pm_get(ce->engine);
2229 rq = i915_request_create(ce);
2230 intel_engine_pm_put(ce->engine);
2231 if (IS_ERR(rq))
2232 return PTR_ERR(rq);
2233
2234 if (!IS_ERR_OR_NULL(active)) {
2235 err = i915_active_add_request(active, rq);
2236 if (err)
2237 goto err_add_request;
2238 }
2239
2240 err = gen8_load_flex(rq, ce, flex, count);
2241 if (err)
2242 goto err_add_request;
2243
2244 err_add_request:
2245 i915_request_add(rq);
2246 return err;
2247 }
2248
gen8_configure_context(struct i915_gem_context * ctx,struct flex * flex,unsigned int count)2249 static int gen8_configure_context(struct i915_gem_context *ctx,
2250 struct flex *flex, unsigned int count)
2251 {
2252 struct i915_gem_engines_iter it;
2253 struct intel_context *ce;
2254 int err = 0;
2255
2256 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2257 GEM_BUG_ON(ce == ce->engine->kernel_context);
2258
2259 if (ce->engine->class != RENDER_CLASS)
2260 continue;
2261
2262 /* Otherwise OA settings will be set upon first use */
2263 if (!intel_context_pin_if_active(ce))
2264 continue;
2265
2266 flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2267 err = gen8_modify_context(ce, flex, count);
2268
2269 intel_context_unpin(ce);
2270 if (err)
2271 break;
2272 }
2273 i915_gem_context_unlock_engines(ctx);
2274
2275 return err;
2276 }
2277
gen12_configure_oar_context(struct i915_perf_stream * stream,struct i915_active * active)2278 static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2279 struct i915_active *active)
2280 {
2281 int err;
2282 struct intel_context *ce = stream->pinned_ctx;
2283 u32 format = stream->oa_buffer.format;
2284 struct flex regs_context[] = {
2285 {
2286 GEN8_OACTXCONTROL,
2287 stream->perf->ctx_oactxctrl_offset + 1,
2288 active ? GEN8_OA_COUNTER_RESUME : 0,
2289 },
2290 };
2291 /* Offsets in regs_lri are not used since this configuration is only
2292 * applied using LRI. Initialize the correct offsets for posterity.
2293 */
2294 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2295 struct flex regs_lri[] = {
2296 {
2297 GEN12_OAR_OACONTROL,
2298 GEN12_OAR_OACONTROL_OFFSET + 1,
2299 (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2300 (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2301 },
2302 {
2303 RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2304 CTX_CONTEXT_CONTROL,
2305 _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2306 active ?
2307 GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2308 0)
2309 },
2310 };
2311
2312 /* Modify the context image of pinned context with regs_context*/
2313 err = intel_context_lock_pinned(ce);
2314 if (err)
2315 return err;
2316
2317 err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
2318 intel_context_unlock_pinned(ce);
2319 if (err)
2320 return err;
2321
2322 /* Apply regs_lri using LRI with pinned context */
2323 return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2324 }
2325
2326 /*
2327 * Manages updating the per-context aspects of the OA stream
2328 * configuration across all contexts.
2329 *
2330 * The awkward consideration here is that OACTXCONTROL controls the
2331 * exponent for periodic sampling which is primarily used for system
2332 * wide profiling where we'd like a consistent sampling period even in
2333 * the face of context switches.
2334 *
2335 * Our approach of updating the register state context (as opposed to
2336 * say using a workaround batch buffer) ensures that the hardware
2337 * won't automatically reload an out-of-date timer exponent even
2338 * transiently before a WA BB could be parsed.
2339 *
2340 * This function needs to:
2341 * - Ensure the currently running context's per-context OA state is
2342 * updated
2343 * - Ensure that all existing contexts will have the correct per-context
2344 * OA state if they are scheduled for use.
2345 * - Ensure any new contexts will be initialized with the correct
2346 * per-context OA state.
2347 *
2348 * Note: it's only the RCS/Render context that has any OA state.
2349 * Note: the first flex register passed must always be R_PWR_CLK_STATE
2350 */
2351 static int
oa_configure_all_contexts(struct i915_perf_stream * stream,struct flex * regs,size_t num_regs,struct i915_active * active)2352 oa_configure_all_contexts(struct i915_perf_stream *stream,
2353 struct flex *regs,
2354 size_t num_regs,
2355 struct i915_active *active)
2356 {
2357 struct drm_i915_private *i915 = stream->perf->i915;
2358 struct intel_engine_cs *engine;
2359 struct i915_gem_context *ctx, *cn;
2360 int err;
2361
2362 lockdep_assert_held(&stream->perf->lock);
2363
2364 /*
2365 * The OA register config is setup through the context image. This image
2366 * might be written to by the GPU on context switch (in particular on
2367 * lite-restore). This means we can't safely update a context's image,
2368 * if this context is scheduled/submitted to run on the GPU.
2369 *
2370 * We could emit the OA register config through the batch buffer but
2371 * this might leave small interval of time where the OA unit is
2372 * configured at an invalid sampling period.
2373 *
2374 * Note that since we emit all requests from a single ring, there
2375 * is still an implicit global barrier here that may cause a high
2376 * priority context to wait for an otherwise independent low priority
2377 * context. Contexts idle at the time of reconfiguration are not
2378 * trapped behind the barrier.
2379 */
2380 spin_lock(&i915->gem.contexts.lock);
2381 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2382 if (!kref_get_unless_zero(&ctx->ref))
2383 continue;
2384
2385 spin_unlock(&i915->gem.contexts.lock);
2386
2387 err = gen8_configure_context(ctx, regs, num_regs);
2388 if (err) {
2389 i915_gem_context_put(ctx);
2390 return err;
2391 }
2392
2393 spin_lock(&i915->gem.contexts.lock);
2394 list_safe_reset_next(ctx, cn, link);
2395 i915_gem_context_put(ctx);
2396 }
2397 spin_unlock(&i915->gem.contexts.lock);
2398
2399 /*
2400 * After updating all other contexts, we need to modify ourselves.
2401 * If we don't modify the kernel_context, we do not get events while
2402 * idle.
2403 */
2404 for_each_uabi_engine(engine, i915) {
2405 struct intel_context *ce = engine->kernel_context;
2406
2407 if (engine->class != RENDER_CLASS)
2408 continue;
2409
2410 regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2411
2412 err = gen8_modify_self(ce, regs, num_regs, active);
2413 if (err)
2414 return err;
2415 }
2416
2417 return 0;
2418 }
2419
2420 static int
gen12_configure_all_contexts(struct i915_perf_stream * stream,const struct i915_oa_config * oa_config,struct i915_active * active)2421 gen12_configure_all_contexts(struct i915_perf_stream *stream,
2422 const struct i915_oa_config *oa_config,
2423 struct i915_active *active)
2424 {
2425 struct flex regs[] = {
2426 {
2427 GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2428 CTX_R_PWR_CLK_STATE,
2429 },
2430 };
2431
2432 return oa_configure_all_contexts(stream,
2433 regs, ARRAY_SIZE(regs),
2434 active);
2435 }
2436
2437 static int
lrc_configure_all_contexts(struct i915_perf_stream * stream,const struct i915_oa_config * oa_config,struct i915_active * active)2438 lrc_configure_all_contexts(struct i915_perf_stream *stream,
2439 const struct i915_oa_config *oa_config,
2440 struct i915_active *active)
2441 {
2442 /* The MMIO offsets for Flex EU registers aren't contiguous */
2443 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2444 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2445 struct flex regs[] = {
2446 {
2447 GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2448 CTX_R_PWR_CLK_STATE,
2449 },
2450 {
2451 GEN8_OACTXCONTROL,
2452 stream->perf->ctx_oactxctrl_offset + 1,
2453 },
2454 { EU_PERF_CNTL0, ctx_flexeuN(0) },
2455 { EU_PERF_CNTL1, ctx_flexeuN(1) },
2456 { EU_PERF_CNTL2, ctx_flexeuN(2) },
2457 { EU_PERF_CNTL3, ctx_flexeuN(3) },
2458 { EU_PERF_CNTL4, ctx_flexeuN(4) },
2459 { EU_PERF_CNTL5, ctx_flexeuN(5) },
2460 { EU_PERF_CNTL6, ctx_flexeuN(6) },
2461 };
2462 #undef ctx_flexeuN
2463 int i;
2464
2465 regs[1].value =
2466 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2467 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2468 GEN8_OA_COUNTER_RESUME;
2469
2470 for (i = 2; i < ARRAY_SIZE(regs); i++)
2471 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2472
2473 return oa_configure_all_contexts(stream,
2474 regs, ARRAY_SIZE(regs),
2475 active);
2476 }
2477
2478 static int
gen8_enable_metric_set(struct i915_perf_stream * stream,struct i915_active * active)2479 gen8_enable_metric_set(struct i915_perf_stream *stream,
2480 struct i915_active *active)
2481 {
2482 struct intel_uncore *uncore = stream->uncore;
2483 struct i915_oa_config *oa_config = stream->oa_config;
2484 int ret;
2485
2486 /*
2487 * We disable slice/unslice clock ratio change reports on SKL since
2488 * they are too noisy. The HW generates a lot of redundant reports
2489 * where the ratio hasn't really changed causing a lot of redundant
2490 * work to processes and increasing the chances we'll hit buffer
2491 * overruns.
2492 *
2493 * Although we don't currently use the 'disable overrun' OABUFFER
2494 * feature it's worth noting that clock ratio reports have to be
2495 * disabled before considering to use that feature since the HW doesn't
2496 * correctly block these reports.
2497 *
2498 * Currently none of the high-level metrics we have depend on knowing
2499 * this ratio to normalize.
2500 *
2501 * Note: This register is not power context saved and restored, but
2502 * that's OK considering that we disable RC6 while the OA unit is
2503 * enabled.
2504 *
2505 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2506 * be read back from automatically triggered reports, as part of the
2507 * RPT_ID field.
2508 */
2509 if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
2510 intel_uncore_write(uncore, GEN8_OA_DEBUG,
2511 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2512 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2513 }
2514
2515 /*
2516 * Update all contexts prior writing the mux configurations as we need
2517 * to make sure all slices/subslices are ON before writing to NOA
2518 * registers.
2519 */
2520 ret = lrc_configure_all_contexts(stream, oa_config, active);
2521 if (ret)
2522 return ret;
2523
2524 return emit_oa_config(stream,
2525 stream->oa_config, oa_context(stream),
2526 active);
2527 }
2528
oag_report_ctx_switches(const struct i915_perf_stream * stream)2529 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2530 {
2531 return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2532 (stream->sample_flags & SAMPLE_OA_REPORT) ?
2533 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2534 }
2535
2536 static int
gen12_enable_metric_set(struct i915_perf_stream * stream,struct i915_active * active)2537 gen12_enable_metric_set(struct i915_perf_stream *stream,
2538 struct i915_active *active)
2539 {
2540 struct intel_uncore *uncore = stream->uncore;
2541 struct i915_oa_config *oa_config = stream->oa_config;
2542 bool periodic = stream->periodic;
2543 u32 period_exponent = stream->period_exponent;
2544 int ret;
2545
2546 intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2547 /* Disable clk ratio reports, like previous Gens. */
2548 _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2549 GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2550 /*
2551 * If the user didn't require OA reports, instruct
2552 * the hardware not to emit ctx switch reports.
2553 */
2554 oag_report_ctx_switches(stream));
2555
2556 intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2557 (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2558 GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2559 (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2560 : 0);
2561
2562 /*
2563 * Update all contexts prior writing the mux configurations as we need
2564 * to make sure all slices/subslices are ON before writing to NOA
2565 * registers.
2566 */
2567 ret = gen12_configure_all_contexts(stream, oa_config, active);
2568 if (ret)
2569 return ret;
2570
2571 /*
2572 * For Gen12, performance counters are context
2573 * saved/restored. Only enable it for the context that
2574 * requested this.
2575 */
2576 if (stream->ctx) {
2577 ret = gen12_configure_oar_context(stream, active);
2578 if (ret)
2579 return ret;
2580 }
2581
2582 return emit_oa_config(stream,
2583 stream->oa_config, oa_context(stream),
2584 active);
2585 }
2586
gen8_disable_metric_set(struct i915_perf_stream * stream)2587 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2588 {
2589 struct intel_uncore *uncore = stream->uncore;
2590
2591 /* Reset all contexts' slices/subslices configurations. */
2592 lrc_configure_all_contexts(stream, NULL, NULL);
2593
2594 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2595 }
2596
gen11_disable_metric_set(struct i915_perf_stream * stream)2597 static void gen11_disable_metric_set(struct i915_perf_stream *stream)
2598 {
2599 struct intel_uncore *uncore = stream->uncore;
2600
2601 /* Reset all contexts' slices/subslices configurations. */
2602 lrc_configure_all_contexts(stream, NULL, NULL);
2603
2604 /* Make sure we disable noa to save power. */
2605 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2606 }
2607
gen12_disable_metric_set(struct i915_perf_stream * stream)2608 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2609 {
2610 struct intel_uncore *uncore = stream->uncore;
2611
2612 /* Reset all contexts' slices/subslices configurations. */
2613 gen12_configure_all_contexts(stream, NULL, NULL);
2614
2615 /* disable the context save/restore or OAR counters */
2616 if (stream->ctx)
2617 gen12_configure_oar_context(stream, NULL);
2618
2619 /* Make sure we disable noa to save power. */
2620 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2621 }
2622
gen7_oa_enable(struct i915_perf_stream * stream)2623 static void gen7_oa_enable(struct i915_perf_stream *stream)
2624 {
2625 struct intel_uncore *uncore = stream->uncore;
2626 struct i915_gem_context *ctx = stream->ctx;
2627 u32 ctx_id = stream->specific_ctx_id;
2628 bool periodic = stream->periodic;
2629 u32 period_exponent = stream->period_exponent;
2630 u32 report_format = stream->oa_buffer.format;
2631
2632 /*
2633 * Reset buf pointers so we don't forward reports from before now.
2634 *
2635 * Think carefully if considering trying to avoid this, since it
2636 * also ensures status flags and the buffer itself are cleared
2637 * in error paths, and we have checks for invalid reports based
2638 * on the assumption that certain fields are written to zeroed
2639 * memory which this helps maintains.
2640 */
2641 gen7_init_oa_buffer(stream);
2642
2643 intel_uncore_write(uncore, GEN7_OACONTROL,
2644 (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2645 (period_exponent <<
2646 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2647 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2648 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2649 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2650 GEN7_OACONTROL_ENABLE);
2651 }
2652
gen8_oa_enable(struct i915_perf_stream * stream)2653 static void gen8_oa_enable(struct i915_perf_stream *stream)
2654 {
2655 struct intel_uncore *uncore = stream->uncore;
2656 u32 report_format = stream->oa_buffer.format;
2657
2658 /*
2659 * Reset buf pointers so we don't forward reports from before now.
2660 *
2661 * Think carefully if considering trying to avoid this, since it
2662 * also ensures status flags and the buffer itself are cleared
2663 * in error paths, and we have checks for invalid reports based
2664 * on the assumption that certain fields are written to zeroed
2665 * memory which this helps maintains.
2666 */
2667 gen8_init_oa_buffer(stream);
2668
2669 /*
2670 * Note: we don't rely on the hardware to perform single context
2671 * filtering and instead filter on the cpu based on the context-id
2672 * field of reports
2673 */
2674 intel_uncore_write(uncore, GEN8_OACONTROL,
2675 (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
2676 GEN8_OA_COUNTER_ENABLE);
2677 }
2678
gen12_oa_enable(struct i915_perf_stream * stream)2679 static void gen12_oa_enable(struct i915_perf_stream *stream)
2680 {
2681 struct intel_uncore *uncore = stream->uncore;
2682 u32 report_format = stream->oa_buffer.format;
2683
2684 /*
2685 * If we don't want OA reports from the OA buffer, then we don't even
2686 * need to program the OAG unit.
2687 */
2688 if (!(stream->sample_flags & SAMPLE_OA_REPORT))
2689 return;
2690
2691 gen12_init_oa_buffer(stream);
2692
2693 intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
2694 (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
2695 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
2696 }
2697
2698 /**
2699 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2700 * @stream: An i915 perf stream opened for OA metrics
2701 *
2702 * [Re]enables hardware periodic sampling according to the period configured
2703 * when opening the stream. This also starts a hrtimer that will periodically
2704 * check for data in the circular OA buffer for notifying userspace (e.g.
2705 * during a read() or poll()).
2706 */
i915_oa_stream_enable(struct i915_perf_stream * stream)2707 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2708 {
2709 stream->pollin = false;
2710
2711 stream->perf->ops.oa_enable(stream);
2712
2713 if (stream->sample_flags & SAMPLE_OA_REPORT)
2714 hrtimer_start(&stream->poll_check_timer,
2715 ns_to_ktime(stream->poll_oa_period),
2716 HRTIMER_MODE_REL_PINNED);
2717 }
2718
gen7_oa_disable(struct i915_perf_stream * stream)2719 static void gen7_oa_disable(struct i915_perf_stream *stream)
2720 {
2721 struct intel_uncore *uncore = stream->uncore;
2722
2723 intel_uncore_write(uncore, GEN7_OACONTROL, 0);
2724 if (intel_wait_for_register(uncore,
2725 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
2726 50))
2727 drm_err(&stream->perf->i915->drm,
2728 "wait for OA to be disabled timed out\n");
2729 }
2730
gen8_oa_disable(struct i915_perf_stream * stream)2731 static void gen8_oa_disable(struct i915_perf_stream *stream)
2732 {
2733 struct intel_uncore *uncore = stream->uncore;
2734
2735 intel_uncore_write(uncore, GEN8_OACONTROL, 0);
2736 if (intel_wait_for_register(uncore,
2737 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
2738 50))
2739 drm_err(&stream->perf->i915->drm,
2740 "wait for OA to be disabled timed out\n");
2741 }
2742
gen12_oa_disable(struct i915_perf_stream * stream)2743 static void gen12_oa_disable(struct i915_perf_stream *stream)
2744 {
2745 struct intel_uncore *uncore = stream->uncore;
2746
2747 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
2748 if (intel_wait_for_register(uncore,
2749 GEN12_OAG_OACONTROL,
2750 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
2751 50))
2752 drm_err(&stream->perf->i915->drm,
2753 "wait for OA to be disabled timed out\n");
2754
2755 intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
2756 if (intel_wait_for_register(uncore,
2757 GEN12_OA_TLB_INV_CR,
2758 1, 0,
2759 50))
2760 drm_err(&stream->perf->i915->drm,
2761 "wait for OA tlb invalidate timed out\n");
2762 }
2763
2764 /**
2765 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
2766 * @stream: An i915 perf stream opened for OA metrics
2767 *
2768 * Stops the OA unit from periodically writing counter reports into the
2769 * circular OA buffer. This also stops the hrtimer that periodically checks for
2770 * data in the circular OA buffer, for notifying userspace.
2771 */
i915_oa_stream_disable(struct i915_perf_stream * stream)2772 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
2773 {
2774 stream->perf->ops.oa_disable(stream);
2775
2776 if (stream->sample_flags & SAMPLE_OA_REPORT)
2777 hrtimer_cancel(&stream->poll_check_timer);
2778 }
2779
2780 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2781 .destroy = i915_oa_stream_destroy,
2782 .enable = i915_oa_stream_enable,
2783 .disable = i915_oa_stream_disable,
2784 .wait_unlocked = i915_oa_wait_unlocked,
2785 .poll_wait = i915_oa_poll_wait,
2786 .read = i915_oa_read,
2787 };
2788
i915_perf_stream_enable_sync(struct i915_perf_stream * stream)2789 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
2790 {
2791 struct i915_active *active;
2792 int err;
2793
2794 active = i915_active_create();
2795 if (!active)
2796 return -ENOMEM;
2797
2798 err = stream->perf->ops.enable_metric_set(stream, active);
2799 if (err == 0)
2800 __i915_active_wait(active, TASK_UNINTERRUPTIBLE);
2801
2802 i915_active_put(active);
2803 return err;
2804 }
2805
2806 static void
get_default_sseu_config(struct intel_sseu * out_sseu,struct intel_engine_cs * engine)2807 get_default_sseu_config(struct intel_sseu *out_sseu,
2808 struct intel_engine_cs *engine)
2809 {
2810 const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
2811
2812 *out_sseu = intel_sseu_from_device_info(devinfo_sseu);
2813
2814 if (GRAPHICS_VER(engine->i915) == 11) {
2815 /*
2816 * We only need subslice count so it doesn't matter which ones
2817 * we select - just turn off low bits in the amount of half of
2818 * all available subslices per slice.
2819 */
2820 out_sseu->subslice_mask =
2821 ~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
2822 out_sseu->slice_mask = 0x1;
2823 }
2824 }
2825
2826 static int
get_sseu_config(struct intel_sseu * out_sseu,struct intel_engine_cs * engine,const struct drm_i915_gem_context_param_sseu * drm_sseu)2827 get_sseu_config(struct intel_sseu *out_sseu,
2828 struct intel_engine_cs *engine,
2829 const struct drm_i915_gem_context_param_sseu *drm_sseu)
2830 {
2831 if (drm_sseu->engine.engine_class != engine->uabi_class ||
2832 drm_sseu->engine.engine_instance != engine->uabi_instance)
2833 return -EINVAL;
2834
2835 return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
2836 }
2837
2838 /**
2839 * i915_oa_stream_init - validate combined props for OA stream and init
2840 * @stream: An i915 perf stream
2841 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2842 * @props: The property state that configures stream (individually validated)
2843 *
2844 * While read_properties_unlocked() validates properties in isolation it
2845 * doesn't ensure that the combination necessarily makes sense.
2846 *
2847 * At this point it has been determined that userspace wants a stream of
2848 * OA metrics, but still we need to further validate the combined
2849 * properties are OK.
2850 *
2851 * If the configuration makes sense then we can allocate memory for
2852 * a circular OA buffer and apply the requested metric set configuration.
2853 *
2854 * Returns: zero on success or a negative error code.
2855 */
i915_oa_stream_init(struct i915_perf_stream * stream,struct drm_i915_perf_open_param * param,struct perf_open_properties * props)2856 static int i915_oa_stream_init(struct i915_perf_stream *stream,
2857 struct drm_i915_perf_open_param *param,
2858 struct perf_open_properties *props)
2859 {
2860 struct drm_i915_private *i915 = stream->perf->i915;
2861 struct i915_perf *perf = stream->perf;
2862 int format_size;
2863 int ret;
2864
2865 if (!props->engine) {
2866 DRM_DEBUG("OA engine not specified\n");
2867 return -EINVAL;
2868 }
2869
2870 /*
2871 * If the sysfs metrics/ directory wasn't registered for some
2872 * reason then don't let userspace try their luck with config
2873 * IDs
2874 */
2875 if (!perf->metrics_kobj) {
2876 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
2877 return -EINVAL;
2878 }
2879
2880 if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
2881 (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
2882 DRM_DEBUG("Only OA report sampling supported\n");
2883 return -EINVAL;
2884 }
2885
2886 if (!perf->ops.enable_metric_set) {
2887 DRM_DEBUG("OA unit not supported\n");
2888 return -ENODEV;
2889 }
2890
2891 /*
2892 * To avoid the complexity of having to accurately filter
2893 * counter reports and marshal to the appropriate client
2894 * we currently only allow exclusive access
2895 */
2896 if (perf->exclusive_stream) {
2897 DRM_DEBUG("OA unit already in use\n");
2898 return -EBUSY;
2899 }
2900
2901 if (!props->oa_format) {
2902 DRM_DEBUG("OA report format not specified\n");
2903 return -EINVAL;
2904 }
2905
2906 stream->engine = props->engine;
2907 stream->uncore = stream->engine->gt->uncore;
2908
2909 stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2910
2911 format_size = perf->oa_formats[props->oa_format].size;
2912
2913 stream->sample_flags = props->sample_flags;
2914 stream->sample_size += format_size;
2915
2916 stream->oa_buffer.format_size = format_size;
2917 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
2918 return -EINVAL;
2919
2920 stream->hold_preemption = props->hold_preemption;
2921
2922 stream->oa_buffer.format =
2923 perf->oa_formats[props->oa_format].format;
2924
2925 stream->periodic = props->oa_periodic;
2926 if (stream->periodic)
2927 stream->period_exponent = props->oa_period_exponent;
2928
2929 if (stream->ctx) {
2930 ret = oa_get_render_ctx_id(stream);
2931 if (ret) {
2932 DRM_DEBUG("Invalid context id to filter with\n");
2933 return ret;
2934 }
2935 }
2936
2937 ret = alloc_noa_wait(stream);
2938 if (ret) {
2939 DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
2940 goto err_noa_wait_alloc;
2941 }
2942
2943 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
2944 if (!stream->oa_config) {
2945 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
2946 ret = -EINVAL;
2947 goto err_config;
2948 }
2949
2950 /* PRM - observability performance counters:
2951 *
2952 * OACONTROL, performance counter enable, note:
2953 *
2954 * "When this bit is set, in order to have coherent counts,
2955 * RC6 power state and trunk clock gating must be disabled.
2956 * This can be achieved by programming MMIO registers as
2957 * 0xA094=0 and 0xA090[31]=1"
2958 *
2959 * In our case we are expecting that taking pm + FORCEWAKE
2960 * references will effectively disable RC6.
2961 */
2962 intel_engine_pm_get(stream->engine);
2963 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
2964
2965 ret = alloc_oa_buffer(stream);
2966 if (ret)
2967 goto err_oa_buf_alloc;
2968
2969 stream->ops = &i915_oa_stream_ops;
2970
2971 perf->sseu = props->sseu;
2972 WRITE_ONCE(perf->exclusive_stream, stream);
2973
2974 ret = i915_perf_stream_enable_sync(stream);
2975 if (ret) {
2976 DRM_DEBUG("Unable to enable metric set\n");
2977 goto err_enable;
2978 }
2979
2980 DRM_DEBUG("opening stream oa config uuid=%s\n",
2981 stream->oa_config->uuid);
2982
2983 hrtimer_init(&stream->poll_check_timer,
2984 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2985 stream->poll_check_timer.function = oa_poll_check_timer_cb;
2986 init_waitqueue_head(&stream->poll_wq);
2987 spin_lock_init(&stream->oa_buffer.ptr_lock);
2988
2989 return 0;
2990
2991 err_enable:
2992 WRITE_ONCE(perf->exclusive_stream, NULL);
2993 perf->ops.disable_metric_set(stream);
2994
2995 free_oa_buffer(stream);
2996
2997 err_oa_buf_alloc:
2998 free_oa_configs(stream);
2999
3000 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
3001 intel_engine_pm_put(stream->engine);
3002
3003 err_config:
3004 free_noa_wait(stream);
3005
3006 err_noa_wait_alloc:
3007 if (stream->ctx)
3008 oa_put_render_ctx_id(stream);
3009
3010 return ret;
3011 }
3012
i915_oa_init_reg_state(const struct intel_context * ce,const struct intel_engine_cs * engine)3013 void i915_oa_init_reg_state(const struct intel_context *ce,
3014 const struct intel_engine_cs *engine)
3015 {
3016 struct i915_perf_stream *stream;
3017
3018 if (engine->class != RENDER_CLASS)
3019 return;
3020
3021 /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
3022 stream = READ_ONCE(engine->i915->perf.exclusive_stream);
3023 if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
3024 gen8_update_reg_state_unlocked(ce, stream);
3025 }
3026
3027 /**
3028 * i915_perf_read - handles read() FOP for i915 perf stream FDs
3029 * @file: An i915 perf stream file
3030 * @buf: destination buffer given by userspace
3031 * @count: the number of bytes userspace wants to read
3032 * @ppos: (inout) file seek position (unused)
3033 *
3034 * The entry point for handling a read() on a stream file descriptor from
3035 * userspace. Most of the work is left to the i915_perf_read_locked() and
3036 * &i915_perf_stream_ops->read but to save having stream implementations (of
3037 * which we might have multiple later) we handle blocking read here.
3038 *
3039 * We can also consistently treat trying to read from a disabled stream
3040 * as an IO error so implementations can assume the stream is enabled
3041 * while reading.
3042 *
3043 * Returns: The number of bytes copied or a negative error code on failure.
3044 */
i915_perf_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)3045 static ssize_t i915_perf_read(struct file *file,
3046 char __user *buf,
3047 size_t count,
3048 loff_t *ppos)
3049 {
3050 struct i915_perf_stream *stream = file->private_data;
3051 struct i915_perf *perf = stream->perf;
3052 size_t offset = 0;
3053 int ret;
3054
3055 /* To ensure it's handled consistently we simply treat all reads of a
3056 * disabled stream as an error. In particular it might otherwise lead
3057 * to a deadlock for blocking file descriptors...
3058 */
3059 if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
3060 return -EIO;
3061
3062 if (!(file->f_flags & O_NONBLOCK)) {
3063 /* There's the small chance of false positives from
3064 * stream->ops->wait_unlocked.
3065 *
3066 * E.g. with single context filtering since we only wait until
3067 * oabuffer has >= 1 report we don't immediately know whether
3068 * any reports really belong to the current context
3069 */
3070 do {
3071 ret = stream->ops->wait_unlocked(stream);
3072 if (ret)
3073 return ret;
3074
3075 mutex_lock(&perf->lock);
3076 ret = stream->ops->read(stream, buf, count, &offset);
3077 mutex_unlock(&perf->lock);
3078 } while (!offset && !ret);
3079 } else {
3080 mutex_lock(&perf->lock);
3081 ret = stream->ops->read(stream, buf, count, &offset);
3082 mutex_unlock(&perf->lock);
3083 }
3084
3085 /* We allow the poll checking to sometimes report false positive EPOLLIN
3086 * events where we might actually report EAGAIN on read() if there's
3087 * not really any data available. In this situation though we don't
3088 * want to enter a busy loop between poll() reporting a EPOLLIN event
3089 * and read() returning -EAGAIN. Clearing the oa.pollin state here
3090 * effectively ensures we back off until the next hrtimer callback
3091 * before reporting another EPOLLIN event.
3092 * The exception to this is if ops->read() returned -ENOSPC which means
3093 * that more OA data is available than could fit in the user provided
3094 * buffer. In this case we want the next poll() call to not block.
3095 */
3096 if (ret != -ENOSPC)
3097 stream->pollin = false;
3098
3099 /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3100 return offset ?: (ret ?: -EAGAIN);
3101 }
3102
oa_poll_check_timer_cb(struct hrtimer * hrtimer)3103 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3104 {
3105 struct i915_perf_stream *stream =
3106 container_of(hrtimer, typeof(*stream), poll_check_timer);
3107
3108 if (oa_buffer_check_unlocked(stream)) {
3109 stream->pollin = true;
3110 wake_up(&stream->poll_wq);
3111 }
3112
3113 hrtimer_forward_now(hrtimer,
3114 ns_to_ktime(stream->poll_oa_period));
3115
3116 return HRTIMER_RESTART;
3117 }
3118
3119 /**
3120 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3121 * @stream: An i915 perf stream
3122 * @file: An i915 perf stream file
3123 * @wait: poll() state table
3124 *
3125 * For handling userspace polling on an i915 perf stream, this calls through to
3126 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3127 * will be woken for new stream data.
3128 *
3129 * Note: The &perf->lock mutex has been taken to serialize
3130 * with any non-file-operation driver hooks.
3131 *
3132 * Returns: any poll events that are ready without sleeping
3133 */
i915_perf_poll_locked(struct i915_perf_stream * stream,struct file * file,poll_table * wait)3134 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3135 struct file *file,
3136 poll_table *wait)
3137 {
3138 __poll_t events = 0;
3139
3140 stream->ops->poll_wait(stream, file, wait);
3141
3142 /* Note: we don't explicitly check whether there's something to read
3143 * here since this path may be very hot depending on what else
3144 * userspace is polling, or on the timeout in use. We rely solely on
3145 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3146 * samples to read.
3147 */
3148 if (stream->pollin)
3149 events |= EPOLLIN;
3150
3151 return events;
3152 }
3153
3154 /**
3155 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3156 * @file: An i915 perf stream file
3157 * @wait: poll() state table
3158 *
3159 * For handling userspace polling on an i915 perf stream, this ensures
3160 * poll_wait() gets called with a wait queue that will be woken for new stream
3161 * data.
3162 *
3163 * Note: Implementation deferred to i915_perf_poll_locked()
3164 *
3165 * Returns: any poll events that are ready without sleeping
3166 */
i915_perf_poll(struct file * file,poll_table * wait)3167 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3168 {
3169 struct i915_perf_stream *stream = file->private_data;
3170 struct i915_perf *perf = stream->perf;
3171 __poll_t ret;
3172
3173 mutex_lock(&perf->lock);
3174 ret = i915_perf_poll_locked(stream, file, wait);
3175 mutex_unlock(&perf->lock);
3176
3177 return ret;
3178 }
3179
3180 /**
3181 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3182 * @stream: A disabled i915 perf stream
3183 *
3184 * [Re]enables the associated capture of data for this stream.
3185 *
3186 * If a stream was previously enabled then there's currently no intention
3187 * to provide userspace any guarantee about the preservation of previously
3188 * buffered data.
3189 */
i915_perf_enable_locked(struct i915_perf_stream * stream)3190 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3191 {
3192 if (stream->enabled)
3193 return;
3194
3195 /* Allow stream->ops->enable() to refer to this */
3196 stream->enabled = true;
3197
3198 if (stream->ops->enable)
3199 stream->ops->enable(stream);
3200
3201 if (stream->hold_preemption)
3202 intel_context_set_nopreempt(stream->pinned_ctx);
3203 }
3204
3205 /**
3206 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3207 * @stream: An enabled i915 perf stream
3208 *
3209 * Disables the associated capture of data for this stream.
3210 *
3211 * The intention is that disabling an re-enabling a stream will ideally be
3212 * cheaper than destroying and re-opening a stream with the same configuration,
3213 * though there are no formal guarantees about what state or buffered data
3214 * must be retained between disabling and re-enabling a stream.
3215 *
3216 * Note: while a stream is disabled it's considered an error for userspace
3217 * to attempt to read from the stream (-EIO).
3218 */
i915_perf_disable_locked(struct i915_perf_stream * stream)3219 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3220 {
3221 if (!stream->enabled)
3222 return;
3223
3224 /* Allow stream->ops->disable() to refer to this */
3225 stream->enabled = false;
3226
3227 if (stream->hold_preemption)
3228 intel_context_clear_nopreempt(stream->pinned_ctx);
3229
3230 if (stream->ops->disable)
3231 stream->ops->disable(stream);
3232 }
3233
i915_perf_config_locked(struct i915_perf_stream * stream,unsigned long metrics_set)3234 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3235 unsigned long metrics_set)
3236 {
3237 struct i915_oa_config *config;
3238 long ret = stream->oa_config->id;
3239
3240 config = i915_perf_get_oa_config(stream->perf, metrics_set);
3241 if (!config)
3242 return -EINVAL;
3243
3244 if (config != stream->oa_config) {
3245 int err;
3246
3247 /*
3248 * If OA is bound to a specific context, emit the
3249 * reconfiguration inline from that context. The update
3250 * will then be ordered with respect to submission on that
3251 * context.
3252 *
3253 * When set globally, we use a low priority kernel context,
3254 * so it will effectively take effect when idle.
3255 */
3256 err = emit_oa_config(stream, config, oa_context(stream), NULL);
3257 if (!err)
3258 config = xchg(&stream->oa_config, config);
3259 else
3260 ret = err;
3261 }
3262
3263 i915_oa_config_put(config);
3264
3265 return ret;
3266 }
3267
3268 /**
3269 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3270 * @stream: An i915 perf stream
3271 * @cmd: the ioctl request
3272 * @arg: the ioctl data
3273 *
3274 * Note: The &perf->lock mutex has been taken to serialize
3275 * with any non-file-operation driver hooks.
3276 *
3277 * Returns: zero on success or a negative error code. Returns -EINVAL for
3278 * an unknown ioctl request.
3279 */
i915_perf_ioctl_locked(struct i915_perf_stream * stream,unsigned int cmd,unsigned long arg)3280 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3281 unsigned int cmd,
3282 unsigned long arg)
3283 {
3284 switch (cmd) {
3285 case I915_PERF_IOCTL_ENABLE:
3286 i915_perf_enable_locked(stream);
3287 return 0;
3288 case I915_PERF_IOCTL_DISABLE:
3289 i915_perf_disable_locked(stream);
3290 return 0;
3291 case I915_PERF_IOCTL_CONFIG:
3292 return i915_perf_config_locked(stream, arg);
3293 }
3294
3295 return -EINVAL;
3296 }
3297
3298 /**
3299 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3300 * @file: An i915 perf stream file
3301 * @cmd: the ioctl request
3302 * @arg: the ioctl data
3303 *
3304 * Implementation deferred to i915_perf_ioctl_locked().
3305 *
3306 * Returns: zero on success or a negative error code. Returns -EINVAL for
3307 * an unknown ioctl request.
3308 */
i915_perf_ioctl(struct file * file,unsigned int cmd,unsigned long arg)3309 static long i915_perf_ioctl(struct file *file,
3310 unsigned int cmd,
3311 unsigned long arg)
3312 {
3313 struct i915_perf_stream *stream = file->private_data;
3314 struct i915_perf *perf = stream->perf;
3315 long ret;
3316
3317 mutex_lock(&perf->lock);
3318 ret = i915_perf_ioctl_locked(stream, cmd, arg);
3319 mutex_unlock(&perf->lock);
3320
3321 return ret;
3322 }
3323
3324 /**
3325 * i915_perf_destroy_locked - destroy an i915 perf stream
3326 * @stream: An i915 perf stream
3327 *
3328 * Frees all resources associated with the given i915 perf @stream, disabling
3329 * any associated data capture in the process.
3330 *
3331 * Note: The &perf->lock mutex has been taken to serialize
3332 * with any non-file-operation driver hooks.
3333 */
i915_perf_destroy_locked(struct i915_perf_stream * stream)3334 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3335 {
3336 if (stream->enabled)
3337 i915_perf_disable_locked(stream);
3338
3339 if (stream->ops->destroy)
3340 stream->ops->destroy(stream);
3341
3342 if (stream->ctx)
3343 i915_gem_context_put(stream->ctx);
3344
3345 kfree(stream);
3346 }
3347
3348 /**
3349 * i915_perf_release - handles userspace close() of a stream file
3350 * @inode: anonymous inode associated with file
3351 * @file: An i915 perf stream file
3352 *
3353 * Cleans up any resources associated with an open i915 perf stream file.
3354 *
3355 * NB: close() can't really fail from the userspace point of view.
3356 *
3357 * Returns: zero on success or a negative error code.
3358 */
i915_perf_release(struct inode * inode,struct file * file)3359 static int i915_perf_release(struct inode *inode, struct file *file)
3360 {
3361 struct i915_perf_stream *stream = file->private_data;
3362 struct i915_perf *perf = stream->perf;
3363
3364 mutex_lock(&perf->lock);
3365 i915_perf_destroy_locked(stream);
3366 mutex_unlock(&perf->lock);
3367
3368 /* Release the reference the perf stream kept on the driver. */
3369 drm_dev_put(&perf->i915->drm);
3370
3371 return 0;
3372 }
3373
3374
3375 static const struct file_operations fops = {
3376 .owner = THIS_MODULE,
3377 .llseek = no_llseek,
3378 .release = i915_perf_release,
3379 .poll = i915_perf_poll,
3380 .read = i915_perf_read,
3381 .unlocked_ioctl = i915_perf_ioctl,
3382 /* Our ioctl have no arguments, so it's safe to use the same function
3383 * to handle 32bits compatibility.
3384 */
3385 .compat_ioctl = i915_perf_ioctl,
3386 };
3387
3388
3389 /**
3390 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3391 * @perf: i915 perf instance
3392 * @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
3393 * @props: individually validated u64 property value pairs
3394 * @file: drm file
3395 *
3396 * See i915_perf_ioctl_open() for interface details.
3397 *
3398 * Implements further stream config validation and stream initialization on
3399 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3400 * taken to serialize with any non-file-operation driver hooks.
3401 *
3402 * Note: at this point the @props have only been validated in isolation and
3403 * it's still necessary to validate that the combination of properties makes
3404 * sense.
3405 *
3406 * In the case where userspace is interested in OA unit metrics then further
3407 * config validation and stream initialization details will be handled by
3408 * i915_oa_stream_init(). The code here should only validate config state that
3409 * will be relevant to all stream types / backends.
3410 *
3411 * Returns: zero on success or a negative error code.
3412 */
3413 static int
i915_perf_open_ioctl_locked(struct i915_perf * perf,struct drm_i915_perf_open_param * param,struct perf_open_properties * props,struct drm_file * file)3414 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3415 struct drm_i915_perf_open_param *param,
3416 struct perf_open_properties *props,
3417 struct drm_file *file)
3418 {
3419 struct i915_gem_context *specific_ctx = NULL;
3420 struct i915_perf_stream *stream = NULL;
3421 unsigned long f_flags = 0;
3422 bool privileged_op = true;
3423 int stream_fd;
3424 int ret;
3425
3426 if (props->single_context) {
3427 u32 ctx_handle = props->ctx_handle;
3428 struct drm_i915_file_private *file_priv = file->driver_priv;
3429
3430 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3431 if (IS_ERR(specific_ctx)) {
3432 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3433 ctx_handle);
3434 ret = PTR_ERR(specific_ctx);
3435 goto err;
3436 }
3437 }
3438
3439 /*
3440 * On Haswell the OA unit supports clock gating off for a specific
3441 * context and in this mode there's no visibility of metrics for the
3442 * rest of the system, which we consider acceptable for a
3443 * non-privileged client.
3444 *
3445 * For Gen8->11 the OA unit no longer supports clock gating off for a
3446 * specific context and the kernel can't securely stop the counters
3447 * from updating as system-wide / global values. Even though we can
3448 * filter reports based on the included context ID we can't block
3449 * clients from seeing the raw / global counter values via
3450 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3451 * enable the OA unit by default.
3452 *
3453 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3454 * per context basis. So we can relax requirements there if the user
3455 * doesn't request global stream access (i.e. query based sampling
3456 * using MI_RECORD_PERF_COUNT.
3457 */
3458 if (IS_HASWELL(perf->i915) && specific_ctx)
3459 privileged_op = false;
3460 else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3461 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3462 privileged_op = false;
3463
3464 if (props->hold_preemption) {
3465 if (!props->single_context) {
3466 DRM_DEBUG("preemption disable with no context\n");
3467 ret = -EINVAL;
3468 goto err;
3469 }
3470 privileged_op = true;
3471 }
3472
3473 /*
3474 * Asking for SSEU configuration is a priviliged operation.
3475 */
3476 if (props->has_sseu)
3477 privileged_op = true;
3478 else
3479 get_default_sseu_config(&props->sseu, props->engine);
3480
3481 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3482 * we check a dev.i915.perf_stream_paranoid sysctl option
3483 * to determine if it's ok to access system wide OA counters
3484 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3485 */
3486 if (privileged_op &&
3487 i915_perf_stream_paranoid && !perfmon_capable()) {
3488 DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
3489 ret = -EACCES;
3490 goto err_ctx;
3491 }
3492
3493 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3494 if (!stream) {
3495 ret = -ENOMEM;
3496 goto err_ctx;
3497 }
3498
3499 stream->perf = perf;
3500 stream->ctx = specific_ctx;
3501 stream->poll_oa_period = props->poll_oa_period;
3502
3503 ret = i915_oa_stream_init(stream, param, props);
3504 if (ret)
3505 goto err_alloc;
3506
3507 /* we avoid simply assigning stream->sample_flags = props->sample_flags
3508 * to have _stream_init check the combination of sample flags more
3509 * thoroughly, but still this is the expected result at this point.
3510 */
3511 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3512 ret = -ENODEV;
3513 goto err_flags;
3514 }
3515
3516 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3517 f_flags |= O_CLOEXEC;
3518 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3519 f_flags |= O_NONBLOCK;
3520
3521 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3522 if (stream_fd < 0) {
3523 ret = stream_fd;
3524 goto err_flags;
3525 }
3526
3527 if (!(param->flags & I915_PERF_FLAG_DISABLED))
3528 i915_perf_enable_locked(stream);
3529
3530 /* Take a reference on the driver that will be kept with stream_fd
3531 * until its release.
3532 */
3533 drm_dev_get(&perf->i915->drm);
3534
3535 return stream_fd;
3536
3537 err_flags:
3538 if (stream->ops->destroy)
3539 stream->ops->destroy(stream);
3540 err_alloc:
3541 kfree(stream);
3542 err_ctx:
3543 if (specific_ctx)
3544 i915_gem_context_put(specific_ctx);
3545 err:
3546 return ret;
3547 }
3548
oa_exponent_to_ns(struct i915_perf * perf,int exponent)3549 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3550 {
3551 return intel_gt_clock_interval_to_ns(to_gt(perf->i915),
3552 2ULL << exponent);
3553 }
3554
3555 static __always_inline bool
oa_format_valid(struct i915_perf * perf,enum drm_i915_oa_format format)3556 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
3557 {
3558 return test_bit(format, perf->format_mask);
3559 }
3560
3561 static __always_inline void
oa_format_add(struct i915_perf * perf,enum drm_i915_oa_format format)3562 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
3563 {
3564 __set_bit(format, perf->format_mask);
3565 }
3566
3567 /**
3568 * read_properties_unlocked - validate + copy userspace stream open properties
3569 * @perf: i915 perf instance
3570 * @uprops: The array of u64 key value pairs given by userspace
3571 * @n_props: The number of key value pairs expected in @uprops
3572 * @props: The stream configuration built up while validating properties
3573 *
3574 * Note this function only validates properties in isolation it doesn't
3575 * validate that the combination of properties makes sense or that all
3576 * properties necessary for a particular kind of stream have been set.
3577 *
3578 * Note that there currently aren't any ordering requirements for properties so
3579 * we shouldn't validate or assume anything about ordering here. This doesn't
3580 * rule out defining new properties with ordering requirements in the future.
3581 */
read_properties_unlocked(struct i915_perf * perf,u64 __user * uprops,u32 n_props,struct perf_open_properties * props)3582 static int read_properties_unlocked(struct i915_perf *perf,
3583 u64 __user *uprops,
3584 u32 n_props,
3585 struct perf_open_properties *props)
3586 {
3587 u64 __user *uprop = uprops;
3588 u32 i;
3589 int ret;
3590
3591 memset(props, 0, sizeof(struct perf_open_properties));
3592 props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3593
3594 if (!n_props) {
3595 DRM_DEBUG("No i915 perf properties given\n");
3596 return -EINVAL;
3597 }
3598
3599 /* At the moment we only support using i915-perf on the RCS. */
3600 props->engine = intel_engine_lookup_user(perf->i915,
3601 I915_ENGINE_CLASS_RENDER,
3602 0);
3603 if (!props->engine) {
3604 DRM_DEBUG("No RENDER-capable engines\n");
3605 return -EINVAL;
3606 }
3607
3608 /* Considering that ID = 0 is reserved and assuming that we don't
3609 * (currently) expect any configurations to ever specify duplicate
3610 * values for a particular property ID then the last _PROP_MAX value is
3611 * one greater than the maximum number of properties we expect to get
3612 * from userspace.
3613 */
3614 if (n_props >= DRM_I915_PERF_PROP_MAX) {
3615 DRM_DEBUG("More i915 perf properties specified than exist\n");
3616 return -EINVAL;
3617 }
3618
3619 for (i = 0; i < n_props; i++) {
3620 u64 oa_period, oa_freq_hz;
3621 u64 id, value;
3622
3623 ret = get_user(id, uprop);
3624 if (ret)
3625 return ret;
3626
3627 ret = get_user(value, uprop + 1);
3628 if (ret)
3629 return ret;
3630
3631 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3632 DRM_DEBUG("Unknown i915 perf property ID\n");
3633 return -EINVAL;
3634 }
3635
3636 switch ((enum drm_i915_perf_property_id)id) {
3637 case DRM_I915_PERF_PROP_CTX_HANDLE:
3638 props->single_context = 1;
3639 props->ctx_handle = value;
3640 break;
3641 case DRM_I915_PERF_PROP_SAMPLE_OA:
3642 if (value)
3643 props->sample_flags |= SAMPLE_OA_REPORT;
3644 break;
3645 case DRM_I915_PERF_PROP_OA_METRICS_SET:
3646 if (value == 0) {
3647 DRM_DEBUG("Unknown OA metric set ID\n");
3648 return -EINVAL;
3649 }
3650 props->metrics_set = value;
3651 break;
3652 case DRM_I915_PERF_PROP_OA_FORMAT:
3653 if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3654 DRM_DEBUG("Out-of-range OA report format %llu\n",
3655 value);
3656 return -EINVAL;
3657 }
3658 if (!oa_format_valid(perf, value)) {
3659 DRM_DEBUG("Unsupported OA report format %llu\n",
3660 value);
3661 return -EINVAL;
3662 }
3663 props->oa_format = value;
3664 break;
3665 case DRM_I915_PERF_PROP_OA_EXPONENT:
3666 if (value > OA_EXPONENT_MAX) {
3667 DRM_DEBUG("OA timer exponent too high (> %u)\n",
3668 OA_EXPONENT_MAX);
3669 return -EINVAL;
3670 }
3671
3672 /* Theoretically we can program the OA unit to sample
3673 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3674 * for BXT. We don't allow such high sampling
3675 * frequencies by default unless root.
3676 */
3677
3678 BUILD_BUG_ON(sizeof(oa_period) != 8);
3679 oa_period = oa_exponent_to_ns(perf, value);
3680
3681 /* This check is primarily to ensure that oa_period <=
3682 * UINT32_MAX (before passing to do_div which only
3683 * accepts a u32 denominator), but we can also skip
3684 * checking anything < 1Hz which implicitly can't be
3685 * limited via an integer oa_max_sample_rate.
3686 */
3687 if (oa_period <= NSEC_PER_SEC) {
3688 u64 tmp = NSEC_PER_SEC;
3689 do_div(tmp, oa_period);
3690 oa_freq_hz = tmp;
3691 } else
3692 oa_freq_hz = 0;
3693
3694 if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
3695 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
3696 i915_oa_max_sample_rate);
3697 return -EACCES;
3698 }
3699
3700 props->oa_periodic = true;
3701 props->oa_period_exponent = value;
3702 break;
3703 case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3704 props->hold_preemption = !!value;
3705 break;
3706 case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
3707 struct drm_i915_gem_context_param_sseu user_sseu;
3708
3709 if (copy_from_user(&user_sseu,
3710 u64_to_user_ptr(value),
3711 sizeof(user_sseu))) {
3712 DRM_DEBUG("Unable to copy global sseu parameter\n");
3713 return -EFAULT;
3714 }
3715
3716 ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
3717 if (ret) {
3718 DRM_DEBUG("Invalid SSEU configuration\n");
3719 return ret;
3720 }
3721 props->has_sseu = true;
3722 break;
3723 }
3724 case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
3725 if (value < 100000 /* 100us */) {
3726 DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
3727 value);
3728 return -EINVAL;
3729 }
3730 props->poll_oa_period = value;
3731 break;
3732 case DRM_I915_PERF_PROP_MAX:
3733 MISSING_CASE(id);
3734 return -EINVAL;
3735 }
3736
3737 uprop += 2;
3738 }
3739
3740 return 0;
3741 }
3742
3743 /**
3744 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3745 * @dev: drm device
3746 * @data: ioctl data copied from userspace (unvalidated)
3747 * @file: drm file
3748 *
3749 * Validates the stream open parameters given by userspace including flags
3750 * and an array of u64 key, value pair properties.
3751 *
3752 * Very little is assumed up front about the nature of the stream being
3753 * opened (for instance we don't assume it's for periodic OA unit metrics). An
3754 * i915-perf stream is expected to be a suitable interface for other forms of
3755 * buffered data written by the GPU besides periodic OA metrics.
3756 *
3757 * Note we copy the properties from userspace outside of the i915 perf
3758 * mutex to avoid an awkward lockdep with mmap_lock.
3759 *
3760 * Most of the implementation details are handled by
3761 * i915_perf_open_ioctl_locked() after taking the &perf->lock
3762 * mutex for serializing with any non-file-operation driver hooks.
3763 *
3764 * Return: A newly opened i915 Perf stream file descriptor or negative
3765 * error code on failure.
3766 */
i915_perf_open_ioctl(struct drm_device * dev,void * data,struct drm_file * file)3767 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3768 struct drm_file *file)
3769 {
3770 struct i915_perf *perf = &to_i915(dev)->perf;
3771 struct drm_i915_perf_open_param *param = data;
3772 struct perf_open_properties props;
3773 u32 known_open_flags;
3774 int ret;
3775
3776 if (!perf->i915) {
3777 DRM_DEBUG("i915 perf interface not available for this system\n");
3778 return -ENOTSUPP;
3779 }
3780
3781 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3782 I915_PERF_FLAG_FD_NONBLOCK |
3783 I915_PERF_FLAG_DISABLED;
3784 if (param->flags & ~known_open_flags) {
3785 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
3786 return -EINVAL;
3787 }
3788
3789 ret = read_properties_unlocked(perf,
3790 u64_to_user_ptr(param->properties_ptr),
3791 param->num_properties,
3792 &props);
3793 if (ret)
3794 return ret;
3795
3796 mutex_lock(&perf->lock);
3797 ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3798 mutex_unlock(&perf->lock);
3799
3800 return ret;
3801 }
3802
3803 /**
3804 * i915_perf_register - exposes i915-perf to userspace
3805 * @i915: i915 device instance
3806 *
3807 * In particular OA metric sets are advertised under a sysfs metrics/
3808 * directory allowing userspace to enumerate valid IDs that can be
3809 * used to open an i915-perf stream.
3810 */
i915_perf_register(struct drm_i915_private * i915)3811 void i915_perf_register(struct drm_i915_private *i915)
3812 {
3813 struct i915_perf *perf = &i915->perf;
3814
3815 if (!perf->i915)
3816 return;
3817
3818 /* To be sure we're synchronized with an attempted
3819 * i915_perf_open_ioctl(); considering that we register after
3820 * being exposed to userspace.
3821 */
3822 mutex_lock(&perf->lock);
3823
3824 perf->metrics_kobj =
3825 kobject_create_and_add("metrics",
3826 &i915->drm.primary->kdev->kobj);
3827
3828 mutex_unlock(&perf->lock);
3829 }
3830
3831 /**
3832 * i915_perf_unregister - hide i915-perf from userspace
3833 * @i915: i915 device instance
3834 *
3835 * i915-perf state cleanup is split up into an 'unregister' and
3836 * 'deinit' phase where the interface is first hidden from
3837 * userspace by i915_perf_unregister() before cleaning up
3838 * remaining state in i915_perf_fini().
3839 */
i915_perf_unregister(struct drm_i915_private * i915)3840 void i915_perf_unregister(struct drm_i915_private *i915)
3841 {
3842 struct i915_perf *perf = &i915->perf;
3843
3844 if (!perf->metrics_kobj)
3845 return;
3846
3847 kobject_put(perf->metrics_kobj);
3848 perf->metrics_kobj = NULL;
3849 }
3850
gen8_is_valid_flex_addr(struct i915_perf * perf,u32 addr)3851 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3852 {
3853 static const i915_reg_t flex_eu_regs[] = {
3854 EU_PERF_CNTL0,
3855 EU_PERF_CNTL1,
3856 EU_PERF_CNTL2,
3857 EU_PERF_CNTL3,
3858 EU_PERF_CNTL4,
3859 EU_PERF_CNTL5,
3860 EU_PERF_CNTL6,
3861 };
3862 int i;
3863
3864 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3865 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3866 return true;
3867 }
3868 return false;
3869 }
3870
reg_in_range_table(u32 addr,const struct i915_range * table)3871 static bool reg_in_range_table(u32 addr, const struct i915_range *table)
3872 {
3873 while (table->start || table->end) {
3874 if (addr >= table->start && addr <= table->end)
3875 return true;
3876
3877 table++;
3878 }
3879
3880 return false;
3881 }
3882
3883 #define REG_EQUAL(addr, mmio) \
3884 ((addr) == i915_mmio_reg_offset(mmio))
3885
3886 static const struct i915_range gen7_oa_b_counters[] = {
3887 { .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */
3888 { .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */
3889 { .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */
3890 {}
3891 };
3892
3893 static const struct i915_range gen12_oa_b_counters[] = {
3894 { .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */
3895 { .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */
3896 { .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */
3897 { .start = 0xd940, .end = 0xd97c }, /* GEN12_OAG_CEC[0-7][0-1] */
3898 { .start = 0xdc00, .end = 0xdc3c }, /* GEN12_OAG_SCEC[0-7][0-1] */
3899 { .start = 0xdc40, .end = 0xdc40 }, /* GEN12_OAG_SPCTR_CNF */
3900 { .start = 0xdc44, .end = 0xdc44 }, /* GEN12_OAA_DBG_REG */
3901 {}
3902 };
3903
3904 static const struct i915_range gen7_oa_mux_regs[] = {
3905 { .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */
3906 { .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */
3907 { .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */
3908 {}
3909 };
3910
3911 static const struct i915_range hsw_oa_mux_regs[] = {
3912 { .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
3913 { .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
3914 { .start = 0x25100, .end = 0x2ff90 },
3915 {}
3916 };
3917
3918 static const struct i915_range chv_oa_mux_regs[] = {
3919 { .start = 0x182300, .end = 0x1823a4 },
3920 {}
3921 };
3922
3923 static const struct i915_range gen8_oa_mux_regs[] = {
3924 { .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
3925 { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */
3926 {}
3927 };
3928
3929 static const struct i915_range gen11_oa_mux_regs[] = {
3930 { .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */
3931 {}
3932 };
3933
3934 static const struct i915_range gen12_oa_mux_regs[] = {
3935 { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
3936 { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
3937 { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
3938 { .start = 0x9884, .end = 0x9888 }, /* NOA_WRITE */
3939 { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */
3940 {}
3941 };
3942
gen7_is_valid_b_counter_addr(struct i915_perf * perf,u32 addr)3943 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3944 {
3945 return reg_in_range_table(addr, gen7_oa_b_counters);
3946 }
3947
gen8_is_valid_mux_addr(struct i915_perf * perf,u32 addr)3948 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3949 {
3950 return reg_in_range_table(addr, gen7_oa_mux_regs) ||
3951 reg_in_range_table(addr, gen8_oa_mux_regs);
3952 }
3953
gen11_is_valid_mux_addr(struct i915_perf * perf,u32 addr)3954 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3955 {
3956 return reg_in_range_table(addr, gen7_oa_mux_regs) ||
3957 reg_in_range_table(addr, gen8_oa_mux_regs) ||
3958 reg_in_range_table(addr, gen11_oa_mux_regs);
3959 }
3960
hsw_is_valid_mux_addr(struct i915_perf * perf,u32 addr)3961 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3962 {
3963 return reg_in_range_table(addr, gen7_oa_mux_regs) ||
3964 reg_in_range_table(addr, hsw_oa_mux_regs);
3965 }
3966
chv_is_valid_mux_addr(struct i915_perf * perf,u32 addr)3967 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3968 {
3969 return reg_in_range_table(addr, gen7_oa_mux_regs) ||
3970 reg_in_range_table(addr, chv_oa_mux_regs);
3971 }
3972
gen12_is_valid_b_counter_addr(struct i915_perf * perf,u32 addr)3973 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3974 {
3975 return reg_in_range_table(addr, gen12_oa_b_counters);
3976 }
3977
gen12_is_valid_mux_addr(struct i915_perf * perf,u32 addr)3978 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3979 {
3980 return reg_in_range_table(addr, gen12_oa_mux_regs);
3981 }
3982
mask_reg_value(u32 reg,u32 val)3983 static u32 mask_reg_value(u32 reg, u32 val)
3984 {
3985 /* HALF_SLICE_CHICKEN2 is programmed with a the
3986 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3987 * programmed by userspace doesn't change this.
3988 */
3989 if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
3990 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3991
3992 /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function
3993 * indicated by its name and a bunch of selection fields used by OA
3994 * configs.
3995 */
3996 if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
3997 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3998
3999 return val;
4000 }
4001
alloc_oa_regs(struct i915_perf * perf,bool (* is_valid)(struct i915_perf * perf,u32 addr),u32 __user * regs,u32 n_regs)4002 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
4003 bool (*is_valid)(struct i915_perf *perf, u32 addr),
4004 u32 __user *regs,
4005 u32 n_regs)
4006 {
4007 struct i915_oa_reg *oa_regs;
4008 int err;
4009 u32 i;
4010
4011 if (!n_regs)
4012 return NULL;
4013
4014 /* No is_valid function means we're not allowing any register to be programmed. */
4015 GEM_BUG_ON(!is_valid);
4016 if (!is_valid)
4017 return ERR_PTR(-EINVAL);
4018
4019 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4020 if (!oa_regs)
4021 return ERR_PTR(-ENOMEM);
4022
4023 for (i = 0; i < n_regs; i++) {
4024 u32 addr, value;
4025
4026 err = get_user(addr, regs);
4027 if (err)
4028 goto addr_err;
4029
4030 if (!is_valid(perf, addr)) {
4031 DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
4032 err = -EINVAL;
4033 goto addr_err;
4034 }
4035
4036 err = get_user(value, regs + 1);
4037 if (err)
4038 goto addr_err;
4039
4040 oa_regs[i].addr = _MMIO(addr);
4041 oa_regs[i].value = mask_reg_value(addr, value);
4042
4043 regs += 2;
4044 }
4045
4046 return oa_regs;
4047
4048 addr_err:
4049 kfree(oa_regs);
4050 return ERR_PTR(err);
4051 }
4052
show_dynamic_id(struct kobject * kobj,struct kobj_attribute * attr,char * buf)4053 static ssize_t show_dynamic_id(struct kobject *kobj,
4054 struct kobj_attribute *attr,
4055 char *buf)
4056 {
4057 struct i915_oa_config *oa_config =
4058 container_of(attr, typeof(*oa_config), sysfs_metric_id);
4059
4060 return sprintf(buf, "%d\n", oa_config->id);
4061 }
4062
create_dynamic_oa_sysfs_entry(struct i915_perf * perf,struct i915_oa_config * oa_config)4063 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4064 struct i915_oa_config *oa_config)
4065 {
4066 sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4067 oa_config->sysfs_metric_id.attr.name = "id";
4068 oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4069 oa_config->sysfs_metric_id.show = show_dynamic_id;
4070 oa_config->sysfs_metric_id.store = NULL;
4071
4072 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4073 oa_config->attrs[1] = NULL;
4074
4075 oa_config->sysfs_metric.name = oa_config->uuid;
4076 oa_config->sysfs_metric.attrs = oa_config->attrs;
4077
4078 return sysfs_create_group(perf->metrics_kobj,
4079 &oa_config->sysfs_metric);
4080 }
4081
4082 /**
4083 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4084 * @dev: drm device
4085 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4086 * userspace (unvalidated)
4087 * @file: drm file
4088 *
4089 * Validates the submitted OA register to be saved into a new OA config that
4090 * can then be used for programming the OA unit and its NOA network.
4091 *
4092 * Returns: A new allocated config number to be used with the perf open ioctl
4093 * or a negative error code on failure.
4094 */
i915_perf_add_config_ioctl(struct drm_device * dev,void * data,struct drm_file * file)4095 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4096 struct drm_file *file)
4097 {
4098 struct i915_perf *perf = &to_i915(dev)->perf;
4099 struct drm_i915_perf_oa_config *args = data;
4100 struct i915_oa_config *oa_config, *tmp;
4101 struct i915_oa_reg *regs;
4102 int err, id;
4103
4104 if (!perf->i915) {
4105 DRM_DEBUG("i915 perf interface not available for this system\n");
4106 return -ENOTSUPP;
4107 }
4108
4109 if (!perf->metrics_kobj) {
4110 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
4111 return -EINVAL;
4112 }
4113
4114 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4115 DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
4116 return -EACCES;
4117 }
4118
4119 if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4120 (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4121 (!args->flex_regs_ptr || !args->n_flex_regs)) {
4122 DRM_DEBUG("No OA registers given\n");
4123 return -EINVAL;
4124 }
4125
4126 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4127 if (!oa_config) {
4128 DRM_DEBUG("Failed to allocate memory for the OA config\n");
4129 return -ENOMEM;
4130 }
4131
4132 oa_config->perf = perf;
4133 kref_init(&oa_config->ref);
4134
4135 if (!uuid_is_valid(args->uuid)) {
4136 DRM_DEBUG("Invalid uuid format for OA config\n");
4137 err = -EINVAL;
4138 goto reg_err;
4139 }
4140
4141 /* Last character in oa_config->uuid will be 0 because oa_config is
4142 * kzalloc.
4143 */
4144 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4145
4146 oa_config->mux_regs_len = args->n_mux_regs;
4147 regs = alloc_oa_regs(perf,
4148 perf->ops.is_valid_mux_reg,
4149 u64_to_user_ptr(args->mux_regs_ptr),
4150 args->n_mux_regs);
4151
4152 if (IS_ERR(regs)) {
4153 DRM_DEBUG("Failed to create OA config for mux_regs\n");
4154 err = PTR_ERR(regs);
4155 goto reg_err;
4156 }
4157 oa_config->mux_regs = regs;
4158
4159 oa_config->b_counter_regs_len = args->n_boolean_regs;
4160 regs = alloc_oa_regs(perf,
4161 perf->ops.is_valid_b_counter_reg,
4162 u64_to_user_ptr(args->boolean_regs_ptr),
4163 args->n_boolean_regs);
4164
4165 if (IS_ERR(regs)) {
4166 DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
4167 err = PTR_ERR(regs);
4168 goto reg_err;
4169 }
4170 oa_config->b_counter_regs = regs;
4171
4172 if (GRAPHICS_VER(perf->i915) < 8) {
4173 if (args->n_flex_regs != 0) {
4174 err = -EINVAL;
4175 goto reg_err;
4176 }
4177 } else {
4178 oa_config->flex_regs_len = args->n_flex_regs;
4179 regs = alloc_oa_regs(perf,
4180 perf->ops.is_valid_flex_reg,
4181 u64_to_user_ptr(args->flex_regs_ptr),
4182 args->n_flex_regs);
4183
4184 if (IS_ERR(regs)) {
4185 DRM_DEBUG("Failed to create OA config for flex_regs\n");
4186 err = PTR_ERR(regs);
4187 goto reg_err;
4188 }
4189 oa_config->flex_regs = regs;
4190 }
4191
4192 err = mutex_lock_interruptible(&perf->metrics_lock);
4193 if (err)
4194 goto reg_err;
4195
4196 /* We shouldn't have too many configs, so this iteration shouldn't be
4197 * too costly.
4198 */
4199 idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4200 if (!strcmp(tmp->uuid, oa_config->uuid)) {
4201 DRM_DEBUG("OA config already exists with this uuid\n");
4202 err = -EADDRINUSE;
4203 goto sysfs_err;
4204 }
4205 }
4206
4207 err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4208 if (err) {
4209 DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4210 goto sysfs_err;
4211 }
4212
4213 /* Config id 0 is invalid, id 1 for kernel stored test config. */
4214 oa_config->id = idr_alloc(&perf->metrics_idr,
4215 oa_config, 2,
4216 0, GFP_KERNEL);
4217 if (oa_config->id < 0) {
4218 DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4219 err = oa_config->id;
4220 goto sysfs_err;
4221 }
4222
4223 mutex_unlock(&perf->metrics_lock);
4224
4225 DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4226
4227 return oa_config->id;
4228
4229 sysfs_err:
4230 mutex_unlock(&perf->metrics_lock);
4231 reg_err:
4232 i915_oa_config_put(oa_config);
4233 DRM_DEBUG("Failed to add new OA config\n");
4234 return err;
4235 }
4236
4237 /**
4238 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4239 * @dev: drm device
4240 * @data: ioctl data (pointer to u64 integer) copied from userspace
4241 * @file: drm file
4242 *
4243 * Configs can be removed while being used, the will stop appearing in sysfs
4244 * and their content will be freed when the stream using the config is closed.
4245 *
4246 * Returns: 0 on success or a negative error code on failure.
4247 */
i915_perf_remove_config_ioctl(struct drm_device * dev,void * data,struct drm_file * file)4248 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4249 struct drm_file *file)
4250 {
4251 struct i915_perf *perf = &to_i915(dev)->perf;
4252 u64 *arg = data;
4253 struct i915_oa_config *oa_config;
4254 int ret;
4255
4256 if (!perf->i915) {
4257 DRM_DEBUG("i915 perf interface not available for this system\n");
4258 return -ENOTSUPP;
4259 }
4260
4261 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4262 DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
4263 return -EACCES;
4264 }
4265
4266 ret = mutex_lock_interruptible(&perf->metrics_lock);
4267 if (ret)
4268 return ret;
4269
4270 oa_config = idr_find(&perf->metrics_idr, *arg);
4271 if (!oa_config) {
4272 DRM_DEBUG("Failed to remove unknown OA config\n");
4273 ret = -ENOENT;
4274 goto err_unlock;
4275 }
4276
4277 GEM_BUG_ON(*arg != oa_config->id);
4278
4279 sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4280
4281 idr_remove(&perf->metrics_idr, *arg);
4282
4283 mutex_unlock(&perf->metrics_lock);
4284
4285 DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4286
4287 i915_oa_config_put(oa_config);
4288
4289 return 0;
4290
4291 err_unlock:
4292 mutex_unlock(&perf->metrics_lock);
4293 return ret;
4294 }
4295
4296 static struct ctl_table oa_table[] = {
4297 {
4298 .procname = "perf_stream_paranoid",
4299 .data = &i915_perf_stream_paranoid,
4300 .maxlen = sizeof(i915_perf_stream_paranoid),
4301 .mode = 0644,
4302 .proc_handler = proc_dointvec_minmax,
4303 .extra1 = SYSCTL_ZERO,
4304 .extra2 = SYSCTL_ONE,
4305 },
4306 {
4307 .procname = "oa_max_sample_rate",
4308 .data = &i915_oa_max_sample_rate,
4309 .maxlen = sizeof(i915_oa_max_sample_rate),
4310 .mode = 0644,
4311 .proc_handler = proc_dointvec_minmax,
4312 .extra1 = SYSCTL_ZERO,
4313 .extra2 = &oa_sample_rate_hard_limit,
4314 },
4315 {}
4316 };
4317
oa_init_supported_formats(struct i915_perf * perf)4318 static void oa_init_supported_formats(struct i915_perf *perf)
4319 {
4320 struct drm_i915_private *i915 = perf->i915;
4321 enum intel_platform platform = INTEL_INFO(i915)->platform;
4322
4323 switch (platform) {
4324 case INTEL_HASWELL:
4325 oa_format_add(perf, I915_OA_FORMAT_A13);
4326 oa_format_add(perf, I915_OA_FORMAT_A13);
4327 oa_format_add(perf, I915_OA_FORMAT_A29);
4328 oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
4329 oa_format_add(perf, I915_OA_FORMAT_B4_C8);
4330 oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
4331 oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
4332 oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4333 break;
4334
4335 case INTEL_BROADWELL:
4336 case INTEL_CHERRYVIEW:
4337 case INTEL_SKYLAKE:
4338 case INTEL_BROXTON:
4339 case INTEL_KABYLAKE:
4340 case INTEL_GEMINILAKE:
4341 case INTEL_COFFEELAKE:
4342 case INTEL_COMETLAKE:
4343 case INTEL_ICELAKE:
4344 case INTEL_ELKHARTLAKE:
4345 case INTEL_JASPERLAKE:
4346 case INTEL_TIGERLAKE:
4347 case INTEL_ROCKETLAKE:
4348 case INTEL_DG1:
4349 case INTEL_ALDERLAKE_S:
4350 case INTEL_ALDERLAKE_P:
4351 oa_format_add(perf, I915_OA_FORMAT_A12);
4352 oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
4353 oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
4354 oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4355 break;
4356
4357 default:
4358 MISSING_CASE(platform);
4359 }
4360 }
4361
4362 /**
4363 * i915_perf_init - initialize i915-perf state on module bind
4364 * @i915: i915 device instance
4365 *
4366 * Initializes i915-perf state without exposing anything to userspace.
4367 *
4368 * Note: i915-perf initialization is split into an 'init' and 'register'
4369 * phase with the i915_perf_register() exposing state to userspace.
4370 */
i915_perf_init(struct drm_i915_private * i915)4371 void i915_perf_init(struct drm_i915_private *i915)
4372 {
4373 struct i915_perf *perf = &i915->perf;
4374
4375 /* XXX const struct i915_perf_ops! */
4376
4377 /* i915_perf is not enabled for DG2 yet */
4378 if (IS_DG2(i915))
4379 return;
4380
4381 perf->oa_formats = oa_formats;
4382 if (IS_HASWELL(i915)) {
4383 perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4384 perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4385 perf->ops.is_valid_flex_reg = NULL;
4386 perf->ops.enable_metric_set = hsw_enable_metric_set;
4387 perf->ops.disable_metric_set = hsw_disable_metric_set;
4388 perf->ops.oa_enable = gen7_oa_enable;
4389 perf->ops.oa_disable = gen7_oa_disable;
4390 perf->ops.read = gen7_oa_read;
4391 perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4392 } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4393 /* Note: that although we could theoretically also support the
4394 * legacy ringbuffer mode on BDW (and earlier iterations of
4395 * this driver, before upstreaming did this) it didn't seem
4396 * worth the complexity to maintain now that BDW+ enable
4397 * execlist mode by default.
4398 */
4399 perf->ops.read = gen8_oa_read;
4400
4401 if (IS_GRAPHICS_VER(i915, 8, 9)) {
4402 perf->ops.is_valid_b_counter_reg =
4403 gen7_is_valid_b_counter_addr;
4404 perf->ops.is_valid_mux_reg =
4405 gen8_is_valid_mux_addr;
4406 perf->ops.is_valid_flex_reg =
4407 gen8_is_valid_flex_addr;
4408
4409 if (IS_CHERRYVIEW(i915)) {
4410 perf->ops.is_valid_mux_reg =
4411 chv_is_valid_mux_addr;
4412 }
4413
4414 perf->ops.oa_enable = gen8_oa_enable;
4415 perf->ops.oa_disable = gen8_oa_disable;
4416 perf->ops.enable_metric_set = gen8_enable_metric_set;
4417 perf->ops.disable_metric_set = gen8_disable_metric_set;
4418 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4419
4420 if (GRAPHICS_VER(i915) == 8) {
4421 perf->ctx_oactxctrl_offset = 0x120;
4422 perf->ctx_flexeu0_offset = 0x2ce;
4423
4424 perf->gen8_valid_ctx_bit = BIT(25);
4425 } else {
4426 perf->ctx_oactxctrl_offset = 0x128;
4427 perf->ctx_flexeu0_offset = 0x3de;
4428
4429 perf->gen8_valid_ctx_bit = BIT(16);
4430 }
4431 } else if (GRAPHICS_VER(i915) == 11) {
4432 perf->ops.is_valid_b_counter_reg =
4433 gen7_is_valid_b_counter_addr;
4434 perf->ops.is_valid_mux_reg =
4435 gen11_is_valid_mux_addr;
4436 perf->ops.is_valid_flex_reg =
4437 gen8_is_valid_flex_addr;
4438
4439 perf->ops.oa_enable = gen8_oa_enable;
4440 perf->ops.oa_disable = gen8_oa_disable;
4441 perf->ops.enable_metric_set = gen8_enable_metric_set;
4442 perf->ops.disable_metric_set = gen11_disable_metric_set;
4443 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4444
4445 perf->ctx_oactxctrl_offset = 0x124;
4446 perf->ctx_flexeu0_offset = 0x78e;
4447
4448 perf->gen8_valid_ctx_bit = BIT(16);
4449 } else if (GRAPHICS_VER(i915) == 12) {
4450 perf->ops.is_valid_b_counter_reg =
4451 gen12_is_valid_b_counter_addr;
4452 perf->ops.is_valid_mux_reg =
4453 gen12_is_valid_mux_addr;
4454 perf->ops.is_valid_flex_reg =
4455 gen8_is_valid_flex_addr;
4456
4457 perf->ops.oa_enable = gen12_oa_enable;
4458 perf->ops.oa_disable = gen12_oa_disable;
4459 perf->ops.enable_metric_set = gen12_enable_metric_set;
4460 perf->ops.disable_metric_set = gen12_disable_metric_set;
4461 perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4462
4463 perf->ctx_flexeu0_offset = 0;
4464 perf->ctx_oactxctrl_offset = 0x144;
4465 }
4466 }
4467
4468 if (perf->ops.enable_metric_set) {
4469 mutex_init(&perf->lock);
4470
4471 /* Choose a representative limit */
4472 oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
4473
4474 mutex_init(&perf->metrics_lock);
4475 idr_init_base(&perf->metrics_idr, 1);
4476
4477 /* We set up some ratelimit state to potentially throttle any
4478 * _NOTES about spurious, invalid OA reports which we don't
4479 * forward to userspace.
4480 *
4481 * We print a _NOTE about any throttling when closing the
4482 * stream instead of waiting until driver _fini which no one
4483 * would ever see.
4484 *
4485 * Using the same limiting factors as printk_ratelimit()
4486 */
4487 ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4488 /* Since we use a DRM_NOTE for spurious reports it would be
4489 * inconsistent to let __ratelimit() automatically print a
4490 * warning for throttling.
4491 */
4492 ratelimit_set_flags(&perf->spurious_report_rs,
4493 RATELIMIT_MSG_ON_RELEASE);
4494
4495 ratelimit_state_init(&perf->tail_pointer_race,
4496 5 * HZ, 10);
4497 ratelimit_set_flags(&perf->tail_pointer_race,
4498 RATELIMIT_MSG_ON_RELEASE);
4499
4500 atomic64_set(&perf->noa_programming_delay,
4501 500 * 1000 /* 500us */);
4502
4503 perf->i915 = i915;
4504
4505 oa_init_supported_formats(perf);
4506 }
4507 }
4508
destroy_config(int id,void * p,void * data)4509 static int destroy_config(int id, void *p, void *data)
4510 {
4511 i915_oa_config_put(p);
4512 return 0;
4513 }
4514
i915_perf_sysctl_register(void)4515 int i915_perf_sysctl_register(void)
4516 {
4517 sysctl_header = register_sysctl("dev/i915", oa_table);
4518 return 0;
4519 }
4520
i915_perf_sysctl_unregister(void)4521 void i915_perf_sysctl_unregister(void)
4522 {
4523 unregister_sysctl_table(sysctl_header);
4524 }
4525
4526 /**
4527 * i915_perf_fini - Counter part to i915_perf_init()
4528 * @i915: i915 device instance
4529 */
i915_perf_fini(struct drm_i915_private * i915)4530 void i915_perf_fini(struct drm_i915_private *i915)
4531 {
4532 struct i915_perf *perf = &i915->perf;
4533
4534 if (!perf->i915)
4535 return;
4536
4537 idr_for_each(&perf->metrics_idr, destroy_config, perf);
4538 idr_destroy(&perf->metrics_idr);
4539
4540 memset(&perf->ops, 0, sizeof(perf->ops));
4541 perf->i915 = NULL;
4542 }
4543
4544 /**
4545 * i915_perf_ioctl_version - Version of the i915-perf subsystem
4546 *
4547 * This version number is used by userspace to detect available features.
4548 */
i915_perf_ioctl_version(void)4549 int i915_perf_ioctl_version(void)
4550 {
4551 /*
4552 * 1: Initial version
4553 * I915_PERF_IOCTL_ENABLE
4554 * I915_PERF_IOCTL_DISABLE
4555 *
4556 * 2: Added runtime modification of OA config.
4557 * I915_PERF_IOCTL_CONFIG
4558 *
4559 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4560 * preemption on a particular context so that performance data is
4561 * accessible from a delta of MI_RPC reports without looking at the
4562 * OA buffer.
4563 *
4564 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
4565 * be run for the duration of the performance recording based on
4566 * their SSEU configuration.
4567 *
4568 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4569 * interval for the hrtimer used to check for OA data.
4570 */
4571 return 5;
4572 }
4573
4574 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4575 #include "selftests/i915_perf.c"
4576 #endif
4577