// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Monitoring code
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author:
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This replaces the cqm.c based on perf, but we reuse a lot of
 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>

#include "internal.h"

struct rmid_entry {
        u32                     rmid;
        int                     busy;
        struct list_head        list;
};

/**
 * @rmid_free_lru    A least recently used list of free RMIDs
 *     These RMIDs are guaranteed to have an occupancy less than the
 *     threshold occupancy
 */
static LIST_HEAD(rmid_free_lru);

/**
 * @rmid_limbo_count     count of currently unused but (potentially)
 *     dirty RMIDs.
 *     This counts RMIDs that no one is currently using but that
 *     may have an occupancy value > resctrl_rmid_realloc_threshold. User can
 *     change the threshold occupancy value.
 */
static unsigned int rmid_limbo_count;

/**
 * @rmid_entry - The entry in the limbo and free lists.
 */
static struct rmid_entry        *rmid_ptrs;

/*
 * Global boolean for rdt_monitor which is true if any
 * resource monitoring is enabled.
 */
bool rdt_mon_capable;

/*
 * Global to indicate which monitoring events are enabled.
 */
unsigned int rdt_mon_features;

/*
 * This is the threshold cache occupancy in bytes at which we will consider an
 * RMID available for re-allocation.
 */
unsigned int resctrl_rmid_realloc_threshold;

/*
 * This is the maximum value for the reallocation threshold, in bytes.
 */
unsigned int resctrl_rmid_realloc_limit;
#define CF(cf)  ((unsigned long)(1048576 * (cf) + 0.5))

/*
 * The correction factor table is documented in Documentation/x86/resctrl.rst.
 * If rmid > rmid threshold, MBM total and local values should be multiplied
 * by the correction factor.
 *
 * The original table is modified for better code:
 *
 * 1. The threshold 0 is changed to rmid count - 1 so no correction
 *    is done for that case.
 * 2. The MBM total and local correction table is indexed by core count, which
 *    is equal to (x86_cache_max_rmid + 1) / 8 - 1 and ranges from 0 to 27.
 * 3. The correction factor is normalized to 2^20 (1048576) so it's faster
 *    to calculate the corrected value by shifting:
 *    corrected_value = (original_value * correction_factor) >> 20
 */
static const struct mbm_correction_factor_table {
        u32 rmidthreshold;
        u64 cf;
} mbm_cf_table[] __initconst = {
        {7,     CF(1.000000)},
        {15,    CF(1.000000)},
        {15,    CF(0.969650)},
        {31,    CF(1.000000)},
        {31,    CF(1.066667)},
        {31,    CF(0.969650)},
        {47,    CF(1.142857)},
        {63,    CF(1.000000)},
        {63,    CF(1.185115)},
        {63,    CF(1.066553)},
        {79,    CF(1.454545)},
        {95,    CF(1.000000)},
        {95,    CF(1.230769)},
        {95,    CF(1.142857)},
        {95,    CF(1.066667)},
        {127,   CF(1.000000)},
        {127,   CF(1.254863)},
        {127,   CF(1.185255)},
        {151,   CF(1.000000)},
        {127,   CF(1.066667)},
        {167,   CF(1.000000)},
        {159,   CF(1.454334)},
        {183,   CF(1.000000)},
        {127,   CF(0.969744)},
        {191,   CF(1.280246)},
        {191,   CF(1.230921)},
        {215,   CF(1.000000)},
        {191,   CF(1.143118)},
};

static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX;
static u64 mbm_cf __read_mostly;

static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
{
        /* Correct MBM value. */
        if (rmid > mbm_cf_rmidthreshold)
                val = (val * mbm_cf) >> 20;

        return val;
}
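
/*
 * Worked example of the fixed-point correction above (the factor is taken
 * from the table, the counter value is invented): CF(1.142857) stores
 * (unsigned long)(1048576 * 1.142857 + 0.5) = 1198372. For a chunk count
 * of 1000000 on an RMID above the threshold, the corrected value is
 * (1000000 * 1198372) >> 20 = 1142856, i.e. effectively a multiply by
 * 1.142857 without using floating point at run time.
 */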

static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
        struct rmid_entry *entry;

        entry = &rmid_ptrs[rmid];
        WARN_ON(entry->rmid != rmid);

        return entry;
}

static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
{
        u64 msr_val;

        /*
         * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
         * with a valid event code for a supported resource type and the bits
         * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with a valid RMID,
         * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
         * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
         * are error bits.
         */
        wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
        rdmsrl(MSR_IA32_QM_CTR, msr_val);

        if (msr_val & RMID_VAL_ERROR)
                return -EIO;
        if (msr_val & RMID_VAL_UNAVAIL)
                return -EINVAL;

        *val = msr_val;
        return 0;
}
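
/*
 * Example of the register protocol above, assuming the SDM encoding in
 * which L3 occupancy is event 0x1: reading occupancy for RMID 3 writes
 * ((u64)3 << 32) | 0x1 into IA32_QM_EVTSEL (event code in bits 7:0, RMID
 * in bits 41:32), and the following rdmsrl() of IA32_QM_CTR returns the
 * count in bits 61:0 unless the Error or Unavailable bit is set.
 */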

static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
                                                 u32 rmid,
                                                 enum resctrl_event_id eventid)
{
        switch (eventid) {
        case QOS_L3_OCCUP_EVENT_ID:
                return NULL;
        case QOS_L3_MBM_TOTAL_EVENT_ID:
                return &hw_dom->arch_mbm_total[rmid];
        case QOS_L3_MBM_LOCAL_EVENT_ID:
                return &hw_dom->arch_mbm_local[rmid];
        }

        /* Never expect to get here */
        WARN_ON_ONCE(1);

        return NULL;
}

void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
                             u32 rmid, enum resctrl_event_id eventid)
{
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct arch_mbm_state *am;

        am = get_arch_mbm_state(hw_dom, rmid, eventid);
        if (am) {
                memset(am, 0, sizeof(*am));

                /* Record any initial, non-zero count value. */
                __rmid_read(rmid, eventid, &am->prev_msr);
        }
}

static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
{
        u64 shift = 64 - width, chunks;

        chunks = (cur_msr << shift) - (prev_msr << shift);
        return chunks >> shift;
}
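
/*
 * Illustrative only: with a 24-bit counter (width == 24, so shift == 40),
 * a previous reading of 0xfffff0 and a current reading of 0x000010 give
 * (0x000010 << 40) - (0xfffff0 << 40), which wraps modulo 2^64 and, once
 * shifted back down, yields 0x20 chunks - the correct delta across the
 * counter wrap. The width and register values here are made up; the real
 * width comes from hw_res->mbm_width.
 */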

int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
                           u32 rmid, enum resctrl_event_id eventid, u64 *val)
{
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct arch_mbm_state *am;
        u64 msr_val, chunks;
        int ret;

        if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
                return -EINVAL;

        ret = __rmid_read(rmid, eventid, &msr_val);
        if (ret)
                return ret;

        am = get_arch_mbm_state(hw_dom, rmid, eventid);
        if (am) {
                am->chunks += mbm_overflow_count(am->prev_msr, msr_val,
                                                 hw_res->mbm_width);
                chunks = get_corrected_mbm_count(rmid, am->chunks);
                am->prev_msr = msr_val;
        } else {
                chunks = msr_val;
        }

        *val = chunks * hw_res->mon_scale;

        return 0;
}
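
/*
 * Sketch of the unit conversion above, with an assumed (not hardware
 * derived) scaling factor: if CPUID reported an upscaling factor of 64
 * bytes per chunk (hw_res->mon_scale == 64) and 2048 chunks have
 * accumulated, the value reported to the caller is 2048 * 64 = 131072
 * bytes.
 */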

/*
 * Check the RMIDs that are marked as busy for this domain. If the
 * reported LLC occupancy is below the threshold clear the busy bit and
 * decrement the count. If the busy count gets to zero on an RMID, we
 * free the RMID.
 */
void __check_limbo(struct rdt_domain *d, bool force_free)
{
        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        struct rmid_entry *entry;
        u32 crmid = 1, nrmid;
        bool rmid_dirty;
        u64 val = 0;

        /*
         * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
         * are marked as busy for occupancy < threshold. If the occupancy
         * is less than the threshold decrement the busy counter of the
         * RMID and move it to the free list when the counter reaches 0.
         */
        for (;;) {
                nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
                if (nrmid >= r->num_rmid)
                        break;

                entry = __rmid_entry(nrmid);

                if (resctrl_arch_rmid_read(r, d, entry->rmid,
                                           QOS_L3_OCCUP_EVENT_ID, &val)) {
                        rmid_dirty = true;
                } else {
                        rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
                }

                if (force_free || !rmid_dirty) {
                        clear_bit(entry->rmid, d->rmid_busy_llc);
                        if (!--entry->busy) {
                                rmid_limbo_count--;
                                list_add_tail(&entry->list, &rmid_free_lru);
                        }
                }
                crmid = nrmid + 1;
        }
}

bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
{
        return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
}

/*
 * As of now the RMIDs allocation is global.
 * However we keep track of which packages the RMIDs
 * are used on to optimize the limbo list management.
 */
int alloc_rmid(void)
{
        struct rmid_entry *entry;

        lockdep_assert_held(&rdtgroup_mutex);

        if (list_empty(&rmid_free_lru))
                return rmid_limbo_count ? -EBUSY : -ENOSPC;

        entry = list_first_entry(&rmid_free_lru,
                                 struct rmid_entry, list);
        list_del(&entry->list);

        return entry->rmid;
}

static void add_rmid_to_limbo(struct rmid_entry *entry)
{
        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        struct rdt_domain *d;
        int cpu, err;
        u64 val = 0;

        entry->busy = 0;
        cpu = get_cpu();
        list_for_each_entry(d, &r->domains, list) {
                if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
                        err = resctrl_arch_rmid_read(r, d, entry->rmid,
                                                     QOS_L3_OCCUP_EVENT_ID,
                                                     &val);
                        if (err || val <= resctrl_rmid_realloc_threshold)
                                continue;
                }

                /*
                 * For the first limbo RMID in the domain,
                 * set up the limbo worker.
                 */
                if (!has_busy_rmid(r, d))
                        cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
                set_bit(entry->rmid, d->rmid_busy_llc);
                entry->busy++;
        }
        put_cpu();

        if (entry->busy)
                rmid_limbo_count++;
        else
                list_add_tail(&entry->list, &rmid_free_lru);
}

void free_rmid(u32 rmid)
{
        struct rmid_entry *entry;

        if (!rmid)
                return;

        lockdep_assert_held(&rdtgroup_mutex);

        entry = __rmid_entry(rmid);

        if (is_llc_occupancy_enabled())
                add_rmid_to_limbo(entry);
        else
                list_add_tail(&entry->list, &rmid_free_lru);
}

static int __mon_event_count(u32 rmid, struct rmid_read *rr)
{
        struct mbm_state *m;
        u64 tval = 0;

        if (rr->first)
                resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid);

        rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval);
        if (rr->err)
                return rr->err;

        switch (rr->evtid) {
        case QOS_L3_OCCUP_EVENT_ID:
                rr->val += tval;
                return 0;
        case QOS_L3_MBM_TOTAL_EVENT_ID:
                m = &rr->d->mbm_total[rmid];
                break;
        case QOS_L3_MBM_LOCAL_EVENT_ID:
                m = &rr->d->mbm_local[rmid];
                break;
        default:
                /*
                 * Code would never reach here because an invalid
                 * event id would fail in resctrl_arch_rmid_read().
                 */
                return -EINVAL;
        }

        if (rr->first) {
                memset(m, 0, sizeof(struct mbm_state));
                return 0;
        }

        rr->val += tval;

        return 0;
}

/*
 * mbm_bw_count() - Update bw count from values previously read by
 *                  __mon_event_count().
 * @rmid:       The rmid used to identify the cached mbm_state.
 * @rr:         The struct rmid_read populated by __mon_event_count().
 *
 * Supporting function to calculate the memory bandwidth
 * and delta bandwidth in MBps. The chunks value previously read by
 * __mon_event_count() is compared with the chunks value from the previous
 * invocation. This must be called once per second to maintain values in MBps.
 */
static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
{
        struct mbm_state *m = &rr->d->mbm_local[rmid];
        u64 cur_bw, bytes, cur_bytes;

        cur_bytes = rr->val;
        bytes = cur_bytes - m->prev_bw_bytes;
        m->prev_bw_bytes = cur_bytes;

        cur_bw = bytes / SZ_1M;

        if (m->delta_comp)
                m->delta_bw = abs(cur_bw - m->prev_bw);
        m->delta_comp = false;
        m->prev_bw = cur_bw;
}
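
/*
 * Worked example (the byte counts are invented): if the previous call saw
 * a cumulative rr->val of 2000 * SZ_1M bytes and the current call, one
 * second later, sees 2750 * SZ_1M bytes, then bytes = 750 * SZ_1M and
 * cur_bw = 750 MBps. Because the sampling interval is fixed at one second
 * by the overflow timer, dividing the byte delta by SZ_1M directly yields
 * MBps with no explicit division by elapsed time.
 */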

/*
 * This is called via IPI to read the CQM/MBM counters
 * on a domain.
 */
void mon_event_count(void *info)
{
        struct rdtgroup *rdtgrp, *entry;
        struct rmid_read *rr = info;
        struct list_head *head;
        int ret;

        rdtgrp = rr->rgrp;

        ret = __mon_event_count(rdtgrp->mon.rmid, rr);

        /*
         * For Ctrl groups read data from child monitor groups and
         * add them together. Count events which are read successfully.
         * Discard the rmid_reads that report errors.
         */
        head = &rdtgrp->mon.crdtgrp_list;

        if (rdtgrp->type == RDTCTRL_GROUP) {
                list_for_each_entry(entry, head, mon.crdtgrp_list) {
                        if (__mon_event_count(entry->mon.rmid, rr) == 0)
                                ret = 0;
                }
        }

        /*
         * __mon_event_count() calls for newly created monitor groups may
         * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
         * Discard error if any of the monitor event reads succeeded.
         */
        if (ret == 0)
                rr->err = 0;
}

/*
 * Feedback loop for MBA software controller (mba_sc)
 *
 * mba_sc is a feedback loop where we periodically read MBM counters and
 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
 * that:
 *
 *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
 *
 * This uses the MBM counters to measure the bandwidth and MBA throttle
 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
 * fact that resctrl rdtgroups have both monitoring and control.
 *
 * The frequency of the checks is 1s and we just tag along the MBM overflow
 * timer. Having a 1s interval makes the calculation of bandwidth simpler.
 *
 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 * be a need to increase the bandwidth to avoid unnecessarily restricting
 * the L2 <-> L3 traffic.
 *
 * Since MBA controls the L2 external bandwidth whereas MBM measures the
 * L3 external bandwidth, the following sequence could lead to such a
 * situation.
 *
 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
 * phases -> mba_sc kicks in and reduces the bandwidth percentage values ->
 * but after some time the rdtgroup has mostly L2 <-> L3 traffic.
 *
 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
 * throttle MSRs already have low percentage values. To avoid
 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
 */
static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
{
        u32 closid, rmid, cur_msr_val, new_msr_val;
        struct mbm_state *pmbm_data, *cmbm_data;
        u32 cur_bw, delta_bw, user_bw;
        struct rdt_resource *r_mba;
        struct rdt_domain *dom_mba;
        struct list_head *head;
        struct rdtgroup *entry;

        if (!is_mbm_local_enabled())
                return;

        r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;

        closid = rgrp->closid;
        rmid = rgrp->mon.rmid;
        pmbm_data = &dom_mbm->mbm_local[rmid];

        dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
        if (!dom_mba) {
                pr_warn_once("Failure to get domain for MBA update\n");
                return;
        }

        cur_bw = pmbm_data->prev_bw;
        user_bw = dom_mba->mbps_val[closid];
        delta_bw = pmbm_data->delta_bw;

        /* MBA resource doesn't support CDP */
        cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);

        /*
         * For Ctrl groups read data from child monitor groups.
         */
        head = &rgrp->mon.crdtgrp_list;
        list_for_each_entry(entry, head, mon.crdtgrp_list) {
                cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
                cur_bw += cmbm_data->prev_bw;
                delta_bw += cmbm_data->delta_bw;
        }

        /*
         * Scale up/down the bandwidth linearly for the ctrl group. The
         * bandwidth step is the bandwidth granularity specified by the
         * hardware.
         *
         * The delta_bw is used when increasing the bandwidth so that we
         * don't alternately increase and decrease the control values
         * continuously.
         *
         * For example: consider cur_bw = 90MBps, user_bw = 100MBps and a
         * bandwidth step of 20MBps (> user_bw - cur_bw); we would keep
         * switching between 90 and 110 continuously if we only checked
         * cur_bw < user_bw.
         */
        if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
                new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
        } else if (cur_msr_val < MAX_MBA_BW &&
                   (user_bw > (cur_bw + delta_bw))) {
                new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
        } else {
                return;
        }

        resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);

        /*
         * Delta values are updated dynamically package-wise for each
         * rdtgrp every time the throttle MSR changes value.
         *
         * This is because (1) the increase in bandwidth is not perfectly
         * linear and only "approximately" linear even when the hardware
         * says it is linear. (2) Also, since MBA is a core-specific
         * mechanism, the delta values vary based on the number of cores
         * used by the rdtgrp.
         */
        pmbm_data->delta_comp = true;
        list_for_each_entry(entry, head, mon.crdtgrp_list) {
                cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
                cmbm_data->delta_comp = true;
        }
}
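
/*
 * Illustrative step of the feedback loop above (all numbers invented):
 * with user_bw = 1000 MBps, a measured cur_bw of 1200 MBps and a current
 * throttle value of 80%, the first branch lowers the throttle by one
 * bw_gran step, e.g. 10%, to 70%. On a later pass, once cur_bw + delta_bw
 * has fallen below user_bw, the second branch steps the throttle back up,
 * with delta_bw providing the hysteresis that keeps the value from
 * oscillating every second.
 */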

static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
{
        struct rmid_read rr;

        rr.first = false;
        rr.r = r;
        rr.d = d;

        /*
         * This is protected from concurrent reads from user space as both
         * the user-space reader and this handler hold the global mutex.
         */
        if (is_mbm_total_enabled()) {
                rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
                rr.val = 0;
                __mon_event_count(rmid, &rr);
        }
        if (is_mbm_local_enabled()) {
                rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
                rr.val = 0;
                __mon_event_count(rmid, &rr);

                /*
                 * Call the MBA software controller only for the
                 * control groups and when the user has enabled
                 * the software controller explicitly.
                 */
                if (is_mba_sc(NULL))
                        mbm_bw_count(rmid, &rr);
        }
}

/*
 * Handler to scan the limbo list and move the RMIDs whose
 * occupancy < threshold_occupancy to the free list.
 */
void cqm_handle_limbo(struct work_struct *work)
{
        unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
        int cpu = smp_processor_id();
        struct rdt_resource *r;
        struct rdt_domain *d;

        mutex_lock(&rdtgroup_mutex);

        r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        d = container_of(work, struct rdt_domain, cqm_limbo.work);

        __check_limbo(d, false);

        if (has_busy_rmid(r, d))
                schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);

        mutex_unlock(&rdtgroup_mutex);
}

void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
        unsigned long delay = msecs_to_jiffies(delay_ms);
        int cpu;

        cpu = cpumask_any(&dom->cpu_mask);
        dom->cqm_work_cpu = cpu;

        schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
}

void mbm_handle_overflow(struct work_struct *work)
{
        unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
        struct rdtgroup *prgrp, *crgrp;
        int cpu = smp_processor_id();
        struct list_head *head;
        struct rdt_resource *r;
        struct rdt_domain *d;

        mutex_lock(&rdtgroup_mutex);

        if (!static_branch_likely(&rdt_mon_enable_key))
                goto out_unlock;

        r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        d = container_of(work, struct rdt_domain, mbm_over.work);

        list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
                mbm_update(r, d, prgrp->mon.rmid);

                head = &prgrp->mon.crdtgrp_list;
                list_for_each_entry(crgrp, head, mon.crdtgrp_list)
                        mbm_update(r, d, crgrp->mon.rmid);

                if (is_mba_sc(NULL))
                        update_mba_bw(prgrp, d);
        }

        schedule_delayed_work_on(cpu, &d->mbm_over, delay);

out_unlock:
        mutex_unlock(&rdtgroup_mutex);
}

void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
        unsigned long delay = msecs_to_jiffies(delay_ms);
        int cpu;

        if (!static_branch_likely(&rdt_mon_enable_key))
                return;
        cpu = cpumask_any(&dom->cpu_mask);
        dom->mbm_work_cpu = cpu;
        schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
}

static int dom_data_init(struct rdt_resource *r)
{
        struct rmid_entry *entry = NULL;
        int i, nr_rmids;

        nr_rmids = r->num_rmid;
        rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
        if (!rmid_ptrs)
                return -ENOMEM;

        for (i = 0; i < nr_rmids; i++) {
                entry = &rmid_ptrs[i];
                INIT_LIST_HEAD(&entry->list);

                entry->rmid = i;
                list_add_tail(&entry->list, &rmid_free_lru);
        }

        /*
         * RMID 0 is special and is always allocated. It's used for all
         * tasks that are not monitored.
         */
        entry = __rmid_entry(0);
        list_del(&entry->list);

        return 0;
}

static struct mon_evt llc_occupancy_event = {
        .name           = "llc_occupancy",
        .evtid          = QOS_L3_OCCUP_EVENT_ID,
};

static struct mon_evt mbm_total_event = {
        .name           = "mbm_total_bytes",
        .evtid          = QOS_L3_MBM_TOTAL_EVENT_ID,
};

static struct mon_evt mbm_local_event = {
        .name           = "mbm_local_bytes",
        .evtid          = QOS_L3_MBM_LOCAL_EVENT_ID,
};

/*
 * Initialize the event list for the resource.
 *
 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
 * because as per the SDM the total and local memory bandwidth
 * are enumerated as part of L3 monitoring.
 */
static void l3_mon_evt_init(struct rdt_resource *r)
{
        INIT_LIST_HEAD(&r->evt_list);

        if (is_llc_occupancy_enabled())
                list_add_tail(&llc_occupancy_event.list, &r->evt_list);
        if (is_mbm_total_enabled())
                list_add_tail(&mbm_total_event.list, &r->evt_list);
        if (is_mbm_local_enabled())
                list_add_tail(&mbm_local_event.list, &r->evt_list);
}

int rdt_get_mon_l3_config(struct rdt_resource *r)
{
        unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        unsigned int threshold;
        int ret;

        resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
        hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
        r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
        hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;

        if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
                hw_res->mbm_width += mbm_offset;
        else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
                pr_warn("Ignoring impossible MBM counter offset\n");

        /*
         * A reasonable upper limit on the max threshold is the number
         * of lines tagged per RMID if all RMIDs have the same number of
         * lines tagged in the LLC.
         *
         * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
         */
        threshold = resctrl_rmid_realloc_limit / r->num_rmid;

        /*
         * Because num_rmid may not be a power of two, round the value
         * to the nearest multiple of hw_res->mon_scale so it matches a
         * value the hardware will measure. mon_scale may not be a power of 2.
         */
        resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold);

        ret = dom_data_init(r);
        if (ret)
                return ret;

        l3_mon_evt_init(r);

        r->mon_capable = true;

        return 0;
}
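
/*
 * Worked example of the default threshold chosen above (cache size and
 * RMID count are illustrative): for a 35MB LLC, resctrl_rmid_realloc_limit
 * = 35 * 1024 * 1024 = 36700160 bytes; with 56 RMIDs the per-RMID share is
 * 36700160 / 56 = 655360 bytes (640 KiB), about 1.8% of the cache, which
 * resctrl_arch_round_mon_val() then rounds to a multiple of mon_scale.
 */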

void __init intel_rdt_mbm_apply_quirk(void)
{
        int cf_index;

        cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1;
        if (cf_index >= ARRAY_SIZE(mbm_cf_table)) {
                pr_info("No MBM correction factor available\n");
                return;
        }

        mbm_cf_rmidthreshold = mbm_cf_table[cf_index].rmidthreshold;
        mbm_cf = mbm_cf_table[cf_index].cf;
}
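
/*
 * Worked example of the quirk selection above (the CPU topology value is
 * invented): on a part reporting x86_cache_max_rmid = 175, cf_index =
 * (175 + 1) / 8 - 1 = 21, which selects the {159, CF(1.454334)} table
 * entry, so MBM counts for RMIDs above 159 are scaled by roughly 1.454334
 * in get_corrected_mbm_count().
 */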