Lines matching refs:plr — cross-reference hits for the pseudo-locked region pointer (plr, a struct pseudo_lock_region *) in the resctrl cache pseudo-locking code (pseudo_lock.c). Each entry shows the source line number, the matching line, and the enclosing function; a short reconstruction of the surrounding code follows each group of hits.

171 		if (rdtgrp->plr && rdtgrp->plr->minor == minor) {  in region_find_by_minor()
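
The hit at line 171 is the match test inside the device-minor lookup. A sketch of how that lookup reads in context, patterned on the upstream implementation (the rdt_all_groups list head and rdtgroup_mutex come from the surrounding resctrl code, not from this listing):

	/* Walk all rdtgroups and return the one whose pseudo-lock
	 * region owns the given character-device minor number. */
	static struct rdtgroup *region_find_by_minor(unsigned int minor)
	{
		struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;

		mutex_lock(&rdtgroup_mutex);
		list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
			if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
				rdtgrp_match = rdtgrp;
				break;
			}
		}
		mutex_unlock(&rdtgroup_mutex);
		return rdtgrp_match;
	}
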
189 static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) in pseudo_lock_cstates_relax() argument
193 list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { in pseudo_lock_cstates_relax()
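
Lines 189 and 193 are the teardown side of the PM QoS bookkeeping: every request queued on plr->pm_reqs is removed and freed. A sketch, assuming each entry carries a struct dev_pm_qos_request named req as upstream does:

	static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
	{
		struct pseudo_lock_pm_req *pm_req, *next;

		/* _safe variant because each entry is deleted while iterating */
		list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
			dev_pm_qos_remove_request(&pm_req->req);
			list_del(&pm_req->list);
			kfree(pm_req);
		}
	}
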
218 static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) in pseudo_lock_cstates_constrain() argument
224 for_each_cpu(cpu, &plr->d->cpu_mask) { in pseudo_lock_cstates_constrain()
242 list_add(&pm_req->list, &plr->pm_reqs); in pseudo_lock_cstates_constrain()
248 pseudo_lock_cstates_relax(plr); in pseudo_lock_cstates_constrain()
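
Lines 218–248 are the constrain side: one PM QoS resume-latency request per CPU sharing the cache domain, with pseudo_lock_cstates_relax() as the unwind path (the hit at line 248). A sketch; the 30 µs latency cap is taken from the upstream implementation, not from the hits above:

	static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
	{
		struct pseudo_lock_pm_req *pm_req;
		int cpu, ret;

		for_each_cpu(cpu, &plr->d->cpu_mask) {
			pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
			if (!pm_req) {
				ret = -ENOMEM;
				goto out_err;
			}
			/* Cap resume latency so deep C-states that could
			 * evict the locked cache lines are not entered. */
			ret = dev_pm_qos_add_request(get_cpu_device(cpu),
						     &pm_req->req,
						     DEV_PM_QOS_RESUME_LATENCY,
						     30);
			if (ret < 0) {
				kfree(pm_req);
				goto out_err;
			}
			list_add(&pm_req->list, &plr->pm_reqs);
		}
		return 0;

	out_err:
		pseudo_lock_cstates_relax(plr);	/* undo requests added so far */
		return ret;
	}
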
261 static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) in pseudo_lock_region_clear() argument
263 plr->size = 0; in pseudo_lock_region_clear()
264 plr->line_size = 0; in pseudo_lock_region_clear()
265 kfree(plr->kmem); in pseudo_lock_region_clear()
266 plr->kmem = NULL; in pseudo_lock_region_clear()
267 plr->s = NULL; in pseudo_lock_region_clear()
268 if (plr->d) in pseudo_lock_region_clear()
269 plr->d->plr = NULL; in pseudo_lock_region_clear()
270 plr->d = NULL; in pseudo_lock_region_clear()
271 plr->cbm = 0; in pseudo_lock_region_clear()
272 plr->debugfs_dir = NULL; in pseudo_lock_region_clear()
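
Lines 261–272 give pseudo_lock_region_clear() nearly verbatim, and together the hits in this listing name every plr field the driver touches. A sketch of the structure they imply (field types are assumptions inferred from usage; upstream may order or type them differently):

	struct pseudo_lock_region {
		struct resctrl_schema	*s;	/* schema/resource backing the region */
		struct rdt_domain	*d;	/* cache domain; d->plr points back here */
		u32			cbm;	/* capacity bitmask of the locked region */
		wait_queue_head_t	lock_thread_wq;	/* woken when thread_done is set */
		int			thread_done;
		int			cpu;	/* CPU the locking/measure thread runs on */
		unsigned int		line_size;	/* coherency line size of the cache */
		unsigned int		size;	/* region size in bytes */
		void			*kmem;	/* kernel memory that gets pseudo-locked */
		unsigned int		minor;	/* char-device minor for user mmap */
		struct dentry		*debugfs_dir;
		struct list_head	pm_reqs;	/* PM QoS requests, one per CPU */
	};
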
293 static int pseudo_lock_region_init(struct pseudo_lock_region *plr) in pseudo_lock_region_init() argument
300 plr->cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_region_init()
302 if (!cpu_online(plr->cpu)) { in pseudo_lock_region_init()
304 plr->cpu); in pseudo_lock_region_init()
309 ci = get_cpu_cacheinfo(plr->cpu); in pseudo_lock_region_init()
311 plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); in pseudo_lock_region_init()
314 if (ci->info_list[i].level == plr->s->res->cache_level) { in pseudo_lock_region_init()
315 plr->line_size = ci->info_list[i].coherency_line_size; in pseudo_lock_region_init()
323 pseudo_lock_region_clear(plr); in pseudo_lock_region_init()
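
A sketch assembling lines 293–323: pick the first CPU of the domain, verify it is online, derive the region size from the CBM, and look up the coherency line size for the resource's cache level; failure unwinds through pseudo_lock_region_clear(). Return codes follow the upstream pattern:

	static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
	{
		struct cpu_cacheinfo *ci;
		int i, ret;

		/* Pick the first CPU associated with the cache domain. */
		plr->cpu = cpumask_first(&plr->d->cpu_mask);
		if (!cpu_online(plr->cpu)) {
			ret = -ENODEV;
			goto out_region;
		}

		ci = get_cpu_cacheinfo(plr->cpu);
		plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);

		for (i = 0; i < ci->num_leaves; i++) {
			if (ci->info_list[i].level == plr->s->res->cache_level) {
				plr->line_size = ci->info_list[i].coherency_line_size;
				return 0;
			}
		}

		ret = -1;	/* no cacheinfo leaf matched the cache level */
	out_region:
		pseudo_lock_region_clear(plr);
		return ret;
	}
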
340 struct pseudo_lock_region *plr; in pseudo_lock_init() local
342 plr = kzalloc(sizeof(*plr), GFP_KERNEL); in pseudo_lock_init()
343 if (!plr) in pseudo_lock_init()
346 init_waitqueue_head(&plr->lock_thread_wq); in pseudo_lock_init()
347 INIT_LIST_HEAD(&plr->pm_reqs); in pseudo_lock_init()
348 rdtgrp->plr = plr; in pseudo_lock_init()
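
Lines 340–348 show essentially the whole allocation path; a sketch of the full function:

	static int pseudo_lock_init(struct rdtgroup *rdtgrp)
	{
		struct pseudo_lock_region *plr;

		plr = kzalloc(sizeof(*plr), GFP_KERNEL);
		if (!plr)
			return -ENOMEM;

		init_waitqueue_head(&plr->lock_thread_wq);
		INIT_LIST_HEAD(&plr->pm_reqs);
		rdtgrp->plr = plr;
		return 0;
	}
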
362 static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) in pseudo_lock_region_alloc() argument
366 ret = pseudo_lock_region_init(plr); in pseudo_lock_region_alloc()
374 if (plr->size > KMALLOC_MAX_SIZE) { in pseudo_lock_region_alloc()
380 plr->kmem = kzalloc(plr->size, GFP_KERNEL); in pseudo_lock_region_alloc()
381 if (!plr->kmem) { in pseudo_lock_region_alloc()
390 pseudo_lock_region_clear(plr); in pseudo_lock_region_alloc()
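
Lines 362–390 allocate the backing memory after the region parameters are known. A sketch; the KMALLOC_MAX_SIZE guard at line 374 exists because the region must be one physically contiguous kzalloc() allocation:

	static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
	{
		int ret;

		ret = pseudo_lock_region_init(plr);
		if (ret < 0)
			return ret;

		/* Contiguous regions above KMALLOC_MAX_SIZE are unsupported. */
		if (plr->size > KMALLOC_MAX_SIZE) {
			ret = -E2BIG;
			goto out_region;
		}

		plr->kmem = kzalloc(plr->size, GFP_KERNEL);
		if (!plr->kmem) {
			ret = -ENOMEM;
			goto out_region;
		}
		return 0;

	out_region:
		pseudo_lock_region_clear(plr);
		return ret;
	}
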
407 pseudo_lock_region_clear(rdtgrp->plr); in pseudo_lock_free()
408 kfree(rdtgrp->plr); in pseudo_lock_free()
409 rdtgrp->plr = NULL; in pseudo_lock_free()
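
Lines 407–409 are the complete body of the free path, mirroring pseudo_lock_init():

	static void pseudo_lock_free(struct rdtgroup *rdtgrp)
	{
		pseudo_lock_region_clear(rdtgrp->plr);
		kfree(rdtgrp->plr);
		rdtgrp->plr = NULL;
	}
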
434 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_fn() local
485 mem_r = plr->kmem; in pseudo_lock_fn()
486 size = plr->size; in pseudo_lock_fn()
487 line_size = plr->line_size; in pseudo_lock_fn()
536 plr->thread_done = 1; in pseudo_lock_fn()
537 wake_up_interruptible(&plr->lock_thread_wq); in pseudo_lock_fn()
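
Lines 434–537 belong to the kthread that actually loads plr->kmem into cache; only the plr references appear above. A sketch of the core read loop between them, patterned on upstream (the CLOSID programming via the PQR_ASSOC MSR and the interrupt disabling that bracket this loop are upstream details not shown in the hits):

	/* Inside pseudo_lock_fn(), interrupts off, pseudo-lock CLOSID active: */
	mem_r = plr->kmem;
	size = plr->size;
	line_size = plr->line_size;

	/* First pass faults pages in; second pass touches the region one
	 * cache line at a time so every line is filled while the
	 * pseudo-locking CLOSID is in effect. */
	for (i = 0; i < size; i += PAGE_SIZE)
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     : : "r" (mem_r), "r" (i) : "%eax", "memory");
	for (i = 0; i < size; i += line_size)
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     : : "r" (mem_r), "r" (i) : "%eax", "memory");

	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
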
818 if (d->plr) { in rdtgroup_cbm_overlaps_pseudo_locked()
819 cbm_len = d->plr->s->res->cache.cbm_len; in rdtgroup_cbm_overlaps_pseudo_locked()
820 cbm_b = d->plr->cbm; in rdtgroup_cbm_overlaps_pseudo_locked()
856 if (d_i->plr) in rdtgroup_pseudo_locked_in_hierarchy()
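
Lines 818–820 and 856 are the guards that keep other allocations off a pseudo-locked region. A sketch of the overlap test; the bitmap_intersects() call and the unsigned long staging variable follow the upstream pattern:

	bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d,
						 unsigned long cbm)
	{
		unsigned int cbm_len;
		unsigned long cbm_b;

		if (d->plr) {
			cbm_len = d->plr->s->res->cache.cbm_len;
			cbm_b = d->plr->cbm;
			if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
				return true;
		}
		return false;
	}

The hit at line 856 is the simpler hierarchy walk: rdtgroup_pseudo_locked_in_hierarchy() returns true as soon as any domain d_i it inspects has d_i->plr set.
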
889 struct pseudo_lock_region *plr = _plr; in measure_cycles_lat_fn() local
901 mem_r = READ_ONCE(plr->kmem); in measure_cycles_lat_fn()
907 for (i = 0; i < plr->size; i += 32) { in measure_cycles_lat_fn()
918 plr->thread_done = 1; in measure_cycles_lat_fn()
919 wake_up_interruptible(&plr->lock_thread_wq); in measure_cycles_lat_fn()
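
Lines 889–919 are the latency probe: it reads the region at a 32-byte stride and records per-access cycle counts. A sketch of the timed loop; the rdtsc_ordered() bracketing and the tracepoint reporting the delta follow the upstream code:

	/* Inside measure_cycles_lat_fn(), interrupts off: */
	mem_r = READ_ONCE(plr->kmem);
	for (i = 0; i < plr->size; i += 32) {
		start = rdtsc_ordered();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     : : "r" (mem_r), "r" (i) : "%eax", "memory");
		end = rdtsc_ordered();
		trace_pseudo_lock_mem_latency((u32)(end - start));
	}

	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
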
955 struct pseudo_lock_region *plr, in measure_residency_fn() argument
968 miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu, in measure_residency_fn()
973 hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu, in measure_residency_fn()
1005 line_size = READ_ONCE(plr->line_size); in measure_residency_fn()
1006 mem_r = READ_ONCE(plr->kmem); in measure_residency_fn()
1007 size = READ_ONCE(plr->size); in measure_residency_fn()
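
Lines 955–1007 show the residency probe creating kernel perf counters pinned to plr->cpu before the measured loop. A sketch of the setup; the NULL task, overflow-handler, and context arguments match perf_event_create_kernel_counter()'s signature:

	miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
						      NULL, NULL, NULL);
	if (IS_ERR(miss_event))
		goto out;

	hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
						     NULL, NULL, NULL);
	if (IS_ERR(hit_event))
		goto out_miss;

	/* Snapshot the region parameters once, outside the measured loop,
	 * so they are not re-fetched while counting hits and misses. */
	line_size = READ_ONCE(plr->line_size);
	mem_r = READ_ONCE(plr->kmem);
	size = READ_ONCE(plr->size);
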
1073 struct pseudo_lock_region *plr = _plr; in measure_l2_residency() local
1096 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); in measure_l2_residency()
1104 plr->thread_done = 1; in measure_l2_residency()
1105 wake_up_interruptible(&plr->lock_thread_wq); in measure_l2_residency()
1111 struct pseudo_lock_region *plr = _plr; in measure_l3_residency() local
1135 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); in measure_l3_residency()
1160 plr->thread_done = 1; in measure_l3_residency()
1161 wake_up_interruptible(&plr->lock_thread_wq); in measure_l3_residency()
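
Lines 1073–1161 show that the two residency probes share one shape: configure the hit/miss perf_event_attrs for the right cache level, hand them to measure_residency_fn(), then signal completion. A condensed sketch of that shape (the model-specific event encodings that fill in the attrs are elided; they are hardware details in the upstream code):

	static int measure_l2_residency(void *_plr)
	{
		struct pseudo_lock_region *plr = _plr;
		struct residency_counts counts = {0};

		/* perf_miss_attr/perf_hit_attr are filled with the
		 * model-specific L2 hit/miss event encodings here ... */
		measure_residency_fn(&perf_miss_attr, &perf_hit_attr,
				     plr, &counts);

		plr->thread_done = 1;
		wake_up_interruptible(&plr->lock_thread_wq);
		return 0;
	}

measure_l3_residency() at lines 1111–1161 is identical in structure, with L3 event encodings.
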
1179 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_measure_cycles() local
1192 if (!plr->d) { in pseudo_lock_measure_cycles()
1197 plr->thread_done = 0; in pseudo_lock_measure_cycles()
1198 cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_measure_cycles()
1204 plr->cpu = cpu; in pseudo_lock_measure_cycles()
1207 thread = kthread_create_on_node(measure_cycles_lat_fn, plr, in pseudo_lock_measure_cycles()
1212 thread = kthread_create_on_node(measure_l2_residency, plr, in pseudo_lock_measure_cycles()
1217 thread = kthread_create_on_node(measure_l3_residency, plr, in pseudo_lock_measure_cycles()
1231 ret = wait_event_interruptible(plr->lock_thread_wq, in pseudo_lock_measure_cycles()
1232 plr->thread_done == 1); in pseudo_lock_measure_cycles()
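
Lines 1179–1232 drive a measurement run: verify the domain still exists (line 1192), pick a CPU from it, spawn the selected probe as a kthread bound to that CPU, and sleep on lock_thread_wq until thread_done is set. A condensed sketch; the sel selector and the rdtgroup_mutex locking around this code are upstream details reconstructed here:

	/* Inside pseudo_lock_measure_cycles(), with plr->d validated: */
	plr->thread_done = 0;
	cpu = cpumask_first(&plr->d->cpu_mask);
	plr->cpu = cpu;

	if (sel == 1)
		thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
						cpu_to_node(cpu),
						"pseudo_lock_measure/%u", cpu);
	else if (sel == 2)
		thread = kthread_create_on_node(measure_l2_residency, plr,
						cpu_to_node(cpu),
						"pseudo_lock_measure/%u", cpu);
	else	/* sel == 3 */
		thread = kthread_create_on_node(measure_l3_residency, plr,
						cpu_to_node(cpu),
						"pseudo_lock_measure/%u", cpu);
	if (IS_ERR(thread))
		goto out;

	kthread_bind(thread, cpu);
	wake_up_process(thread);

	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
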
1299 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_create() local
1305 ret = pseudo_lock_region_alloc(plr); in rdtgroup_pseudo_lock_create()
1309 ret = pseudo_lock_cstates_constrain(plr); in rdtgroup_pseudo_lock_create()
1315 plr->thread_done = 0; in rdtgroup_pseudo_lock_create()
1318 cpu_to_node(plr->cpu), in rdtgroup_pseudo_lock_create()
1319 "pseudo_lock/%u", plr->cpu); in rdtgroup_pseudo_lock_create()
1326 kthread_bind(thread, plr->cpu); in rdtgroup_pseudo_lock_create()
1329 ret = wait_event_interruptible(plr->lock_thread_wq, in rdtgroup_pseudo_lock_create()
1330 plr->thread_done == 1); in rdtgroup_pseudo_lock_create()
1362 plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, in rdtgroup_pseudo_lock_create()
1364 if (!IS_ERR_OR_NULL(plr->debugfs_dir)) in rdtgroup_pseudo_lock_create()
1366 plr->debugfs_dir, rdtgrp, in rdtgroup_pseudo_lock_create()
1389 plr->minor = new_minor; in rdtgroup_pseudo_lock_create()
1402 debugfs_remove_recursive(plr->debugfs_dir); in rdtgroup_pseudo_lock_create()
1405 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_create()
1407 pseudo_lock_region_clear(plr); in rdtgroup_pseudo_lock_create()
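
Lines 1299–1407 form the creation path, and the plr hits trace its order: allocate the region (1305), constrain C-states (1309), run pseudo_lock_fn on a kthread bound to plr->cpu (1315–1330), create the debugfs dir and the char-device minor (1362–1389), and on failure unwind through debugfs removal, cstates_relax and region_clear (1402–1407). A condensed sketch of the thread launch in the middle, patterned on upstream:

	plr->thread_done = 0;
	thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
					cpu_to_node(plr->cpu),
					"pseudo_lock/%u", plr->cpu);
	if (IS_ERR(thread)) {
		ret = PTR_ERR(thread);
		goto out_cstates;
	}

	kthread_bind(thread, plr->cpu);
	wake_up_process(thread);

	/* Interruptible wait: if the user signals us the locking thread
	 * may still be running, but creation is abandoned. */
	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
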
1428 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_remove() local
1439 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_remove()
1440 debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); in rdtgroup_pseudo_lock_remove()
1441 device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); in rdtgroup_pseudo_lock_remove()
1442 pseudo_lock_minor_release(plr->minor); in rdtgroup_pseudo_lock_remove()
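
Lines 1428–1442 are the symmetric teardown of everything rdtgroup_pseudo_lock_create() set up: relax the C-state constraints, drop the debugfs directory, destroy the char device, and release its minor. A sketch assembling those hits:

	/* Inside rdtgroup_pseudo_lock_remove(), mode checks elided: */
	pseudo_lock_cstates_relax(plr);
	debugfs_remove_recursive(plr->debugfs_dir);
	device_destroy(&pseudo_lock_class,
		       MKDEV(pseudo_lock_major, plr->minor));
	pseudo_lock_minor_release(plr->minor);
	/* upstream then frees the region itself via pseudo_lock_free() */
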
1501 struct pseudo_lock_region *plr; in pseudo_lock_dev_mmap() local
1515 plr = rdtgrp->plr; in pseudo_lock_dev_mmap()
1517 if (!plr->d) { in pseudo_lock_dev_mmap()
1528 if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { in pseudo_lock_dev_mmap()
1533 physical = __pa(plr->kmem) >> PAGE_SHIFT; in pseudo_lock_dev_mmap()
1534 psize = plr->size - off; in pseudo_lock_dev_mmap()
1536 if (off > plr->size) { in pseudo_lock_dev_mmap()
1555 memset(plr->kmem + off, 0, vsize); in pseudo_lock_dev_mmap()
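
Lines 1501–1555 validate an mmap request against the region: the calling task must be affine to the domain's CPUs (otherwise it could run on a CPU that cannot see the locked lines), the offset and length must fit inside plr->size, and the backing memory is zeroed before being handed out. A sketch of the checks; the remap_pfn_range() call that completes the mapping follows the upstream pattern:

	/* Inside pseudo_lock_dev_mmap(), with rdtgrp->plr resolved: */
	if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask))
		return -EINVAL;	/* task may run outside the locked domain */

	physical = __pa(plr->kmem) >> PAGE_SHIFT;
	psize = plr->size - off;

	if (off > plr->size)
		return -ENOSPC;
	if (vsize > psize)
		return -ENOSPC;	/* request larger than the region remainder */

	memset(plr->kmem + off, 0, vsize);

	if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
			    vsize, vma->vm_page_prot))
		return -EAGAIN;
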