Lines matching references to "work":
271 struct kwork_work *work; in work_search() local
275 work = container_of(node, struct kwork_work, node); in work_search()
276 cmp = work_cmp(sort_list, key, work); in work_search()
282 if (work->name == NULL) in work_search()
283 work->name = key->name; in work_search()
284 return work; in work_search()
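
The work_search() lines above show the lookup pattern used throughout this file: tree nodes are embedded inside struct kwork_work, each candidate is recovered with container_of(), and work_cmp() compares it against the key. Below is a minimal, self-contained sketch of that embedding/recovery pattern only; the struct layout and field names are simplified stand-ins, not the perf definitions.

    /*
     * container_of() recovers the enclosing struct from a pointer to an
     * embedded member, which is how work_search() turns a tree node back
     * into its kwork_work.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct node {                    /* stand-in for the tree node type */
            struct node *left, *right;
    };

    struct work {                    /* trimmed stand-in for kwork_work */
            int cpu;
            const char *name;
            struct node node;        /* embedded tree linkage */
    };

    int main(void)
    {
            struct work w = { .cpu = 3, .name = "RCU", .node = { NULL, NULL } };
            struct node *n = &w.node;            /* what a tree walk hands back */
            struct work *found = container_of(n, struct work, node);

            printf("cpu=%d name=%s\n", found->cpu, found->name);
            return 0;
    }
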
318 struct kwork_work *work = zalloc(sizeof(*work)); in work_new() local
320 if (work == NULL) { in work_new()
326 INIT_LIST_HEAD(&work->atom_list[i]); in work_new()
328 work->id = key->id; in work_new()
329 work->cpu = key->cpu; in work_new()
330 work->name = key->name; in work_new()
331 work->class = key->class; in work_new()
332 return work; in work_new()
339 struct kwork_work *work = work_search(root, key, sort_list); in work_findnew() local
341 if (work != NULL) in work_findnew()
342 return work; in work_findnew()
344 work = work_new(key); in work_findnew()
345 if (work) in work_findnew()
346 work_insert(root, work, sort_list); in work_findnew()
348 return work; in work_findnew()
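
work_findnew() above is the find-or-create wrapper: it searches first and only on a miss allocates a zeroed entry via work_new() (the zalloc() call shown above), copies the key fields, and links the new entry in with work_insert(). A sketch of that flow, with a plain singly linked list standing in for the tree and calloc() for zalloc():

    #include <stdlib.h>

    struct work {
            unsigned long id;
            const char *name;
            struct work *next;
    };

    /* linear search stands in for the tree walk in work_search() */
    static struct work *search(struct work *head, unsigned long id)
    {
            for (; head; head = head->next)
                    if (head->id == id)
                            return head;
            return NULL;
    }

    static struct work *findnew(struct work **head, unsigned long id,
                                const char *name)
    {
            struct work *w = search(*head, id);

            if (w)
                    return w;

            w = calloc(1, sizeof(*w));      /* zalloc(): zeroed allocation */
            if (!w)
                    return NULL;

            w->id = id;                     /* copy the key, as work_new() does */
            w->name = name;
            w->next = *head;                /* insert into the container */
            *head = w;
            return w;
    }

    int main(void)
    {
            struct work *root = NULL;
            struct work *a = findnew(&root, 9, "RCU");
            struct work *b = findnew(&root, 9, "RCU");  /* same entry returned */

            return (a && a == b) ? 0 : 1;
    }
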
365 struct kwork_work *work, in profile_event_match() argument
368 int cpu = work->cpu; in profile_event_match()
380 (work->name != NULL) && in profile_event_match()
381 (strcmp(work->name, kwork->profile_name) != 0)) in profile_event_match()
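
profile_event_match() above drops events whose CPU or work name falls outside the requested filters (the name filter is kwork->profile_name). The sketch below reproduces that gating logic; the CPU-list representation is an assumption, since perf keeps the requested CPUs in a bitmap rather than an array.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* return true when the work passes both the CPU and the name filter */
    static bool event_match(int cpu, const char *name,
                            const int *cpus, int nr_cpus,
                            const char *name_filter)
    {
            bool cpu_ok = (nr_cpus == 0);   /* no CPU filter: accept all */

            for (int i = 0; i < nr_cpus; i++)
                    if (cpus[i] == cpu)
                            cpu_ok = true;

            if (!cpu_ok)
                    return false;

            /* same shape as the strcmp() check in the listing */
            if (name_filter && name && strcmp(name, name_filter) != 0)
                    return false;

            return true;
    }

    int main(void)
    {
            int cpus[] = { 0, 2 };

            printf("%d\n", event_match(2, "RCU", cpus, 2, NULL));    /* 1 */
            printf("%d\n", event_match(1, "RCU", cpus, 2, "TIMER")); /* 0 */
            return 0;
    }
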
398 struct kwork_work *work, key; in work_push_atom() local
407 work = work_findnew(&class->work_root, &key, &kwork->cmp_id); in work_push_atom()
408 if (work == NULL) { in work_push_atom()
413 if (!profile_event_match(kwork, work, sample)) { in work_push_atom()
419 dst_atom = list_last_entry_or_null(&work->atom_list[dst_type], in work_push_atom()
428 *ret_work = work; in work_push_atom()
430 list_add_tail(&atom->list, &work->atom_list[src_type]); in work_push_atom()
445 struct kwork_work *work, key; in work_pop_atom() local
450 work = work_findnew(&class->work_root, &key, &kwork->cmp_id); in work_pop_atom()
452 *ret_work = work; in work_pop_atom()
454 if (work == NULL) in work_pop_atom()
457 if (!profile_event_match(kwork, work, sample)) in work_pop_atom()
460 atom = list_last_entry_or_null(&work->atom_list[dst_type], in work_pop_atom()
467 list_add_tail(&src_atom->list, &work->atom_list[src_type]); in work_pop_atom()
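
work_push_atom() and work_pop_atom() above pair events of different types: a new atom is appended to the work's source-type list with list_add_tail(), and the most recent atom of the destination type is looked up from the list tail with list_last_entry_or_null(). The sketch below keeps the same pairing idea with a fixed-size array per atom type instead of the kernel list helpers; the atom type names are illustrative.

    #include <stdio.h>

    enum atom_type { ATOM_RAISE, ATOM_ENTRY, ATOM_EXIT, ATOM_NR };

    struct atom {
            unsigned long long time;
    };

    struct work {
            struct atom atoms[ATOM_NR][16]; /* one "list" per atom type */
            int nr[ATOM_NR];
    };

    /* append to the source-type list, as list_add_tail() does */
    static void push_atom(struct work *w, enum atom_type src,
                          unsigned long long time)
    {
            if (w->nr[src] < 16)
                    w->atoms[src][w->nr[src]++] = (struct atom){ .time = time };
    }

    /* fetch the most recent destination-type atom, or NULL if none */
    static struct atom *last_atom(struct work *w, enum atom_type dst)
    {
            if (w->nr[dst] == 0)
                    return NULL;            /* list_last_entry_or_null() */
            return &w->atoms[dst][w->nr[dst] - 1];
    }

    int main(void)
    {
            struct work w = { .nr = { 0 } };

            push_atom(&w, ATOM_ENTRY, 1000);        /* handler entry seen */
            struct atom *entry = last_atom(&w, ATOM_ENTRY);

            if (entry)
                    printf("exit at t=2000 pairs with entry at t=%llu\n",
                           entry->time);
            return 0;
    }
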
476 static void report_update_exit_event(struct kwork_work *work, in report_update_exit_event() argument
486 if ((delta > work->max_runtime) || in report_update_exit_event()
487 (work->max_runtime == 0)) { in report_update_exit_event()
488 work->max_runtime = delta; in report_update_exit_event()
489 work->max_runtime_start = entry_time; in report_update_exit_event()
490 work->max_runtime_end = exit_time; in report_update_exit_event()
492 work->total_runtime += delta; in report_update_exit_event()
493 work->nr_atoms++; in report_update_exit_event()
515 struct kwork_work *work = NULL; in report_exit_event() local
519 machine, &work); in report_exit_event()
520 if (work == NULL) in report_exit_event()
524 report_update_exit_event(work, atom, sample); in report_exit_event()
531 static void latency_update_entry_event(struct kwork_work *work, in latency_update_entry_event() argument
541 if ((delta > work->max_latency) || in latency_update_entry_event()
542 (work->max_latency == 0)) { in latency_update_entry_event()
543 work->max_latency = delta; in latency_update_entry_event()
544 work->max_latency_start = raise_time; in latency_update_entry_event()
545 work->max_latency_end = entry_time; in latency_update_entry_event()
547 work->total_latency += delta; in latency_update_entry_event()
548 work->nr_atoms++; in latency_update_entry_event()
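
report_update_exit_event() and latency_update_entry_event() above share one accumulation shape: each completed interval updates the per-work maximum together with its start/end timestamps, adds to the running total, and bumps nr_atoms so an average can be computed later. A trimmed sketch of that shape (the field names follow the listing, the struct itself is a stand-in):

    #include <stdio.h>

    struct work_stats {
            unsigned long long max;         /* max_runtime / max_latency */
            unsigned long long max_start;
            unsigned long long max_end;
            unsigned long long total;       /* total_runtime / total_latency */
            unsigned long long nr_atoms;
    };

    static void update_stats(struct work_stats *s,
                             unsigned long long start, unsigned long long end)
    {
            unsigned long long delta = end - start;

            /* new maximum, or first sample (max still zero) */
            if (delta > s->max || s->max == 0) {
                    s->max = delta;
                    s->max_start = start;
                    s->max_end = end;
            }
            s->total += delta;
            s->nr_atoms++;
    }

    int main(void)
    {
            struct work_stats s = { 0 };

            update_stats(&s, 100, 450);     /* 350 ns interval */
            update_stats(&s, 600, 700);     /* 100 ns interval */
            printf("max=%llu total=%llu avg=%llu\n",
                   s.max, s.total, s.total / s.nr_atoms);
            return 0;
    }
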
570 struct kwork_work *work = NULL; in latency_entry_event() local
574 machine, &work); in latency_entry_event()
575 if (work == NULL) in latency_entry_event()
579 latency_update_entry_event(work, atom, sample); in latency_entry_event()
636 struct kwork_work *work, in timehist_print_event() argument
661 printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu); in timehist_print_event()
666 if (work->class && work->class->work_name) { in timehist_print_event()
667 work->class->work_name(work, kwork_name, in timehist_print_event()
729 struct kwork_work *work = NULL; in timehist_entry_event() local
733 machine, &work); in timehist_entry_event()
737 if (work != NULL) in timehist_entry_event()
750 struct kwork_work *work = NULL; in timehist_exit_event() local
763 machine, &work); in timehist_exit_event()
764 if (work == NULL) { in timehist_exit_event()
770 work->nr_atoms++; in timehist_exit_event()
771 timehist_print_event(kwork, work, atom, sample, &al); in timehist_exit_event()
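
timehist_print_event() above, like report_print_work() further down, formats the display name through a per-class callback: each work carries a pointer to its class, and the class supplies a work_name() hook. The sketch below shows that dispatch with a trimmed stand-in for the class/work structs; the demo class and its format string are assumptions, not the perf definitions.

    #include <stdio.h>

    struct kwork_work;

    struct kwork_class {                    /* trimmed stand-in */
            const char *name;
            void (*work_name)(struct kwork_work *work, char *buf, int len);
    };

    struct kwork_work {                     /* trimmed stand-in */
            int cpu;
            const char *name;
            const struct kwork_class *class;
    };

    static void demo_work_name(struct kwork_work *work, char *buf, int len)
    {
            snprintf(buf, len, "[%s]%s", work->class->name, work->name);
    }

    static const struct kwork_class demo_class = {
            .name = "demo",
            .work_name = demo_work_name,
    };

    int main(void)
    {
            char kwork_name[64];
            struct kwork_work work = {
                    .cpu = 1, .name = "TIMER", .class = &demo_class,
            };

            /* same guard as the listing before calling the hook */
            if (work.class && work.class->work_name) {
                    work.class->work_name(&work, kwork_name, sizeof(kwork_name));
                    printf("[%04d] %s\n", work.cpu, kwork_name);
            }
            return 0;
    }
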
825 struct kwork_work *work, in irq_work_init() argument
830 work->class = class; in irq_work_init()
831 work->cpu = sample->cpu; in irq_work_init()
832 work->id = evsel__intval(evsel, sample, "irq"); in irq_work_init()
833 work->name = evsel__strval(evsel, sample, "name"); in irq_work_init()
836 static void irq_work_name(struct kwork_work *work, char *buf, int len) in irq_work_name() argument
838 snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id); in irq_work_name()
944 struct kwork_work *work, in softirq_work_init() argument
951 work->id = num; in softirq_work_init()
952 work->class = class; in softirq_work_init()
953 work->cpu = sample->cpu; in softirq_work_init()
954 work->name = evsel__softirq_name(evsel, num); in softirq_work_init()
957 static void softirq_work_name(struct kwork_work *work, char *buf, int len) in softirq_work_name() argument
959 snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id); in softirq_work_name()
1035 struct kwork_work *work, in workqueue_work_init() argument
1044 work->class = class; in workqueue_work_init()
1045 work->cpu = sample->cpu; in workqueue_work_init()
1046 work->id = evsel__intval(evsel, sample, "work"); in workqueue_work_init()
1047 work->name = function_addr == 0 ? NULL : in workqueue_work_init()
1051 static void workqueue_work_name(struct kwork_work *work, char *buf, int len) in workqueue_work_name() argument
1053 if (work->name != NULL) in workqueue_work_name()
1054 snprintf(buf, len, "(w)%s", work->name); in workqueue_work_name()
1056 snprintf(buf, len, "(w)0x%" PRIx64, work->id); in workqueue_work_name()
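
The three work_name() helpers above define the naming conventions: irq work prints as "name:id", softirq work as "(s)name:id", and workqueue work as "(w)name", falling back to the raw id in hex when no function name was resolved (work->name == NULL). A standalone sketch of those helpers; the struct is a trimmed stand-in for struct kwork_work and the buffer handling is simplified.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct kwork_work {                     /* trimmed stand-in */
            uint64_t id;
            const char *name;
    };

    static void irq_work_name(struct kwork_work *work, char *buf, int len)
    {
            snprintf(buf, len, "%s:%" PRIu64, work->name, work->id);
    }

    static void softirq_work_name(struct kwork_work *work, char *buf, int len)
    {
            snprintf(buf, len, "(s)%s:%" PRIu64, work->name, work->id);
    }

    static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
    {
            if (work->name != NULL)
                    snprintf(buf, len, "(w)%s", work->name);
            else                            /* no symbol: print the id */
                    snprintf(buf, len, "(w)0x%" PRIx64, work->id);
    }

    int main(void)
    {
            char buf[64];
            struct kwork_work irq  = { .id = 35, .name = "eth0" };
            struct kwork_work sirq = { .id = 9,  .name = "RCU" };
            struct kwork_work wq   = { .id = 0xffff912345678ULL, .name = NULL };

            irq_work_name(&irq, buf, sizeof(buf));
            printf("%s\n", buf);            /* eth0:35 */
            softirq_work_name(&sirq, buf, sizeof(buf));
            printf("%s\n", buf);            /* (s)RCU:9 */
            workqueue_work_name(&wq, buf, sizeof(buf));
            printf("%s\n", buf);            /* (w)0xffff912345678 */
            return 0;
    }
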
1080 static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work) in report_print_work() argument
1092 if (work->class && work->class->work_name) { in report_print_work()
1093 work->class->work_name(work, kwork_name, in report_print_work()
1103 ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu); in report_print_work()
1111 (double)work->total_runtime / NSEC_PER_MSEC); in report_print_work()
1115 (double)work->total_latency / in report_print_work()
1116 work->nr_atoms / NSEC_PER_MSEC); in report_print_work()
1122 ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms); in report_print_work()
1128 timestamp__scnprintf_usec(work->max_runtime_start, in report_print_work()
1131 timestamp__scnprintf_usec(work->max_runtime_end, in report_print_work()
1136 (double)work->max_runtime / NSEC_PER_MSEC, in report_print_work()
1144 timestamp__scnprintf_usec(work->max_latency_start, in report_print_work()
1147 timestamp__scnprintf_usec(work->max_latency_end, in report_print_work()
1152 (double)work->max_latency / NSEC_PER_MSEC, in report_print_work()
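
report_print_work() above keeps every duration in nanoseconds and converts only at print time: total and maximum runtime/latency are divided by NSEC_PER_MSEC, and the average latency is total_latency / nr_atoms. A small sketch of that unit handling; the column widths and sample values are illustrative, not the PRINT_*_WIDTH constants.

    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000ULL

    int main(void)
    {
            unsigned long long total_runtime = 7350000;     /* ns */
            unsigned long long total_latency = 1200000;     /* ns */
            unsigned long long nr_atoms = 4;

            /* name | cpu | runtime (ms) | count | avg delay (ms) */
            printf(" %-18s | %03d | %10.3f | %6llu | %10.3f |\n",
                   "(s)RCU:9", 2,
                   (double)total_runtime / NSEC_PER_MSEC,
                   nr_atoms,
                   (double)total_latency / nr_atoms / NSEC_PER_MSEC);
            return 0;
    }
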
1442 struct kwork_work *work) in process_skipped_events() argument
1448 count = nr_list_entry(&work->atom_list[i]); in process_skipped_events()
1458 struct kwork_work *work = NULL; in perf_kwork_add_work() local
1460 work = work_new(key); in perf_kwork_add_work()
1461 if (work == NULL) in perf_kwork_add_work()
1464 work_insert(&class->work_root, work, &kwork->cmp_id); in perf_kwork_add_work()
1465 return work; in perf_kwork_add_work()
1510 struct kwork_work *work; in perf_kwork__report() local
1527 work = rb_entry(next, struct kwork_work, node); in perf_kwork__report()
1528 process_skipped_events(kwork, work); in perf_kwork__report()
1530 if (work->nr_atoms != 0) { in perf_kwork__report()
1531 report_print_work(kwork, work); in perf_kwork__report()
1533 kwork->all_runtime += work->total_runtime; in perf_kwork__report()
1534 kwork->all_count += work->nr_atoms; in perf_kwork__report()
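
The perf_kwork__report() lines above show the final aggregation: every sorted work item is visited (rb_entry() recovers it from its tree node), process_skipped_events() accounts for leftover atoms, items with nr_atoms == 0 are not printed, and all_runtime/all_count accumulate the grand totals. A sketch of that loop with an array standing in for the tree walk and made-up sample values:

    #include <stdio.h>

    struct work {                           /* trimmed stand-in */
            const char *name;
            unsigned long long total_runtime;       /* ns */
            unsigned long long nr_atoms;
    };

    int main(void)
    {
            struct work works[] = {
                    { "(s)TIMER:1", 5200000, 12 },
                    { "(w)flush_to_ldisc", 0, 0 },  /* no completed atoms */
                    { "eth0:35", 910000, 3 },
            };
            unsigned long long all_runtime = 0, all_count = 0;

            for (size_t i = 0; i < sizeof(works) / sizeof(works[0]); i++) {
                    if (works[i].nr_atoms == 0)
                            continue;               /* nothing to report */
                    printf("%-20s %10llu ns %6llu\n", works[i].name,
                           works[i].total_runtime, works[i].nr_atoms);
                    all_runtime += works[i].total_runtime;
                    all_count += works[i].nr_atoms;
            }
            printf("total: %llu ns over %llu atoms\n", all_runtime, all_count);
            return 0;
    }
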