/linux-5.19.10/tools/testing/radix-tree/
  regression1.c
    171  int nr_threads;  in regression1_test() local
    177  nr_threads = 2;  in regression1_test()
    178  pthread_barrier_init(&worker_barrier, NULL, nr_threads);  in regression1_test()
    180  threads = malloc(nr_threads * sizeof(pthread_t *));  in regression1_test()
    182  for (i = 0; i < nr_threads; i++) {  in regression1_test()
    190  for (i = 0; i < nr_threads; i++) {  in regression1_test()

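A note on the pattern above: regression1.c picks nr_threads = 2, sizes a pthread barrier to it, starts that many workers, and joins them. A minimal standalone sketch of the same start-together pattern (illustrative names, not the test's own code; build with cc -pthread):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>

    #define NR_THREADS 2

    static pthread_barrier_t worker_barrier;

    static void *worker(void *arg)
    {
        /* Every worker blocks here until NR_THREADS of them have arrived. */
        pthread_barrier_wait(&worker_barrier);
        printf("worker %ld running\n", (long)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t threads[NR_THREADS];
        long i;

        pthread_barrier_init(&worker_barrier, NULL, NR_THREADS);
        for (i = 0; i < NR_THREADS; i++)
            pthread_create(&threads[i], NULL, worker, (void *)i);
        for (i = 0; i < NR_THREADS; i++)
            pthread_join(threads[i], NULL);
        pthread_barrier_destroy(&worker_barrier);
        return 0;
    }
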
/linux-5.19.10/fs/xfs/
  xfs_pwork.c
    66  unsigned int nr_threads = 0;  in xfs_pwork_init() local
    70  nr_threads = xfs_globals.pwork_threads;  in xfs_pwork_init()
    72  trace_xfs_pwork_init(mp, nr_threads, current->pid);  in xfs_pwork_init()
    75  WQ_UNBOUND | WQ_SYSFS | WQ_FREEZABLE, nr_threads, tag,  in xfs_pwork_init()

  xfs_trace.h
    3794  TP_PROTO(struct xfs_mount *mp, unsigned int nr_threads, pid_t pid),
    3795  TP_ARGS(mp, nr_threads, pid),
    3798  __field(unsigned int, nr_threads)
    3803  __entry->nr_threads = nr_threads;
    3808  __entry->nr_threads, __entry->pid)

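In xfs_pwork_init() the value ends up as the max_active argument of alloc_workqueue(), where 0 means "let the workqueue core choose", and the xfs_trace.h tracepoint merely records it. A hedged, simplified sketch of that sizing idea (the demo_* names are hypothetical, not XFS code):

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *demo_wq;

    /*
     * Create an unbound workqueue whose concurrency is capped at nr_threads.
     * Passing 0 keeps the workqueue core's default limit.
     */
    static int demo_pwork_init(unsigned int nr_threads, const char *tag)
    {
        demo_wq = alloc_workqueue("demo_pwork/%s",
                                  WQ_UNBOUND | WQ_SYSFS | WQ_FREEZABLE,
                                  nr_threads, tag);
        return demo_wq ? 0 : -ENOMEM;
    }
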
/linux-5.19.10/tools/lib/perf/
  threadmap.c
    45  struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array)  in perf_thread_map__new_array() argument
    47  struct perf_thread_map *threads = thread_map__alloc(nr_threads);  in perf_thread_map__new_array()
    53  for (i = 0; i < nr_threads; i++)  in perf_thread_map__new_array()
    56  threads->nr = nr_threads;  in perf_thread_map__new_array()

  evlist.c
    307  int nr_threads = perf_thread_map__nr(evlist->threads);  in perf_evlist__alloc_pollfd() local
    315  nfds += nr_cpus * nr_threads;  in perf_evlist__alloc_pollfd()
    528  int nr_threads = perf_thread_map__nr(evlist->threads);  in mmap_per_thread() local
    534  __func__, nr_cpus, nr_threads);  in mmap_per_thread()
    537  for (thread = 0; thread < nr_threads; thread++, idx++) {  in mmap_per_thread()
    570  int nr_threads = perf_thread_map__nr(evlist->threads);  in mmap_per_cpu() local
    575  pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);  in mmap_per_cpu()
    581  for (thread = 0; thread < nr_threads; thread++) {  in mmap_per_cpu()

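perf_thread_map__new_array() builds a thread map from a caller-supplied TID array, and perf_thread_map__nr() (used throughout evlist.c above) reports its size. A minimal usage sketch against that libperf API (the TID values are made up; link with -lperf):

    #include <sys/types.h>
    #include <stdio.h>
    #include <perf/threadmap.h>

    int main(void)
    {
        pid_t tids[] = { 1234, 1235 };      /* hypothetical thread IDs */
        struct perf_thread_map *threads;

        threads = perf_thread_map__new_array(2, tids);
        if (!threads)
            return 1;

        printf("map holds %d threads\n", perf_thread_map__nr(threads));
        perf_thread_map__put(threads);      /* drop our reference */
        return 0;
    }
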
/linux-5.19.10/tools/testing/selftests/cgroup/
  test_kmem.c
    98  int nr_threads = 2 * get_nprocs();  in alloc_kmem_smp() local
    103  tinfo = calloc(nr_threads, sizeof(pthread_t));  in alloc_kmem_smp()
    107  for (i = 0; i < nr_threads; i++) {  in alloc_kmem_smp()
    115  for (i = 0; i < nr_threads; i++) {  in alloc_kmem_smp()
    250  int nr_threads = 1000;  in spawn_1000_threads() local
    256  tinfo = calloc(nr_threads, sizeof(pthread_t));  in spawn_1000_threads()
    260  for (i = 0; i < nr_threads; i++) {  in spawn_1000_threads()

/linux-5.19.10/tools/perf/bench/
  sched-pipe.c
    83  int nr_threads = 2;  in bench_sched_pipe() local
    101  for (t = 0; t < nr_threads; t++) {  in bench_sched_pipe()
    118  for (t = 0; t < nr_threads; t++) {  in bench_sched_pipe()
    125  for (t = 0; t < nr_threads; t++) {  in bench_sched_pipe()

  numa.c
    81  int nr_threads;  member
    172  OPT_INTEGER('t', "nr_threads" , &p0.nr_threads, "number of threads per process"),
    959  for (t = 0; t < g->p.nr_threads; t++) {  in count_process_nodes()
    964  task_nr = process_nr*g->p.nr_threads + t;  in count_process_nodes()
    998  for (t = 0; t < g->p.nr_threads; t++) {  in count_node_processes()
    1003  task_nr = p*g->p.nr_threads + t;  in count_node_processes()
    1210  if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)  in worker_thread()
    1253  val += do_work(process_data, g->p.bytes_process, thread_nr, g->p.nr_threads, l, val);  in worker_thread()
    1266  val += do_work(process_data, g->p.bytes_process_locked, thread_nr, g->p.nr_threads, l, val);  in worker_thread()
    1391  task_nr = process_nr*g->p.nr_threads;  in worker_process()
    [all …]

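perf bench numa flattens the (process, thread) pair into one index with task_nr = process_nr * nr_threads + t, so every worker owns a unique slot in the shared arrays. A small standalone illustration of that mapping and its inverse (not bench code):

    #include <stdio.h>

    int main(void)
    {
        int nr_proc = 3, nr_threads = 4;

        for (int process_nr = 0; process_nr < nr_proc; process_nr++) {
            for (int t = 0; t < nr_threads; t++) {
                int task_nr = process_nr * nr_threads + t;

                /* Division and modulo recover both coordinates. */
                printf("task %2d -> process %d, thread %d\n",
                       task_nr, task_nr / nr_threads, task_nr % nr_threads);
            }
        }
        return 0;
    }
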
/linux-5.19.10/kernel/power/
  swap.c
    691  unsigned thr, run_threads, nr_threads;  in save_image_lzo() local
    702  nr_threads = num_online_cpus() - 1;  in save_image_lzo()
    703  nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);  in save_image_lzo()
    712  data = vzalloc(array_size(nr_threads, sizeof(*data)));  in save_image_lzo()
    729  for (thr = 0; thr < nr_threads; thr++) {  in save_image_lzo()
    752  for (thr = 0; thr < nr_threads; thr++) {  in save_image_lzo()
    771  pr_info("Using %u thread(s) for compression\n", nr_threads);  in save_image_lzo()
    780  for (thr = 0; thr < nr_threads; thr++) {  in save_image_lzo()
    874  for (thr = 0; thr < nr_threads; thr++)  in save_image_lzo()
    1178  unsigned i, thr, run_threads, nr_threads;  in load_image_lzo() local
    [all …]

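save_image_lzo() sizes its compression pool from the CPU count: one fewer than the online CPUs, clamped into 1..LZO_THREADS. A hedged sketch of just that sizing step (DEMO_LZO_THREADS stands in for the real constant; this is not the hibernation code):

    #include <linux/cpumask.h>
    #include <linux/minmax.h>

    #define DEMO_LZO_THREADS 3      /* stand-in for LZO_THREADS */

    static unsigned int demo_nr_compress_threads(void)
    {
        unsigned int nr_threads;

        /* Leave one CPU for I/O, but always run at least one worker. */
        nr_threads = num_online_cpus() - 1;
        return clamp_val(nr_threads, 1, DEMO_LZO_THREADS);
    }
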
/linux-5.19.10/lib/
  test_vmalloc.c
    26  __param(int, nr_threads, 0,
    476  nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);  in init_test_configurtion()
    479  tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL);  in init_test_configurtion()
    508  for (i = 0; i < nr_threads; i++) {  in do_concurrent_test()
    535  for (i = 0; i < nr_threads; i++) {  in do_concurrent_test()

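test_vmalloc.c takes nr_threads as a module parameter (through its local __param() wrapper), clamps it, and allocates one per-worker slot with kvcalloc(). A hedged sketch of the same shape using the stock module_param() macro instead of the wrapper (demo_* names are illustrative):

    #include <linux/module.h>
    #include <linux/minmax.h>
    #include <linux/limits.h>
    #include <linux/slab.h>         /* kvcalloc(), kvfree() */

    static int nr_threads;
    module_param(nr_threads, int, 0444);
    MODULE_PARM_DESC(nr_threads, "number of test workers to start");

    struct demo_worker { int id; }; /* illustrative per-worker state */

    static struct demo_worker *demo_alloc_workers(void)
    {
        nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);
        return kvcalloc(nr_threads, sizeof(struct demo_worker), GFP_KERNEL);
    }
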
/linux-5.19.10/arch/s390/appldata/
  appldata_os.c
    65  u32 nr_threads; /* number of threads */  member
    101  os_data->nr_threads = nr_threads;  in appldata_get_os_data()

  appldata_base.c
    549  EXPORT_SYMBOL_GPL(nr_threads);

/linux-5.19.10/tools/perf/
  builtin-record.c
    171  int nr_threads;  member
    223  for (t = 0; t < rec->nr_threads; t++)  in record__bytes_written()
    1067  for (t = 0; t < rec->nr_threads; t++) {  in record__free_thread_data()
    1082  rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));  in record__alloc_thread_data()
    1089  for (t = 0; t < rec->nr_threads; t++)  in record__alloc_thread_data()
    1092  for (t = 0; t < rec->nr_threads; t++) {  in record__alloc_thread_data()
    2068  int t, tt, err, ret = 0, nr_threads = rec->nr_threads;  in record__start_threads() local
    2088  for (t = 1; t < nr_threads; t++) {  in record__start_threads()
    2134  for (t = 1; t < rec->nr_threads; t++)  in record__stop_threads()
    2137  for (t = 0; t < rec->nr_threads; t++) {  in record__stop_threads()
    [all …]

/linux-5.19.10/include/linux/sched/
  stat.h
    17  extern int nr_threads;

  signal.h
    81  atomic_t nr_threads;  member
    96  int nr_threads;  member
    706  return task->signal->nr_threads;  in get_nr_threads()

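These headers expose the two flavours of the counter: stat.h declares the global nr_threads (every thread on the system, maintained by fork()/exit() further down), while signal.h keeps a per-process count in signal_struct, read through get_nr_threads(). A hedged in-kernel sketch that reports both for a task (demo_report() is illustrative):

    #include <linux/sched.h>
    #include <linux/sched/signal.h> /* get_nr_threads() */
    #include <linux/sched/stat.h>   /* extern int nr_threads */
    #include <linux/printk.h>

    static void demo_report(struct task_struct *tsk)
    {
        pr_info("pid %d: %d threads in its group, %d system-wide\n",
                task_pid_nr(tsk), get_nr_threads(tsk), nr_threads);
    }
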
/linux-5.19.10/fs/proc/
  loadavg.c
    23  nr_running(), nr_threads,  in loadavg_proc_show()

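loadavg_proc_show() is where the global counter becomes user-visible: the fourth field of /proc/loadavg is nr_running()/nr_threads. A small userspace reader of that field:

    #include <stdio.h>

    int main(void)
    {
        double load1, load5, load15;
        int running, total;             /* "total" is the kernel's nr_threads */
        FILE *f = fopen("/proc/loadavg", "r");

        if (!f)
            return 1;
        if (fscanf(f, "%lf %lf %lf %d/%d",
                   &load1, &load5, &load15, &running, &total) == 5)
            printf("%d runnable of %d total threads\n", running, total);
        fclose(f);
        return 0;
    }
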
/linux-5.19.10/tools/lib/perf/include/perf/
  threadmap.h
    11  LIBPERF_API struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);

/linux-5.19.10/tools/tracing/latency/
  latency-collector.c
    77  static unsigned int nr_threads = DEFAULT_NR_PRINTER_THREADS;  variable
    1642  if (nr_threads > MAX_THREADS) {  in start_printthread()
    1645  nr_threads, MAX_THREADS);  in start_printthread()
    1646  nr_threads = MAX_THREADS;  in start_printthread()
    1648  for (i = 0; i < nr_threads; i++) {  in start_printthread()
    1931  nr_threads = value;  in scan_arguments()
    2040  policy_name(sched_policy), sched_pri, nr_threads);  in show_params()

/linux-5.19.10/init/
  init_task.c
    19  .nr_threads = 1,

/linux-5.19.10/drivers/dma/
  img-mdc-dma.c
    140  unsigned int nr_threads;  member
    924  mdma->nr_threads =  in mdc_dma_probe()
    1010  mdma->nr_channels, mdma->nr_threads);  in mdc_dma_probe()

/linux-5.19.10/kernel/
  exit.c
    76  nr_threads--;  in __unhash_process()
    149  sig->nr_threads--;  in __exit_signal()
    373  if (atomic_dec_and_test(&core_state->nr_threads))  in coredump_task_exit()

  fork.c
    127  int nr_threads; /* The idle threads do not count.. */  variable
    1695  sig->nr_threads = 1;  in copy_signal()
    2119  if (data_race(nr_threads >= max_threads))  in copy_process()
    2447  current->signal->nr_threads++;  in copy_process()
    2457  nr_threads++;  in copy_process()

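fork.c and exit.c are the writers of both counters: copy_process() refuses to clone once the global count reaches max_threads, then bumps current->signal->nr_threads and the global nr_threads, and __exit_signal()/__unhash_process() undo both when a thread dies. A much-simplified userspace analogue of that double bookkeeping (a mutex stands in for tasklist_lock; this is not the kernel code):

    #include <pthread.h>
    #include <errno.h>

    static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;
    static int nr_threads;                     /* system-wide count */
    static const int max_threads = 4096;       /* illustrative limit */

    struct signal_struct { int nr_threads; };  /* per-process count */

    static int demo_copy_process(struct signal_struct *sig)
    {
        int ret = 0;

        pthread_mutex_lock(&tasklist_lock);
        if (nr_threads >= max_threads) {
            ret = -EAGAIN;              /* the same refusal copy_process() makes */
        } else {
            sig->nr_threads++;          /* this process gained a thread */
            nr_threads++;               /* and so did the system */
        }
        pthread_mutex_unlock(&tasklist_lock);
        return ret;
    }

    static void demo_exit_thread(struct signal_struct *sig)
    {
        pthread_mutex_lock(&tasklist_lock);
        sig->nr_threads--;
        nr_threads--;
        pthread_mutex_unlock(&tasklist_lock);
    }
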
/linux-5.19.10/tools/lib/perf/Documentation/
  libperf.txt
    65  struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);

/linux-5.19.10/tools/perf/util/
  evsel.c
    1684  static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)  in evsel__remove_fd() argument
    1687  for (int thread = thread_idx; thread < nr_threads - 1; thread++)  in evsel__remove_fd()
    1693  int nr_threads, int thread_idx)  in update_fds() argument
    1697  if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)  in update_fds()
    1703  evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);  in update_fds()

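evsel__remove_fd() drops one thread's descriptor from every evsel by shifting the remaining per-thread entries left, after update_fds() has bounds-checked the indices. A simplified flat-array analogue of that shift (the real code walks perf's xyarray through its FD() accessor):

    /*
     * Remove column thread_idx from an nr_cpus x nr_threads table of fds
     * by shifting each row's tail one slot to the left.
     */
    static void demo_remove_fd(int *fd, int nr_cpus, int nr_threads, int thread_idx)
    {
        for (int cpu = 0; cpu < nr_cpus; cpu++) {
            int *row = &fd[cpu * nr_threads];

            for (int thread = thread_idx; thread < nr_threads - 1; thread++)
                row[thread] = row[thread + 1];
            row[nr_threads - 1] = -1;   /* last slot is no longer valid */
        }
    }
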
/linux-5.19.10/fs/
  coredump.c
    386  atomic_set(&core_state->nr_threads, nr);  in zap_threads()
