
Searched refs:nthreads (Results 1 – 25 of 34) sorted by relevance

/linux-6.1.9/tools/perf/bench/
futex-requeue.c
53 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
77 params.nthreads, in print_summary()
132 threads_starting = params.nthreads; in block_threads()
139 for (i = 0; i < params.nthreads; i++) { in block_threads()
189 if (!params.nthreads) in bench_futex_requeue()
190 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_requeue()
192 worker = calloc(params.nthreads, sizeof(*worker)); in bench_futex_requeue()
199 if (params.nrequeue > params.nthreads) in bench_futex_requeue()
200 params.nrequeue = params.nthreads; in bench_futex_requeue()
203 params.nrequeue = params.nthreads; in bench_futex_requeue()
[all …]
breakpoint.c
22 unsigned int nthreads; member
26 .nthreads = 1,
33 OPT_UINTEGER('t', "threads", &thread_params.nthreads, "Specify amount of threads"),
85 threads = calloc(thread_params.nthreads, sizeof(threads[0])); in breakpoint_thread()
91 for (i = 0; i < thread_params.nthreads; i++) { in breakpoint_thread()
96 futex_wake(&done, thread_params.nthreads, 0); in breakpoint_thread()
97 for (i = 0; i < thread_params.nthreads; i++) in breakpoint_thread()
149 (double)result_usec / bench_repeat / thread_params.nthreads); in bench_breakpoint_thread()
152 thread_params.nthreads * thread_params.nparallel); in bench_breakpoint_thread()
188 unsigned int i, nthreads, result_usec, done = 0; in bench_breakpoint_enable() local
[all …]
futex-wake.c
53 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
93 params.nthreads, in print_summary()
105 threads_starting = params.nthreads; in block_threads()
112 for (i = 0; i < params.nthreads; i++) { in block_threads()
164 if (!params.nthreads) in bench_futex_wake()
165 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_wake()
167 worker = calloc(params.nthreads, sizeof(*worker)); in bench_futex_wake()
176 getpid(), params.nthreads, params.fshared ? "shared":"private", in bench_futex_wake()
204 while (nwoken != params.nthreads) in bench_futex_wake()
215 j + 1, nwoken, params.nthreads, in bench_futex_wake()
[all …]
futex-wake-parallel.c
63 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
153 threads_starting = params.nthreads; in block_threads()
160 for (i = 0; i < params.nthreads; i++) { in block_threads()
197 params.nthreads, waketime_avg / USEC_PER_MSEC, in print_run()
212 params.nthreads, in print_summary()
266 if (!params.nthreads) in bench_futex_wake_parallel()
267 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_wake_parallel()
270 if (params.nwakes > params.nthreads || in bench_futex_wake_parallel()
272 params.nwakes = params.nthreads; in bench_futex_wake_parallel()
274 if (params.nthreads % params.nwakes) in bench_futex_wake_parallel()
[all …]
futex-lock-pi.c
48 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
129 threads_starting = params.nthreads; in create_threads()
135 for (i = 0; i < params.nthreads; i++) { in create_threads()
187 if (!params.nthreads) in bench_futex_lock_pi()
188 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_lock_pi()
190 worker = calloc(params.nthreads, sizeof(*worker)); in bench_futex_lock_pi()
198 getpid(), params.nthreads, params.runtime); in bench_futex_lock_pi()
205 threads_starting = params.nthreads; in bench_futex_lock_pi()
221 for (i = 0; i < params.nthreads; i++) { in bench_futex_lock_pi()
232 for (i = 0; i < params.nthreads; i++) { in bench_futex_lock_pi()
futex-hash.c
56 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
155 if (!params.nthreads) /* default to the number of CPUs */ in bench_futex_hash()
156 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_hash()
158 worker = calloc(params.nthreads, sizeof(*worker)); in bench_futex_hash()
166 … getpid(), params.nthreads, params.nfutexes, params.fshared ? "shared":"private", params.runtime); in bench_futex_hash()
173 threads_starting = params.nthreads; in bench_futex_hash()
182 for (i = 0; i < params.nthreads; i++) { in bench_futex_hash()
216 for (i = 0; i < params.nthreads; i++) { in bench_futex_hash()
227 for (i = 0; i < params.nthreads; i++) { in bench_futex_hash()
epoll-wait.c
91 static unsigned int nthreads = 0; variable
128 OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
317 for (i = 0; i < nthreads; i++) { in do_threads()
394 shuffle((void *)worker, nthreads, sizeof(*worker)); in writerfn()
397 for (i = 0; i < nthreads; i++) { in writerfn()
468 if (!nthreads) in bench_epoll_wait()
469 nthreads = perf_cpu_map__nr(cpu) - 1; in bench_epoll_wait()
471 worker = calloc(nthreads, sizeof(*worker)); in bench_epoll_wait()
478 rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50; in bench_epoll_wait()
486 getpid(), nthreads, oneshot ? " (EPOLLONESHOT semantics)": "", nfds, nsecs); in bench_epoll_wait()
[all …]
epoll-ctl.c
36 static unsigned int nthreads = 0; variable
75 OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
240 for (i = 0; i < nthreads; i++) { in do_threads()
349 if (!nthreads) in bench_epoll_ctl()
350 nthreads = perf_cpu_map__nr(cpu); in bench_epoll_ctl()
352 worker = calloc(nthreads, sizeof(*worker)); in bench_epoll_ctl()
358 rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50; in bench_epoll_ctl()
366 getpid(), nthreads, nfds, nsecs); in bench_epoll_ctl()
375 threads_starting = nthreads; in bench_epoll_ctl()
391 for (i = 0; i < nthreads; i++) { in bench_epoll_ctl()
[all …]
futex.h
24 unsigned int nthreads; member
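A pattern recurs across the perf bench hits above: when the -t/--threads option is left at 0, nthreads defaults to one thread per CPU, and a per-thread worker array is then sized with calloc(). Below is a minimal standalone sketch of that sizing logic only; it is not the perf code itself, and sysconf() plus the struct worker here are portable stand-ins for perf_cpu_map__nr() and the benchmark-specific worker types.

```c
/*
 * Minimal sketch (not the perf code) of the sizing pattern shared by the
 * futex/epoll benchmarks above: an unset thread count defaults to one thread
 * per CPU, then a per-thread worker array is allocated to match.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct worker { unsigned int tid; };            /* stand-in for the bench worker types */

static unsigned int nthreads;                   /* 0 means "default to the number of CPUs" */

int main(void)
{
	struct worker *worker;
	unsigned int i;

	if (!nthreads)                          /* default to the number of CPUs */
		nthreads = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN);

	worker = calloc(nthreads, sizeof(*worker));
	if (!worker)
		return EXIT_FAILURE;

	for (i = 0; i < nthreads; i++)          /* one slot per benchmark thread */
		worker[i].tid = i;

	printf("sized for %u threads\n", nthreads);
	free(worker);
	return EXIT_SUCCESS;
}
```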
/linux-6.1.9/tools/testing/selftests/vm/
migration.c
26 int nthreads; in FIXTURE() local
36 self->nthreads = numa_num_task_cpus() - 1; in FIXTURE_SETUP()
50 self->threads = malloc(self->nthreads * sizeof(*self->threads)); in FIXTURE_SETUP()
52 self->pids = malloc(self->nthreads * sizeof(*self->pids)); in FIXTURE_SETUP()
119 if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
127 for (i = 0; i < self->nthreads - 1; i++)
132 for (i = 0; i < self->nthreads - 1; i++)
145 if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
153 for (i = 0; i < self->nthreads - 1; i++) {
162 for (i = 0; i < self->nthreads - 1; i++)
[all …]
gup_test.c
93 int filed, i, opt, nr_pages = 1, thp = -1, write = 1, nthreads = 1, ret; in main() local
130 nthreads = atoi(optarg); in main()
258 tid = malloc(sizeof(pthread_t) * nthreads); in main()
260 for (i = 0; i < nthreads; i++) { in main()
264 for (i = 0; i < nthreads; i++) { in main()
/linux-6.1.9/tools/perf/util/
counts.c
10 struct perf_counts *perf_counts__new(int ncpus, int nthreads) in perf_counts__new() argument
17 values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values)); in perf_counts__new()
25 values = xyarray__new(ncpus, nthreads, sizeof(bool)); in perf_counts__new()
62 int nthreads = perf_thread_map__nr(evsel->core.threads); in evsel__alloc_counts() local
64 evsel->counts = perf_counts__new(perf_cpu_map__nr(cpus), nthreads); in evsel__alloc_counts()
stat.c
162 int nthreads = perf_thread_map__nr(evsel->core.threads); in evsel__alloc_prev_raw_counts() local
165 counts = perf_counts__new(cpu_map_nr, nthreads); in evsel__alloc_prev_raw_counts()
241 int idx, nthreads = perf_thread_map__nr(evsel->core.threads); in evsel__copy_prev_raw_counts() local
243 for (int thread = 0; thread < nthreads; thread++) { in evsel__copy_prev_raw_counts()
412 int nthreads = perf_thread_map__nr(counter->core.threads); in process_counter_maps() local
416 for (thread = 0; thread < nthreads; thread++) { in process_counter_maps()
counts.h
38 struct perf_counts *perf_counts__new(int ncpus, int nthreads);
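The counts.c and counts.h hits above show perf_counts__new() building an xyarray sized ncpus by nthreads, i.e. one value slot per (CPU, thread) pair. The sketch below illustrates that layout with a flat allocation and an index helper; it is an illustrative stand-in, not the xyarray implementation, and the counts_new()/counts_at() names are invented for this example.

```c
/*
 * Illustrative sketch (not the real xyarray) of the ncpus x nthreads layout
 * behind perf_counts__new(): one counter-value slot per (CPU, thread) pair,
 * stored flat and addressed as cpu * nthreads + thread.
 */
#include <stdio.h>
#include <stdlib.h>

struct counts_values { unsigned long long val; };

struct counts {
	int ncpus, nthreads;
	struct counts_values *values;           /* ncpus * nthreads slots */
};

static struct counts *counts_new(int ncpus, int nthreads)
{
	struct counts *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->ncpus = ncpus;
	c->nthreads = nthreads;
	c->values = calloc((size_t)ncpus * (size_t)nthreads, sizeof(*c->values));
	if (!c->values) {
		free(c);
		return NULL;
	}
	return c;
}

static struct counts_values *counts_at(struct counts *c, int cpu, int thread)
{
	return &c->values[(size_t)cpu * (size_t)c->nthreads + (size_t)thread];
}

int main(void)
{
	struct counts *c = counts_new(4, 2);    /* 4 CPUs, 2 threads */

	if (!c)
		return EXIT_FAILURE;
	counts_at(c, 3, 1)->val = 42;           /* last (CPU, thread) slot */
	printf("%llu\n", counts_at(c, 3, 1)->val);
	free(c->values);
	free(c);
	return EXIT_SUCCESS;
}
```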
/linux-6.1.9/arch/powerpc/platforms/pseries/
hotplug-cpu.c
152 static int find_cpu_id_range(unsigned int nthreads, int assigned_node, in find_cpu_id_range() argument
163 for (cpu = 0; cpu < nthreads; cpu++) in find_cpu_id_range()
192 cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads); in find_cpu_id_range()
212 int len, nthreads, node, cpu, assigned_node; in pseries_add_processor() local
221 nthreads = len / sizeof(u32); in pseries_add_processor()
239 rc = find_cpu_id_range(nthreads, node, &cpu_mask); in pseries_add_processor()
245 rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask); in pseries_add_processor()
271 cpu, cpu + nthreads - 1); in pseries_add_processor()
295 int len, nthreads, i; in pseries_remove_processor() local
303 nthreads = len / sizeof(u32); in pseries_remove_processor()
[all …]
/linux-6.1.9/kernel/
scftorture.c
53 torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
161 for (i = 0; i < nthreads; i++) { in scf_torture_stats_print()
499 …verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stut… in scftorture_print_module_parms()
514 if (nthreads && scf_stats_p) in scf_torture_cleanup()
515 for (i = 0; i < nthreads; i++) in scf_torture_cleanup()
621 if (nthreads < 0) in scf_torture_init()
622 nthreads = num_online_cpus(); in scf_torture_init()
623 scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL); in scf_torture_init()
630 VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads); in scf_torture_init()
632 atomic_set(&n_started, nthreads); in scf_torture_init()
[all …]
/linux-6.1.9/kernel/kcsan/
kcsan_test.c
1375 long nthreads = (long)prev; in nthreads_gen_params() local
1377 if (nthreads < 0 || nthreads >= 32) in nthreads_gen_params()
1378 nthreads = 0; /* stop */ in nthreads_gen_params()
1379 else if (!nthreads) in nthreads_gen_params()
1380 nthreads = 2; /* initial value */ in nthreads_gen_params()
1381 else if (nthreads < 5) in nthreads_gen_params()
1382 nthreads++; in nthreads_gen_params()
1383 else if (nthreads == 5) in nthreads_gen_params()
1384 nthreads = 8; in nthreads_gen_params()
1386 nthreads *= 2; in nthreads_gen_params()
[all …]
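The kcsan_test.c hits above come from nthreads_gen_params(), which steps the test's thread count through a fixed progression. Below is a standalone reproduction of just the stepping logic visible in the hits, to make the produced sequence explicit (2, 3, 4, 5, 8, 16, 32, then stop); the surrounding KUnit parameter plumbing and any CPU-count limits are not reproduced here.

```c
/*
 * Standalone reproduction of the thread-count stepping shown in the
 * kcsan_test.c hits above. Prints: 2 3 4 5 8 16 32
 */
#include <stdio.h>

static long next_nthreads(long nthreads)
{
	if (nthreads < 0 || nthreads >= 32)
		nthreads = 0;           /* stop */
	else if (!nthreads)
		nthreads = 2;           /* initial value */
	else if (nthreads < 5)
		nthreads++;
	else if (nthreads == 5)
		nthreads = 8;
	else
		nthreads *= 2;
	return nthreads;
}

int main(void)
{
	for (long n = next_nthreads(0); n; n = next_nthreads(n))
		printf("%ld ", n);
	printf("\n");
	return 0;
}
```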
/linux-6.1.9/kernel/locking/
test-ww_mutex.c
315 static int __test_cycle(unsigned int nthreads) in __test_cycle() argument
318 unsigned int n, last = nthreads - 1; in __test_cycle()
321 cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL); in __test_cycle()
325 for (n = 0; n < nthreads; n++) { in __test_cycle()
344 for (n = 0; n < nthreads; n++) in __test_cycle()
350 for (n = 0; n < nthreads; n++) { in __test_cycle()
357 n, nthreads, cycle->result); in __test_cycle()
362 for (n = 0; n < nthreads; n++) in __test_cycle()
564 static int stress(int nlocks, int nthreads, unsigned int flags) in stress() argument
576 for (n = 0; nthreads; n++) { in stress()
[all …]
/linux-6.1.9/tools/lib/perf/
evsel.c
52 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_fd() argument
54 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); in perf_evsel__alloc_fd()
60 for (thread = 0; thread < nthreads; thread++) { in perf_evsel__alloc_fd()
72 static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_mmap() argument
74 evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap)); in perf_evsel__alloc_mmap()
513 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_id() argument
515 if (ncpus == 0 || nthreads == 0) in perf_evsel__alloc_id()
518 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); in perf_evsel__alloc_id()
522 evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); in perf_evsel__alloc_id()
/linux-6.1.9/fs/nfsd/
nfssvc.c
690 int nfsd_get_nrthreads(int n, int *nthreads, struct net *net) in nfsd_get_nrthreads() argument
697 nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads; in nfsd_get_nrthreads()
726 int nfsd_set_nrthreads(int n, int *nthreads, struct net *net) in nfsd_set_nrthreads() argument
744 nthreads[i] = min(nthreads[i], NFSD_MAXSERVS); in nfsd_set_nrthreads()
745 tot += nthreads[i]; in nfsd_set_nrthreads()
750 int new = nthreads[i] * NFSD_MAXSERVS / tot; in nfsd_set_nrthreads()
751 tot -= (nthreads[i] - new); in nfsd_set_nrthreads()
752 nthreads[i] = new; in nfsd_set_nrthreads()
755 nthreads[i]--; in nfsd_set_nrthreads()
764 if (nthreads[0] == 0) in nfsd_set_nrthreads()
[all …]
nfsctl.c
467 int *nthreads; in write_pool_threads() local
483 nthreads = kcalloc(npools, sizeof(int), GFP_KERNEL); in write_pool_threads()
485 if (nthreads == NULL) in write_pool_threads()
490 rv = get_int(&mesg, &nthreads[i]); in write_pool_threads()
496 if (nthreads[i] < 0) in write_pool_threads()
499 rv = nfsd_set_nrthreads(i, nthreads, net); in write_pool_threads()
504 rv = nfsd_get_nrthreads(npools, nthreads, net); in write_pool_threads()
511 snprintf(mesg, size, "%d%c", nthreads[i], (i == npools-1 ? '\n' : ' ')); in write_pool_threads()
518 kfree(nthreads); in write_pool_threads()
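In the nfsd hits above, nfsd_set_nrthreads() clamps each per-pool request to NFSD_MAXSERVS, sums them, and then scales the requests back when the total is too large, using the new = nthreads[i] * NFSD_MAXSERVS / tot arithmetic shown in the hits. The sketch below illustrates that clamp-and-rescale step only; the guard around the rescale loop, the MAXSERVS value, the pool-0 minimum of one thread, and the example pool counts are assumptions, while the clamp and the scaling arithmetic are taken from the hits.

```c
/*
 * Sketch of the per-pool clamp-and-rescale visible in the nfsd_set_nrthreads()
 * hits above. MAXSERVS, the rescale guard, and the pool-0 fallback value are
 * assumptions for illustration.
 */
#include <stdio.h>

#define MAXSERVS 8192                           /* illustrative stand-in for NFSD_MAXSERVS */

static void set_nrthreads(int n, int *nthreads)
{
	int i, tot = 0;

	for (i = 0; i < n; i++) {
		if (nthreads[i] > MAXSERVS)     /* clamp each pool's request */
			nthreads[i] = MAXSERVS;
		tot += nthreads[i];
	}

	if (tot > MAXSERVS) {                   /* assumed guard: rescale only when over the cap */
		for (i = 0; i < n; i++) {
			int new = nthreads[i] * MAXSERVS / tot;

			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
	}

	if (nthreads[0] == 0)                   /* the hits show this check; keeping one thread */
		nthreads[0] = 1;                /* in pool 0 is the assumed resolution */
}

int main(void)
{
	int pools[3] = { 6000, 4000, 2000 };

	set_nrthreads(3, pools);
	printf("%d %d %d\n", pools[0], pools[1], pools[2]);     /* prints: 4096 3245 1753 */
	return 0;
}
```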
/linux-6.1.9/tools/lib/perf/include/internal/
evsel.h
73 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
79 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
/linux-6.1.9/arch/powerpc/kernel/
setup-common.c
447 int nthreads = 1; in smp_setup_cpu_maps() local
480 nthreads = len / sizeof(int); in smp_setup_cpu_maps()
482 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { in smp_setup_cpu_maps()
508 nthreads = 1; in smp_setup_cpu_maps()
533 maxcpus *= nthreads; in smp_setup_cpu_maps()
559 cpu_init_thread_core_maps(nthreads); in smp_setup_cpu_maps()
prom.c
329 int i, nthreads; in early_init_dt_scan_cpus() local
343 nthreads = len / sizeof(int); in early_init_dt_scan_cpus()
349 for (i = 0; i < nthreads; i++) { in early_init_dt_scan_cpus()
409 if (nthreads == 1) in early_init_dt_scan_cpus()
/linux-6.1.9/tools/testing/selftests/filesystems/binderfs/
binderfs_test.c
403 int i, j, k, nthreads; in TEST() local
453 nthreads = get_nprocs_conf(); in TEST()
454 if (nthreads > DEFAULT_THREADS) in TEST()
455 nthreads = DEFAULT_THREADS; in TEST()
460 for (i = 0; i < nthreads; i++) { in TEST()
