/linux-6.6.21/tools/perf/bench/ |
D | futex-requeue.c |
      53  OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
      77  params.nthreads,  in print_summary()
     131  threads_starting = params.nthreads;  in block_threads()
     138  for (i = 0; i < params.nthreads; i++) {  in block_threads()
     191  if (!params.nthreads)  in bench_futex_requeue()
     192  params.nthreads = perf_cpu_map__nr(cpu);  in bench_futex_requeue()
     194  worker = calloc(params.nthreads, sizeof(*worker));  in bench_futex_requeue()
     201  if (params.nrequeue > params.nthreads)  in bench_futex_requeue()
     202  params.nrequeue = params.nthreads;  in bench_futex_requeue()
     205  params.nrequeue = params.nthreads;  in bench_futex_requeue()
     [all …]
|
D | breakpoint.c |
      22  unsigned int nthreads;  member
      26  .nthreads = 1,
      33  OPT_UINTEGER('t', "threads", &thread_params.nthreads, "Specify amount of threads"),
      91  threads = calloc(thread_params.nthreads, sizeof(threads[0]));  in breakpoint_thread()
      97  for (i = 0; i < thread_params.nthreads; i++) {  in breakpoint_thread()
     102  futex_wake(&done, thread_params.nthreads, 0);  in breakpoint_thread()
     103  for (i = 0; i < thread_params.nthreads; i++)  in breakpoint_thread()
     161  (double)result_usec / bench_repeat / thread_params.nthreads);  in bench_breakpoint_thread()
     164  thread_params.nthreads * thread_params.nparallel);  in bench_breakpoint_thread()
     200  unsigned int i, nthreads, result_usec, done = 0;  in bench_breakpoint_enable()  local
     [all …]
|
D | futex-wake.c |
      53  OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
      93  params.nthreads,  in print_summary()
     104  threads_starting = params.nthreads;  in block_threads()
     111  for (i = 0; i < params.nthreads; i++) {  in block_threads()
     166  if (!params.nthreads)  in bench_futex_wake()
     167  params.nthreads = perf_cpu_map__nr(cpu);  in bench_futex_wake()
     169  worker = calloc(params.nthreads, sizeof(*worker));  in bench_futex_wake()
     178  getpid(), params.nthreads, params.fshared ? "shared":"private",  in bench_futex_wake()
     205  while (nwoken != params.nthreads)  in bench_futex_wake()
     216  j + 1, nwoken, params.nthreads,  in bench_futex_wake()
     [all …]
|
D | futex-wake-parallel.c |
      63  OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
     155  threads_starting = params.nthreads;  in block_threads()
     162  for (i = 0; i < params.nthreads; i++) {  in block_threads()
     203  params.nthreads, waketime_avg / USEC_PER_MSEC,  in print_run()
     218  params.nthreads,  in print_summary()
     271  if (!params.nthreads)  in bench_futex_wake_parallel()
     272  params.nthreads = perf_cpu_map__nr(cpu);  in bench_futex_wake_parallel()
     275  if (params.nwakes > params.nthreads ||  in bench_futex_wake_parallel()
     277  params.nwakes = params.nthreads;  in bench_futex_wake_parallel()
     279  if (params.nthreads % params.nwakes)  in bench_futex_wake_parallel()
     [all …]
|
D | futex-lock-pi.c |
      48  OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
     128  threads_starting = params.nthreads;  in create_threads()
     134  for (i = 0; i < params.nthreads; i++) {  in create_threads()
     189  if (!params.nthreads)  in bench_futex_lock_pi()
     190  params.nthreads = perf_cpu_map__nr(cpu);  in bench_futex_lock_pi()
     192  worker = calloc(params.nthreads, sizeof(*worker));  in bench_futex_lock_pi()
     200  getpid(), params.nthreads, params.runtime);  in bench_futex_lock_pi()
     207  threads_starting = params.nthreads;  in bench_futex_lock_pi()
     221  for (i = 0; i < params.nthreads; i++) {  in bench_futex_lock_pi()
     232  for (i = 0; i < params.nthreads; i++) {  in bench_futex_lock_pi()
|
D | futex-hash.c |
      56  OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
     155  if (!params.nthreads) /* default to the number of CPUs */  in bench_futex_hash()
     156  params.nthreads = perf_cpu_map__nr(cpu);  in bench_futex_hash()
     158  worker = calloc(params.nthreads, sizeof(*worker));  in bench_futex_hash()
     166  … getpid(), params.nthreads, params.nfutexes, params.fshared ? "shared":"private", params.runtime);  in bench_futex_hash()
     173  threads_starting = params.nthreads;  in bench_futex_hash()
     182  for (i = 0; i < params.nthreads; i++) {  in bench_futex_hash()
     216  for (i = 0; i < params.nthreads; i++) {  in bench_futex_hash()
     227  for (i = 0; i < params.nthreads; i++) {  in bench_futex_hash()
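A setup pattern repeats across the futex benchmarks above (the epoll benchmarks below follow the same shape): when -t/--threads is not given, nthreads defaults to the number of available CPUs via perf_cpu_map__nr(), and a worker array is then calloc'd to that size. The following is only a minimal userspace sketch of that shape, not perf's code: sysconf() stands in for the perf CPU map, and struct worker is a hypothetical placeholder.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Hypothetical stand-in for the per-benchmark worker struct. */
    struct worker {
        pthread_t thread;
        unsigned long ops;
    };

    static unsigned int nthreads;   /* 0 means -t was not given */

    int main(void)
    {
        struct worker *worker;
        long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

        /* Default to one worker per online CPU, the plain-libc analogue
         * of the perf_cpu_map__nr(cpu) default used by the benchmarks. */
        if (!nthreads)
            nthreads = ncpus > 0 ? (unsigned int)ncpus : 1;

        worker = calloc(nthreads, sizeof(*worker));
        if (!worker) {
            perror("calloc");
            return EXIT_FAILURE;
        }

        printf("would start %u worker threads\n", nthreads);
        free(worker);
        return EXIT_SUCCESS;
    }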
|
D | epoll-wait.c |
      91  static unsigned int nthreads = 0;  variable
     128  OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
     317  for (i = 0; i < nthreads; i++) {  in do_threads()
     394  shuffle((void *)worker, nthreads, sizeof(*worker));  in writerfn()
     397  for (i = 0; i < nthreads; i++) {  in writerfn()
     468  if (!nthreads)  in bench_epoll_wait()
     469  nthreads = perf_cpu_map__nr(cpu) - 1;  in bench_epoll_wait()
     471  worker = calloc(nthreads, sizeof(*worker));  in bench_epoll_wait()
     478  rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;  in bench_epoll_wait()
     486  getpid(), nthreads, oneshot ? " (EPOLLONESHOT semantics)": "", nfds, nsecs);  in bench_epoll_wait()
     [all …]
|
D | epoll-ctl.c |
      36  static unsigned int nthreads = 0;  variable
      75  OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
     240  for (i = 0; i < nthreads; i++) {  in do_threads()
     349  if (!nthreads)  in bench_epoll_ctl()
     350  nthreads = perf_cpu_map__nr(cpu);  in bench_epoll_ctl()
     352  worker = calloc(nthreads, sizeof(*worker));  in bench_epoll_ctl()
     358  rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;  in bench_epoll_ctl()
     366  getpid(), nthreads, nfds, nsecs);  in bench_epoll_ctl()
     375  threads_starting = nthreads;  in bench_epoll_ctl()
     391  for (i = 0; i < nthreads; i++) {  in bench_epoll_ctl()
     [all …]
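Both epoll benchmarks also raise the open-file limit before creating their descriptors, sizing it as nfds * nthreads * 2 plus a margin of 50. Below is a small sketch of just that setrlimit() step; note that raising the hard limit above its current value generally needs CAP_SYS_RESOURCE, and the example values in main() are arbitrary.

    #include <stdio.h>
    #include <sys/resource.h>

    /* Mirror the "nfds * nthreads * 2 + 50" file-descriptor budget above. */
    static int raise_fd_limit(unsigned int nfds, unsigned int nthreads)
    {
        struct rlimit rl;

        rl.rlim_cur = rl.rlim_max = (rlim_t)nfds * nthreads * 2 + 50;
        if (setrlimit(RLIMIT_NOFILE, &rl) < 0) {
            perror("setrlimit(RLIMIT_NOFILE)");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        /* Example budget: 64 fds per worker, 8 workers. */
        return raise_fd_limit(64, 8) ? 1 : 0;
    }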
|
/linux-6.6.21/tools/testing/selftests/mm/ |
D | migration.c |
      27  int nthreads;  in FIXTURE()  local
      37  self->nthreads = numa_num_task_cpus() - 1;  in FIXTURE_SETUP()
      51  self->threads = malloc(self->nthreads * sizeof(*self->threads));  in FIXTURE_SETUP()
      53  self->pids = malloc(self->nthreads * sizeof(*self->pids));  in FIXTURE_SETUP()
     123  if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
     131  for (i = 0; i < self->nthreads - 1; i++)
     136  for (i = 0; i < self->nthreads - 1; i++)
     149  if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
     157  for (i = 0; i < self->nthreads - 1; i++) {
     171  for (i = 0; i < self->nthreads - 1; i++)
     [all …]
|
D | gup_test.c |
      92  int filed, i, opt, nr_pages = 1, thp = -1, write = 1, nthreads = 1, ret;  in main()  local
     129  nthreads = atoi(optarg);  in main()
     257  tid = malloc(sizeof(pthread_t) * nthreads);  in main()
     259  for (i = 0; i < nthreads; i++) {  in main()
     263  for (i = 0; i < nthreads; i++) {  in main()
|
/linux-6.6.21/tools/perf/util/ |
D | counts.c |
      10  struct perf_counts *perf_counts__new(int ncpus, int nthreads)  in perf_counts__new()  argument
      17  values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));  in perf_counts__new()
      25  values = xyarray__new(ncpus, nthreads, sizeof(bool));  in perf_counts__new()
      61  int nthreads = perf_thread_map__nr(evsel->core.threads);  in evsel__alloc_counts()  local
      63  evsel->counts = perf_counts__new(perf_cpu_map__nr(cpus), nthreads);  in evsel__alloc_counts()
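perf_counts__new() backs each event with an ncpus-by-nthreads matrix allocated through xyarray__new(). The sketch below shows the same idea with a plain flat array; struct counts_values and the helper names here are simplifications rather than perf's types, and indexing is row-major by CPU, then thread.

    #include <stdlib.h>

    /* Simplified stand-in for perf's per-event counter cell. */
    struct counts_values {
        unsigned long long val, ena, run;
    };

    /* Flat (ncpus x nthreads) matrix, roughly what xyarray__new() provides. */
    struct counts_matrix {
        int ncpus, nthreads;
        struct counts_values *values;
    };

    struct counts_matrix *counts_matrix__new(int ncpus, int nthreads)
    {
        struct counts_matrix *m = calloc(1, sizeof(*m));

        if (!m)
            return NULL;
        m->ncpus = ncpus;
        m->nthreads = nthreads;
        m->values = calloc((size_t)ncpus * nthreads, sizeof(*m->values));
        if (!m->values) {
            free(m);
            return NULL;
        }
        return m;
    }

    /* One cell per (cpu index, thread index) pair. */
    struct counts_values *counts_matrix__entry(struct counts_matrix *m,
                                               int cpu, int thread)
    {
        return &m->values[(size_t)cpu * m->nthreads + thread];
    }

A caller would size such a matrix from the CPU and thread maps, as evsel__alloc_counts() does above with perf_cpu_map__nr() and perf_thread_map__nr().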
|
D | stat.c |
     157  int nthreads = perf_thread_map__nr(evsel->core.threads);  in evsel__alloc_prev_raw_counts()  local
     160  counts = perf_counts__new(cpu_map_nr, nthreads);  in evsel__alloc_prev_raw_counts()
     249  int idx, nthreads = perf_thread_map__nr(evsel->core.threads);  in evsel__copy_prev_raw_counts()  local
     251  for (int thread = 0; thread < nthreads; thread++) {  in evsel__copy_prev_raw_counts()
     464  int nthreads = perf_thread_map__nr(counter->core.threads);  in process_counter_maps()  local
     468  for (thread = 0; thread < nthreads; thread++) {  in process_counter_maps()
|
D | counts.h | 37 struct perf_counts *perf_counts__new(int ncpus, int nthreads);
|
/linux-6.6.21/arch/powerpc/platforms/pseries/ |
D | hotplug-cpu.c |
     153  static int find_cpu_id_range(unsigned int nthreads, int assigned_node,  in find_cpu_id_range()  argument
     164  for (cpu = 0; cpu < nthreads; cpu++)  in find_cpu_id_range()
     193  cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);  in find_cpu_id_range()
     213  int len, nthreads, node, cpu, assigned_node;  in pseries_add_processor()  local
     222  nthreads = len / sizeof(u32);  in pseries_add_processor()
     240  rc = find_cpu_id_range(nthreads, node, &cpu_mask);  in pseries_add_processor()
     246  rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask);  in pseries_add_processor()
     272  cpu, cpu + nthreads - 1);  in pseries_add_processor()
     296  int len, nthreads, i;  in pseries_remove_processor()  local
     304  nthreads = len / sizeof(u32);  in pseries_remove_processor()
     [all …]
|
/linux-6.6.21/tools/testing/selftests/timers/ |
D | posix_timers.c |
     214  const int nthreads = 10;  in check_timer_distribution()  local
     215  pthread_t threads[nthreads];  in check_timer_distribution()
     226  remain = nthreads + 1; /* worker threads + this thread */  in check_timer_distribution()
     239  for (i = 0; i < nthreads; i++) {  in check_timer_distribution()
     249  for (i = 0; i < nthreads; i++) {  in check_timer_distribution()
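check_timer_distribution() uses a fixed pool of ten threads, creates them in one loop and joins them in another; the remain counter in the listing also counts the main thread. Below is only a minimal create/join skeleton with that shape; the worker body is a placeholder, not the selftest's actual timer work. Build with -pthread.

    #include <pthread.h>
    #include <stdio.h>

    static void *worker(void *arg)
    {
        (void)arg;   /* placeholder body; the selftest's real work is omitted */
        return NULL;
    }

    int main(void)
    {
        const int nthreads = 10;
        pthread_t threads[nthreads];
        int i;

        for (i = 0; i < nthreads; i++)
            if (pthread_create(&threads[i], NULL, worker, NULL))
                return 1;

        for (i = 0; i < nthreads; i++)
            pthread_join(threads[i], NULL);

        printf("joined %d worker threads\n", nthreads);
        return 0;
    }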
|
/linux-6.6.21/kernel/ |
D | scftorture.c |
      53  torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
     161  for (i = 0; i < nthreads; i++) {  in scf_torture_stats_print()
     505  …verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stut…  in scftorture_print_module_parms()
     520  if (nthreads && scf_stats_p)  in scf_torture_cleanup()
     521  for (i = 0; i < nthreads; i++)  in scf_torture_cleanup()
     627  if (nthreads < 0)  in scf_torture_init()
     628  nthreads = num_online_cpus();  in scf_torture_init()
     629  scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL);  in scf_torture_init()
     636  VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads);  in scf_torture_init()
     638  atomic_set(&n_started, nthreads);  in scf_torture_init()
     [all …]
|
/linux-6.6.21/kernel/locking/ |
D | test-ww_mutex.c |
     315  static int __test_cycle(unsigned int nthreads)  in __test_cycle()  argument
     318  unsigned int n, last = nthreads - 1;  in __test_cycle()
     321  cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);  in __test_cycle()
     325  for (n = 0; n < nthreads; n++) {  in __test_cycle()
     344  for (n = 0; n < nthreads; n++)  in __test_cycle()
     350  for (n = 0; n < nthreads; n++) {  in __test_cycle()
     357  n, nthreads, cycle->result);  in __test_cycle()
     362  for (n = 0; n < nthreads; n++)  in __test_cycle()
     560  static int stress(int nlocks, int nthreads, unsigned int flags)  in stress()  argument
     570  stress_array = kmalloc_array(nthreads, sizeof(*stress_array),  in stress()
     [all …]
|
/linux-6.6.21/kernel/kcsan/ |
D | kcsan_test.c |
    1375  long nthreads = (long)prev;  in nthreads_gen_params()  local
    1377  if (nthreads < 0 || nthreads >= 32)  in nthreads_gen_params()
    1378  nthreads = 0; /* stop */  in nthreads_gen_params()
    1379  else if (!nthreads)  in nthreads_gen_params()
    1380  nthreads = 2; /* initial value */  in nthreads_gen_params()
    1381  else if (nthreads < 5)  in nthreads_gen_params()
    1382  nthreads++;  in nthreads_gen_params()
    1383  else if (nthreads == 5)  in nthreads_gen_params()
    1384  nthreads = 8;  in nthreads_gen_params()
    1386  nthreads *= 2;  in nthreads_gen_params()
    [all …]
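The branches visible above step the KCSAN test's thread-count parameter through 2, 3, 4, 5, then 8, and afterwards keep doubling it, returning 0 (stop) once the value reaches 32. The sketch below replays only those visible branches; the elided parts of nthreads_gen_params(), including any further capping, are not reproduced.

    #include <stdio.h>

    /* Next thread count, following only the branches quoted above:
     * 0 -> 2, 2..4 -> +1, 5 -> 8, then doubling, stop once >= 32. */
    static long next_nthreads(long prev)
    {
        long nthreads = prev;

        if (nthreads < 0 || nthreads >= 32)
            nthreads = 0;      /* stop */
        else if (!nthreads)
            nthreads = 2;      /* initial value */
        else if (nthreads < 5)
            nthreads++;
        else if (nthreads == 5)
            nthreads = 8;
        else
            nthreads *= 2;

        return nthreads;
    }

    int main(void)
    {
        /* Prints: 2 3 4 5 8 16 32 */
        for (long n = next_nthreads(0); n; n = next_nthreads(n))
            printf("%ld ", n);
        putchar('\n');
        return 0;
    }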
|
/linux-6.6.21/tools/lib/perf/ |
D | evsel.c |
      52  int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)  in perf_evsel__alloc_fd()  argument
      54  evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));  in perf_evsel__alloc_fd()
      60  for (thread = 0; thread < nthreads; thread++) {  in perf_evsel__alloc_fd()
      72  static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)  in perf_evsel__alloc_mmap()  argument
      74  evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));  in perf_evsel__alloc_mmap()
     513  int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)  in perf_evsel__alloc_id()  argument
     515  if (ncpus == 0 || nthreads == 0)  in perf_evsel__alloc_id()
     518  evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));  in perf_evsel__alloc_id()
     522  evsel->id = zalloc(ncpus * nthreads * sizeof(u64));  in perf_evsel__alloc_id()
|
/linux-6.6.21/fs/nfsd/ |
D | nfssvc.c |
     714  int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)  in nfsd_get_nrthreads()  argument
     721  nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;  in nfsd_get_nrthreads()
     727  int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)  in nfsd_set_nrthreads()  argument
     745  nthreads[i] = min(nthreads[i], NFSD_MAXSERVS);  in nfsd_set_nrthreads()
     746  tot += nthreads[i];  in nfsd_set_nrthreads()
     751  int new = nthreads[i] * NFSD_MAXSERVS / tot;  in nfsd_set_nrthreads()
     752  tot -= (nthreads[i] - new);  in nfsd_set_nrthreads()
     753  nthreads[i] = new;  in nfsd_set_nrthreads()
     756  nthreads[i]--;  in nfsd_set_nrthreads()
     765  if (nthreads[0] == 0)  in nfsd_set_nrthreads()
     [all …]
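In nfsd_set_nrthreads(), each pool's request is first clamped to NFSD_MAXSERVS and summed; the lines at 751-753 then shrink each pool proportionally, new = nthreads[i] * NFSD_MAXSERVS / tot, folding the difference back into the running total. The sketch below isolates that arithmetic under stated assumptions: the cap value is an arbitrary stand-in, the trigger condition is assumed to be "total exceeds the cap", and the follow-up per-pool decrement (line 756) is not reproduced because its loop is elided in the listing.

    #include <stdio.h>

    #define MAXSERVS 64   /* arbitrary stand-in for NFSD_MAXSERVS, not its real value */

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* Proportionally shrink per-pool requests toward the cap, mirroring the
     * "new = nthreads[i] * cap / tot" step quoted above.  Assumed trigger:
     * the summed request exceeds the cap.  Rounding is not corrected here. */
    static void scale_pool_threads(int *nthreads, int npools)
    {
        int tot = 0;

        for (int i = 0; i < npools; i++) {
            nthreads[i] = min_int(nthreads[i], MAXSERVS);
            tot += nthreads[i];
        }

        if (tot > MAXSERVS) {
            for (int i = 0; i < npools; i++) {
                int new = nthreads[i] * MAXSERVS / tot;

                tot -= (nthreads[i] - new);
                nthreads[i] = new;
            }
        }
    }

    int main(void)
    {
        int pools[3] = { 60, 40, 20 };

        scale_pool_threads(pools, 3);
        for (int i = 0; i < 3; i++)
            printf("pool %d: %d threads\n", i, pools[i]);
        return 0;
    }

With the example input {60, 40, 20} and a cap of 64 this prints 32, 27 and 16; how the real function disposes of the remaining excess is not fully visible in the truncated listing.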
|
D | nfsctl.c |
     448  int *nthreads;  in write_pool_threads()  local
     464  nthreads = kcalloc(npools, sizeof(int), GFP_KERNEL);  in write_pool_threads()
     466  if (nthreads == NULL)  in write_pool_threads()
     471  rv = get_int(&mesg, &nthreads[i]);  in write_pool_threads()
     477  if (nthreads[i] < 0)  in write_pool_threads()
     479  trace_nfsd_ctl_pool_threads(net, i, nthreads[i]);  in write_pool_threads()
     481  rv = nfsd_set_nrthreads(i, nthreads, net);  in write_pool_threads()
     486  rv = nfsd_get_nrthreads(npools, nthreads, net);  in write_pool_threads()
     493  snprintf(mesg, size, "%d%c", nthreads[i], (i == npools-1 ? '\n' : ' '));  in write_pool_threads()
     500  kfree(nthreads);  in write_pool_threads()
|
/linux-6.6.21/tools/lib/perf/include/internal/ |
D | evsel.h |
      82  int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
      88  int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
|
/linux-6.6.21/tools/testing/selftests/filesystems/binderfs/ |
D | binderfs_test.c |
     403  int i, j, k, nthreads;  in TEST()  local
     453  nthreads = get_nprocs_conf();  in TEST()
     454  if (nthreads > DEFAULT_THREADS)  in TEST()
     455  nthreads = DEFAULT_THREADS;  in TEST()
     460  for (i = 0; i < nthreads; i++) {  in TEST()
|
/linux-6.6.21/arch/powerpc/kernel/ |
D | setup-common.c |
     452  int nthreads = 1;  in smp_setup_cpu_maps()  local
     485  nthreads = len / sizeof(int);  in smp_setup_cpu_maps()
     487  for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {  in smp_setup_cpu_maps()
     513  nthreads = 1;  in smp_setup_cpu_maps()
     538  maxcpus *= nthreads;  in smp_setup_cpu_maps()
     564  cpu_init_thread_core_maps(nthreads);  in smp_setup_cpu_maps()
|
D | prom.c |
     332  int i, nthreads;  in early_init_dt_scan_cpus()  local
     349  nthreads = len / sizeof(int);  in early_init_dt_scan_cpus()
     355  for (i = 0; i < nthreads; i++) {  in early_init_dt_scan_cpus()
     415  if (nthreads == 1)  in early_init_dt_scan_cpus()
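The powerpc call sites listed above (hotplug-cpu.c, setup-common.c, prom.c) all derive the per-core thread count the same way: the byte length of a cpu-node property holding 32-bit entries, divided by the entry size. A trivial illustration of that arithmetic follows; the property name and how it is fetched are not shown in the listing, so the blob here is just example data.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Example blob standing in for a device-tree property of thread ids. */
        const uint32_t prop[] = { 0, 1, 2, 3 };
        int len = (int)sizeof(prop);                /* byte length, as the DT accessors return */
        int nthreads = len / (int)sizeof(uint32_t); /* -> 4 threads per core */

        printf("nthreads = %d\n", nthreads);
        return 0;
    }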
|