
Searched refs:workers (Results 1 – 13 of 13) sorted by relevance

/linux-6.1.9/tools/testing/selftests/bpf/
test_progs.c
375 if (verbose() && !env.workers) in test__end_subtest()
853 env->workers = atoi(arg); in parse_arg()
854 if (!env->workers) { in parse_arg()
859 env->workers = get_nprocs(); in parse_arg()
987 for (i = 0; i < env.workers; i++) in sigint_handler()
1324 dispatcher_threads = calloc(sizeof(pthread_t), env.workers); in server_main()
1325 data = calloc(sizeof(struct dispatch_data), env.workers); in server_main()
1327 env.worker_current_test = calloc(sizeof(int), env.workers); in server_main()
1328 for (i = 0; i < env.workers; i++) { in server_main()
1341 for (i = 0; i < env.workers; i++) { in server_main()
[all …]
test_progs.h
121 int workers; /* number of worker process */ member
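
The selftest hits above show the worker fan-out in server_main(): per-worker arrays sized with calloc() and one dispatcher pthread per worker. (Note that calloc()'s canonical argument order is (nmemb, size), the reverse of what the snippet passes, though the product is the same.) Below is a minimal standalone sketch of that pattern; dispatch_data and dispatcher_thread are simplified stand-ins, not the selftest's real definitions. Build with -lpthread.

/*
 * Minimal userspace sketch of the dispatcher pattern in server_main():
 * size per-worker arrays with calloc(), spawn one dispatcher thread per
 * worker, then join them. Names are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysinfo.h>

struct dispatch_data {
	int worker_id;
};

static void *dispatcher_thread(void *arg)
{
	struct dispatch_data *d = arg;

	printf("worker %d running\n", d->worker_id);
	return NULL;
}

int main(int argc, char **argv)
{
	int workers = argc > 1 ? atoi(argv[1]) : 0;
	pthread_t *threads;
	struct dispatch_data *data;
	int i;

	if (!workers)		/* 0 or no argument: one worker per CPU */
		workers = get_nprocs();

	threads = calloc(workers, sizeof(pthread_t));
	data = calloc(workers, sizeof(struct dispatch_data));
	if (!threads || !data)
		return 1;

	for (i = 0; i < workers; i++) {
		data[i].worker_id = i;
		pthread_create(&threads[i], NULL, dispatcher_thread, &data[i]);
	}
	for (i = 0; i < workers; i++)
		pthread_join(threads[i], NULL);

	free(threads);
	free(data);
	return 0;
}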
/linux-6.1.9/Documentation/core-api/
workqueue.rst
34 number of workers as the number of CPUs. The kernel grew a lot of MT
118 number of the currently runnable workers. Generally, work items are
122 workers on the CPU, the worker-pool doesn't start execution of a new
125 are pending work items. This allows using a minimal number of workers
128 Keeping idle workers around doesn't cost other than the memory space
140 Forward progress guarantee relies on that workers can be created when
142 through the use of rescue workers. All work items which might be used
169 worker-pools which host workers which are not bound to any
178 of mostly unused workers across different CPUs as the issuer
200 each other. Each maintains its separate pool of workers and
[all …]
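
workqueue.rst describes the concurrency-managed workqueue (cmwq) design those lines come from: worker-pools keep just enough runnable workers and create more only when work would otherwise stall. A hedged sketch of the consumer-side API as a trivial module; demo_wq and demo_work_fn are illustrative names, not anything from the tree:

/*
 * A module allocates a workqueue, queues a work item, and the
 * worker-pool decides how many workers actually run it.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work executed\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	/* WQ_UNBOUND uses the unbound worker-pools described above */
	demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* drains pending work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");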
/linux-6.1.9/net/l2tp/
Kconfig
23 with home workers to connect to their offices.
/linux-6.1.9/drivers/md/
raid5.h
518 struct r5worker *workers; member
raid5.c
204 group->workers[0].working = true; in raid5_wakeup_stripe_thread()
206 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
211 if (group->workers[i].working == false) { in raid5_wakeup_stripe_thread()
212 group->workers[i].working = true; in raid5_wakeup_stripe_thread()
214 &group->workers[i].work); in raid5_wakeup_stripe_thread()
7197 kfree(old_groups[0].workers); in raid5_store_group_thread_cnt()
7234 struct r5worker *workers; in alloc_thread_groups() local
7243 workers = kcalloc(size, *group_cnt, GFP_NOIO); in alloc_thread_groups()
7246 if (!*worker_groups || !workers) { in alloc_thread_groups()
7247 kfree(workers); in alloc_thread_groups()
[all …]
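
The raid5 hits show the stripe-thread wakeup pattern: scan a group's flat worker array for an idle entry, mark it working, and queue its work item on the stripe's CPU, with the array itself sized by kcalloc(..., GFP_NOIO) in alloc_thread_groups(). A simplified sketch, with demo_worker/demo_group standing in for r5worker/r5worker_group:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_worker {
	struct work_struct work;
	bool working;
};

struct demo_group {
	struct demo_worker *workers;
	int cnt;
};

/* one flat array of per_group * group_cnt workers, as in
 * alloc_thread_groups(); GFP_NOIO avoids recursing into I/O */
static struct demo_worker *demo_alloc_workers(int per_group, int group_cnt)
{
	return kcalloc(per_group * group_cnt, sizeof(struct demo_worker),
		       GFP_NOIO);
}

static void demo_wakeup(struct demo_group *group,
			struct workqueue_struct *wq, int cpu)
{
	int i;

	for (i = 0; i < group->cnt; i++) {
		if (group->workers[i].working == false) {
			/* claim the first idle worker and kick it */
			group->workers[i].working = true;
			queue_work_on(cpu, wq, &group->workers[i].work);
			break;
		}
	}
}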
/linux-6.1.9/kernel/
workqueue.c
179 struct list_head workers; /* A: attached workers */ member
428 list_for_each_entry((worker), &(pool)->workers, node) \
1878 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1903 if (list_empty(&pool->workers)) in worker_detach_from_pool()
3468 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
3603 if (!list_empty(&pool->workers)) in put_unbound_pool()
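
workqueue.c keeps attached workers on a per-pool list_head; the hits above cover the iteration macro, attach/detach, and the emptiness checks used at pool teardown. A loose sketch of that bookkeeping, with demo_* placeholder types mirroring worker_pool/worker only roughly:

#include <linux/list.h>
#include <linux/printk.h>

struct demo_pool {
	struct list_head workers;	/* attached workers */
};

struct demo_worker {
	struct list_head node;		/* anchored in pool->workers */
};

#define for_each_demo_worker(worker, pool) \
	list_for_each_entry((worker), &(pool)->workers, node)

static void demo_pool_init(struct demo_pool *pool)
{
	INIT_LIST_HEAD(&pool->workers);
}

static void demo_attach(struct demo_worker *worker, struct demo_pool *pool)
{
	list_add_tail(&worker->node, &pool->workers);
}

static void demo_detach(struct demo_worker *worker, struct demo_pool *pool)
{
	list_del(&worker->node);
	if (list_empty(&pool->workers))
		pr_info("last worker detached\n");	/* pool may be torn down */
}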
/linux-6.1.9/drivers/block/mtip32xx/
mtip32xx.c
733 int do_irq_enable = 1, i, workers; in mtip_handle_irq() local
754 for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS; in mtip_handle_irq()
759 workers++; in mtip_handle_irq()
762 atomic_set(&dd->irq_workers_active, workers); in mtip_handle_irq()
763 if (workers) { in mtip_handle_irq()
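
mtip_handle_irq() counts how many slot groups have completed commands, publishes the count via atomic_set() on irq_workers_active, and only then dispatches completion work. A stripped-down sketch; DEMO_MAX_SLOT_GROUPS and struct demo_dev are placeholders for the driver's real definitions:

#include <linux/atomic.h>
#include <linux/types.h>

#define DEMO_MAX_SLOT_GROUPS 8

struct demo_dev {
	atomic_t irq_workers_active;
	u32 completed[DEMO_MAX_SLOT_GROUPS];
};

static void demo_count_workers(struct demo_dev *dd)
{
	int i, workers;

	for (i = 0, workers = 0; i < DEMO_MAX_SLOT_GROUPS; i++)
		if (dd->completed[i])
			workers++;	/* this group has finished commands */

	atomic_set(&dd->irq_workers_active, workers);
	if (workers) {
		/* fan per-group completion work out here */
	}
}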
/linux-6.1.9/Documentation/dev-tools/
kcov.rst
223 some kernel interface (e.g. vhost workers); as well as from soft
/linux-6.1.9/fs/btrfs/
disk-io.c
715 btrfs_queue_work(fs_info->workers, &async->work); in btrfs_wq_submit_bio()
2091 btrfs_destroy_workqueue(fs_info->workers); in btrfs_stop_all_workers()
2277 fs_info->workers = in btrfs_init_workqueues()
2320 if (!(fs_info->workers && fs_info->hipri_workers && in btrfs_init_workqueues()
super.c
1938 btrfs_workqueue_set_max(fs_info->workers, new_pool_size); in btrfs_resize_thread_pool()
ctree.h
822 struct btrfs_workqueue *workers; member
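
btrfs wraps the core workqueue in its own btrfs_workqueue type, queued through btrfs_queue_work() and resized through btrfs_workqueue_set_max() when the thread_pool mount option changes. The core-kernel analogue of that resize is workqueue_set_max_active(); a sketch using only the generic API, with demo_* as illustrative names rather than btrfs's actual wrapper:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_setup(int thread_pool_size)
{
	/* max_active caps concurrent work items, like btrfs's pool size */
	demo_wq = alloc_workqueue("demo", WQ_UNBOUND, thread_pool_size);
	if (!demo_wq)
		return -ENOMEM;
	return 0;
}

static void demo_resize(int new_pool_size)
{
	/* analogue of btrfs_resize_thread_pool() adjusting max_active */
	workqueue_set_max_active(demo_wq, new_pool_size);
}

static void demo_teardown(void)
{
	destroy_workqueue(demo_wq);
}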
/linux-6.1.9/Documentation/admin-guide/
kernel-per-CPU-kthreads.rst
262 d. As of v3.18, Christoph Lameter's on-demand vmstat workers