/linux-6.15/Documentation/translations/zh_CN/core-api/

workqueue.rst
    577: pool[00] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 0
    578: pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
    579: pool[02] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 1
    580: pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
    581: pool[04] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 2
    582: pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
    583: pool[06] ref= 1 nice= 0 idle/workers= 3/ 3 cpu= 3
    584: pool[07] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 3
    585: pool[08] ref=42 nice= 0 idle/workers= 6/ 6 cpus=0000000f
    586: pool[09] ref=28 nice= 0 idle/workers= 3/ 3 cpus=00000003
    [all …]
|
/linux-6.15/Documentation/core-api/

workqueue.rst
    188: worker-pools which host workers which are not bound to any
    219: each other. Each maintains its separate pool of workers and
    220: implements concurrency management among its workers.
    655: pool[00] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 0
    656: pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
    657: pool[02] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 1
    658: pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
    659: pool[04] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 2
    660: pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
    661: pool[06] ref= 1 nice= 0 idle/workers= 3/ 3 cpu= 3
    [all …]
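
The two workqueue.rst hits above quote the documentation's worker-pool debugging dump (per-CPU pools pinned to one CPU, unbound pools spanning a cpumask). For context, here is a minimal sketch of the API those pools back, using only the standard <linux/workqueue.h> interface; the demo_* names are hypothetical, not from the document:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static void demo_fn(struct work_struct *work)
    {
            /* runs on a worker from one of the per-CPU pools shown above */
            pr_info("demo work ran\n");
    }

    static DECLARE_WORK(demo_work, demo_fn);

    static int __init demo_init(void)
    {
            /* schedule_work() queues onto system_wq, backed by per-CPU pools */
            schedule_work(&demo_work);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            flush_work(&demo_work); /* wait for the handler to finish */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");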
|
/linux-6.15/drivers/gpu/drm/xe/

xe_gt_sriov_pf.c
     50: INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);   in pf_init_workers()
    189: struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);   in pf_worker_restart_func()
    200: if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))   in pf_queue_restart()
|
xe_gt_sriov_pf_types.h
     57: struct xe_gt_sriov_pf_workers workers;   member
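
The xe hits above show the standard "embed a work_struct, recover the container in the handler" idiom: INIT_WORK() wires up the handler, container_of() recovers the owning object, and queue_work() reports whether the item was already pending. Below is a self-contained sketch of the same pattern, with hypothetical demo_* names standing in for the xe structures:

    #include <linux/workqueue.h>
    #include <linux/container_of.h>
    #include <linux/printk.h>

    struct demo_gt {
            struct {
                    struct work_struct restart;
            } workers;
    };

    static void demo_restart_func(struct work_struct *w)
    {
            /* recover the containing object from the embedded work item */
            struct demo_gt *gt = container_of(w, struct demo_gt, workers.restart);

            (void)gt; /* restart logic would go here */
    }

    static void demo_init_workers(struct demo_gt *gt)
    {
            INIT_WORK(&gt->workers.restart, demo_restart_func);
    }

    static void demo_queue_restart(struct workqueue_struct *wq, struct demo_gt *gt)
    {
            /* queue_work() returns false if the item was already pending */
            if (!queue_work(wq, &gt->workers.restart))
                    pr_debug("restart already queued\n");
    }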
|
/linux-6.15/tools/testing/selftests/bpf/

test_progs.c
     534: if (verbose() && !env.workers)   in test__end_subtest()
    1086: env->workers = atoi(arg);   in parse_arg()
    1087: if (!env->workers) {   in parse_arg()
    1092: env->workers = get_nprocs();   in parse_arg()
    1303: for (i = 0; i < env.workers; i++)   in sigint_handler()
    1685: for (i = 0; i < env.workers; i++) {   in server_main()
    1698: for (i = 0; i < env.workers; i++) {   in server_main()
    1739: for (i = 0; i < env.workers; i++) {   in server_main()
    2008: env.workers = 0;   in main()
    2012: if (env.workers) {   in main()
    [all …]
|
test_progs.h
    129: int workers; /* number of worker process */   member
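
In test_progs, env.workers drives the parallel test mode: parse_arg() takes an explicit count or falls back to get_nprocs(), and server_main() loops over the workers to dispatch tests and collect results. A rough userspace sketch of that fork-one-worker-per-CPU shape (illustrative only, not the selftest code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/sysinfo.h>
    #include <sys/wait.h>

    int main(int argc, char **argv)
    {
            int workers = argc > 1 ? atoi(argv[1]) : 0;

            if (!workers)
                    workers = get_nprocs(); /* default: one worker per CPU */

            for (int i = 0; i < workers; i++) {
                    pid_t pid = fork();

                    if (pid == 0) {
                            printf("worker %d (pid %d) running tests\n", i, getpid());
                            _exit(0);
                    }
            }
            for (int i = 0; i < workers; i++)
                    wait(NULL); /* reap all workers */
            return 0;
    }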
|
/linux-6.15/fs/erofs/

Kconfig
    164: bool "EROFS per-cpu decompression kthread workers"
    167: Saying Y here enables per-CPU kthread workers pool to carry out
    173: bool "EROFS high priority per-CPU kthread workers"
    177: This permits EROFS to configure per-CPU kthread workers to run
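
The EROFS options above describe a pool of dedicated kthread workers for decompression, optionally at elevated priority. A minimal sketch of the <linux/kthread.h> worker API such an option builds on follows; the per-CPU wiring and the sched_set_fifo_low() priority boost are assumptions for illustration, not the EROFS code itself:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    struct demo_ctx {
            struct kthread_work work;
    };

    static void demo_decompress(struct kthread_work *work)
    {
            struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

            (void)ctx; /* decompression would happen here */
    }

    static int demo_setup(void)
    {
            static struct demo_ctx ctx;
            struct kthread_worker *worker;

            /* one dedicated kthread worker; EROFS creates one per CPU */
            worker = kthread_create_worker(0, "demo_decomp");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            /* "high priority" amounts to boosting the backing kthread;
             * sched_set_fifo_low() is one way to do that (assumption) */
            sched_set_fifo_low(worker->task);

            kthread_init_work(&ctx.work, demo_decompress);
            kthread_queue_work(worker, &ctx.work);
            kthread_flush_worker(worker);
            kthread_destroy_worker(worker);
            return 0;
    }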
|
/linux-6.15/Documentation/devicetree/bindings/media/

mediatek,vcodec-subdev-decoder.yaml
     46: Its workers take input bitstream and LAT buffer, enable the hardware for
     50: Its workers take LAT buffer and output buffer, enable the hardware for
|
/linux-6.15/drivers/md/

raid5.h
    514: struct r5worker *workers;   member
|
raid5.c
     205: group->workers[0].working = true;   in raid5_wakeup_stripe_thread()
     212: if (group->workers[i].working == false) {   in raid5_wakeup_stripe_thread()
     213: group->workers[i].working = true;   in raid5_wakeup_stripe_thread()
     215: &group->workers[i].work);   in raid5_wakeup_stripe_thread()
    7197: kfree(old_groups[0].workers);   in raid5_store_group_thread_cnt()
    7235: struct r5worker *workers;   in alloc_thread_groups()   local
    7244: workers = kcalloc(size, *group_cnt, GFP_NOIO);   in alloc_thread_groups()
    7247: if (!*worker_groups || !workers) {   in alloc_thread_groups()
    7248: kfree(workers);   in alloc_thread_groups()
    7260: group->workers = workers + i * cnt;   in alloc_thread_groups()
    [all …]
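
The alloc_thread_groups() hits show raid5 making one flat kcalloc() allocation of r5worker structs and slicing it into per-group windows with pointer arithmetic. A reduced sketch of that allocation pattern with simplified, hypothetical types:

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct demo_worker {
            bool working;
    };

    struct demo_group {
            struct demo_worker *workers;
    };

    /* one flat allocation sliced into per-group windows */
    static int demo_alloc_groups(struct demo_group **groups_out,
                                 int group_cnt, int per_group)
    {
            struct demo_group *groups;
            struct demo_worker *workers;
            int i;

            groups = kcalloc(group_cnt, sizeof(*groups), GFP_NOIO);
            workers = kcalloc(group_cnt * per_group, sizeof(*workers), GFP_NOIO);
            if (!groups || !workers) {
                    kfree(workers);
                    kfree(groups);
                    return -ENOMEM;
            }

            for (i = 0; i < group_cnt; i++)
                    groups[i].workers = workers + i * per_group;

            *groups_out = groups;
            return 0;
    }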
|
/linux-6.15/net/l2tp/

Kconfig
     23: with home workers to connect to their offices.
|
/linux-6.15/drivers/block/mtip32xx/

mtip32xx.c
    733: int do_irq_enable = 1, i, workers;   in mtip_handle_irq()   local
    754: for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;   in mtip_handle_irq()
    759: workers++;   in mtip_handle_irq()
    762: atomic_set(&dd->irq_workers_active, workers);   in mtip_handle_irq()
    763: if (workers) {   in mtip_handle_irq()
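
mtip_handle_irq() first counts how many slot groups have completions pending, publishes that count via atomic_set(), and only then kicks the per-group workers, so the last worker to finish can detect that it is last. A condensed sketch of that count-then-dispatch shape (types and names are placeholders, not the driver code):

    #include <linux/atomic.h>
    #include <linux/types.h>

    #define DEMO_MAX_GROUPS 8

    static void demo_handle_irq(atomic_t *active, const u32 pending[DEMO_MAX_GROUPS])
    {
            int i, workers = 0;

            for (i = 0; i < DEMO_MAX_GROUPS; i++)
                    if (pending[i])
                            workers++;

            /* publish the count before dispatching, so each worker can
             * atomic_dec() it and the last one knows it is last */
            atomic_set(active, workers);
            if (workers) {
                    /* dispatch one worker per pending group here */
            }
    }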
|
/linux-6.15/fs/btrfs/

fs.h
    619: struct btrfs_workqueue *workers;   member
|
bio.c
    643: btrfs_queue_work(fs_info->workers, &async->work);   in btrfs_wq_submit_bio()
|
disk-io.c
    1777: btrfs_destroy_workqueue(fs_info->workers);   in btrfs_stop_all_workers()
    1968: fs_info->workers =   in btrfs_init_workqueues()
    2007: if (!(fs_info->workers &&   in btrfs_init_workqueues()
    4363: btrfs_flush_workqueue(fs_info->workers);   in close_ctree()
|
super.c
    1238: btrfs_workqueue_set_max(fs_info->workers, new_pool_size);   in btrfs_resize_thread_pool()
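
Taken together, the btrfs hits trace one workqueue through its whole lifecycle: created in btrfs_init_workqueues(), fed by btrfs_queue_work(), resized by btrfs_workqueue_set_max(), flushed in close_ctree(), and destroyed in btrfs_stop_all_workers(). A generic sketch of the same lifecycle using the core workqueue API rather than btrfs's wrapper:

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *demo_wq;
    static struct work_struct demo_work;

    static void demo_fn(struct work_struct *work)
    {
    }

    static int demo_start(void)
    {
            /* create: cf. btrfs_init_workqueues() */
            demo_wq = alloc_workqueue("demo", WQ_UNBOUND, 0);
            if (!demo_wq)
                    return -ENOMEM;

            INIT_WORK(&demo_work, demo_fn);
            queue_work(demo_wq, &demo_work);        /* cf. btrfs_queue_work() */

            /* resize: cf. btrfs_workqueue_set_max() in btrfs_resize_thread_pool() */
            workqueue_set_max_active(demo_wq, 8);
            return 0;
    }

    static void demo_stop(void)
    {
            flush_workqueue(demo_wq);       /* cf. btrfs_flush_workqueue() in close_ctree() */
            destroy_workqueue(demo_wq);     /* cf. btrfs_destroy_workqueue() */
    }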
|
/linux-6.15/Documentation/admin-guide/

workload-tracing.rst
    126: starts specified number (N) of workers that exercise various netdevice
    264: The netdev stressor starts N workers that exercise various netdevice ioctl
|
kernel-per-CPU-kthreads.rst
    258: c. As of v3.18, Christoph Lameter's on-demand vmstat workers
|
/linux-6.15/kernel/

workqueue.c
     218: struct list_head workers;  /* A: attached workers */   member
     582: list_for_each_entry((worker), &(pool)->workers, node) \
    2695: list_add_tail(&worker->node, &pool->workers);   in worker_attach_to_pool()
    3606: bh_worker(list_first_entry(&pool->workers, struct worker, node));   in workqueue_softirq_action()
    3633: bh_worker(list_first_entry(&pool->workers, struct worker, node));   in drain_dead_softirq_workfn()
    4780: INIT_LIST_HEAD(&pool->workers);   in init_worker_pool()
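
These hits are the pool->workers list itself: initialized in init_worker_pool(), appended to in worker_attach_to_pool(), and walked by the for_each_pool_worker() macro at line 582. A standalone sketch of that attach-and-iterate pattern using <linux/list.h>, with hypothetical demo_* types:

    #include <linux/list.h>

    struct demo_worker {
            struct list_head node;          /* anchored in demo_pool.workers */
    };

    struct demo_pool {
            struct list_head workers;       /* attached workers */
    };

    static void demo_init_pool(struct demo_pool *pool)
    {
            INIT_LIST_HEAD(&pool->workers);         /* cf. init_worker_pool() */
    }

    static void demo_attach(struct demo_pool *pool, struct demo_worker *worker)
    {
            list_add_tail(&worker->node, &pool->workers);   /* cf. worker_attach_to_pool() */
    }

    static void demo_visit_all(struct demo_pool *pool)
    {
            struct demo_worker *worker;

            /* cf. the for_each_pool_worker() macro */
            list_for_each_entry(worker, &pool->workers, node)
                    ; /* visit each attached worker */
    }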
|
/linux-6.15/Documentation/dev-tools/

kcov.rst
    248: exits (e.g. vhost workers).
|
/linux-6.15/Documentation/filesystems/xfs/

xfs-online-fsck-design.rst
    5085: Each inode btree chunk found by the first workqueue's workers are queued to the
    5090: first workqueue's workers until the backlog eases.
    5137: 1. Start a round of repair with a workqueue and enough workers to keep the CPUs
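
The design-doc passage describes a two-workqueue pipeline in which the first workqueue's workers stall while the second stage's backlog is high and resume once it eases. A sketch of one way to express that throttling; the counter, threshold, and names are assumptions, not the XFS scrub code:

    #include <linux/workqueue.h>
    #include <linux/atomic.h>
    #include <linux/wait.h>
    #include <linux/slab.h>

    static struct workqueue_struct *stage2_wq;
    static atomic_t backlog = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(backlog_waitq);
    #define BACKLOG_MAX 128

    struct demo_chunk {
            struct work_struct work;
    };

    static void stage2_fn(struct work_struct *work)
    {
            /* consuming a chunk eases the backlog so stage one can resume */
            kfree(container_of(work, struct demo_chunk, work));
            if (atomic_dec_return(&backlog) < BACKLOG_MAX)
                    wake_up(&backlog_waitq);
    }

    /* called from the first workqueue's workers for each chunk they find */
    static void stage1_queue_chunk(struct demo_chunk *chunk)
    {
            wait_event(backlog_waitq, atomic_read(&backlog) < BACKLOG_MAX);
            atomic_inc(&backlog);
            INIT_WORK(&chunk->work, stage2_fn);
            queue_work(stage2_wq, &chunk->work);
    }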
|