/linux-6.15/drivers/soc/ti/
knav_qmss_acc.c
    283  cmd->command, cmd->queue_mask, cmd->list_dma,  in knav_acc_write()
    289  writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);  in knav_acc_write()
    308  u32 queue_mask;  in knav_acc_setup_cmd() [local]
    313  queue_mask = BIT(range->num_queues) - 1;  in knav_acc_setup_cmd()
    317  queue_mask = 0;  in knav_acc_setup_cmd()
    322  cmd->queue_mask = queue_mask;  in knav_acc_setup_cmd()
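
The accumulator setup builds a contiguous mask covering the range's first num_queues queues with the BIT(n) - 1 idiom. A minimal standalone sketch of that idiom, with the kernel's BIT() macro re-defined locally so it compiles outside the kernel:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1UL << (n))    /* matches the kernel macro */

    int main(void)
    {
        uint32_t num_queues = 5;
        /* all bits below bit num_queues set: 0b11111 = 0x1f */
        uint32_t queue_mask = BIT(num_queues) - 1;

        printf("mask for %u queues: 0x%x\n", num_queues, queue_mask);
        return 0;
    }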
|
knav_qmss.h
    89  u32 queue_mask;  [member]
|
/linux-6.15/drivers/gpu/drm/amd/amdkfd/
kfd_packet_manager_vi.c
    137  packet->queue_mask_lo = lower_32_bits(res->queue_mask);  in pm_set_resources_vi()
    138  packet->queue_mask_hi = upper_32_bits(res->queue_mask);  in pm_set_resources_vi()
|
kfd_packet_manager_v9.c
    212  packet->queue_mask_lo = lower_32_bits(res->queue_mask);  in pm_set_resources_v9()
    213  packet->queue_mask_hi = upper_32_bits(res->queue_mask);  in pm_set_resources_v9()
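
Both packet managers split the 64-bit queue_mask across two 32-bit packet fields. A sketch of that lo/hi split, with lower_32_bits()/upper_32_bits() re-created locally and a trimmed stand-in struct in place of the real PM4 packet layout:

    #include <stdint.h>
    #include <stdio.h>

    #define lower_32_bits(v) ((uint32_t)((v) & 0xffffffffULL))
    #define upper_32_bits(v) ((uint32_t)((v) >> 32))

    struct fake_packet {        /* stand-in; the real fields live in the PM4 headers */
        uint32_t queue_mask_lo;
        uint32_t queue_mask_hi;
    };

    int main(void)
    {
        uint64_t queue_mask = 0x00000001deadbeefULL;
        struct fake_packet packet;

        packet.queue_mask_lo = lower_32_bits(queue_mask);
        packet.queue_mask_hi = upper_32_bits(queue_mask);
        printf("lo=0x%08x hi=0x%08x\n", packet.queue_mask_lo, packet.queue_mask_hi);
        return 0;
    }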
|
kfd_device_queue_manager.c
    1703  res.queue_mask = 0;  in set_sched_resources()
    1719  if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {  in set_sched_resources()
    1724  res.queue_mask |= 1ull  in set_sched_resources()
    1734  res.vmid_mask, res.queue_mask);  in set_sched_resources()
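
set_sched_resources() accumulates the mask one queue at a time, refusing any bit index at or beyond the 64-bit width, which the WARN_ON() guards against. A standalone sketch of that guarded accumulation, with assert() standing in for WARN_ON() and made-up queue indices:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t queue_mask = 0;
        unsigned int queues[] = { 0, 3, 17, 63 };

        for (unsigned int n = 0; n < sizeof(queues) / sizeof(queues[0]); n++) {
            unsigned int i = queues[n];

            /* the kernel WARN_ON()s and skips; here we just assert */
            assert(i < sizeof(queue_mask) * 8);
            queue_mask |= 1ull << i;
        }
        printf("queue_mask = 0x%016llx\n", (unsigned long long)queue_mask);
        return 0;
    }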
|
kfd_priv.h
    637  uint64_t queue_mask;  [member]
|
/linux-6.15/Documentation/networking/device_drivers/ethernet/intel/
idpf.rst
    121  # ethtool --per-queue <ethX> queue_mask 0xa --coalesce adaptive-rx off
    126  # ethtool --per-queue <ethX> queue_mask 0xa --show-coalesce
|
ice.rst
    1146  # ethtool --per-queue <ethX> queue_mask 0xa --coalesce adaptive-rx off
    1151  # ethtool --per-queue <ethX> queue_mask 0xa --show-coalesce
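
Worked example of the mask in both documents: 0xa is binary 1010, bits 1 and 3 set, so these commands act on queues 1 and 3 only. Any hexadecimal bitmap of the device's queues can be passed as queue_mask.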
|
/linux-6.15/drivers/net/ethernet/marvell/
mv643xx_eth.c
    2255  u8 queue_mask;  in mv643xx_eth_poll() [local]
    2266  queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;  in mv643xx_eth_poll()
    2268  queue_mask |= mp->work_rx_refill;  in mv643xx_eth_poll()
    2270  if (!queue_mask) {  in mv643xx_eth_poll()
    2276  queue = fls(queue_mask) - 1;  in mv643xx_eth_poll()
    2277  queue_mask = 1 << queue;  in mv643xx_eth_poll()
    2283  if (mp->work_tx_end & queue_mask) {  in mv643xx_eth_poll()
    2285  } else if (mp->work_tx & queue_mask) {  in mv643xx_eth_poll()
    2288  } else if (mp->work_rx & queue_mask) {  in mv643xx_eth_poll()
    2290  } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {  in mv643xx_eth_poll()
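
The poll loop picks the highest-numbered queue with pending work: fls() returns the 1-based index of the highest set bit (0 when no bits are set), so fls(mask) - 1 is that queue, and 1 << queue then isolates its bit. A sketch with a portable fls() stand-in:

    #include <stdio.h>

    static int fls_local(unsigned int x)   /* stand-in for the kernel's fls() */
    {
        int i = 0;

        while (x) {
            i++;
            x >>= 1;
        }
        return i;
    }

    int main(void)
    {
        unsigned char queue_mask = 0x16;        /* queues 1, 2 and 4 have work */
        int queue = fls_local(queue_mask) - 1;  /* -> 4, the highest */

        queue_mask = 1 << queue;                /* keep only that queue's bit */
        printf("servicing queue %d (mask 0x%02x)\n", queue, queue_mask);
        return 0;
    }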
|
/linux-6.15/net/ethtool/
ioctl.c
    2845  DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE);  in ethtool_get_per_queue_coalesce()
    2852  bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask,  in ethtool_get_per_queue_coalesce()
    2855  for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) {  in ethtool_get_per_queue_coalesce()
    2878  DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE);  in ethtool_set_per_queue_coalesce()
    2886  bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE);  in ethtool_set_per_queue_coalesce()
    2887  n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE);  in ethtool_set_per_queue_coalesce()
    2892  for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) {  in ethtool_set_per_queue_coalesce()
    2921  for_each_set_bit(i, queue_mask, bit) {  in ethtool_set_per_queue_coalesce()
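
The ioctl path converts the userspace __u32 array into a kernel bitmap with bitmap_from_arr32(), counts set bits with bitmap_weight(), and walks them with for_each_set_bit(). A sketch of the same decode-and-iterate, done by hand over a __u32-style array (MAX_NUM_QUEUE mirrors the uapi limit):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_NUM_QUEUE 4096
    #define MASK_WORDS ((MAX_NUM_QUEUE + 31) / 32)

    int main(void)
    {
        uint32_t queue_mask[MASK_WORDS] = { 0 };
        unsigned int n_queue = 0;

        queue_mask[0] = 0xa;    /* queues 1 and 3 */
        queue_mask[1] = 0x1;    /* queue 32 */

        /* what for_each_set_bit() does, spelled out */
        for (unsigned int bit = 0; bit < MAX_NUM_QUEUE; bit++) {
            if (queue_mask[bit / 32] & (1u << (bit % 32))) {
                printf("queue %u selected\n", bit);
                n_queue++;      /* what bitmap_weight() computes */
            }
        }
        printf("%u queues total\n", n_queue);
        return 0;
    }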
|
/linux-6.15/drivers/gpu/drm/amd/amdgpu/
amdgpu_gfx.c
    619  uint64_t queue_mask = ~0ULL;  in amdgpu_gfx_mes_enable_kcq() [local]
    633  kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);  in amdgpu_gfx_mes_enable_kcq()
    657  uint64_t queue_mask = 0;  in amdgpu_gfx_enable_kcq() [local]
    673  if (WARN_ON(i > (sizeof(queue_mask)*8))) {  in amdgpu_gfx_enable_kcq()
    678  queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));  in amdgpu_gfx_enable_kcq()
    696  kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);  in amdgpu_gfx_enable_kcq()
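
Note the two call sites take opposite approaches to the same kiq_set_resources() callback: amdgpu_gfx_mes_enable_kcq() starts from ~0ULL, marking every queue, while amdgpu_gfx_enable_kcq() builds the mask one allocated queue at a time, translating each driver-side queue index through amdgpu_queue_mask_bit_to_set_resource_bit() into the bit layout the SET_RESOURCES packet expects before OR-ing it in.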
|
amdgpu_gfx.h
    131  uint64_t queue_mask);
|
gfx_v8_0.c
    4329  uint64_t queue_mask = 0;  in gfx_v8_0_kiq_kcq_enable() [local]
    4339  if (WARN_ON(i >= (sizeof(queue_mask)*8))) {  in gfx_v8_0_kiq_kcq_enable()
    4344  queue_mask |= (1ull << i);  in gfx_v8_0_kiq_kcq_enable()
    4355  amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));  /* queue mask lo */  in gfx_v8_0_kiq_kcq_enable()
    4356  amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));  /* queue mask hi */  in gfx_v8_0_kiq_kcq_enable()
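
gfx_v8 both builds the mask and streams it into the KIQ ring as two 32-bit dwords, low half first; the same two-dword emission recurs in the gfx_v9 through gfx_v12 kiq_set_resources() callbacks listed below. A sketch of emitting a 64-bit value into a dword command stream, with the ring modeled as a plain array and a stand-in for amdgpu_ring_write():

    #include <stdint.h>
    #include <stdio.h>

    struct ring {
        uint32_t buf[16];
        unsigned int wptr;
    };

    static void ring_write(struct ring *r, uint32_t v)  /* amdgpu_ring_write() stand-in */
    {
        r->buf[r->wptr++] = v;
    }

    int main(void)
    {
        struct ring kiq_ring = { .wptr = 0 };
        uint64_t queue_mask = (1ull << 0) | (1ull << 8) | (1ull << 40);

        ring_write(&kiq_ring, (uint32_t)queue_mask);         /* queue mask lo */
        ring_write(&kiq_ring, (uint32_t)(queue_mask >> 32)); /* queue mask hi */
        printf("dwords: 0x%08x 0x%08x\n", kiq_ring.buf[0], kiq_ring.buf[1]);
        return 0;
    }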
|
gfx_v9_4_3.c
    171  uint64_t queue_mask)  in gfx_v9_4_3_kiq_set_resources() [argument]
    185  lower_32_bits(queue_mask));  /* queue mask lo */  in gfx_v9_4_3_kiq_set_resources()
    187  upper_32_bits(queue_mask));  /* queue mask hi */  in gfx_v9_4_3_kiq_set_resources()
|
gfx_v12_0.c
    262  uint64_t queue_mask)  in gfx_v12_0_kiq_set_resources() [argument]
    267  amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));  /* queue mask lo */  in gfx_v12_0_kiq_set_resources()
    268  amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));  /* queue mask hi */  in gfx_v12_0_kiq_set_resources()
|
gfx_v11_0.c
    314  static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)  in gfx11_kiq_set_resources() [argument]
    326  amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));  /* queue mask lo */  in gfx11_kiq_set_resources()
    327  amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));  /* queue mask hi */  in gfx11_kiq_set_resources()
|
gfx_v9_0.c
    901  uint64_t queue_mask)  in gfx_v9_0_kiq_set_resources() [argument]
    915  lower_32_bits(queue_mask));  /* queue mask lo */  in gfx_v9_0_kiq_set_resources()
    917  upper_32_bits(queue_mask));  /* queue mask hi */  in gfx_v9_0_kiq_set_resources()
|
gfx_v10_0.c
    3678  static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)  in gfx10_kiq_set_resources() [argument]
    3689  amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));  /* queue mask lo */  in gfx10_kiq_set_resources()
    3690  amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));  /* queue mask hi */  in gfx10_kiq_set_resources()
|
/linux-6.15/drivers/gpu/drm/panthor/
panthor_sched.c
    1291  u32 queue_mask = 0, i;  in csg_slot_prog_locked() [local]
    1311  queue_mask |= BIT(i);  in csg_slot_prog_locked()
    1337  panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);  in csg_slot_prog_locked()
    2530  static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)  in group_schedule_locked() [argument]
    2543  if ((queue_mask & group->blocked_queues) == queue_mask)  in group_schedule_locked()
    2547  group->idle_queues &= ~queue_mask;  in group_schedule_locked()
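
The scheduler check at line 2543 is a bitwise subset test: (mask & blocked) == mask holds exactly when every bit of mask is also set in blocked, i.e. every requested queue is blocked and there is nothing to schedule. A sketch with invented mask values:

    #include <stdint.h>
    #include <stdio.h>

    static int all_blocked(uint32_t queue_mask, uint32_t blocked_queues)
    {
        /* true iff queue_mask is a subset of blocked_queues */
        return (queue_mask & blocked_queues) == queue_mask;
    }

    int main(void)
    {
        printf("%d\n", all_blocked(0x06, 0x0e));  /* 1: queues 1,2 both blocked   */
        printf("%d\n", all_blocked(0x06, 0x0a));  /* 0: queue 2 is still runnable */
        return 0;
    }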
|
/linux-6.15/drivers/net/ethernet/cadence/
macb_main.c
    4149  unsigned int *queue_mask,  in macb_probe_queues() [argument]
    4152  *queue_mask = 0x1;  in macb_probe_queues()
    4165  *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;  in macb_probe_queues()
    4166  *num_queues = hweight32(*queue_mask);  in macb_probe_queues()
    4285  if (!(bp->queue_mask & (1 << hw_q)))  in macb_init()
    5176  unsigned int queue_mask, num_queues;  in macb_probe() [local]
    5212  macb_probe_queues(mem, native_io, &queue_mask, &num_queues);  in macb_probe()
    5236  bp->queue_mask = queue_mask;  in macb_probe()
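
The probe logic treats queue 0 as always present, lets the GEM_DCFG6 register advertise the rest as a bitfield, and derives the queue count as the population count of the combined mask. A sketch with a faked register value, using the GCC/Clang popcount builtin in place of hweight32():

    #include <stdio.h>

    int main(void)
    {
        unsigned int dcfg6 = 0x0e;       /* pretend-read of GEM_DCFG6 */
        unsigned int queue_mask = 0x1;   /* queue 0 is always present */
        unsigned int num_queues;

        queue_mask |= dcfg6 & 0xff;      /* add hardware-advertised queues */
        num_queues = __builtin_popcount(queue_mask);  /* hweight32() equivalent */
        printf("mask 0x%x -> %u queues\n", queue_mask, num_queues);
        return 0;
    }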
|
macb.h
    1269  unsigned int queue_mask;  [member]
|
/linux-6.15/net/sched/
sch_taprio.c
    1423  u32 i, queue_mask = 0;  in tc_map_to_queue_mask() [local]
    1434  queue_mask |= GENMASK(offset + count - 1, offset);  in tc_map_to_queue_mask()
    1437  return queue_mask;  in tc_map_to_queue_mask()
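
taprio maps each traffic class's (offset, count) pair to a contiguous run of queue bits: GENMASK(h, l) produces bits l..h inclusive, so GENMASK(offset + count - 1, offset) covers the class's queues. A sketch with GENMASK re-defined locally for a standalone 32-bit build and an invented TC layout:

    #include <stdint.h>
    #include <stdio.h>

    /* bits l..h inclusive; local stand-in for the kernel macro, h < 32 */
    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
        struct { unsigned int offset, count; } tc[] = {
            { 0, 2 },   /* TC0: queues 0-1 */
            { 2, 4 },   /* TC1: queues 2-5 */
        };
        uint32_t queue_mask = 0;

        for (unsigned int i = 0; i < 2; i++)
            queue_mask |= GENMASK(tc[i].offset + tc[i].count - 1, tc[i].offset);
        printf("queue_mask = 0x%x\n", queue_mask);  /* 0x3f */
        return 0;
    }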
|
/linux-6.15/include/uapi/linux/
ethtool.h
    1780  __u32 queue_mask[__KERNEL_DIV_ROUND_UP(MAX_NUM_QUEUE, 32)];  [member]
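
The uapi struct carries the queue bitmap as an array of __u32 words, enough to hold MAX_NUM_QUEUE bits; bit n of the logical bitmap lives at word n / 32, bit n % 32. A sketch of filling such a mask from userspace, with a trimmed stand-in struct rather than the full uapi layout:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_NUM_QUEUE 4096
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct per_queue_mask {     /* trimmed stand-in for the uapi struct */
        uint32_t queue_mask[DIV_ROUND_UP(MAX_NUM_QUEUE, 32)];
    };

    static void set_queue(struct per_queue_mask *m, unsigned int n)
    {
        m->queue_mask[n / 32] |= 1u << (n % 32);
    }

    int main(void)
    {
        struct per_queue_mask m = { { 0 } };

        set_queue(&m, 1);
        set_queue(&m, 3);
        set_queue(&m, 100);     /* lands in word 3, bit 4 */
        printf("word0=0x%x word3=0x%x\n", m.queue_mask[0], m.queue_mask[3]);
        return 0;
    }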
|