/linux-6.15/include/net/
fq_impl.h
    36:  idx = flow - fq->flows;   in __fq_adjust_removal()
    152: flow = &fq->flows[idx];   in fq_flow_classify()
    160: tin->flows++;   in fq_flow_classify()
    173: struct fq_flow *cur = &fq->flows[i];   in fq_find_fattest_flow()
    361: fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);   in fq_init()
    362: if (!fq->flows)   in fq_init()
    367: kvfree(fq->flows);   in fq_init()
    368: fq->flows = NULL;   in fq_init()
    373: fq_flow_init(&fq->flows[i]);   in fq_init()
    386: kvfree(fq->flows);   in fq_reset()
    [all …]
|
fq.h
    57: u32 flows;   (member)
    69: struct fq_flow *flows;   (member)
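
Taken together, the fq_impl.h hits trace the flow table's lifecycle: fq_init() allocates one zeroed fq_flow per bucket with kvcalloc(), initializes each slot, and fq_reset() releases the table with kvfree(). A minimal userspace sketch of that pattern follows; calloc()/free() stand in for kvcalloc()/kvfree(), and the struct members are reduced to illustrative placeholders rather than the real definitions.

    /*
     * Userspace sketch of the fq_init()/fq_reset() lifecycle visible in the
     * fq_impl.h hits: allocate one zeroed fq_flow per bucket, initialize
     * each slot, free the whole table on teardown. calloc()/free() stand in
     * for the kernel's kvcalloc()/kvfree().
     */
    #include <stdlib.h>
    #include <stdio.h>

    struct fq_flow { int backlog; };    /* placeholder for the real struct */

    struct fq {
        struct fq_flow *flows;          /* hashed flow table */
        unsigned int flows_cnt;         /* number of buckets */
    };

    static int fq_init(struct fq *fq, unsigned int cnt)
    {
        unsigned int i;

        fq->flows_cnt = cnt;
        fq->flows = calloc(fq->flows_cnt, sizeof(fq->flows[0]));
        if (!fq->flows)
            return -1;                  /* the kernel returns -ENOMEM here */
        for (i = 0; i < fq->flows_cnt; i++)
            fq->flows[i].backlog = 0;   /* stands in for fq_flow_init() */
        return 0;
    }

    static void fq_reset(struct fq *fq)
    {
        free(fq->flows);                /* kvfree() in the kernel */
        fq->flows = NULL;
    }

    int main(void)
    {
        struct fq fq;

        if (fq_init(&fq, 1024))
            return 1;
        printf("allocated %u flow buckets\n", fq.flows_cnt);
        fq_reset(&fq);
        return 0;
    }

The same allocate/initialize/kvfree shape recurs in sch_fq_codel.c, sch_fq_pie.c, k3-udma-glue.c, and eswitch_offloads.c below, so it is sketched only once here.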
|
/linux-6.15/drivers/crypto/allwinner/sun8i-ss/
sun8i-ss-core.c
    76:  ss->flows[flow].stat_req++;   in sun8i_ss_run_task()
    133: ss->flows[flow].status = 0;   in sun8i_ss_run_task()
    159: ss->flows[flow].status = 1;   in ss_irq_handler()
    478: ss->flows[i].stat_req);   in sun8i_ss_debugfs_show()
    550: if (!ss->flows)   in allocate_flows()
    558: if (!ss->flows[i].biv) {   in allocate_flows()
    566: if (!ss->flows[i].iv[j]) {   in allocate_flows()
    575: if (!ss->flows[i].pad) {   in allocate_flows()
    579: ss->flows[i].result =   in allocate_flows()
    583: if (!ss->flows[i].result) {   in allocate_flows()
    [all …]
|
sun8i-ss-prng.c
    134: reinit_completion(&ss->flows[flow].complete);   in sun8i_ss_prng_generate()
    135: ss->flows[flow].status = 0;   in sun8i_ss_prng_generate()
    141: wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,   in sun8i_ss_prng_generate()
    143: if (ss->flows[flow].status == 0) {   in sun8i_ss_prng_generate()
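
The prng hits show the driver's per-flow handshake with the hardware: clear ->status, re-arm the completion, start the engine, then wait with a timeout and treat ->status still being zero as failure (ss_irq_handler() in sun8i-ss-core.c sets it to 1 and completes). Below is a userspace sketch of the same handshake; a pthread condition variable stands in for the kernel's struct completion, and the ss_flow/fake_irq names are simplified stand-ins, not driver API.

    /*
     * Sketch of the per-flow completion handshake: submitter clears
     * ->status, kicks the engine, waits with a timeout; the "IRQ" sets
     * ->status = 1 and completes. Build with: cc -pthread sketch.c
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    struct ss_flow {
        pthread_mutex_t lock;
        pthread_cond_t complete;        /* stands in for struct completion */
        int status;                     /* set to 1 by the "IRQ" handler */
    };

    static struct ss_flow flow = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .complete = PTHREAD_COND_INITIALIZER,
    };

    static void *fake_irq(void *arg)    /* plays the role of ss_irq_handler() */
    {
        (void)arg;
        usleep(10000);                  /* pretend the engine ran */
        pthread_mutex_lock(&flow.lock);
        flow.status = 1;                /* ss->flows[flow].status = 1; */
        pthread_cond_signal(&flow.complete);
        pthread_mutex_unlock(&flow.lock);
        return NULL;
    }

    int main(void)
    {
        struct timespec deadline;
        pthread_t irq;

        flow.status = 0;                /* ss->flows[flow].status = 0; */
        pthread_create(&irq, NULL, fake_irq, NULL);

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 2;           /* like the jiffies-based timeout */

        pthread_mutex_lock(&flow.lock); /* wait_for_completion..._timeout() */
        while (!flow.status &&
               pthread_cond_timedwait(&flow.complete, &flow.lock,
                                      &deadline) == 0)
            ;
        pthread_mutex_unlock(&flow.lock);
        pthread_join(irq, NULL);

        if (flow.status == 0) {         /* same failure check as the driver */
            fprintf(stderr, "timeout waiting for flow completion\n");
            return 1;
        }
        printf("flow completed\n");
        return 0;
    }

The hash driver below (sun8i-ss-hash.c, lines 323-331) uses the identical sequence.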
|
sun8i-ss-hash.c
    290: ss->flows[flow].stat_req++;   in sun8i_ss_run_hash_task()
    323: reinit_completion(&ss->flows[flow].complete);   in sun8i_ss_run_hash_task()
    324: ss->flows[flow].status = 0;   in sun8i_ss_run_hash_task()
    329: wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,   in sun8i_ss_run_hash_task()
    331: if (ss->flows[flow].status == 0) {   in sun8i_ss_run_hash_task()
    407: engine = ss->flows[e].engine;   in sun8i_ss_hash_digest()
    505: result = ss->flows[rctx->flow].result;   in sun8i_ss_hash_run()
    506: pad = ss->flows[rctx->flow].pad;   in sun8i_ss_hash_run()
|
sun8i-ss-cipher.c
    130: struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];   in sun8i_ss_setup_ivs()
    191: struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];   in sun8i_ss_cipher()
    360: engine = op->ss->flows[e].engine;   in sun8i_ss_skdecrypt()
    381: engine = op->ss->flows[e].engine;   in sun8i_ss_skencrypt()
|
/linux-6.15/samples/bpf/
do_hbm_test.sh
    78:  flows=1
    150: -f=*|--flows=*)
    151: flows="${i#*=}"
    278: while [ $flow_cnt -le $flows ] ; do
    320: while [ $flow_cnt -le $flows ] ; do
    346: iperf3 -c $host -p $port -i 0 -P $flows -f m -t $dur > iperf.$id
    366: while [ $flow_cnt -le $flows ] ; do
    386: while [ $flow_cnt -le $flows ] ; do
|
/linux-6.15/net/sched/
sch_fq_codel.c
    164: flow = &q->flows[idx];   in fq_codel_drop()
    205: flow = &q->flows[idx];   in fq_codel_enqueue()
    342: struct fq_codel_flow *flow = q->flows + i;   in fq_codel_reset()
    379: if (q->flows)   in fq_codel_change()
    464: kvfree(q->flows);   in fq_codel_destroy()
    496: if (!q->flows) {   in fq_codel_init()
    497: q->flows = kvcalloc(q->flows_cnt,   in fq_codel_init()
    500: if (!q->flows) {   in fq_codel_init()
    523: kvfree(q->flows);   in fq_codel_init()
    524: q->flows = NULL;   in fq_codel_init()
    [all …]
|
sch_fq_pie.c
    58:  struct fq_pie_flow *flows;   (member)
    152: sel_flow = &q->flows[idx];   in fq_pie_qdisc_enqueue()
    308: if (q->flows) {   in fq_pie_change()
    401: &q->flows[q->flows_cursor].vars,   in fq_pie_timer()
    402: q->flows[q->flows_cursor].backlog);   in fq_pie_timer()
    449: q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),   in fq_pie_init()
    451: if (!q->flows) {   in fq_pie_init()
    456: struct fq_pie_flow *flow = q->flows + idx;   in fq_pie_init()
    541: struct fq_pie_flow *flow = q->flows + idx;   in fq_pie_reset()
    559: kvfree(q->flows);   in fq_pie_destroy()
|
sch_cake.c
    151:  struct cake_flow flows[CAKE_QUEUES];   (member)
    787:  q->flows[reduced_hash].set)) {   in cake_hash()
    805:  if (!q->flows[outer_hash + k].set) {   in cake_hash()
    820:  if (!q->flows[outer_hash + k].set) {   in cake_hash()
    862:  q->flows[reduced_hash].srchost = srchost_idx;   in cake_hash()
    1572: flow = &b->flows[idx];   in cake_drop()
    1761: flow = &b->flows[idx];   in cake_enqueue()
    2079: q->cur_flow = flow - b->flows;   in cake_dequeue()
    2765: struct cake_flow *flow = b->flows + j;   in cake_init()
    3012: flow = &b->flows[idx % CAKE_QUEUES];   in cake_dump_class_stats()
    [all …]
|
sch_fq.c
    143:  u32 flows;   (member)
    295:  q->flows -= fcnt;   in fq_gc()
    327:  if (q->flows != q->inactive_flows + q->throttled_flows)   in fq_fastpath_check()
    458:  q->flows++;   in fq_classify()
    831:  q->flows = 0;   in fq_reset()
    876:  q->flows -= fcnt;   in fq_rehash()
    1301: st.flows = q->flows;   in fq_dump_stats()
|
/linux-6.15/drivers/dma/ti/
k3-udma-glue.c
    84:   struct k3_udma_glue_rx_flow *flows;   (member)
    1036: rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,   in k3_udma_glue_request_rx_chn_priv()
    1037: sizeof(*rx_chn->flows), GFP_KERNEL);   in k3_udma_glue_request_rx_chn_priv()
    1038: if (!rx_chn->flows) {   in k3_udma_glue_request_rx_chn_priv()
    1048: rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;   in k3_udma_glue_request_rx_chn_priv()
    1088: rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,   in k3_udma_glue_request_remote_rx_chn_common()
    1089: sizeof(*rx_chn->flows), GFP_KERNEL);   in k3_udma_glue_request_remote_rx_chn_common()
    1090: if (!rx_chn->flows)   in k3_udma_glue_request_remote_rx_chn_common()
    1120: rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;   in k3_udma_glue_request_remote_rx_chn_common()
    1284: flow = &rx_chn->flows[flow_idx];   in k3_udma_glue_rx_flow_get_fdq_id()
    [all …]
|
/linux-6.15/drivers/media/platform/amphion/
vpu_dbg.c
    207: for (i = 0; i < ARRAY_SIZE(inst->flows); i++) {   in vpu_dbg_instance()
    208: u32 idx = (inst->flow_idx + i) % (ARRAY_SIZE(inst->flows));   in vpu_dbg_instance()
    210: if (!inst->flows[idx])   in vpu_dbg_instance()
    213: inst->flows[idx] >= VPU_MSG_ID_NOOP ? "M" : "C",   in vpu_dbg_instance()
    214: vpu_id_name(inst->flows[idx]));   in vpu_dbg_instance()
    506: inst->flows[inst->flow_idx] = flow;   in vpu_inst_record_flow()
    507: inst->flow_idx = (inst->flow_idx + 1) % (ARRAY_SIZE(inst->flows));   in vpu_inst_record_flow()
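
vpu_inst_record_flow() writes into a fixed-size ring at flow_idx modulo the array size, and vpu_dbg_instance() dumps the ring starting at flow_idx so entries print oldest-first, skipping empty slots. A standalone sketch of that circular log follows; the 16-entry size, the zero-means-empty convention, and the record_flow()/dump_flows() names are assumptions for illustration.

    /*
     * Circular flow log: records go in at flow_idx modulo the array size;
     * the dump starts at flow_idx so entries come out oldest-first.
     */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static unsigned int flows[16];      /* ring of recorded flow ids */
    static unsigned int flow_idx;       /* next slot to overwrite */

    static void record_flow(unsigned int flow)  /* vpu_inst_record_flow() */
    {
        flows[flow_idx] = flow;
        flow_idx = (flow_idx + 1) % ARRAY_SIZE(flows);
    }

    static void dump_flows(void)                /* vpu_dbg_instance() loop */
    {
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(flows); i++) {
            unsigned int idx = (flow_idx + i) % ARRAY_SIZE(flows);

            if (!flows[idx])            /* slot never written */
                continue;
            printf("flow %u\n", flows[idx]);
        }
    }

    int main(void)
    {
        unsigned int f;

        for (f = 1; f <= 20; f++)       /* wraps: only the last 16 survive */
            record_flow(f);
        dump_flows();
        return 0;
    }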
|
/linux-6.15/drivers/infiniband/hw/hfi1/
tid_rdma.c
    1612: kfree(req->flows);   in hfi1_kern_exp_rcv_free_flows()
    1613: req->flows = NULL;   in hfi1_kern_exp_rcv_free_flows()
    1637: if (likely(req->flows))   in hfi1_kern_exp_rcv_alloc_flows()
    1639: flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp,   in hfi1_kern_exp_rcv_alloc_flows()
    1641: if (!flows)   in hfi1_kern_exp_rcv_alloc_flows()
    1645: flows[i].req = req;   in hfi1_kern_exp_rcv_alloc_flows()
    1646: flows[i].npagesets = 0;   in hfi1_kern_exp_rcv_alloc_flows()
    1650: req->flows = flows;   in hfi1_kern_exp_rcv_alloc_flows()
    1691: flow = &req->flows[tail];   in find_flow_ib()
    3070: flow = &req->flows[fidx];   in hfi1_tid_rdma_restart_req()
    [all …]
|
/linux-6.15/Documentation/networking/
nf_flowtable.rst
    33:  specifies what flows are placed into the flowtable. Hence, packets follow the
    34:  classic IP forwarding path unless the user explicitly instructs flows to use this
    111: You can identify offloaded flows through the [OFFLOAD] tag when listing your
    130: instead the real device is sufficient for the flowtable to track your flows.
    198: There is a workqueue that adds the flows to the hardware. Note that a few
    202: You can identify hardware offloaded flows through the [HW_OFFLOAD] tag when
|
scaling.rst
    31:  of logical flows. Packets for each flow are steered to a separate receive
    50:  applications that monitor TCP/IP flows (IDS, firewalls, etc.) and need
    252: to the same CPU is CPU load imbalance if flows vary in packet rate.
    258: Flow Limit is an optional RPS feature that prioritizes small flows
    259: during CPU contention by dropping packets from large flows slightly
    260: ahead of those from small flows. It is active only when an RPS or RFS
    266: new packet is dropped. Packets from other flows are still only
    270: even large flows maintain connectivity.
    288: identification of large flows and fewer false positives. The default
    325: flows to the CPUs where those flows are being processed. The flow hash
    [all …]
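
The scaling.rst excerpt describes the Flow Limit heuristic without showing it, so a minimal sketch of the documented behavior follows: remember the flow hash of recent enqueues, and once the backlog is past half full, drop a new packet whose flow dominates that recent history. The FLOW_HISTORY/FLOW_BUCKETS constants, both thresholds, and the flow_limit_drop() name are illustrative assumptions, not the exact net/core/dev.c implementation.

    /*
     * Sketch of Flow Limit: under contention, drop packets from flows that
     * account for more than half of the recent enqueue history, so small
     * flows keep getting through while elephants are trimmed first.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define FLOW_HISTORY 128            /* recent enqueues remembered */
    #define FLOW_BUCKETS 64             /* hash buckets for per-flow counts */

    static unsigned char history[FLOW_HISTORY]; /* ring of recent buckets */
    static unsigned int head;                   /* next history slot */
    static unsigned int buckets[FLOW_BUCKETS];  /* enqueues per flow */

    static bool flow_limit_drop(unsigned int flow_hash, unsigned int qlen,
                                unsigned int max_backlog)
    {
        unsigned int cur = flow_hash % FLOW_BUCKETS;

        /* slide the window: forget the oldest enqueue, record the newest */
        if (buckets[history[head]] > 0)
            buckets[history[head]]--;
        history[head] = cur;
        head = (head + 1) % FLOW_HISTORY;
        buckets[cur]++;

        /* only act under contention, so small queues are never affected */
        if (qlen < max_backlog / 2)
            return false;

        /* drop if this single flow dominates the recent history */
        return buckets[cur] > FLOW_HISTORY / 2;
    }

    int main(void)
    {
        unsigned int i, drops = 0;

        /* one elephant flow (hash 7) floods an already-contended queue */
        for (i = 0; i < 1000; i++)
            if (flow_limit_drop(7, 900, 1000))
                drops++;
        printf("dropped %u of 1000 elephant-flow packets\n", drops);
        return 0;
    }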
|
iou-zcrx.rst
    34: typically distribute flows across all HW Rx queues. Flow steering is required
    35: to ensure that only desired flows are directed towards HW queues that are
    42: copy flows away from queues that are configured for io_uring ZC Rx.
|
openvswitch.rst
    16:  table" that userspace populates with "flows" that map from keys based
    104: A wildcarded flow can represent a group of exact match flows. Each '1' bit
    108: by reducing the number of new flows that need to be processed by the user space program.
    120: two possible approaches: reactively install flows as they miss the kernel
    130: The behavior when using overlapping wildcarded flows is undefined. It is the
    133: performs best-effort detection of overlapping wildcarded flows and may reject
    146: future operations. The kernel is not required to index flows by the original
|
/linux-6.15/Documentation/admin-guide/pm/
system-wide.rst
    11: suspend-flows
|
/linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/
en_rep.h
    183: struct list_head flows;   (member)
    208: struct list_head flows;   (member)
|
eswitch_offloads.c
    1206: flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);   in esw_add_fdb_peer_miss_rules()
    1207: if (!flows) {   in esw_add_fdb_peer_miss_rules()
    1227: flows[vport->index] = flow;   in esw_add_fdb_peer_miss_rules()
    1239: flows[vport->index] = flow;   in esw_add_fdb_peer_miss_rules()
    1253: flows[vport->index] = flow;   in esw_add_fdb_peer_miss_rules()
    1286: if (!flows[vport->index])   in esw_add_fdb_peer_miss_rules()
    1292: if (!flows[vport->index])   in esw_add_fdb_peer_miss_rules()
    1307: kvfree(flows);   in esw_add_fdb_peer_miss_rules()
    1322: if (!flows)   in esw_del_fdb_peer_miss_rules()
    1330: if (!flows[vport->index])   in esw_del_fdb_peer_miss_rules()
    [all …]
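
esw_add_fdb_peer_miss_rules() fills a kvcalloc()'d array with one rule pointer per vport, and both its error path and esw_del_fdb_peer_miss_rules() skip slots that were never filled. A userspace sketch of that allocate-and-unwind pattern follows; calloc()/free() stand in for kvcalloc()/kvfree(), and struct rule plus add_miss_rule() are opaque placeholders, not driver API.

    /*
     * Allocate-and-unwind: one rule pointer per vport lives in a zeroed
     * array, so teardown can safely skip slots that were never filled.
     */
    #include <stdlib.h>
    #include <stdio.h>

    struct rule { int id; };

    static struct rule *add_miss_rule(int vport)
    {
        struct rule *r = malloc(sizeof(*r));

        if (r)
            r->id = vport;
        return r;
    }

    int main(void)
    {
        const int nvports = 8;
        struct rule **flows;
        int i;

        flows = calloc(nvports, sizeof(*flows));  /* kvcalloc() in the driver */
        if (!flows)
            return 1;

        for (i = 0; i < nvports; i++) {
            if (i == 5)             /* pretend one vport takes no rule */
                continue;
            flows[i] = add_miss_rule(i);
            if (!flows[i])
                goto cleanup;       /* partial failure: unwind what exists */
        }
        printf("installed rules for %d vports\n", nvports - 1);

    cleanup:
        for (i = 0; i < nvports; i++) {
            if (!flows[i])          /* same guard as the hits above */
                continue;
            free(flows[i]);
        }
        free(flows);                /* kvfree() in the driver */
        return 0;
    }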
|
/linux-6.15/Documentation/userspace-api/media/mediactl/
media-controller-model.rst
    26: by an entity flows from the entity's output to one or more entity
    31: pads, either on the same entity or on different entities. Data flows
|
/linux-6.15/net/mctp/test/
route-test.c
    1116: struct mctp_flow *flows[2];   in mctp_test_fragment_flow() (local)
    1140: flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP);   in mctp_test_fragment_flow()
    1141: KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]);   in mctp_test_fragment_flow()
    1142: KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key);   in mctp_test_fragment_flow()
    1143: KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk);   in mctp_test_fragment_flow()
    1145: flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP);   in mctp_test_fragment_flow()
    1146: KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]);   in mctp_test_fragment_flow()
    1147: KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key);   in mctp_test_fragment_flow()
|
/linux-6.15/net/core/
pktgen.c
    414:  struct flow_state *flows;   (member)
    2378: pkt_dev->flows[flow].count = 0;   in f_pick()
    2379: pkt_dev->flows[flow].flags = 0;   in f_pick()
    2389: pkt_dev->flows[flow].count = 0;   in f_pick()
    2390: pkt_dev->flows[flow].flags = 0;   in f_pick()
    2424: pkt_dev->flows[flow].x = x;   in get_ipsec_sa()
    2652: pkt_dev->flows[flow].count++;   in mod_cur_headers()
    2736: pkt_dev->flows[i].x = NULL;   in free_SAs()
    3833: if (pkt_dev->flows == NULL) {   in pktgen_add_device()
    3894: vfree(pkt_dev->flows);   in pktgen_add_device()
    [all …]
|
/linux-6.15/Documentation/admin-guide/blockdev/drbd/
figures.rst
    5: Data flows that relate some functions, and write packets
|