| /linux-6.15/drivers/net/ethernet/intel/ice/ |
| ice_base.c |
|    109:  q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);   in ice_vsi_alloc_q_vector()
|    110:  if (!q_vector)   in ice_vsi_alloc_q_vector()
|    147:  q_vector->reg_idx = q_vector->irq.index;   in ice_vsi_alloc_q_vector()
|    148:  q_vector->vf_reg_idx = q_vector->irq.index;   in ice_vsi_alloc_q_vector()
|    165:  kfree(q_vector);   in ice_vsi_alloc_q_vector()
|    839:  tx_ring->q_vector = q_vector;   in ice_vsi_map_rings_to_vectors()
|    856:  rx_ring->q_vector = q_vector;   in ice_vsi_map_rings_to_vectors()
|   1037:  ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);   in ice_cfg_itr()
|   1040:  ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);   in ice_cfg_itr()
|   1042:  ice_write_intrl(q_vector, q_vector->intrl);   in ice_cfg_itr()
|   [all …]
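The ice_base.c hits above come from the per-vector allocation path: one structure is allocated per MSI-X interrupt, its register index is taken from the interrupt's index, and it is freed again on error or teardown. A minimal sketch of that pattern, using simplified stand-in types (`my_q_vector`, `my_vsi` and their fields are assumptions, not the ice layout):

```c
#include <linux/netdevice.h>
#include <linux/slab.h>

struct my_vsi;

/* Simplified stand-in for a driver's per-interrupt vector (assumed layout). */
struct my_q_vector {
	struct my_vsi *vsi;
	u16 v_idx;	/* index within the VSI's vector array */
	u16 reg_idx;	/* register index, taken from the IRQ  */
	struct napi_struct napi;
};

struct my_vsi {
	struct my_q_vector **q_vectors;	/* array sized to the vector count */
};

static int my_vsi_alloc_q_vector(struct my_vsi *vsi, u16 v_idx, u16 irq_index)
{
	struct my_q_vector *q_vector;

	/* One zeroed vector per MSI-X interrupt. */
	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	/* ITR/interrupt registers are addressed by the IRQ's index. */
	q_vector->reg_idx = irq_index;

	vsi->q_vectors[v_idx] = q_vector;
	return 0;
}

static void my_vsi_free_q_vector(struct my_vsi *vsi, u16 v_idx)
{
	kfree(vsi->q_vectors[v_idx]);
	vsi->q_vectors[v_idx] = NULL;
}
```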
|
| ice_trace.h |
|     64:  TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
|     65:  TP_ARGS(q_vector, dim),
|     66:  TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
|     70:  TP_fast_assign(__entry->q_vector = q_vector;
|     76:  __entry->q_vector->rx.rx_ring->q_index,
|     86:  TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
|     87:  TP_ARGS(q_vector, dim)
|     92:  TP_ARGS(q_vector, dim),
|     97:  TP_fast_assign(__entry->q_vector = q_vector;
|    103:  __entry->q_vector->tx.tx_ring->q_index,
|   [all …]
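These ice_trace.h fragments are pieces of tracepoints fired from the DIM (dynamic interrupt moderation) path. A hedged sketch of how the TP_PROTO/TP_ARGS/TP_STRUCT__entry/TP_fast_assign lines fit together in a trace header; the `mydrv_*` names are invented and the real ice class records ring and queue-index fields rather than the reduced pair shown here:

```c
/* Hypothetical trace header, e.g. "mydrv_trace.h"; one .c file in the driver
 * must #define CREATE_TRACE_POINTS before including it.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mydrv

#if !defined(_MYDRV_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _MYDRV_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/dim.h>

struct my_q_vector;

/* One event class shared by the rx and tx DIM tracepoints. */
DECLARE_EVENT_CLASS(mydrv_dim_template,
	TP_PROTO(struct my_q_vector *q_vector, struct dim *dim),
	TP_ARGS(q_vector, dim),
	TP_STRUCT__entry(__field(struct my_q_vector *, q_vector)
			 __field(u8, profile_ix)),
	TP_fast_assign(__entry->q_vector = q_vector;
		       __entry->profile_ix = dim->profile_ix;),
	TP_printk("q_vector=%p profile_ix=%u",
		  __entry->q_vector, __entry->profile_ix)
);

DEFINE_EVENT(mydrv_dim_template, mydrv_tx_dim_work,
	     TP_PROTO(struct my_q_vector *q_vector, struct dim *dim),
	     TP_ARGS(q_vector, dim)
);

#endif /* _MYDRV_TRACE_H_ */

/* This boilerplate must stay outside the include guard. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mydrv_trace
#include <trace/define_trace.h>
```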
|
| ice_xsk.c |
|     71:  if (!vsi->netdev || !q_vector)   in ice_qvec_toggle_napi()
|     75:  napi_enable(&q_vector->napi);   in ice_qvec_toggle_napi()
|     77:  napi_disable(&q_vector->napi);   in ice_qvec_toggle_napi()
|     88:  struct ice_q_vector *q_vector)   in ice_qvec_dis_irq() argument
|    103:  if (q_vector) {   in ice_qvec_dis_irq()
|    124:  ice_cfg_itr(hw, q_vector);   in ice_qvec_cfg_msix()
|    166:  struct ice_q_vector *q_vector;   in ice_qp_dis() local
|    177:  q_vector = rx_ring->q_vector;   in ice_qp_dis()
|    217:  struct ice_q_vector *q_vector;   in ice_qp_ena() local
|    240:  q_vector = vsi->rx_rings[q_idx]->q_vector;   in ice_qp_ena()
|   [all …]
|
| ice_lib.c |
|    484:  if (!q_vector->tx.tx_ring)   in ice_msix_clean_ctrl_vsi()
|    503:  if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)   in ice_msix_clean_rings()
|    506:  q_vector->total_events++;   in ice_msix_clean_rings()
|    508:  napi_schedule(&q_vector->napi);   in ice_msix_clean_rings()
|   1357:  if (q_vector) {   in ice_vsi_clear_rings()
|   1872:  struct ice_q_vector *q_vector;   in ice_write_itr() local
|   1875:  if (!q_vector)   in ice_write_itr()
|   1893:  if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {   in ice_set_q_vector_intrl()
|   1902:  ice_write_intrl(q_vector, q_vector->intrl);   in ice_set_q_vector_intrl()
|   1924:  ice_cfg_itr(hw, q_vector);   in ice_vsi_cfg_msix()
|   [all …]
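The ice_msix_clean_rings() hits show the usual per-vector MSI-X handler shape: the hard IRQ does no packet work, it only counts the event and kicks NAPI. A minimal sketch with stand-in types (`my_*` names and fields are assumptions):

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_ring;

struct my_q_vector {
	struct napi_struct napi;
	struct my_ring *tx_ring;	/* NULL if no TX ring mapped */
	struct my_ring *rx_ring;	/* NULL if no RX ring mapped */
	u64 total_events;		/* interrupt statistics       */
};

static irqreturn_t my_msix_clean_rings(int irq, void *data)
{
	struct my_q_vector *q_vector = data;	/* cookie from request_irq() */

	/* Nothing to poll if the vector has no rings attached. */
	if (!q_vector->tx_ring && !q_vector->rx_ring)
		return IRQ_HANDLED;

	q_vector->total_events++;
	napi_schedule(&q_vector->napi);	/* defer the real work to softirq */

	return IRQ_HANDLED;
}
```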
|
| ice_txrx.c |
|   1453:  struct ice_vsi *vsi = q_vector->vsi;   in ice_enable_interrupt()
|   1454:  bool wb_en = q_vector->wb_on_itr;   in ice_enable_interrupt()
|   1468:  q_vector->wb_on_itr = false;   in ice_enable_interrupt()
|   1500:  struct ice_vsi *vsi = q_vector->vsi;   in ice_set_wb_on_itr()
|   1503:  if (q_vector->wb_on_itr)   in ice_set_wb_on_itr()
|   1515:  q_vector->wb_on_itr = true;   in ice_set_wb_on_itr()
|   1529:  struct ice_q_vector *q_vector =   in ice_napi_poll() local
|   1592:  ice_set_wb_on_itr(q_vector);   in ice_napi_poll()
|   1600:  ice_net_dim(q_vector);   in ice_napi_poll()
|   1601:  ice_enable_interrupt(q_vector);   in ice_napi_poll()
|   [all …]
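The ice_napi_poll() hits point at the poll-completion decision: when the budget is exhausted, stay in polling mode and only arm write-back-on-ITR; when work finished early, feed DIM and re-enable the interrupt. A sketch of that shape, with stub helpers standing in for the driver's real ones (all `my_*` names are assumptions):

```c
#include <linux/kernel.h>
#include <linux/netdevice.h>

struct my_q_vector {
	struct napi_struct napi;
};

/* Stubs standing in for driver helpers (hypothetical). */
static int my_clean_rings(struct my_q_vector *qv, int budget) { return 0; }
static void my_set_wb_on_itr(struct my_q_vector *qv) { }
static void my_net_dim(struct my_q_vector *qv) { }
static void my_enable_interrupt(struct my_q_vector *qv) { }

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_q_vector *q_vector =
		container_of(napi, struct my_q_vector, napi);
	int work_done;

	work_done = my_clean_rings(q_vector, budget);

	if (work_done == budget) {
		/* More packets pending: keep polling and let completions be
		 * written back without raising a new interrupt.
		 */
		my_set_wb_on_itr(q_vector);
		return budget;
	}

	if (napi_complete_done(napi, work_done)) {
		my_net_dim(q_vector);		/* update ITR from DIM      */
		my_enable_interrupt(q_vector);	/* also clears wb_on_itr    */
	}

	/* Returning less than budget tells NAPI this vector is done. */
	return min(work_done, budget - 1);
}
```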
|
| /linux-6.15/drivers/net/ethernet/intel/fm10k/ |
| fm10k_debugfs.c |
|    116:  struct fm10k_q_vector *q_vector = ring->q_vector;   in fm10k_dbg_desc_open() local
|    120:  if (ring < q_vector->rx.ring)   in fm10k_dbg_desc_open()
|    152:  struct fm10k_intfc *interface = q_vector->interface;   in fm10k_dbg_q_vector_init()
|    165:  for (i = 0; i < q_vector->tx.count; i++) {   in fm10k_dbg_q_vector_init()
|    166:  struct fm10k_ring *ring = &q_vector->tx.ring[i];   in fm10k_dbg_q_vector_init()
|    171:  q_vector->dbg_q_vector, ring,   in fm10k_dbg_q_vector_init()
|    176:  for (i = 0; i < q_vector->rx.count; i++) {   in fm10k_dbg_q_vector_init()
|    177:  struct fm10k_ring *ring = &q_vector->rx.ring[i];   in fm10k_dbg_q_vector_init()
|    182:  q_vector->dbg_q_vector, ring,   in fm10k_dbg_q_vector_init()
|    196:  debugfs_remove_recursive(q_vector->dbg_q_vector);   in fm10k_dbg_q_vector_exit()
|   [all …]
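fm10k_dbg_q_vector_init() builds a debugfs directory per q_vector with one descriptor-dump file per ring. A sketch of that layout under stated assumptions (the directory/file names, the stand-in structs and the elided fops are invented, not fm10k's exact ones):

```c
#include <linux/debugfs.h>

struct my_ring {
	unsigned int queue_index;
};

struct my_q_vector {
	unsigned int v_idx;
	unsigned int tx_count, rx_count;
	struct my_ring *tx_ring, *rx_ring;
	struct dentry *dbg_dir;
};

/* seq_file descriptor dump; .open/.read elided in this sketch. */
static const struct file_operations my_dbg_desc_fops;

static void my_dbg_q_vector_init(struct dentry *parent,
				 struct my_q_vector *q_vector)
{
	char name[24];
	unsigned int i;

	/* e.g. "q_vector.003" under the interface's debugfs directory */
	snprintf(name, sizeof(name), "q_vector.%03u", q_vector->v_idx);
	q_vector->dbg_dir = debugfs_create_dir(name, parent);

	for (i = 0; i < q_vector->tx_count; i++) {
		struct my_ring *ring = &q_vector->tx_ring[i];

		snprintf(name, sizeof(name), "tx_ring_%u", ring->queue_index);
		debugfs_create_file(name, 0600, q_vector->dbg_dir, ring,
				    &my_dbg_desc_fops);
	}

	for (i = 0; i < q_vector->rx_count; i++) {
		struct my_ring *ring = &q_vector->rx_ring[i];

		snprintf(name, sizeof(name), "rx_ring_%u", ring->queue_index);
		debugfs_create_file(name, 0600, q_vector->dbg_dir, ring,
				    &my_dbg_desc_fops);
	}
}

static void my_dbg_q_vector_exit(struct my_q_vector *q_vector)
{
	/* Removes the per-vector directory and everything beneath it. */
	debugfs_remove_recursive(q_vector->dbg_dir);
	q_vector->dbg_dir = NULL;
}
```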
|
| fm10k_main.c |
|   1600:  q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);   in fm10k_alloc_q_vector()
|   1601:  if (!q_vector)   in fm10k_alloc_q_vector()
|   1608:  interface->q_vector[v_idx] = q_vector;   in fm10k_alloc_q_vector()
|   1610:  q_vector->v_idx = v_idx;   in fm10k_alloc_q_vector()
|   1613:  ring = q_vector->ring;   in fm10k_alloc_q_vector()
|   1616:  q_vector->tx.ring = ring;   in fm10k_alloc_q_vector()
|   1628:  ring->q_vector = q_vector;   in fm10k_alloc_q_vector()
|   1646:  q_vector->rx.ring = ring;   in fm10k_alloc_q_vector()
|   1658:  ring->q_vector = q_vector;   in fm10k_alloc_q_vector()
|   1691:  struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];   in fm10k_free_q_vector() local
|   [all …]
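Unlike the ice sketch above, fm10k_alloc_q_vector() packs the vector and all of its rings into a single allocation sized with struct_size(), with the rings in a trailing flexible array (TX rings first, then RX). A sketch of that pattern with stand-in types (the layout shown is an assumption):

```c
#include <linux/slab.h>

struct my_q_vector;

struct my_ring {
	struct my_q_vector *q_vector;	/* back-pointer to the owning vector */
};

struct my_q_vector {
	u16 v_idx;
	struct my_ring *tx_ring;	/* first TX ring inside ring[]   */
	struct my_ring *rx_ring;	/* first RX ring inside ring[]   */
	struct my_ring ring[];		/* txr_count + rxr_count entries */
};

static struct my_q_vector *my_alloc_q_vector(u16 v_idx,
					     unsigned int txr_count,
					     unsigned int rxr_count)
{
	unsigned int ring_count = txr_count + rxr_count;
	struct my_q_vector *q_vector;
	struct my_ring *ring;
	unsigned int i;

	/* One allocation covers the vector and every ring it services. */
	q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
	if (!q_vector)
		return NULL;

	q_vector->v_idx = v_idx;

	/* TX rings first, then RX rings, all pointing back at the vector. */
	ring = q_vector->ring;
	q_vector->tx_ring = ring;
	for (i = 0; i < txr_count; i++, ring++)
		ring->q_vector = q_vector;

	q_vector->rx_ring = ring;
	for (i = 0; i < rxr_count; i++, ring++)
		ring->q_vector = q_vector;

	return q_vector;
}
```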
|
| fm10k_pci.c |
|   1178:  q_vector = interface->q_vector[q_idx];   in fm10k_napi_enable_all()
|   1187:  if (q_vector->rx.count || q_vector->tx.count)   in fm10k_msix_clean_rings()
|   1732:  q_vector = interface->q_vector[vector];   in fm10k_qv_free_irq()
|   1734:  if (!q_vector->tx.count && !q_vector->rx.count)   in fm10k_qv_free_irq()
|   1768:  if (q_vector->tx.count && q_vector->rx.count) {   in fm10k_qv_request_irq()
|   1769:  snprintf(q_vector->name, sizeof(q_vector->name),   in fm10k_qv_request_irq()
|   1773:  snprintf(q_vector->name, sizeof(q_vector->name),   in fm10k_qv_request_irq()
|   1790:  q_vector->name, q_vector);   in fm10k_qv_request_irq()
|   1816:  q_vector = interface->q_vector[vector];   in fm10k_qv_request_irq()
|   1818:  if (!q_vector->tx.count && !q_vector->rx.count)   in fm10k_qv_request_irq()
|   [all …]
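fm10k_qv_request_irq() shows the interrupt-naming convention that recurs in the iavf, txgbe and ngbe entries below: vectors carrying both ring types get a "TxRx" name, single-purpose vectors get "tx"/"rx", idle vectors are skipped, and the q_vector pointer itself is the cookie passed to request_irq(). A sketch with stand-in types (the struct and handler are assumptions):

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_q_vector {
	char name[IFNAMSIZ + 16];
	unsigned int tx_count, rx_count;
	struct napi_struct napi;
};

static irqreturn_t my_qv_msix_handler(int irq, void *data)
{
	struct my_q_vector *q_vector = data;

	if (q_vector->rx_count || q_vector->tx_count)
		napi_schedule_irqoff(&q_vector->napi);
	return IRQ_HANDLED;
}

static int my_request_queue_irq(struct net_device *netdev, unsigned int irq,
				struct my_q_vector *q_vector, int q_idx)
{
	/* Idle vectors keep no name and no IRQ. */
	if (!q_vector->tx_count && !q_vector->rx_count) {
		q_vector->name[0] = '\0';
		return 0;
	}

	if (q_vector->tx_count && q_vector->rx_count)
		snprintf(q_vector->name, sizeof(q_vector->name),
			 "%s-TxRx-%d", netdev->name, q_idx);
	else if (q_vector->tx_count)
		snprintf(q_vector->name, sizeof(q_vector->name),
			 "%s-tx-%d", netdev->name, q_idx);
	else
		snprintf(q_vector->name, sizeof(q_vector->name),
			 "%s-rx-%d", netdev->name, q_idx);

	/* The q_vector itself is the cookie handed back to the handler. */
	return request_irq(irq, my_qv_msix_handler, 0,
			   q_vector->name, q_vector);
}
```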
|
| /linux-6.15/drivers/net/ethernet/wangxun/txgbe/ |
| txgbe_irq.c |
|     44:  struct wx_q_vector *q_vector = wx->q_vector[vector];   in txgbe_request_queue_irqs() local
|     47:  if (q_vector->tx.ring && q_vector->rx.ring)   in txgbe_request_queue_irqs()
|     48:  snprintf(q_vector->name, sizeof(q_vector->name) - 1,   in txgbe_request_queue_irqs()
|     55:  q_vector->name, q_vector);   in txgbe_request_queue_irqs()
|     58:  q_vector->name, err);   in txgbe_request_queue_irqs()
|     69:  wx->q_vector[vector]);   in txgbe_request_queue_irqs()
|    107:  struct wx_q_vector *q_vector;   in txgbe_misc_irq_handle() local
|    129:  q_vector = wx->q_vector[0];   in txgbe_misc_irq_handle()
|    130:  napi_schedule_irqoff(&q_vector->napi);   in txgbe_misc_irq_handle()
|
| /linux-6.15/drivers/net/ethernet/intel/iavf/ |
| iavf_txrx.c |
|    557:  itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);   in iavf_update_itr()
|    840:  struct iavf_q_vector *q_vector = rx_ring->q_vector;   in iavf_receive_skb() local
|   1541:  iavf_update_itr(q_vector, &q_vector->tx);   in iavf_update_enable_itr()
|   1542:  iavf_update_itr(q_vector, &q_vector->rx);   in iavf_update_enable_itr()
|   1552:  if (q_vector->rx.target_itr < q_vector->rx.current_itr) {   in iavf_update_enable_itr()
|   1556:  q_vector->rx.current_itr = q_vector->rx.target_itr;   in iavf_update_enable_itr()
|   1559:  ((q_vector->rx.target_itr - q_vector->rx.current_itr) <   in iavf_update_enable_itr()
|   1560:  (q_vector->tx.target_itr - q_vector->tx.current_itr))) {   in iavf_update_enable_itr()
|   1566:  q_vector->tx.current_itr = q_vector->tx.target_itr;   in iavf_update_enable_itr()
|   1568:  } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {   in iavf_update_enable_itr()
|   [all …]
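The iavf_update_enable_itr() lines (i40e uses the same scheme further down) choose which ITR to program alongside the interrupt-enable write: a shrinking RX ITR wins outright, then TX if it shrinks or if its pending increase is larger than RX's, then any remaining RX increase. A sketch of just that decision, with stand-in names (the enum, fields and helper are assumptions):

```c
#include <linux/types.h>

struct my_ring_container {
	u16 current_itr;	/* value currently programmed       */
	u16 target_itr;		/* value the moderation logic wants */
};

enum my_itr_idx { MY_RX_ITR = 0, MY_TX_ITR = 1, MY_NO_ITR = 3 };

static enum my_itr_idx my_pick_itr(struct my_ring_container *rx,
				   struct my_ring_container *tx)
{
	if (rx->target_itr < rx->current_itr) {
		/* RX ITR needs to shrink: lower it immediately. */
		rx->current_itr = rx->target_itr;
		return MY_RX_ITR;
	} else if (tx->target_itr < tx->current_itr ||
		   (rx->target_itr - rx->current_itr) <
		   (tx->target_itr - tx->current_itr)) {
		/* TX needs to shrink, or TX's pending increase is larger
		 * than RX's: apply the TX change first.
		 */
		tx->current_itr = tx->target_itr;
		return MY_TX_ITR;
	} else if (rx->current_itr != rx->target_itr) {
		/* RX still differs: apply the RX increase; any smaller TX
		 * increase waits for a later pass.
		 */
		rx->current_itr = rx->target_itr;
		return MY_RX_ITR;
	}
	/* Both already at target: just re-enable the interrupt. */
	return MY_NO_ITR;
}
```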
|
| iavf_main.c |
|    438:  if (!q_vector->tx.ring && !q_vector->rx.ring)   in iavf_msix_clean_rings()
|    459:  rx_ring->q_vector = q_vector;   in iavf_map_vector_to_rxq()
|    469:  q_vector->rx.current_itr = q_vector->rx.target_itr;   in iavf_map_vector_to_rxq()
|    485:  tx_ring->q_vector = q_vector;   in iavf_map_vector_to_txq()
|    495:  q_vector->tx.current_itr = q_vector->tx.target_itr;   in iavf_map_vector_to_txq()
|    582:  if (q_vector->tx.ring && q_vector->rx.ring) {   in iavf_request_traffic_irqs()
|    583:  snprintf(q_vector->name, sizeof(q_vector->name),   in iavf_request_traffic_irqs()
|    587:  snprintf(q_vector->name, sizeof(q_vector->name),   in iavf_request_traffic_irqs()
|    590:  snprintf(q_vector->name, sizeof(q_vector->name),   in iavf_request_traffic_irqs()
|    599:  q_vector->name,   in iavf_request_traffic_irqs()
|   [all …]
|
| /linux-6.15/drivers/net/ethernet/intel/ixgbe/ |
| ixgbe_lib.c |
|    859:  q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),   in ixgbe_alloc_q_vector()
|    861:  if (!q_vector)   in ixgbe_alloc_q_vector()
|    862:  q_vector = kzalloc(struct_size(q_vector, ring, ring_count),   in ixgbe_alloc_q_vector()
|    864:  if (!q_vector)   in ixgbe_alloc_q_vector()
|    874:  q_vector->cpu = -1;   in ixgbe_alloc_q_vector()
|    881:  adapter->q_vector[v_idx] = q_vector;   in ixgbe_alloc_q_vector()
|    910:  ring = q_vector->ring;   in ixgbe_alloc_q_vector()
|    918:  ring->q_vector = q_vector;   in ixgbe_alloc_q_vector()
|    944:  ring->q_vector = q_vector;   in ixgbe_alloc_q_vector()
|    972:  ring->q_vector = q_vector;   in ixgbe_alloc_q_vector()
|   [all …]
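ixgbe_alloc_q_vector() adds a NUMA twist to the flexible-array allocation: it first tries kzalloc_node() to place the vector on a specific memory node and falls back to kzalloc() on any node. A minimal sketch, assuming a node chosen from the servicing CPU (the real ixgbe picks the node differently; types are stand-ins):

```c
#include <linux/slab.h>
#include <linux/topology.h>

struct my_ring {
	unsigned int queue_index;	/* ring fields elided */
};

struct my_q_vector {
	int cpu;			/* CPU last programmed into HW, -1 = none */
	struct my_ring ring[];
};

static struct my_q_vector *my_alloc_q_vector(int cpu, unsigned int ring_count)
{
	int node = cpu_to_node(cpu);
	struct my_q_vector *q_vector;

	/* Prefer memory local to the CPU that will run this vector's NAPI. */
	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	if (!q_vector)
		return NULL;

	/* -1 means "no CPU affinity programmed into the hardware yet". */
	q_vector->cpu = -1;
	return q_vector;
}
```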
|
| ixgbe_txrx_common.h |
|     24:  void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
|     40:  int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
|     44:  bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
|     50:  struct ixgbe_q_vector *q_vector, u64 pkts,
|     53:  struct ixgbe_q_vector *q_vector, u64 pkts,
|
| ixgbe_xsk.c |
|    223:  skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);   in ixgbe_construct_skb_zc()
|    247:  int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,   in ixgbe_clean_rx_irq_zc() argument
|    252:  struct ixgbe_adapter *adapter = q_vector->adapter;   in ixgbe_clean_rx_irq_zc()
|    349:  ixgbe_rx_skb(q_vector, skb);   in ixgbe_clean_rx_irq_zc()
|    361:  ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,   in ixgbe_clean_rx_irq_zc()
|    456:  bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,   in ixgbe_clean_xdp_tx_irq() argument
|    497:  ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,   in ixgbe_clean_xdp_tx_irq()
|    506:  return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);   in ixgbe_clean_xdp_tx_irq()
|    531:  if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {   in ixgbe_xsk_wakeup()
|    532:  u64 eics = BIT_ULL(ring->q_vector->v_idx);   in ixgbe_xsk_wakeup()
|
| /linux-6.15/drivers/net/ethernet/intel/idpf/ |
| idpf_txrx.c |
|   3551:  kfree(q_vector->tx);   in idpf_vport_intr_rel()
|   3575:  if (!q_vector)   in idpf_vport_intr_rel_irq()
|   3685:  idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,   in idpf_net_dim()
|   3704:  idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,   in idpf_net_dim()
|   3749:  if (q_vector->num_rxq && q_vector->num_txq)   in idpf_vport_intr_req_irq()
|   4122:  q->q_vector->tx[q->q_vector->num_txq++] = q;   in idpf_vport_intr_map_vector_to_qs()
|   4129:  q->q_vector->complq[q->q_vector->num_complq++] = q;   in idpf_vport_intr_map_vector_to_qs()
|   4238:  q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),   in idpf_vport_intr_alloc()
|   4240:  if (!q_vector->tx)   in idpf_vport_intr_alloc()
|   4243:  q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),   in idpf_vport_intr_alloc()
|   [all …]
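idpf takes yet another bookkeeping approach: instead of embedding rings, each vector holds kcalloc()'d arrays of queue pointers, and the mapping step appends each queue to its vector's array while setting the back-pointer. A sketch of that pattern with stand-in types (the error-handling shape and names are assumptions):

```c
#include <linux/slab.h>
#include <linux/types.h>

struct my_queue;

struct my_q_vector {
	u16 num_txq, num_rxq;
	struct my_queue **tx;	/* txqs_per_vector entries */
	struct my_queue **rx;	/* rxqs_per_vector entries */
};

struct my_queue {
	struct my_q_vector *q_vector;	/* back-pointer set while mapping */
};

static int my_vector_alloc_queue_arrays(struct my_q_vector *q_vector,
					u16 txqs_per_vector,
					u16 rxqs_per_vector)
{
	q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
			       GFP_KERNEL);
	if (!q_vector->tx)
		return -ENOMEM;

	q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
			       GFP_KERNEL);
	if (!q_vector->rx) {
		kfree(q_vector->tx);
		q_vector->tx = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* Mapping step: hang a TX queue off the vector chosen for it. */
static void my_map_txq_to_vector(struct my_queue *q,
				 struct my_q_vector *q_vector)
{
	q->q_vector = q_vector;
	q_vector->tx[q_vector->num_txq++] = q;
}
```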
|
| idpf_txrx.h |
|    554:  struct idpf_q_vector *q_vector;   member
|    678:  struct idpf_q_vector *q_vector;   member
|    735:  struct idpf_q_vector *q_vector;   member
|    788:  struct idpf_q_vector *q_vector;   member
|    915:  static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)   in idpf_q_vector_to_mem() argument
|    919:  if (!q_vector)   in idpf_q_vector_to_mem()
|    922:  cpu = cpumask_first(&q_vector->napi.config->affinity_mask);   in idpf_q_vector_to_mem()
|    990:  if (q_vector->wb_on_itr)   in idpf_vport_intr_set_wb_on_itr()
|    993:  q_vector->wb_on_itr = true;   in idpf_vport_intr_set_wb_on_itr()
|    994:  reg = &q_vector->intr_reg;   in idpf_vport_intr_set_wb_on_itr()
|   [all …]
|
| /linux-6.15/drivers/net/ethernet/intel/igc/ |
| igc_main.c |
|   4472:  igc_update_itr(q_vector, &q_vector->tx);   in igc_set_itr()
|   4473:  igc_update_itr(q_vector, &q_vector->rx);   in igc_set_itr()
|   4475:  current_itr = max(q_vector->rx.itr, q_vector->tx.itr);   in igc_set_itr()
|   4799:  q_vector = adapter->q_vector[v_idx];   in igc_alloc_q_vector()
|   4812:  adapter->q_vector[v_idx] = q_vector;   in igc_alloc_q_vector()
|   4842:  ring->q_vector = q_vector;   in igc_alloc_q_vector()
|   4864:  ring->q_vector = q_vector;   in igc_alloc_q_vector()
|   5657:  if (q_vector->rx.ring && q_vector->tx.ring)   in igc_request_msix()
|   5958:  struct igc_q_vector *q_vector = adapter->q_vector[0];   in igc_intr_msi() local
|   5995:  struct igc_q_vector *q_vector = adapter->q_vector[0];   in igc_intr() local
|   [all …]
|
| igc_xdp.c |
|     32:  napi_disable(&adapter->rx_ring[i]->q_vector->napi);   in igc_xdp_set_prog()
|     47:  napi_enable(&adapter->rx_ring[i]->q_vector->napi);   in igc_xdp_set_prog()
|     92:  napi = &rx_ring->q_vector->napi;   in igc_xdp_enable_pool()
|    138:  napi = &rx_ring->q_vector->napi;   in igc_xdp_disable_pool()
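igc_xdp_set_prog() illustrates why drivers keep a NAPI pointer per queue pair: while the interface is running, every RX queue's NAPI context is parked before the XDP program pointer is swapped, then re-enabled. A hedged sketch of that flow (the adapter layout is a stand-in, and real drivers also quiesce the rings themselves and re-register memory models):

```c
#include <linux/atomic.h>
#include <linux/bpf.h>
#include <linux/netdevice.h>

struct my_adapter {
	unsigned int num_rx_queues;
	struct napi_struct *rx_napi[64];	/* &rx_ring[i]->q_vector->napi */
	struct bpf_prog *xdp_prog;
	struct net_device *netdev;
};

static int my_xdp_set_prog(struct my_adapter *adapter, struct bpf_prog *prog)
{
	bool if_running = netif_running(adapter->netdev);
	struct bpf_prog *old_prog;
	unsigned int i;

	/* Park every RX queue's NAPI before touching the program pointer. */
	if (if_running)
		for (i = 0; i < adapter->num_rx_queues; i++)
			napi_disable(adapter->rx_napi[i]);

	old_prog = xchg(&adapter->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);	/* drop reference to the old program */

	if (if_running)
		for (i = 0; i < adapter->num_rx_queues; i++)
			napi_enable(adapter->rx_napi[i]);

	return 0;
}
```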
|
| /linux-6.15/drivers/net/ethernet/intel/ixgbevf/ |
| ixgbevf_main.c |
|   1363:  q_vector = adapter->q_vector[v_idx];   in ixgbevf_configure_msix()
|   1471:  ixgbevf_update_itr(q_vector, &q_vector->tx);   in ixgbevf_set_itr()
|   1472:  ixgbevf_update_itr(q_vector, &q_vector->rx);   in ixgbevf_set_itr()
|   1527:  if (q_vector->rx.ring || q_vector->tx.ring)   in ixgbevf_msix_clean_rings()
|   1566:  q_vector->name, q_vector);   in ixgbevf_request_msix_irqs()
|   2163:  q_vector = adapter->q_vector[q_idx];   in ixgbevf_napi_enable_all()
|   2175:  q_vector = adapter->q_vector[q_idx];   in ixgbevf_napi_disable_all()
|   2747:  adapter->q_vector[v_idx] = q_vector;   in ixgbevf_alloc_q_vector()
|   2760:  ring->q_vector = q_vector;   in ixgbevf_alloc_q_vector()
|   2788:  ring->q_vector = q_vector;   in ixgbevf_alloc_q_vector()
|   [all …]
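ixgbevf_napi_enable_all()/ixgbevf_napi_disable_all() (and the matching wx_lib.c helpers below) are simple walks over the adapter's vector array. A minimal sketch with stand-in types:

```c
#include <linux/netdevice.h>

struct my_q_vector {
	struct napi_struct napi;
};

struct my_adapter {
	unsigned int num_q_vectors;
	struct my_q_vector *q_vector[64];	/* sized generously for the sketch */
};

static void my_napi_enable_all(struct my_adapter *adapter)
{
	unsigned int q_idx;

	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
		napi_enable(&adapter->q_vector[q_idx]->napi);
}

static void my_napi_disable_all(struct my_adapter *adapter)
{
	unsigned int q_idx;

	/* napi_disable() waits for any in-flight poll on that vector. */
	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
		napi_disable(&adapter->q_vector[q_idx]->napi);
}
```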
|
| /linux-6.15/drivers/net/ethernet/wangxun/libwx/ |
| wx_lib.c |
|   1605:  q_vector = wx->q_vector[q_idx];   in wx_napi_enable_all()
|   1617:  q_vector = wx->q_vector[q_idx];   in wx_napi_disable_all()
|   1824:  q_vector = kzalloc(struct_size(q_vector, ring, ring_count),   in wx_alloc_q_vector()
|   1834:  wx->q_vector[v_idx] = q_vector;   in wx_alloc_q_vector()
|   1869:  ring->q_vector = q_vector;   in wx_alloc_q_vector()
|   1896:  ring->q_vector = q_vector;   in wx_alloc_q_vector()
|   1930:  struct wx_q_vector *q_vector = wx->q_vector[v_idx];   in wx_free_q_vector() local
|   2074:  if (q_vector->rx.ring || q_vector->tx.ring)   in wx_msix_clean_rings()
|   2093:  struct wx_q_vector *q_vector = wx->q_vector[vector];   in wx_free_irq() local
|   2097:  if (!q_vector->rx.ring && !q_vector->tx.ring)   in wx_free_irq()
|   [all …]
|
| wx_ethtool.c |
|    310:  if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)   in wx_get_coalesce()
|    330:  struct wx_q_vector *q_vector;   in wx_set_coalesce() local
|    334:  if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {   in wx_set_coalesce()
|    389:  if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)   in wx_set_coalesce()
|    393:  q_vector = wx->q_vector[i];   in wx_set_coalesce()
|    394:  if (q_vector->tx.count && !q_vector->rx.count)   in wx_set_coalesce()
|    396:  q_vector->itr = tx_itr_param;   in wx_set_coalesce()
|    399:  q_vector->itr = rx_itr_param;   in wx_set_coalesce()
|    400:  wx_write_eitr(q_vector);   in wx_set_coalesce()
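wx_set_coalesce() shows how ethtool coalescing reaches the q_vectors: the usec values are converted to ITR units and pushed to every vector, TX-only vectors taking the TX value and everything else (RX or combined) taking the RX value. A sketch under stated assumptions (the 2-usec register granularity, stand-in types and the elided register write are not wx's exact details):

```c
#include <linux/ethtool.h>

struct my_q_vector {
	unsigned int itr;	/* ITR value to program, in register units */
	unsigned int tx_count, rx_count;
};

struct my_adapter {
	unsigned int num_q_vectors;
	struct my_q_vector *q_vector[64];
};

static void my_write_itr(struct my_q_vector *q_vector)
{
	/* Register write elided in this sketch. */
}

static int my_set_coalesce(struct my_adapter *adapter,
			   const struct ethtool_coalesce *ec)
{
	unsigned int tx_itr = ec->tx_coalesce_usecs * 2;	/* assume 2 usec/unit */
	unsigned int rx_itr = ec->rx_coalesce_usecs * 2;
	unsigned int i;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct my_q_vector *q_vector = adapter->q_vector[i];

		if (q_vector->tx_count && !q_vector->rx_count)
			q_vector->itr = tx_itr;		/* TX-only vector  */
		else
			q_vector->itr = rx_itr;		/* RX or combined  */
		my_write_itr(q_vector);
	}
	return 0;
}
```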
|
| /linux-6.15/drivers/net/ethernet/intel/i40e/ |
| i40e_txrx.c |
|   1246:  itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);   in i40e_update_itr()
|   2681:  i40e_update_itr(q_vector, &q_vector->tx);   in i40e_update_enable_itr()
|   2682:  i40e_update_itr(q_vector, &q_vector->rx);   in i40e_update_enable_itr()
|   2692:  if (q_vector->rx.target_itr < q_vector->rx.current_itr) {   in i40e_update_enable_itr()
|   2696:  q_vector->rx.current_itr = q_vector->rx.target_itr;   in i40e_update_enable_itr()
|   2698:  } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||   in i40e_update_enable_itr()
|   2699:  ((q_vector->rx.target_itr - q_vector->rx.current_itr) <   in i40e_update_enable_itr()
|   2700:  (q_vector->tx.target_itr - q_vector->tx.current_itr))) {   in i40e_update_enable_itr()
|   2706:  q_vector->tx.current_itr = q_vector->tx.target_itr;   in i40e_update_enable_itr()
|   2708:  } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {   in i40e_update_enable_itr()
|   [all …]
|
| /linux-6.15/drivers/net/ethernet/intel/igb/ |
| igb_main.c |
|    927:  struct igb_q_vector *q_vector = adapter->q_vector[i];   in igb_request_msix() local
|    933:  if (q_vector->rx.ring && q_vector->tx.ring)   in igb_request_msix()
|    977:  struct igb_q_vector *q_vector = adapter->q_vector[v_idx];   in igb_free_q_vector() local
|    984:  if (q_vector)   in igb_free_q_vector()
|    998:  struct igb_q_vector *q_vector = adapter->q_vector[v_idx];   in igb_reset_q_vector() local
|   1180:  q_vector = adapter->q_vector[v_idx];   in igb_alloc_q_vector()
|   1200:  adapter->q_vector[v_idx] = q_vector;   in igb_alloc_q_vector()
|   1230:  ring->q_vector = q_vector;   in igb_alloc_q_vector()
|   1265:  ring->q_vector = q_vector;   in igb_alloc_q_vector()
|   5917:  igb_update_itr(q_vector, &q_vector->tx);   in igb_set_itr()
|   [all …]
|
| igb_xsk.c |
|     48:  napi_disable(&rx_ring->q_vector->napi);   in igb_txrx_ring_disable()
|     80:  napi_enable(&rx_ring->q_vector->napi);   in igb_txrx_ring_enable()
|    276:  skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);   in igb_construct_skb_zc()
|    341:  int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,   in igb_clean_rx_irq_zc() argument
|    344:  struct igb_adapter *adapter = q_vector->adapter;   in igb_clean_rx_irq_zc()
|    346:  struct igb_ring *rx_ring = q_vector->rx.ring;   in igb_clean_rx_irq_zc()
|    383:  ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,   in igb_clean_rx_irq_zc()
|    435:  napi_gro_receive(&q_vector->napi, skb);   in igb_clean_rx_irq_zc()
|    446:  igb_update_rx_stats(q_vector, total_packets, total_bytes);   in igb_clean_rx_irq_zc()
|    551:  if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {   in igb_xsk_wakeup()
|   [all …]
|
| /linux-6.15/drivers/net/ethernet/wangxun/ngbe/ |
| ngbe_main.c |
|    168:  struct wx_q_vector *q_vector;   in ngbe_intr() local
|    173:  q_vector = wx->q_vector[0];   in ngbe_intr()
|    195:  napi_schedule_irqoff(&q_vector->napi);   in ngbe_intr()
|    233:  struct wx_q_vector *q_vector = wx->q_vector[vector];   in ngbe_request_msix_irqs() local
|    236:  if (q_vector->tx.ring && q_vector->rx.ring)   in ngbe_request_msix_irqs()
|    237:  snprintf(q_vector->name, sizeof(q_vector->name) - 1,   in ngbe_request_msix_irqs()
|    244:  q_vector->name, q_vector);   in ngbe_request_msix_irqs()
|    247:  q_vector->name, err);   in ngbe_request_msix_irqs()
|    266:  wx->q_vector[vector]);   in ngbe_request_msix_irqs()
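ngbe_intr() (like the txgbe misc handler earlier) shows the single-vector fallback: with legacy INTx or plain MSI there is only q_vector[0], so the handler checks that the device actually raised the interrupt and then kicks that one NAPI context with napi_schedule_irqoff(). A sketch with stand-in types (the cause check is a placeholder for a device-specific register read):

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_q_vector {
	struct napi_struct napi;
};

struct my_priv {
	struct my_q_vector *q_vector[1];	/* legacy/MSI mode: one vector */
};

static bool my_read_and_clear_cause(struct my_priv *priv)
{
	/* Read/clear the interrupt cause register; elided in this sketch. */
	return true;
}

static irqreturn_t my_legacy_intr(int irq, void *data)
{
	struct my_priv *priv = data;
	struct my_q_vector *q_vector = priv->q_vector[0];

	/* Shared INTx line: bail out if this device did not interrupt. */
	if (!my_read_and_clear_cause(priv))
		return IRQ_NONE;

	/* All queue work in legacy/MSI mode funnels through vector 0. */
	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
```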
|