1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation.
3  */
4 
5 #include <rte_ethdev.h>
6 #include <rte_pci.h>
7 #include <rte_malloc.h>
8 
9 #include "rte_ethdev_driver.h"
10 #include "base/ixgbe_type.h"
11 #include "base/ixgbe_vf.h"
12 #include "ixgbe_ethdev.h"
13 #include "ixgbe_rxtx.h"
14 #include "rte_pmd_ixgbe.h"
15 
16 
17 static int
ixgbe_vf_representor_link_update(struct rte_eth_dev * ethdev,int wait_to_complete)18 ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev,
19 	int wait_to_complete)
20 {
21 	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
22 
23 	return ixgbe_dev_link_update_share(representor->pf_ethdev,
24 		wait_to_complete, 0);
25 }
26 
27 static int
ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev * ethdev,struct rte_ether_addr * mac_addr)28 ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
29 	struct rte_ether_addr *mac_addr)
30 {
31 	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
32 
33 	return rte_pmd_ixgbe_set_vf_mac_addr(
34 		representor->pf_ethdev->data->port_id,
35 		representor->vf_id, mac_addr);
36 }
37 
38 static int
ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev * ethdev,struct rte_eth_dev_info * dev_info)39 ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
40 	struct rte_eth_dev_info *dev_info)
41 {
42 	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
43 
44 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(
45 		representor->pf_ethdev->data->dev_private);
46 
47 	dev_info->device = representor->pf_ethdev->device;
48 
49 	dev_info->min_rx_bufsize = 1024;
50 	/**< Minimum size of RX buffer. */
51 	dev_info->max_rx_pktlen = 9728;
52 	/**< Maximum configurable length of RX pkt. */
53 	dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
54 	/**< Maximum number of RX queues. */
55 	dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
56 	/**< Maximum number of TX queues. */
57 
58 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
59 	/**< Maximum number of MAC addresses. */
60 
61 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
62 		DEV_RX_OFFLOAD_IPV4_CKSUM |	DEV_RX_OFFLOAD_UDP_CKSUM  |
63 		DEV_RX_OFFLOAD_TCP_CKSUM;
64 	/**< Device RX offload capabilities. */
65 
66 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
67 		DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
68 		DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
69 		DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
70 	/**< Device TX offload capabilities. */
71 
72 	dev_info->speed_capa =
73 		representor->pf_ethdev->data->dev_link.link_speed;
74 	/**< Supported speeds bitmap (ETH_LINK_SPEED_). */
75 
76 	dev_info->switch_info.name =
77 		representor->pf_ethdev->device->name;
78 	dev_info->switch_info.domain_id = representor->switch_domain_id;
79 	dev_info->switch_info.port_id = representor->vf_id;
80 
81 	return 0;
82 }
83 
/* Representors carry no datapath state, so any configuration is accepted. */
static int
ixgbe_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
89 
/* Rx queue setup stub: no queue is ever armed on a representor port;
 * the op exists only so applications can run their normal setup sequence.
 */
static int
ixgbe_vf_representor_rx_queue_setup(
	__rte_unused struct rte_eth_dev *dev,
	__rte_unused uint16_t rx_queue_id,
	__rte_unused uint16_t nb_rx_desc,
	__rte_unused unsigned int socket_id,
	__rte_unused const struct rte_eth_rxconf *rx_conf,
	__rte_unused struct rte_mempool *mb_pool)
{
	return 0;
}
100 
/* Tx queue setup stub: no queue is ever armed on a representor port;
 * the op exists only so applications can run their normal setup sequence.
 *
 * Fix: the unused parameters were copy-pasted from the Rx setup op and
 * carried Rx names (rx_queue_id/nb_rx_desc); renamed to Tx equivalents.
 * Parameter names are not part of the op's interface, so callers are
 * unaffected.
 */
static int ixgbe_vf_representor_tx_queue_setup(
	__rte_unused struct rte_eth_dev *dev,
	__rte_unused uint16_t tx_queue_id,
	__rte_unused uint16_t nb_tx_desc,
	__rte_unused unsigned int socket_id,
	__rte_unused const struct rte_eth_txconf *tx_conf)
{
	return 0;
}
110 
/* Starting a representor programs nothing in hardware. */
static int
ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
115 
/* Stopping a representor likewise touches no hardware state. */
static int
ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
120 
121 static int
ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev * ethdev,uint16_t vlan_id,int on)122 ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
123 	uint16_t vlan_id, int on)
124 {
125 	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
126 	uint64_t vf_mask = 1ULL << representor->vf_id;
127 
128 	return rte_pmd_ixgbe_set_vf_vlan_filter(
129 		representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on);
130 }
131 
132 static void
ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev * ethdev,__rte_unused uint16_t rx_queue_id,int on)133 ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
134 	__rte_unused uint16_t rx_queue_id, int on)
135 {
136 	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
137 
138 	rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id,
139 		representor->vf_id, on);
140 }
141 
142 static const struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
143 	.dev_infos_get		= ixgbe_vf_representor_dev_infos_get,
144 
145 	.dev_start		= ixgbe_vf_representor_dev_start,
146 	.dev_configure		= ixgbe_vf_representor_dev_configure,
147 	.dev_stop		= ixgbe_vf_representor_dev_stop,
148 
149 	.rx_queue_setup		= ixgbe_vf_representor_rx_queue_setup,
150 	.tx_queue_setup		= ixgbe_vf_representor_tx_queue_setup,
151 
152 	.link_update		= ixgbe_vf_representor_link_update,
153 
154 	.vlan_filter_set	= ixgbe_vf_representor_vlan_filter_set,
155 	.vlan_strip_queue_set	= ixgbe_vf_representor_vlan_strip_queue_set,
156 
157 	.mac_addr_set		= ixgbe_vf_representor_mac_addr_set,
158 };
159 
160 static uint16_t
ixgbe_vf_representor_rx_burst(__rte_unused void * rx_queue,__rte_unused struct rte_mbuf ** rx_pkts,__rte_unused uint16_t nb_pkts)161 ixgbe_vf_representor_rx_burst(__rte_unused void *rx_queue,
162 	__rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
163 {
164 	return 0;
165 }
166 
167 static uint16_t
ixgbe_vf_representor_tx_burst(__rte_unused void * tx_queue,__rte_unused struct rte_mbuf ** tx_pkts,__rte_unused uint16_t nb_pkts)168 ixgbe_vf_representor_tx_burst(__rte_unused void *tx_queue,
169 	__rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
170 {
171 	return 0;
172 }
173 
174 int
ixgbe_vf_representor_init(struct rte_eth_dev * ethdev,void * init_params)175 ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
176 {
177 	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
178 
179 	struct ixgbe_vf_info *vf_data;
180 	struct rte_pci_device *pci_dev;
181 	struct rte_eth_link *link;
182 
183 	if (!representor)
184 		return -ENOMEM;
185 
186 	representor->vf_id =
187 		((struct ixgbe_vf_representor *)init_params)->vf_id;
188 	representor->switch_domain_id =
189 		((struct ixgbe_vf_representor *)init_params)->switch_domain_id;
190 	representor->pf_ethdev =
191 		((struct ixgbe_vf_representor *)init_params)->pf_ethdev;
192 
193 	pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev);
194 
195 	if (representor->vf_id >= pci_dev->max_vfs)
196 		return -ENODEV;
197 
198 	ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
199 	ethdev->data->representor_id = representor->vf_id;
200 
201 	/* Set representor device ops */
202 	ethdev->dev_ops = &ixgbe_vf_representor_dev_ops;
203 
204 	/* No data-path, but need stub Rx/Tx functions to avoid crash
205 	 * when testing with the likes of testpmd.
206 	 */
207 	ethdev->rx_pkt_burst = ixgbe_vf_representor_rx_burst;
208 	ethdev->tx_pkt_burst = ixgbe_vf_representor_tx_burst;
209 
210 	/* Setting the number queues allocated to the VF */
211 	ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
212 	ethdev->data->nb_tx_queues = IXGBE_VF_MAX_RX_QUEUES;
213 
214 	/* Reference VF mac address from PF data structure */
215 	vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
216 		representor->pf_ethdev->data->dev_private);
217 
218 	ethdev->data->mac_addrs = (struct rte_ether_addr *)
219 		vf_data[representor->vf_id].vf_mac_addresses;
220 
221 	/* Link state. Inherited from PF */
222 	link = &representor->pf_ethdev->data->dev_link;
223 
224 	ethdev->data->dev_link.link_speed = link->link_speed;
225 	ethdev->data->dev_link.link_duplex = link->link_duplex;
226 	ethdev->data->dev_link.link_status = link->link_status;
227 	ethdev->data->dev_link.link_autoneg = link->link_autoneg;
228 
229 	return 0;
230 }
231 
/**
 * Tear down a VF representor ethdev.
 *
 * Always returns 0.
 */
int
ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev)
{
	/* mac_addrs must not be freed because part of ixgbe_vf_info:
	 * it points into the PF's VF data array, so clear the pointer to
	 * prevent the generic ethdev teardown from calling rte_free() on it.
	 */
	ethdev->data->mac_addrs = NULL;

	return 0;
}
240