xref: /linux-6.15/include/linux/avf/virtchnl.h (revision 542e893f)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2013-2022, Intel Corporation. */
3 
4 #ifndef _VIRTCHNL_H_
5 #define _VIRTCHNL_H_
6 
7 #include <linux/bitops.h>
8 #include <linux/overflow.h>
9 #include <uapi/linux/if_ether.h>
10 
11 /* Description:
12  * This header file describes the Virtual Function (VF) - Physical Function
13  * (PF) communication protocol used by the drivers for all devices starting
14  * from our 40G product line
15  *
16  * Admin queue buffer usage:
17  * desc->opcode is always aqc_opc_send_msg_to_pf
18  * flags, retval, datalen, and data addr are all used normally.
19  * The Firmware copies the cookie fields when sending messages between the
20  * PF and VF, but uses all other fields internally. Due to this limitation,
21  * we must send all messages as "indirect", i.e. using an external buffer.
22  *
23  * All the VSI indexes are relative to the VF. Each VF can have a maximum of
24  * three VSIs. All the queue indexes are relative to the VSI.  Each VF can
25  * have a maximum of sixteen queues for all of its VSIs.
26  *
27  * The PF is required to return a status code in v_retval for all messages
28  * except RESET_VF, which does not require any response. The returned value
29  * is of virtchnl_status_code type, defined here.
30  *
31  * In general, VF driver initialization should roughly follow the order of
32  * these opcodes. The VF driver must first validate the API version of the
33  * PF driver, then request a reset, then get resources, then configure
34  * queues and interrupts. After these operations are complete, the VF
35  * driver may start its queues, optionally add MAC and VLAN filters, and
36  * process traffic.
37  */
38 
39 /* START GENERIC DEFINES
40  * Need to ensure the following enums and defines hold the same meaning and
41  * value in current and future projects
42  */
43 
44 /* Error Codes */
45 enum virtchnl_status_code {
46 	VIRTCHNL_STATUS_SUCCESS				= 0,
47 	VIRTCHNL_STATUS_ERR_PARAM			= -5,
48 	VIRTCHNL_STATUS_ERR_NO_MEMORY			= -18,
49 	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH		= -38,
50 	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR		= -39,
51 	VIRTCHNL_STATUS_ERR_INVALID_VF_ID		= -40,
52 	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR		= -53,
53 	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED		= -64,
54 };
55 
56 /* Backward compatibility */
57 #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
58 #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
59 
60 #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT		0x0
61 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT		0x1
62 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT	0x2
63 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT		0x3
64 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT		0x4
65 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT		0x5
66 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT		0x6
67 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT		0x7
68 
69 enum virtchnl_link_speed {
70 	VIRTCHNL_LINK_SPEED_UNKNOWN	= 0,
71 	VIRTCHNL_LINK_SPEED_100MB	= BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
72 	VIRTCHNL_LINK_SPEED_1GB		= BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
73 	VIRTCHNL_LINK_SPEED_10GB	= BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
74 	VIRTCHNL_LINK_SPEED_40GB	= BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
75 	VIRTCHNL_LINK_SPEED_20GB	= BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
76 	VIRTCHNL_LINK_SPEED_25GB	= BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
77 	VIRTCHNL_LINK_SPEED_2_5GB	= BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
78 	VIRTCHNL_LINK_SPEED_5GB		= BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
79 };
80 
81 /* for hsplit_0 field of Rx HMC context */
82 /* deprecated with AVF 1.0 */
83 enum virtchnl_rx_hsplit {
84 	VIRTCHNL_RX_HSPLIT_NO_SPLIT      = 0,
85 	VIRTCHNL_RX_HSPLIT_SPLIT_L2      = 1,
86 	VIRTCHNL_RX_HSPLIT_SPLIT_IP      = 2,
87 	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
88 	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,
89 };
90 
91 /* END GENERIC DEFINES */
92 
93 /* Opcodes for VF-PF communication. These are placed in the v_opcode field
94  * of the virtchnl_msg structure.
95  */
96 enum virtchnl_ops {
97 /* The PF sends status change events to VFs using
98  * the VIRTCHNL_OP_EVENT opcode.
99  * VFs send requests to the PF using the other ops.
100  * Use of "advanced opcode" features must be negotiated as part of capabilities
101  * exchange and is not considered part of the base mode feature set.
102  */
103 	VIRTCHNL_OP_UNKNOWN = 0,
104 	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
105 	VIRTCHNL_OP_RESET_VF = 2,
106 	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
107 	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
108 	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
109 	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
110 	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
111 	VIRTCHNL_OP_ENABLE_QUEUES = 8,
112 	VIRTCHNL_OP_DISABLE_QUEUES = 9,
113 	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
114 	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
115 	VIRTCHNL_OP_ADD_VLAN = 12,
116 	VIRTCHNL_OP_DEL_VLAN = 13,
117 	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
118 	VIRTCHNL_OP_GET_STATS = 15,
119 	VIRTCHNL_OP_RSVD = 16,
120 	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
121 	VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
122 	/* opcode 19 is reserved */
123 	VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
124 	VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
125 	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
126 	VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
127 	VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
128 	VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
129 	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
130 	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
131 	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
132 	VIRTCHNL_OP_SET_RSS_HENA = 26,
133 	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
134 	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
135 	VIRTCHNL_OP_REQUEST_QUEUES = 29,
136 	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
137 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
138 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
139 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
140 	/* opcode 34 - 43 are reserved */
141 	VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
142 	VIRTCHNL_OP_ADD_RSS_CFG = 45,
143 	VIRTCHNL_OP_DEL_RSS_CFG = 46,
144 	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
145 	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
146 	VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
147 	VIRTCHNL_OP_ADD_VLAN_V2 = 52,
148 	VIRTCHNL_OP_DEL_VLAN_V2 = 53,
149 	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
150 	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
151 	VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
152 	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
153 	VIRTCHNL_OP_MAX,
154 };
155 
156 /* These macros are used to generate compilation errors if a structure/union
157  * is not exactly the correct length. It gives a divide by zero error if the
158  * structure/union is not of the correct size, otherwise it creates an enum
159  * that is never used.
160  */
161 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
162 	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
163 #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
164 	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
165 
166 /* Message descriptions and data structures. */
167 
168 /* VIRTCHNL_OP_VERSION
169  * VF posts its version number to the PF. PF responds with its version number
170  * in the same format, along with a return code.
171  * Reply from PF has its major/minor versions also in param0 and param1.
172  * If there is a major version mismatch, then the VF cannot operate.
173  * If there is a minor version mismatch, then the VF can operate but should
174  * add a warning to the system log.
175  *
176  * This enum element MUST always be specified as == 1, regardless of other
177  * changes in the API. The PF must always respond to this message without
178  * error regardless of version mismatch.
179  */
180 #define VIRTCHNL_VERSION_MAJOR		1
181 #define VIRTCHNL_VERSION_MINOR		1
182 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
183 
184 struct virtchnl_version_info {
185 	u32 major;
186 	u32 minor;
187 };
188 
189 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
190 
191 #define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
192 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
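
/* Editorial sketch (not part of the ABI): one way a VF driver might apply the
 * version rules above to the PF's VIRTCHNL_OP_VERSION reply. A major mismatch
 * is fatal; a minor mismatch is tolerated but should be logged by the caller.
 */
static inline int
virtchnl_example_check_version(const struct virtchnl_version_info *pf_ver)
{
	if (pf_ver->major != VIRTCHNL_VERSION_MAJOR)
		return -1;	/* major mismatch: VF cannot operate */

	/* minor mismatch: VF can operate, caller should warn */
	return 0;
}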
193 
194 /* VIRTCHNL_OP_RESET_VF
195  * VF sends this request to PF with no parameters
196  * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
197  * until reset completion is indicated. The admin queue must be reinitialized
198  * after this operation.
199  *
200  * When reset is complete, PF must ensure that all queues in all VSIs associated
201  * with the VF are stopped, all queue configurations in the HMC are set to 0,
202  * and all MAC and VLAN filters (except the default MAC address) on all VSIs
203  * are cleared.
204  */
205 
206 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
207  * vsi_type should always be 6 for backward compatibility. Add other fields
208  * as needed.
209  */
210 enum virtchnl_vsi_type {
211 	VIRTCHNL_VSI_TYPE_INVALID = 0,
212 	VIRTCHNL_VSI_SRIOV = 6,
213 };
214 
215 /* VIRTCHNL_OP_GET_VF_RESOURCES
216  * Version 1.0 VF sends this request to PF with no parameters
217  * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
218  * PF responds with an indirect message containing
219  * virtchnl_vf_resource and one or more
220  * virtchnl_vsi_resource structures.
221  */
222 
223 struct virtchnl_vsi_resource {
224 	u16 vsi_id;
225 	u16 num_queue_pairs;
226 
227 	/* see enum virtchnl_vsi_type */
228 	s32 vsi_type;
229 	u16 qset_handle;
230 	u8 default_mac_addr[ETH_ALEN];
231 };
232 
233 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
234 
235 /* VF capability flags
236  * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
237  * TX/RX Checksum offloading and TSO for non-tunnelled packets.
238  */
239 #define VIRTCHNL_VF_OFFLOAD_L2			BIT(0)
240 #define VIRTCHNL_VF_OFFLOAD_RDMA		BIT(1)
241 #define VIRTCHNL_VF_CAP_RDMA			VIRTCHNL_VF_OFFLOAD_RDMA
242 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ		BIT(3)
243 #define VIRTCHNL_VF_OFFLOAD_RSS_REG		BIT(4)
244 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		BIT(5)
245 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
246 /* used to negotiate communicating link speeds in Mbps */
247 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
248 #define  VIRTCHNL_VF_OFFLOAD_CRC		BIT(10)
249 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
250 #define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
251 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
252 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	BIT(18)
253 #define VIRTCHNL_VF_OFFLOAD_RSS_PF		BIT(19)
254 #define VIRTCHNL_VF_OFFLOAD_ENCAP		BIT(20)
255 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		BIT(21)
256 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	BIT(22)
257 #define VIRTCHNL_VF_OFFLOAD_ADQ			BIT(23)
258 #define VIRTCHNL_VF_OFFLOAD_USO			BIT(25)
259 #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	BIT(26)
260 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		BIT(27)
261 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
262 
263 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
264 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
265 			       VIRTCHNL_VF_OFFLOAD_RSS_PF)
266 
267 struct virtchnl_vf_resource {
268 	u16 num_vsis;
269 	u16 num_queue_pairs;
270 	u16 max_vectors;
271 	u16 max_mtu;
272 
273 	u32 vf_cap_flags;
274 	u32 rss_key_size;
275 	u32 rss_lut_size;
276 
277 	struct virtchnl_vsi_resource vsi_res[];
278 };
279 
280 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
281 #define virtchnl_vf_resource_LEGACY_SIZEOF	36
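
/* Editorial sketch: a version 1.1 VF passes a u32 capability bitmap with
 * VIRTCHNL_OP_GET_VF_RESOURCES. The set of flags below is illustrative only;
 * a real driver requests exactly the features it implements.
 */
static inline u32 virtchnl_example_request_caps(void)
{
	return VIRTCHNL_VF_OFFLOAD_L2 |
	       VIRTCHNL_VF_OFFLOAD_VLAN |
	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
}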
282 
283 /* VIRTCHNL_OP_CONFIG_TX_QUEUE
284  * VF sends this message to set up parameters for one TX queue.
285  * External data buffer contains one instance of virtchnl_txq_info.
286  * PF configures requested queue and returns a status code.
287  */
288 
289 /* Tx queue config info */
290 struct virtchnl_txq_info {
291 	u16 vsi_id;
292 	u16 queue_id;
293 	u16 ring_len;		/* number of descriptors, multiple of 8 */
294 	u16 headwb_enabled; /* deprecated with AVF 1.0 */
295 	u64 dma_ring_addr;
296 	u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
297 };
298 
299 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
300 
301 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
302  * VF sends this message to set up parameters for one RX queue.
303  * External data buffer contains one instance of virtchnl_rxq_info.
304  * PF configures requested queue and returns a status code. The
305  * crc_disable flag disables CRC stripping on the VF. Setting
306  * the crc_disable flag to 1 will disable CRC stripping for each
307  * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
308  * offload must have been set prior to sending this info or the PF
309  * will ignore the request. This flag should be set the same for
310  * all of the queues for a VF.
311  */
312 
313 /* Rx queue config info */
314 struct virtchnl_rxq_info {
315 	u16 vsi_id;
316 	u16 queue_id;
317 	u32 ring_len;		/* number of descriptors, multiple of 32 */
318 	u16 hdr_size;
319 	u16 splithdr_enabled; /* deprecated with AVF 1.0 */
320 	u32 databuffer_size;
321 	u32 max_pkt_size;
322 	u8 crc_disable;
323 	u8 rxdid;
324 	u8 pad1[2];
325 	u64 dma_ring_addr;
326 
327 	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
328 	s32 rx_split_pos;
329 	u32 pad2;
330 };
331 
332 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
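
/* Editorial sketch: filling one Rx queue configuration. The ring length,
 * buffer sizes and DMA address below are placeholders; crc_disable is only
 * honored when VIRTCHNL_VF_OFFLOAD_CRC was negotiated and should be set the
 * same for every queue of the VF, as noted above.
 */
static inline void
virtchnl_example_fill_rxq(struct virtchnl_rxq_info *rxq, u16 vsi_id, u16 qid,
			  u64 ring_dma)
{
	rxq->vsi_id = vsi_id;
	rxq->queue_id = qid;
	rxq->ring_len = 512;		/* multiple of 32 */
	rxq->databuffer_size = 2048;
	rxq->max_pkt_size = 1522;
	rxq->crc_disable = 0;		/* keep CRC stripping enabled */
	rxq->dma_ring_addr = ring_dma;
}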
333 
334 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES
335  * VF sends this message to set parameters for all active TX and RX queues
336  * associated with the specified VSI.
337  * PF configures queues and returns status.
338  * If the number of queues specified is greater than the number of queues
339  * associated with the VSI, an error is returned and no queues are configured.
340  * NOTE: The VF is not required to configure all queues in a single request.
341  * It may send multiple messages. PF drivers must correctly handle all VF
342  * requests.
343  */
344 struct virtchnl_queue_pair_info {
345 	/* NOTE: vsi_id and queue_id should be identical for both queues. */
346 	struct virtchnl_txq_info txq;
347 	struct virtchnl_rxq_info rxq;
348 };
349 
350 VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
351 
352 struct virtchnl_vsi_queue_config_info {
353 	u16 vsi_id;
354 	u16 num_queue_pairs;
355 	u32 pad;
356 	struct virtchnl_queue_pair_info qpair[];
357 };
358 
359 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
360 #define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF	72
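
/* Editorial sketch: describing a single queue pair for
 * VIRTCHNL_OP_CONFIG_VSI_QUEUES. Ring parameters would be filled in as shown
 * in the Rx sketch above (and analogously for Tx); the caller sizes the
 * message buffer with virtchnl_struct_size(), defined later in this file.
 */
static inline void
virtchnl_example_fill_one_pair(struct virtchnl_vsi_queue_config_info *vqc,
			       u16 vsi_id)
{
	vqc->vsi_id = vsi_id;
	vqc->num_queue_pairs = 1;
	vqc->qpair[0].txq.vsi_id = vsi_id;
	vqc->qpair[0].txq.queue_id = 0;
	vqc->qpair[0].rxq.vsi_id = vsi_id;
	vqc->qpair[0].rxq.queue_id = 0;
}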
361 
362 /* VIRTCHNL_OP_REQUEST_QUEUES
363  * VF sends this message to request the PF to allocate additional queues to
364  * this VF.  Each VF gets a guaranteed number of queues on init but asking for
365  * additional queues must be negotiated.  This is a best effort request as it
366  * is possible the PF does not have enough queues left to support the request.
367  * If the PF cannot support the number requested it will respond with the
368  * maximum number it is able to support.  If the request is successful, PF will
369  * then reset the VF to institute required changes.
370  */
371 
372 /* VF resource request */
373 struct virtchnl_vf_res_request {
374 	u16 num_queue_pairs;
375 };
376 
377 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
378  * VF uses this message to map vectors to queues.
379  * The rxq_map and txq_map fields are bitmaps used to indicate which queues
380  * are to be associated with the specified vector.
381  * The "other" causes are always mapped to vector 0. The VF may not request
382  * that vector 0 be used for traffic.
383  * PF configures interrupt mapping and returns status.
384  * NOTE: due to hardware requirements, all active queues (both TX and RX)
385  * should be mapped to interrupts, even if the driver intends to operate
386  * only in polling mode. In this case the interrupt may be disabled, but
387  * the ITR timer will still run to trigger writebacks.
388  */
389 struct virtchnl_vector_map {
390 	u16 vsi_id;
391 	u16 vector_id;
392 	u16 rxq_map;
393 	u16 txq_map;
394 	u16 rxitr_idx;
395 	u16 txitr_idx;
396 };
397 
398 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
399 
400 struct virtchnl_irq_map_info {
401 	u16 num_vectors;
402 	struct virtchnl_vector_map vecmap[];
403 };
404 
405 VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
406 #define virtchnl_irq_map_info_LEGACY_SIZEOF	14
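
/* Editorial sketch: mapping both queues of a two-queue VSI to vector 1, since
 * vector 0 is reserved for "other" causes as noted above. The ITR indexes are
 * placeholders.
 */
static inline void
virtchnl_example_fill_vecmap(struct virtchnl_vector_map *vm, u16 vsi_id)
{
	vm->vsi_id = vsi_id;
	vm->vector_id = 1;		/* vector 0 may not carry traffic */
	vm->rxq_map = BIT(0) | BIT(1);	/* Rx queues 0 and 1 */
	vm->txq_map = BIT(0) | BIT(1);	/* Tx queues 0 and 1 */
	vm->rxitr_idx = 0;
	vm->txitr_idx = 1;
}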
407 
408 /* VIRTCHNL_OP_ENABLE_QUEUES
409  * VIRTCHNL_OP_DISABLE_QUEUES
410  * VF sends these messages to enable or disable TX/RX queue pairs.
411  * The queues fields are bitmaps indicating which queues to act upon.
412  * (Currently, we only support 16 queues per VF, but we make the field
413  * u32 to allow for expansion.)
414  * PF performs requested action and returns status.
415  * NOTE: The VF is not required to enable/disable all queues in a single
416  * request. It may send multiple messages.
417  * PF drivers must correctly handle all VF requests.
418  */
419 struct virtchnl_queue_select {
420 	u16 vsi_id;
421 	u16 pad;
422 	u32 rx_queues;
423 	u32 tx_queues;
424 };
425 
426 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
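
/* Editorial sketch: selecting Rx/Tx queues 0 and 1 in one enable/disable
 * request. The same structure is reused for VIRTCHNL_OP_GET_STATS.
 */
static inline void
virtchnl_example_fill_qsel(struct virtchnl_queue_select *qs, u16 vsi_id)
{
	qs->vsi_id = vsi_id;
	qs->rx_queues = BIT(0) | BIT(1);
	qs->tx_queues = BIT(0) | BIT(1);
}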
427 
428 /* VIRTCHNL_OP_ADD_ETH_ADDR
429  * VF sends this message in order to add one or more unicast or multicast
430  * address filters for the specified VSI.
431  * PF adds the filters and returns status.
432  */
433 
434 /* VIRTCHNL_OP_DEL_ETH_ADDR
435  * VF sends this message in order to remove one or more unicast or multicast
436  * filters for the specified VSI.
437  * PF removes the filters and returns status.
438  */
439 
440 /* VIRTCHNL_ETHER_ADDR_LEGACY
441  * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
442  * bytes. Moving forward all VF drivers should not set type to
443  * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
444  * behavior. The control plane function (i.e. PF) can use a best effort method
445  * of tracking the primary/device unicast in this case, but there is no
446  * guarantee and functionality depends on the implementation of the PF.
447  */
448 
449 /* VIRTCHNL_ETHER_ADDR_PRIMARY
450  * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
451  * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
452  * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
453  * function (i.e. PF) to accurately track and use this MAC address for
454  * displaying on the host and for VM/function reset.
455  */
456 
457 /* VIRTCHNL_ETHER_ADDR_EXTRA
458  * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
459  * unicast and/or multicast filters that are being added/deleted via
460  * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
461  */
462 struct virtchnl_ether_addr {
463 	u8 addr[ETH_ALEN];
464 	u8 type;
465 #define VIRTCHNL_ETHER_ADDR_LEGACY	0
466 #define VIRTCHNL_ETHER_ADDR_PRIMARY	1
467 #define VIRTCHNL_ETHER_ADDR_EXTRA	2
468 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK	3 /* first two bits of type are valid */
469 	u8 pad;
470 };
471 
472 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
473 
474 struct virtchnl_ether_addr_list {
475 	u16 vsi_id;
476 	u16 num_elements;
477 	struct virtchnl_ether_addr list[];
478 };
479 
480 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
481 #define virtchnl_ether_addr_list_LEGACY_SIZEOF	12
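
/* Editorial sketch: a single-entry add request carrying the primary unicast
 * MAC. Marking it VIRTCHNL_ETHER_ADDR_PRIMARY lets the PF track the device
 * address as described above; additional filters would use
 * VIRTCHNL_ETHER_ADDR_EXTRA.
 */
static inline void
virtchnl_example_fill_primary_mac(struct virtchnl_ether_addr_list *list,
				  u16 vsi_id, const u8 mac[ETH_ALEN])
{
	int i;

	list->vsi_id = vsi_id;
	list->num_elements = 1;
	for (i = 0; i < ETH_ALEN; i++)
		list->list[0].addr[i] = mac[i];
	list->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
}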
482 
483 /* VIRTCHNL_OP_ADD_VLAN
484  * VF sends this message to add one or more VLAN tag filters for receives.
485  * PF adds the filters and returns status.
486  * If a port VLAN is configured by the PF, this operation will return an
487  * error to the VF.
488  */
489 
490 /* VIRTCHNL_OP_DEL_VLAN
491  * VF sends this message to remove one or more VLAN tag filters for receives.
492  * PF removes the filters and returns status.
493  * If a port VLAN is configured by the PF, this operation will return an
494  * error to the VF.
495  */
496 
497 struct virtchnl_vlan_filter_list {
498 	u16 vsi_id;
499 	u16 num_elements;
500 	u16 vlan_id[];
501 };
502 
503 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
504 #define virtchnl_vlan_filter_list_LEGACY_SIZEOF	6
505 
506 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
507  * structures and opcodes.
508  *
509  * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
510  * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
511  *
512  * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
513  * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
514  * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
515  *
516  * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
517  * by the PF concurrently. For example, if the PF can support
518  * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
519  * would OR the following bits:
520  *
521  *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
522  *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
523  *	VIRTCHNL_VLAN_ETHERTYPE_AND;
524  *
525  * The VF would interpret this as VLAN filtering can be supported on both 0x8100
526  * and 0x88A8 VLAN ethertypes.
527  *
528  * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be supported
529  * by the PF concurrently. For example, if the PF can support
530  * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
531  * offload it would OR the following bits:
532  *
533  *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
534  *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
535  *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
536  *
537  * The VF would interpret this as VLAN stripping can be supported on either
538  * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
539  * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
540  * the previously set value.
541  *
542  * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
543  * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
544  *
545  * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
546  * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
547  *
548  * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
549  * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
550  *
551  * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
552  * VLAN filtering if the underlying PF supports it.
553  *
554  * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
555  * certain VLAN capability can be toggled. For example if the underlying PF/CP
556  * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
557  * set this bit along with the supported ethertypes.
558  */
559 enum virtchnl_vlan_support {
560 	VIRTCHNL_VLAN_UNSUPPORTED =		0,
561 	VIRTCHNL_VLAN_ETHERTYPE_8100 =		BIT(0),
562 	VIRTCHNL_VLAN_ETHERTYPE_88A8 =		BIT(1),
563 	VIRTCHNL_VLAN_ETHERTYPE_9100 =		BIT(2),
564 	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 =	BIT(8),
565 	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 =	BIT(9),
566 	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 =	BIT(10),
567 	VIRTCHNL_VLAN_PRIO =			BIT(24),
568 	VIRTCHNL_VLAN_FILTER_MASK =		BIT(28),
569 	VIRTCHNL_VLAN_ETHERTYPE_AND =		BIT(29),
570 	VIRTCHNL_VLAN_ETHERTYPE_XOR =		BIT(30),
571 	VIRTCHNL_VLAN_TOGGLE =			BIT(31),
572 };
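
/* Editorial sketch: interpreting a PF-reported capability word using the
 * AND/XOR rules above. Returns true only when 0x8100 and 0x88A8 may be
 * offloaded concurrently.
 */
static inline bool virtchnl_example_dual_tpid_ok(u32 caps)
{
	u32 both = VIRTCHNL_VLAN_ETHERTYPE_8100 | VIRTCHNL_VLAN_ETHERTYPE_88A8;

	return (caps & both) == both && (caps & VIRTCHNL_VLAN_ETHERTYPE_AND);
}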
573 
574 /* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
575  * for filtering, insertion, and stripping capabilities.
576  *
577  * If only outer capabilities are supported (for filtering, insertion, and/or
578  * stripping) then this refers to the outer most or single VLAN from the VF's
579  * perspective.
580  *
581  * If only inner capabilities are supported (for filtering, insertion, and/or
582  * stripping) then this refers to the outer most or single VLAN from the VF's
583  * perspective. Functionally this is the same as if only outer capabilities are
584  * supported. The VF driver is just forced to use the inner fields when
585  * adding/deleting filters and enabling/disabling offloads (if supported).
586  *
587  * If both outer and inner capabilities are supported (for filtering, insertion,
588  * and/or stripping) then outer refers to the outer most or single VLAN and
589  * inner refers to the second VLAN, if it exists, in the packet.
590  *
591  * There is no support for tunneled VLAN offloads, so outer or inner are never
592  * referring to a tunneled packet from the VF's perspective.
593  */
594 struct virtchnl_vlan_supported_caps {
595 	u32 outer;
596 	u32 inner;
597 };
598 
599 /* The PF populates these fields based on the supported VLAN filtering. If a
600  * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
601  * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
602  * the unsupported fields.
603  *
604  * Also, a VF is only allowed to toggle its VLAN filtering setting if the
605  * VIRTCHNL_VLAN_TOGGLE bit is set.
606  *
607  * The ethertype(s) specified in the ethertype_init field are the ethertypes
608  * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
609  * most VLAN from the VF's perspective. If both inner and outer filtering are
610  * allowed then ethertype_init only refers to the outer most VLAN, as the only
611  * VLAN ethertype supported for inner VLAN filtering is
612  * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
613  * when both inner and outer filtering are allowed.
614  *
615  * The max_filters field tells the VF how many VLAN filters it's allowed to have
616  * at any one time. If it exceeds this amount and tries to add another filter,
617  * then the request will be rejected by the PF. To prevent failures, the VF
618  * should keep track of how many VLAN filters it has added and not attempt to
619  * add more than max_filters.
620  */
621 struct virtchnl_vlan_filtering_caps {
622 	struct virtchnl_vlan_supported_caps filtering_support;
623 	u32 ethertype_init;
624 	u16 max_filters;
625 	u8 pad[2];
626 };
627 
628 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
629 
630 /* This enum is used for the virtchnl_vlan_offload_caps structure to specify
631  * if the PF supports a different ethertype for stripping and insertion.
632  *
633  * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
634  * for stripping affect the ethertype(s) specified for insertion and vice versa
635  * as well. If the VF tries to configure VLAN stripping via
636  * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
637  * that will be the ethertype for both stripping and insertion.
638  *
639  * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
640  * stripping do not affect the ethertype(s) specified for insertion and vice
641  * versa.
642  */
643 enum virtchnl_vlan_ethertype_match {
644 	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
645 	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
646 };
647 
648 /* The PF populates these fields based on the supported VLAN offloads. If a
649  * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
650  * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
651  * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
652  *
653  * Also, a VF is only allowed to toggle its VLAN offload setting if the
654  * VIRTCHNL_VLAN_TOGGLE bit is set.
655  *
656  * The VF driver needs to be aware of how the tags are stripped by hardware and
657  * inserted by the VF driver based on the level of offload support. The PF will
658  * populate these fields based on where the VLAN tags are expected to be
659  * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
660  * interpret these fields. See the definition of the
661  * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
662  * enumeration.
663  */
664 struct virtchnl_vlan_offload_caps {
665 	struct virtchnl_vlan_supported_caps stripping_support;
666 	struct virtchnl_vlan_supported_caps insertion_support;
667 	u32 ethertype_init;
668 	u8 ethertype_match;
669 	u8 pad[3];
670 };
671 
672 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
673 
674 /* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
675  * VF sends this message to determine its VLAN capabilities.
676  *
677  * PF will mark which capabilities it supports based on hardware support and
678  * current configuration. For example, if a port VLAN is configured the PF will
679  * not allow outer VLAN filtering, stripping, or insertion to be configured so
680  * it will block these features from the VF.
681  *
682  * The VF will need to cross-reference its capabilities with the PF's
683  * capabilities in the response message from the PF to determine the VLAN
684  * support.
685  */
686 struct virtchnl_vlan_caps {
687 	struct virtchnl_vlan_filtering_caps filtering;
688 	struct virtchnl_vlan_offload_caps offloads;
689 };
690 
691 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
692 
693 struct virtchnl_vlan {
694 	u16 tci;	/* tci[15:13] = PCP and tci[11:0] = VID */
695 	u16 tci_mask;	/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
696 			 * filtering caps
697 			 */
698 	u16 tpid;	/* 0x8100, 0x88a8, etc. and only type(s) set in
699 			 * filtering caps. Note that tpid here does not refer to
700 			 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
701 			 * actual 2-byte VLAN TPID
702 			 */
703 	u8 pad[2];
704 };
705 
706 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
707 
708 struct virtchnl_vlan_filter {
709 	struct virtchnl_vlan inner;
710 	struct virtchnl_vlan outer;
711 	u8 pad[16];
712 };
713 
714 VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
715 
716 /* VIRTCHNL_OP_ADD_VLAN_V2
717  * VIRTCHNL_OP_DEL_VLAN_V2
718  *
719  * VF sends these messages to add/del one or more VLAN tag filters for Rx
720  * traffic.
721  *
722  * The PF attempts to add the filters and returns status.
723  *
724  * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
725  * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
726  */
727 struct virtchnl_vlan_filter_list_v2 {
728 	u16 vport_id;
729 	u16 num_elements;
730 	u8 pad[4];
731 	struct virtchnl_vlan_filter filters[];
732 };
733 
734 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
735 #define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF	40
736 
737 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
738  * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
739  * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
740  * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
741  *
742  * VF sends this message to enable or disable VLAN stripping or insertion. It
743  * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
744  * allowed and whether or not it's allowed to enable/disable the specific
745  * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
746  * parse the virtchnl_vlan_caps.offloads fields to determine which offload
747  * messages are allowed.
748  *
749  * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
750  * following manner the VF will be allowed to enable and/or disable 0x8100 inner
751  * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
752  * case means the outer most or single VLAN from the VF's perspective. This is
753  * because no outer offloads are supported. See the comments above the
754  * virtchnl_vlan_supported_caps structure for more details.
755  *
756  * virtchnl_vlan_caps.offloads.stripping_support.inner =
757  *			VIRTCHNL_VLAN_TOGGLE |
758  *			VIRTCHNL_VLAN_ETHERTYPE_8100;
759  *
760  * virtchnl_vlan_caps.offloads.insertion_support.inner =
761  *			VIRTCHNL_VLAN_TOGGLE |
762  *			VIRTCHNL_VLAN_ETHERTYPE_8100;
763  *
764  * In order to enable inner (again note that in this case inner is the outer
765  * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
766  * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
767  * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
768  *
769  * virtchnl_vlan_setting.inner_ethertype_setting =
770  *			VIRTCHNL_VLAN_ETHERTYPE_8100;
771  *
772  * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
773  * initialization.
774  *
775  * The reason that VLAN TPID(s) are not being used for the
776  * outer_ethertype_setting and inner_ethertype_setting fields is because it's
777  * possible a device could support VLAN insertion and/or stripping offload on
778  * multiple ethertypes concurrently, so this method allows a VF to request
779  * multiple ethertypes in one message using the virtchnl_vlan_support
780  * enumeration.
781  *
782  * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
783  * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
784  * VLAN insertion and stripping simultaneously. The
785  * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
786  * populated based on what the PF can support.
787  *
788  * virtchnl_vlan_caps.offloads.stripping_support.outer =
789  *			VIRTCHNL_VLAN_TOGGLE |
790  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
791  *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
792  *			VIRTCHNL_VLAN_ETHERTYPE_AND;
793  *
794  * virtchnl_vlan_caps.offloads.insertion_support.outer =
795  *			VIRTCHNL_VLAN_TOGGLE |
796  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
797  *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
798  *			VIRTCHNL_VLAN_ETHERTYPE_AND;
799  *
800  * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
801  * would populate the virtchnl_vlan_setting structure in the following manner
802  * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
803  *
804  * virtchnl_vlan_setting.outer_ethertype_setting =
805  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
806  *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
807  *
808  * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
809  * initialization.
810  *
811  * There is also the case where a PF and the underlying hardware can support
812  * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
813  * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
814  * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
815  * offloads. The ethertypes must match for stripping and insertion.
816  *
817  * virtchnl_vlan_caps.offloads.stripping_support.outer =
818  *			VIRTCHNL_VLAN_TOGGLE |
819  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
820  *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
821  *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
822  *
823  * virtchnl_vlan_caps.offloads.insertion_support.outer =
824  *			VIRTCHNL_VLAN_TOGGLE |
825  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
826  *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
827  *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
828  *
829  * virtchnl_vlan_caps.offloads.ethertype_match =
830  *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
831  *
832  * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
833  * populate the virtchnl_vlan_setting structure in the following manner and send
834  * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
835  * ethertype for VLAN insertion if it's enabled. So, for completeness, a
836  * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
837  *
838  * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
839  *
840  * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
841  * initialization.
842  */
843 struct virtchnl_vlan_setting {
844 	u32 outer_ethertype_setting;
845 	u32 inner_ethertype_setting;
846 	u16 vport_id;
847 	u8 pad[6];
848 };
849 
850 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
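
/* Editorial sketch: requesting outer VLAN stripping for both TPIDs, matching
 * the "AND" capability example above. vport_id is whatever the PF assigned at
 * initialization, and the capability must have been negotiated first.
 */
static inline void
virtchnl_example_fill_strip_req(struct virtchnl_vlan_setting *vs, u16 vport_id)
{
	vs->outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_8100 |
				      VIRTCHNL_VLAN_ETHERTYPE_88A8;
	vs->inner_ethertype_setting = 0;
	vs->vport_id = vport_id;
}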
851 
852 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
853  * VF sends VSI id and flags.
854  * PF returns status code in retval.
855  * Note: we assume that broadcast accept mode is always enabled.
856  */
857 struct virtchnl_promisc_info {
858 	u16 vsi_id;
859 	u16 flags;
860 };
861 
862 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
863 
864 #define FLAG_VF_UNICAST_PROMISC	0x00000001
865 #define FLAG_VF_MULTICAST_PROMISC	0x00000002
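
/* Editorial sketch: requesting unicast and multicast promiscuous mode for a
 * VSI; broadcast accept is assumed to always be enabled, per the note above.
 */
static inline void
virtchnl_example_fill_promisc(struct virtchnl_promisc_info *pi, u16 vsi_id)
{
	pi->vsi_id = vsi_id;
	pi->flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
}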
866 
867 /* VIRTCHNL_OP_GET_STATS
868  * VF sends this message to request stats for the selected VSI. VF uses
869  * the virtchnl_queue_select struct to specify the VSI. The queue bitmap
870  * fields are ignored by the PF.
871  *
872  * PF replies with struct eth_stats in an external buffer.
873  */
874 
875 /* VIRTCHNL_OP_CONFIG_RSS_KEY
876  * VIRTCHNL_OP_CONFIG_RSS_LUT
877  * VF sends these messages to configure RSS. Only supported if both PF
878  * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
879  * configuration negotiation. If this is the case, then the RSS fields in
880  * the VF resource struct are valid.
881  * Both the key and LUT are initialized to 0 by the PF, meaning that
882  * RSS is effectively disabled until set up by the VF.
883  */
884 struct virtchnl_rss_key {
885 	u16 vsi_id;
886 	u16 key_len;
887 	u8 key[];          /* RSS hash key, packed bytes */
888 };
889 
890 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
891 #define virtchnl_rss_key_LEGACY_SIZEOF	6
892 
893 struct virtchnl_rss_lut {
894 	u16 vsi_id;
895 	u16 lut_entries;
896 	u8 lut[];         /* RSS lookup table */
897 };
898 
899 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
900 #define virtchnl_rss_lut_LEGACY_SIZEOF	6
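
/* Editorial sketch: building a LUT that spreads flows round-robin across
 * nqueues queues (nqueues must be nonzero). rss_lut_size from
 * virtchnl_vf_resource tells the VF how many entries to provide; the RSS key
 * follows the same pattern with virtchnl_rss_key.
 */
static inline void
virtchnl_example_fill_lut(struct virtchnl_rss_lut *vrl, u16 vsi_id,
			  u16 entries, u16 nqueues)
{
	u16 i;

	vrl->vsi_id = vsi_id;
	vrl->lut_entries = entries;
	for (i = 0; i < entries; i++)
		vrl->lut[i] = i % nqueues;
}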
901 
902 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS
903  * VIRTCHNL_OP_SET_RSS_HENA
904  * VF sends these messages to get and set the hash filter enable bits for RSS.
905  * By default, the PF sets these to all possible traffic types that the
906  * hardware supports. The VF can query this value if it wants to change the
907  * traffic types that are hashed by the hardware.
908  */
909 struct virtchnl_rss_hena {
910 	u64 hena;
911 };
912 
913 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
914 
915 /* Type of RSS algorithm */
916 enum virtchnl_rss_algorithm {
917 	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
918 	VIRTCHNL_RSS_ALG_R_ASYMMETRIC		= 1,
919 	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
920 	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC		= 3,
921 };
922 
923 /* VIRTCHNL_OP_CONFIG_RSS_HFUNC
924  * VF sends this message to configure the RSS hash function. Only supported
925  * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
926  * configuration negotiation.
927  * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
928  * by the PF.
929  */
930 struct virtchnl_rss_hfunc {
931 	u16 vsi_id;
932 	u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
933 	u32 reserved;
934 };
935 
936 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
937 
938 /* VIRTCHNL_OP_ENABLE_CHANNELS
939  * VIRTCHNL_OP_DISABLE_CHANNELS
940  * VF sends these messages to enable or disable channels based on
941  * the user specified queue count and queue offset for each traffic class.
942  * This struct encompasses all the information that the PF needs from
943  * VF to create a channel.
944  */
945 struct virtchnl_channel_info {
946 	u16 count; /* number of queues in a channel */
947 	u16 offset; /* queues in a channel start from 'offset' */
948 	u32 pad;
949 	u64 max_tx_rate;
950 };
951 
952 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
953 
954 struct virtchnl_tc_info {
955 	u32	num_tc;
956 	u32	pad;
957 	struct virtchnl_channel_info list[];
958 };
959 
960 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
961 #define virtchnl_tc_info_LEGACY_SIZEOF	24
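
/* Editorial sketch: describing two traffic classes of four queues each for
 * VIRTCHNL_OP_ENABLE_CHANNELS. max_tx_rate is left at 0 here (assumed to mean
 * "no rate limit"); its units are not defined in this header.
 */
static inline void virtchnl_example_fill_tc(struct virtchnl_tc_info *ti)
{
	ti->num_tc = 2;
	ti->list[0].count = 4;
	ti->list[0].offset = 0;
	ti->list[0].max_tx_rate = 0;
	ti->list[1].count = 4;
	ti->list[1].offset = 4;
	ti->list[1].max_tx_rate = 0;
}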
962 
963 /* VIRTCHNL_OP_ADD_CLOUD_FILTER
964  * VIRTCHNL_OP_DEL_CLOUD_FILTER
965  * VF sends these messages to add or delete a cloud filter based on the
966  * user specified match and action filters. These structures encompass
967  * all the information that the PF needs from the VF to add/delete a
968  * cloud filter.
969  */
970 
971 struct virtchnl_l4_spec {
972 	u8	src_mac[ETH_ALEN];
973 	u8	dst_mac[ETH_ALEN];
974 	__be16	vlan_id;
975 	__be16	pad; /* reserved for future use */
976 	__be32	src_ip[4];
977 	__be32	dst_ip[4];
978 	__be16	src_port;
979 	__be16	dst_port;
980 };
981 
982 VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
983 
984 union virtchnl_flow_spec {
985 	struct	virtchnl_l4_spec tcp_spec;
986 	u8	buffer[128]; /* reserved for future use */
987 };
988 
989 VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
990 
991 enum virtchnl_action {
992 	/* action types */
993 	VIRTCHNL_ACTION_DROP = 0,
994 	VIRTCHNL_ACTION_TC_REDIRECT,
995 	VIRTCHNL_ACTION_PASSTHRU,
996 	VIRTCHNL_ACTION_QUEUE,
997 	VIRTCHNL_ACTION_Q_REGION,
998 	VIRTCHNL_ACTION_MARK,
999 	VIRTCHNL_ACTION_COUNT,
1000 };
1001 
1002 enum virtchnl_flow_type {
1003 	/* flow types */
1004 	VIRTCHNL_TCP_V4_FLOW = 0,
1005 	VIRTCHNL_TCP_V6_FLOW,
1006 };
1007 
1008 struct virtchnl_filter {
1009 	union	virtchnl_flow_spec data;
1010 	union	virtchnl_flow_spec mask;
1011 
1012 	/* see enum virtchnl_flow_type */
1013 	s32	flow_type;
1014 
1015 	/* see enum virtchnl_action */
1016 	s32	action;
1017 	u32	action_meta;
1018 	u8	field_flags;
1019 	u8	pad[3];
1020 };
1021 
1022 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
1023 
1024 struct virtchnl_supported_rxdids {
1025 	u64 supported_rxdids;
1026 };
1027 
1028 /* VIRTCHNL_OP_EVENT
1029  * PF sends this message to inform the VF driver of events that may affect it.
1030  * No direct response is expected from the VF, though it may generate other
1031  * messages in response to this one.
1032  */
1033 enum virtchnl_event_codes {
1034 	VIRTCHNL_EVENT_UNKNOWN = 0,
1035 	VIRTCHNL_EVENT_LINK_CHANGE,
1036 	VIRTCHNL_EVENT_RESET_IMPENDING,
1037 	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
1038 };
1039 
1040 #define PF_EVENT_SEVERITY_INFO		0
1041 #define PF_EVENT_SEVERITY_CERTAIN_DOOM	255
1042 
1043 struct virtchnl_pf_event {
1044 	/* see enum virtchnl_event_codes */
1045 	s32 event;
1046 	union {
1047 		/* If the PF driver does not support the new speed reporting
1048 		 * capabilities then use link_event else use link_event_adv to
1049 		 * get the speed and link information. The ability to understand
1050 		 * new speeds is indicated by setting the capability flag
1051 		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
1052 		 * in virtchnl_vf_resource struct and can be used to determine
1053 		 * which link event struct to use below.
1054 		 */
1055 		struct {
1056 			enum virtchnl_link_speed link_speed;
1057 			bool link_status;
1058 			u8 pad[3];
1059 		} link_event;
1060 		struct {
1061 			/* link_speed provided in Mbps */
1062 			u32 link_speed;
1063 			u8 link_status;
1064 			u8 pad[3];
1065 		} link_event_adv;
1066 	} event_data;
1067 
1068 	s32 severity;
1069 };
1070 
1071 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
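
/* Editorial sketch: decoding a link-change event. Which union member is
 * valid depends on whether VIRTCHNL_VF_CAP_ADV_LINK_SPEED was negotiated, as
 * the comment inside the union explains. Returns the link speed in Mbps where
 * known, 0 otherwise; only the 10G legacy value is mapped to keep it short.
 */
static inline u32
virtchnl_example_link_speed_mbps(const struct virtchnl_pf_event *ev,
				 bool adv_link_speed)
{
	if (ev->event != VIRTCHNL_EVENT_LINK_CHANGE)
		return 0;
	if (adv_link_speed)
		return ev->event_data.link_event_adv.link_speed;
	return ev->event_data.link_event.link_speed == VIRTCHNL_LINK_SPEED_10GB ?
	       10000 : 0;
}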
1072 
1073 /* used to specify if a ceq_idx or aeq_idx is invalid */
1074 #define VIRTCHNL_RDMA_INVALID_QUEUE_IDX	0xFFFF
1075 /* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
1076  * VF uses this message to request PF to map RDMA vectors to RDMA queues.
1077  * The request for this originates from the VF RDMA driver through
1078  * a client interface between VF LAN and VF RDMA driver.
1079  * A vector could have an AEQ and CEQ attached to it although
1080  * there is a single AEQ per VF RDMA instance in which case
1081  * most vectors will have a VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq and a valid
1082  * idx for ceqs. There will never be a case where multiple CEQs are
1083  * attached to a single vector.
1084  * PF configures interrupt mapping and returns status.
1085  */
1086 
1087 struct virtchnl_rdma_qv_info {
1088 	u32 v_idx; /* msix_vector */
1089 	u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1090 	u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1091 	u8 itr_idx;
1092 	u8 pad[3];
1093 };
1094 
1095 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
1096 
1097 struct virtchnl_rdma_qvlist_info {
1098 	u32 num_vectors;
1099 	struct virtchnl_rdma_qv_info qv_info[];
1100 };
1101 
1102 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
1103 #define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF	16
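
/* Editorial sketch: one RDMA vector with a CEQ attached and no AEQ, per the
 * note above that most vectors carry VIRTCHNL_RDMA_INVALID_QUEUE_IDX for the
 * AEQ. The ITR index is a placeholder.
 */
static inline void
virtchnl_example_fill_rdma_qv(struct virtchnl_rdma_qv_info *qv, u32 msix_vector)
{
	qv->v_idx = msix_vector;
	qv->ceq_idx = 0;
	qv->aeq_idx = VIRTCHNL_RDMA_INVALID_QUEUE_IDX;
	qv->itr_idx = 0;
}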
1104 
1105 /* VF reset states - these are written into the RSTAT register:
1106  * VFGEN_RSTAT on the VF
1107  * When the PF initiates a reset, it writes 0
1108  * When the reset is complete, it writes 1
1109  * When the PF detects that the VF has recovered, it writes 2
1110  * VF checks this register periodically to determine if a reset has occurred,
1111  * then polls it to know when the reset is complete.
1112  * If either the PF or VF reads the register while the hardware
1113  * is in a reset state, it will return 0xDEADBEEF, which, when masked,
1114  * will result in 3.
1115  */
1116 enum virtchnl_vfr_states {
1117 	VIRTCHNL_VFR_INPROGRESS = 0,
1118 	VIRTCHNL_VFR_COMPLETED,
1119 	VIRTCHNL_VFR_VFACTIVE,
1120 };
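
/* Editorial sketch: classifying a raw VFGEN_RSTAT read. Register access is
 * device specific and not modeled here; the low two bits are assumed to carry
 * the reset state, consistent with the masking note above (0xDEADBEEF masks
 * to 3).
 */
static inline bool virtchnl_example_reset_done(u32 rstat)
{
	u32 state = rstat & 0x3;	/* assumed 2-bit state field */

	return state == VIRTCHNL_VFR_COMPLETED || state == VIRTCHNL_VFR_VFACTIVE;
}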
1121 
1122 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
1123 #define PROTO_HDR_SHIFT			5
1124 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
1125 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
1126 
1127 /* The VF uses these macros to configure each protocol header.
1128  * They specify which protocol headers and protocol header fields, based on
1129  * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
1130  * @param hdr: pointer to a struct virtchnl_proto_hdr
1131  * @param hdr_type: ETH/IPV4/TCP, etc
1132  * @param field: SRC/DST/TEID/SPI, etc
1133  */
1134 #define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
1135 	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
1136 #define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
1137 	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
1138 #define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
1139 	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
1140 #define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr)	((hdr)->field_selector)
1141 
1142 #define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1143 	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
1144 		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1145 #define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1146 	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
1147 		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1148 
1149 #define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
1150 	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
1151 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
1152 	(((hdr)->type) >> PROTO_HDR_SHIFT)
1153 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
1154 	((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
1155 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
1156 	(VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
1157 	 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
1158 
1159 /* Protocol header type within a packet segment. A segment consists of one or
1160  * more protocol headers that make up a logical group. Each logical group
1161  * either encapsulates, or is encapsulated by, tunneling or encapsulation
1162  * protocols used for network virtualization.
1163  */
1164 enum virtchnl_proto_hdr_type {
1165 	VIRTCHNL_PROTO_HDR_NONE,
1166 	VIRTCHNL_PROTO_HDR_ETH,
1167 	VIRTCHNL_PROTO_HDR_S_VLAN,
1168 	VIRTCHNL_PROTO_HDR_C_VLAN,
1169 	VIRTCHNL_PROTO_HDR_IPV4,
1170 	VIRTCHNL_PROTO_HDR_IPV6,
1171 	VIRTCHNL_PROTO_HDR_TCP,
1172 	VIRTCHNL_PROTO_HDR_UDP,
1173 	VIRTCHNL_PROTO_HDR_SCTP,
1174 	VIRTCHNL_PROTO_HDR_GTPU_IP,
1175 	VIRTCHNL_PROTO_HDR_GTPU_EH,
1176 	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
1177 	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
1178 	VIRTCHNL_PROTO_HDR_PPPOE,
1179 	VIRTCHNL_PROTO_HDR_L2TPV3,
1180 	VIRTCHNL_PROTO_HDR_ESP,
1181 	VIRTCHNL_PROTO_HDR_AH,
1182 	VIRTCHNL_PROTO_HDR_PFCP,
1183 };
1184 
1185 /* Protocol header field within a protocol header. */
1186 enum virtchnl_proto_hdr_field {
1187 	/* ETHER */
1188 	VIRTCHNL_PROTO_HDR_ETH_SRC =
1189 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
1190 	VIRTCHNL_PROTO_HDR_ETH_DST,
1191 	VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
1192 	/* S-VLAN */
1193 	VIRTCHNL_PROTO_HDR_S_VLAN_ID =
1194 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
1195 	/* C-VLAN */
1196 	VIRTCHNL_PROTO_HDR_C_VLAN_ID =
1197 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
1198 	/* IPV4 */
1199 	VIRTCHNL_PROTO_HDR_IPV4_SRC =
1200 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
1201 	VIRTCHNL_PROTO_HDR_IPV4_DST,
1202 	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
1203 	VIRTCHNL_PROTO_HDR_IPV4_TTL,
1204 	VIRTCHNL_PROTO_HDR_IPV4_PROT,
1205 	/* IPV6 */
1206 	VIRTCHNL_PROTO_HDR_IPV6_SRC =
1207 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
1208 	VIRTCHNL_PROTO_HDR_IPV6_DST,
1209 	VIRTCHNL_PROTO_HDR_IPV6_TC,
1210 	VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
1211 	VIRTCHNL_PROTO_HDR_IPV6_PROT,
1212 	/* TCP */
1213 	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
1214 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
1215 	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
1216 	/* UDP */
1217 	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
1218 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
1219 	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
1220 	/* SCTP */
1221 	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
1222 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
1223 	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
1224 	/* GTPU_IP */
1225 	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
1226 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
1227 	/* GTPU_EH */
1228 	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
1229 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
1230 	VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
1231 	/* PPPOE */
1232 	VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
1233 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
1234 	/* L2TPV3 */
1235 	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
1236 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
1237 	/* ESP */
1238 	VIRTCHNL_PROTO_HDR_ESP_SPI =
1239 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
1240 	/* AH */
1241 	VIRTCHNL_PROTO_HDR_AH_SPI =
1242 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
1243 	/* PFCP */
1244 	VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
1245 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
1246 	VIRTCHNL_PROTO_HDR_PFCP_SEID,
1247 };
1248 
1249 struct virtchnl_proto_hdr {
1250 	/* see enum virtchnl_proto_hdr_type */
1251 	s32 type;
1252 	u32 field_selector; /* a bit mask to select field for header type */
1253 	u8 buffer[64];
1254 	/**
1255 	 * binary buffer in network order for the specific header type.
1256 	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
1257 	 * header is expected to be copied into the buffer.
1258 	 */
1259 };
1260 
1261 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
1262 
1263 struct virtchnl_proto_hdrs {
1264 	u8 tunnel_level;
1265 	u8 pad[3];
1266 	/**
1267 	 * specifies where the protocol headers start from.
1268 	 * 0 - from the outer layer
1269 	 * 1 - from the first inner layer
1270 	 * 2 - from the second inner layer
1271 	 * ....
1272 	 **/
1273 	int count; /* number of proto layers, must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
1274 	struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
1275 };
1276 
1277 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1278 
1279 struct virtchnl_rss_cfg {
1280 	struct virtchnl_proto_hdrs proto_hdrs;	   /* protocol headers */
1281 
1282 	/* see enum virtchnl_rss_algorithm; rss algorithm type */
1283 	s32 rss_algorithm;
1284 	u8 reserved[128];                          /* reserve for future */
1285 };
1286 
1287 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
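
/* Editorial sketch: selecting the IPv4 source/destination addresses as the
 * hash input for an ADD_RSS_CFG request, using the field-selection macros
 * above. Only one protocol header layer is described.
 */
static inline void virtchnl_example_fill_rss_cfg(struct virtchnl_rss_cfg *cfg)
{
	struct virtchnl_proto_hdr *hdr = &cfg->proto_hdrs.proto_hdr[0];

	cfg->proto_hdrs.tunnel_level = 0;	/* start from the outer layer */
	cfg->proto_hdrs.count = 1;
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
	cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
}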
1288 
1289 /* action configuration for FDIR */
1290 struct virtchnl_filter_action {
1291 	/* see enum virtchnl_action type */
1292 	s32 type;
1293 	union {
1294 		/* used for queue and qgroup action */
1295 		struct {
1296 			u16 index;
1297 			u8 region;
1298 		} queue;
1299 		/* used for count action */
1300 		struct {
1301 			/* share counter ID with other flow rules */
1302 			u8 shared;
1303 			u32 id; /* counter ID */
1304 		} count;
1305 		/* used for mark action */
1306 		u32 mark_id;
1307 		u8 reserve[32];
1308 	} act_conf;
1309 };
1310 
1311 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1312 
1313 #define VIRTCHNL_MAX_NUM_ACTIONS  8
1314 
1315 struct virtchnl_filter_action_set {
1316 	/* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1317 	int count;
1318 	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1319 };
1320 
1321 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1322 
1323 /* pattern and action for FDIR rule */
1324 struct virtchnl_fdir_rule {
1325 	struct virtchnl_proto_hdrs proto_hdrs;
1326 	struct virtchnl_filter_action_set action_set;
1327 };
1328 
1329 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1330 
1331 /* Status returned to the VF after the VF requests FDIR commands
1332  * VIRTCHNL_FDIR_SUCCESS
1333  * The VF's FDIR-related request was successfully completed by the PF.
1334  * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
1335  *
1336  * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
1337  * OP_ADD_FDIR_FILTER request failed because no hardware resource is available.
1338  *
1339  * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
1340  * OP_ADD_FDIR_FILTER request failed because the rule already exists.
1341  *
1342  * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
1343  * OP_ADD_FDIR_FILTER request failed because it conflicts with an existing rule.
1344  *
1345  * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
1346  * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
1347  *
1348  * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
1349  * OP_ADD_FDIR_FILTER request failed because parameter validation failed
1350  * or the hardware doesn't support the rule.
1351  *
1352  * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
1353  * OP_ADD/DEL_FDIR_FILTER request failed because rule programming
1354  * timed out.
1355  *
1356  * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
1357  * OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
1358  * for example, the VF queried the counter of a rule that has no counter action.
1359  */
1360 enum virtchnl_fdir_prgm_status {
1361 	VIRTCHNL_FDIR_SUCCESS = 0,
1362 	VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1363 	VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1364 	VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1365 	VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1366 	VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1367 	VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1368 	VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1369 };
1370 
1371 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1372  * VF sends this request to PF by filling out vsi_id,
1373  * validate_only and rule_cfg. PF will return flow_id
1374  * if the request is successfully done and return add_status to VF.
1375  */
1376 struct virtchnl_fdir_add {
1377 	u16 vsi_id;  /* INPUT */
1378 	/*
1379 	 * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
1380 	 * Validate and create share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.
1381 	 */
1382 	u16 validate_only; /* INPUT */
1383 	u32 flow_id;       /* OUTPUT */
1384 	struct virtchnl_fdir_rule rule_cfg; /* INPUT */
1385 
1386 	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
1387 	s32 status;
1388 };
1389 
1390 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
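
/* Editorial sketch: a validate-only ADD_FDIR_FILTER request that would steer
 * matching traffic to queue 3. Populating rule_cfg.proto_hdrs follows the same
 * macro pattern shown for RSS above and is omitted here.
 */
static inline void
virtchnl_example_fill_fdir(struct virtchnl_fdir_add *req, u16 vsi_id)
{
	req->vsi_id = vsi_id;
	req->validate_only = 1;		/* dry run; 0 programs the rule */
	req->rule_cfg.action_set.count = 1;
	req->rule_cfg.action_set.actions[0].type = VIRTCHNL_ACTION_QUEUE;
	req->rule_cfg.action_set.actions[0].act_conf.queue.index = 3;
}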
1391 
1392 /* VIRTCHNL_OP_DEL_FDIR_FILTER
1393  * VF sends this request to PF by filling out vsi_id
1394  * and flow_id. PF will return del_status to VF.
1395  */
1396 struct virtchnl_fdir_del {
1397 	u16 vsi_id;  /* INPUT */
1398 	u16 pad;
1399 	u32 flow_id; /* INPUT */
1400 
1401 	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
1402 	s32 status;
1403 };
1404 
1405 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
1406 
1407 #define __vss_byone(p, member, count, old)				      \
1408 	(struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
1409 
1410 #define __vss_byelem(p, member, count, old)				      \
1411 	(struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))
1412 
1413 #define __vss_full(p, member, count, old)				      \
1414 	(struct_size(p, member, count) + (old - struct_size(p, member, 0)))
1415 
1416 #define __vss(type, func, p, member, count)		\
1417 	struct type: func(p, member, count, type##_LEGACY_SIZEOF)
1418 
1419 #define virtchnl_struct_size(p, m, c)					      \
1420 	_Generic(*p,							      \
1421 		 __vss(virtchnl_vf_resource, __vss_full, p, m, c),	      \
1422 		 __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c),  \
1423 		 __vss(virtchnl_irq_map_info, __vss_full, p, m, c),	      \
1424 		 __vss(virtchnl_ether_addr_list, __vss_full, p, m, c),	      \
1425 		 __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c),	      \
1426 		 __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c),  \
1427 		 __vss(virtchnl_tc_info, __vss_byelem, p, m, c),	      \
1428 		 __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c),     \
1429 		 __vss(virtchnl_rss_key, __vss_byone, p, m, c),		      \
1430 		 __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
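
/* Editorial sketch: sizing a variable-length message before allocating and
 * sending it. For the structures listed in the _Generic map above this yields
 * the same length the validator below expects for the given element count.
 */
static inline size_t
virtchnl_example_veal_size(struct virtchnl_ether_addr_list *veal, u16 n)
{
	return virtchnl_struct_size(veal, list, n);
}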
1431 
1432 /**
1433  * virtchnl_vc_validate_vf_msg
1434  * @ver: Virtchnl version info
1435  * @v_opcode: Opcode for the message
1436  * @msg: pointer to the msg buffer
1437  * @msglen: msg length
1438  *
1439  * validate msg format against struct for each opcode
1440  */
1441 static inline int
1442 virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
1443 			    u8 *msg, u16 msglen)
1444 {
1445 	bool err_msg_format = false;
1446 	u32 valid_len = 0;
1447 
1448 	/* Validate message length. */
1449 	switch (v_opcode) {
1450 	case VIRTCHNL_OP_VERSION:
1451 		valid_len = sizeof(struct virtchnl_version_info);
1452 		break;
1453 	case VIRTCHNL_OP_RESET_VF:
1454 		break;
1455 	case VIRTCHNL_OP_GET_VF_RESOURCES:
1456 		if (VF_IS_V11(ver))
1457 			valid_len = sizeof(u32);
1458 		break;
1459 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1460 		valid_len = sizeof(struct virtchnl_txq_info);
1461 		break;
1462 	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1463 		valid_len = sizeof(struct virtchnl_rxq_info);
1464 		break;
1465 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1466 		valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
1467 		if (msglen >= valid_len) {
1468 			struct virtchnl_vsi_queue_config_info *vqc =
1469 			    (struct virtchnl_vsi_queue_config_info *)msg;
1470 			valid_len = virtchnl_struct_size(vqc, qpair,
1471 							 vqc->num_queue_pairs);
1472 			if (vqc->num_queue_pairs == 0)
1473 				err_msg_format = true;
1474 		}
1475 		break;
1476 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1477 		valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
1478 		if (msglen >= valid_len) {
1479 			struct virtchnl_irq_map_info *vimi =
1480 			    (struct virtchnl_irq_map_info *)msg;
1481 			valid_len = virtchnl_struct_size(vimi, vecmap,
1482 							 vimi->num_vectors);
1483 			if (vimi->num_vectors == 0)
1484 				err_msg_format = true;
1485 		}
1486 		break;
1487 	case VIRTCHNL_OP_ENABLE_QUEUES:
1488 	case VIRTCHNL_OP_DISABLE_QUEUES:
1489 		valid_len = sizeof(struct virtchnl_queue_select);
1490 		break;
1491 	case VIRTCHNL_OP_ADD_ETH_ADDR:
1492 	case VIRTCHNL_OP_DEL_ETH_ADDR:
1493 		valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
1494 		if (msglen >= valid_len) {
1495 			struct virtchnl_ether_addr_list *veal =
1496 			    (struct virtchnl_ether_addr_list *)msg;
1497 			valid_len = virtchnl_struct_size(veal, list,
1498 							 veal->num_elements);
1499 			if (veal->num_elements == 0)
1500 				err_msg_format = true;
1501 		}
1502 		break;
1503 	case VIRTCHNL_OP_ADD_VLAN:
1504 	case VIRTCHNL_OP_DEL_VLAN:
1505 		valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
1506 		if (msglen >= valid_len) {
1507 			struct virtchnl_vlan_filter_list *vfl =
1508 			    (struct virtchnl_vlan_filter_list *)msg;
1509 			valid_len = virtchnl_struct_size(vfl, vlan_id,
1510 							 vfl->num_elements);
1511 			if (vfl->num_elements == 0)
1512 				err_msg_format = true;
1513 		}
1514 		break;
1515 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1516 		valid_len = sizeof(struct virtchnl_promisc_info);
1517 		break;
1518 	case VIRTCHNL_OP_GET_STATS:
1519 		valid_len = sizeof(struct virtchnl_queue_select);
1520 		break;
1521 	case VIRTCHNL_OP_RDMA:
1522 		/* These messages are opaque to us and will be validated in
1523 		 * the RDMA client code. We just need to check for nonzero
1524 		 * length. The firmware will enforce max length restrictions.
1525 		 */
1526 		if (msglen)
1527 			valid_len = msglen;
1528 		else
1529 			err_msg_format = true;
1530 		break;
1531 	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
1532 		break;
1533 	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
1534 		valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
1535 		if (msglen >= valid_len) {
1536 			struct virtchnl_rdma_qvlist_info *qv =
1537 				(struct virtchnl_rdma_qvlist_info *)msg;
1538 
1539 			valid_len = virtchnl_struct_size(qv, qv_info,
1540 							 qv->num_vectors);
1541 		}
1542 		break;
1543 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
1544 		valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
1545 		if (msglen >= valid_len) {
1546 			struct virtchnl_rss_key *vrk =
1547 				(struct virtchnl_rss_key *)msg;
1548 			valid_len = virtchnl_struct_size(vrk, key,
1549 							 vrk->key_len);
1550 		}
1551 		break;
1552 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
1553 		valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
1554 		if (msglen >= valid_len) {
1555 			struct virtchnl_rss_lut *vrl =
1556 				(struct virtchnl_rss_lut *)msg;
1557 			valid_len = virtchnl_struct_size(vrl, lut,
1558 							 vrl->lut_entries);
1559 		}
1560 		break;
1561 	case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
1562 		valid_len = sizeof(struct virtchnl_rss_hfunc);
1563 		break;
1564 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
1565 		break;
1566 	case VIRTCHNL_OP_SET_RSS_HENA:
1567 		valid_len = sizeof(struct virtchnl_rss_hena);
1568 		break;
1569 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1570 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1571 		break;
1572 	case VIRTCHNL_OP_REQUEST_QUEUES:
1573 		valid_len = sizeof(struct virtchnl_vf_res_request);
1574 		break;
1575 	case VIRTCHNL_OP_ENABLE_CHANNELS:
1576 		valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
1577 		if (msglen >= valid_len) {
1578 			struct virtchnl_tc_info *vti =
1579 				(struct virtchnl_tc_info *)msg;
1580 			valid_len = virtchnl_struct_size(vti, list,
1581 							 vti->num_tc);
1582 			if (vti->num_tc == 0)
1583 				err_msg_format = true;
1584 		}
1585 		break;
1586 	case VIRTCHNL_OP_DISABLE_CHANNELS:
1587 		break;
1588 	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
1589 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
1590 		valid_len = sizeof(struct virtchnl_filter);
1591 		break;
1592 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
1593 		break;
1594 	case VIRTCHNL_OP_ADD_RSS_CFG:
1595 	case VIRTCHNL_OP_DEL_RSS_CFG:
1596 		valid_len = sizeof(struct virtchnl_rss_cfg);
1597 		break;
1598 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
1599 		valid_len = sizeof(struct virtchnl_fdir_add);
1600 		break;
1601 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
1602 		valid_len = sizeof(struct virtchnl_fdir_del);
1603 		break;
1604 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
1605 		break;
1606 	case VIRTCHNL_OP_ADD_VLAN_V2:
1607 	case VIRTCHNL_OP_DEL_VLAN_V2:
1608 		valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
1609 		if (msglen >= valid_len) {
1610 			struct virtchnl_vlan_filter_list_v2 *vfl =
1611 			    (struct virtchnl_vlan_filter_list_v2 *)msg;
1612 
1613 			valid_len = virtchnl_struct_size(vfl, filters,
1614 							 vfl->num_elements);
1615 
1616 			if (vfl->num_elements == 0) {
1617 				err_msg_format = true;
1618 				break;
1619 			}
1620 		}
1621 		break;
1622 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1623 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1624 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1625 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1626 		valid_len = sizeof(struct virtchnl_vlan_setting);
1627 		break;
1628 	/* These are always errors coming from the VF. */
1629 	case VIRTCHNL_OP_EVENT:
1630 	case VIRTCHNL_OP_UNKNOWN:
1631 	default:
1632 		return VIRTCHNL_STATUS_ERR_PARAM;
1633 	}
1634 	/* few more checks */
1635 	if (err_msg_format || valid_len != msglen)
1636 		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
1637 
1638 	return 0;
1639 }
1640 #endif /* _VIRTCHNL_H_ */
1641