1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* Copyright (c) 2013-2022, Intel Corporation. */ 3 4 #ifndef _VIRTCHNL_H_ 5 #define _VIRTCHNL_H_ 6 7 /* Description: 8 * This header file describes the Virtual Function (VF) - Physical Function 9 * (PF) communication protocol used by the drivers for all devices starting 10 * from our 40G product line 11 * 12 * Admin queue buffer usage: 13 * desc->opcode is always aqc_opc_send_msg_to_pf 14 * flags, retval, datalen, and data addr are all used normally. 15 * The Firmware copies the cookie fields when sending messages between the 16 * PF and VF, but uses all other fields internally. Due to this limitation, 17 * we must send all messages as "indirect", i.e. using an external buffer. 18 * 19 * All the VSI indexes are relative to the VF. Each VF can have maximum of 20 * three VSIs. All the queue indexes are relative to the VSI. Each VF can 21 * have a maximum of sixteen queues for all of its VSIs. 22 * 23 * The PF is required to return a status code in v_retval for all messages 24 * except RESET_VF, which does not require any response. The returned value 25 * is of virtchnl_status_code type, defined here. 26 * 27 * In general, VF driver initialization should roughly follow the order of 28 * these opcodes. The VF driver must first validate the API version of the 29 * PF driver, then request a reset, then get resources, then configure 30 * queues and interrupts. After these operations are complete, the VF 31 * driver may start its queues, optionally add MAC and VLAN filters, and 32 * process traffic. 33 */ 34 35 /* START GENERIC DEFINES 36 * Need to ensure the following enums and defines hold the same meaning and 37 * value in current and future projects 38 */ 39 40 /* Error Codes */ 41 enum virtchnl_status_code { 42 VIRTCHNL_STATUS_SUCCESS = 0, 43 VIRTCHNL_STATUS_ERR_PARAM = -5, 44 VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, 45 VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, 46 VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, 47 VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, 48 VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, 49 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, 50 }; 51 52 /* Backward compatibility */ 53 #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM 54 #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED 55 56 #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 57 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 58 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 59 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 60 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 61 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 62 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 63 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 64 65 enum virtchnl_link_speed { 66 VIRTCHNL_LINK_SPEED_UNKNOWN = 0, 67 VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), 68 VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), 69 VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), 70 VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), 71 VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), 72 VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), 73 VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), 74 VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), 75 }; 76 77 /* for hsplit_0 field of Rx HMC context */ 78 /* deprecated with AVF 1.0 */ 79 enum virtchnl_rx_hsplit { 80 VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, 81 VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, 82 VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, 83 VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, 84 
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, 85 }; 86 87 /* END GENERIC DEFINES */ 88 89 /* Opcodes for VF-PF communication. These are placed in the v_opcode field 90 * of the virtchnl_msg structure. 91 */ 92 enum virtchnl_ops { 93 /* The PF sends status change events to VFs using 94 * the VIRTCHNL_OP_EVENT opcode. 95 * VFs send requests to the PF using the other ops. 96 * Use of "advanced opcode" features must be negotiated as part of capabilities 97 * exchange and are not considered part of base mode feature set. 98 */ 99 VIRTCHNL_OP_UNKNOWN = 0, 100 VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ 101 VIRTCHNL_OP_RESET_VF = 2, 102 VIRTCHNL_OP_GET_VF_RESOURCES = 3, 103 VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, 104 VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, 105 VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, 106 VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, 107 VIRTCHNL_OP_ENABLE_QUEUES = 8, 108 VIRTCHNL_OP_DISABLE_QUEUES = 9, 109 VIRTCHNL_OP_ADD_ETH_ADDR = 10, 110 VIRTCHNL_OP_DEL_ETH_ADDR = 11, 111 VIRTCHNL_OP_ADD_VLAN = 12, 112 VIRTCHNL_OP_DEL_VLAN = 13, 113 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, 114 VIRTCHNL_OP_GET_STATS = 15, 115 VIRTCHNL_OP_RSVD = 16, 116 VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ 117 /* opcode 19 is reserved */ 118 VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ 119 VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP, 120 VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ 121 VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, 122 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ 123 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 124 VIRTCHNL_OP_CONFIG_RSS_KEY = 23, 125 VIRTCHNL_OP_CONFIG_RSS_LUT = 24, 126 VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, 127 VIRTCHNL_OP_SET_RSS_HENA = 26, 128 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, 129 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, 130 VIRTCHNL_OP_REQUEST_QUEUES = 29, 131 VIRTCHNL_OP_ENABLE_CHANNELS = 30, 132 VIRTCHNL_OP_DISABLE_CHANNELS = 31, 133 VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, 134 VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, 135 /* opcode 34 - 43 are reserved */ 136 VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44, 137 VIRTCHNL_OP_ADD_RSS_CFG = 45, 138 VIRTCHNL_OP_DEL_RSS_CFG = 46, 139 VIRTCHNL_OP_ADD_FDIR_FILTER = 47, 140 VIRTCHNL_OP_DEL_FDIR_FILTER = 48, 141 VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51, 142 VIRTCHNL_OP_ADD_VLAN_V2 = 52, 143 VIRTCHNL_OP_DEL_VLAN_V2 = 53, 144 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54, 145 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55, 146 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56, 147 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57, 148 VIRTCHNL_OP_MAX, 149 }; 150 151 /* These macros are used to generate compilation errors if a structure/union 152 * is not exactly the correct length. It gives a divide by zero error if the 153 * structure/union is not of the correct size, otherwise it creates an enum 154 * that is never used. 155 */ 156 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ 157 { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } 158 #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ 159 { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } 160 161 /* Message descriptions and data structures. */ 162 163 /* VIRTCHNL_OP_VERSION 164 * VF posts its version number to the PF. PF responds with its version number 165 * in the same format, along with a return code. 166 * Reply from PF has its major/minor versions also in param0 and param1. 167 * If there is a major version mismatch, then the VF cannot operate. 
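 *
 * (Illustrative sketch only, not part of the protocol definition: a VF
 * driver might encode the major-version check as
 *	if (pf_ver.major != VIRTCHNL_VERSION_MAJOR)
 *		return -EOPNOTSUPP;
 * where pf_ver is the driver's local copy of the PF's reply.)
 *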
168 * If there is a minor version mismatch, then the VF can operate but should 169 * add a warning to the system log. 170 * 171 * This enum element MUST always be specified as == 1, regardless of other 172 * changes in the API. The PF must always respond to this message without 173 * error regardless of version mismatch. 174 */ 175 #define VIRTCHNL_VERSION_MAJOR 1 176 #define VIRTCHNL_VERSION_MINOR 1 177 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 178 179 struct virtchnl_version_info { 180 u32 major; 181 u32 minor; 182 }; 183 184 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); 185 186 #define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) 187 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) 188 189 /* VIRTCHNL_OP_RESET_VF 190 * VF sends this request to PF with no parameters 191 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register 192 * until reset completion is indicated. The admin queue must be reinitialized 193 * after this operation. 194 * 195 * When reset is complete, PF must ensure that all queues in all VSIs associated 196 * with the VF are stopped, all queue configurations in the HMC are set to 0, 197 * and all MAC and VLAN filters (except the default MAC address) on all VSIs 198 * are cleared. 199 */ 200 201 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV 202 * vsi_type should always be 6 for backward compatibility. Add other fields 203 * as needed. 204 */ 205 enum virtchnl_vsi_type { 206 VIRTCHNL_VSI_TYPE_INVALID = 0, 207 VIRTCHNL_VSI_SRIOV = 6, 208 }; 209 210 /* VIRTCHNL_OP_GET_VF_RESOURCES 211 * Version 1.0 VF sends this request to PF with no parameters 212 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities 213 * PF responds with an indirect message containing 214 * virtchnl_vf_resource and one or more 215 * virtchnl_vsi_resource structures. 216 */ 217 218 struct virtchnl_vsi_resource { 219 u16 vsi_id; 220 u16 num_queue_pairs; 221 222 /* see enum virtchnl_vsi_type */ 223 s32 vsi_type; 224 u16 qset_handle; 225 u8 default_mac_addr[ETH_ALEN]; 226 }; 227 228 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); 229 230 /* VF capability flags 231 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including 232 * TX/RX Checksum offloading and TSO for non-tunnelled packets. 
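 *
 * (Illustrative sketch only: a version 1.1 VF asking for base mode plus the
 * ability to request extra queues would pass a capability bitmap such as
 *	u32 caps = VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
 * as the payload of VIRTCHNL_OP_GET_VF_RESOURCES; the PF replies with the
 * subset it actually supports in vf_cap_flags.)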
233 */ 234 #define VIRTCHNL_VF_OFFLOAD_L2 BIT(0) 235 #define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1) 236 #define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA 237 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3) 238 #define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4) 239 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5) 240 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6) 241 /* used to negotiate communicating link speeds in Mbps */ 242 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7) 243 #define VIRTCHNL_VF_OFFLOAD_CRC BIT(10) 244 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15) 245 #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16) 246 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17) 247 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18) 248 #define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19) 249 #define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20) 250 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21) 251 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22) 252 #define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23) 253 #define VIRTCHNL_VF_OFFLOAD_USO BIT(25) 254 #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26) 255 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27) 256 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28) 257 258 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ 259 VIRTCHNL_VF_OFFLOAD_VLAN | \ 260 VIRTCHNL_VF_OFFLOAD_RSS_PF) 261 262 struct virtchnl_vf_resource { 263 u16 num_vsis; 264 u16 num_queue_pairs; 265 u16 max_vectors; 266 u16 max_mtu; 267 268 u32 vf_cap_flags; 269 u32 rss_key_size; 270 u32 rss_lut_size; 271 272 struct virtchnl_vsi_resource vsi_res[]; 273 }; 274 275 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource); 276 #define virtchnl_vf_resource_LEGACY_SIZEOF 36 277 278 /* VIRTCHNL_OP_CONFIG_TX_QUEUE 279 * VF sends this message to set up parameters for one TX queue. 280 * External data buffer contains one instance of virtchnl_txq_info. 281 * PF configures requested queue and returns a status code. 282 */ 283 284 /* Tx queue config info */ 285 struct virtchnl_txq_info { 286 u16 vsi_id; 287 u16 queue_id; 288 u16 ring_len; /* number of descriptors, multiple of 8 */ 289 u16 headwb_enabled; /* deprecated with AVF 1.0 */ 290 u64 dma_ring_addr; 291 u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ 292 }; 293 294 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); 295 296 /* VIRTCHNL_OP_CONFIG_RX_QUEUE 297 * VF sends this message to set up parameters for one RX queue. 298 * External data buffer contains one instance of virtchnl_rxq_info. 299 * PF configures requested queue and returns a status code. The 300 * crc_disable flag disables CRC stripping on the VF. Setting 301 * the crc_disable flag to 1 will disable CRC stripping for each 302 * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC 303 * offload must have been set prior to sending this info or the PF 304 * will ignore the request. This flag should be set the same for 305 * all of the queues for a VF. 306 */ 307 308 /* Rx queue config info */ 309 struct virtchnl_rxq_info { 310 u16 vsi_id; 311 u16 queue_id; 312 u32 ring_len; /* number of descriptors, multiple of 32 */ 313 u16 hdr_size; 314 u16 splithdr_enabled; /* deprecated with AVF 1.0 */ 315 u32 databuffer_size; 316 u32 max_pkt_size; 317 u8 crc_disable; 318 u8 rxdid; 319 u8 pad1[2]; 320 u64 dma_ring_addr; 321 322 /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */ 323 s32 rx_split_pos; 324 u32 pad2; 325 }; 326 327 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); 328 329 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES 330 * VF sends this message to set parameters for all active TX and RX queues 331 * associated with the specified VSI. 
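 *
 * (Illustrative sketch only: a VF configuring a single queue pair might fill
 *	struct virtchnl_queue_pair_info qpi = {
 *		.txq = { .vsi_id = vsi_id, .queue_id = 0, .ring_len = 512,
 *			 .dma_ring_addr = tx_dma },
 *		.rxq = { .vsi_id = vsi_id, .queue_id = 0, .ring_len = 512,
 *			 .databuffer_size = 2048, .max_pkt_size = 1522,
 *			 .dma_ring_addr = rx_dma },
 *	};
 * where vsi_id, tx_dma and rx_dma are driver-local values, and place it in
 * the qpair[] array of struct virtchnl_vsi_queue_config_info.)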
332 * PF configures queues and returns status. 333 * If the number of queues specified is greater than the number of queues 334 * associated with the VSI, an error is returned and no queues are configured. 335 * NOTE: The VF is not required to configure all queues in a single request. 336 * It may send multiple messages. PF drivers must correctly handle all VF 337 * requests. 338 */ 339 struct virtchnl_queue_pair_info { 340 /* NOTE: vsi_id and queue_id should be identical for both queues. */ 341 struct virtchnl_txq_info txq; 342 struct virtchnl_rxq_info rxq; 343 }; 344 345 VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); 346 347 struct virtchnl_vsi_queue_config_info { 348 u16 vsi_id; 349 u16 num_queue_pairs; 350 u32 pad; 351 struct virtchnl_queue_pair_info qpair[]; 352 }; 353 354 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info); 355 #define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72 356 357 /* VIRTCHNL_OP_REQUEST_QUEUES 358 * VF sends this message to request the PF to allocate additional queues to 359 * this VF. Each VF gets a guaranteed number of queues on init but asking for 360 * additional queues must be negotiated. This is a best effort request as it 361 * is possible the PF does not have enough queues left to support the request. 362 * If the PF cannot support the number requested it will respond with the 363 * maximum number it is able to support. If the request is successful, PF will 364 * then reset the VF to institute required changes. 365 */ 366 367 /* VF resource request */ 368 struct virtchnl_vf_res_request { 369 u16 num_queue_pairs; 370 }; 371 372 /* VIRTCHNL_OP_CONFIG_IRQ_MAP 373 * VF uses this message to map vectors to queues. 374 * The rxq_map and txq_map fields are bitmaps used to indicate which queues 375 * are to be associated with the specified vector. 376 * The "other" causes are always mapped to vector 0. The VF may not request 377 * that vector 0 be used for traffic. 378 * PF configures interrupt mapping and returns status. 379 * NOTE: due to hardware requirements, all active queues (both TX and RX) 380 * should be mapped to interrupts, even if the driver intends to operate 381 * only in polling mode. In this case the interrupt may be disabled, but 382 * the ITR timer will still run to trigger writebacks. 383 */ 384 struct virtchnl_vector_map { 385 u16 vsi_id; 386 u16 vector_id; 387 u16 rxq_map; 388 u16 txq_map; 389 u16 rxitr_idx; 390 u16 txitr_idx; 391 }; 392 393 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); 394 395 struct virtchnl_irq_map_info { 396 u16 num_vectors; 397 struct virtchnl_vector_map vecmap[]; 398 }; 399 400 VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info); 401 #define virtchnl_irq_map_info_LEGACY_SIZEOF 14 402 403 /* VIRTCHNL_OP_ENABLE_QUEUES 404 * VIRTCHNL_OP_DISABLE_QUEUES 405 * VF sends these message to enable or disable TX/RX queue pairs. 406 * The queues fields are bitmaps indicating which queues to act upon. 407 * (Currently, we only support 16 queues per VF, but we make the field 408 * u32 to allow for expansion.) 409 * PF performs requested action and returns status. 410 * NOTE: The VF is not required to enable/disable all queues in a single 411 * request. It may send multiple messages. 412 * PF drivers must correctly handle all VF requests. 
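 *
 * (Illustrative sketch only: to enable queue pairs 0 and 1 a VF might send
 *	struct virtchnl_queue_select qs = {
 *		.vsi_id = vsi_id,
 *		.rx_queues = BIT(0) | BIT(1),
 *		.tx_queues = BIT(0) | BIT(1),
 *	};
 * with VIRTCHNL_OP_ENABLE_QUEUES, where vsi_id is a driver-local copy of the
 * VSI id returned by VIRTCHNL_OP_GET_VF_RESOURCES.)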
413 */ 414 struct virtchnl_queue_select { 415 u16 vsi_id; 416 u16 pad; 417 u32 rx_queues; 418 u32 tx_queues; 419 }; 420 421 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); 422 423 /* VIRTCHNL_OP_ADD_ETH_ADDR 424 * VF sends this message in order to add one or more unicast or multicast 425 * address filters for the specified VSI. 426 * PF adds the filters and returns status. 427 */ 428 429 /* VIRTCHNL_OP_DEL_ETH_ADDR 430 * VF sends this message in order to remove one or more unicast or multicast 431 * filters for the specified VSI. 432 * PF removes the filters and returns status. 433 */ 434 435 /* VIRTCHNL_ETHER_ADDR_LEGACY 436 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad 437 * bytes. Moving forward all VF drivers should not set type to 438 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy 439 * behavior. The control plane function (i.e. PF) can use a best effort method 440 * of tracking the primary/device unicast in this case, but there is no 441 * guarantee and functionality depends on the implementation of the PF. 442 */ 443 444 /* VIRTCHNL_ETHER_ADDR_PRIMARY 445 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the 446 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and 447 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane 448 * function (i.e. PF) to accurately track and use this MAC address for 449 * displaying on the host and for VM/function reset. 450 */ 451 452 /* VIRTCHNL_ETHER_ADDR_EXTRA 453 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra 454 * unicast and/or multicast filters that are being added/deleted via 455 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively. 456 */ 457 struct virtchnl_ether_addr { 458 u8 addr[ETH_ALEN]; 459 u8 type; 460 #define VIRTCHNL_ETHER_ADDR_LEGACY 0 461 #define VIRTCHNL_ETHER_ADDR_PRIMARY 1 462 #define VIRTCHNL_ETHER_ADDR_EXTRA 2 463 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */ 464 u8 pad; 465 }; 466 467 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); 468 469 struct virtchnl_ether_addr_list { 470 u16 vsi_id; 471 u16 num_elements; 472 struct virtchnl_ether_addr list[]; 473 }; 474 475 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list); 476 #define virtchnl_ether_addr_list_LEGACY_SIZEOF 12 477 478 /* VIRTCHNL_OP_ADD_VLAN 479 * VF sends this message to add one or more VLAN tag filters for receives. 480 * PF adds the filters and returns status. 481 * If a port VLAN is configured by the PF, this operation will return an 482 * error to the VF. 483 */ 484 485 /* VIRTCHNL_OP_DEL_VLAN 486 * VF sends this message to remove one or more VLAN tag filters for receives. 487 * PF removes the filters and returns status. 488 * If a port VLAN is configured by the PF, this operation will return an 489 * error to the VF. 490 */ 491 492 struct virtchnl_vlan_filter_list { 493 u16 vsi_id; 494 u16 num_elements; 495 u16 vlan_id[]; 496 }; 497 498 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list); 499 #define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6 500 501 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related 502 * structures and opcodes. 503 * 504 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver 505 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED. 506 * 507 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype. 
508 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
509 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
510 *
511 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
512 * by the PF concurrently. For example, if the PF can support
513 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
514 * would OR the following bits:
515 *
516 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
517 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
518 * VIRTCHNL_VLAN_ETHERTYPE_AND;
519 *
520 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
521 * and 0x88A8 VLAN ethertypes.
522 *
523 * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be supported
524 * by the PF concurrently. For example, if the PF can support
525 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
526 * offload it would OR the following bits:
527 *
528 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
529 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
530 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
531 *
532 * The VF would interpret this as VLAN stripping can be supported on either
533 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
534 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
535 * the previously set value.
536 *
537 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
538 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
539 *
540 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
541 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
542 *
543 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
544 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
545 *
546 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
547 * VLAN filtering if the underlying PF supports it.
548 *
549 * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
550 * certain VLAN capability can be toggled. For example, if the underlying PF/CP
551 * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
552 * set this bit along with the supported ethertypes.
553 */
554 enum virtchnl_vlan_support {
555 VIRTCHNL_VLAN_UNSUPPORTED = 0,
556 VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
557 VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
558 VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
559 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
560 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
561 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
562 VIRTCHNL_VLAN_PRIO = BIT(24),
563 VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
564 VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
565 VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
566 VIRTCHNL_VLAN_TOGGLE = BIT(31),
567 };
568
569 /* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
570 * for filtering, insertion, and stripping capabilities.
571 *
572 * If only outer capabilities are supported (for filtering, insertion, and/or
573 * stripping) then this refers to the outer most or single VLAN from the VF's
574 * perspective.
575 *
576 * If only inner capabilities are supported (for filtering, insertion, and/or
577 * stripping) then this refers to the outer most or single VLAN from the VF's
578 * perspective. Functionally this is the same as if only outer capabilities are
579 * supported. The VF driver is just forced to use the inner fields when
580 * adding/deleting filters and enabling/disabling offloads (if supported).
581 *
582 * If both outer and inner capabilities are supported (for filtering, insertion,
583 * and/or stripping) then outer refers to the outer most or single VLAN and
584 * inner refers to the second VLAN, if it exists, in the packet.
585 *
586 * There is no support for tunneled VLAN offloads, so outer or inner are never
587 * referring to a tunneled packet from the VF's perspective.
588 */
589 struct virtchnl_vlan_supported_caps {
590 u32 outer;
591 u32 inner;
592 };
593
594 /* The PF populates these fields based on the supported VLAN filtering. If a
595 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
596 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
597 * the unsupported fields.
598 *
599 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
600 * VIRTCHNL_VLAN_TOGGLE bit is set.
601 *
602 * The ethertype(s) specified in the ethertype_init field are the ethertypes
603 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
604 * most VLAN from the VF's perspective. If both inner and outer filtering are
605 * allowed then ethertype_init only refers to the outer most VLAN as the only
606 * VLAN ethertype supported for inner VLAN filtering is
607 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
608 * when both inner and outer filtering are allowed.
609 *
610 * The max_filters field tells the VF how many VLAN filters it's allowed to have
611 * at any one time. If it exceeds this amount and tries to add another filter,
612 * then the request will be rejected by the PF. To prevent failures, the VF
613 * should keep track of how many VLAN filters it has added and not attempt to
614 * add more than max_filters.
615 */
616 struct virtchnl_vlan_filtering_caps {
617 struct virtchnl_vlan_supported_caps filtering_support;
618 u32 ethertype_init;
619 u16 max_filters;
620 u8 pad[2];
621 };
622
623 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
624
625 /* This enum is used for the virtchnl_vlan_offload_caps structure to specify
626 * if the PF supports a different ethertype for stripping and insertion.
627 *
628 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
629 * for stripping affect the ethertype(s) specified for insertion and vice versa
630 * as well. If the VF tries to configure VLAN stripping via
631 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
632 * that will be the ethertype for both stripping and insertion.
633 *
634 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
635 * stripping do not affect the ethertype(s) specified for insertion and vice
636 * versa.
637 */
638 enum virtchnl_vlan_ethertype_match {
639 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
640 VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
641 };
642
643 /* The PF populates these fields based on the supported VLAN offloads. If a
644 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
645 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
646 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
647 *
648 * Also, a VF is only allowed to toggle its VLAN offload setting if the
649 * VIRTCHNL_VLAN_TOGGLE bit is set.
650 *
651 * The VF driver needs to be aware of how the tags are stripped by hardware and
652 * inserted by the VF driver based on the level of offload support. The PF will
653 * populate these fields based on where the VLAN tags are expected to be
654 * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
655 * interpret these fields. See the definition of the
656 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
657 * enumeration.
658 */
659 struct virtchnl_vlan_offload_caps {
660 struct virtchnl_vlan_supported_caps stripping_support;
661 struct virtchnl_vlan_supported_caps insertion_support;
662 u32 ethertype_init;
663 u8 ethertype_match;
664 u8 pad[3];
665 };
666
667 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
668
669 /* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
670 * VF sends this message to determine its VLAN capabilities.
671 *
672 * PF will mark which capabilities it supports based on hardware support and
673 * current configuration. For example, if a port VLAN is configured the PF will
674 * not allow outer VLAN filtering, stripping, or insertion to be configured so
675 * it will block these features from the VF.
676 *
677 * The VF will need to cross reference its capabilities with the PF's
678 * capabilities in the response message from the PF to determine the VLAN
679 * support.
680 */
681 struct virtchnl_vlan_caps {
682 struct virtchnl_vlan_filtering_caps filtering;
683 struct virtchnl_vlan_offload_caps offloads;
684 };
685
686 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
687
688 struct virtchnl_vlan {
689 u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
690 u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
691 * filtering caps
692 */
693 u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
694 * filtering caps. Note that tpid here does not refer to
695 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
696 * actual 2-byte VLAN TPID
697 */
698 u8 pad[2];
699 };
700
701 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
702
703 struct virtchnl_vlan_filter {
704 struct virtchnl_vlan inner;
705 struct virtchnl_vlan outer;
706 u8 pad[16];
707 };
708
709 VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
710
711 /* VIRTCHNL_OP_ADD_VLAN_V2
712 * VIRTCHNL_OP_DEL_VLAN_V2
713 *
714 * VF sends these messages to add/del one or more VLAN tag filters for Rx
715 * traffic.
716 *
717 * The PF attempts to add the filters and returns status.
718 *
719 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
720 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
721 */
722 struct virtchnl_vlan_filter_list_v2 {
723 u16 vport_id;
724 u16 num_elements;
725 u8 pad[4];
726 struct virtchnl_vlan_filter filters[];
727 };
728
729 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
730 #define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
731
732 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
733 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
734 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
735 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
736 *
737 * VF sends this message to enable or disable VLAN stripping or insertion. It
738 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
739 * allowed and whether or not it's allowed to enable/disable the specific
740 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
741 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
742 * messages are allowed.
743 *
744 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
745 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
746 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
747 * case means the outer most or single VLAN from the VF's perspective. This is
748 * because no outer offloads are supported. See the comments above the
749 * virtchnl_vlan_supported_caps structure for more details.
750 *
751 * virtchnl_vlan_caps.offloads.stripping_support.inner =
752 * VIRTCHNL_VLAN_TOGGLE |
753 * VIRTCHNL_VLAN_ETHERTYPE_8100;
754 *
755 * virtchnl_vlan_caps.offloads.insertion_support.inner =
756 * VIRTCHNL_VLAN_TOGGLE |
757 * VIRTCHNL_VLAN_ETHERTYPE_8100;
758 *
759 * In order to enable inner (again note that in this case inner is the outer
760 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
761 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
762 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
763 *
764 * virtchnl_vlan_setting.inner_ethertype_setting =
765 * VIRTCHNL_VLAN_ETHERTYPE_8100;
766 *
767 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
768 * initialization.
769 *
770 * The reason that VLAN TPID(s) are not being used for the
771 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
772 * possible a device could support VLAN insertion and/or stripping offload on
773 * multiple ethertypes concurrently, so this method allows a VF to request
774 * multiple ethertypes in one message using the virtchnl_vlan_support
775 * enumeration.
776 *
777 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
778 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
779 * VLAN insertion and stripping simultaneously. The
780 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
781 * populated based on what the PF can support.
782 *
783 * virtchnl_vlan_caps.offloads.stripping_support.outer =
784 * VIRTCHNL_VLAN_TOGGLE |
785 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
786 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
787 * VIRTCHNL_VLAN_ETHERTYPE_AND;
788 *
789 * virtchnl_vlan_caps.offloads.insertion_support.outer =
790 * VIRTCHNL_VLAN_TOGGLE |
791 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
792 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
793 * VIRTCHNL_VLAN_ETHERTYPE_AND;
794 *
795 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
796 * would populate the virtchnl_vlan_setting structure in the following manner
797 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
798 *
799 * virtchnl_vlan_setting.outer_ethertype_setting =
800 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
801 * VIRTCHNL_VLAN_ETHERTYPE_88A8;
802 *
803 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
804 * initialization.
805 *
806 * There is also the case where a PF and the underlying hardware can support
807 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
808 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
809 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
810 * offloads. The ethertypes must match for stripping and insertion.
811 *
812 * virtchnl_vlan_caps.offloads.stripping_support.outer =
813 * VIRTCHNL_VLAN_TOGGLE |
814 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
815 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
816 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
817 *
818 * virtchnl_vlan_caps.offloads.insertion_support.outer =
819 * VIRTCHNL_VLAN_TOGGLE |
820 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
821 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
822 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
823 *
824 * virtchnl_vlan_caps.offloads.ethertype_match =
825 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
826 *
827 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
828 * populate the virtchnl_vlan_setting structure in the following manner and send
829 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
830 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
831 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
832 *
833 * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
834 *
835 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
836 * initialization.
837 */
838 struct virtchnl_vlan_setting {
839 u32 outer_ethertype_setting;
840 u32 inner_ethertype_setting;
841 u16 vport_id;
842 u8 pad[6];
843 };
844
845 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
846
847 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
848 * VF sends VSI id and flags.
849 * PF returns status code in retval.
850 * Note: we assume that broadcast accept mode is always enabled.
851 */
852 struct virtchnl_promisc_info {
853 u16 vsi_id;
854 u16 flags;
855 };
856
857 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
858
859 #define FLAG_VF_UNICAST_PROMISC 0x00000001
860 #define FLAG_VF_MULTICAST_PROMISC 0x00000002
861
862 /* VIRTCHNL_OP_GET_STATS
863 * VF sends this message to request stats for the selected VSI. VF uses
864 * the virtchnl_queue_select struct to specify the VSI. The queue_id
865 * field is ignored by the PF.
866 *
867 * PF replies with struct eth_stats in an external buffer.
868 */
869
870 /* VIRTCHNL_OP_CONFIG_RSS_KEY
871 * VIRTCHNL_OP_CONFIG_RSS_LUT
872 * VF sends these messages to configure RSS. Only supported if both PF
873 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
874 * configuration negotiation. If this is the case, then the RSS fields in
875 * the VF resource struct are valid.
876 * Both the key and LUT are initialized to 0 by the PF, meaning that
877 * RSS is effectively disabled until set up by the VF.
878 */
879 struct virtchnl_rss_key {
880 u16 vsi_id;
881 u16 key_len;
882 u8 key[]; /* RSS hash key, packed bytes */
883 };
884
885 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
886 #define virtchnl_rss_key_LEGACY_SIZEOF 6
887
888 struct virtchnl_rss_lut {
889 u16 vsi_id;
890 u16 lut_entries;
891 u8 lut[]; /* RSS lookup table */
892 };
893
894 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
895 #define virtchnl_rss_lut_LEGACY_SIZEOF 6
896
897 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS
898 * VIRTCHNL_OP_SET_RSS_HENA
899 * VF sends these messages to get and set the hash filter enable bits for RSS.
900 * By default, the PF sets these to all possible traffic types that the
901 * hardware supports. The VF can query this value if it wants to change the
902 * traffic types that are hashed by the hardware.
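 *
 * (Illustrative sketch only: to stop hashing a particular traffic type a VF
 * might fetch the current set with VIRTCHNL_OP_GET_RSS_HENA_CAPS, clear the
 * relevant bit and send the result back with VIRTCHNL_OP_SET_RSS_HENA:
 *	struct virtchnl_rss_hena hena = { .hena = caps.hena & ~unwanted_bit };
 * where caps is the PF's reply and unwanted_bit is a device-specific PCTYPE
 * bit that is not defined in this header.)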
903 */ 904 struct virtchnl_rss_hena { 905 u64 hena; 906 }; 907 908 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); 909 910 /* VIRTCHNL_OP_ENABLE_CHANNELS 911 * VIRTCHNL_OP_DISABLE_CHANNELS 912 * VF sends these messages to enable or disable channels based on 913 * the user specified queue count and queue offset for each traffic class. 914 * This struct encompasses all the information that the PF needs from 915 * VF to create a channel. 916 */ 917 struct virtchnl_channel_info { 918 u16 count; /* number of queues in a channel */ 919 u16 offset; /* queues in a channel start from 'offset' */ 920 u32 pad; 921 u64 max_tx_rate; 922 }; 923 924 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info); 925 926 struct virtchnl_tc_info { 927 u32 num_tc; 928 u32 pad; 929 struct virtchnl_channel_info list[]; 930 }; 931 932 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info); 933 #define virtchnl_tc_info_LEGACY_SIZEOF 24 934 935 /* VIRTCHNL_ADD_CLOUD_FILTER 936 * VIRTCHNL_DEL_CLOUD_FILTER 937 * VF sends these messages to add or delete a cloud filter based on the 938 * user specified match and action filters. These structures encompass 939 * all the information that the PF needs from the VF to add/delete a 940 * cloud filter. 941 */ 942 943 struct virtchnl_l4_spec { 944 u8 src_mac[ETH_ALEN]; 945 u8 dst_mac[ETH_ALEN]; 946 __be16 vlan_id; 947 __be16 pad; /* reserved for future use */ 948 __be32 src_ip[4]; 949 __be32 dst_ip[4]; 950 __be16 src_port; 951 __be16 dst_port; 952 }; 953 954 VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec); 955 956 union virtchnl_flow_spec { 957 struct virtchnl_l4_spec tcp_spec; 958 u8 buffer[128]; /* reserved for future use */ 959 }; 960 961 VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec); 962 963 enum virtchnl_action { 964 /* action types */ 965 VIRTCHNL_ACTION_DROP = 0, 966 VIRTCHNL_ACTION_TC_REDIRECT, 967 VIRTCHNL_ACTION_PASSTHRU, 968 VIRTCHNL_ACTION_QUEUE, 969 VIRTCHNL_ACTION_Q_REGION, 970 VIRTCHNL_ACTION_MARK, 971 VIRTCHNL_ACTION_COUNT, 972 }; 973 974 enum virtchnl_flow_type { 975 /* flow types */ 976 VIRTCHNL_TCP_V4_FLOW = 0, 977 VIRTCHNL_TCP_V6_FLOW, 978 }; 979 980 struct virtchnl_filter { 981 union virtchnl_flow_spec data; 982 union virtchnl_flow_spec mask; 983 984 /* see enum virtchnl_flow_type */ 985 s32 flow_type; 986 987 /* see enum virtchnl_action */ 988 s32 action; 989 u32 action_meta; 990 u8 field_flags; 991 u8 pad[3]; 992 }; 993 994 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); 995 996 struct virtchnl_supported_rxdids { 997 u64 supported_rxdids; 998 }; 999 1000 /* VIRTCHNL_OP_EVENT 1001 * PF sends this message to inform the VF driver of events that may affect it. 1002 * No direct response is expected from the VF, though it may generate other 1003 * messages in response to this one. 1004 */ 1005 enum virtchnl_event_codes { 1006 VIRTCHNL_EVENT_UNKNOWN = 0, 1007 VIRTCHNL_EVENT_LINK_CHANGE, 1008 VIRTCHNL_EVENT_RESET_IMPENDING, 1009 VIRTCHNL_EVENT_PF_DRIVER_CLOSE, 1010 }; 1011 1012 #define PF_EVENT_SEVERITY_INFO 0 1013 #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 1014 1015 struct virtchnl_pf_event { 1016 /* see enum virtchnl_event_codes */ 1017 s32 event; 1018 union { 1019 /* If the PF driver does not support the new speed reporting 1020 * capabilities then use link_event else use link_event_adv to 1021 * get the speed and link information. 
The ability to understand
1022 * new speeds is indicated by setting the capability flag
1023 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the vf_cap_flags field of the
1024 * virtchnl_vf_resource struct; it can be used to determine
1025 * which link event struct to use below.
1026 */
1027 struct {
1028 enum virtchnl_link_speed link_speed;
1029 bool link_status;
1030 u8 pad[3];
1031 } link_event;
1032 struct {
1033 /* link_speed provided in Mbps */
1034 u32 link_speed;
1035 u8 link_status;
1036 u8 pad[3];
1037 } link_event_adv;
1038 } event_data;
1039
1040 s32 severity;
1041 };
1042
1043 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
1044
1045 /* used to specify if a ceq_idx or aeq_idx is invalid */
1046 #define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
1047 /* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
1048 * VF uses this message to request PF to map RDMA vectors to RDMA queues.
1049 * The request for this originates from the VF RDMA driver through
1050 * a client interface between VF LAN and VF RDMA driver.
1051 * A vector could have an AEQ and CEQ attached to it although
1052 * there is a single AEQ per VF RDMA instance, in which case
1053 * most vectors will have a VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq and a valid
1054 * idx for ceqs. There will never be a case where there will be multiple CEQs
1055 * attached to a single vector.
1056 * PF configures interrupt mapping and returns status.
1057 */
1058
1059 struct virtchnl_rdma_qv_info {
1060 u32 v_idx; /* msix_vector */
1061 u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1062 u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1063 u8 itr_idx;
1064 u8 pad[3];
1065 };
1066
1067 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
1068
1069 struct virtchnl_rdma_qvlist_info {
1070 u32 num_vectors;
1071 struct virtchnl_rdma_qv_info qv_info[];
1072 };
1073
1074 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
1075 #define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
1076
1077 /* VF reset states - these are written into the RSTAT register:
1078 * VFGEN_RSTAT on the VF
1079 * When the PF initiates a reset, it writes 0
1080 * When the reset is complete, it writes 1
1081 * When the PF detects that the VF has recovered, it writes 2
1082 * VF checks this register periodically to determine if a reset has occurred,
1083 * then polls it to know when the reset is complete.
1084 * If either the PF or VF reads the register while the hardware
1085 * is in a reset state, it will return DEADBEEF, which, when masked,
1086 * will result in 3.
1087 */
1088 enum virtchnl_vfr_states {
1089 VIRTCHNL_VFR_INPROGRESS = 0,
1090 VIRTCHNL_VFR_COMPLETED,
1091 VIRTCHNL_VFR_VFACTIVE,
1092 };
1093
1094 /* Type of RSS algorithm */
1095 enum virtchnl_rss_algorithm {
1096 VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
1097 VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
1098 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
1099 VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
1100 };
1101
1102 #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
1103 #define PROTO_HDR_SHIFT 5
1104 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
1105 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
1106
1107 /* The VF uses these macros to configure each protocol header.
1108 * They specify which protocol headers and protocol header fields to use, based on
1109 * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
1110 * @param hdr: a struct of virtchnl_proto_hdr 1111 * @param hdr_type: ETH/IPV4/TCP, etc 1112 * @param field: SRC/DST/TEID/SPI, etc 1113 */ 1114 #define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \ 1115 ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK)) 1116 #define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \ 1117 ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK)) 1118 #define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \ 1119 ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK)) 1120 #define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector) 1121 1122 #define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \ 1123 (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \ 1124 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field)) 1125 #define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \ 1126 (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \ 1127 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field)) 1128 1129 #define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \ 1130 ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type) 1131 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \ 1132 (((hdr)->type) >> PROTO_HDR_SHIFT) 1133 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \ 1134 ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT))) 1135 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \ 1136 (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \ 1137 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val))) 1138 1139 /* Protocol header type within a packet segment. A segment consists of one or 1140 * more protocol headers that make up a logical group of protocol headers. Each 1141 * logical group of protocol headers encapsulates or is encapsulated using/by 1142 * tunneling or encapsulation protocols for network virtualization. 1143 */ 1144 enum virtchnl_proto_hdr_type { 1145 VIRTCHNL_PROTO_HDR_NONE, 1146 VIRTCHNL_PROTO_HDR_ETH, 1147 VIRTCHNL_PROTO_HDR_S_VLAN, 1148 VIRTCHNL_PROTO_HDR_C_VLAN, 1149 VIRTCHNL_PROTO_HDR_IPV4, 1150 VIRTCHNL_PROTO_HDR_IPV6, 1151 VIRTCHNL_PROTO_HDR_TCP, 1152 VIRTCHNL_PROTO_HDR_UDP, 1153 VIRTCHNL_PROTO_HDR_SCTP, 1154 VIRTCHNL_PROTO_HDR_GTPU_IP, 1155 VIRTCHNL_PROTO_HDR_GTPU_EH, 1156 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN, 1157 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP, 1158 VIRTCHNL_PROTO_HDR_PPPOE, 1159 VIRTCHNL_PROTO_HDR_L2TPV3, 1160 VIRTCHNL_PROTO_HDR_ESP, 1161 VIRTCHNL_PROTO_HDR_AH, 1162 VIRTCHNL_PROTO_HDR_PFCP, 1163 }; 1164 1165 /* Protocol header field within a protocol header. 
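 *
 * (Illustrative sketch only of the helper macros above: to select the IPv4
 * source and destination addresses of a header a driver might do
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 * where hdr is a pointer to a driver-owned struct virtchnl_proto_hdr.)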
*/ 1166 enum virtchnl_proto_hdr_field { 1167 /* ETHER */ 1168 VIRTCHNL_PROTO_HDR_ETH_SRC = 1169 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH), 1170 VIRTCHNL_PROTO_HDR_ETH_DST, 1171 VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, 1172 /* S-VLAN */ 1173 VIRTCHNL_PROTO_HDR_S_VLAN_ID = 1174 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN), 1175 /* C-VLAN */ 1176 VIRTCHNL_PROTO_HDR_C_VLAN_ID = 1177 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN), 1178 /* IPV4 */ 1179 VIRTCHNL_PROTO_HDR_IPV4_SRC = 1180 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4), 1181 VIRTCHNL_PROTO_HDR_IPV4_DST, 1182 VIRTCHNL_PROTO_HDR_IPV4_DSCP, 1183 VIRTCHNL_PROTO_HDR_IPV4_TTL, 1184 VIRTCHNL_PROTO_HDR_IPV4_PROT, 1185 /* IPV6 */ 1186 VIRTCHNL_PROTO_HDR_IPV6_SRC = 1187 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6), 1188 VIRTCHNL_PROTO_HDR_IPV6_DST, 1189 VIRTCHNL_PROTO_HDR_IPV6_TC, 1190 VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, 1191 VIRTCHNL_PROTO_HDR_IPV6_PROT, 1192 /* TCP */ 1193 VIRTCHNL_PROTO_HDR_TCP_SRC_PORT = 1194 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP), 1195 VIRTCHNL_PROTO_HDR_TCP_DST_PORT, 1196 /* UDP */ 1197 VIRTCHNL_PROTO_HDR_UDP_SRC_PORT = 1198 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP), 1199 VIRTCHNL_PROTO_HDR_UDP_DST_PORT, 1200 /* SCTP */ 1201 VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT = 1202 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP), 1203 VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, 1204 /* GTPU_IP */ 1205 VIRTCHNL_PROTO_HDR_GTPU_IP_TEID = 1206 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP), 1207 /* GTPU_EH */ 1208 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU = 1209 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH), 1210 VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, 1211 /* PPPOE */ 1212 VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID = 1213 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE), 1214 /* L2TPV3 */ 1215 VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID = 1216 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3), 1217 /* ESP */ 1218 VIRTCHNL_PROTO_HDR_ESP_SPI = 1219 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP), 1220 /* AH */ 1221 VIRTCHNL_PROTO_HDR_AH_SPI = 1222 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH), 1223 /* PFCP */ 1224 VIRTCHNL_PROTO_HDR_PFCP_S_FIELD = 1225 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP), 1226 VIRTCHNL_PROTO_HDR_PFCP_SEID, 1227 }; 1228 1229 struct virtchnl_proto_hdr { 1230 /* see enum virtchnl_proto_hdr_type */ 1231 s32 type; 1232 u32 field_selector; /* a bit mask to select field for header type */ 1233 u8 buffer[64]; 1234 /** 1235 * binary buffer in network order for specific header type. 1236 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4 1237 * header is expected to be copied into the buffer. 1238 */ 1239 }; 1240 1241 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr); 1242 1243 struct virtchnl_proto_hdrs { 1244 u8 tunnel_level; 1245 u8 pad[3]; 1246 /** 1247 * specify where protocol header start from. 1248 * 0 - from the outer layer 1249 * 1 - from the first inner layer 1250 * 2 - from the second inner layer 1251 * .... 
1252 **/
1253 int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
1254 struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
1255 };
1256
1257 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1258
1259 struct virtchnl_rss_cfg {
1260 struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
1261
1262 /* see enum virtchnl_rss_algorithm; rss algorithm type */
1263 s32 rss_algorithm;
1264 u8 reserved[128]; /* reserved for future use */
1265 };
1266
1267 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
1268
1269 /* action configuration for FDIR */
1270 struct virtchnl_filter_action {
1271 /* see enum virtchnl_action type */
1272 s32 type;
1273 union {
1274 /* used for queue and qgroup action */
1275 struct {
1276 u16 index;
1277 u8 region;
1278 } queue;
1279 /* used for count action */
1280 struct {
1281 /* share counter ID with other flow rules */
1282 u8 shared;
1283 u32 id; /* counter ID */
1284 } count;
1285 /* used for mark action */
1286 u32 mark_id;
1287 u8 reserve[32];
1288 } act_conf;
1289 };
1290
1291 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1292
1293 #define VIRTCHNL_MAX_NUM_ACTIONS 8
1294
1295 struct virtchnl_filter_action_set {
1296 /* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1297 int count;
1298 struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1299 };
1300
1301 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1302
1303 /* pattern and action for FDIR rule */
1304 struct virtchnl_fdir_rule {
1305 struct virtchnl_proto_hdrs proto_hdrs;
1306 struct virtchnl_filter_action_set action_set;
1307 };
1308
1309 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1310
1311 /* Status returned to VF after VF requests FDIR commands
1312 * VIRTCHNL_FDIR_SUCCESS
1313 * The VF FDIR related request was completed successfully by the PF.
1314 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
1315 *
1316 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
1317 * OP_ADD_FDIR_FILTER request failed because no hardware resources are available.
1318 *
1319 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
1320 * OP_ADD_FDIR_FILTER request failed because the rule already exists.
1321 *
1322 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
1323 * OP_ADD_FDIR_FILTER request failed because it conflicts with an existing rule.
1324 *
1325 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
1326 * OP_DEL_FDIR_FILTER request failed because the rule does not exist.
1327 *
1328 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
1329 * OP_ADD_FDIR_FILTER request failed because parameter validation failed
1330 * or the hardware does not support the rule.
1331 *
1332 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
1333 * OP_ADD/DEL_FDIR_FILTER request failed because rule programming
1334 * timed out.
1335 *
1336 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
1337 * OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
1338 * for example, the VF queried the counter of a rule that has no counter action.
1339 */
1340 enum virtchnl_fdir_prgm_status {
1341 VIRTCHNL_FDIR_SUCCESS = 0,
1342 VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1343 VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1344 VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1345 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1346 VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1347 VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1348 VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1349 };
1350
1351 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1352 * VF sends this request to PF by filling out vsi_id,
1353 * validate_only and rule_cfg. PF will return flow_id
1354 * if the request completes successfully and return the add status to the VF.
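 *
 * (Illustrative sketch only: a VF adding a rule that steers matching packets
 * to queue 3 might fill
 *	struct virtchnl_fdir_add add = { .vsi_id = vsi_id, .validate_only = 0 };
 *	add.rule_cfg.action_set.count = 1;
 *	add.rule_cfg.action_set.actions[0].type = VIRTCHNL_ACTION_QUEUE;
 *	add.rule_cfg.action_set.actions[0].act_conf.queue.index = 3;
 * describe the match in add.rule_cfg.proto_hdrs, and keep the flow_id from
 * the PF's reply for a later VIRTCHNL_OP_DEL_FDIR_FILTER. vsi_id is a
 * driver-local value.)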
1355 */ 1356 struct virtchnl_fdir_add { 1357 u16 vsi_id; /* INPUT */ 1358 /* 1359 * 1 for validating a fdir rule, 0 for creating a fdir rule. 1360 * Validate and create share one ops: VIRTCHNL_OP_ADD_FDIR_FILTER. 1361 */ 1362 u16 validate_only; /* INPUT */ 1363 u32 flow_id; /* OUTPUT */ 1364 struct virtchnl_fdir_rule rule_cfg; /* INPUT */ 1365 1366 /* see enum virtchnl_fdir_prgm_status; OUTPUT */ 1367 s32 status; 1368 }; 1369 1370 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add); 1371 1372 /* VIRTCHNL_OP_DEL_FDIR_FILTER 1373 * VF sends this request to PF by filling out vsi_id 1374 * and flow_id. PF will return del_status to VF. 1375 */ 1376 struct virtchnl_fdir_del { 1377 u16 vsi_id; /* INPUT */ 1378 u16 pad; 1379 u32 flow_id; /* INPUT */ 1380 1381 /* see enum virtchnl_fdir_prgm_status; OUTPUT */ 1382 s32 status; 1383 }; 1384 1385 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); 1386 1387 #define __vss_byone(p, member, count, old) \ 1388 (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0))) 1389 1390 #define __vss_byelem(p, member, count, old) \ 1391 (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0))) 1392 1393 #define __vss_full(p, member, count, old) \ 1394 (struct_size(p, member, count) + (old - struct_size(p, member, 0))) 1395 1396 #define __vss(type, func, p, member, count) \ 1397 struct type: func(p, member, count, type##_LEGACY_SIZEOF) 1398 1399 #define virtchnl_struct_size(p, m, c) \ 1400 _Generic(*p, \ 1401 __vss(virtchnl_vf_resource, __vss_full, p, m, c), \ 1402 __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \ 1403 __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \ 1404 __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \ 1405 __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \ 1406 __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \ 1407 __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \ 1408 __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \ 1409 __vss(virtchnl_rss_key, __vss_byone, p, m, c), \ 1410 __vss(virtchnl_rss_lut, __vss_byone, p, m, c)) 1411 1412 /** 1413 * virtchnl_vc_validate_vf_msg 1414 * @ver: Virtchnl version info 1415 * @v_opcode: Opcode for the message 1416 * @msg: pointer to the msg buffer 1417 * @msglen: msg length 1418 * 1419 * validate msg format against struct for each opcode 1420 */ 1421 static inline int 1422 virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, 1423 u8 *msg, u16 msglen) 1424 { 1425 bool err_msg_format = false; 1426 u32 valid_len = 0; 1427 1428 /* Validate message length. 
*/ 1429 switch (v_opcode) { 1430 case VIRTCHNL_OP_VERSION: 1431 valid_len = sizeof(struct virtchnl_version_info); 1432 break; 1433 case VIRTCHNL_OP_RESET_VF: 1434 break; 1435 case VIRTCHNL_OP_GET_VF_RESOURCES: 1436 if (VF_IS_V11(ver)) 1437 valid_len = sizeof(u32); 1438 break; 1439 case VIRTCHNL_OP_CONFIG_TX_QUEUE: 1440 valid_len = sizeof(struct virtchnl_txq_info); 1441 break; 1442 case VIRTCHNL_OP_CONFIG_RX_QUEUE: 1443 valid_len = sizeof(struct virtchnl_rxq_info); 1444 break; 1445 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 1446 valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF; 1447 if (msglen >= valid_len) { 1448 struct virtchnl_vsi_queue_config_info *vqc = 1449 (struct virtchnl_vsi_queue_config_info *)msg; 1450 valid_len = virtchnl_struct_size(vqc, qpair, 1451 vqc->num_queue_pairs); 1452 if (vqc->num_queue_pairs == 0) 1453 err_msg_format = true; 1454 } 1455 break; 1456 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 1457 valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF; 1458 if (msglen >= valid_len) { 1459 struct virtchnl_irq_map_info *vimi = 1460 (struct virtchnl_irq_map_info *)msg; 1461 valid_len = virtchnl_struct_size(vimi, vecmap, 1462 vimi->num_vectors); 1463 if (vimi->num_vectors == 0) 1464 err_msg_format = true; 1465 } 1466 break; 1467 case VIRTCHNL_OP_ENABLE_QUEUES: 1468 case VIRTCHNL_OP_DISABLE_QUEUES: 1469 valid_len = sizeof(struct virtchnl_queue_select); 1470 break; 1471 case VIRTCHNL_OP_ADD_ETH_ADDR: 1472 case VIRTCHNL_OP_DEL_ETH_ADDR: 1473 valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF; 1474 if (msglen >= valid_len) { 1475 struct virtchnl_ether_addr_list *veal = 1476 (struct virtchnl_ether_addr_list *)msg; 1477 valid_len = virtchnl_struct_size(veal, list, 1478 veal->num_elements); 1479 if (veal->num_elements == 0) 1480 err_msg_format = true; 1481 } 1482 break; 1483 case VIRTCHNL_OP_ADD_VLAN: 1484 case VIRTCHNL_OP_DEL_VLAN: 1485 valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF; 1486 if (msglen >= valid_len) { 1487 struct virtchnl_vlan_filter_list *vfl = 1488 (struct virtchnl_vlan_filter_list *)msg; 1489 valid_len = virtchnl_struct_size(vfl, vlan_id, 1490 vfl->num_elements); 1491 if (vfl->num_elements == 0) 1492 err_msg_format = true; 1493 } 1494 break; 1495 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 1496 valid_len = sizeof(struct virtchnl_promisc_info); 1497 break; 1498 case VIRTCHNL_OP_GET_STATS: 1499 valid_len = sizeof(struct virtchnl_queue_select); 1500 break; 1501 case VIRTCHNL_OP_RDMA: 1502 /* These messages are opaque to us and will be validated in 1503 * the RDMA client code. We just need to check for nonzero 1504 * length. The firmware will enforce max length restrictions. 
1505 */ 1506 if (msglen) 1507 valid_len = msglen; 1508 else 1509 err_msg_format = true; 1510 break; 1511 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: 1512 break; 1513 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: 1514 valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF; 1515 if (msglen >= valid_len) { 1516 struct virtchnl_rdma_qvlist_info *qv = 1517 (struct virtchnl_rdma_qvlist_info *)msg; 1518 1519 valid_len = virtchnl_struct_size(qv, qv_info, 1520 qv->num_vectors); 1521 } 1522 break; 1523 case VIRTCHNL_OP_CONFIG_RSS_KEY: 1524 valid_len = virtchnl_rss_key_LEGACY_SIZEOF; 1525 if (msglen >= valid_len) { 1526 struct virtchnl_rss_key *vrk = 1527 (struct virtchnl_rss_key *)msg; 1528 valid_len = virtchnl_struct_size(vrk, key, 1529 vrk->key_len); 1530 } 1531 break; 1532 case VIRTCHNL_OP_CONFIG_RSS_LUT: 1533 valid_len = virtchnl_rss_lut_LEGACY_SIZEOF; 1534 if (msglen >= valid_len) { 1535 struct virtchnl_rss_lut *vrl = 1536 (struct virtchnl_rss_lut *)msg; 1537 valid_len = virtchnl_struct_size(vrl, lut, 1538 vrl->lut_entries); 1539 } 1540 break; 1541 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 1542 break; 1543 case VIRTCHNL_OP_SET_RSS_HENA: 1544 valid_len = sizeof(struct virtchnl_rss_hena); 1545 break; 1546 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 1547 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 1548 break; 1549 case VIRTCHNL_OP_REQUEST_QUEUES: 1550 valid_len = sizeof(struct virtchnl_vf_res_request); 1551 break; 1552 case VIRTCHNL_OP_ENABLE_CHANNELS: 1553 valid_len = virtchnl_tc_info_LEGACY_SIZEOF; 1554 if (msglen >= valid_len) { 1555 struct virtchnl_tc_info *vti = 1556 (struct virtchnl_tc_info *)msg; 1557 valid_len = virtchnl_struct_size(vti, list, 1558 vti->num_tc); 1559 if (vti->num_tc == 0) 1560 err_msg_format = true; 1561 } 1562 break; 1563 case VIRTCHNL_OP_DISABLE_CHANNELS: 1564 break; 1565 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 1566 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 1567 valid_len = sizeof(struct virtchnl_filter); 1568 break; 1569 case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: 1570 break; 1571 case VIRTCHNL_OP_ADD_RSS_CFG: 1572 case VIRTCHNL_OP_DEL_RSS_CFG: 1573 valid_len = sizeof(struct virtchnl_rss_cfg); 1574 break; 1575 case VIRTCHNL_OP_ADD_FDIR_FILTER: 1576 valid_len = sizeof(struct virtchnl_fdir_add); 1577 break; 1578 case VIRTCHNL_OP_DEL_FDIR_FILTER: 1579 valid_len = sizeof(struct virtchnl_fdir_del); 1580 break; 1581 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: 1582 break; 1583 case VIRTCHNL_OP_ADD_VLAN_V2: 1584 case VIRTCHNL_OP_DEL_VLAN_V2: 1585 valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF; 1586 if (msglen >= valid_len) { 1587 struct virtchnl_vlan_filter_list_v2 *vfl = 1588 (struct virtchnl_vlan_filter_list_v2 *)msg; 1589 1590 valid_len = virtchnl_struct_size(vfl, filters, 1591 vfl->num_elements); 1592 1593 if (vfl->num_elements == 0) { 1594 err_msg_format = true; 1595 break; 1596 } 1597 } 1598 break; 1599 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1600 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1601 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1602 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1603 valid_len = sizeof(struct virtchnl_vlan_setting); 1604 break; 1605 /* These are always errors coming from the VF. */ 1606 case VIRTCHNL_OP_EVENT: 1607 case VIRTCHNL_OP_UNKNOWN: 1608 default: 1609 return VIRTCHNL_STATUS_ERR_PARAM; 1610 } 1611 /* few more checks */ 1612 if (err_msg_format || valid_len != msglen) 1613 return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; 1614 1615 return 0; 1616 } 1617 #endif /* _VIRTCHNL_H_ */ 1618
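
/* (Illustrative usage sketch only, not part of this header's definitions:
 * a sender sizes a variable-length message with virtchnl_struct_size() before
 * allocating it, for example
 *	len = virtchnl_struct_size(veal, list, num_macs);
 * for a struct virtchnl_ether_addr_list *veal with num_macs entries, and a PF
 * receiving a VF message validates it with
 *	err = virtchnl_vc_validate_vf_msg(&vf_ver, v_opcode, msg, msglen);
 * rejecting the message when err is non-zero. Names such as veal, num_macs
 * and vf_ver are driver-local assumptions, not defined here.)
 */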