/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */

#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_

/* Description:
 * This header file describes the Virtual Function (VF) - Physical Function
 * (PF) communication protocol used by the drivers for all devices starting
 * from our 40G product line
 *
 * Admin queue buffer usage:
 * desc->opcode is always aqc_opc_send_msg_to_pf
 * flags, retval, datalen, and data addr are all used normally.
 * The Firmware copies the cookie fields when sending messages between the
 * PF and VF, but uses all other fields internally. Due to this limitation,
 * we must send all messages as "indirect", i.e. using an external buffer.
 *
 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
 * have a maximum of sixteen queues for all of its VSIs.
 *
 * The PF is required to return a status code in v_retval for all messages
 * except RESET_VF, which does not require any response. The returned value
 * is of virtchnl_status_code type, defined below.
 *
 * In general, VF driver initialization should roughly follow the order of
 * these opcodes. The VF driver must first validate the API version of the
 * PF driver, then request a reset, then get resources, then configure
 * queues and interrupts. After these operations are complete, the VF
 * driver may start its queues, optionally add MAC and VLAN filters, and
 * process traffic.
 */

/* START GENERIC DEFINES
 * Need to ensure the following enums and defines hold the same meaning and
 * value in current and future projects
 */

/* Error Codes */
enum virtchnl_status_code {
	VIRTCHNL_STATUS_SUCCESS = 0,
	VIRTCHNL_STATUS_ERR_PARAM = -5,
	VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
	VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};

/* Backward compatibility */
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED

#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT		0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT		0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT	0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT		0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT		0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT		0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT		0x6
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT		0x7

enum virtchnl_link_speed {
	VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
	VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
	VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
	VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
	VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
	VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
	VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
	VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
	VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};
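
/* Example (illustrative only, not part of the ABI): a VF driver that has
 * not negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED might convert the legacy
 * bitmapped link speed into Mbps roughly as follows. link_speed and mbps
 * are placeholder variables belonging to the VF driver.
 *
 *	switch (link_speed) {
 *	case VIRTCHNL_LINK_SPEED_100MB:	mbps = 100; break;
 *	case VIRTCHNL_LINK_SPEED_1GB:	mbps = 1000; break;
 *	case VIRTCHNL_LINK_SPEED_2_5GB:	mbps = 2500; break;
 *	case VIRTCHNL_LINK_SPEED_5GB:	mbps = 5000; break;
 *	case VIRTCHNL_LINK_SPEED_10GB:	mbps = 10000; break;
 *	case VIRTCHNL_LINK_SPEED_20GB:	mbps = 20000; break;
 *	case VIRTCHNL_LINK_SPEED_25GB:	mbps = 25000; break;
 *	case VIRTCHNL_LINK_SPEED_40GB:	mbps = 40000; break;
 *	default:			mbps = 0; break;
 *	}
 */
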
/* for hsplit_0 field of Rx HMC context */
/* deprecated with IAVF 1.0 */
enum virtchnl_rx_hsplit {
	VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
	VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
	VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};

enum virtchnl_bw_limit_type {
	VIRTCHNL_BW_SHAPER = 0,
};

#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS	6
/* END GENERIC DEFINES */

/* Opcodes for VF-PF communication. These are placed in the v_opcode field
 * of the virtchnl_msg structure.
 */
enum virtchnl_ops {
/* The PF sends status change events to VFs using
 * the VIRTCHNL_OP_EVENT opcode.
 * VFs send requests to the PF using the other ops.
 * Use of "advanced opcode" features must be negotiated as part of the
 * capabilities exchange and is not considered part of the base mode
 * feature set.
 */
	VIRTCHNL_OP_UNKNOWN = 0,
	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
	VIRTCHNL_OP_RESET_VF = 2,
	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
	VIRTCHNL_OP_ENABLE_QUEUES = 8,
	VIRTCHNL_OP_DISABLE_QUEUES = 9,
	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
	VIRTCHNL_OP_ADD_VLAN = 12,
	VIRTCHNL_OP_DEL_VLAN = 13,
	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
	VIRTCHNL_OP_GET_STATS = 15,
	VIRTCHNL_OP_RSVD = 16,
	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
	/* opcode 19 is reserved */
	/* opcodes 20, 21, and 22 are reserved */
	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
	VIRTCHNL_OP_SET_RSS_HENA = 26,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
	VIRTCHNL_OP_REQUEST_QUEUES = 29,
	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
	/* opcodes 34, 35, and 36 are reserved */
	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
	VIRTCHNL_OP_DCF_CMD_DESC = 39,
	VIRTCHNL_OP_DCF_CMD_BUFF = 40,
	VIRTCHNL_OP_DCF_DISABLE = 41,
	VIRTCHNL_OP_DCF_GET_VSI_MAP = 42,
	VIRTCHNL_OP_DCF_GET_PKG_INFO = 43,
	VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
	VIRTCHNL_OP_ADD_RSS_CFG = 45,
	VIRTCHNL_OP_DEL_RSS_CFG = 46,
	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
	VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
	VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
	VIRTCHNL_OP_ADD_VLAN_V2 = 52,
	VIRTCHNL_OP_DEL_VLAN_V2 = 53,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
	VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
	VIRTCHNL_OP_GET_QOS_CAPS = 66,
	VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
	VIRTCHNL_OP_MAX,
};

static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
{
	switch (v_opcode) {
	case VIRTCHNL_OP_UNKNOWN:
		return "VIRTCHNL_OP_UNKNOWN";
	case VIRTCHNL_OP_VERSION:
		return "VIRTCHNL_OP_VERSION";
	case VIRTCHNL_OP_RESET_VF:
		return "VIRTCHNL_OP_RESET_VF";
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		return "VIRTCHNL_OP_GET_VF_RESOURCES";
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		return "VIRTCHNL_OP_CONFIG_TX_QUEUE";
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		return "VIRTCHNL_OP_CONFIG_RX_QUEUE";
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		return "VIRTCHNL_OP_CONFIG_VSI_QUEUES";
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		return "VIRTCHNL_OP_CONFIG_IRQ_MAP";
	case VIRTCHNL_OP_ENABLE_QUEUES:
		return "VIRTCHNL_OP_ENABLE_QUEUES";
	case VIRTCHNL_OP_DISABLE_QUEUES:
		return "VIRTCHNL_OP_DISABLE_QUEUES";
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		return "VIRTCHNL_OP_ADD_ETH_ADDR";
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		return "VIRTCHNL_OP_DEL_ETH_ADDR";
	case VIRTCHNL_OP_ADD_VLAN:
		return "VIRTCHNL_OP_ADD_VLAN";
	case VIRTCHNL_OP_DEL_VLAN:
		return "VIRTCHNL_OP_DEL_VLAN";
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		return "VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE";
	case VIRTCHNL_OP_GET_STATS:
		return "VIRTCHNL_OP_GET_STATS";
	case VIRTCHNL_OP_RSVD:
		return "VIRTCHNL_OP_RSVD";
	case VIRTCHNL_OP_EVENT:
		return "VIRTCHNL_OP_EVENT";
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		return "VIRTCHNL_OP_CONFIG_RSS_KEY";
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		return "VIRTCHNL_OP_CONFIG_RSS_LUT";
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		return "VIRTCHNL_OP_GET_RSS_HENA_CAPS";
	case VIRTCHNL_OP_SET_RSS_HENA:
		return "VIRTCHNL_OP_SET_RSS_HENA";
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING";
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING";
	case VIRTCHNL_OP_REQUEST_QUEUES:
		return "VIRTCHNL_OP_REQUEST_QUEUES";
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		return "VIRTCHNL_OP_ENABLE_CHANNELS";
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		return "VIRTCHNL_OP_DISABLE_CHANNELS";
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
	case VIRTCHNL_OP_DCF_CMD_DESC:
		return "VIRTCHNL_OP_DCF_CMD_DESC";
	case VIRTCHNL_OP_DCF_CMD_BUFF:
		return "VIRTCHNL_OP_DCF_CMD_BUFF";
	case VIRTCHNL_OP_DCF_DISABLE:
		return "VIRTCHNL_OP_DCF_DISABLE";
	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
		return "VIRTCHNL_OP_DCF_GET_VSI_MAP";
	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
		return "VIRTCHNL_OP_GET_SUPPORTED_RXDIDS";
	case VIRTCHNL_OP_ADD_RSS_CFG:
		return "VIRTCHNL_OP_ADD_RSS_CFG";
	case VIRTCHNL_OP_DEL_RSS_CFG:
		return "VIRTCHNL_OP_DEL_RSS_CFG";
	case VIRTCHNL_OP_ADD_FDIR_FILTER:
		return "VIRTCHNL_OP_ADD_FDIR_FILTER";
	case VIRTCHNL_OP_DEL_FDIR_FILTER:
		return "VIRTCHNL_OP_DEL_FDIR_FILTER";
	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
		return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
		return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
		return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
		return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
		return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS";
	case VIRTCHNL_OP_ADD_VLAN_V2:
		return "VIRTCHNL_OP_ADD_VLAN_V2";
	case VIRTCHNL_OP_DEL_VLAN_V2:
		return "VIRTCHNL_OP_DEL_VLAN_V2";
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2";
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2";
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
		return "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2";
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		return "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2";
	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
		return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
		return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
	case VIRTCHNL_OP_MAX:
		return "VIRTCHNL_OP_MAX";
	default:
		return "Unsupported (update virtchnl.h)";
	}
}

/* These macros are used to generate compilation errors if a structure/union
 * is not exactly the correct length. It gives a divide by zero error if the
 * structure/union is not of the correct size, otherwise it creates an enum
 * that is never used.
 */
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }

/* Virtual channel message descriptor. This overlays the admin queue
 * descriptor. All other data is passed in external buffers.
 */

struct virtchnl_msg {
	u8 pad[8];	/* AQ flags/opcode/len/retval fields */

	/* avoid confusion with desc->opcode */
	enum virtchnl_ops v_opcode;

	/* ditto for desc->retval */
	enum virtchnl_status_code v_retval;
	u32 vfid;	/* used by PF when sending to VF */
};

VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);

/* Message descriptions and data structures. */

/* VIRTCHNL_OP_VERSION
 * VF posts its version number to the PF. PF responds with its version number
 * in the same format, along with a return code.
 * Reply from PF has its major/minor versions also in param0 and param1.
 * If there is a major version mismatch, then the VF cannot operate.
 * If there is a minor version mismatch, then the VF can operate but should
 * add a warning to the system log.
 *
 * This enum element MUST always be specified as == 1, regardless of other
 * changes in the API. The PF must always respond to this message without
 * error regardless of version mismatch.
 */
#define VIRTCHNL_VERSION_MAJOR	1
#define VIRTCHNL_VERSION_MINOR	1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0

struct virtchnl_version_info {
	u32 major;
	u32 minor;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);

#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))

/* VIRTCHNL_OP_RESET_VF
 * VF sends this request to PF with no parameters
 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
 * until reset completion is indicated. The admin queue must be reinitialized
 * after this operation.
 *
 * When reset is complete, PF must ensure that all queues in all VSIs associated
 * with the VF are stopped, all queue configurations in the HMC are set to 0,
 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
 * are cleared.
 */

/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
 * vsi_type should always be 6 for backward compatibility. Add other fields
 * as needed.
 */
enum virtchnl_vsi_type {
	VIRTCHNL_VSI_TYPE_INVALID = 0,
	VIRTCHNL_VSI_SRIOV = 6,
};

/* VIRTCHNL_OP_GET_VF_RESOURCES
 * Version 1.0 VF sends this request to PF with no parameters
 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
 * PF responds with an indirect message containing
 * virtchnl_vf_resource and one or more
 * virtchnl_vsi_resource structures.
 */
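
/* Example (illustrative sketch only, assuming a hypothetical vf_send_msg()
 * transport helper that posts a message on the admin queue): a VF driver
 * negotiates the API version and then requests resources. A 1.0 VF sends
 * no payload with VIRTCHNL_OP_GET_VF_RESOURCES; a 1.1 VF sends a u32
 * bitmap of its capabilities (the VIRTCHNL_VF_* flags defined below). If
 * the PF's reply carries a different major version, the VF cannot operate
 * (see the note above).
 *
 *	struct virtchnl_version_info ver = {
 *		.major = VIRTCHNL_VERSION_MAJOR,
 *		.minor = VIRTCHNL_VERSION_MINOR,
 *	};
 *	u32 caps = VF_BASE_MODE_OFFLOADS;	// defined below
 *
 *	vf_send_msg(VIRTCHNL_OP_VERSION, &ver, sizeof(ver));
 *	// ... wait for the PF reply and copy it back into 'ver' ...
 *	if (VF_IS_V11(&ver))
 *		vf_send_msg(VIRTCHNL_OP_GET_VF_RESOURCES, &caps, sizeof(caps));
 *	else
 *		vf_send_msg(VIRTCHNL_OP_GET_VF_RESOURCES, NULL, 0);
 */
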
struct virtchnl_vsi_resource {
	u16 vsi_id;
	u16 num_queue_pairs;

	/* see enum virtchnl_vsi_type */
	s32 vsi_type;
	u16 qset_handle;
	u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);

/* VF capability flags
 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
 */
#define VIRTCHNL_VF_OFFLOAD_L2			BIT(0)
#define VIRTCHNL_VF_OFFLOAD_IWARP		BIT(1)
#define VIRTCHNL_VF_OFFLOAD_RSVD		BIT(2)
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		BIT(3)
#define VIRTCHNL_VF_OFFLOAD_RSS_REG		BIT(4)
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		BIT(5)
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
	/* BIT(8) is reserved */
#define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
#define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	BIT(18)
#define VIRTCHNL_VF_OFFLOAD_RSS_PF		BIT(19)
#define VIRTCHNL_VF_OFFLOAD_ENCAP		BIT(20)
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		BIT(21)
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	BIT(22)
#define VIRTCHNL_VF_OFFLOAD_ADQ			BIT(23)
#define VIRTCHNL_VF_OFFLOAD_ADQ_V2		BIT(24)
#define VIRTCHNL_VF_OFFLOAD_USO			BIT(25)
#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	BIT(26)
#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		BIT(27)
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
#define VIRTCHNL_VF_OFFLOAD_QOS			BIT(29)
#define VIRTCHNL_VF_CAP_DCF			BIT(30)
	/* BIT(31) is reserved */

#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
			       VIRTCHNL_VF_OFFLOAD_VLAN | \
			       VIRTCHNL_VF_OFFLOAD_RSS_PF)

struct virtchnl_vf_resource {
	u16 num_vsis;
	u16 num_queue_pairs;
	u16 max_vectors;
	u16 max_mtu;

	u32 vf_cap_flags;
	u32 rss_key_size;
	u32 rss_lut_size;

	struct virtchnl_vsi_resource vsi_res[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);

/* VIRTCHNL_OP_CONFIG_TX_QUEUE
 * VF sends this message to set up parameters for one TX queue.
 * External data buffer contains one instance of virtchnl_txq_info.
 * PF configures requested queue and returns a status code.
 */

/* Tx queue config info */
struct virtchnl_txq_info {
	u16 vsi_id;
	u16 queue_id;
	u16 ring_len;		/* number of descriptors, multiple of 8 */
	u16 headwb_enabled;	/* deprecated with AVF 1.0 */
	u64 dma_ring_addr;
	u64 dma_headwb_addr;	/* deprecated with AVF 1.0 */
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
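
/* Example (illustrative sketch): filling virtchnl_txq_info for one Tx queue
 * before sending VIRTCHNL_OP_CONFIG_TX_QUEUE. vsi_id, tx_ring_count and
 * tx_ring_dma are placeholders for values the VF driver already owns (the
 * VSI from the resource reply and its own descriptor ring allocation).
 *
 *	struct virtchnl_txq_info txq = {0};
 *
 *	txq.vsi_id = vsi_id;			// from virtchnl_vsi_resource
 *	txq.queue_id = 0;			// relative to the VSI
 *	txq.ring_len = tx_ring_count;		// multiple of 8
 *	txq.dma_ring_addr = tx_ring_dma;
 *	// headwb_enabled/dma_headwb_addr are deprecated and left at zero
 */
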
/* RX descriptor IDs (range from 0 to 63) */
enum virtchnl_rx_desc_ids {
	VIRTCHNL_RXDID_0_16B_BASE = 0,
	/* 32B_BASE and FLEX_SPLITQ share desc ids as default descriptors
	 * because they can be differentiated based on queue model; e.g. single
	 * queue model can only use 32B_BASE and split queue model can only use
	 * FLEX_SPLITQ. Having these as 1 allows them to be used as default
	 * descriptors without negotiation.
	 */
	VIRTCHNL_RXDID_1_32B_BASE = 1,
	VIRTCHNL_RXDID_1_FLEX_SPLITQ = 1,
	VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
	VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
	VIRTCHNL_RXDID_7_HW_RSVD = 7,
	/* 8 through 15 are reserved */
	VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 = 19,
	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW = 20,
	VIRTCHNL_RXDID_21_COMMS_AUX_TCP = 21,
	/* 22 through 63 are reserved */
};

/* RX descriptor ID bitmasks */
enum virtchnl_rx_desc_id_bitmasks {
	VIRTCHNL_RXDID_0_16B_BASE_M = BIT(VIRTCHNL_RXDID_0_16B_BASE),
	VIRTCHNL_RXDID_1_32B_BASE_M = BIT(VIRTCHNL_RXDID_1_32B_BASE),
	VIRTCHNL_RXDID_1_FLEX_SPLITQ_M = BIT(VIRTCHNL_RXDID_1_FLEX_SPLITQ),
	VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
	VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M = BIT(VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL),
	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M = BIT(VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2),
	VIRTCHNL_RXDID_7_HW_RSVD_M = BIT(VIRTCHNL_RXDID_7_HW_RSVD),
	/* 8 through 15 are reserved */
	VIRTCHNL_RXDID_16_COMMS_GENERIC_M = BIT(VIRTCHNL_RXDID_16_COMMS_GENERIC),
	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M = BIT(VIRTCHNL_RXDID_17_COMMS_AUX_VLAN),
	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M = BIT(VIRTCHNL_RXDID_18_COMMS_AUX_IPV4),
	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M = BIT(VIRTCHNL_RXDID_19_COMMS_AUX_IPV6),
	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M = BIT(VIRTCHNL_RXDID_20_COMMS_AUX_FLOW),
	VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M = BIT(VIRTCHNL_RXDID_21_COMMS_AUX_TCP),
	/* 22 through 63 are reserved */
};

/* VIRTCHNL_OP_CONFIG_RX_QUEUE
 * VF sends this message to set up parameters for one RX queue.
 * External data buffer contains one instance of virtchnl_rxq_info.
 * PF configures requested queue and returns a status code. The
 * crc_disable flag disables CRC stripping on the VF. Setting
 * the crc_disable flag to 1 will disable CRC stripping for each
 * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
 * offload must have been set prior to sending this info or the PF
 * will ignore the request. This flag should be set the same for
 * all of the queues for a VF.
 */

/* Rx queue config info */
struct virtchnl_rxq_info {
	u16 vsi_id;
	u16 queue_id;
	u32 ring_len;		/* number of descriptors, multiple of 32 */
	u16 hdr_size;
	u16 splithdr_enabled;	/* deprecated with AVF 1.0 */
	u32 databuffer_size;
	u32 max_pkt_size;
	u8 crc_disable;
	/* see enum virtchnl_rx_desc_ids;
	 * only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported. Note
	 * that when the offload is not supported, the descriptor format aligns
	 * with VIRTCHNL_RXDID_1_32B_BASE.
	 */
	u8 rxdid;
	u8 pad1[2];
	u64 dma_ring_addr;

	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
	s32 rx_split_pos;
	u32 pad2;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
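
/* Example (illustrative sketch): filling virtchnl_rxq_info for one Rx queue.
 * rxdid may only name a flexible descriptor format if
 * VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC was negotiated, and crc_disable may only
 * be set if VIRTCHNL_VF_OFFLOAD_CRC was negotiated; otherwise leave both 0.
 * vsi_id, rx_ring_count, rx_ring_dma and flex_desc_negotiated are
 * placeholders for driver state.
 *
 *	struct virtchnl_rxq_info rxq = {0};
 *
 *	rxq.vsi_id = vsi_id;
 *	rxq.queue_id = 0;
 *	rxq.ring_len = rx_ring_count;		// multiple of 32
 *	rxq.databuffer_size = 2048;
 *	rxq.max_pkt_size = 1536;
 *	rxq.dma_ring_addr = rx_ring_dma;
 *	if (flex_desc_negotiated)
 *		rxq.rxdid = VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
 */
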
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
 * VF sends this message to set parameters for active TX and RX queues
 * associated with the specified VSI.
 * PF configures queues and returns status.
 * If the number of queues specified is greater than the number of queues
 * associated with the VSI, an error is returned and no queues are configured.
 * NOTE: The VF is not required to configure all queues in a single request.
 * It may send multiple messages. PF drivers must correctly handle all VF
 * requests.
 */
struct virtchnl_queue_pair_info {
	/* NOTE: vsi_id and queue_id should be identical for both queues. */
	struct virtchnl_txq_info txq;
	struct virtchnl_rxq_info rxq;
};

VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);

struct virtchnl_vsi_queue_config_info {
	u16 vsi_id;
	u16 num_queue_pairs;
	u32 pad;
	struct virtchnl_queue_pair_info qpair[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);

/* VIRTCHNL_OP_REQUEST_QUEUES
 * VF sends this message to request the PF to allocate additional queues to
 * this VF. Each VF gets a guaranteed number of queues on init but asking for
 * additional queues must be negotiated. This is a best effort request as it
 * is possible the PF does not have enough queues left to support the request.
 * If the PF cannot support the number requested it will respond with the
 * maximum number it is able to support. If the request is successful, PF will
 * then reset the VF to institute required changes.
 */

/* VF resource request */
struct virtchnl_vf_res_request {
	u16 num_queue_pairs;
};

/* VIRTCHNL_OP_CONFIG_IRQ_MAP
 * VF uses this message to map vectors to queues.
 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
 * are to be associated with the specified vector.
 * The "other" causes are always mapped to vector 0. The VF may not request
 * that vector 0 be used for traffic.
 * PF configures interrupt mapping and returns status.
 * NOTE: due to hardware requirements, all active queues (both TX and RX)
 * should be mapped to interrupts, even if the driver intends to operate
 * only in polling mode. In this case the interrupt may be disabled, but
 * the ITR timer will still run to trigger writebacks.
 */
struct virtchnl_vector_map {
	u16 vsi_id;
	u16 vector_id;
	u16 rxq_map;
	u16 txq_map;
	u16 rxitr_idx;
	u16 txitr_idx;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);

struct virtchnl_irq_map_info {
	u16 num_vectors;
	struct virtchnl_vector_map vecmap[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);

/* VIRTCHNL_OP_ENABLE_QUEUES
 * VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
 * The queues fields are bitmaps indicating which queues to act upon.
 * (Currently, we only support 16 queues per VF, but we make the field
 * u32 to allow for expansion.)
 * PF performs requested action and returns status.
 * NOTE: The VF is not required to enable/disable all queues in a single
 * request. It may send multiple messages.
 * PF drivers must correctly handle all VF requests.
 */
struct virtchnl_queue_select {
	u16 vsi_id;
	u16 pad;
	u32 rx_queues;
	u32 tx_queues;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
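
/* Example (illustrative sketch, using the same hypothetical vf_send_msg()
 * helper as above): enabling the first four Tx/Rx queue pairs of a VSI.
 * VIRTCHNL_OP_DISABLE_QUEUES takes the same structure and bitmaps.
 *
 *	struct virtchnl_queue_select qsel = {0};
 *
 *	qsel.vsi_id = vsi_id;
 *	qsel.rx_queues = 0xF;	// bitmap: queues 0-3
 *	qsel.tx_queues = 0xF;
 *	vf_send_msg(VIRTCHNL_OP_ENABLE_QUEUES, &qsel, sizeof(qsel));
 */
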
/* VIRTCHNL_OP_GET_MAX_RSS_QREGION
 *
 * if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
 * then this op must be supported.
 *
 * VF sends this message in order to query the max RSS queue region
 * size supported by PF, when VIRTCHNL_VF_LARGE_NUM_QPAIRS is enabled.
 * This information should be used when configuring the RSS LUT and/or
 * configuring queue region based filters.
 *
 * The maximum RSS queue region is 2^qregion_width. So, a qregion_width
 * of 6 would inform the VF that the PF supports a maximum RSS queue region
 * of 64.
 *
 * A queue region represents a range of queues that can be used to configure
 * a RSS LUT. For example, if a VF is given 64 queues, but only a max queue
 * region size of 16 (i.e. 2^qregion_width = 16) then it will only be able
 * to configure the RSS LUT with queue indices from 0 to 15. However, other
 * filters can be used to direct packets to queues >15 via specifying a queue
 * base/offset and queue region width.
 */
struct virtchnl_max_rss_qregion {
	u16 vport_id;
	u16 qregion_width;
	u8 pad[4];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_max_rss_qregion);

/* VIRTCHNL_OP_ADD_ETH_ADDR
 * VF sends this message in order to add one or more unicast or multicast
 * address filters for the specified VSI.
 * PF adds the filters and returns status.
 */

/* VIRTCHNL_OP_DEL_ETH_ADDR
 * VF sends this message in order to remove one or more unicast or multicast
 * filters for the specified VSI.
 * PF removes the filters and returns status.
 */

/* VIRTCHNL_ETHER_ADDR_LEGACY
 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
 * bytes. Moving forward all VF drivers should not set type to
 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
 * behavior. The control plane function (i.e. PF) can use a best effort method
 * of tracking the primary/device unicast in this case, but there is no
 * guarantee and functionality depends on the implementation of the PF.
 */

/* VIRTCHNL_ETHER_ADDR_PRIMARY
 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
 * function (i.e. PF) to accurately track and use this MAC address for
 * displaying on the host and for VM/function reset.
 */

/* VIRTCHNL_ETHER_ADDR_EXTRA
 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
 * unicast and/or multicast filters that are being added/deleted via
 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
 */
struct virtchnl_ether_addr {
	u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	u8 type;
#define VIRTCHNL_ETHER_ADDR_LEGACY	0
#define VIRTCHNL_ETHER_ADDR_PRIMARY	1
#define VIRTCHNL_ETHER_ADDR_EXTRA	2
#define VIRTCHNL_ETHER_ADDR_TYPE_MASK	3 /* first two bits of type are valid */
	u8 pad;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);

struct virtchnl_ether_addr_list {
	u16 vsi_id;
	u16 num_elements;
	struct virtchnl_ether_addr list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
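
/* Example (illustrative sketch): adding the primary MAC address with
 * VIRTCHNL_OP_ADD_ETH_ADDR. Because the list ends in a one-element array,
 * a message carrying n addresses is
 * sizeof(struct virtchnl_ether_addr_list) +
 * (n - 1) * sizeof(struct virtchnl_ether_addr) bytes long; a single address
 * fits in the structure itself. 'mac' and vf_send_msg() are placeholders.
 *
 *	struct virtchnl_ether_addr_list list = {0};
 *
 *	list.vsi_id = vsi_id;
 *	list.num_elements = 1;
 *	memcpy(list.list[0].addr, mac, VIRTCHNL_ETH_LENGTH_OF_ADDRESS);
 *	list.list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
 *	vf_send_msg(VIRTCHNL_OP_ADD_ETH_ADDR, &list, sizeof(list));
 */
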
/* VIRTCHNL_OP_ADD_VLAN
 * VF sends this message to add one or more VLAN tag filters for receives.
 * PF adds the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

/* VIRTCHNL_OP_DEL_VLAN
 * VF sends this message to remove one or more VLAN tag filters for receives.
 * PF removes the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

struct virtchnl_vlan_filter_list {
	u16 vsi_id;
	u16 num_elements;
	u16 vlan_id[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);

/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
 * structures and opcodes.
 *
 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
 *
 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
 *
 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
 * by the PF concurrently. For example, if the PF can support
 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
 * would OR the following bits:
 *
 *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *	VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
 * and 0x88A8 VLAN ethertypes.
 *
 * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
 * supported by the PF concurrently. For example if the PF can support
 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
 * offload it would OR the following bits:
 *
 *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
 *
 * The VF would interpret this as VLAN stripping can be supported on either
 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
 * the previously set value.
 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
 *
 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
 * VLAN filtering if the underlying PF supports it.
 *
 * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
 * certain VLAN capability can be toggled. For example if the underlying PF/CP
 * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
 * set this bit along with the supported ethertypes.
 */
enum virtchnl_vlan_support {
	VIRTCHNL_VLAN_UNSUPPORTED = 0,
	VIRTCHNL_VLAN_ETHERTYPE_8100 = 0x00000001,
	VIRTCHNL_VLAN_ETHERTYPE_88A8 = 0x00000002,
	VIRTCHNL_VLAN_ETHERTYPE_9100 = 0x00000004,
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = 0x00000100,
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = 0x00000200,
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = 0x00000400,
	VIRTCHNL_VLAN_PRIO = 0x01000000,
	VIRTCHNL_VLAN_FILTER_MASK = 0x10000000,
	VIRTCHNL_VLAN_ETHERTYPE_AND = 0x20000000,
	VIRTCHNL_VLAN_ETHERTYPE_XOR = 0x40000000,
	VIRTCHNL_VLAN_TOGGLE = 0x80000000
};

/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
 * for filtering, insertion, and stripping capabilities.
 *
 * If only outer capabilities are supported (for filtering, insertion, and/or
 * stripping) then this refers to the outer most or single VLAN from the VF's
 * perspective.
 *
 * If only inner capabilities are supported (for filtering, insertion, and/or
 * stripping) then this refers to the outer most or single VLAN from the VF's
 * perspective. Functionally this is the same as if only outer capabilities are
 * supported. The VF driver is just forced to use the inner fields when
 * adding/deleting filters and enabling/disabling offloads (if supported).
 *
 * If both outer and inner capabilities are supported (for filtering, insertion,
 * and/or stripping) then outer refers to the outer most or single VLAN and
 * inner refers to the second VLAN, if it exists, in the packet.
 *
 * There is no support for tunneled VLAN offloads, so outer or inner are never
 * referring to a tunneled packet from the VF's perspective.
 */
struct virtchnl_vlan_supported_caps {
	u32 outer;
	u32 inner;
};

/* The PF populates these fields based on the supported VLAN filtering. If a
 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
 * the unsupported fields.
 *
 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
 * VIRTCHNL_VLAN_TOGGLE bit is set.
 *
 * The ethertype(s) specified in the ethertype_init field are the ethertypes
 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
 * most VLAN from the VF's perspective. If both inner and outer filtering are
 * allowed then ethertype_init only refers to the outer most VLAN, as the only
 * VLAN ethertype supported for inner VLAN filtering is
 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
 * when both inner and outer filtering are allowed.
 *
 * The max_filters field tells the VF how many VLAN filters it's allowed to have
 * at any one time. If it exceeds this amount and tries to add another filter,
 * then the request will be rejected by the PF. To prevent failures, the VF
 * should keep track of how many VLAN filters it has added and not attempt to
 * add more than max_filters.
 */
struct virtchnl_vlan_filtering_caps {
	struct virtchnl_vlan_supported_caps filtering_support;
	u32 ethertype_init;
	u16 max_filters;
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);

/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
 * if the PF supports a different ethertype for stripping and insertion.
 *
 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
 * for stripping affect the ethertype(s) specified for insertion and vice
 * versa as well. If the VF tries to configure VLAN stripping via
 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
 * that will be the ethertype for both stripping and insertion.
 *
 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
 * stripping do not affect the ethertype(s) specified for insertion and vice
 * versa.
 */
enum virtchnl_vlan_ethertype_match {
	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
};

/* The PF populates these fields based on the supported VLAN offloads. If a
 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
 *
 * Also, a VF is only allowed to toggle its VLAN offload setting if the
 * VIRTCHNL_VLAN_TOGGLE bit is set.
 *
 * The VF driver needs to be aware of how the tags are stripped by hardware and
 * inserted by the VF driver based on the level of offload support. The PF will
 * populate these fields based on where the VLAN tags are expected to be
 * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
 * interpret these fields. See the definition of the
 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
 * enumeration.
 */
struct virtchnl_vlan_offload_caps {
	struct virtchnl_vlan_supported_caps stripping_support;
	struct virtchnl_vlan_supported_caps insertion_support;
	u32 ethertype_init;
	u8 ethertype_match;
	u8 pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);

/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
 * VF sends this message to determine its VLAN capabilities.
 *
 * PF will mark which capabilities it supports based on hardware support and
 * current configuration. For example, if a port VLAN is configured the PF will
 * not allow outer VLAN filtering, stripping, or insertion to be configured so
 * it will block these features from the VF.
 *
 * The VF will need to cross reference its capabilities with the PF's
 * capabilities in the response message from the PF to determine the VLAN
 * support.
 */
struct virtchnl_vlan_caps {
	struct virtchnl_vlan_filtering_caps filtering;
	struct virtchnl_vlan_offload_caps offloads;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);

struct virtchnl_vlan {
	u16 tci;	/* tci[15:13] = PCP and tci[11:0] = VID */
	u16 tci_mask;	/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
			 * filtering caps
			 */
	u16 tpid;	/* 0x8100, 0x88a8, etc. and only type(s) set in
			 * filtering caps. Note that tpid here does not refer to
			 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
			 * actual 2-byte VLAN TPID
			 */
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);

struct virtchnl_vlan_filter {
	struct virtchnl_vlan inner;
	struct virtchnl_vlan outer;
	u8 pad[16];
};

VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
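
/* Example (illustrative sketch): packing a VLAN ID and priority into the
 * tci field of struct virtchnl_vlan for a 0x8100 filter, following the tci
 * layout documented above (tci[15:13] = PCP, tci[11:0] = VID). Whether the
 * inner or outer member of virtchnl_vlan_filter is used depends on the
 * negotiated filtering caps; outer is shown here. The filter is then
 * carried in a virtchnl_vlan_filter_list_v2 (defined below). 'vid' and
 * 'prio' are placeholders.
 *
 *	struct virtchnl_vlan_filter fltr = {0};
 *
 *	fltr.outer.tpid = 0x8100;
 *	fltr.outer.tci = (u16)((prio << 13) | (vid & 0xFFF));
 */
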
/* VIRTCHNL_OP_ADD_VLAN_V2
 * VIRTCHNL_OP_DEL_VLAN_V2
 *
 * VF sends these messages to add/del one or more VLAN tag filters for Rx
 * traffic.
 *
 * The PF attempts to add the filters and returns status.
 *
 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
 */
struct virtchnl_vlan_filter_list_v2 {
	u16 vport_id;
	u16 num_elements;
	u8 pad[4];
	struct virtchnl_vlan_filter filters[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);

/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
 *
 * VF sends this message to enable or disable VLAN stripping or insertion. It
 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
 * allowed and whether or not it's allowed to enable/disable the specific
 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
 * messages are allowed.
 *
 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
 * case means the outer most or single VLAN from the VF's perspective. This is
 * because no outer offloads are supported. See the comments above the
 * virtchnl_vlan_supported_caps structure for more details.
 *
 * virtchnl_vlan_caps.offloads.stripping_support.inner =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100;
 *
 * virtchnl_vlan_caps.offloads.insertion_support.inner =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100;
 *
 * In order to enable inner (again note that in this case inner is the outer
 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
 *
 * virtchnl_vlan_setting.inner_ethertype_setting =
 *			VIRTCHNL_VLAN_ETHERTYPE_8100;
 *
 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
 * initialization.
 *
 * The reason that VLAN TPID(s) are not being used for the
 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
 * possible a device could support VLAN insertion and/or stripping offload on
 * multiple ethertypes concurrently, so this method allows a VF to request
 * multiple ethertypes in one message using the virtchnl_vlan_support
 * enumeration.
 *
 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
 * VLAN insertion and stripping simultaneously. The
 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
 * populated based on what the PF can support.
 *
 * virtchnl_vlan_caps.offloads.stripping_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * virtchnl_vlan_caps.offloads.insertion_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
 * would populate the virtchnl_vlan_setting structure in the following manner
 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
 *
 * virtchnl_vlan_setting.outer_ethertype_setting =
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
 *
 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
 * initialization.
 *
 * There is also the case where a PF and the underlying hardware can support
 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
 * offloads. The ethertypes must match for stripping and insertion.
 *
 * virtchnl_vlan_caps.offloads.stripping_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
 *
 * virtchnl_vlan_caps.offloads.insertion_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
 *
 * virtchnl_vlan_caps.offloads.ethertype_match =
 *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
 *
 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
 * populate the virtchnl_vlan_setting structure in the following manner and send
 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
 *
 * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
 *
 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
 * initialization.
 *
 * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2
 * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2
 *
 * VF sends this message to enable or disable VLAN filtering. It also needs to
 * specify an ethertype. The VF knows which VLAN ethertypes are allowed and
 * whether or not it's allowed to enable/disable filtering via the
 * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
 * parse the virtchnl_vlan_caps.filtering fields to determine which, if any,
 * filtering messages are allowed.
 *
 * For example, if the PF populates the virtchnl_vlan_caps.filtering in the
 * following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8
 * outer VLAN filtering together. Note that the VIRTCHNL_VLAN_ETHERTYPE_AND
 * means that all filtering ethertypes will be enabled and disabled together
 * regardless of the request from the VF. This means that the underlying
 * hardware only supports VLAN filtering for all of the specified ethertypes
 * or none of them.
 *
 * virtchnl_vlan_caps.filtering.filtering_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_9100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100
 * VLANs aren't supported by the VF driver), the VF would populate the
 * virtchnl_vlan_setting structure in the following manner and send the
 * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. The same message format would be used
 * to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the
 * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.
 *
 * virtchnl_vlan_setting.outer_ethertype_setting =
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
 *
 */
struct virtchnl_vlan_setting {
	u32 outer_ethertype_setting;
	u32 inner_ethertype_setting;
	u16 vport_id;
	u8 pad[6];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);

/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
 * VF sends VSI id and flags.
 * PF returns status code in retval.
 * Note: we assume that broadcast accept mode is always enabled.
 */
struct virtchnl_promisc_info {
	u16 vsi_id;
	u16 flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);

#define FLAG_VF_UNICAST_PROMISC	0x00000001
#define FLAG_VF_MULTICAST_PROMISC	0x00000002

/* VIRTCHNL_OP_GET_STATS
 * VF sends this message to request stats for the selected VSI. VF uses
 * the virtchnl_queue_select struct to specify the VSI. The queue_id
 * field is ignored by the PF.
 *
 * PF replies with struct virtchnl_eth_stats in an external buffer.
 */

struct virtchnl_eth_stats {
	u64 rx_bytes;			/* received bytes */
	u64 rx_unicast;			/* received unicast pkts */
	u64 rx_multicast;		/* received multicast pkts */
	u64 rx_broadcast;		/* received broadcast pkts */
	u64 rx_discards;
	u64 rx_unknown_protocol;
	u64 tx_bytes;			/* transmitted bytes */
	u64 tx_unicast;			/* transmitted unicast pkts */
	u64 tx_multicast;		/* transmitted multicast pkts */
	u64 tx_broadcast;		/* transmitted broadcast pkts */
	u64 tx_discards;
	u64 tx_errors;
};

/* VIRTCHNL_OP_CONFIG_RSS_KEY
 * VIRTCHNL_OP_CONFIG_RSS_LUT
 * VF sends these messages to configure RSS. Only supported if both PF
 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
 * configuration negotiation. If this is the case, then the RSS fields in
 * the VF resource struct are valid.
 * Both the key and LUT are initialized to 0 by the PF, meaning that
 * RSS is effectively disabled until set up by the VF.
 */
struct virtchnl_rss_key {
	u16 vsi_id;
	u16 key_len;
	u8 key[1];	/* RSS hash key, packed bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);

struct virtchnl_rss_lut {
	u16 vsi_id;
	u16 lut_entries;
	u8 lut[1];	/* RSS lookup table */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
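
/* Example (illustrative sketch): building a VIRTCHNL_OP_CONFIG_RSS_LUT
 * message. As with the other variable-length messages in this header, the
 * trailing one-element array means the buffer for n entries is
 * sizeof(struct virtchnl_rss_lut) + (n - 1) bytes. vf_res, num_rx_queues
 * and vf_send_msg() are placeholders, and plain calloc()/free() are used
 * here purely for illustration.
 *
 *	u16 n = vf_res->rss_lut_size;
 *	u32 len = sizeof(struct virtchnl_rss_lut) + (n - 1);
 *	struct virtchnl_rss_lut *lut = calloc(1, len);
 *
 *	lut->vsi_id = vsi_id;
 *	lut->lut_entries = n;
 *	for (u16 i = 0; i < n; i++)
 *		lut->lut[i] = i % num_rx_queues;	// spread across queues
 *	vf_send_msg(VIRTCHNL_OP_CONFIG_RSS_LUT, lut, len);
 *	free(lut);
 */
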
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
 * VIRTCHNL_OP_SET_RSS_HENA
 * VF sends these messages to get and set the hash filter enable bits for RSS.
 * By default, the PF sets these to all possible traffic types that the
 * hardware supports. The VF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 */
struct virtchnl_rss_hena {
	u64 hena;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);

/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
	VIRTCHNL_RSS_ALG_XOR_ASYMMETRIC = 1,
	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
};

/* This is used by the PF driver to enforce how many channels can be supported.
 * When the ADQ_V2 capability is negotiated it allows 16 channels; otherwise
 * the PF driver allows a maximum of only 4 channels.
 */
#define VIRTCHNL_MAX_ADQ_CHANNELS	4
#define VIRTCHNL_MAX_ADQ_V2_CHANNELS	16

/* VIRTCHNL_OP_ENABLE_CHANNELS
 * VIRTCHNL_OP_DISABLE_CHANNELS
 * VF sends these messages to enable or disable channels based on
 * the user specified queue count and queue offset for each traffic class.
 * This struct encompasses all the information that the PF needs from
 * VF to create a channel.
 */
struct virtchnl_channel_info {
	u16 count;	/* number of queues in a channel */
	u16 offset;	/* queues in a channel start from 'offset' */
	u32 pad;
	u64 max_tx_rate;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);

struct virtchnl_tc_info {
	u32 num_tc;
	u32 pad;
	struct virtchnl_channel_info list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
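
/* Example (illustrative sketch): requesting a single ADQ channel of four
 * queues with VIRTCHNL_OP_ENABLE_CHANNELS. With more than one traffic
 * class the message must be sized for num_tc virtchnl_channel_info
 * entries, like the other trailing-array messages in this header.
 *
 *	struct virtchnl_tc_info tc = {0};	// room for one channel
 *
 *	tc.num_tc = 1;
 *	tc.list[0].count = 4;	// queues in this channel
 *	tc.list[0].offset = 0;	// starting at queue 0
 *	tc.list[0].max_tx_rate = 0;	// left at zero here
 */
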
/* VIRTCHNL_ADD_CLOUD_FILTER
 * VIRTCHNL_DEL_CLOUD_FILTER
 * VF sends these messages to add or delete a cloud filter based on the
 * user specified match and action filters. These structures encompass
 * all the information that the PF needs from the VF to add/delete a
 * cloud filter.
 */

struct virtchnl_l4_spec {
	u8 src_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	u8 dst_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	/* vlan_prio is part of this 16 bit field even from the OS perspective:
	 * vlan_id:12 is the actual vlan_id, and vlan_id:bit14..12 is the
	 * vlan_prio. In the future, if vlan_prio is offloaded, pass that
	 * information as part of the "vlan_id" field, Bit14..12.
	 */
	__be16 vlan_id;
	__be16 pad;	/* reserved for future use */
	__be32 src_ip[4];
	__be32 dst_ip[4];
	__be16 src_port;
	__be16 dst_port;
};

VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);

union virtchnl_flow_spec {
	struct virtchnl_l4_spec tcp_spec;
	u8 buffer[128];	/* reserved for future use */
};

VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);

enum virtchnl_action {
	/* action types */
	VIRTCHNL_ACTION_DROP = 0,
	VIRTCHNL_ACTION_TC_REDIRECT,
	VIRTCHNL_ACTION_PASSTHRU,
	VIRTCHNL_ACTION_QUEUE,
	VIRTCHNL_ACTION_Q_REGION,
	VIRTCHNL_ACTION_MARK,
	VIRTCHNL_ACTION_COUNT,
};

enum virtchnl_flow_type {
	/* flow types */
	VIRTCHNL_TCP_V4_FLOW = 0,
	VIRTCHNL_TCP_V6_FLOW,
	VIRTCHNL_UDP_V4_FLOW,
	VIRTCHNL_UDP_V6_FLOW,
};

struct virtchnl_filter {
	union virtchnl_flow_spec data;
	union virtchnl_flow_spec mask;

	/* see enum virtchnl_flow_type */
	s32 flow_type;

	/* see enum virtchnl_action */
	s32 action;
	u32 action_meta;
	u8 field_flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);

struct virtchnl_shaper_bw {
	/* Unit is Kbps */
	u32 committed;
	u32 peak;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);

/* VIRTCHNL_OP_DCF_GET_VSI_MAP
 * VF sends this message to get VSI mapping table.
 * PF responds with an indirect message containing VF's
 * HW VSI IDs.
 * The index of vf_vsi array is the logical VF ID, the
 * value of vf_vsi array is the VF's HW VSI ID with its
 * valid configuration.
 */
struct virtchnl_dcf_vsi_map {
	u16 pf_vsi;	/* PF's HW VSI ID */
	u16 num_vfs;	/* The actual number of VFs allocated */
#define VIRTCHNL_DCF_VF_VSI_ID_S	0
#define VIRTCHNL_DCF_VF_VSI_ID_M	(0xFFF << VIRTCHNL_DCF_VF_VSI_ID_S)
#define VIRTCHNL_DCF_VF_VSI_VALID	BIT(15)
	u16 vf_vsi[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_dcf_vsi_map);

#define PKG_NAME_SIZE	32
#define DSN_SIZE	8

struct pkg_version {
	u8 major;
	u8 minor;
	u8 update;
	u8 draft;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, pkg_version);

struct virtchnl_pkg_info {
	struct pkg_version pkg_ver;
	u32 track_id;
	char pkg_name[PKG_NAME_SIZE];
	u8 dsn[DSN_SIZE];
};

VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_pkg_info);
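
/* Example (illustrative sketch): decoding one entry of the
 * virtchnl_dcf_vsi_map reply defined above. Each vf_vsi[] element carries
 * the HW VSI ID in its low bits plus a valid flag. 'map' is a placeholder
 * pointer to the received reply and 'vf_id' a logical VF index.
 *
 *	u16 entry = map->vf_vsi[vf_id];
 *
 *	if (entry & VIRTCHNL_DCF_VF_VSI_VALID) {
 *		u16 hw_vsi_id = (entry & VIRTCHNL_DCF_VF_VSI_ID_M) >>
 *				VIRTCHNL_DCF_VF_VSI_ID_S;
 *		// hw_vsi_id is the HW VSI of logical VF 'vf_id'
 *	}
 */
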
/* VIRTCHNL_OP_DCF_VLAN_OFFLOAD
 * DCF negotiates the VIRTCHNL_VF_OFFLOAD_VLAN_V2 capability first to get
 * the double VLAN configuration, then DCF sends this message to configure the
 * outer or inner VLAN offloads (insertion and strip) for the target VF.
 */
struct virtchnl_dcf_vlan_offload {
	u16 vf_id;
	u16 tpid;
	u16 vlan_flags;
#define VIRTCHNL_DCF_VLAN_TYPE_S		0
#define VIRTCHNL_DCF_VLAN_TYPE_M		\
	(0x1 << VIRTCHNL_DCF_VLAN_TYPE_S)
#define VIRTCHNL_DCF_VLAN_TYPE_INNER		0x0
#define VIRTCHNL_DCF_VLAN_TYPE_OUTER		0x1
#define VIRTCHNL_DCF_VLAN_INSERT_MODE_S		1
#define VIRTCHNL_DCF_VLAN_INSERT_MODE_M		\
	(0x7 << VIRTCHNL_DCF_VLAN_INSERT_MODE_S)
#define VIRTCHNL_DCF_VLAN_INSERT_DISABLE	0x1
#define VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED	0x2
#define VIRTCHNL_DCF_VLAN_INSERT_VIA_TX_DESC	0x3
#define VIRTCHNL_DCF_VLAN_STRIP_MODE_S		4
#define VIRTCHNL_DCF_VLAN_STRIP_MODE_M		\
	(0x7 << VIRTCHNL_DCF_VLAN_STRIP_MODE_S)
#define VIRTCHNL_DCF_VLAN_STRIP_DISABLE		0x1
#define VIRTCHNL_DCF_VLAN_STRIP_ONLY		0x2
#define VIRTCHNL_DCF_VLAN_STRIP_INTO_RX_DESC	0x3
	u16 vlan_id;
	u16 pad[4];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_dcf_vlan_offload);

struct virtchnl_dcf_bw_cfg {
	u8 tc_num;
#define VIRTCHNL_DCF_BW_CIR	BIT(0)
#define VIRTCHNL_DCF_BW_PIR	BIT(1)
	u8 bw_type;
	u8 pad[2];
	enum virtchnl_bw_limit_type type;
	union {
		struct virtchnl_shaper_bw shaper;
		u8 pad2[32];
	};
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_dcf_bw_cfg);

/* VIRTCHNL_OP_DCF_CONFIG_BW
 * VF sends this message to set the bandwidth configuration of each
 * TC with a specific vf id. The flag node_type indicates whether
 * this message configures VSI node or TC node bandwidth.
 */
struct virtchnl_dcf_bw_cfg_list {
	u16 vf_id;
	u8 num_elem;
#define VIRTCHNL_DCF_TARGET_TC_BW	0
#define VIRTCHNL_DCF_TARGET_VF_BW	1
	u8 node_type;
	struct virtchnl_dcf_bw_cfg cfg[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_dcf_bw_cfg_list);

struct virtchnl_supported_rxdids {
	/* see enum virtchnl_rx_desc_id_bitmasks */
	u64 supported_rxdids;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_supported_rxdids);

/* VIRTCHNL_OP_EVENT
 * PF sends this message to inform the VF driver of events that may affect it.
 * No direct response is expected from the VF, though it may generate other
 * messages in response to this one.
 */
enum virtchnl_event_codes {
	VIRTCHNL_EVENT_UNKNOWN = 0,
	VIRTCHNL_EVENT_LINK_CHANGE,
	VIRTCHNL_EVENT_RESET_IMPENDING,
	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
	VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE,
};

#define PF_EVENT_SEVERITY_INFO			0
#define PF_EVENT_SEVERITY_ATTENTION		1
#define PF_EVENT_SEVERITY_ACTION_REQUIRED	2
#define PF_EVENT_SEVERITY_CERTAIN_DOOM		255

struct virtchnl_pf_event {
	/* see enum virtchnl_event_codes */
	s32 event;
	union {
		/* If the PF driver does not support the new speed reporting
		 * capabilities then use link_event else use link_event_adv to
		 * get the speed and link information. The ability to understand
		 * new speeds is indicated by setting the capability flag
		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
		 * in virtchnl_vf_resource struct and can be used to determine
		 * which link event struct to use below.
		 */
		struct {
			enum virtchnl_link_speed link_speed;
			u8 link_status;
		} link_event;
		struct {
			/* link_speed provided in Mbps */
			u32 link_speed;
			u8 link_status;
		} link_event_adv;
		struct {
			u16 vf_id;
			u16 vsi_id;
		} vf_vsi_map;
	} event_data;

	int severity;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);

/* VF reset states - these are written into the RSTAT register:
 * VFGEN_RSTAT on the VF
 * When the PF initiates a reset, it writes 0
 * When the reset is complete, it writes 1
 * When the PF detects that the VF has recovered, it writes 2
 * VF checks this register periodically to determine if a reset has occurred,
 * then polls it to know when the reset is complete.
 * If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked
 * will result in 3.
 */
enum virtchnl_vfr_states {
	VIRTCHNL_VFR_INPROGRESS = 0,
	VIRTCHNL_VFR_COMPLETED,
	VIRTCHNL_VFR_VFACTIVE,
};

#define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
#define PROTO_HDR_SHIFT			5
#define PROTO_HDR_FIELD_START(proto_hdr_type) \
	(proto_hdr_type << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)

/* VF uses these macros to configure each protocol header, specifying which
 * protocol headers and protocol header fields to use based on
 * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
 * @param hdr: a struct of virtchnl_proto_hdr
 * @param hdr_type: ETH/IPV4/TCP, etc
 * @param field: SRC/DST/TEID/SPI, etc
 */
#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)

#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))

#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
	(((hdr)->type) >> PROTO_HDR_SHIFT)
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
	((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
	(VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
	 VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
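
/* Example (illustrative sketch): describing an IPv4 + UDP header pair with
 * the macros above, e.g. while building an RSS or FDIR rule. The
 * VIRTCHNL_PROTO_HDR_* enums and struct virtchnl_proto_hdr referenced here
 * are defined below.
 *
 *	struct virtchnl_proto_hdr ip4 = {0}, udp = {0};
 *
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&ip4, IPV4);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&ip4, IPV4, SRC);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&ip4, IPV4, DST);
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&udp, UDP);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&udp, UDP, SRC_PORT);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&udp, UDP, DST_PORT);
 */
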
 */
enum virtchnl_proto_hdr_type {
	VIRTCHNL_PROTO_HDR_NONE,
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_S_VLAN,
	VIRTCHNL_PROTO_HDR_C_VLAN,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_TCP,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_SCTP,
	VIRTCHNL_PROTO_HDR_GTPU_IP,
	VIRTCHNL_PROTO_HDR_GTPU_EH,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
	VIRTCHNL_PROTO_HDR_PPPOE,
	VIRTCHNL_PROTO_HDR_L2TPV3,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_AH,
	VIRTCHNL_PROTO_HDR_PFCP,
	VIRTCHNL_PROTO_HDR_GTPC,
	VIRTCHNL_PROTO_HDR_ECPRI,
	VIRTCHNL_PROTO_HDR_L2TPV2,
	VIRTCHNL_PROTO_HDR_PPP,
	/* The IPv4 and IPv6 Fragment header types are only associated with
	 * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively
	 * and cannot be used independently.
	 */
	VIRTCHNL_PROTO_HDR_IPV4_FRAG,
	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
	VIRTCHNL_PROTO_HDR_GRE,
};

/* Protocol header field within a protocol header. */
enum virtchnl_proto_hdr_field {
	/* ETHER */
	VIRTCHNL_PROTO_HDR_ETH_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
	VIRTCHNL_PROTO_HDR_ETH_DST,
	VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
	/* S-VLAN */
	VIRTCHNL_PROTO_HDR_S_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
	/* C-VLAN */
	VIRTCHNL_PROTO_HDR_C_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
	/* IPV4 */
	VIRTCHNL_PROTO_HDR_IPV4_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
	VIRTCHNL_PROTO_HDR_IPV4_DST,
	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
	VIRTCHNL_PROTO_HDR_IPV4_TTL,
	VIRTCHNL_PROTO_HDR_IPV4_PROT,
	/* IPV6 */
	VIRTCHNL_PROTO_HDR_IPV6_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
	VIRTCHNL_PROTO_HDR_IPV6_DST,
	VIRTCHNL_PROTO_HDR_IPV6_TC,
	VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
	VIRTCHNL_PROTO_HDR_IPV6_PROT,
	/* IPV6 Prefix */
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,
	/* TCP */
	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
	VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
	/* UDP */
	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
	VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
	/* SCTP */
	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
	VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
	/* GTPU_IP */
	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
	/* GTPU_EH */
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
	VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
	/* PPPOE */
	VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
	/* L2TPV3 */
	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
	/* ESP */
	VIRTCHNL_PROTO_HDR_ESP_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
	/* AH */
	VIRTCHNL_PROTO_HDR_AH_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
	/* PFCP */
	VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
	VIRTCHNL_PROTO_HDR_PFCP_SEID,
	/* GTPC */
	VIRTCHNL_PROTO_HDR_GTPC_TEID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
	/* ECPRI */
	VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
	VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
	/* IPv4 Dummy Fragment */
	VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
	/* IPv6 Extension Fragment */
	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
	/* GTPU_DWN/UP */
	VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
	VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
};

struct virtchnl_proto_hdr {
	/* see enum virtchnl_proto_hdr_type */
	s32 type;
	u32 field_selector; /* a bitmask selecting fields for this header type */
	u8 buffer[64];
	/**
	 * Binary buffer in network order for the specific header type.
	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
	 * header is expected to be copied into the buffer.
	 */
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);

struct virtchnl_proto_hdrs {
	u8 tunnel_level;
	/**
	 * Specifies where the protocol headers start from.
	 *	0 - from the outer layer
	 *	1 - from the first inner layer
	 *	2 - from the second inner layer
	 *	....
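	 *
	 * For example (an illustrative sketch only; "hdrs" is a hypothetical
	 * pointer to this structure), a plain, non-tunneled IPv4/TCP flow
	 * could be described as:
	 *
	 *	hdrs->tunnel_level = 0;
	 *	hdrs->count = 2;
	 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[0], IPV4);
	 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[1], TCP);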
	 **/
	int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
	struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
};

VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);

struct virtchnl_rss_cfg {
	struct virtchnl_proto_hdrs proto_hdrs;	/* protocol headers */

	/* see enum virtchnl_rss_algorithm; RSS algorithm type */
	s32 rss_algorithm;
	u8 reserved[128];			/* reserved for future use */
};

VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);

/* action configuration for FDIR */
struct virtchnl_filter_action {
	/* see enum virtchnl_action type */
	s32 type;
	union {
		/* used for queue and qgroup action */
		struct {
			u16 index;
			u8 region;
		} queue;
		/* used for count action */
		struct {
			/* share counter ID with other flow rules */
			u8 shared;
			u32 id; /* counter ID */
		} count;
		/* used for mark action */
		u32 mark_id;
		u8 reserve[32];
	} act_conf;
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);

#define VIRTCHNL_MAX_NUM_ACTIONS	8

struct virtchnl_filter_action_set {
	/* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
	int count;
	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
};

VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);

/* pattern and action for FDIR rule */
struct virtchnl_fdir_rule {
	struct virtchnl_proto_hdrs proto_hdrs;
	struct virtchnl_filter_action_set action_set;
};

VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);

/* Status returned to the VF after the VF requests FDIR commands
 * VIRTCHNL_FDIR_SUCCESS
 * The VF FDIR-related request was completed successfully by the PF.
 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 * The OP_ADD_FDIR_FILTER request failed because no hardware resource
 * is available.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
 * The OP_ADD_FDIR_FILTER request failed because the rule already exists.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
 * The OP_ADD_FDIR_FILTER request failed because it conflicts with an
 * existing rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
 * The OP_DEL_FDIR_FILTER request failed because the rule does not exist.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
 * The OP_ADD_FDIR_FILTER request failed because parameter validation
 * failed or the hardware does not support the rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
 * The OP_ADD/DEL_FDIR_FILTER request failed because rule programming
 * timed out.
 *
 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
 * The OP_QUERY_FDIR_FILTER request failed because parameter validation
 * failed, for example, the VF queried the counter of a rule that has no
 * counter action.
 */
enum virtchnl_fdir_prgm_status {
	VIRTCHNL_FDIR_SUCCESS = 0,
	VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
	VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
	VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
	VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
	VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
};

/* VIRTCHNL_OP_ADD_FDIR_FILTER
 * The VF sends this request to the PF by filling out vsi_id,
 * validate_only and rule_cfg. The PF returns flow_id if the request
 * completes successfully, and returns add_status to the VF.
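 *
 * For example (an illustrative sketch only; "fdir" is a hypothetical pointer
 * to a zeroed struct virtchnl_fdir_add, "vsi_id" is the VF's VSI and
 * VIRTCHNL_ACTION_QUEUE refers to enum virtchnl_action defined earlier in
 * this header), after describing the match pattern in rule_cfg.proto_hdrs
 * as shown above, the VF might fill out:
 *
 *	fdir->vsi_id = vsi_id;
 *	fdir->validate_only = 0;
 *	fdir->rule_cfg.action_set.count = 1;
 *	fdir->rule_cfg.action_set.actions[0].type = VIRTCHNL_ACTION_QUEUE;
 *	fdir->rule_cfg.action_set.actions[0].act_conf.queue.index = 3;
 *
 * On success the PF fills in flow_id and status.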
 */
struct virtchnl_fdir_add {
	u16 vsi_id;  /* INPUT */
	/*
	 * 1 to validate an FDIR rule, 0 to create an FDIR rule.
	 * Validation and creation share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
	 */
	u16 validate_only;  /* INPUT */
	u32 flow_id;  /* OUTPUT */
	struct virtchnl_fdir_rule rule_cfg; /* INPUT */

	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
	s32 status;
};

VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);

/* VIRTCHNL_OP_DEL_FDIR_FILTER
 * The VF sends this request to the PF by filling out vsi_id
 * and flow_id. The PF returns the deletion status to the VF.
 */
struct virtchnl_fdir_del {
	u16 vsi_id;  /* INPUT */
	u16 pad;
	u32 flow_id;  /* INPUT */

	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
	s32 status;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);

/* VIRTCHNL_OP_GET_QOS_CAPS
 * The VF sends this message to get its QoS capabilities, such as the
 * number of TCs, the arbiter and the bandwidth.
 */
struct virtchnl_qos_cap_elem {
	u8 tc_num;
	u8 tc_prio;
#define VIRTCHNL_ABITER_STRICT	0
#define VIRTCHNL_ABITER_ETS	2
	u8 arbiter;
#define VIRTCHNL_STRICT_WEIGHT	1
	u8 weight;
	enum virtchnl_bw_limit_type type;
	union {
		struct virtchnl_shaper_bw shaper;
		u8 pad2[32];
	};
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);

struct virtchnl_qos_cap_list {
	u16 vsi_id;
	u16 num_elem;
	struct virtchnl_qos_cap_elem cap[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list);

/* VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP
 * The VF sends a virtchnl_queue_tc_mapping message to set the queue-to-TC
 * mapping for all the Tx and Rx queues of a specified VSI, and receives in
 * response a bitmap of the valid user priorities associated with the queues.
 */
struct virtchnl_queue_tc_mapping {
	u16 vsi_id;
	u16 num_tc;
	u16 num_queue_pairs;
	u8 pad[2];
	union {
		struct {
			u16 start_queue_id;
			u16 queue_count;
		} req;
		struct {
#define VIRTCHNL_USER_PRIO_TYPE_UP	0
#define VIRTCHNL_USER_PRIO_TYPE_DSCP	1
			u16 prio_type;
			u16 valid_prio_bitmap;
		} resp;
	} tc[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);


/* TX and RX queue types are valid in the legacy as well as the split queue
 * model. The split queue model introduces 2 additional types: TX_COMPLETION
 * and RX_BUFFER. In the split queue model, RX corresponds to the queue where
 * HW posts completions.
 */
enum virtchnl_queue_type {
	VIRTCHNL_QUEUE_TYPE_TX			= 0,
	VIRTCHNL_QUEUE_TYPE_RX			= 1,
	VIRTCHNL_QUEUE_TYPE_TX_COMPLETION	= 2,
	VIRTCHNL_QUEUE_TYPE_RX_BUFFER		= 3,
	VIRTCHNL_QUEUE_TYPE_CONFIG_TX		= 4,
	VIRTCHNL_QUEUE_TYPE_CONFIG_RX		= 5
};


/* structure to specify a chunk of contiguous queues */
struct virtchnl_queue_chunk {
	/* see enum virtchnl_queue_type */
	s32 type;
	u16 start_queue_id;
	u16 num_queues;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);

/* structure to specify several chunks of contiguous queues */
struct virtchnl_queue_chunks {
	u16 num_chunks;
	u16 rsvd;
	struct virtchnl_queue_chunk chunks[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks);


/* VIRTCHNL_OP_ENABLE_QUEUES_V2
 * VIRTCHNL_OP_DISABLE_QUEUES_V2
 * VIRTCHNL_OP_DEL_QUEUES
 *
 * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
 * then all of these ops are available.
 *
 * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
 * then VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 are
 * available.
 *
 * The VF sends these messages to enable, disable or delete the queues
 * specified in chunks. The VF sends a virtchnl_del_ena_dis_queues struct to
 * specify the queues to be enabled/disabled/deleted. This is also applicable
 * to a single RX or TX queue. The CP performs the requested action and
 * returns status.
 */
struct virtchnl_del_ena_dis_queues {
	u16 vport_id;
	u16 pad;
	struct virtchnl_queue_chunks chunks;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_del_ena_dis_queues);

/* Virtchannel interrupt throttling rate index */
enum virtchnl_itr_idx {
	VIRTCHNL_ITR_IDX_0	= 0,
	VIRTCHNL_ITR_IDX_1	= 1,
	VIRTCHNL_ITR_IDX_NO_ITR	= 3,
};

/* Queue to vector mapping */
struct virtchnl_queue_vector {
	u16 queue_id;
	u16 vector_id;
	u8 pad[4];

	/* see enum virtchnl_itr_idx */
	s32 itr_idx;

	/* see enum virtchnl_queue_type */
	s32 queue_type;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);

/* VIRTCHNL_OP_MAP_QUEUE_VECTOR
 * VIRTCHNL_OP_UNMAP_QUEUE_VECTOR
 *
 * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
 * then all of these ops are available.
 *
 * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
 * then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available.
 *
 * The VF sends this message to map or unmap queues to vectors and ITR index
 * registers. The external data buffer contains a virtchnl_queue_vector_maps
 * structure that holds num_qv_maps virtchnl_queue_vector structures.
 * The CP maps the requested queue-to-vector mappings after validating the
 * queue and vector ids and returns a status code.
 */
struct virtchnl_queue_vector_maps {
	u16 vport_id;
	u16 num_qv_maps;
	u8 pad[4];
	struct virtchnl_queue_vector qv_maps[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);


/* Since VF messages are limited to a u16 size, precalculate the maximum
 * possible number of nested elements in virtchnl structures that the virtual
 * channel can handle in a single message.
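 *
 * For example (an illustrative derivation), a VIRTCHNL_OP_ADD_ETH_ADDR
 * message is a struct virtchnl_ether_addr_list header plus an array of
 * struct virtchnl_ether_addr entries, so the element count that fits in a
 * u16-sized message is bounded by
 *	(0xFFFF - sizeof(struct virtchnl_ether_addr_list)) /
 *		sizeof(struct virtchnl_ether_addr)
 * which is exactly how VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX below is derived.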
 */
enum virtchnl_vector_limits {
	VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX	=
		((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
		sizeof(struct virtchnl_queue_pair_info),

	VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX		=
		((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
		sizeof(struct virtchnl_vector_map),

	VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX	=
		((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
		sizeof(struct virtchnl_ether_addr),

	VIRTCHNL_OP_ADD_DEL_VLAN_MAX		=
		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
		sizeof(u16),

	VIRTCHNL_OP_ENABLE_CHANNELS_MAX		=
		((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
		sizeof(struct virtchnl_channel_info),

	VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX	=
		((u16)(~0) - sizeof(struct virtchnl_del_ena_dis_queues)) /
		sizeof(struct virtchnl_queue_chunk),

	VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX	=
		((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) /
		sizeof(struct virtchnl_queue_vector),

	VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX		=
		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) /
		sizeof(struct virtchnl_vlan_filter),
};

/**
 * virtchnl_vc_validate_vf_msg
 * @ver: Virtchnl version info
 * @v_opcode: Opcode for the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg format against struct for each opcode
 */
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
			    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	u32 valid_len = 0;

	/* Validate message length. */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct virtchnl_version_info);
		break;
	case VIRTCHNL_OP_RESET_VF:
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		if (VF_IS_V11(ver))
			valid_len = sizeof(u32);
		break;
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct virtchnl_txq_info);
		break;
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct virtchnl_rxq_info);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct virtchnl_vsi_queue_config_info *vqc =
				(struct virtchnl_vsi_queue_config_info *)msg;

			if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
				err_msg_format = true;
				break;
			}

			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct virtchnl_queue_pair_info));
		}
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct virtchnl_irq_map_info *vimi =
				(struct virtchnl_irq_map_info *)msg;

			if (vimi->num_vectors == 0 || vimi->num_vectors >
			    VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
				err_msg_format = true;
				break;
			}

			valid_len += (vimi->num_vectors *
				      sizeof(struct virtchnl_vector_map));
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
	case VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		valid_len = sizeof(struct virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct virtchnl_ether_addr_list *veal =
				(struct virtchnl_ether_addr_list *)msg;

			if (veal->num_elements == 0 || veal->num_elements >
			    VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
				err_msg_format = true;
				break;
			}

			valid_len += veal->num_elements *
				     sizeof(struct virtchnl_ether_addr);
		}
		break;
	case VIRTCHNL_OP_ADD_VLAN:
	case VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct virtchnl_vlan_filter_list *vfl =
				(struct virtchnl_vlan_filter_list *)msg;

			if (vfl->num_elements == 0 || vfl->num_elements >
			    VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
				err_msg_format = true;
				break;
			}

			valid_len += vfl->num_elements * sizeof(u16);
		}
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct virtchnl_promisc_info);
		break;
	case VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		valid_len = sizeof(struct virtchnl_rss_key);
		if (msglen >= valid_len) {
			struct virtchnl_rss_key *vrk =
				(struct virtchnl_rss_key *)msg;

			if (vrk->key_len == 0) {
				/* zero length is allowed as input */
				break;
			}

			valid_len += vrk->key_len - 1;
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		valid_len = sizeof(struct virtchnl_rss_lut);
		if (msglen >= valid_len) {
			struct virtchnl_rss_lut *vrl =
				(struct virtchnl_rss_lut *)msg;

			if (vrl->lut_entries == 0) {
				/* zero entries is allowed as input */
				break;
			}

			valid_len += vrl->lut_entries - 1;
		}
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		valid_len = sizeof(struct virtchnl_rss_hena);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		valid_len = sizeof(struct virtchnl_vf_res_request);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		valid_len = sizeof(struct virtchnl_tc_info);
		if (msglen >= valid_len) {
			struct virtchnl_tc_info *vti =
				(struct virtchnl_tc_info *)msg;

			if (vti->num_tc == 0 || vti->num_tc >
			    VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
				err_msg_format = true;
				break;
			}

			valid_len += (vti->num_tc - 1) *
				     sizeof(struct virtchnl_channel_info);
		}
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		valid_len = sizeof(struct virtchnl_filter);
		break;
	case VIRTCHNL_OP_DCF_VLAN_OFFLOAD:
		valid_len = sizeof(struct virtchnl_dcf_vlan_offload);
		break;
	case VIRTCHNL_OP_DCF_CMD_DESC:
	case VIRTCHNL_OP_DCF_CMD_BUFF:
		/* These two opcodes are specific to handling the AdminQ
		 * command, so the validation needs to be done in the PF's
		 * context.
		 */
		valid_len = msglen;
		break;
	case VIRTCHNL_OP_DCF_DISABLE:
	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
	case VIRTCHNL_OP_DCF_GET_PKG_INFO:
		break;
	case VIRTCHNL_OP_DCF_CONFIG_BW:
		valid_len = sizeof(struct virtchnl_dcf_bw_cfg_list);
		if (msglen >= valid_len) {
			struct virtchnl_dcf_bw_cfg_list *cfg_list =
				(struct virtchnl_dcf_bw_cfg_list *)msg;
			if (cfg_list->num_elem == 0) {
				err_msg_format = true;
				break;
			}
			valid_len += (cfg_list->num_elem - 1) *
				     sizeof(struct virtchnl_dcf_bw_cfg);
		}
		break;
	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG:
	case VIRTCHNL_OP_DEL_RSS_CFG:
		valid_len = sizeof(struct virtchnl_rss_cfg);
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER:
		valid_len = sizeof(struct virtchnl_fdir_add);
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER:
		valid_len = sizeof(struct virtchnl_fdir_del);
		break;
	case VIRTCHNL_OP_GET_QOS_CAPS:
		break;
	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
		valid_len = sizeof(struct virtchnl_queue_tc_mapping);
		if (msglen >= valid_len) {
			struct virtchnl_queue_tc_mapping *q_tc =
				(struct virtchnl_queue_tc_mapping *)msg;
			if (q_tc->num_tc == 0) {
				err_msg_format = true;
				break;
			}
			valid_len += (q_tc->num_tc - 1) *
				     sizeof(q_tc->tc[0]);
		}
		break;
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
		break;
	case VIRTCHNL_OP_ADD_VLAN_V2:
	case VIRTCHNL_OP_DEL_VLAN_V2:
		valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
		if (msglen >= valid_len) {
			struct virtchnl_vlan_filter_list_v2 *vfl =
				(struct virtchnl_vlan_filter_list_v2 *)msg;

			if (vfl->num_elements == 0 || vfl->num_elements >
			    VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) {
				err_msg_format = true;
				break;
			}

			valid_len += (vfl->num_elements - 1) *
				     sizeof(struct virtchnl_vlan_filter);
		}
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
		valid_len = sizeof(struct virtchnl_vlan_setting);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
		valid_len = sizeof(struct virtchnl_del_ena_dis_queues);
		if (msglen >= valid_len) {
			struct virtchnl_del_ena_dis_queues *qs =
				(struct virtchnl_del_ena_dis_queues *)msg;
			if (qs->chunks.num_chunks == 0 ||
			    qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) {
				err_msg_format = true;
				break;
			}
			valid_len += (qs->chunks.num_chunks - 1) *
				     sizeof(struct virtchnl_queue_chunk);
		}
		break;
	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
		valid_len = sizeof(struct virtchnl_queue_vector_maps);
		if (msglen >= valid_len) {
			struct virtchnl_queue_vector_maps *v_qp =
				(struct virtchnl_queue_vector_maps *)msg;
			if (v_qp->num_qv_maps == 0 ||
			    v_qp->num_qv_maps > VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {
				err_msg_format = true;
				break;
			}
			valid_len += (v_qp->num_qv_maps - 1) *
				     sizeof(struct virtchnl_queue_vector);
		}
		break;
	/* These opcodes are always errors coming from the VF.
	 */
	case VIRTCHNL_OP_EVENT:
	case VIRTCHNL_OP_UNKNOWN:
	default:
		return VIRTCHNL_STATUS_ERR_PARAM;
	}
	/* a few more checks: the message length must exactly match */
	if (err_msg_format || valid_len != msglen)
		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;

	return 0;
}
#endif /* _VIRTCHNL_H_ */
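
/* Usage note (an illustrative sketch only, not part of this header's API;
 * "vf", "v_opcode", "msg" and "msglen" are hypothetical names from a PF-side
 * receive path): a PF message handler would typically call
 * virtchnl_vc_validate_vf_msg() before acting on a received opcode, e.g.
 *
 *	err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode, msg, msglen);
 *	if (err)
 *		return err;	// reply with the returned status instead
 *
 * and only then cast msg to the structure that matches v_opcode.
 */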