xref: /dpdk/drivers/common/iavf/virtchnl.h (revision 8809f78c7dd9f33a44a4f89c58fc91ded34296ed)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4 
5 #ifndef _VIRTCHNL_H_
6 #define _VIRTCHNL_H_
7 
8 /* Description:
9  * This header file describes the VF-PF communication protocol used
10  * by the drivers for all devices starting from our 40G product line
11  *
12  * Admin queue buffer usage:
13  * desc->opcode is always aqc_opc_send_msg_to_pf
14  * flags, retval, datalen, and data addr are all used normally.
15  * The Firmware copies the cookie fields when sending messages between the
16  * PF and VF, but uses all other fields internally. Due to this limitation,
17  * we must send all messages as "indirect", i.e. using an external buffer.
18  *
19  * All the VSI indexes are relative to the VF. Each VF can have a maximum of
20  * three VSIs. All the queue indexes are relative to the VSI.  Each VF can
21  * have a maximum of sixteen queues for all of its VSIs.
22  *
23  * The PF is required to return a status code in v_retval for all messages
24  * except RESET_VF, which does not require any response. The return value
25  * is of status_code type, defined in the shared type.h.
26  *
27  * In general, VF driver initialization should roughly follow the order of
28  * these opcodes. The VF driver must first validate the API version of the
29  * PF driver, then request a reset, then get resources, then configure
30  * queues and interrupts. After these operations are complete, the VF
31  * driver may start its queues, optionally add MAC and VLAN filters, and
32  * process traffic.
33  */
34 
35 /* START GENERIC DEFINES
36  * Need to ensure the following enums and defines hold the same meaning and
37  * value in current and future projects
38  */
39 
40 /* Error Codes */
41 enum virtchnl_status_code {
42 	VIRTCHNL_STATUS_SUCCESS				= 0,
43 	VIRTCHNL_STATUS_ERR_PARAM			= -5,
44 	VIRTCHNL_STATUS_ERR_NO_MEMORY			= -18,
45 	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH		= -38,
46 	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR		= -39,
47 	VIRTCHNL_STATUS_ERR_INVALID_VF_ID		= -40,
48 	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR		= -53,
49 	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED		= -64,
50 };
51 
52 /* Backward compatibility */
53 #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
54 #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
55 
56 #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT		0x0
57 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT		0x1
58 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT	0x2
59 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT		0x3
60 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT		0x4
61 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT		0x5
62 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT		0x6
63 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT		0x7
64 
65 enum virtchnl_link_speed {
66 	VIRTCHNL_LINK_SPEED_UNKNOWN	= 0,
67 	VIRTCHNL_LINK_SPEED_100MB	= BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
68 	VIRTCHNL_LINK_SPEED_1GB		= BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
69 	VIRTCHNL_LINK_SPEED_10GB	= BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
70 	VIRTCHNL_LINK_SPEED_40GB	= BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
71 	VIRTCHNL_LINK_SPEED_20GB	= BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
72 	VIRTCHNL_LINK_SPEED_25GB	= BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
73 	VIRTCHNL_LINK_SPEED_2_5GB	= BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
74 	VIRTCHNL_LINK_SPEED_5GB		= BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
75 };
76 
77 /* for hsplit_0 field of Rx HMC context */
78 /* deprecated with IAVF 1.0 */
79 enum virtchnl_rx_hsplit {
80 	VIRTCHNL_RX_HSPLIT_NO_SPLIT      = 0,
81 	VIRTCHNL_RX_HSPLIT_SPLIT_L2      = 1,
82 	VIRTCHNL_RX_HSPLIT_SPLIT_IP      = 2,
83 	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
84 	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,
85 };
86 
87 #define VIRTCHNL_ETH_LENGTH_OF_ADDRESS	6
88 /* END GENERIC DEFINES */
89 
90 /* Opcodes for VF-PF communication. These are placed in the v_opcode field
91  * of the virtchnl_msg structure.
92  */
93 enum virtchnl_ops {
94 /* The PF sends status change events to VFs using
95  * the VIRTCHNL_OP_EVENT opcode.
96  * VFs send requests to the PF using the other ops.
97  * Use of "advanced opcode" features must be negotiated as part of the
98  * capabilities exchange and is not part of the base mode feature set.
99  */
100 	VIRTCHNL_OP_UNKNOWN = 0,
101 	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
102 	VIRTCHNL_OP_RESET_VF = 2,
103 	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
104 	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
105 	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
106 	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
107 	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
108 	VIRTCHNL_OP_ENABLE_QUEUES = 8,
109 	VIRTCHNL_OP_DISABLE_QUEUES = 9,
110 	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
111 	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
112 	VIRTCHNL_OP_ADD_VLAN = 12,
113 	VIRTCHNL_OP_DEL_VLAN = 13,
114 	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
115 	VIRTCHNL_OP_GET_STATS = 15,
116 	VIRTCHNL_OP_RSVD = 16,
117 	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
118 	/* opcode 19 is reserved */
119 	/* opcodes 20, 21, and 22 are reserved */
120 	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
121 	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
122 	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
123 	VIRTCHNL_OP_SET_RSS_HENA = 26,
124 	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
125 	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
126 	VIRTCHNL_OP_REQUEST_QUEUES = 29,
127 	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
128 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
129 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
130 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
131 	/* opcodes 34, 35, 36, 37 and 38 are reserved */
132 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
133 	VIRTCHNL_OP_DCF_CMD_BUFF = 40,
134 	VIRTCHNL_OP_DCF_DISABLE = 41,
135 	VIRTCHNL_OP_DCF_GET_VSI_MAP = 42,
136 	VIRTCHNL_OP_DCF_GET_PKG_INFO = 43,
137 	VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
138 	VIRTCHNL_OP_ADD_RSS_CFG = 45,
139 	VIRTCHNL_OP_DEL_RSS_CFG = 46,
140 	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
141 	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
142 	VIRTCHNL_OP_QUERY_FDIR_FILTER = 49,
143 	VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
144 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
145 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
146 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
147 	VIRTCHNL_OP_MAX,
148 };
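
/* Illustrative sketch, not part of the virtchnl message set: the opcode
 * order a VF driver typically follows during initialization, matching the
 * description at the top of this file. The helper name and shape are
 * assumptions made purely for illustration; message construction and admin
 * queue transport are driver specific and omitted.
 */
static inline enum virtchnl_ops
virtchnl_example_init_step(int step)
{
	static const enum virtchnl_ops init_order[] = {
		VIRTCHNL_OP_VERSION,		/* validate API version first */
		VIRTCHNL_OP_RESET_VF,		/* then request a reset */
		VIRTCHNL_OP_GET_VF_RESOURCES,	/* then get resources */
		VIRTCHNL_OP_CONFIG_VSI_QUEUES,	/* then configure queues */
		VIRTCHNL_OP_CONFIG_IRQ_MAP,	/* ... and interrupts */
		VIRTCHNL_OP_ENABLE_QUEUES,	/* finally start the queues */
	};

	if (step < 0 || step >= (int)(sizeof(init_order) / sizeof(init_order[0])))
		return VIRTCHNL_OP_UNKNOWN;
	return init_order[step];
}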
149 
150 /* These macros are used to generate compilation errors if a structure/union
151  * is not exactly the correct length. They trigger a divide-by-zero error if
152  * the structure/union is not of the correct size; otherwise they create an
153  * enum that is never used.
154  */
155 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
156 	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
157 #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
158 	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
159 
160 /* Virtual channel message descriptor. This overlays the admin queue
161  * descriptor. All other data is passed in external buffers.
162  */
163 
164 struct virtchnl_msg {
165 	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
166 	enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
167 	enum virtchnl_status_code v_retval;  /* ditto for desc->retval */
168 	u32 vfid;			 /* used by PF when sending to VF */
169 };
170 
171 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
172 
173 /* Message descriptions and data structures. */
174 
175 /* VIRTCHNL_OP_VERSION
176  * VF posts its version number to the PF. PF responds with its version number
177  * in the same format, along with a return code.
178  * Reply from PF has its major/minor versions also in param0 and param1.
179  * If there is a major version mismatch, then the VF cannot operate.
180  * If there is a minor version mismatch, then the VF can operate but should
181  * add a warning to the system log.
182  *
183  * This enum element MUST always be specified as == 1, regardless of other
184  * changes in the API. The PF must always respond to this message without
185  * error regardless of version mismatch.
186  */
187 #define VIRTCHNL_VERSION_MAJOR		1
188 #define VIRTCHNL_VERSION_MINOR		1
189 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
190 
191 struct virtchnl_version_info {
192 	u32 major;
193 	u32 minor;
194 };
195 
196 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
197 
198 #define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
199 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
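
/* Illustrative sketch, not part of the virtchnl API: how a VF driver might
 * interpret the PF's VIRTCHNL_OP_VERSION reply per the rules above. A major
 * mismatch is fatal; a minor mismatch is tolerated but should be logged by
 * the caller. The helper name is an assumption for illustration only.
 */
static inline int
virtchnl_example_version_compatible(const struct virtchnl_version_info *pf_ver)
{
	if (pf_ver->major != VIRTCHNL_VERSION_MAJOR)
		return 0;	/* cannot operate on a major version mismatch */
	/* minor mismatch: still usable, caller should emit a warning */
	return 1;
}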
200 
201 /* VIRTCHNL_OP_RESET_VF
202  * VF sends this request to PF with no parameters
203  * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
204  * until reset completion is indicated. The admin queue must be reinitialized
205  * after this operation.
206  *
207  * When reset is complete, PF must ensure that all queues in all VSIs associated
208  * with the VF are stopped, all queue configurations in the HMC are set to 0,
209  * and all MAC and VLAN filters (except the default MAC address) on all VSIs
210  * are cleared.
211  */
212 
213 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
214  * vsi_type should always be 6 for backward compatibility. Add other fields
215  * as needed.
216  */
217 enum virtchnl_vsi_type {
218 	VIRTCHNL_VSI_TYPE_INVALID = 0,
219 	VIRTCHNL_VSI_SRIOV = 6,
220 };
221 
222 /* VIRTCHNL_OP_GET_VF_RESOURCES
223  * Version 1.0 VF sends this request to PF with no parameters
224  * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
225  * PF responds with an indirect message containing
226  * virtchnl_vf_resource and one or more
227  * virtchnl_vsi_resource structures.
228  */
229 
230 struct virtchnl_vsi_resource {
231 	u16 vsi_id;
232 	u16 num_queue_pairs;
233 	enum virtchnl_vsi_type vsi_type;
234 	u16 qset_handle;
235 	u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
236 };
237 
238 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
239 
240 /* VF capability flags
241  * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
242  * TX/RX Checksum offloading and TSO for non-tunnelled packets.
243  */
244 #define VIRTCHNL_VF_OFFLOAD_L2			0x00000001
245 #define VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
246 #define VIRTCHNL_VF_OFFLOAD_RSVD		0x00000004
247 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
248 #define VIRTCHNL_VF_OFFLOAD_RSS_REG		0x00000010
249 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		0x00000020
250 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		0x00000040
251 #define VIRTCHNL_VF_OFFLOAD_CRC			0x00000080
252 	/* 0X00000100 is reserved */
253 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		0x00000200
254 #define VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
255 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		0x00020000
256 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
257 #define VIRTCHNL_VF_OFFLOAD_RSS_PF		0X00080000
258 #define VIRTCHNL_VF_OFFLOAD_ENCAP		0X00100000
259 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		0X00200000
260 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	0X00400000
261 #define VIRTCHNL_VF_OFFLOAD_ADQ			0X00800000
262 #define VIRTCHNL_VF_OFFLOAD_ADQ_V2		0X01000000
263 #define VIRTCHNL_VF_OFFLOAD_USO			0X02000000
264 #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	0X04000000
265 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		0X08000000
266 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF		0X10000000
267 	/* 0X20000000 is reserved */
268 #define VIRTCHNL_VF_CAP_DCF			0X40000000
269 	/* 0X80000000 is reserved */
270 
271 /* Define below the capability flags that are not offloads */
272 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		0x00000080
273 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
274 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
275 			       VIRTCHNL_VF_OFFLOAD_RSS_PF)
276 
277 struct virtchnl_vf_resource {
278 	u16 num_vsis;
279 	u16 num_queue_pairs;
280 	u16 max_vectors;
281 	u16 max_mtu;
282 
283 	u32 vf_cap_flags;
284 	u32 rss_key_size;
285 	u32 rss_lut_size;
286 
287 	struct virtchnl_vsi_resource vsi_res[1];
288 };
289 
290 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
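
/* Illustrative sketch (an assumption about typical usage, not a mandated
 * API): because vsi_res[] is declared with one element, a response buffer
 * for VIRTCHNL_OP_GET_VF_RESOURCES carrying 'num_vsis' entries is commonly
 * sized as the base structure plus (num_vsis - 1) extra VSI entries.
 */
static inline u32
virtchnl_example_vf_resource_len(u16 num_vsis)
{
	u32 len = (u32)sizeof(struct virtchnl_vf_resource);

	if (num_vsis > 1)
		len += (u32)(num_vsis - 1) *
		       (u32)sizeof(struct virtchnl_vsi_resource);
	return len;
}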
291 
292 /* VIRTCHNL_OP_CONFIG_TX_QUEUE
293  * VF sends this message to set up parameters for one TX queue.
294  * External data buffer contains one instance of virtchnl_txq_info.
295  * PF configures requested queue and returns a status code.
296  */
297 
298 /* Tx queue config info */
299 struct virtchnl_txq_info {
300 	u16 vsi_id;
301 	u16 queue_id;
302 	u16 ring_len;		/* number of descriptors, multiple of 8 */
303 	u16 headwb_enabled; /* deprecated with AVF 1.0 */
304 	u64 dma_ring_addr;
305 	u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
306 };
307 
308 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
309 
310 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
311  * VF sends this message to set up parameters for one RX queue.
312  * External data buffer contains one instance of virtchnl_rxq_info.
313  * PF configures requested queue and returns a status code. The
314  * crc_disable flag disables CRC stripping on the VF. Setting
315  * the crc_disable flag to 1 will disable CRC stripping for each
316  * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
317  * offload must have been set prior to sending this info or the PF
318  * will ignore the request. This flag should be set the same for
319  * all of the queues for a VF.
320  */
321 
322 /* Rx queue config info */
323 struct virtchnl_rxq_info {
324 	u16 vsi_id;
325 	u16 queue_id;
326 	u32 ring_len;		/* number of descriptors, multiple of 32 */
327 	u16 hdr_size;
328 	u16 splithdr_enabled; /* deprecated with AVF 1.0 */
329 	u32 databuffer_size;
330 	u32 max_pkt_size;
331 	u8 crc_disable;
332 	/* only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported */
333 	u8 rxdid;
334 	u8 pad1[2];
335 	u64 dma_ring_addr;
336 	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
337 	u32 pad2;
338 };
339 
340 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
341 
342 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES
343  * VF sends this message to set parameters for active TX and RX queues
344  * associated with the specified VSI.
345  * PF configures queues and returns status.
346  * If the number of queues specified is greater than the number of queues
347  * associated with the VSI, an error is returned and no queues are configured.
348  * NOTE: The VF is not required to configure all queues in a single request.
349  * It may send multiple messages. PF drivers must correctly handle all VF
350  * requests.
351  */
352 struct virtchnl_queue_pair_info {
353 	/* NOTE: vsi_id and queue_id should be identical for both queues. */
354 	struct virtchnl_txq_info txq;
355 	struct virtchnl_rxq_info rxq;
356 };
357 
358 VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
359 
360 struct virtchnl_vsi_queue_config_info {
361 	u16 vsi_id;
362 	u16 num_queue_pairs;
363 	u32 pad;
364 	struct virtchnl_queue_pair_info qpair[1];
365 };
366 
367 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
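
/* Illustrative sketch, not a mandated API: buffer length for a
 * VIRTCHNL_OP_CONFIG_VSI_QUEUES message carrying 'num_queue_pairs' entries.
 * It mirrors the length check performed by virtchnl_vc_validate_vf_msg()
 * later in this file (base structure plus num_queue_pairs qpair elements).
 * The caller must keep the total within a u16; see virtchnl_vector_limits.
 */
static inline u16
virtchnl_example_vsi_queue_config_len(u16 num_queue_pairs)
{
	return (u16)(sizeof(struct virtchnl_vsi_queue_config_info) +
		     num_queue_pairs *
		     sizeof(struct virtchnl_queue_pair_info));
}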
368 
369 /* VIRTCHNL_OP_REQUEST_QUEUES
370  * VF sends this message to request the PF to allocate additional queues to
371  * this VF.  Each VF gets a guaranteed number of queues on init, but any
372  * additional queues must be negotiated.  This is a best effort request as it
373  * is possible the PF does not have enough queues left to support the request.
374  * If the PF cannot support the number requested it will respond with the
375  * maximum number it is able to support.  If the request is successful, PF will
376  * then reset the VF to institute required changes.
377  */
378 
379 /* VF resource request */
380 struct virtchnl_vf_res_request {
381 	u16 num_queue_pairs;
382 };
383 
384 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
385  * VF uses this message to map vectors to queues.
386  * The rxq_map and txq_map fields are bitmaps used to indicate which queues
387  * are to be associated with the specified vector.
388  * The "other" causes are always mapped to vector 0. The VF may not request
389  * that vector 0 be used for traffic.
390  * PF configures interrupt mapping and returns status.
391  * NOTE: due to hardware requirements, all active queues (both TX and RX)
392  * should be mapped to interrupts, even if the driver intends to operate
393  * only in polling mode. In this case the interrupt may be disabled, but
394  * the ITR timer will still run to trigger writebacks.
395  */
396 struct virtchnl_vector_map {
397 	u16 vsi_id;
398 	u16 vector_id;
399 	u16 rxq_map;
400 	u16 txq_map;
401 	u16 rxitr_idx;
402 	u16 txitr_idx;
403 };
404 
405 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
406 
407 struct virtchnl_irq_map_info {
408 	u16 num_vectors;
409 	struct virtchnl_vector_map vecmap[1];
410 };
411 
412 VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
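
/* Illustrative sketch (assumption about typical usage, not a mandated API):
 * filling one virtchnl_vector_map entry so that RX and TX queue 0 raise the
 * given MSI-X vector, as described for VIRTCHNL_OP_CONFIG_IRQ_MAP above.
 * Remember that vector 0 is reserved for the "other" causes.
 */
static inline void
virtchnl_example_map_queue0(struct virtchnl_vector_map *vmap,
			    u16 vsi_id, u16 vector_id)
{
	vmap->vsi_id = vsi_id;
	vmap->vector_id = vector_id;
	vmap->rxq_map = (u16)BIT(0);	/* bitmap of RX queues on this vector */
	vmap->txq_map = (u16)BIT(0);	/* bitmap of TX queues on this vector */
	vmap->rxitr_idx = 0;
	vmap->txitr_idx = 0;
}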
413 
414 /* VIRTCHNL_OP_ENABLE_QUEUES
415  * VIRTCHNL_OP_DISABLE_QUEUES
416  * VF sends these messages to enable or disable TX/RX queue pairs.
417  * The queues fields are bitmaps indicating which queues to act upon.
418  * (Currently, we only support 16 queues per VF, but we make the field
419  * u32 to allow for expansion.)
420  * PF performs requested action and returns status.
421  * NOTE: The VF is not required to enable/disable all queues in a single
422  * request. It may send multiple messages.
423  * PF drivers must correctly handle all VF requests.
424  */
425 struct virtchnl_queue_select {
426 	u16 vsi_id;
427 	u16 pad;
428 	u32 rx_queues;
429 	u32 tx_queues;
430 };
431 
432 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
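
/* Illustrative sketch (assumption about typical usage, not a mandated API):
 * selecting the first 'num_queues' TX/RX queues for VIRTCHNL_OP_ENABLE_QUEUES
 * or VIRTCHNL_OP_DISABLE_QUEUES using the bitmap fields described above.
 */
static inline void
virtchnl_example_select_first_queues(struct virtchnl_queue_select *vqs,
				     u16 vsi_id, u8 num_queues)
{
	u32 mask = (num_queues >= 32) ? 0xFFFFFFFFU :
		   (u32)(BIT(num_queues) - 1);

	vqs->vsi_id = vsi_id;
	vqs->pad = 0;
	vqs->rx_queues = mask;
	vqs->tx_queues = mask;
}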
433 
434 /* VIRTCHNL_OP_GET_MAX_RSS_QREGION
435  *
436  * if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
437  * then this op must be supported.
438  *
439  * VF sends this message in order to query the max RSS queue region
440  * size supported by PF, when VIRTCHNL_VF_LARGE_NUM_QPAIRS is enabled.
441  * This information should be used when configuring the RSS LUT and/or
442  * configuring queue region based filters.
443  *
444  * The maximum RSS queue region is 2^qregion_width. So, a qregion_width
445  * of 6 would inform the VF that the PF supports a maximum RSS queue region
446  * of 64.
447  *
448  * A queue region represents a range of queues that can be used to configure
449  * a RSS LUT. For example, if a VF is given 64 queues, but only a max queue
450  * region size of 16 (i.e. 2^qregion_width = 16) then it will only be able
451  * to configure the RSS LUT with queue indices from 0 to 15. However, other
452  * filters can be used to direct packets to queues >15 via specifying a queue
453  * base/offset and queue region width.
454  */
455 struct virtchnl_max_rss_qregion {
456 	u16 vport_id;
457 	u16 qregion_width;
458 	u8 pad[4];
459 };
460 
461 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_max_rss_qregion);
462 
463 /* VIRTCHNL_OP_ADD_ETH_ADDR
464  * VF sends this message in order to add one or more unicast or multicast
465  * address filters for the specified VSI.
466  * PF adds the filters and returns status.
467  */
468 
469 /* VIRTCHNL_OP_DEL_ETH_ADDR
470  * VF sends this message in order to remove one or more unicast or multicast
471  * filters for the specified VSI.
472  * PF removes the filters and returns status.
473  */
474 
475 /* VIRTCHNL_ETHER_ADDR_LEGACY
476  * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
477  * bytes. Moving forward all VF drivers should not set type to
478  * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
479  * behavior. The control plane function (i.e. PF) can use a best effort method
480  * of tracking the primary/device unicast in this case, but there is no
481  * guarantee and functionality depends on the implementation of the PF.
482  */
483 
484 /* VIRTCHNL_ETHER_ADDR_PRIMARY
485  * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
486  * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
487  * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
488  * function (i.e. PF) to accurately track and use this MAC address for
489  * displaying on the host and for VM/function reset.
490  */
491 
492 /* VIRTCHNL_ETHER_ADDR_EXTRA
493  * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
494  * unicast and/or multicast filters that are being added/deleted via
495  * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
496  */
497 struct virtchnl_ether_addr {
498 	u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
499 	u8 type;
500 #define VIRTCHNL_ETHER_ADDR_LEGACY	0
501 #define VIRTCHNL_ETHER_ADDR_PRIMARY	1
502 #define VIRTCHNL_ETHER_ADDR_EXTRA	2
503 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK	3 /* first two bits of type are valid */
504 	u8 pad;
505 };
506 
507 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
508 
509 struct virtchnl_ether_addr_list {
510 	u16 vsi_id;
511 	u16 num_elements;
512 	struct virtchnl_ether_addr list[1];
513 };
514 
515 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
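
/* Illustrative sketch (assumption about typical usage, not a mandated API):
 * building a one-element VIRTCHNL_OP_ADD_ETH_ADDR list for the primary
 * unicast MAC, with @type set to VIRTCHNL_ETHER_ADDR_PRIMARY as recommended
 * above. 'mac' must point to VIRTCHNL_ETH_LENGTH_OF_ADDRESS bytes.
 */
static inline void
virtchnl_example_add_primary_mac(struct virtchnl_ether_addr_list *list,
				 u16 vsi_id, const u8 *mac)
{
	int i;

	list->vsi_id = vsi_id;
	list->num_elements = 1;
	for (i = 0; i < VIRTCHNL_ETH_LENGTH_OF_ADDRESS; i++)
		list->list[0].addr[i] = mac[i];
	list->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
	list->list[0].pad = 0;
}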
516 
517 /* VIRTCHNL_OP_ADD_VLAN
518  * VF sends this message to add one or more VLAN tag filters for receives.
519  * PF adds the filters and returns status.
520  * If a port VLAN is configured by the PF, this operation will return an
521  * error to the VF.
522  */
523 
524 /* VIRTCHNL_OP_DEL_VLAN
525  * VF sends this message to remove one or more VLAN tag filters for receives.
526  * PF removes the filters and returns status.
527  * If a port VLAN is configured by the PF, this operation will return an
528  * error to the VF.
529  */
530 
531 struct virtchnl_vlan_filter_list {
532 	u16 vsi_id;
533 	u16 num_elements;
534 	u16 vlan_id[1];
535 };
536 
537 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
538 
539 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
540  * VF sends VSI id and flags.
541  * PF returns status code in retval.
542  * Note: we assume that broadcast accept mode is always enabled.
543  */
544 struct virtchnl_promisc_info {
545 	u16 vsi_id;
546 	u16 flags;
547 };
548 
549 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
550 
551 #define FLAG_VF_UNICAST_PROMISC	0x00000001
552 #define FLAG_VF_MULTICAST_PROMISC	0x00000002
553 
554 /* VIRTCHNL_OP_GET_STATS
555  * VF sends this message to request stats for the selected VSI. VF uses
556  * the virtchnl_queue_select struct to specify the VSI. The queue_id
557  * field is ignored by the PF.
558  *
559  * PF replies with struct virtchnl_eth_stats in an external buffer.
560  */
561 
562 struct virtchnl_eth_stats {
563 	u64 rx_bytes;			/* received bytes */
564 	u64 rx_unicast;			/* received unicast pkts */
565 	u64 rx_multicast;		/* received multicast pkts */
566 	u64 rx_broadcast;		/* received broadcast pkts */
567 	u64 rx_discards;
568 	u64 rx_unknown_protocol;
569 	u64 tx_bytes;			/* transmitted bytes */
570 	u64 tx_unicast;			/* transmitted unicast pkts */
571 	u64 tx_multicast;		/* transmitted multicast pkts */
572 	u64 tx_broadcast;		/* transmitted broadcast pkts */
573 	u64 tx_discards;
574 	u64 tx_errors;
575 };
576 
577 /* VIRTCHNL_OP_CONFIG_RSS_KEY
578  * VIRTCHNL_OP_CONFIG_RSS_LUT
579  * VF sends these messages to configure RSS. Only supported if both PF
580  * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
581  * configuration negotiation. If this is the case, then the RSS fields in
582  * the VF resource struct are valid.
583  * Both the key and LUT are initialized to 0 by the PF, meaning that
584  * RSS is effectively disabled until set up by the VF.
585  */
586 struct virtchnl_rss_key {
587 	u16 vsi_id;
588 	u16 key_len;
589 	u8 key[1];         /* RSS hash key, packed bytes */
590 };
591 
592 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
593 
594 struct virtchnl_rss_lut {
595 	u16 vsi_id;
596 	u16 lut_entries;
597 	u8 lut[1];        /* RSS lookup table */
598 };
599 
600 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
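
/* Illustrative sketch, not a mandated API: message length for
 * VIRTCHNL_OP_CONFIG_RSS_KEY. The structure ends in a 1-byte array, so the
 * total length is the base structure plus (key_len - 1) extra bytes, which
 * mirrors the validation in virtchnl_vc_validate_vf_msg() later in this
 * file. The same pattern applies to virtchnl_rss_lut and lut_entries.
 */
static inline u16
virtchnl_example_rss_key_len(u16 key_len)
{
	u16 len = (u16)sizeof(struct virtchnl_rss_key);

	if (key_len > 1)
		len += key_len - 1;
	return len;
}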
601 
602 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS
603  * VIRTCHNL_OP_SET_RSS_HENA
604  * VF sends these messages to get and set the hash filter enable bits for RSS.
605  * By default, the PF sets these to all possible traffic types that the
606  * hardware supports. The VF can query this value if it wants to change the
607  * traffic types that are hashed by the hardware.
608  */
609 struct virtchnl_rss_hena {
610 	u64 hena;
611 };
612 
613 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
614 
615 /* Type of RSS algorithm */
616 enum virtchnl_rss_algorithm {
617 	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
618 	VIRTCHNL_RSS_ALG_XOR_ASYMMETRIC		= 1,
619 	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
620 	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC		= 3,
621 };
622 
623 /* These are used by the PF driver to enforce how many channels can be
624  * supported. When the ADQ_V2 capability is negotiated, up to 16 channels are
625  * allowed; otherwise the PF driver allows a maximum of 4 channels.
626  */
627 #define VIRTCHNL_MAX_ADQ_CHANNELS 4
628 #define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
629 
630 /* VIRTCHNL_OP_ENABLE_CHANNELS
631  * VIRTCHNL_OP_DISABLE_CHANNELS
632  * VF sends these messages to enable or disable channels based on
633  * the user specified queue count and queue offset for each traffic class.
634  * This struct encompasses all the information that the PF needs from
635  * VF to create a channel.
636  */
637 struct virtchnl_channel_info {
638 	u16 count; /* number of queues in a channel */
639 	u16 offset; /* queues in a channel start from 'offset' */
640 	u32 pad;
641 	u64 max_tx_rate;
642 };
643 
644 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
645 
646 struct virtchnl_tc_info {
647 	u32	num_tc;
648 	u32	pad;
649 	struct	virtchnl_channel_info list[1];
650 };
651 
652 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
653 
654 /* VIRTCHNL_ADD_CLOUD_FILTER
655  * VIRTCHNL_DEL_CLOUD_FILTER
656  * VF sends these messages to add or delete a cloud filter based on the
657  * user specified match and action filters. These structures encompass
658  * all the information that the PF needs from the VF to add/delete a
659  * cloud filter.
660  */
661 
662 struct virtchnl_l4_spec {
663 	u8	src_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
664 	u8	dst_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
665 	/* vlan_prio is part of this 16-bit field even from the OS perspective:
666 	 * vlan_id:12 is the actual VLAN ID, and bits 14..12 carry vlan_prio.
667 	 * If vlan_prio offload is added in the future, pass that information
668 	 * as part of the "vlan_id" field, bits 14..12.
669 	 */
670 	__be16	vlan_id;
671 	__be16	pad; /* reserved for future use */
672 	__be32	src_ip[4];
673 	__be32	dst_ip[4];
674 	__be16	src_port;
675 	__be16	dst_port;
676 };
677 
678 VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
679 
680 union virtchnl_flow_spec {
681 	struct	virtchnl_l4_spec tcp_spec;
682 	u8	buffer[128]; /* reserved for future use */
683 };
684 
685 VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
686 
687 enum virtchnl_action {
688 	/* action types */
689 	VIRTCHNL_ACTION_DROP = 0,
690 	VIRTCHNL_ACTION_TC_REDIRECT,
691 	VIRTCHNL_ACTION_PASSTHRU,
692 	VIRTCHNL_ACTION_QUEUE,
693 	VIRTCHNL_ACTION_Q_REGION,
694 	VIRTCHNL_ACTION_MARK,
695 	VIRTCHNL_ACTION_COUNT,
696 };
697 
698 enum virtchnl_flow_type {
699 	/* flow types */
700 	VIRTCHNL_TCP_V4_FLOW = 0,
701 	VIRTCHNL_TCP_V6_FLOW,
702 	VIRTCHNL_UDP_V4_FLOW,
703 	VIRTCHNL_UDP_V6_FLOW,
704 };
705 
706 struct virtchnl_filter {
707 	union	virtchnl_flow_spec data;
708 	union	virtchnl_flow_spec mask;
709 	enum	virtchnl_flow_type flow_type;
710 	enum	virtchnl_action action;
711 	u32	action_meta;
712 	u8	field_flags;
713 };
714 
715 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
716 
717 /* VIRTCHNL_OP_DCF_GET_VSI_MAP
718  * VF sends this message to get VSI mapping table.
719  * PF responds with an indirect message containing VF's
720  * HW VSI IDs.
721  * The index of vf_vsi array is the logical VF ID, the
722  * value of vf_vsi array is the VF's HW VSI ID with its
723  * valid configuration.
724  */
725 struct virtchnl_dcf_vsi_map {
726 	u16 pf_vsi;	/* PF's HW VSI ID */
727 	u16 num_vfs;	/* The actual number of VFs allocated */
728 #define VIRTCHNL_DCF_VF_VSI_ID_S	0
729 #define VIRTCHNL_DCF_VF_VSI_ID_M	(0xFFF << VIRTCHNL_DCF_VF_VSI_ID_S)
730 #define VIRTCHNL_DCF_VF_VSI_VALID	BIT(15)
731 	u16 vf_vsi[1];
732 };
733 
734 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_dcf_vsi_map);
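
/* Illustrative sketch (assumption about typical usage, not a mandated API):
 * extracting the HW VSI ID of logical VF 'vf_id' from a
 * VIRTCHNL_OP_DCF_GET_VSI_MAP response. Returns 1 and stores the ID when the
 * entry carries a valid configuration, 0 otherwise.
 */
static inline int
virtchnl_example_dcf_vf_vsi(const struct virtchnl_dcf_vsi_map *map,
			    u16 vf_id, u16 *hw_vsi_id)
{
	u16 entry;

	if (vf_id >= map->num_vfs)
		return 0;
	entry = map->vf_vsi[vf_id];
	if (!(entry & VIRTCHNL_DCF_VF_VSI_VALID))
		return 0;
	*hw_vsi_id = (u16)((entry & VIRTCHNL_DCF_VF_VSI_ID_M) >>
			   VIRTCHNL_DCF_VF_VSI_ID_S);
	return 1;
}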
735 
736 #define PKG_NAME_SIZE	32
737 #define DSN_SIZE	8
738 
739 struct pkg_version {
740 	u8 major;
741 	u8 minor;
742 	u8 update;
743 	u8 draft;
744 };
745 
746 VIRTCHNL_CHECK_STRUCT_LEN(4, pkg_version);
747 
748 struct virtchnl_pkg_info {
749 	struct pkg_version pkg_ver;
750 	u32 track_id;
751 	char pkg_name[PKG_NAME_SIZE];
752 	u8 dsn[DSN_SIZE];
753 };
754 
755 VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_pkg_info);
756 
757 struct virtchnl_supported_rxdids {
758 	u64 supported_rxdids;
759 };
760 
761 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_supported_rxdids);
762 
763 /* VIRTCHNL_OP_EVENT
764  * PF sends this message to inform the VF driver of events that may affect it.
765  * No direct response is expected from the VF, though it may generate other
766  * messages in response to this one.
767  */
768 enum virtchnl_event_codes {
769 	VIRTCHNL_EVENT_UNKNOWN = 0,
770 	VIRTCHNL_EVENT_LINK_CHANGE,
771 	VIRTCHNL_EVENT_RESET_IMPENDING,
772 	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
773 	VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE,
774 };
775 
776 #define PF_EVENT_SEVERITY_INFO		0
777 #define PF_EVENT_SEVERITY_ATTENTION	1
778 #define PF_EVENT_SEVERITY_ACTION_REQUIRED	2
779 #define PF_EVENT_SEVERITY_CERTAIN_DOOM	255
780 
781 struct virtchnl_pf_event {
782 	enum virtchnl_event_codes event;
783 	union {
784 		/* If the PF driver does not support the new speed reporting
785 		 * capabilities then use link_event else use link_event_adv to
786 		 * get the speed and link information. The ability to understand
787 		 * new speeds is indicated by setting the capability flag
788 		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
789 		 * in virtchnl_vf_resource struct and can be used to determine
790 		 * which link event struct to use below.
791 		 */
792 		struct {
793 			enum virtchnl_link_speed link_speed;
794 			u8 link_status;
795 		} link_event;
796 		struct {
797 			/* link_speed provided in Mbps */
798 			u32 link_speed;
799 			u8 link_status;
800 		} link_event_adv;
801 		struct {
802 			u16 vf_id;
803 			u16 vsi_id;
804 		} vf_vsi_map;
805 	} event_data;
806 
807 	int severity;
808 };
809 
810 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
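
/* Illustrative sketch (assumption about typical usage, not a mandated API):
 * picking the correct union member of a VIRTCHNL_EVENT_LINK_CHANGE event.
 * If the PF advertised VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags, the
 * speed is reported in Mbps via link_event_adv; otherwise the legacy
 * link_event member with enum virtchnl_link_speed is used.
 */
static inline u8
virtchnl_example_link_is_up(const struct virtchnl_pf_event *pfe,
			    u32 vf_cap_flags)
{
	if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		return pfe->event_data.link_event_adv.link_status;
	return pfe->event_data.link_event.link_status;
}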
811 
812 
813 /* VF reset states - these are written into the RSTAT register:
814  * VFGEN_RSTAT on the VF
815  * When the PF initiates a reset, it writes 0
816  * When the reset is complete, it writes 1
817  * When the PF detects that the VF has recovered, it writes 2
818  * VF checks this register periodically to determine if a reset has occurred,
819  * then polls it to know when the reset is complete.
820  * If either the PF or VF reads the register while the hardware
821  * is in a reset state, it will return DEADBEEF, which, when masked,
822  * will result in 3.
823  */
824 enum virtchnl_vfr_states {
825 	VIRTCHNL_VFR_INPROGRESS = 0,
826 	VIRTCHNL_VFR_COMPLETED,
827 	VIRTCHNL_VFR_VFACTIVE,
828 };
829 
830 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
831 #define PROTO_HDR_SHIFT			5
832 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
833 					(proto_hdr_type << PROTO_HDR_SHIFT)
834 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
835 
836 /* The VF uses these macros to configure each protocol header.
837  * Specify which protocol headers and protocol header fields based on
838  * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
839  * @param hdr: a struct of virtchnl_proto_hdr
840  * @param hdr_type: ETH/IPV4/TCP, etc
841  * @param field: SRC/DST/TEID/SPI, etc
842  */
843 #define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
844 	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
845 #define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
846 	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
847 #define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
848 	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
849 #define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr)	((hdr)->field_selector)
850 
851 #define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
852 	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
853 		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
854 #define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
855 	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
856 		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
857 
858 #define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
859 	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
860 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
861 	(((hdr)->type) >> PROTO_HDR_SHIFT)
862 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
863 	((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
864 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
865 	(VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
866 	 VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
867 
868 /* Protocol header type within a packet segment. A segment consists of one or
869  * more protocol headers that make up a logical group of protocol headers. Each
870  * logical group of protocol headers encapsulates or is encapsulated using/by
871  * tunneling or encapsulation protocols for network virtualization.
872  */
873 enum virtchnl_proto_hdr_type {
874 	VIRTCHNL_PROTO_HDR_NONE,
875 	VIRTCHNL_PROTO_HDR_ETH,
876 	VIRTCHNL_PROTO_HDR_S_VLAN,
877 	VIRTCHNL_PROTO_HDR_C_VLAN,
878 	VIRTCHNL_PROTO_HDR_IPV4,
879 	VIRTCHNL_PROTO_HDR_IPV6,
880 	VIRTCHNL_PROTO_HDR_TCP,
881 	VIRTCHNL_PROTO_HDR_UDP,
882 	VIRTCHNL_PROTO_HDR_SCTP,
883 	VIRTCHNL_PROTO_HDR_GTPU_IP,
884 	VIRTCHNL_PROTO_HDR_GTPU_EH,
885 	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
886 	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
887 	VIRTCHNL_PROTO_HDR_PPPOE,
888 	VIRTCHNL_PROTO_HDR_L2TPV3,
889 	VIRTCHNL_PROTO_HDR_ESP,
890 	VIRTCHNL_PROTO_HDR_AH,
891 	VIRTCHNL_PROTO_HDR_PFCP,
892 	VIRTCHNL_PROTO_HDR_GTPC,
893 };
894 
895 /* Protocol header field within a protocol header. */
896 enum virtchnl_proto_hdr_field {
897 	/* ETHER */
898 	VIRTCHNL_PROTO_HDR_ETH_SRC =
899 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
900 	VIRTCHNL_PROTO_HDR_ETH_DST,
901 	VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
902 	/* S-VLAN */
903 	VIRTCHNL_PROTO_HDR_S_VLAN_ID =
904 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
905 	/* C-VLAN */
906 	VIRTCHNL_PROTO_HDR_C_VLAN_ID =
907 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
908 	/* IPV4 */
909 	VIRTCHNL_PROTO_HDR_IPV4_SRC =
910 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
911 	VIRTCHNL_PROTO_HDR_IPV4_DST,
912 	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
913 	VIRTCHNL_PROTO_HDR_IPV4_TTL,
914 	VIRTCHNL_PROTO_HDR_IPV4_PROT,
915 	/* IPV6 */
916 	VIRTCHNL_PROTO_HDR_IPV6_SRC =
917 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
918 	VIRTCHNL_PROTO_HDR_IPV6_DST,
919 	VIRTCHNL_PROTO_HDR_IPV6_TC,
920 	VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
921 	VIRTCHNL_PROTO_HDR_IPV6_PROT,
922 	/* IPV6 Prefix */
923 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,
924 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,
925 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,
926 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,
927 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,
928 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,
929 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,
930 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,
931 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,
932 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,
933 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,
934 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,
935 	/* TCP */
936 	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
937 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
938 	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
939 	/* UDP */
940 	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
941 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
942 	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
943 	/* SCTP */
944 	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
945 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
946 	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
947 	/* GTPU_IP */
948 	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
949 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
950 	/* GTPU_EH */
951 	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
952 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
953 	VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
954 	/* PPPOE */
955 	VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
956 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
957 	/* L2TPV3 */
958 	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
959 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
960 	/* ESP */
961 	VIRTCHNL_PROTO_HDR_ESP_SPI =
962 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
963 	/* AH */
964 	VIRTCHNL_PROTO_HDR_AH_SPI =
965 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
966 	/* PFCP */
967 	VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
968 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
969 	VIRTCHNL_PROTO_HDR_PFCP_SEID,
970 	/* GTPC */
971 	VIRTCHNL_PROTO_HDR_GTPC_TEID =
972 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
973 };
974 
975 struct virtchnl_proto_hdr {
976 	enum virtchnl_proto_hdr_type type;
977 	u32 field_selector; /* a bit mask to select field for header type */
978 	u8 buffer[64];
979 	/**
980 	 * Binary buffer in network order for the specific header type.
981 	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
982 	 * header is expected to be copied into the buffer.
983 	 */
984 };
985 
986 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
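
/* Illustrative sketch (assumption about typical usage, not a mandated API):
 * using the macros above to describe an IPv4 header and select its SRC and
 * DST fields, e.g. as one entry of virtchnl_proto_hdrs for an RSS or FDIR
 * configuration.
 */
static inline void
virtchnl_example_select_ipv4_src_dst(struct virtchnl_proto_hdr *hdr)
{
	hdr->field_selector = 0;
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
}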
987 
988 struct virtchnl_proto_hdrs {
989 	u8 tunnel_level;
990 	/**
991 	 * Specify where the protocol headers start from.
992 	 * 0 - from the outer layer
993 	 * 1 - from the first inner layer
994 	 * 2 - from the second inner layer
995 	 * ....
996 	 **/
997 	int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
998 	struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
999 };
1000 
1001 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1002 
1003 struct virtchnl_rss_cfg {
1004 	struct virtchnl_proto_hdrs proto_hdrs;	   /* protocol headers */
1005 	enum virtchnl_rss_algorithm rss_algorithm; /* rss algorithm type */
1006 	u8 reserved[128];                          /* reserve for future */
1007 };
1008 
1009 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
1010 
1011 /* action configuration for FDIR */
1012 struct virtchnl_filter_action {
1013 	enum virtchnl_action type;
1014 	union {
1015 		/* used for queue and qgroup action */
1016 		struct {
1017 			u16 index;
1018 			u8 region;
1019 		} queue;
1020 		/* used for count action */
1021 		struct {
1022 			/* share counter ID with other flow rules */
1023 			u8 shared;
1024 			u32 id; /* counter ID */
1025 		} count;
1026 		/* used for mark action */
1027 		u32 mark_id;
1028 		u8 reserve[32];
1029 	} act_conf;
1030 };
1031 
1032 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1033 
1034 #define VIRTCHNL_MAX_NUM_ACTIONS  8
1035 
1036 struct virtchnl_filter_action_set {
1037 	/* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1038 	int count;
1039 	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1040 };
1041 
1042 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1043 
1044 /* pattern and action for FDIR rule */
1045 struct virtchnl_fdir_rule {
1046 	struct virtchnl_proto_hdrs proto_hdrs;
1047 	struct virtchnl_filter_action_set action_set;
1048 };
1049 
1050 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1051 
1052 /* Query information used to retrieve FDIR rule counters.
1053  * The PF fills out this structure when it reports or resets the counter.
1054  */
1055 struct virtchnl_fdir_query_info {
1056 	u32 match_packets_valid:1;
1057 	u32 match_bytes_valid:1;
1058 	u32 reserved:30;  /* Reserved, must be zero. */
1059 	u32 pad;
1060 	u64 matched_packets; /* Number of packets for this rule. */
1061 	u64 matched_bytes;   /* Number of bytes through this rule. */
1062 };
1063 
1064 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_fdir_query_info);
1065 
1066 /* Status returned to VF after VF requests FDIR commands
1067  * VIRTCHNL_FDIR_SUCCESS
1068  * The VF's FDIR related request was completed successfully by the PF.
1069  * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
1070  *
1071  * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
1072  * An OP_ADD_FDIR_FILTER request failed because no hardware resource is available.
1073  *
1074  * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
1075  * An OP_ADD_FDIR_FILTER request failed because the rule already exists.
1076  *
1077  * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
1078  * An OP_ADD_FDIR_FILTER request failed because it conflicts with an existing rule.
1079  *
1080  * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
1081  * An OP_DEL_FDIR_FILTER request failed because the rule does not exist.
1082  *
1083  * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
1084  * An OP_ADD_FDIR_FILTER request failed because parameter validation failed
1085  * or the HW does not support the rule.
1086  *
1087  * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
1088  * An OP_ADD/DEL_FDIR_FILTER request failed because programming the rule
1089  * timed out.
1090  *
1091  * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
1092  * An OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
1093  * for example, the VF queried the counter of a rule that has no counter action.
1094  */
1095 enum virtchnl_fdir_prgm_status {
1096 	VIRTCHNL_FDIR_SUCCESS = 0,
1097 	VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1098 	VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1099 	VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1100 	VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1101 	VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1102 	VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1103 	VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1104 };
1105 
1106 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1107  * VF sends this request to PF by filling out vsi_id,
1108  * validate_only and rule_cfg. PF will return flow_id
1109  * if the request is successful, and will return the status to the VF.
1110  */
1111 struct virtchnl_fdir_add {
1112 	u16 vsi_id;  /* INPUT */
1113 	/*
1114 	 * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
1115 	 * Validate and create share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.
1116 	 */
1117 	u16 validate_only; /* INPUT */
1118 	u32 flow_id;       /* OUTPUT */
1119 	struct virtchnl_fdir_rule rule_cfg; /* INPUT */
1120 	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
1121 };
1122 
1123 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
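
/* Illustrative sketch (assumption about typical usage, not a mandated API):
 * a minimal VIRTCHNL_OP_ADD_FDIR_FILTER request matching the outer IPv4
 * destination address and steering hits to 'rx_queue'. It assumes *add was
 * zero-initialized by the caller; the caller still has to place the IPv4
 * header bytes in proto_hdr[1].buffer and send the message.
 */
static inline void
virtchnl_example_fdir_ipv4_to_queue(struct virtchnl_fdir_add *add,
				    u16 vsi_id, u16 rx_queue)
{
	struct virtchnl_proto_hdrs *hdrs = &add->rule_cfg.proto_hdrs;
	struct virtchnl_filter_action *act =
		&add->rule_cfg.action_set.actions[0];

	add->vsi_id = vsi_id;
	add->validate_only = 0;		/* 1 would only validate the rule */

	hdrs->tunnel_level = 0;		/* match from the outer layer */
	hdrs->count = 2;
	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[0], ETH);
	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[1], IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdrs->proto_hdr[1], IPV4, DST);

	add->rule_cfg.action_set.count = 1;
	act->type = VIRTCHNL_ACTION_QUEUE;
	act->act_conf.queue.index = rx_queue;
	act->act_conf.queue.region = 0;
}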
1124 
1125 /* VIRTCHNL_OP_DEL_FDIR_FILTER
1126  * VF sends this request to PF by filling out vsi_id
1127  * and flow_id. PF will return del_status to VF.
1128  */
1129 struct virtchnl_fdir_del {
1130 	u16 vsi_id;  /* INPUT */
1131 	u16 pad;
1132 	u32 flow_id; /* INPUT */
1133 	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
1134 };
1135 
1136 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
1137 
1138 /* VIRTCHNL_OP_QUERY_FDIR_FILTER
1139  * VF sends this request to PF by filling out vsi_id,
1140  * flow_id and reset_counter. PF will return query_info
1141  * and query_status to VF.
1142  */
1143 struct virtchnl_fdir_query {
1144 	u16 vsi_id;   /* INPUT */
1145 	u16 pad1[3];
1146 	u32 flow_id;  /* INPUT */
1147 	u32 reset_counter:1; /* INPUT */
1148 	struct virtchnl_fdir_query_info query_info; /* OUTPUT */
1149 	enum virtchnl_fdir_prgm_status status;  /* OUTPUT */
1150 	u32 pad2;
1151 };
1152 
1153 VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_fdir_query);
1154 
1155 /* TX and RX queue types are valid in legacy as well as split queue models.
1156  * With the split queue model, 2 additional types are introduced: TX_COMPLETION
1157  * and RX_BUFFER. In the split queue model, RX corresponds to the queue where HW
1158  * posts completions.
1159  */
1160 enum virtchnl_queue_type {
1161 	VIRTCHNL_QUEUE_TYPE_TX			= 0,
1162 	VIRTCHNL_QUEUE_TYPE_RX			= 1,
1163 	VIRTCHNL_QUEUE_TYPE_TX_COMPLETION	= 2,
1164 	VIRTCHNL_QUEUE_TYPE_RX_BUFFER		= 3,
1165 	VIRTCHNL_QUEUE_TYPE_CONFIG_TX		= 4,
1166 	VIRTCHNL_QUEUE_TYPE_CONFIG_RX		= 5
1167 };
1168 
1169 
1170 /* structure to specify a chunk of contiguous queues */
1171 struct virtchnl_queue_chunk {
1172 	enum virtchnl_queue_type type;
1173 	u16 start_queue_id;
1174 	u16 num_queues;
1175 };
1176 
1177 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
1178 
1179 /* structure to specify several chunks of contiguous queues */
1180 struct virtchnl_queue_chunks {
1181 	u16 num_chunks;
1182 	u16 rsvd;
1183 	struct virtchnl_queue_chunk chunks[1];
1184 };
1185 
1186 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks);
1187 
1188 
1189 /* VIRTCHNL_OP_ENABLE_QUEUES_V2
1190  * VIRTCHNL_OP_DISABLE_QUEUES_V2
1191  * VIRTCHNL_OP_DEL_QUEUES
1192  *
1193  * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
1194  * then all of these ops are available.
1195  *
1196  * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
1197  * then VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 are
1198  * available.
1199  *
1200  * VF sends these messages to enable, disable or delete queues specified in
1201  * chunks. VF sends a virtchnl_del_ena_dis_queues struct to specify the queues
1202  * to be enabled/disabled/deleted. Also applicable to single queue RX or
1203  * TX. CP performs requested action and returns status.
1204  */
1205 struct virtchnl_del_ena_dis_queues {
1206 	u16 vport_id;
1207 	u16 pad;
1208 	struct virtchnl_queue_chunks chunks;
1209 };
1210 
1211 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_del_ena_dis_queues);
1212 
1213 /* Virtchannel interrupt throttling rate index */
1214 enum virtchnl_itr_idx {
1215 	VIRTCHNL_ITR_IDX_0	= 0,
1216 	VIRTCHNL_ITR_IDX_1	= 1,
1217 	VIRTCHNL_ITR_IDX_NO_ITR	= 3,
1218 };
1219 
1220 /* Queue to vector mapping */
1221 struct virtchnl_queue_vector {
1222 	u16 queue_id;
1223 	u16 vector_id;
1224 	u8 pad[4];
1225 	enum virtchnl_itr_idx itr_idx;
1226 	enum virtchnl_queue_type queue_type;
1227 };
1228 
1229 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
1230 
1231 /* VIRTCHNL_OP_MAP_QUEUE_VECTOR
1232  * VIRTCHNL_OP_UNMAP_QUEUE_VECTOR
1233  *
1234  * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
1235  * then all of these ops are available.
1236  *
1237  * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
1238  * then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available.
1239  *
1240  * VF sends this message to map or unmap queues to vectors and ITR index
1241  * registers. External data buffer contains virtchnl_queue_vector_maps structure
1242  * that contains num_qv_maps of virtchnl_queue_vector structures.
1243  * CP maps the requested queue vector maps after validating the queue and vector
1244  * ids and returns a status code.
1245  */
1246 struct virtchnl_queue_vector_maps {
1247 	u16 vport_id;
1248 	u16 num_qv_maps;
1249 	u8 pad[4];
1250 	struct virtchnl_queue_vector qv_maps[1];
1251 };
1252 
1253 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
1254 
1255 
1256 /* Since VF messages are limited by u16 size, precalculate the maximum possible
1257  * values of nested elements in virtchnl structures that the virtual channel can
1258  * possibly handle in a single message.
1259  */
1260 enum virtchnl_vector_limits {
1261 	VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX	=
1262 		((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
1263 		sizeof(struct virtchnl_queue_pair_info),
1264 
1265 	VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX		=
1266 		((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
1267 		sizeof(struct virtchnl_vector_map),
1268 
1269 	VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX	=
1270 		((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
1271 		sizeof(struct virtchnl_ether_addr),
1272 
1273 	VIRTCHNL_OP_ADD_DEL_VLAN_MAX		=
1274 		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
1275 		sizeof(u16),
1276 
1277 
1278 	VIRTCHNL_OP_ENABLE_CHANNELS_MAX		=
1279 		((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
1280 		sizeof(struct virtchnl_channel_info),
1281 
1282 	VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX	=
1283 		((u16)(~0) - sizeof(struct virtchnl_del_ena_dis_queues)) /
1284 		sizeof(struct virtchnl_queue_chunk),
1285 
1286 	VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX	=
1287 		((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) /
1288 		sizeof(struct virtchnl_queue_vector),
1289 };
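
/* Worked example of the precalculation above, using the structure sizes
 * asserted earlier in this file: virtchnl_vsi_queue_config_info is 72 bytes
 * and virtchnl_queue_pair_info is 64 bytes, so a u16-bounded message can
 * carry at most (65535 - 72) / 64 = 1022 queue pair entries, which is the
 * value of VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX.
 */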
1290 
1291 /**
1292  * virtchnl_vc_validate_vf_msg
1293  * @ver: Virtchnl version info
1294  * @v_opcode: Opcode for the message
1295  * @msg: pointer to the msg buffer
1296  * @msglen: msg length
1297  *
1298  * validate msg format against struct for each opcode
1299  */
1300 static inline int
1301 virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
1302 			    u8 *msg, u16 msglen)
1303 {
1304 	bool err_msg_format = false;
1305 	u32 valid_len = 0;
1306 
1307 	/* Validate message length. */
1308 	switch (v_opcode) {
1309 	case VIRTCHNL_OP_VERSION:
1310 		valid_len = sizeof(struct virtchnl_version_info);
1311 		break;
1312 	case VIRTCHNL_OP_RESET_VF:
1313 		break;
1314 	case VIRTCHNL_OP_GET_VF_RESOURCES:
1315 		if (VF_IS_V11(ver))
1316 			valid_len = sizeof(u32);
1317 		break;
1318 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1319 		valid_len = sizeof(struct virtchnl_txq_info);
1320 		break;
1321 	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1322 		valid_len = sizeof(struct virtchnl_rxq_info);
1323 		break;
1324 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1325 		valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
1326 		if (msglen >= valid_len) {
1327 			struct virtchnl_vsi_queue_config_info *vqc =
1328 			    (struct virtchnl_vsi_queue_config_info *)msg;
1329 
1330 			if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
1331 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
1332 				err_msg_format = true;
1333 				break;
1334 			}
1335 
1336 			valid_len += (vqc->num_queue_pairs *
1337 				      sizeof(struct
1338 					     virtchnl_queue_pair_info));
1339 		}
1340 		break;
1341 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1342 		valid_len = sizeof(struct virtchnl_irq_map_info);
1343 		if (msglen >= valid_len) {
1344 			struct virtchnl_irq_map_info *vimi =
1345 			    (struct virtchnl_irq_map_info *)msg;
1346 
1347 			if (vimi->num_vectors == 0 || vimi->num_vectors >
1348 			    VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
1349 				err_msg_format = true;
1350 				break;
1351 			}
1352 
1353 			valid_len += (vimi->num_vectors *
1354 				      sizeof(struct virtchnl_vector_map));
1355 		}
1356 		break;
1357 	case VIRTCHNL_OP_ENABLE_QUEUES:
1358 	case VIRTCHNL_OP_DISABLE_QUEUES:
1359 		valid_len = sizeof(struct virtchnl_queue_select);
1360 		break;
1361 	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
1362 		break;
1363 	case VIRTCHNL_OP_ADD_ETH_ADDR:
1364 	case VIRTCHNL_OP_DEL_ETH_ADDR:
1365 		valid_len = sizeof(struct virtchnl_ether_addr_list);
1366 		if (msglen >= valid_len) {
1367 			struct virtchnl_ether_addr_list *veal =
1368 			    (struct virtchnl_ether_addr_list *)msg;
1369 
1370 			if (veal->num_elements == 0 || veal->num_elements >
1371 			    VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
1372 				err_msg_format = true;
1373 				break;
1374 			}
1375 
1376 			valid_len += veal->num_elements *
1377 			    sizeof(struct virtchnl_ether_addr);
1378 		}
1379 		break;
1380 	case VIRTCHNL_OP_ADD_VLAN:
1381 	case VIRTCHNL_OP_DEL_VLAN:
1382 		valid_len = sizeof(struct virtchnl_vlan_filter_list);
1383 		if (msglen >= valid_len) {
1384 			struct virtchnl_vlan_filter_list *vfl =
1385 			    (struct virtchnl_vlan_filter_list *)msg;
1386 
1387 			if (vfl->num_elements == 0 || vfl->num_elements >
1388 			    VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
1389 				err_msg_format = true;
1390 				break;
1391 			}
1392 
1393 			valid_len += vfl->num_elements * sizeof(u16);
1394 		}
1395 		break;
1396 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1397 		valid_len = sizeof(struct virtchnl_promisc_info);
1398 		break;
1399 	case VIRTCHNL_OP_GET_STATS:
1400 		valid_len = sizeof(struct virtchnl_queue_select);
1401 		break;
1402 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
1403 		valid_len = sizeof(struct virtchnl_rss_key);
1404 		if (msglen >= valid_len) {
1405 			struct virtchnl_rss_key *vrk =
1406 				(struct virtchnl_rss_key *)msg;
1407 
1408 			if (vrk->key_len == 0) {
1409 				/* zero length is allowed as input */
1410 				break;
1411 			}
1412 
1413 			valid_len += vrk->key_len - 1;
1414 		}
1415 		break;
1416 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
1417 		valid_len = sizeof(struct virtchnl_rss_lut);
1418 		if (msglen >= valid_len) {
1419 			struct virtchnl_rss_lut *vrl =
1420 				(struct virtchnl_rss_lut *)msg;
1421 
1422 			if (vrl->lut_entries == 0) {
1423 				/* zero entries is allowed as input */
1424 				break;
1425 			}
1426 
1427 			valid_len += vrl->lut_entries - 1;
1428 		}
1429 		break;
1430 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
1431 		break;
1432 	case VIRTCHNL_OP_SET_RSS_HENA:
1433 		valid_len = sizeof(struct virtchnl_rss_hena);
1434 		break;
1435 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1436 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1437 		break;
1438 	case VIRTCHNL_OP_REQUEST_QUEUES:
1439 		valid_len = sizeof(struct virtchnl_vf_res_request);
1440 		break;
1441 	case VIRTCHNL_OP_ENABLE_CHANNELS:
1442 		valid_len = sizeof(struct virtchnl_tc_info);
1443 		if (msglen >= valid_len) {
1444 			struct virtchnl_tc_info *vti =
1445 				(struct virtchnl_tc_info *)msg;
1446 
1447 			if (vti->num_tc == 0 || vti->num_tc >
1448 			    VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
1449 				err_msg_format = true;
1450 				break;
1451 			}
1452 
1453 			valid_len += (vti->num_tc - 1) *
1454 				     sizeof(struct virtchnl_channel_info);
1455 		}
1456 		break;
1457 	case VIRTCHNL_OP_DISABLE_CHANNELS:
1458 		break;
1459 	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
1460 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
1461 		valid_len = sizeof(struct virtchnl_filter);
1462 		break;
1463 	case VIRTCHNL_OP_DCF_CMD_DESC:
1464 	case VIRTCHNL_OP_DCF_CMD_BUFF:
1465 		/* These two opcodes are specifically for handling the AdminQ command,
1466 		 * so the validation needs to be done in PF's context.
1467 		 */
1468 		valid_len = msglen;
1469 		break;
1470 	case VIRTCHNL_OP_DCF_DISABLE:
1471 	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
1472 	case VIRTCHNL_OP_DCF_GET_PKG_INFO:
1473 		break;
1474 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
1475 		break;
1476 	case VIRTCHNL_OP_ADD_RSS_CFG:
1477 	case VIRTCHNL_OP_DEL_RSS_CFG:
1478 		valid_len = sizeof(struct virtchnl_rss_cfg);
1479 		break;
1480 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
1481 		valid_len = sizeof(struct virtchnl_fdir_add);
1482 		break;
1483 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
1484 		valid_len = sizeof(struct virtchnl_fdir_del);
1485 		break;
1486 	case VIRTCHNL_OP_QUERY_FDIR_FILTER:
1487 		valid_len = sizeof(struct virtchnl_fdir_query);
1488 		break;
1489 	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
1490 	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
1491 		valid_len = sizeof(struct virtchnl_del_ena_dis_queues);
1492 		if (msglen >= valid_len) {
1493 			struct virtchnl_del_ena_dis_queues *qs =
1494 				(struct virtchnl_del_ena_dis_queues *)msg;
1495 			if (qs->chunks.num_chunks == 0 ||
1496 			    qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) {
1497 				err_msg_format = true;
1498 				break;
1499 			}
1500 			valid_len += (qs->chunks.num_chunks - 1) *
1501 				      sizeof(struct virtchnl_queue_chunk);
1502 		}
1503 		break;
1504 	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
1505 		valid_len = sizeof(struct virtchnl_queue_vector_maps);
1506 		if (msglen >= valid_len) {
1507 			struct virtchnl_queue_vector_maps *v_qp =
1508 				(struct virtchnl_queue_vector_maps *)msg;
1509 			if (v_qp->num_qv_maps == 0 ||
1510 			    v_qp->num_qv_maps > VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {
1511 				err_msg_format = true;
1512 				break;
1513 			}
1514 			valid_len += (v_qp->num_qv_maps - 1) *
1515 				      sizeof(struct virtchnl_queue_vector);
1516 		}
1517 		break;
1518 	/* These are always errors coming from the VF. */
1519 	case VIRTCHNL_OP_EVENT:
1520 	case VIRTCHNL_OP_UNKNOWN:
1521 	default:
1522 		return VIRTCHNL_STATUS_ERR_PARAM;
1523 	}
1524 	/* few more checks */
1525 	if (err_msg_format || valid_len != msglen)
1526 		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
1527 
1528 	return 0;
1529 }
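
/* Illustrative sketch (assumption about typical usage, not a mandated API):
 * a VF driver can run the same length check on an outgoing request before
 * posting it to the admin queue, e.g. for a VIRTCHNL_OP_ENABLE_QUEUES
 * message built from virtchnl_queue_select.
 */
static inline int
virtchnl_example_check_enable_queues(struct virtchnl_version_info *ver,
				     struct virtchnl_queue_select *vqs)
{
	return virtchnl_vc_validate_vf_msg(ver, VIRTCHNL_OP_ENABLE_QUEUES,
					   (u8 *)vqs, (u16)sizeof(*vqs));
}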
1530 #endif /* _VIRTCHNL_H_ */
1531