1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2024 Intel Corporation
3 */
4
5 #ifndef _VIRTCHNL2_H_
6 #define _VIRTCHNL2_H_
7
8 /* All opcodes associated with virtchnl 2 are prefixed with virtchnl2 or
9 * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
10 * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
11 *
12 * PF/VF uses the virtchnl interface defined in this header file to communicate
13 * with device Control Plane (CP). Driver and the CP may run on different
14 * platforms with different endianness. To avoid byte order discrepancies,
15 * all the structures in this header follow little-endian format.
16 *
17 * This is an interface definition file where existing enums and their values
18 * must remain unchanged over time, so we specify explicit values for all enums.
19 */
20
21 #include "virtchnl2_lan_desc.h"
22
/**
 * enum virtchnl2_status - Error codes.
 * @VIRTCHNL2_STATUS_SUCCESS: Success
 * @VIRTCHNL2_STATUS_ERR_EPERM: Operation not permitted, used in case of command
 *                              not permitted for sender
 * @VIRTCHNL2_STATUS_ERR_ESRCH: Bad opcode - virtchnl interface problem
 * @VIRTCHNL2_STATUS_ERR_EIO: I/O error - HW access error
 * @VIRTCHNL2_STATUS_ERR_ENXIO: No such resource - Referenced resource is not
 *                              allocated
 * @VIRTCHNL2_STATUS_ERR_EACCES: Permission denied - Resource is not permitted
 *                               to caller
 * @VIRTCHNL2_STATUS_ERR_EBUSY: Device or resource busy - In case shared
 *                              resource is in use by others
 * @VIRTCHNL2_STATUS_ERR_EEXIST: Object already exists and not free
 * @VIRTCHNL2_STATUS_ERR_EINVAL: Invalid input argument in command
 * @VIRTCHNL2_STATUS_ERR_ENOSPC: No space left or allocation failure
 * @VIRTCHNL2_STATUS_ERR_ERANGE: Parameter out of range
 * @VIRTCHNL2_STATUS_ERR_EMODE: Operation not allowed in current dev mode
 * @VIRTCHNL2_STATUS_ERR_ESM: State Machine error - Command sequence problem
 *
 * Values below 200 mirror the matching POSIX errno numbers (EPERM=1, ESRCH=3,
 * EIO=5, ...); 200 and above are virtchnl2-specific. These are wire-protocol
 * values and must never be renumbered.
 */
enum virtchnl2_status {
	VIRTCHNL2_STATUS_SUCCESS = 0,
	VIRTCHNL2_STATUS_ERR_EPERM = 1,
	VIRTCHNL2_STATUS_ERR_ESRCH = 3,
	VIRTCHNL2_STATUS_ERR_EIO = 5,
	VIRTCHNL2_STATUS_ERR_ENXIO = 6,
	VIRTCHNL2_STATUS_ERR_EACCES = 13,
	VIRTCHNL2_STATUS_ERR_EBUSY = 16,
	VIRTCHNL2_STATUS_ERR_EEXIST = 17,
	VIRTCHNL2_STATUS_ERR_EINVAL = 22,
	VIRTCHNL2_STATUS_ERR_ENOSPC = 28,
	VIRTCHNL2_STATUS_ERR_ERANGE = 34,
	VIRTCHNL2_STATUS_ERR_EMODE = 200,
	VIRTCHNL2_STATUS_ERR_ESM = 201,
};
58
/**
 * VIRTCHNL2_CHECK_STRUCT_LEN - Compile-time structure size check.
 * @n: Expected size of struct @X in bytes
 * @X: Struct tag (without the 'struct' keyword)
 *
 * This macro is used to generate compilation errors if a structure
 * is not exactly the correct length. Since all virtchnl2 structures are
 * exchanged over the PF/VF<->CP mailbox, any size drift is an ABI break.
 */
#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X) \
static_assert((n) == sizeof(struct X), \
"Structure length does not match with the expected value")
/* Variant for structures ending in a variable-length array: @T names the
 * trailing array member. Here it only checks the fixed size; @T is unused.
 */
#define VIRTCHNL2_CHECK_STRUCT_VAR_LEN(n, X, T) \
VIRTCHNL2_CHECK_STRUCT_LEN(n, X)

/* Declared length of trailing variable-size arrays (flexible-array-member
 * emulation; actual element count is carried in a num_* field).
 */
#define STRUCT_VAR_LEN 1
70
/**
 * enum virtchnl2_op - Virtchnl 2 message opcodes.
 *
 * New major set of opcodes introduced and so leaving room for
 * old misc opcodes to be added in future. Also these opcodes may only
 * be used if both the PF and VF have successfully negotiated the
 * VIRTCHNL version as 2.0 during VIRTCHNL2_OP_VERSION exchange.
 *
 * Opcodes are wire-protocol values; never renumber or reuse a value.
 */
enum virtchnl2_op {
	VIRTCHNL2_OP_UNKNOWN = 0,
	VIRTCHNL2_OP_VERSION = 1,
	VIRTCHNL2_OP_GET_CAPS = 500,
	VIRTCHNL2_OP_CREATE_VPORT = 501,
	VIRTCHNL2_OP_DESTROY_VPORT = 502,
	VIRTCHNL2_OP_ENABLE_VPORT = 503,
	VIRTCHNL2_OP_DISABLE_VPORT = 504,
	VIRTCHNL2_OP_CONFIG_TX_QUEUES = 505,
	VIRTCHNL2_OP_CONFIG_RX_QUEUES = 506,
	VIRTCHNL2_OP_ENABLE_QUEUES = 507,
	VIRTCHNL2_OP_DISABLE_QUEUES = 508,
	VIRTCHNL2_OP_ADD_QUEUES = 509,
	VIRTCHNL2_OP_DEL_QUEUES = 510,
	VIRTCHNL2_OP_MAP_QUEUE_VECTOR = 511,
	VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR = 512,
	VIRTCHNL2_OP_GET_RSS_KEY = 513,
	VIRTCHNL2_OP_SET_RSS_KEY = 514,
	VIRTCHNL2_OP_GET_RSS_LUT = 515,
	VIRTCHNL2_OP_SET_RSS_LUT = 516,
	VIRTCHNL2_OP_GET_RSS_HASH = 517,
	VIRTCHNL2_OP_SET_RSS_HASH = 518,
	VIRTCHNL2_OP_SET_SRIOV_VFS = 519,
	VIRTCHNL2_OP_ALLOC_VECTORS = 520,
	VIRTCHNL2_OP_DEALLOC_VECTORS = 521,
	VIRTCHNL2_OP_EVENT = 522,
	VIRTCHNL2_OP_GET_STATS = 523,
	VIRTCHNL2_OP_RESET_VF = 524,
	/* Opcode 525 is reserved */
	VIRTCHNL2_OP_GET_PTYPE_INFO = 526,
	/* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
	 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
	 */
	/* Opcodes 529, 530, and 531 are reserved */
	VIRTCHNL2_OP_NON_FLEX_CREATE_ADI = 532,
	VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI = 533,
	VIRTCHNL2_OP_LOOPBACK = 534,
	VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
	VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
	VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537,
	VIRTCHNL2_OP_ADD_QUEUE_GROUPS = 538,
	VIRTCHNL2_OP_DEL_QUEUE_GROUPS = 539,
	VIRTCHNL2_OP_GET_PORT_STATS = 540,
	/* TimeSync opcodes */
	VIRTCHNL2_OP_GET_PTP_CAPS = 541,
	VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES = 542,
};
124
125 #define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX 0xFFFF
126
/**
 * enum virtchnl2_vport_type - Type of virtual port
 * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type
 * @VIRTCHNL2_VPORT_TYPE_SRIOV: SRIOV virtual port type
 * @VIRTCHNL2_VPORT_TYPE_SIOV: SIOV virtual port type
 * @VIRTCHNL2_VPORT_TYPE_SUBDEV: Subdevice virtual port type
 * @VIRTCHNL2_VPORT_TYPE_MNG: Management virtual port type
 *
 * Carried in the vport_type field of struct virtchnl2_create_vport.
 */
enum virtchnl2_vport_type {
	VIRTCHNL2_VPORT_TYPE_DEFAULT = 0,
	VIRTCHNL2_VPORT_TYPE_SRIOV = 1,
	VIRTCHNL2_VPORT_TYPE_SIOV = 2,
	VIRTCHNL2_VPORT_TYPE_SUBDEV = 3,
	VIRTCHNL2_VPORT_TYPE_MNG = 4,
};
142
/**
 * enum virtchnl2_queue_model - Type of queue model
 * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model
 * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model
 *
 * In the single queue model, the same transmit descriptor queue is used by
 * software to post descriptors to hardware and by hardware to post completed
 * descriptors to software.
 * Likewise, the same receive descriptor queue is used by hardware to post
 * completions to software and by software to post buffers to hardware.
 *
 * In the split queue model, hardware uses transmit completion queues to post
 * descriptor/buffer completions to software, while software uses transmit
 * descriptor queues to post descriptors to hardware.
 * Likewise, hardware posts descriptor completions to the receive descriptor
 * queue, while software uses receive buffer queues to post buffers to hardware.
 *
 * Carried in the txq_model/rxq_model fields of struct virtchnl2_create_vport.
 */
enum virtchnl2_queue_model {
	VIRTCHNL2_QUEUE_MODEL_SINGLE = 0,
	VIRTCHNL2_QUEUE_MODEL_SPLIT = 1,
};
164
/* Checksum offload capability flags (negotiated via the csum_caps field of
 * struct virtchnl2_get_capabilities).
 */
enum virtchnl2_cap_txrx_csum {
	/* Tx checksum insertion offloads */
	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 = BIT(0),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP = BIT(1),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP = BIT(2),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP = BIT(3),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP = BIT(4),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP = BIT(5),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP = BIT(6),
	VIRTCHNL2_CAP_TX_CSUM_GENERIC = BIT(7),
	/* Rx checksum validation offloads */
	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 = BIT(8),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP = BIT(9),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP = BIT(10),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP = BIT(11),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP = BIT(12),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP = BIT(13),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP = BIT(14),
	VIRTCHNL2_CAP_RX_CSUM_GENERIC = BIT(15),
	/* Checksum offloads for tunnelled packets (single/double encap) */
	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL = BIT(16),
	VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL = BIT(17),
	VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL = BIT(18),
	VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL = BIT(19),
	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL = BIT(20),
	VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL = BIT(21),
	VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL = BIT(22),
	VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL = BIT(23),
};
192
/* Segmentation offload capability flags (negotiated via the seg_caps field of
 * struct virtchnl2_get_capabilities).
 */
enum virtchnl2_cap_seg {
	VIRTCHNL2_CAP_SEG_IPV4_TCP = BIT(0),
	VIRTCHNL2_CAP_SEG_IPV4_UDP = BIT(1),
	VIRTCHNL2_CAP_SEG_IPV4_SCTP = BIT(2),
	VIRTCHNL2_CAP_SEG_IPV6_TCP = BIT(3),
	VIRTCHNL2_CAP_SEG_IPV6_UDP = BIT(4),
	VIRTCHNL2_CAP_SEG_IPV6_SCTP = BIT(5),
	VIRTCHNL2_CAP_SEG_GENERIC = BIT(6),
	/* Segmentation of tunnelled packets (single/double encap) */
	VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL = BIT(7),
	VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8),
};
205
/* Receive Side Scaling Flow type capability flags (negotiated via the
 * rss_caps field of struct virtchnl2_get_capabilities). Bit layout matches
 * enum virtchnl2_flow_types.
 */
enum virtchnl2_cap_rss {
	VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0),
	VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1),
	VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2),
	VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3),
	VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4),
	VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5),
	VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6),
	VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7),
	VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8),
	VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9),
	VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10),
	VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11),
	VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12),
	VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13),
};
223
/* Header split capability flags (negotiated via the hsplit_caps field of
 * struct virtchnl2_get_capabilities). Each bit selects the protocol layer
 * at which the Rx packet may be split between header and data buffers.
 */
enum virtchnl2_cap_rx_hsplit_at {
	/* For prepended metadata */
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L2 = BIT(0),
	/* All VLANs go into header buffer */
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L3 = BIT(1),
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 = BIT(2),
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6 = BIT(3),
};
233
/* Receive Side Coalescing offload capability flags (negotiated via the
 * rsc_caps field of struct virtchnl2_get_capabilities).
 */
enum virtchnl2_cap_rsc {
	VIRTCHNL2_CAP_RSC_IPV4_TCP = BIT(0),
	VIRTCHNL2_CAP_RSC_IPV4_SCTP = BIT(1),
	VIRTCHNL2_CAP_RSC_IPV6_TCP = BIT(2),
	VIRTCHNL2_CAP_RSC_IPV6_SCTP = BIT(3),
};
241
/* Other capability flags (negotiated via the 64-bit other_caps field of
 * struct virtchnl2_get_capabilities, hence BIT_ULL).
 */
enum virtchnl2_cap_other {
	VIRTCHNL2_CAP_RDMA = BIT_ULL(0),
	VIRTCHNL2_CAP_SRIOV = BIT_ULL(1),
	VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2),
	VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
	VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
	VIRTCHNL2_CAP_CRC = BIT_ULL(5),
	VIRTCHNL2_CAP_FLOW_STEER = BIT_ULL(6),
	VIRTCHNL2_CAP_WB_ON_ITR = BIT_ULL(7),
	VIRTCHNL2_CAP_PROMISC = BIT_ULL(8),
	VIRTCHNL2_CAP_LINK_SPEED = BIT_ULL(9),
	VIRTCHNL2_CAP_INLINE_IPSEC = BIT_ULL(10),
	VIRTCHNL2_CAP_LARGE_NUM_QUEUES = BIT_ULL(11),
	/* Require additional info */
	VIRTCHNL2_CAP_VLAN = BIT_ULL(12),
	VIRTCHNL2_CAP_PTP = BIT_ULL(13),
	/* NOTE(review): BIT_ULL(14) is skipped here — presumably reserved for a
	 * capability defined elsewhere; confirm before reusing.
	 */
	VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15),
	VIRTCHNL2_CAP_FDIR = BIT_ULL(16),
	VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
	VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
	VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
	/* Enable miss completion types plus ability to detect a miss completion
	 * if a reserved bit is set in a standard completion's tag.
	 */
	VIRTCHNL2_CAP_MISS_COMPL_TAG = BIT_ULL(20),
	/* This must be the last capability */
	VIRTCHNL2_CAP_OEM = BIT_ULL(63),
};
271
/**
 * enum virtchnl2_action_types - Available actions for sideband flow steering
 * @VIRTCHNL2_ACTION_DROP: Drop the packet
 * @VIRTCHNL2_ACTION_PASSTHRU: Forward the packet to the next classifier/stage
 * @VIRTCHNL2_ACTION_QUEUE: Forward the packet to a receive queue
 * @VIRTCHNL2_ACTION_Q_GROUP: Forward the packet to a receive queue group
 * @VIRTCHNL2_ACTION_MARK: Mark the packet with specific marker value
 * @VIRTCHNL2_ACTION_COUNT: Increment the corresponding counter
 *
 * Advertised via the sideband_flow_actions field of
 * struct virtchnl2_create_vport.
 */
enum virtchnl2_action_types {
	VIRTCHNL2_ACTION_DROP = BIT(0),
	VIRTCHNL2_ACTION_PASSTHRU = BIT(1),
	VIRTCHNL2_ACTION_QUEUE = BIT(2),
	VIRTCHNL2_ACTION_Q_GROUP = BIT(3),
	VIRTCHNL2_ACTION_MARK = BIT(4),
	VIRTCHNL2_ACTION_COUNT = BIT(5),
};
290
/* Flow type capabilities for Flow Steering and Receive-Side Scaling.
 * Bit layout matches enum virtchnl2_cap_rss; advertised via the
 * inline_flow_types/sideband_flow_types fields of
 * struct virtchnl2_create_vport.
 */
enum virtchnl2_flow_types {
	VIRTCHNL2_FLOW_IPV4_TCP = BIT(0),
	VIRTCHNL2_FLOW_IPV4_UDP = BIT(1),
	VIRTCHNL2_FLOW_IPV4_SCTP = BIT(2),
	VIRTCHNL2_FLOW_IPV4_OTHER = BIT(3),
	VIRTCHNL2_FLOW_IPV6_TCP = BIT(4),
	VIRTCHNL2_FLOW_IPV6_UDP = BIT(5),
	VIRTCHNL2_FLOW_IPV6_SCTP = BIT(6),
	VIRTCHNL2_FLOW_IPV6_OTHER = BIT(7),
	VIRTCHNL2_FLOW_IPV4_AH = BIT(8),
	VIRTCHNL2_FLOW_IPV4_ESP = BIT(9),
	VIRTCHNL2_FLOW_IPV4_AH_ESP = BIT(10),
	VIRTCHNL2_FLOW_IPV6_AH = BIT(11),
	VIRTCHNL2_FLOW_IPV6_ESP = BIT(12),
	VIRTCHNL2_FLOW_IPV6_AH_ESP = BIT(13),
};
308
/**
 * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes
 * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode i.e. inorder
 *                                  completions where descriptors and buffers
 *                                  are completed at the same time.
 * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out of order
 *                                 packet processing where descriptors are
 *                                 cleaned in order, but buffers can be
 *                                 completed out of order.
 */
enum virtchnl2_txq_sched_mode {
	VIRTCHNL2_TXQ_SCHED_MODE_QUEUE = 0,
	VIRTCHNL2_TXQ_SCHED_MODE_FLOW = 1,
};
323
/**
 * enum virtchnl2_txq_flags - Transmit Queue feature flags
 * @VIRTCHNL2_TXQ_ENABLE_MISS_COMPL: Enable rule miss completion type. Packet
 *                                   completion for a packet sent on exception
 *                                   path and only relevant in flow scheduling
 *                                   mode (see enum virtchnl2_txq_sched_mode).
 */
enum virtchnl2_txq_flags {
	VIRTCHNL2_TXQ_ENABLE_MISS_COMPL = BIT(0),
};
334
/**
 * enum virtchnl2_peer_type - Transmit mailbox peer type
 * @VIRTCHNL2_RDMA_CPF: RDMA peer type
 * @VIRTCHNL2_NVME_CPF: NVME peer type
 * @VIRTCHNL2_ATE_CPF: ATE peer type
 * @VIRTCHNL2_LCE_CPF: LCE peer type
 */
enum virtchnl2_peer_type {
	VIRTCHNL2_RDMA_CPF = 0,
	VIRTCHNL2_NVME_CPF = 1,
	VIRTCHNL2_ATE_CPF = 2,
	VIRTCHNL2_LCE_CPF = 3,
};
348
/**
 * enum virtchnl2_rxq_flags - Receive Queue Feature flags
 * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag
 * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag
 * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed
 *                                      by hardware immediately after processing
 *                                      each packet.
 * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size
 * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size
 */
enum virtchnl2_rxq_flags {
	VIRTCHNL2_RXQ_RSC = BIT(0),
	VIRTCHNL2_RXQ_HDR_SPLIT = BIT(1),
	VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK = BIT(2),
	VIRTCHNL2_RX_DESC_SIZE_16BYTE = BIT(3),
	VIRTCHNL2_RX_DESC_SIZE_32BYTE = BIT(4),
};
366
/**
 * enum virtchnl2_rss_alg - Type of RSS algorithm
 * @VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC: TOEPLITZ_ASYMMETRIC algorithm
 * @VIRTCHNL2_RSS_ALG_R_ASYMMETRIC: R_ASYMMETRIC algorithm
 * @VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC: TOEPLITZ_SYMMETRIC algorithm
 * @VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC: XOR_SYMMETRIC algorithm
 *
 * Carried in the rss_algorithm field of struct virtchnl2_create_vport.
 */
enum virtchnl2_rss_alg {
	VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
	VIRTCHNL2_RSS_ALG_R_ASYMMETRIC = 1,
	VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
	VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC = 3,
};
380
/**
 * enum virtchnl2_event_codes - Type of event
 * @VIRTCHNL2_EVENT_UNKNOWN: Unknown event type
 * @VIRTCHNL2_EVENT_LINK_CHANGE: Link change event type
 * @VIRTCHNL2_EVENT_START_RESET_ADI: Start reset ADI event type
 * @VIRTCHNL2_EVENT_FINISH_RESET_ADI: Finish reset ADI event type
 * @VIRTCHNL2_EVENT_ADI_ACTIVE: Event type to indicate 'function active' state
 *                              of ADI.
 *
 * Delivered by CP via VIRTCHNL2_OP_EVENT.
 */
enum virtchnl2_event_codes {
	VIRTCHNL2_EVENT_UNKNOWN = 0,
	VIRTCHNL2_EVENT_LINK_CHANGE = 1,
	/* These messages are only sent to PF from CP */
	VIRTCHNL2_EVENT_START_RESET_ADI = 2,
	VIRTCHNL2_EVENT_FINISH_RESET_ADI = 3,
	VIRTCHNL2_EVENT_ADI_ACTIVE = 4,
};
398
/**
 * enum virtchnl2_queue_type - Various queue types
 * @VIRTCHNL2_QUEUE_TYPE_TX: TX queue type
 * @VIRTCHNL2_QUEUE_TYPE_RX: RX queue type
 * @VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: TX completion queue type
 * @VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: RX buffer queue type
 * @VIRTCHNL2_QUEUE_TYPE_CONFIG_TX: Config TX queue type
 * @VIRTCHNL2_QUEUE_TYPE_CONFIG_RX: Config RX queue type
 * @VIRTCHNL2_QUEUE_TYPE_P2P_TX: P2P TX queue type
 * @VIRTCHNL2_QUEUE_TYPE_P2P_RX: P2P RX queue type
 * @VIRTCHNL2_QUEUE_TYPE_P2P_TX_COMPLETION: P2P TX completion queue type
 * @VIRTCHNL2_QUEUE_TYPE_P2P_RX_BUFFER: P2P RX buffer queue type
 * @VIRTCHNL2_QUEUE_TYPE_MBX_TX: TX mailbox queue type
 * @VIRTCHNL2_QUEUE_TYPE_MBX_RX: RX mailbox queue type
 *
 * Transmit and Receive queue types are valid in single as well as split queue
 * models. With Split Queue model, 2 additional types are introduced which are
 * TX_COMPLETION and RX_BUFFER. In split queue model, receive corresponds to
 * the queue where hardware posts completions.
 */
enum virtchnl2_queue_type {
	VIRTCHNL2_QUEUE_TYPE_TX = 0,
	VIRTCHNL2_QUEUE_TYPE_RX = 1,
	VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION = 2,
	VIRTCHNL2_QUEUE_TYPE_RX_BUFFER = 3,
	VIRTCHNL2_QUEUE_TYPE_CONFIG_TX = 4,
	VIRTCHNL2_QUEUE_TYPE_CONFIG_RX = 5,
	VIRTCHNL2_QUEUE_TYPE_P2P_TX = 6,
	VIRTCHNL2_QUEUE_TYPE_P2P_RX = 7,
	VIRTCHNL2_QUEUE_TYPE_P2P_TX_COMPLETION = 8,
	VIRTCHNL2_QUEUE_TYPE_P2P_RX_BUFFER = 9,
	VIRTCHNL2_QUEUE_TYPE_MBX_TX = 10,
	VIRTCHNL2_QUEUE_TYPE_MBX_RX = 11,
};
429
/**
 * enum virtchnl2_itr_idx - Interrupt throttling rate index
 * @VIRTCHNL2_ITR_IDX_0: ITR index 0
 * @VIRTCHNL2_ITR_IDX_1: ITR index 1
 */
enum virtchnl2_itr_idx {
	VIRTCHNL2_ITR_IDX_0 = 0,
	VIRTCHNL2_ITR_IDX_1 = 1,
};
439
/**
 * VIRTCHNL2_VECTOR_LIMITS
 * Since PF/VF messages are limited by __le16 size, precalculate the maximum
 * possible values of nested elements in virtchnl structures that virtual
 * channel can possibly handle in a single message.
 *
 * (__le16)(~0) is the 16-bit all-ones maximum message size; subtracting the
 * fixed header leaves room for the trailing element array.
 * NOTE(review): the structs referenced below are defined later in this file,
 * so these macros may only be expanded after those definitions.
 */

#define VIRTCHNL2_OP_DEL_ENABLE_DISABLE_QUEUES_MAX (\
((__le16)(~0) - sizeof(struct virtchnl2_del_ena_dis_queues)) / \
sizeof(struct virtchnl2_queue_chunk))

#define VIRTCHNL2_OP_MAP_UNMAP_QUEUE_VECTOR_MAX (\
((__le16)(~0) - sizeof(struct virtchnl2_queue_vector_maps)) / \
sizeof(struct virtchnl2_queue_vector))
454
/**
 * enum virtchnl2_mac_addr_type - MAC address types
 * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the
 *                              primary/device unicast MAC address filter for
 *                              VIRTCHNL2_OP_ADD_MAC_ADDR and
 *                              VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the
 *                              underlying control plane function to accurately
 *                              track the MAC address and for VM/function reset.
 * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra
 *                            unicast and/or multicast filters that are being
 *                            added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or
 *                            VIRTCHNL2_OP_DEL_MAC_ADDR.
 *
 * Note: values deliberately start at 1; 0 is not a valid type.
 */
enum virtchnl2_mac_addr_type {
	VIRTCHNL2_MAC_ADDR_PRIMARY = 1,
	VIRTCHNL2_MAC_ADDR_EXTRA = 2,
};
472
/**
 * enum virtchnl2_promisc_flags - Flags used for promiscuous mode
 * @VIRTCHNL2_UNICAST_PROMISC: Unicast promiscuous mode
 * @VIRTCHNL2_MULTICAST_PROMISC: Multicast promiscuous mode
 *
 * Used with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE.
 */
enum virtchnl2_promisc_flags {
	VIRTCHNL2_UNICAST_PROMISC = BIT(0),
	VIRTCHNL2_MULTICAST_PROMISC = BIT(1),
};
482
/**
 * enum virtchnl2_queue_group_type - Type of queue groups
 * @VIRTCHNL2_QUEUE_GROUP_DATA: Data queue group type
 * @VIRTCHNL2_QUEUE_GROUP_MBX: Mailbox queue group type
 * @VIRTCHNL2_QUEUE_GROUP_CONFIG: Config queue group type
 *
 * 0 till 0xFF is for general use
 */
enum virtchnl2_queue_group_type {
	VIRTCHNL2_QUEUE_GROUP_DATA = 1,
	VIRTCHNL2_QUEUE_GROUP_MBX = 2,
	VIRTCHNL2_QUEUE_GROUP_CONFIG = 3,
};
496
/* Protocol header type within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group of protocol headers. Each
 * logical group of protocol headers encapsulates or is encapsulated using/by
 * tunneling or encapsulation protocols for network virtualization.
 *
 * Used in ptype info exchanged via VIRTCHNL2_OP_GET_PTYPE_INFO. Protocol ids
 * are wire-protocol values and must not be renumbered.
 */
enum virtchnl2_proto_hdr_type {
	/* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_ANY = 0,
	VIRTCHNL2_PROTO_HDR_PRE_MAC = 1,
	/* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_MAC = 2,
	VIRTCHNL2_PROTO_HDR_POST_MAC = 3,
	VIRTCHNL2_PROTO_HDR_ETHERTYPE = 4,
	VIRTCHNL2_PROTO_HDR_VLAN = 5,
	VIRTCHNL2_PROTO_HDR_SVLAN = 6,
	VIRTCHNL2_PROTO_HDR_CVLAN = 7,
	VIRTCHNL2_PROTO_HDR_MPLS = 8,
	VIRTCHNL2_PROTO_HDR_UMPLS = 9,
	VIRTCHNL2_PROTO_HDR_MMPLS = 10,
	VIRTCHNL2_PROTO_HDR_PTP = 11,
	VIRTCHNL2_PROTO_HDR_CTRL = 12,
	VIRTCHNL2_PROTO_HDR_LLDP = 13,
	VIRTCHNL2_PROTO_HDR_ARP = 14,
	VIRTCHNL2_PROTO_HDR_ECP = 15,
	VIRTCHNL2_PROTO_HDR_EAPOL = 16,
	VIRTCHNL2_PROTO_HDR_PPPOD = 17,
	VIRTCHNL2_PROTO_HDR_PPPOE = 18,
	/* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV4 = 19,
	/* IPv4 and IPv6 Fragment header types are only associated to
	 * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
	 * cannot be used independently.
	 */
	/* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV4_FRAG = 20,
	/* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV6 = 21,
	/* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV6_FRAG = 22,
	VIRTCHNL2_PROTO_HDR_IPV6_EH = 23,
	/* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_UDP = 24,
	/* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_TCP = 25,
	/* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_SCTP = 26,
	/* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_ICMP = 27,
	/* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_ICMPV6 = 28,
	VIRTCHNL2_PROTO_HDR_IGMP = 29,
	VIRTCHNL2_PROTO_HDR_AH = 30,
	VIRTCHNL2_PROTO_HDR_ESP = 31,
	VIRTCHNL2_PROTO_HDR_IKE = 32,
	VIRTCHNL2_PROTO_HDR_NATT_KEEP = 33,
	/* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_PAY = 34,
	VIRTCHNL2_PROTO_HDR_L2TPV2 = 35,
	VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL = 36,
	VIRTCHNL2_PROTO_HDR_L2TPV3 = 37,
	VIRTCHNL2_PROTO_HDR_GTP = 38,
	VIRTCHNL2_PROTO_HDR_GTP_EH = 39,
	VIRTCHNL2_PROTO_HDR_GTPCV2 = 40,
	VIRTCHNL2_PROTO_HDR_GTPC_TEID = 41,
	VIRTCHNL2_PROTO_HDR_GTPU = 42,
	VIRTCHNL2_PROTO_HDR_GTPU_UL = 43,
	VIRTCHNL2_PROTO_HDR_GTPU_DL = 44,
	VIRTCHNL2_PROTO_HDR_ECPRI = 45,
	VIRTCHNL2_PROTO_HDR_VRRP = 46,
	VIRTCHNL2_PROTO_HDR_OSPF = 47,
	/* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_TUN = 48,
	VIRTCHNL2_PROTO_HDR_GRE = 49,
	VIRTCHNL2_PROTO_HDR_NVGRE = 50,
	VIRTCHNL2_PROTO_HDR_VXLAN = 51,
	VIRTCHNL2_PROTO_HDR_VXLAN_GPE = 52,
	VIRTCHNL2_PROTO_HDR_GENEVE = 53,
	VIRTCHNL2_PROTO_HDR_NSH = 54,
	VIRTCHNL2_PROTO_HDR_QUIC = 55,
	VIRTCHNL2_PROTO_HDR_PFCP = 56,
	VIRTCHNL2_PROTO_HDR_PFCP_NODE = 57,
	VIRTCHNL2_PROTO_HDR_PFCP_SESSION = 58,
	VIRTCHNL2_PROTO_HDR_RTP = 59,
	VIRTCHNL2_PROTO_HDR_ROCE = 60,
	VIRTCHNL2_PROTO_HDR_ROCEV1 = 61,
	VIRTCHNL2_PROTO_HDR_ROCEV2 = 62,
	/* Protocol ids up to 32767 are reserved */
	/* 32768 - 65534 are used for user defined protocol ids */
	/* VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_NO_PROTO = 65535,
};
588
/**
 * enum virtchl2_version - Version numbers negotiated via VIRTCHNL2_OP_VERSION.
 * @VIRTCHNL2_VERSION_MINOR_0: Minor version 0
 * @VIRTCHNL2_VERSION_MAJOR_2: Major version 2
 *
 * NOTE(review): the enum tag is spelled "virtchl2" (missing 'n') — kept
 * as-is since renaming the tag would break existing references to it.
 */
enum virtchl2_version {
	VIRTCHNL2_VERSION_MINOR_0 = 0,
	VIRTCHNL2_VERSION_MAJOR_2 = 2,
};
593
/**
 * struct virtchnl2_version_info - Version information
 * @major: Major version (little-endian on the wire)
 * @minor: Minor version (little-endian on the wire)
 *
 * PF/VF posts its version number to the CP. CP responds with its version number
 * in the same format, along with a return code.
 * If there is a major version mismatch, then the PF/VF cannot operate.
 * If there is a minor version mismatch, then the PF/VF can operate but should
 * add a warning to the system log.
 *
 * This version opcode MUST always be specified as == 1, regardless of other
 * changes in the API. The CP must always respond to this message without
 * error regardless of version mismatch.
 *
 * Associated with VIRTCHNL2_OP_VERSION.
 */
struct virtchnl2_version_info {
	__le32 major;
	__le32 minor;
};

VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
617
/**
 * struct virtchnl2_get_capabilities - Capabilities info
 * @csum_caps: See enum virtchnl2_cap_txrx_csum
 * @seg_caps: See enum virtchnl2_cap_seg
 * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at
 * @rsc_caps: See enum virtchnl2_cap_rsc
 * @rss_caps: See enum virtchnl2_cap_rss
 * @other_caps: See enum virtchnl2_cap_other
 * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
 *                   provided by CP.
 * @mailbox_vector_id: Mailbox vector id
 * @num_allocated_vectors: Maximum number of allocated vectors for the device
 * @max_rx_q: Maximum number of supported Rx queues
 * @max_tx_q: Maximum number of supported Tx queues
 * @max_rx_bufq: Maximum number of supported buffer queues
 * @max_tx_complq: Maximum number of supported completion queues
 * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP
 *                 responds with the maximum VFs granted.
 * @max_vports: Maximum number of vports that can be supported
 * @default_num_vports: Default number of vports driver should allocate on load
 * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes
 * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be
 *                          sent per transmit packet without needing to be
 *                          linearized.
 * @reserved: Reserved field
 * @max_adis: Max number of ADIs
 * @oem_cp_ver_major: Major version of the Control Plane that is running
 * @oem_cp_ver_minor: Minor version of the Control Plane that is running
 * @device_type: See enum virtchnl2_device_type
 * @min_sso_packet_len: Min packet length supported by device for single
 *                      segment offload
 * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
 *                       an LSO
 * @pad1: Padding for future extensions
 *
 * Dataplane driver sends this message to CP to negotiate capabilities and
 * provides a virtchnl2_get_capabilities structure with its desired
 * capabilities, max_sriov_vfs and num_allocated_vectors.
 * CP responds with a virtchnl2_get_capabilities structure updated
 * with allowed capabilities and the other fields as below.
 * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs
 * that can be created by this PF. For any other value 'n', CP responds
 * with max_sriov_vfs set to min(n, x) where x is the max number of VFs
 * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.
 * If dataplane driver sets num_allocated_vectors as 0, CP will respond with 1
 * which is default vector associated with the default mailbox. For any other
 * value 'n', CP responds with a value <= n based on the CP's policy of
 * max number of vectors for a PF.
 * CP will respond with the vector ID of mailbox allocated to the PF in
 * mailbox_vector_id and the number of itr index registers in itr_idx_map.
 * NOTE(review): there is no itr_idx_map member in this structure — the text
 * above presumably refers to a field carried elsewhere; confirm against the
 * CP-side definition.
 * It also responds with default number of vports that the dataplane driver
 * should comeup with in default_num_vports and maximum number of vports that
 * can be supported in max_vports.
 *
 * Associated with VIRTCHNL2_OP_GET_CAPS.
 */
struct virtchnl2_get_capabilities {
	__le32 csum_caps;
	__le32 seg_caps;
	__le32 hsplit_caps;
	__le32 rsc_caps;
	__le64 rss_caps;
	__le64 other_caps;
	__le32 mailbox_dyn_ctl;
	__le16 mailbox_vector_id;
	__le16 num_allocated_vectors;
	__le16 max_rx_q;
	__le16 max_tx_q;
	__le16 max_rx_bufq;
	__le16 max_tx_complq;
	__le16 max_sriov_vfs;
	__le16 max_vports;
	__le16 default_num_vports;
	__le16 max_tx_hdr_size;
	u8 max_sg_bufs_per_tx_pkt;
	u8 reserved;
	__le16 max_adis;

	/* version of Control Plane that is running */
	__le16 oem_cp_ver_major;
	__le16 oem_cp_ver_minor;
	/* see VIRTCHNL2_DEVICE_TYPE definitions */
	__le32 device_type;
	u8 min_sso_packet_len;
	u8 max_hdr_buf_per_lso;

	u8 pad1[10];
};

VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
706
/**
 * struct virtchnl2_queue_reg_chunk - Single queue chunk
 * @type: See enum virtchnl2_queue_type
 * @start_queue_id: Start Queue ID
 * @num_queues: Number of queues in the chunk (contiguous ids starting at
 *              @start_queue_id)
 * @pad: Padding
 * @qtail_reg_start: Queue tail register offset
 * @qtail_reg_spacing: Queue tail register spacing
 * @pad1: Padding for future extensions
 */
struct virtchnl2_queue_reg_chunk {
	__le32 type;
	__le32 start_queue_id;
	__le32 num_queues;
	__le32 pad;
	__le64 qtail_reg_start;
	__le32 qtail_reg_spacing;

	u8 pad1[4];
};

VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
729
/**
 * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous
 *                                     queues.
 * @num_chunks: Number of valid entries in @chunks
 * @pad: Padding
 * @chunks: Chunks of queue info; variable-length trailing array
 *          (STRUCT_VAR_LEN is flexible-array emulation, actual count is
 *          @num_chunks)
 */
struct virtchnl2_queue_reg_chunks {
	__le16 num_chunks;
	u8 pad[6];
	struct virtchnl2_queue_reg_chunk chunks[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(40, virtchnl2_queue_reg_chunks, chunks);
743
/**
 * enum virtchnl2_vport_flags - Vport flags
 * @VIRTCHNL2_VPORT_UPLINK_PORT: Uplink port flag
 * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER: Inline flow steering enabled
 * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ: Inline flow steering enabled
 *                                         with explicit Rx queue action
 * @VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER: Sideband flow steering enabled
 *
 * Carried in the vport_flags field of struct virtchnl2_create_vport.
 */
enum virtchnl2_vport_flags {
	VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
	VIRTCHNL2_VPORT_INLINE_FLOW_STEER = BIT(1),
	VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ = BIT(2),
	VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER = BIT(3),
};
758
759 #define VIRTCHNL2_ETH_LENGTH_OF_ADDRESS 6
760
761
/**
 * struct virtchnl2_create_vport - Create vport config info
 * @vport_type: See enum virtchnl2_vport_type
 * @txq_model: See virtchnl2_queue_model
 * @rxq_model: See virtchnl2_queue_model
 * @num_tx_q: Number of Tx queues
 * @num_tx_complq: Valid only if txq_model is split queue
 * @num_rx_q: Number of Rx queues
 * @num_rx_bufq: Valid only if rxq_model is split queue
 * @default_rx_q: Relative receive queue index to be used as default
 * @vport_index: Used to align PF and CP in case of default multiple vports,
 *               it is filled by the PF and CP returns the same value, to
 *               enable the driver to support multiple asynchronous parallel
 *               CREATE_VPORT requests and associate a response to a specific
 *               request.
 * @max_mtu: Max MTU. CP populates this field on response
 * @vport_id: Vport id. CP populates this field on response
 * @default_mac_addr: Default MAC address
 * @vport_flags: See enum virtchnl2_vport_flags
 * @rx_desc_ids: See enum virtchnl2_rx_desc_id_bitmasks
 * @tx_desc_ids: See enum virtchnl2_tx_desc_ids
 * @reserved: Reserved bytes and cannot be used
 * @inline_flow_types: Bit mask of supported inline-flow-steering
 *                     flow types (See enum virtchnl2_flow_types)
 * @sideband_flow_types: Bit mask of supported sideband-flow-steering
 *                       flow types (See enum virtchnl2_flow_types)
 * @sideband_flow_actions: Bit mask of supported action types
 *                         for sideband flow steering (See enum
 *                         virtchnl2_action_types)
 * @flow_steer_max_rules: Max rules allowed for inline and sideband
 *                        flow steering combined
 * @rss_algorithm: RSS algorithm (see enum virtchnl2_rss_alg)
 * @rss_key_size: RSS key size
 * @rss_lut_size: RSS LUT size
 * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at
 * @pad: Padding for future extensions
 * @chunks: Chunks of contiguous queues (variable-length trailing data)
 *
 * PF/VF sends this message to CP to create a vport by filling in required
 * fields of virtchnl2_create_vport structure.
 * CP responds with the updated virtchnl2_create_vport structure containing the
 * necessary fields followed by chunks which in turn will have an array of
 * num_chunks entries of virtchnl2_queue_chunk structures.
 */
struct virtchnl2_create_vport {
	__le16 vport_type;
	__le16 txq_model;
	__le16 rxq_model;
	__le16 num_tx_q;
	__le16 num_tx_complq;
	__le16 num_rx_q;
	__le16 num_rx_bufq;
	__le16 default_rx_q;
	__le16 vport_index;
	__le16 max_mtu;
	__le32 vport_id;
	u8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
	__le16 vport_flags;
	__le64 rx_desc_ids;
	__le64 tx_desc_ids;
	u8 reserved[48];
	__le64 inline_flow_types;
	__le64 sideband_flow_types;
	__le32 sideband_flow_actions;
	__le32 flow_steer_max_rules;
	__le32 rss_algorithm;
	__le16 rss_key_size;
	__le16 rss_lut_size;
	__le32 rx_split_pos;
	u8 pad[20];
	struct virtchnl2_queue_reg_chunks chunks;
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(192, virtchnl2_create_vport, chunks.chunks);
834
/**
 * struct virtchnl2_vport - Vport identifier information
 * @vport_id: Id of the vport to destroy, enable or disable
 * @pad: Padding for future extensions
 *
 * PF/VF sends this message to CP to destroy, enable or disable a vport by
 * filling in the vport_id in virtchnl2_vport structure.
 * CP responds with the status of the requested operation.
 *
 * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT,
 * VIRTCHNL2_OP_DISABLE_VPORT.
 */
struct virtchnl2_vport {
	__le32 vport_id;
	u8 pad[4];
};

VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);
853
/**
 * struct virtchnl2_txq_info - Transmit queue config info
 * @dma_ring_addr: DMA address
 * @type: See enum virtchnl2_queue_type
 * @queue_id: Queue ID
 * @relative_queue_id: Valid only if queue model is split and type is transmit
 *                     queue. Used in many to one mapping of transmit queues to
 *                     completion queue.
 * @model: See enum virtchnl2_queue_model
 * @sched_mode: See enum virtchnl2_txq_sched_mode
 * @qflags: TX queue feature flags
 * @ring_len: Ring length
 * @tx_compl_queue_id: Valid only if queue model is split and type is transmit
 *                     queue.
 * @peer_type: Valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MAILBOX_TX
 * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver
 *                    messages for the respective CONFIG_TX queue.
 * @pad: Padding
 * @egress_pasid: Egress PASID info
 * @egress_hdr_pasid: Egress header PASID
 * @egress_buf_pasid: Egress buffer PASID
 * @pad1: Padding for future extensions
 */
struct virtchnl2_txq_info {
	__le64 dma_ring_addr;
	__le32 type;
	__le32 queue_id;
	__le16 relative_queue_id;
	__le16 model;
	__le16 sched_mode;
	__le16 qflags;
	__le16 ring_len;
	__le16 tx_compl_queue_id;
	__le16 peer_type;
	__le16 peer_rx_queue_id;

	u8 pad[4];
	__le32 egress_pasid;
	__le32 egress_hdr_pasid;
	__le32 egress_buf_pasid;

	u8 pad1[8];
};

VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);
899
/**
 * struct virtchnl2_config_tx_queues - TX queue config
 * @vport_id: Vport id
 * @num_qinfo: Number of virtchnl2_txq_info structs in @qinfo
 * @pad: Padding for future extensions
 * @qinfo: Tx queues config info
 *
 * PF/VF sends this message to set up parameters for one or more transmit
 * queues. This message contains an array of num_qinfo instances of
 * virtchnl2_txq_info structures. CP configures requested queues and returns
 * a status code. If num_qinfo specified is greater than the number of queues
 * associated with the vport, an error is returned and no queues are configured.
 *
 * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
 */
struct virtchnl2_config_tx_queues {
	__le32 vport_id;
	__le16 num_qinfo;

	u8 pad[10];
	struct virtchnl2_txq_info qinfo[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(72, virtchnl2_config_tx_queues, qinfo);
923
/**
 * struct virtchnl2_rxq_info - Receive queue config info
 * @desc_ids: See enum virtchnl2_rx_desc_id_bitmasks
 * @dma_ring_addr: DMA address of the receive ring
 * @type: See enum virtchnl2_queue_type
 * @queue_id: Queue id
 * @model: See enum virtchnl2_queue_model
 * @hdr_buffer_size: Header buffer size
 * @data_buffer_size: Data buffer size
 * @max_pkt_size: Max packet size
 * @ring_len: Ring length
 * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors.
 *                       This field must be a power of 2.
 * @pad: Padding
 * @dma_head_wb_addr: Applicable only for receive buffer queues
 * @qflags: Applicable only for receive completion queues.
 *          See enum virtchnl2_rxq_flags.
 * @rx_buffer_low_watermark: Rx buffer low watermark
 * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with
 *               the Rx queue. Valid only in split queue model.
 * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with
 *               the Rx queue. Valid only in split queue model.
 * @bufq2_ena: It indicates if there is a second buffer, rx_bufq2_id is valid
 *             only if this field is set.
 * @pad1: Padding
 * @ingress_pasid: Ingress PASID
 * @ingress_hdr_pasid: Ingress PASID header
 * @ingress_buf_pasid: Ingress PASID buffer
 * @pad2: Padding for future extensions
 */
struct virtchnl2_rxq_info {
	__le64 desc_ids;
	__le64 dma_ring_addr;
	__le32 type;
	__le32 queue_id;
	__le16 model;
	__le16 hdr_buffer_size;
	__le32 data_buffer_size;
	__le32 max_pkt_size;
	__le16 ring_len;
	u8 buffer_notif_stride;
	u8 pad;
	__le64 dma_head_wb_addr;
	__le16 qflags;
	__le16 rx_buffer_low_watermark;
	__le16 rx_bufq1_id;
	__le16 rx_bufq2_id;
	u8 bufq2_ena;
	u8 pad1[3];
	__le32 ingress_pasid;
	__le32 ingress_hdr_pasid;
	__le32 ingress_buf_pasid;

	u8 pad2[16];
};
VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
980
/**
 * struct virtchnl2_config_rx_queues - Rx queues config
 * @vport_id: Vport id
 * @num_qinfo: Number of virtchnl2_rxq_info structs in @qinfo
 * @pad: Padding for future extensions
 * @qinfo: Rx queues config info
 *
 * PF/VF sends this message to set up parameters for one or more receive queues.
 * This message contains an array of num_qinfo instances of virtchnl2_rxq_info
 * structures. CP configures requested queues and returns a status code.
 * If the number of queues specified is greater than the number of queues
 * associated with the vport, an error is returned and no queues are configured.
 *
 * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES.
 */
struct virtchnl2_config_rx_queues {
	__le32 vport_id;
	__le16 num_qinfo;

	u8 pad[18];
	struct virtchnl2_rxq_info qinfo[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(112, virtchnl2_config_rx_queues, qinfo);
1004
/**
 * struct virtchnl2_add_queues - Data for VIRTCHNL2_OP_ADD_QUEUES
 * @vport_id: Vport id
 * @num_tx_q: Number of Tx queues
 * @num_tx_complq: Number of Tx completion queues
 * @num_rx_q: Number of Rx queues
 * @num_rx_bufq: Number of Rx buffer queues
 * @pad: Padding for future extensions
 * @chunks: Chunks of contiguous queues
 *
 * PF/VF sends this message to request additional transmit/receive queues beyond
 * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
 * structure is used to specify the number of each type of queues.
 * CP responds with the same structure with the actual number of queues assigned
 * followed by num_chunks of virtchnl2_queue_chunk structures.
 *
 * Associated with VIRTCHNL2_OP_ADD_QUEUES.
 */
struct virtchnl2_add_queues {
	__le32 vport_id;
	__le16 num_tx_q;
	__le16 num_tx_complq;
	__le16 num_rx_q;
	__le16 num_rx_bufq;
	u8 pad[4];

	struct virtchnl2_queue_reg_chunks chunks;
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(56, virtchnl2_add_queues, chunks.chunks);
1034
1035 /* Queue Groups Extension */
/**
 * struct virtchnl2_rx_queue_group_info - RX queue group info
 * @rss_lut_size: User can ask to update rss_lut size originally allocated by
 *                the CreateVport command. New size will be returned if
 *                allocation succeeded, otherwise original rss_size from
 *                CreateVport will be returned.
 * @pad: Padding for future extensions
 */
struct virtchnl2_rx_queue_group_info {
	__le16 rss_lut_size;
	u8 pad[6];
};

VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rx_queue_group_info);
1050
/**
 * struct virtchnl2_tx_queue_group_info - TX queue group info
 * @tx_tc: TX TC queue group will be connected to
 * @priority: Each group can have its own priority, value 0-7, while each group
 *            with unique priority is strict priority. There can be a single set
 *            of queue groups configured with the same priority; these are
 *            assumed to be part of a WFQ arbitration group and are expected to
 *            be assigned a weight.
 * @is_sp: Determines if queue group is expected to be Strict Priority according
 *         to its priority.
 * @pad: Padding
 * @pir_weight: Peak Info Rate Weight in case Queue Group is part of WFQ
 *              arbitration set.
 *              The weights of the groups are independent of each other.
 *              Possible values: 1-200
 * @cir_pad: Future extension purpose for CIR only
 * @pad2: Padding for future extensions
 */
struct virtchnl2_tx_queue_group_info {
	u8 tx_tc;
	u8 priority;
	u8 is_sp;
	u8 pad;
	__le16 pir_weight;
	u8 cir_pad[2];
	u8 pad2[8];
};

VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_tx_queue_group_info);
1080
/**
 * struct virtchnl2_queue_group_id - Queue group ID
 * @queue_group_id: Queue group ID - Dependent on its type
 *                  Data: Is an ID which is relative to Vport
 *                  Config & Mailbox: Is an ID which is relative to func
 *                  This ID is used in future calls, i.e. delete.
 *                  Requested by host and assigned by Control plane.
 * @queue_group_type: Functional type: See enum virtchnl2_queue_group_type
 * @pad: Padding for future extensions
 */
struct virtchnl2_queue_group_id {
	__le16 queue_group_id;
	__le16 queue_group_type;
	u8 pad[4];
};

VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_group_id);
1098
/**
 * struct virtchnl2_queue_group_info - Queue group info
 * @qg_id: Queue group ID
 * @num_tx_q: Number of TX queues requested
 * @num_tx_complq: Number of completion queues requested
 * @num_rx_q: Number of RX queues requested
 * @num_rx_bufq: Number of RX buffer queues requested
 * @tx_q_grp_info: TX queue group info
 * @rx_q_grp_info: RX queue group info
 * @pad: Padding for future extensions
 * @chunks: Queue register chunks from CP
 */
struct virtchnl2_queue_group_info {
	struct virtchnl2_queue_group_id qg_id;
	__le16 num_tx_q;
	__le16 num_tx_complq;
	__le16 num_rx_q;
	__le16 num_rx_bufq;

	struct virtchnl2_tx_queue_group_info tx_q_grp_info;
	struct virtchnl2_rx_queue_group_info rx_q_grp_info;
	u8 pad[40];
	struct virtchnl2_queue_reg_chunks chunks;
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(120, virtchnl2_queue_group_info, chunks.chunks);
1124
/**
 * struct virtchnl2_add_queue_groups - Add queue groups
 * @vport_id: Vport_id to add queue group to, same as allocated by
 *            CreateVport. NA for mailbox and other types not assigned to vport.
 * @num_queue_groups: Total number of queue groups
 * @pad: Padding for future extensions
 * @groups: Array of num_queue_groups queue group info entries
 *
 * PF sends this message to request additional transmit/receive queue groups
 * beyond the ones that were assigned via CREATE_VPORT request.
 * virtchnl2_add_queue_groups structure is used to specify the number of each
 * type of queues. CP responds with the same structure with the actual number of
 * groups and queues assigned followed by num_queue_groups and groups of
 * virtchnl2_queue_group_info and virtchnl2_queue_chunk structures.
 *
 * NOTE(review): @groups is a STRUCT_VAR_LEN array but the size check below
 * uses VIRTCHNL2_CHECK_STRUCT_LEN rather than the _VAR_LEN variant used by
 * the other variable-length messages — confirm this is intentional.
 *
 * Associated with VIRTCHNL2_OP_ADD_QUEUE_GROUPS.
 */
struct virtchnl2_add_queue_groups {
	__le32 vport_id;
	__le16 num_queue_groups;
	u8 pad[10];
	struct virtchnl2_queue_group_info groups[STRUCT_VAR_LEN];

};

VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_add_queue_groups);
1150
/**
 * struct virtchnl2_delete_queue_groups - Delete queue groups
 * @vport_id: Vport ID to delete queue group from, same as allocated by
 *            CreateVport.
 * @num_queue_groups: Defines number of groups provided
 * @pad: Padding
 * @qg_ids: IDs & types of Queue Groups to delete
 *
 * PF sends this message to delete queue groups.
 * PF sends virtchnl2_delete_queue_groups struct to specify the queue groups
 * to be deleted. CP performs the requested action, returns status and updates
 * num_queue_groups with the number of successfully deleted queue groups.
 *
 * Associated with VIRTCHNL2_OP_DEL_QUEUE_GROUPS.
 */
struct virtchnl2_delete_queue_groups {
	__le32 vport_id;
	__le16 num_queue_groups;
	u8 pad[2];

	struct virtchnl2_queue_group_id qg_ids[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(16, virtchnl2_delete_queue_groups, qg_ids);
1174
/**
 * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous
 *                                 interrupt vectors.
 * @start_vector_id: Start vector id
 * @start_evv_id: Start EVV id
 * @num_vectors: Number of vectors
 * @pad: Padding
 * @dynctl_reg_start: DYN_CTL register offset
 * @dynctl_reg_spacing: Register spacing between DYN_CTL registers of 2
 *                      consecutive vectors.
 * @itrn_reg_start: ITRN register offset
 * @itrn_reg_spacing: Register spacing between itrn registers of 2
 *                    consecutive vectors.
 * @itrn_index_spacing: Register spacing between itrn registers of the same
 *                      vector where n=0..2.
 * @pad1: Padding for future extensions
 *
 * Register offsets and spacing provided by CP.
 * Dynamic control registers are used for enabling/disabling/re-enabling
 * interrupts and updating interrupt rates in the hotpath. Any changes
 * to interrupt rates in the dynamic control registers will be reflected
 * in the interrupt throttling rate registers.
 * itrn registers are used to update interrupt rates for specific
 * interrupt indices without modifying the state of the interrupt.
 */
struct virtchnl2_vector_chunk {
	__le16 start_vector_id;
	__le16 start_evv_id;
	__le16 num_vectors;
	__le16 pad;

	__le32 dynctl_reg_start;
	__le32 dynctl_reg_spacing;

	__le32 itrn_reg_start;
	__le32 itrn_reg_spacing;
	__le32 itrn_index_spacing;
	u8 pad1[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
1215
/**
 * struct virtchnl2_vector_chunks - Chunks of contiguous interrupt vectors
 * @num_vchunks: Number of vector chunks
 * @pad: Padding for future extensions
 * @vchunks: Chunks of contiguous vector info
 *
 * PF/VF sends virtchnl2_vector_chunks struct to specify the vectors it is
 * giving away. CP performs requested action and returns status.
 *
 * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS.
 */
struct virtchnl2_vector_chunks {
	__le16 num_vchunks;
	u8 pad[14];

	struct virtchnl2_vector_chunk vchunks[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(48, virtchnl2_vector_chunks, vchunks);
1234
/**
 * struct virtchnl2_alloc_vectors - Vector allocation info
 * @num_vectors: Number of vectors requested
 * @pad: Padding for future extensions
 * @vchunks: Chunks of contiguous vector info
 *
 * PF/VF sends this message to request additional interrupt vectors beyond the
 * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors
 * structure is used to specify the number of vectors requested. CP responds
 * with the same structure with the actual number of vectors assigned followed
 * by virtchnl2_vector_chunks structure identifying the vector ids.
 *
 * Associated with VIRTCHNL2_OP_ALLOC_VECTORS.
 */
struct virtchnl2_alloc_vectors {
	__le16 num_vectors;
	u8 pad[14];

	struct virtchnl2_vector_chunks vchunks;
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(64, virtchnl2_alloc_vectors, vchunks.vchunks);
1256
/**
 * struct virtchnl2_rss_lut - RSS LUT info
 * @vport_id: Vport id
 * @lut_entries_start: Start of LUT entries
 * @lut_entries: Number of LUT entries
 * @pad: Padding
 * @lut: RSS lookup table
 *
 * PF/VF sends this message to get or set RSS lookup table. Only supported if
 * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
 * negotiation.
 *
 * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT.
 */
struct virtchnl2_rss_lut {
	__le32 vport_id;
	__le16 lut_entries_start;
	__le16 lut_entries;
	u8 pad[4];
	__le32 lut[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(16, virtchnl2_rss_lut, lut);
1279
/**
 * struct virtchnl2_rss_hash - RSS hash info
 * @ptype_groups: Packet type groups bitmap
 * @vport_id: Vport id
 * @pad: Padding for future extensions
 *
 * PF/VF sends these messages to get and set the hash filter enable bits for
 * RSS. By default, the CP sets these to all possible traffic types that the
 * hardware supports. The PF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit
 * during configuration negotiation.
 *
 * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH.
 */
struct virtchnl2_rss_hash {
	__le64 ptype_groups;
	__le32 vport_id;
	u8 pad[4];
};

VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);
1302
/**
 * struct virtchnl2_sriov_vfs_info - VFs info
 * @num_vfs: Number of VFs to create; 0 destroys all VFs of this function
 * @pad: Padding for future extensions
 *
 * This message is used to set number of SRIOV VFs to be created. The actual
 * allocation of resources for the VFs in terms of vport, queues and interrupts
 * is done by CP. When this call completes, the IDPF driver calls
 * pci_enable_sriov to let the OS instantiate the SRIOV PCIE devices.
 * The number of VFs set to 0 will destroy all the VFs of this function.
 *
 * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS.
 */
struct virtchnl2_sriov_vfs_info {
	__le16 num_vfs;
	__le16 pad;
};

VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
1323
/**
 * struct virtchnl2_non_flex_queue_reg_chunks - Specify several chunks of
 *                                              contiguous queues.
 * @num_chunks: Number of chunks
 * @pad: Padding
 * @chunks: Chunks of queue info. 'chunks' is fixed size (not flexible) and
 *          will be deprecated at some point.
 */
struct virtchnl2_non_flex_queue_reg_chunks {
	__le16 num_chunks;
	u8 pad[6];
	struct virtchnl2_queue_reg_chunk chunks[1];
};

VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_non_flex_queue_reg_chunks);
1339
/**
 * struct virtchnl2_non_flex_vector_chunks - Chunks of contiguous interrupt
 *                                           vectors.
 * @num_vchunks: Number of vector chunks
 * @pad: Padding for future extensions
 * @vchunks: Chunks of contiguous vector info. 'vchunks' is fixed size
 *           (not flexible) and will be deprecated at some point.
 */
struct virtchnl2_non_flex_vector_chunks {
	__le16 num_vchunks;
	u8 pad[14];
	struct virtchnl2_vector_chunk vchunks[1];
};

VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_non_flex_vector_chunks);
1355
/**
 * struct virtchnl2_non_flex_create_adi - Create ADI
 * @pasid: PF sends PASID to CP
 * @mbx_id: mbx_id is set to 1 by PF when requesting CP to provide HW mailbox
 *          id, else it is set to 0 by PF.
 * @mbx_vec_id: PF sends mailbox vector id to CP
 * @adi_index: PF populates this ADI index
 * @adi_id: CP populates ADI id
 * @pad: Padding
 * @chunks: CP populates queue chunks
 * @vchunks: PF sends vector chunks to CP
 *
 * PF sends this message to CP to create ADI by filling in required
 * fields of virtchnl2_non_flex_create_adi structure.
 * CP responds with the updated virtchnl2_non_flex_create_adi structure
 * containing the necessary fields followed by chunks which in turn will have
 * an array of num_chunks entries of virtchnl2_queue_chunk structures.
 *
 * Associated with VIRTCHNL2_OP_NON_FLEX_CREATE_ADI.
 */
struct virtchnl2_non_flex_create_adi {
	__le32 pasid;
	__le16 mbx_id;
	__le16 mbx_vec_id;
	__le16 adi_index;
	__le16 adi_id;
	u8 pad[68];
	struct virtchnl2_non_flex_queue_reg_chunks chunks;
	struct virtchnl2_non_flex_vector_chunks vchunks;
};

VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_non_flex_create_adi);
1388
/**
 * struct virtchnl2_non_flex_destroy_adi - Destroy ADI
 * @adi_id: ADI id to destroy
 * @pad: Padding
 *
 * PF sends this message to CP to destroy ADI by filling
 * in the adi_id in virtchnl2_non_flex_destroy_adi structure.
 * CP responds with the status of the requested operation.
 *
 * Associated with VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI.
 */
struct virtchnl2_non_flex_destroy_adi {
	__le16 adi_id;
	u8 pad[2];
};

VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_non_flex_destroy_adi);
1406
/**
 * struct virtchnl2_ptype - Packet type info
 * @ptype_id_10: 10-bit packet type
 * @ptype_id_8: 8-bit packet type
 * @proto_id_count: Number of protocol ids the packet supports, maximum of 32
 *                  protocol ids are supported.
 * @pad: Padding
 * @proto_id: proto_id_count decides the allocation of the protocol id array.
 *            See enum virtchnl2_proto_hdr_type.
 *
 * Based on the descriptor type the PF supports, CP fills ptype_id_10 or
 * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value
 * is set to 0xFFFF, PF should consider this ptype as dummy one and it is the
 * last ptype.
 */
struct virtchnl2_ptype {
	__le16 ptype_id_10;
	u8 ptype_id_8;
	u8 proto_id_count;
	__le16 pad;
	__le16 proto_id[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(8, virtchnl2_ptype, proto_id);
1430
/**
 * struct virtchnl2_get_ptype_info - Packet type info
 * @start_ptype_id: Starting ptype ID
 * @num_ptypes: Number of packet types from start_ptype_id
 * @pad: Padding for future extensions
 * @ptype: Array of packet type info
 *
 * The total number of supported packet types is based on the descriptor type.
 * For the flex descriptor, it is 1024 (10-bit ptype), and for the base
 * descriptor, it is 256 (8-bit ptype). Send this message to the CP by
 * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the
 * 'start_ptype_id', 'num_ptypes', and the array of ptype (virtchnl2_ptype) that
 * are added at the end of the 'virtchnl2_get_ptype_info' message (Note: There
 * is no specific field for the ptypes but are added at the end of the
 * ptype info message. PF/VF is expected to extract the ptypes accordingly.
 * Reason for doing this is because compiler doesn't allow nested flexible
 * array fields).
 *
 * If all the ptypes don't fit into one mailbox buffer, CP splits the
 * ptype info into multiple messages, where each message will have its own
 * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done
 * updating all the ptype information extracted from the package (the number of
 * ptypes extracted might be less than what PF/VF expects), it will append a
 * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF)
 * to the ptype array.
 *
 * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages.
 *
 * NOTE(review): @ptype is a STRUCT_VAR_LEN array but the size check below uses
 * VIRTCHNL2_CHECK_STRUCT_LEN rather than the _VAR_LEN variant — confirm this
 * is intentional.
 *
 * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO.
 */
struct virtchnl2_get_ptype_info {
	__le16 start_ptype_id;
	__le16 num_ptypes;
	__le32 pad;
	struct virtchnl2_ptype ptype[STRUCT_VAR_LEN];
};

VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_get_ptype_info);
1469
/**
 * struct virtchnl2_vport_stats - Vport statistics
 * @vport_id: Vport id
 * @pad: Padding
 * @rx_bytes: Received bytes
 * @rx_unicast: Received unicast packets
 * @rx_multicast: Received multicast packets
 * @rx_broadcast: Received broadcast packets
 * @rx_discards: Discarded packets on receive
 * @rx_errors: Receive errors
 * @rx_unknown_protocol: Unknown protocol
 * @tx_bytes: Transmitted bytes
 * @tx_unicast: Transmitted unicast packets
 * @tx_multicast: Transmitted multicast packets
 * @tx_broadcast: Transmitted broadcast packets
 * @tx_discards: Discarded packets on transmit
 * @tx_errors: Transmit errors
 * @rx_invalid_frame_length: Packets with invalid frame length
 * @rx_overflow_drop: Packets dropped on buffer overflow
 *
 * PF/VF sends this message to CP to get the update stats by specifying the
 * vport_id. CP responds with stats in struct virtchnl2_vport_stats.
 *
 * Associated with VIRTCHNL2_OP_GET_STATS.
 */
struct virtchnl2_vport_stats {
	__le32 vport_id;
	u8 pad[4];

	__le64 rx_bytes;
	__le64 rx_unicast;
	__le64 rx_multicast;
	__le64 rx_broadcast;
	__le64 rx_discards;
	__le64 rx_errors;
	__le64 rx_unknown_protocol;
	__le64 tx_bytes;
	__le64 tx_unicast;
	__le64 tx_multicast;
	__le64 tx_broadcast;
	__le64 tx_discards;
	__le64 tx_errors;
	__le64 rx_invalid_frame_length;
	__le64 rx_overflow_drop;
};

VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
1517
/**
 * struct virtchnl2_phy_port_stats - Physical port statistics
 * @rx_bytes: Received bytes
 * @rx_unicast_pkts: Received unicast packets
 * @rx_multicast_pkts: Received multicast packets
 * @rx_broadcast_pkts: Received broadcast packets
 * @rx_size_64_pkts: Received packets in the 64-byte size bucket
 * @rx_size_127_pkts: Received packets in the 65-127 byte size bucket
 * @rx_size_255_pkts: Received packets in the 128-255 byte size bucket
 * @rx_size_511_pkts: Received packets in the 256-511 byte size bucket
 * @rx_size_1023_pkts: Received packets in the 512-1023 byte size bucket
 * @rx_size_1518_pkts: Received packets in the 1024-1518 byte size bucket
 * @rx_size_jumbo_pkts: Received packets in the jumbo size bucket
 * @rx_xon_events: Rx XON events
 * @rx_xoff_events: Rx XOFF events
 * @rx_undersized_pkts: Received undersized packets
 * @rx_fragmented_pkts: Received fragmented packets
 * @rx_oversized_pkts: Received oversized packets
 * @rx_jabber_pkts: Received jabber packets
 * @rx_csum_errors: Receive checksum errors
 * @rx_length_errors: Receive length errors
 * @rx_dropped_pkts: Dropped packets on receive
 * @rx_crc_errors: Receive CRC errors
 * @rx_runt_errors: Frames with length < 64 and a bad CRC
 * @rx_illegal_bytes: Received illegal bytes
 * @rx_total_pkts: Total received packets
 * @rx_pad: Reserved for future Rx counters
 * @tx_bytes: Transmitted bytes
 * @tx_unicast_pkts: Transmitted unicast packets
 * @tx_multicast_pkts: Transmitted multicast packets
 * @tx_broadcast_pkts: Transmitted broadcast packets
 * @tx_errors: Transmit errors
 * @tx_timeout_events: Transmit timeout events
 * @tx_size_64_pkts: Transmitted packets in the 64-byte size bucket
 * @tx_size_127_pkts: Transmitted packets in the 65-127 byte size bucket
 * @tx_size_255_pkts: Transmitted packets in the 128-255 byte size bucket
 * @tx_size_511_pkts: Transmitted packets in the 256-511 byte size bucket
 * @tx_size_1023_pkts: Transmitted packets in the 512-1023 byte size bucket
 * @tx_size_1518_pkts: Transmitted packets in the 1024-1518 byte size bucket
 * @tx_size_jumbo_pkts: Transmitted packets in the jumbo size bucket
 * @tx_xon_events: Tx XON events
 * @tx_xoff_events: Tx XOFF events
 * @tx_dropped_link_down_pkts: Packets dropped due to link down
 * @tx_total_pkts: Total transmitted packets
 * @tx_pad: Reserved for future Tx counters
 * @mac_local_faults: MAC local faults
 * @mac_remote_faults: MAC remote faults
 *
 * NOTE(review): size-bucket boundaries above follow the usual Intel Ethernet
 * counter convention — confirm against the device datasheet.
 */
struct virtchnl2_phy_port_stats {
	__le64 rx_bytes;
	__le64 rx_unicast_pkts;
	__le64 rx_multicast_pkts;
	__le64 rx_broadcast_pkts;
	__le64 rx_size_64_pkts;
	__le64 rx_size_127_pkts;
	__le64 rx_size_255_pkts;
	__le64 rx_size_511_pkts;
	__le64 rx_size_1023_pkts;
	__le64 rx_size_1518_pkts;
	__le64 rx_size_jumbo_pkts;
	__le64 rx_xon_events;
	__le64 rx_xoff_events;
	__le64 rx_undersized_pkts;
	__le64 rx_fragmented_pkts;
	__le64 rx_oversized_pkts;
	__le64 rx_jabber_pkts;
	__le64 rx_csum_errors;
	__le64 rx_length_errors;
	__le64 rx_dropped_pkts;
	__le64 rx_crc_errors;
	/* Frames with length < 64 and a bad CRC */
	__le64 rx_runt_errors;
	__le64 rx_illegal_bytes;
	__le64 rx_total_pkts;
	u8 rx_pad[128];

	__le64 tx_bytes;
	__le64 tx_unicast_pkts;
	__le64 tx_multicast_pkts;
	__le64 tx_broadcast_pkts;
	__le64 tx_errors;
	__le64 tx_timeout_events;
	__le64 tx_size_64_pkts;
	__le64 tx_size_127_pkts;
	__le64 tx_size_255_pkts;
	__le64 tx_size_511_pkts;
	__le64 tx_size_1023_pkts;
	__le64 tx_size_1518_pkts;
	__le64 tx_size_jumbo_pkts;
	__le64 tx_xon_events;
	__le64 tx_xoff_events;
	__le64 tx_dropped_link_down_pkts;
	__le64 tx_total_pkts;
	u8 tx_pad[128];
	__le64 mac_local_faults;
	__le64 mac_remote_faults;
};

VIRTCHNL2_CHECK_STRUCT_LEN(600, virtchnl2_phy_port_stats);
1572
/**
 * struct virtchnl2_port_stats - Port statistics
 * @vport_id: Vport ID
 * @pad: Padding
 * @phy_port_stats: Physical port statistics
 * @virt_port_stats: Vport statistics
 *
 * PF/VF sends this message to CP to get the port statistics by specifying the
 * vport_id. CP responds with stats in struct virtchnl2_port_stats that
 * includes both physical port as well as vport statistics.
 *
 * Associated with VIRTCHNL2_OP_GET_PORT_STATS.
 */
struct virtchnl2_port_stats {
	__le32 vport_id;
	u8 pad[4];

	struct virtchnl2_phy_port_stats phy_port_stats;
	struct virtchnl2_vport_stats virt_port_stats;
};

VIRTCHNL2_CHECK_STRUCT_LEN(736, virtchnl2_port_stats);
1594
/**
 * struct virtchnl2_event - Event info
 * @event: Event opcode. See enum virtchnl2_event_codes
 * @link_speed: Link speed provided in Mbps
 * @vport_id: Vport ID
 * @link_status: Link status
 * @pad: Padding
 * @adi_id: ADI id
 *
 * CP sends this message to inform the PF/VF driver of events that may affect
 * it. No direct response is expected from the driver, though it may generate
 * other messages in response to this one.
 *
 * Associated with VIRTCHNL2_OP_EVENT.
 */
struct virtchnl2_event {
	__le32 event;
	__le32 link_speed;
	__le32 vport_id;
	u8 link_status;
	u8 pad;
	__le16 adi_id;
};

VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
1620
/**
 * struct virtchnl2_rss_key - RSS key info
 * @vport_id: Vport id
 * @key_len: Length of RSS key
 * @pad: Padding
 * @key: RSS hash key, packed bytes
 *
 * PF/VF sends this message to get or set RSS key. Only supported if both
 * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
 * negotiation.
 *
 * NOTE(review): @key is declared as a 1-element array but carries key_len
 * bytes on the wire — effectively variable length; confirm why it does not
 * use STRUCT_VAR_LEN like other variable-length messages.
 *
 * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
 */
struct virtchnl2_rss_key {
	__le32 vport_id;
	__le16 key_len;
	u8 pad;
	u8 key[1];
};

VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);
1641
/**
 * struct virtchnl2_queue_chunk - Chunk of contiguous queues
 * @type: See enum virtchnl2_queue_type
 * @start_queue_id: Starting queue id
 * @num_queues: Number of queues in the chunk
 * @pad: Padding for future extensions
 */
struct virtchnl2_queue_chunk {
	__le32 type;
	__le32 start_queue_id;
	__le32 num_queues;
	u8 pad[4];
};

VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
1657
/**
 * struct virtchnl2_queue_chunks - Chunks of contiguous queues
 * @num_chunks: Number of chunks
 * @pad: Padding
 * @chunks: Chunks of contiguous queues info
 */
struct virtchnl2_queue_chunks {
	__le16 num_chunks;
	u8 pad[6];
	struct virtchnl2_queue_chunk chunks[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(24, virtchnl2_queue_chunks, chunks);
1669
/**
 * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info
 * @vport_id: Vport id
 * @pad: Padding
 * @chunks: Chunks of contiguous queues info
 *
 * PF/VF sends these messages to enable, disable or delete queues specified in
 * chunks. It sends virtchnl2_del_ena_dis_queues struct to specify the queues
 * to be enabled/disabled/deleted. Also applicable to single queue receive or
 * transmit. CP performs requested action and returns status.
 *
 * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and
 * VIRTCHNL2_OP_DEL_QUEUES.
 */
struct virtchnl2_del_ena_dis_queues {
	__le32 vport_id;
	u8 pad[4];

	struct virtchnl2_queue_chunks chunks;
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(32, virtchnl2_del_ena_dis_queues, chunks.chunks);
1691
/**
 * struct virtchnl2_queue_vector - Queue to vector mapping
 * @queue_id: Queue id
 * @vector_id: Vector id
 * @pad: Padding
 * @itr_idx: See enum virtchnl2_itr_idx
 * @queue_type: See enum virtchnl2_queue_type
 * @pad1: Padding for future extensions
 */
struct virtchnl2_queue_vector {
	__le32 queue_id;
	__le16 vector_id;
	u8 pad[2];

	__le32 itr_idx;

	__le32 queue_type;
	u8 pad1[8];
};

VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);
1713
/**
 * struct virtchnl2_queue_vector_maps - Map/unmap queues info
 * @vport_id: Vport id
 * @num_qv_maps: Number of queue vector maps
 * @pad: Padding
 * @qv_maps: Queue to vector maps
 *
 * PF/VF sends this message to map or unmap queues to vectors and interrupt
 * throttling rate index registers. External data buffer contains
 * virtchnl2_queue_vector_maps structure that contains num_qv_maps of
 * virtchnl2_queue_vector structures. CP maps the requested queue vector maps
 * after validating the queue and vector ids and returns a status code.
 *
 * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and
 * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR.
 */
struct virtchnl2_queue_vector_maps {
	__le32 vport_id;
	__le16 num_qv_maps;
	u8 pad[10];

	struct virtchnl2_queue_vector qv_maps[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(40, virtchnl2_queue_vector_maps, qv_maps);
1738
/**
 * struct virtchnl2_loopback - Loopback info
 * @vport_id: Vport id
 * @enable: Enable/disable (1 = enable loopback, 0 = disable)
 * @pad: Padding for future extensions
 *
 * PF/VF sends this message to transition to/from the loopback state. Setting
 * the 'enable' to 1 enables the loopback state and setting 'enable' to 0
 * disables it. CP configures the state to loopback and returns status.
 *
 * Associated with VIRTCHNL2_OP_LOOPBACK.
 */
struct virtchnl2_loopback {
	__le32 vport_id;
	u8 enable;
	u8 pad[3];
};

VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);
1758
/**
 * struct virtchnl2_mac_addr - MAC address info
 * @addr: MAC address
 * @type: MAC type. See enum virtchnl2_mac_addr_type.
 * @pad: Padding for future extensions
 */
struct virtchnl2_mac_addr {
	u8 addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
	u8 type;
	u8 pad;
};

VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);
1771
/**
 * struct virtchnl2_mac_addr_list - List of MAC addresses
 * @vport_id: Vport id
 * @num_mac_addr: Number of MAC addresses
 * @pad: Padding
 * @mac_addr_list: List with MAC address info, num_mac_addr entries
 *
 * PF/VF driver uses this structure to send list of MAC addresses to be
 * added/deleted to the CP whereas CP performs the action and returns the
 * status.
 *
 * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR.
 */
struct virtchnl2_mac_addr_list {
	__le32 vport_id;
	__le16 num_mac_addr;
	u8 pad[2];

	struct virtchnl2_mac_addr mac_addr_list[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(16, virtchnl2_mac_addr_list, mac_addr_list);
1793
/**
 * struct virtchnl2_promisc_info - Promiscuous type information
 * @vport_id: Vport id
 * @flags: See enum virtchnl2_promisc_flags
 * @pad: Padding for future extensions
 *
 * PF/VF sends vport id and flags to the CP whereas CP performs the action
 * and returns the status.
 *
 * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE.
 */
struct virtchnl2_promisc_info {
	__le32 vport_id;
	__le16 flags;
	u8 pad[2];
};

VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
1812
/**
 * enum virtchnl2_ptp_caps - PTP capabilities
 * @VIRTCHNL2_PTP_CAP_LEGACY_CROSS_TIME: Legacy cross time registers are
 *					 available (see
 *					 struct virtchnl2_ptp_legacy_cross_time_reg)
 * @VIRTCHNL2_PTP_CAP_PTM: PTM cross time registers are available (see
 *			   struct virtchnl2_ptp_ptm_cross_time_reg)
 * @VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL: Device clock control registers are
 *					    available (see
 *					    struct virtchnl2_ptp_device_clock_control)
 * @VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECT: TX timestamps can be read directly
 *					 via latch registers (see
 *					 struct virtchnl2_ptp_tx_tstamp)
 * @VIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL: TX timestamps are delivered over
 *					   virtchnl messages (see
 *					   VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES)
 */
enum virtchnl2_ptp_caps {
	VIRTCHNL2_PTP_CAP_LEGACY_CROSS_TIME	= BIT(0),
	VIRTCHNL2_PTP_CAP_PTM			= BIT(1),
	VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL	= BIT(2),
	VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECT	= BIT(3),
	VIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL	= BIT(4),
};
1823
/**
 * struct virtchnl2_ptp_legacy_cross_time_reg - Legacy cross time registers
 *						offsets.
 * @shadow_time_0: Offset of the shadow time 0 register
 * @shadow_time_l: Offset of the shadow time low register
 * @shadow_time_h: Offset of the shadow time high register
 * @cmd_sync: Offset of the command sync register
 */
struct virtchnl2_ptp_legacy_cross_time_reg {
	__le32 shadow_time_0;
	__le32 shadow_time_l;
	__le32 shadow_time_h;
	__le32 cmd_sync;
};

VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_legacy_cross_time_reg);
1836
/**
 * struct virtchnl2_ptp_ptm_cross_time_reg - PTM cross time registers offsets
 * @art_l: Offset of the ART time low register
 * @art_h: Offset of the ART time high register
 * @cmd_sync: Offset of the command sync register
 * @pad: Padding
 */
struct virtchnl2_ptp_ptm_cross_time_reg {
	__le32 art_l;
	__le32 art_h;
	__le32 cmd_sync;
	u8 pad[4];
};

VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_ptm_cross_time_reg);
1848
/**
 * struct virtchnl2_ptp_device_clock_control - Registers needed to control the
 *					       main clock.
 * @cmd: Clock command register
 * @incval_l: Increment value low register
 * @incval_h: Increment value high register
 * @shadj_l: Shadow adjust low register
 * @shadj_h: Shadow adjust high register
 * @pad: Padding
 *
 * NOTE(review): sibling PTP structs carry register offsets; these fields
 * presumably are offsets too — confirm against the CP spec.
 */
struct virtchnl2_ptp_device_clock_control {
	__le32 cmd;
	__le32 incval_l;
	__le32 incval_h;
	__le32 shadj_l;
	__le32 shadj_h;
	u8 pad[4];
};

VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_device_clock_control);
1863
/**
 * struct virtchnl2_ptp_tx_tstamp_entry - PTP TX timestamp entry
 * @tx_latch_register_base: TX latch register base
 * @tx_latch_register_offset: TX latch register offset
 * @index: Index of the latch entry
 * @pad: Padding
 */
struct virtchnl2_ptp_tx_tstamp_entry {
	__le32 tx_latch_register_base;
	__le32 tx_latch_register_offset;
	u8 index;
	u8 pad[7];
};

VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_entry);
1879
/**
 * struct virtchnl2_ptp_tx_tstamp - Structure that defines tx tstamp entries
 * @num_latches: Total number of latches
 * @latch_size: Latch size expressed in bits
 * @pad: Padding
 * @ptp_tx_tstamp_entries: Array of TX timestamp entries, num_latches entries
 */
struct virtchnl2_ptp_tx_tstamp {
	__le16 num_latches;
	__le16 latch_size;
	u8 pad[4];
	struct virtchnl2_ptp_tx_tstamp_entry ptp_tx_tstamp_entries[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(24, virtchnl2_ptp_tx_tstamp,
			       ptp_tx_tstamp_entries);
1895
/**
 * struct virtchnl2_get_ptp_caps - Get PTP capabilities
 * @ptp_caps: PTP capability bitmap. See enum virtchnl2_ptp_caps.
 * @pad: Padding
 * @legacy_cross_time_reg: Legacy cross time register
 * @ptm_cross_time_reg: PTM cross time register
 * @device_clock_control: Device clock control
 * @tx_tstamp: TX timestamp
 *
 * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
 * with supported features and fulfills appropriate structures.
 *
 * Associated with VIRTCHNL2_OP_GET_PTP_CAPS.
 */
struct virtchnl2_get_ptp_caps {
	__le32 ptp_caps;
	u8 pad[4];

	struct virtchnl2_ptp_legacy_cross_time_reg legacy_cross_time_reg;
	struct virtchnl2_ptp_ptm_cross_time_reg ptm_cross_time_reg;
	struct virtchnl2_ptp_device_clock_control device_clock_control;
	struct virtchnl2_ptp_tx_tstamp tx_tstamp;
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(88, virtchnl2_get_ptp_caps,
			       tx_tstamp.ptp_tx_tstamp_entries);
1921
/**
 * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes tx tstamp
 *					  values, index and validity.
 * @tstamp_h: Timestamp high
 * @tstamp_l: Timestamp low
 * @index: Index of the latch
 * @valid: Timestamp validity
 * @pad: Padding
 */
struct virtchnl2_ptp_tx_tstamp_latch {
	__le32 tstamp_h;
	__le32 tstamp_l;
	u8 index;
	u8 valid;
	u8 pad[6];
};

VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
1940
/**
 * struct virtchnl2_ptp_tx_tstamp_latches - PTP TX timestamp latches
 * @num_latches: Number of latches
 * @latch_size: Latch size expressed in bits
 * @pad: Padding
 * @tstamp_latches: PTP TX timestamp latches, num_latches entries
 *
 * PF/VF sends this message to receive a specified number of timestamps
 * entries.
 *
 * Associated with VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES.
 */
struct virtchnl2_ptp_tx_tstamp_latches {
	__le16 num_latches;
	__le16 latch_size;
	u8 pad[4];
	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[STRUCT_VAR_LEN];
};
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(24, virtchnl2_ptp_tx_tstamp_latches,
			       tstamp_latches);
1961
/**
 * virtchnl2_op_str - Translate a virtchnl2 opcode into its string name
 * @v_opcode: Opcode (VIRTCHNL2_OP_*) to translate
 *
 * Return: constant string with the symbolic name of a known opcode, or a
 * placeholder string asking for this table to be extended otherwise.
 */
static inline const char *virtchnl2_op_str(__le32 v_opcode)
{
/* Expands to 'case <op>: return "<op>";' so each name is spelled once. */
#define VIRTCHNL2_OP_STR_CASE(op)	case op: return #op
	switch (v_opcode) {
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_VERSION);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_GET_CAPS);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_CREATE_VPORT);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_DESTROY_VPORT);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_ENABLE_VPORT);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_DISABLE_VPORT);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_CONFIG_TX_QUEUES);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_CONFIG_RX_QUEUES);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_ENABLE_QUEUES);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_DISABLE_QUEUES);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_ADD_QUEUES);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_DEL_QUEUES);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_MAP_QUEUE_VECTOR);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_GET_RSS_KEY);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_SET_RSS_KEY);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_GET_RSS_LUT);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_SET_RSS_LUT);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_GET_RSS_HASH);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_SET_RSS_HASH);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_SET_SRIOV_VFS);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_ALLOC_VECTORS);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_DEALLOC_VECTORS);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_GET_PTYPE_INFO);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_GET_STATS);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_EVENT);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_RESET_VF);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_NON_FLEX_CREATE_ADI);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_ADD_QUEUE_GROUPS);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_DEL_QUEUE_GROUPS);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_GET_PORT_STATS);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_GET_PTP_CAPS);
	VIRTCHNL2_OP_STR_CASE(VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES);
	default:
		return "Unsupported (update virtchnl2.h)";
	}
#undef VIRTCHNL2_OP_STR_CASE
}
2037
#endif /* _VIRTCHNL2_H_ */
2039