/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc.  All rights reserved.
 */

#include <stdint.h>
#include <stdio.h>

#include <bus_pci_driver.h>
#include <rte_common.h>
#include <dev_driver.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_flow_driver.h>
#include <rte_kvargs.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_enet.h"
#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"

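/*
 * RX/TX burst handlers for the representor port. The representor has no
 * datapath of its own: it reuses the PF's enic_recv_pkts/enic_xmit_pkts on
 * a WQ/RQ pair reserved out of the PF vNIC for this representor (see
 * enic_vf_representor_init below).
 */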
static uint16_t enic_vf_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
}

static uint16_t enic_vf_xmit_pkts(void *tx_queue,
				  struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
}

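/*
 * Queue setup for the representor path. The representor exposes a single
 * TX and a single RX queue to the application; both map onto PF queues
 * (pf->wq[vf->pf_wq_idx], pf->rq[vf->pf_rq_sop_idx]) that were reserved
 * for this representor at init time.
 */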
static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	struct enic_vf_representor *vf;
	struct vnic_wq *wq;
	struct enic *pf;
	int err;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	wq = &pf->wq[vf->pf_wq_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[0] = (void *)wq;
	/* Pass vf not pf because of cq index calculation. See enic_alloc_wq */
	err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
	if (err) {
		ENICPMD_LOG(ERR, "error in allocating wq");
		return err;
	}
	return 0;
}

static void enic_vf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_wq(txq);
}

static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	struct enic_vf_representor *vf;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&pf->rq[vf->pf_rq_sop_idx];
	ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		ENICPMD_LOG(ERR, "error in allocating rq");
		return ret;
	}
	return 0;
}

static void enic_vf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_rq(rxq);
}

static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	return 0;
}

static int
setup_rep_vf_fwd(struct enic_vf_representor *vf)
{
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Representor -> VF rule
	 * Egress packets from this representor are on the representor's WQ.
	 * So, loop back that WQ to the VF.
	 */
	ret = enic_fm_add_rep2vf_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create representor->VF flow");
		return ret;
	}
	/* VF -> representor rule
	 * Packets from the VF loop back to the representor, unless they match
	 * user-added flows.
	 */
	ret = enic_fm_add_vf2rep_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create VF->representor flow");
		return ret;
	}
	return 0;
}

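/*
 * Start the representor port: bring up a flowman instance for this
 * representor, install the implicit representor<->VF forwarding rules,
 * clear the VF's packet filters, then initialize and enable the WQ/CQ and
 * RQ/CQ pair reserved for the representor, mirroring what
 * enic_init_vnic_resources() does for the PF.
 */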
static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *data_rq;
	int index, cq_idx;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/* Get representor flowman for flow API and representor path */
	ret = enic_fm_init(&vf->enic);
	if (ret)
		return ret;
	/* Set up implicit flow rules to forward between representor and VF */
	ret = setup_rep_vf_fwd(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot set up representor-VF flows");
		return ret;
	}
	/* Remove all packet filters so no ingress packets go to the VF.
	 * When the PF enables switchdev, it will ensure packet filters
	 * are removed, so this is not technically needed.
	 */
	ENICPMD_LOG(DEBUG, "Clear packet filters");
	ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot clear packet filters");
		return ret;
	}

	/* Start WQ: see enic_init_vnic_resources */
	index = vf->pf_wq_idx;
	cq_idx = vf->pf_wq_cq_idx;
	vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     0 /* cq_entry_enable */,
		     1 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     (uint64_t)pf->wq[index].cqmsg_rz->iova);
	/* enic_start_wq */
	vnic_wq_enable(&pf->wq[index]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

	/* Start RQ: see enic_init_vnic_resources */
	index = vf->pf_rq_sop_idx;
	cq_idx = enic_cq_rq(vf->pf, index);
	vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
	data_rq = &pf->rq[vf->pf_rq_data_idx];
	if (data_rq->in_use)
		vnic_rq_init(data_rq, cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);
	/* enic_enable */
	ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
	if (ret) {
		ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs");
		return ret;
	}
	ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
	if (ret) {
		/* Release the allocated mbufs for the sop rq */
		enic_rxmbuf_queue_release(pf, &pf->rq[index]);
		ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs");
		return ret;
	}
	enic_start_rq(pf, vf->pf_rq_sop_idx);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

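/*
 * Stop the representor port: undo dev_start in reverse order. Disable and
 * drain the representor's WQ, RQ(s), and CQs, then tear down the
 * representor's flowman instance.
 */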
static int enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *rq;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	/* Undo dev_start. Disable/clean WQ */
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
	vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
	vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
	/* Disable/clean RQ */
	rq = &pf->rq[vf->pf_rq_sop_idx];
	vnic_rq_disable(rq);
	vnic_rq_clean(rq, enic_free_rq_buf);
	rq = &pf->rq[vf->pf_rq_data_idx];
	if (rq->in_use) {
		vnic_rq_disable(rq);
		vnic_rq_clean(rq, enic_free_rq_buf);
	}
	vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Clean up representor flowman */
	enic_fm_destroy(&vf->enic);

	return 0;
}

/*
 * "close" is a no-op for now; it exists solely so that rte_eth_dev_close()
 * can finish its own cleanup without errors.
 */
static int enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	return 0;
}

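/*
 * The rte_flow attributes seen by the application are relative to the
 * representor, while the firmware expresses direction from the VF vNIC's
 * point of view, so the direction must be flipped: a rule created on the
 * representor with attr.ingress = 1 is handed to flowman with
 * attr.egress = 1. Only ingress rules are supported on the representor.
 */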
static int
adjust_flow_attr(const struct rte_flow_attr *attrs,
		 struct rte_flow_attr *vf_attrs,
		 struct rte_flow_error *error)
{
	if (!attrs) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "no attribute specified");
	}
	/*
	 * Swap ingress and egress as the firmware view of direction
	 * is the opposite of the representor.
	 */
	*vf_attrs = *attrs;
	if (attrs->ingress && !attrs->egress) {
		vf_attrs->ingress = 0;
		vf_attrs->egress = 1;
		return 0;
	}
	return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
			"representor only supports ingress");
}

static int
enic_vf_flow_validate(struct rte_eth_dev *dev,
		      const struct rte_flow_attr *attrs,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;
	int ret;

	ret = adjust_flow_attr(attrs, &vf_attrs, error);
	if (ret)
		return ret;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.validate(dev, attrs, pattern, actions, error);
}

static struct rte_flow *
enic_vf_flow_create(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attrs,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;

	if (adjust_flow_attr(attrs, &vf_attrs, error))
		return NULL;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.create(dev, attrs, pattern, actions, error);
}

static int
enic_vf_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	return enic_fm_flow_ops.destroy(dev, flow, error);
}

static int
enic_vf_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *actions,
		   void *data,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.query(dev, flow, actions, data, error);
}

static int
enic_vf_flow_flush(struct rte_eth_dev *dev,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.flush(dev, error);
}

static const struct rte_flow_ops enic_vf_flow_ops = {
	.validate = enic_vf_flow_validate,
	.create = enic_vf_flow_create,
	.destroy = enic_vf_flow_destroy,
	.flush = enic_vf_flow_flush,
	.query = enic_vf_flow_query,
};

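/*
 * Illustrative usage sketch (not part of the driver): with the ops above,
 * an application can steer traffic arriving on the representor using the
 * generic rte_flow API. rep_port_id below is a hypothetical name for the
 * representor's ethdev port id, and the ETH/QUEUE rule is only an example.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = rte_flow_create(rep_port_id, &attr, pattern,
 *						actions, &err);
 *
 * adjust_flow_attr() flips the direction before the rule reaches flowman,
 * and enic_vf_flow_ops_get() below only exposes these ops when the adapter
 * supports flowman.
 */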
static int
enic_vf_flow_ops_get(struct rte_eth_dev *eth_dev,
		     const struct rte_flow_ops **ops)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	if (vf->enic.flow_filter_mode != FILTER_FLOWMAN) {
		ENICPMD_LOG(WARNING,
				"VF representors require flowman support for rte_flow API");
		return -EINVAL;
	}

	*ops = &enic_vf_flow_ops;
	return 0;
}

static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
	int wait_to_complete __rte_unused)
{
	struct enic_vf_representor *vf;
	struct rte_eth_link link;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/*
	 * Link status and speed are same as PF. Update PF status and then
	 * copy it to VF.
	 */
	enic_link_update(pf->rte_dev);
	rte_eth_linkstatus_get(pf->rte_dev, &link);
	rte_eth_linkstatus_set(eth_dev, &link);
	return 0;
}

static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic_vf_representor *vf;
	struct vnic_stats *vs;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Get VF stats via PF */
	err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting stats");
		return err;
	}
	stats->ipackets = vs->rx.rx_frames_ok;
	stats->opackets = vs->tx.tx_frames_ok;
	stats->ibytes = vs->rx.rx_bytes_ok;
	stats->obytes = vs->tx.tx_bytes_ok;
	stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
	stats->oerrors = vs->tx.tx_errors;
	stats->imissed = vs->rx.rx_no_bufs;
	return 0;
}

static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Ask PF to clear VF stats */
	err = vnic_dev_stats_clear(vf->enic.vdev);
	if (err)
		ENICPMD_LOG(ERR, "error in clearing stats");
	return err;
}

static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic_vf_representor *vf;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
	device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* Max packet size is same as PF */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	/* No offload capa, RSS, etc. until Tx/Rx handlers are added */
	device_info->rx_offload_capa = 0;
	device_info->tx_offload_capa = 0;
	device_info->switch_info.name = pf->rte_dev->device->name;
	device_info->switch_info.domain_id = vf->switch_domain_id;
	device_info->switch_info.port_id = vf->vf_id;
	return 0;
}

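/*
 * Packet filter (promiscuous/allmulticast) handlers. The representor asks
 * the PF to apply the filter settings on the VF via proxy devcmd. In
 * switchdev mode packet filters are ignored, so set_vf_packet_filter() is
 * a no-op there.
 */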
static void set_vf_packet_filter(struct enic_vf_representor *vf)
{
	/* switchdev: packet filters are ignored */
	if (vf->enic.switchdev_mode)
		return;
	/* Ask PF to apply filters on VF */
	vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
		1 /* bcast */, vf->promisc, vf->allmulti);
}

static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 0;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 0;
	set_vf_packet_filter(vf);
	return 0;
}

/*
 * A minimal set of handlers.
 * The representor can get/set a small set of VF settings via "proxy" devcmd.
 * With proxy devcmd, the PF driver basically tells the VIC firmware to
 * "perform this devcmd on that VF".
 */
static const struct eth_dev_ops enic_vf_representor_dev_ops = {
	.allmulticast_enable  = enic_vf_allmulticast_enable,
	.allmulticast_disable = enic_vf_allmulticast_disable,
	.dev_configure        = enic_vf_dev_configure,
	.dev_infos_get        = enic_vf_dev_infos_get,
	.dev_start            = enic_vf_dev_start,
	.dev_stop             = enic_vf_dev_stop,
	.dev_close            = enic_vf_dev_close,
	.flow_ops_get         = enic_vf_flow_ops_get,
	.link_update          = enic_vf_link_update,
	.promiscuous_enable   = enic_vf_promiscuous_enable,
	.promiscuous_disable  = enic_vf_promiscuous_disable,
	.stats_get            = enic_vf_stats_get,
	.stats_reset          = enic_vf_stats_reset,
	.rx_queue_setup       = enic_vf_dev_rx_queue_setup,
	.rx_queue_release     = enic_vf_dev_rx_queue_release,
	.tx_queue_setup       = enic_vf_dev_tx_queue_setup,
	.tx_queue_release     = enic_vf_dev_tx_queue_release,
};

static int get_vf_config(struct enic_vf_representor *vf)
{
	struct vnic_enet_config *c;
	struct enic *pf;
	int switch_mtu;
	int err;

	c = &vf->config;
	pf = vf->pf;
	/* VF MAC */
	err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MAC address");
		return err;
	}
	rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);

	/* VF MTU per its vNIC setting */
	err = vnic_dev_spec(vf->enic.vdev,
			    offsetof(struct vnic_enet_config, mtu),
			    sizeof(c->mtu), &c->mtu);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MTU");
		return err;
	}
	/*
	 * Blade switch (fabric interconnect) port's MTU. Assume the kernel
	 * enic driver runs on VF. That driver automatically adjusts its MTU
	 * according to the switch MTU.
	 */
	switch_mtu = vnic_dev_mtu(pf->vdev);
	vf->eth_dev->data->mtu = c->mtu;
	if (switch_mtu > c->mtu)
		vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
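	/*
	 * For example (illustrative numbers): if the VF vNIC reports MTU
	 * 1500 but the switch port MTU is 9000, report
	 * RTE_MIN(ENIC_MAX_MTU, 9000), trusting the VF's driver to follow
	 * the switch MTU.
	 */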
	return 0;
}

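/*
 * Per-representor init, invoked for each representor ethdev created by the
 * PF (init_params carries the PF handle, VF id, and switch domain id). It
 * reserves PF queue resources for the representor path, registers a proxy
 * vnic_dev for devcmd on the VF, and copies the VF's MAC/MTU into the
 * ethdev.
 */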
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
{
	struct enic_vf_representor *vf, *params;
	struct rte_pci_device *pdev;
	struct enic *pf, *vf_enic;
	struct rte_pci_addr *addr;
	int ret;

	ENICPMD_FUNC_TRACE();
	params = init_params;
	vf = eth_dev->data->dev_private;
	vf->switch_domain_id = params->switch_domain_id;
	vf->vf_id = params->vf_id;
	vf->eth_dev = eth_dev;
	vf->pf = params->pf;
	vf->allmulti = 1;
	vf->promisc = 0;
	pf = vf->pf;
	vf->enic.switchdev_mode = pf->switchdev_mode;
	/* Only switchdev is supported now */
	RTE_ASSERT(vf->enic.switchdev_mode);
	/* Allocate WQ, RQ, CQ for the representor */
	vf->pf_wq_idx = vf_wq_idx(vf);
	vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
	vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
	vf->pf_rq_data_idx = vf_rq_data_idx(vf);
	/* Remove these assertions once queue allocation has an easy-to-use
	 * allocator API instead of the index number calculations used
	 * throughout the driver.
	 */
	RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
		   vf->pf_rq_sop_idx);
	/* RX handlers use enic_cq_rq(sop) to get CQ, so do not save it */
	pf->vf_required_wq++;
	pf->vf_required_rq += 2; /* sop and data */
	pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
	ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
		vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx, vf->pf_rq_data_idx,
		vf->pf_wq_cq_idx, enic_cq_rq(pf, vf->pf_rq_sop_idx));
	if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
		ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
			    " >= number of RQs (%u) in CIMC or UCSM",
			    pf->conf_cq_count, pf->conf_rq_count);
		return -EINVAL;
	}

	/* Check for non-existent VFs */
	pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
	if (vf->vf_id >= pdev->max_vfs) {
		ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
			    vf->vf_id, pdev->max_vfs);
		return -ENODEV;
	}

	eth_dev->device->driver = pf->rte_dev->device->driver;
	eth_dev->dev_ops = &enic_vf_representor_dev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	eth_dev->data->representor_id = vf->vf_id;
	eth_dev->data->backer_port_id = pf->port_id;
	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
		sizeof(struct rte_ether_addr) *
		ENIC_UNICAST_PERFECT_FILTERS, 0);
	if (eth_dev->data->mac_addrs == NULL)
		return -ENOMEM;
	/* Use 1 RX queue and 1 TX queue for representor path */
	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;
	eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
	/* Initial link state copied from PF */
	eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
	/* Representor vdev to perform devcmd */
	vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
	if (vf->enic.vdev == NULL)
		return -ENOMEM;
	ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
	if (ret)
		return ret;
	/* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
	ret = get_vf_config(vf);
	if (ret)
		return ret;

	/*
	 * Calculate the VF BDF. The firmware ensures that the PF BDF is
	 * always bus:dev.0, and VF BDFs are dev.1, dev.2, and so on.
	 */
	vf->bdf = pdev->addr;
	vf->bdf.function += vf->vf_id + 1;
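	/*
	 * E.g. (hypothetical address): with the PF at 0000:62:00.0, VF 0's
	 * representor uses 0000:62:00.1, VF 1's uses 0000:62:00.2, and so on.
	 */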

	/* Copy a few fields used by enic_fm_flow */
	vf_enic = &vf->enic;
	vf_enic->switch_domain_id = vf->switch_domain_id;
	vf_enic->flow_filter_mode = pf->flow_filter_mode;
	vf_enic->rte_dev = eth_dev;
	vf_enic->dev_data = eth_dev->data;
	LIST_INIT(&vf_enic->flows);
	LIST_INIT(&vf_enic->memzone_list);
	rte_spinlock_init(&vf_enic->memzone_list_lock);
	addr = &vf->bdf;
	snprintf(vf_enic->bdf_name, PCI_PRI_STR_SIZE, PCI_PRI_FMT,
		 addr->domain, addr->bus, addr->devid, addr->function);
	return 0;
}

int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	vnic_dev_unregister(vf->enic.vdev);
	return 0;
}