/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc.  All rights reserved.
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_flow_driver.h>
#include <rte_kvargs.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_enet.h"
#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"

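/*
 * Rx/Tx burst handlers. The representor data path reuses the PF driver's
 * burst functions on the queues reserved for this representor, so these
 * wrappers simply delegate to enic_recv_pkts()/enic_xmit_pkts().
 */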
static uint16_t enic_vf_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
}

static uint16_t enic_vf_xmit_pkts(void *tx_queue,
				  struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
}

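/*
 * The representor does not own vNIC resources of its own. Its single Tx
 * queue maps to a WQ reserved out of the PF's pool (vf->pf_wq_idx), which
 * is set up here via enic_alloc_wq() on the representor's enic instance.
 */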
static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	struct enic_vf_representor *vf;
	struct vnic_wq *wq;
	struct enic *pf;
	int err;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	wq = &pf->wq[vf->pf_wq_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[0] = (void *)wq;
	/* Pass vf not pf because of cq index calculation. See enic_alloc_wq */
	err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
	if (err) {
		ENICPMD_LOG(ERR, "error in allocating wq");
		return err;
	}
	return 0;
}

static void enic_vf_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_wq(txq);
}

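/*
 * Like the Tx queue, the single Rx queue maps to the start-of-packet (sop)
 * RQ reserved from the PF (vf->pf_rq_sop_idx), with a companion data RQ
 * when in use. enic_alloc_rq() sets them up on the representor's enic
 * instance.
 */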
static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	struct enic_vf_representor *vf;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&pf->rq[vf->pf_rq_sop_idx];
	ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		ENICPMD_LOG(ERR, "error in allocating rq");
		return ret;
	}
	return 0;
}

static void enic_vf_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_rq(rxq);
}

static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	return 0;
}

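/*
 * Install the two implicit flows that stitch the representor to its VF:
 * one steers traffic from the representor's WQ to the VF, the other loops
 * VF traffic back to the representor unless a user-added flow matches it.
 */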
static int
setup_rep_vf_fwd(struct enic_vf_representor *vf)
{
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Representor -> VF rule
	 * Egress packets from this representor are on the representor's WQ.
	 * So, loop back that WQ to VF.
	 */
	ret = enic_fm_add_rep2vf_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create representor->VF flow");
		return ret;
	}
	/* VF -> representor rule
	 * Packets from VF loop back to the representor, unless they match
	 * user-added flows.
	 */
	ret = enic_fm_add_vf2rep_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create VF->representor flow");
		return ret;
	}
	return 0;
}

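/*
 * dev_start: initialize flowman for the representor, install the implicit
 * representor<->VF flows, clear the VF's packet filters, then initialize
 * and enable the WQ, RQs, and CQs reserved from the PF for this representor.
 */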
static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *data_rq;
	int index, cq_idx;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/* Get representor flowman for flow API and representor path */
	ret = enic_fm_init(&vf->enic);
	if (ret)
		return ret;
	/* Set up implicit flow rules to forward between representor and VF */
	ret = setup_rep_vf_fwd(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot set up representor-VF flows");
		return ret;
	}
	/* Remove all packet filters so no ingress packets go to VF.
	 * When PF enables switchdev, it will ensure packet filters
	 * are removed.  So, this is not technically needed.
	 */
	ENICPMD_LOG(DEBUG, "Clear packet filters");
	ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot clear packet filters");
		return ret;
	}

	/* Start WQ: see enic_init_vnic_resources */
	index = vf->pf_wq_idx;
	cq_idx = vf->pf_wq_cq_idx;
	vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     0 /* cq_entry_enable */,
		     1 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     (uint64_t)pf->wq[index].cqmsg_rz->iova);
	/* enic_start_wq */
	vnic_wq_enable(&pf->wq[index]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

	/* Start RQ: see enic_init_vnic_resources */
	index = vf->pf_rq_sop_idx;
	cq_idx = enic_cq_rq(vf->pf, index);
	vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
	data_rq = &pf->rq[vf->pf_rq_data_idx];
	if (data_rq->in_use)
		vnic_rq_init(data_rq, cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0,
		     0 /* cq_message_addr */);
	/* enic_enable */
	ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
	if (ret) {
		ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs");
		return ret;
	}
	ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
	if (ret) {
		/* Release the allocated mbufs for the sop rq */
		enic_rxmbuf_queue_release(pf, &pf->rq[index]);
		ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs");
		return ret;
	}
	enic_start_rq(pf, vf->pf_rq_sop_idx);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

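/*
 * dev_stop: the reverse of dev_start. Disable and clean the WQ, RQs, and
 * CQs borrowed from the PF, mark the queues stopped, and clean up the
 * representor's flowman instance.
 */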
static int enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *rq;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	/* Undo dev_start. Disable/clean WQ */
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
	vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
	vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
	/* Disable/clean RQ */
	rq = &pf->rq[vf->pf_rq_sop_idx];
	vnic_rq_disable(rq);
	vnic_rq_clean(rq, enic_free_rq_buf);
	rq = &pf->rq[vf->pf_rq_data_idx];
	if (rq->in_use) {
		vnic_rq_disable(rq);
		vnic_rq_clean(rq, enic_free_rq_buf);
	}
	vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Clean up representor flowman */
	enic_fm_destroy(&vf->enic);

	return 0;
}

/*
 * "close" is a no-op for now and solely exists so that rte_eth_dev_close()
 * can finish its own cleanup without errors.
 */
static int enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	return 0;
}

static int
adjust_flow_attr(const struct rte_flow_attr *attrs,
		 struct rte_flow_attr *vf_attrs,
		 struct rte_flow_error *error)
{
	if (!attrs) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "no attribute specified");
	}
	/*
	 * Swap ingress and egress as the firmware view of direction
	 * is the opposite of the representor.
	 */
	*vf_attrs = *attrs;
	if (attrs->ingress && !attrs->egress) {
		vf_attrs->ingress = 0;
		vf_attrs->egress = 1;
		return 0;
	}
	return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
			"representor only supports ingress");
}

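/*
 * rte_flow ops for the representor. Each op adjusts the flow attributes
 * with adjust_flow_attr() where needed and then delegates to the PF
 * flowman flow ops (enic_fm_flow_ops).
 */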
static int
enic_vf_flow_validate(struct rte_eth_dev *dev,
		      const struct rte_flow_attr *attrs,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;
	int ret;

	ret = adjust_flow_attr(attrs, &vf_attrs, error);
	if (ret)
		return ret;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.validate(dev, attrs, pattern, actions, error);
}

static struct rte_flow *
enic_vf_flow_create(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attrs,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;

	if (adjust_flow_attr(attrs, &vf_attrs, error))
		return NULL;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.create(dev, attrs, pattern, actions, error);
}

static int
enic_vf_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	return enic_fm_flow_ops.destroy(dev, flow, error);
}

static int
enic_vf_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *actions,
		   void *data,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.query(dev, flow, actions, data, error);
}

static int
enic_vf_flow_flush(struct rte_eth_dev *dev,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.flush(dev, error);
}

static const struct rte_flow_ops enic_vf_flow_ops = {
	.validate = enic_vf_flow_validate,
	.create = enic_vf_flow_create,
	.destroy = enic_vf_flow_destroy,
	.flush = enic_vf_flow_flush,
	.query = enic_vf_flow_query,
};

static int
enic_vf_flow_ops_get(struct rte_eth_dev *eth_dev,
		     const struct rte_flow_ops **ops)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	if (vf->enic.flow_filter_mode != FILTER_FLOWMAN) {
		ENICPMD_LOG(WARNING,
			    "VF representors require flowman support for rte_flow API");
		return -EINVAL;
	}

	*ops = &enic_vf_flow_ops;
	return 0;
}

static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
	int wait_to_complete __rte_unused)
{
	struct enic_vf_representor *vf;
	struct rte_eth_link link;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/*
	 * Link status and speed are same as PF. Update PF status and then
	 * copy it to VF.
	 */
	enic_link_update(pf->rte_dev);
	rte_eth_linkstatus_get(pf->rte_dev, &link);
	rte_eth_linkstatus_set(eth_dev, &link);
	return 0;
}

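/*
 * VF stats are read from the VF's vNIC through the proxy devcmd channel
 * (vf->enic.vdev) registered at init time, then mapped into the
 * rte_eth_stats fields.
 */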
static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic_vf_representor *vf;
	struct vnic_stats *vs;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Get VF stats via PF */
	err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting stats");
		return err;
	}
	stats->ipackets = vs->rx.rx_frames_ok;
	stats->opackets = vs->tx.tx_frames_ok;
	stats->ibytes = vs->rx.rx_bytes_ok;
	stats->obytes = vs->tx.tx_bytes_ok;
	stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
	stats->oerrors = vs->tx.tx_errors;
	stats->imissed = vs->rx.rx_no_bufs;
	return 0;
}

static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Ask PF to clear VF stats */
	err = vnic_dev_stats_clear(vf->enic.vdev);
	if (err)
		ENICPMD_LOG(ERR, "error in clearing stats");
	return err;
}

static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic_vf_representor *vf;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
	device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* Max packet size is same as PF */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	/* No offload capa, RSS, etc. until Tx/Rx handlers are added */
	device_info->rx_offload_capa = 0;
	device_info->tx_offload_capa = 0;
	device_info->switch_info.name = pf->rte_dev->device->name;
	device_info->switch_info.domain_id = vf->switch_domain_id;
	device_info->switch_info.port_id = vf->vf_id;
	return 0;
}

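/*
 * The promiscuous/allmulticast handlers below only record the requested
 * state in the representor and push the VF's packet filter via proxy
 * devcmd. In switchdev mode the filter is not touched (packet filters are
 * ignored).
 */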
static void set_vf_packet_filter(struct enic_vf_representor *vf)
{
	/* switchdev: packet filters are ignored */
	if (vf->enic.switchdev_mode)
		return;
	/* Ask PF to apply filters on VF */
	vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
		1 /* bcast */, vf->promisc, vf->allmulti);
}

static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 0;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 0;
	set_vf_packet_filter(vf);
	return 0;
}

/*
 * A minimal set of handlers.
 * The representor can get/set a small set of VF settings via "proxy" devcmd.
 * With proxy devcmd, the PF driver basically tells the VIC firmware to
 * "perform this devcmd on that VF".
 */
static const struct eth_dev_ops enic_vf_representor_dev_ops = {
	.allmulticast_enable  = enic_vf_allmulticast_enable,
	.allmulticast_disable = enic_vf_allmulticast_disable,
	.dev_configure        = enic_vf_dev_configure,
	.dev_infos_get        = enic_vf_dev_infos_get,
	.dev_start            = enic_vf_dev_start,
	.dev_stop             = enic_vf_dev_stop,
	.dev_close            = enic_vf_dev_close,
	.flow_ops_get         = enic_vf_flow_ops_get,
	.link_update          = enic_vf_link_update,
	.promiscuous_enable   = enic_vf_promiscuous_enable,
	.promiscuous_disable  = enic_vf_promiscuous_disable,
	.stats_get            = enic_vf_stats_get,
	.stats_reset          = enic_vf_stats_reset,
	.rx_queue_setup       = enic_vf_dev_rx_queue_setup,
	.rx_queue_release     = enic_vf_dev_rx_queue_release,
	.tx_queue_setup       = enic_vf_dev_tx_queue_setup,
	.tx_queue_release     = enic_vf_dev_tx_queue_release,
};

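/*
 * Fetch the VF's vNIC configuration via proxy devcmd: its MAC address and
 * MTU. The reported MTU follows the VF vNIC MTU, raised toward the blade
 * switch port MTU when that is larger (but never above ENIC_MAX_MTU).
 */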
static int get_vf_config(struct enic_vf_representor *vf)
{
	struct vnic_enet_config *c;
	struct enic *pf;
	int switch_mtu;
	int err;

	c = &vf->config;
	pf = vf->pf;
	/* VF MAC */
	err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MAC address");
		return err;
	}
	rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);

	/* VF MTU per its vNIC setting */
	err = vnic_dev_spec(vf->enic.vdev,
			    offsetof(struct vnic_enet_config, mtu),
			    sizeof(c->mtu), &c->mtu);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MTU");
		return err;
	}
	/*
	 * Blade switch (fabric interconnect) port's MTU. Assume the kernel
	 * enic driver runs on the VF. That driver automatically adjusts its
	 * MTU according to the switch MTU.
	 */
	switch_mtu = vnic_dev_mtu(pf->vdev);
	vf->eth_dev->data->mtu = c->mtu;
	if (switch_mtu > c->mtu)
		vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
	return 0;
}

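/*
 * Per-representor init, invoked from the PF driver with a struct
 * enic_vf_representor as init params. It records the VF identity, reserves
 * WQ/RQ/CQ indexes out of the PF's pool, hooks up the representor ops and
 * burst handlers, registers the proxy vdev for devcmd, and copies the VF's
 * MAC, MTU, and link state into the new ethdev.
 */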
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
{
	struct enic_vf_representor *vf, *params;
	struct rte_pci_device *pdev;
	struct enic *pf, *vf_enic;
	struct rte_pci_addr *addr;
	int ret;

	ENICPMD_FUNC_TRACE();
	params = init_params;
	vf = eth_dev->data->dev_private;
	vf->switch_domain_id = params->switch_domain_id;
	vf->vf_id = params->vf_id;
	vf->eth_dev = eth_dev;
	vf->pf = params->pf;
	vf->allmulti = 1;
	vf->promisc = 0;
	pf = vf->pf;
	vf->enic.switchdev_mode = pf->switchdev_mode;
	/* Only switchdev is supported now */
	RTE_ASSERT(vf->enic.switchdev_mode);
	/* Allocate WQ, RQ, CQ for the representor */
	vf->pf_wq_idx = vf_wq_idx(vf);
	vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
	vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
	vf->pf_rq_data_idx = vf_rq_data_idx(vf);
	/* Remove these assertions once queue allocation has an easy-to-use
	 * allocator API instead of the index number calculations used
	 * throughout the driver.
	 */
	RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
		   vf->pf_rq_sop_idx);
	/* RX handlers use enic_cq_rq(sop) to get CQ, so do not save it */
	pf->vf_required_wq++;
	pf->vf_required_rq += 2; /* sop and data */
	pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
	ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
		vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx, vf->pf_rq_data_idx,
		vf->pf_wq_cq_idx, enic_cq_rq(pf, vf->pf_rq_sop_idx));
	if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
		ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
			    " >= number of RQs (%u) in CIMC or UCSM",
			    pf->conf_cq_count, pf->conf_rq_count);
		return -EINVAL;
	}

	/* Check for non-existent VFs */
	pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
	if (vf->vf_id >= pdev->max_vfs) {
		ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
			    vf->vf_id, pdev->max_vfs);
		return -ENODEV;
	}

	eth_dev->device->driver = pf->rte_dev->device->driver;
	eth_dev->dev_ops = &enic_vf_representor_dev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
					RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	eth_dev->data->representor_id = vf->vf_id;
	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
		sizeof(struct rte_ether_addr) *
		ENIC_UNICAST_PERFECT_FILTERS, 0);
	if (eth_dev->data->mac_addrs == NULL)
		return -ENOMEM;
	/* Use 1 RX queue and 1 TX queue for representor path */
	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;
	eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
	/* Initial link state copied from PF */
	eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
	/* Representor vdev to perform devcmd */
	vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
	if (vf->enic.vdev == NULL)
		return -ENOMEM;
	ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
	if (ret)
		return ret;
	/* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
	ret = get_vf_config(vf);
	if (ret)
		return ret;

	/*
	 * Calculate VF BDF. The firmware ensures that PF BDF is always
	 * bus:dev.0, and VF BDFs are dev.1, dev.2, and so on.
	 */
	vf->bdf = pdev->addr;
	vf->bdf.function += vf->vf_id + 1;
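	/*
	 * For example, with the PF at 0000:02:00.0, VF 0 would be
	 * 0000:02:00.1 and VF 1 would be 0000:02:00.2.
	 */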

	/* Copy a few fields used by enic_fm_flow */
	vf_enic = &vf->enic;
	vf_enic->switch_domain_id = vf->switch_domain_id;
	vf_enic->flow_filter_mode = pf->flow_filter_mode;
	vf_enic->rte_dev = eth_dev;
	vf_enic->dev_data = eth_dev->data;
	LIST_INIT(&vf_enic->flows);
	LIST_INIT(&vf_enic->memzone_list);
	rte_spinlock_init(&vf_enic->memzone_list_lock);
	addr = &vf->bdf;
	snprintf(vf_enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		 addr->domain, addr->bus, addr->devid, addr->function);
	return 0;
}

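/*
 * Per-representor teardown: unregister the proxy vdev that was created in
 * enic_vf_representor_init().
 */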
int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	vnic_dev_unregister(vf->enic.vdev);
	return 0;
}