/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdint.h>

#include <rte_flow_driver.h>
#include <rte_flow.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>

#include "efx.h"

#include "sfc_log.h"
#include "sfc_debug.h"
#include "sfc_repr.h"
#include "sfc_ethdev_state.h"
#include "sfc_repr_proxy_api.h"
#include "sfc_switch.h"
#include "sfc_dp_tx.h"

/** Multi-process shared representor private data */
struct sfc_repr_shared {
	uint16_t		pf_port_id;
	uint16_t		repr_id;
	uint16_t		switch_domain_id;
	uint16_t		switch_port_id;
};

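/*
 * Per-queue packet and byte counters. They are updated on the datapath
 * with sfc_pkts_bytes_add() and read with sfc_pkts_bytes_get() to obtain
 * a consistent snapshot of both values.
 */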
struct sfc_repr_queue_stats {
	union sfc_pkts_bytes		packets_bytes;
};

struct sfc_repr_rxq {
	/* Datapath members */
	struct rte_ring			*ring;
	struct sfc_repr_queue_stats	stats;
};

struct sfc_repr_txq {
	/* Datapath members */
	struct rte_ring			*ring;
	efx_mport_id_t			egress_mport;
	struct sfc_repr_queue_stats	stats;
};

/** Primary process representor private data */
struct sfc_repr {
	/**
	 * PMD setup and configuration is not thread safe. Since it is not
	 * performance sensitive, it is better to guarantee thread-safety
	 * and add a device-level lock. Adapter control operations which
	 * change its state should acquire the lock.
	 */
	rte_spinlock_t			lock;
	enum sfc_ethdev_state		state;
};

#define sfcr_err(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(ERR, __VA_ARGS__);			\
	} while (0)

#define sfcr_warn(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(WARNING, __VA_ARGS__);			\
	} while (0)

#define sfcr_info(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(INFO,					\
				RTE_FMT("%s() "				\
				RTE_FMT_HEAD(__VA_ARGS__ ,),		\
				__func__,				\
				RTE_FMT_TAIL(__VA_ARGS__ ,)));		\
	} while (0)
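/*
 * sfcr_info() prepends the calling function name to the message;
 * e.g. sfcr_info(sr, "entry") in sfc_repr_dev_start() is logged as
 * "sfc_repr_dev_start() entry".
 */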

static inline struct sfc_repr_shared *
sfc_repr_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
{
	struct sfc_repr_shared *srs = eth_dev->data->dev_private;

	return srs;
}

static inline struct sfc_repr *
sfc_repr_by_eth_dev(struct rte_eth_dev *eth_dev)
{
	struct sfc_repr *sr = eth_dev->process_private;

	return sr;
}

/*
 * Wrapper macros to acquire/release the lock, so that the lock can be
 * removed or changed in one place.
 */

#define sfc_repr_lock_init(sr) rte_spinlock_init(&(sr)->lock)
#define sfc_repr_lock_is_locked(sr) rte_spinlock_is_locked(&(sr)->lock)
#define sfc_repr_lock(sr) rte_spinlock_lock(&(sr)->lock)
#define sfc_repr_unlock(sr) rte_spinlock_unlock(&(sr)->lock)
#define sfc_repr_lock_fini(sr) RTE_SET_USED(sr)

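/*
 * Typical usage of the lock wrappers:
 *
 *	sfc_repr_lock(sr);
 *	ret = sfc_repr_start(dev);
 *	sfc_repr_unlock(sr);
 */
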
static void
sfc_repr_rx_queue_stop(void *queue)
{
	struct sfc_repr_rxq *rxq = queue;

	if (rxq == NULL)
		return;

	rte_ring_reset(rxq->ring);
}

static void
sfc_repr_tx_queue_stop(void *queue)
{
	struct sfc_repr_txq *txq = queue;

	if (txq == NULL)
		return;

	rte_ring_reset(txq->ring);
}

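/*
 * Rx burst callback. Packets are put on the per-queue ring by the
 * representor proxy; here they are only dequeued and accounted for in
 * the queue statistics.
 */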
static uint16_t
sfc_repr_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_repr_rxq *rxq = rx_queue;
	void **objs = (void *)&rx_pkts[0];
	unsigned int n_rx;

	/* The mbuf port field is already filled in by the representor proxy */
	n_rx = rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);

	if (n_rx > 0) {
		unsigned int n_bytes = 0;
		unsigned int i = 0;

		do {
			n_bytes += rx_pkts[i]->pkt_len;
		} while (++i < n_rx);

		sfc_pkts_bytes_add(&rxq->stats.packets_bytes, n_rx, n_bytes);
	}

	return n_rx;
}

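/*
 * Tx burst callback. Each mbuf is tagged with the egress m-port via an
 * mbuf dynamic flag and field and enqueued on the per-queue ring, which
 * is polled by the representor proxy that performs the actual transmit.
 */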
static uint16_t
sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_repr_txq *txq = tx_queue;
	unsigned int n_bytes = 0;
	unsigned int n_tx;
	void **objs;
	uint16_t i;

	/*
	 * The mbufs are likely cache-hot here. Set the flag and the egress
	 * m-port here instead of in the representor proxy. Doing so should
	 * help to avoid a cache bounce and, potentially, it allows one
	 * multi-producer single-consumer ring to be used for all
	 * representors. The only drawback is that this work is repeated
	 * if the enqueue fails and the sender retries.
	 */
	for (i = 0; i < nb_pkts; ++i) {
		struct rte_mbuf *m = tx_pkts[i];

		m->ol_flags |= sfc_dp_mport_override;
		*RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset,
				   efx_mport_id_t *) = txq->egress_mport;
		n_bytes += tx_pkts[i]->pkt_len;
	}

	objs = (void *)&tx_pkts[0];
	n_tx = rte_ring_sp_enqueue_burst(txq->ring, objs, nb_pkts, NULL);

	/*
	 * Remove the m-port override flag from packets that were not
	 * enqueued. Setting the flag only for enqueued packets after the
	 * burst is not possible since ownership of enqueued packets is
	 * transferred to the representor proxy. The same logic applies to
	 * counting the enqueued packets' bytes.
	 */
	for (i = n_tx; i < nb_pkts; ++i) {
		struct rte_mbuf *m = tx_pkts[i];

		m->ol_flags &= ~sfc_dp_mport_override;
		n_bytes -= m->pkt_len;
	}

	sfc_pkts_bytes_add(&txq->stats.packets_bytes, n_tx, n_bytes);

	return n_tx;
}

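/*
 * Start/stop state machine: sfc_repr_start() moves the representor from
 * SFC_ETHDEV_CONFIGURED through SFC_ETHDEV_STARTING to SFC_ETHDEV_STARTED;
 * sfc_repr_stop() moves it back to SFC_ETHDEV_CONFIGURED. Both must be
 * called with the representor lock held.
 */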
static int
sfc_repr_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_CONFIGURED:
		break;
	case SFC_ETHDEV_STARTED:
		sfcr_info(sr, "already started");
		return 0;
	default:
		ret = -EINVAL;
		goto fail_bad_state;
	}

	sr->state = SFC_ETHDEV_STARTING;

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_start_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_start;
	}

	sr->state = SFC_ETHDEV_STARTED;

	sfcr_info(sr, "done");

	return 0;

fail_start:
	sr->state = SFC_ETHDEV_CONFIGURED;

fail_bad_state:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

static int
sfc_repr_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	uint16_t i;
	int ret;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);
	ret = sfc_repr_start(dev);
	sfc_repr_unlock(sr);

	if (ret != 0)
		goto fail_start;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	sfcr_info(sr, "done");

	return 0;

fail_start:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

static int
sfc_repr_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	unsigned int i;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_STARTED:
		break;
	case SFC_ETHDEV_CONFIGURED:
		sfcr_info(sr, "already stopped");
		return 0;
	default:
		sfcr_err(sr, "stop in unexpected state %u", sr->state);
		SFC_ASSERT(B_FALSE);
		ret = -EINVAL;
		goto fail_bad_state;
	}

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_stop_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_stop;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		sfc_repr_rx_queue_stop(dev->data->rx_queues[i]);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		sfc_repr_tx_queue_stop(dev->data->tx_queues[i]);

	sr->state = SFC_ETHDEV_CONFIGURED;
	sfcr_info(sr, "done");

	return 0;

fail_bad_state:
fail_stop:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));

	return ret;
}

static int
sfc_repr_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	uint16_t i;
	int ret;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);

	ret = sfc_repr_stop(dev);
	if (ret != 0) {
		sfcr_err(sr, "%s() failed to stop representor", __func__);
		goto fail_stop;
	}

	sfc_repr_unlock(sr);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	sfcr_info(sr, "done");

	return 0;

fail_stop:
	sfc_repr_unlock(sr);

	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));

	return ret;
}

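/*
 * Representors support only a minimal subset of the ethdev configuration.
 * Anything beyond the defaults (specific link speeds, a real RSS
 * configuration, Tx multi-queue modes, loopback, DCB, interrupts) is
 * rejected with -EINVAL below.
 */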
static int
sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
		    const struct rte_eth_conf *conf)
{
	const struct rte_eth_rss_conf *rss_conf;
	int ret = 0;

	sfcr_info(sr, "entry");

	if (conf->link_speeds != 0) {
		sfcr_err(sr, "specific link speeds not supported");
		ret = -EINVAL;
	}

	switch (conf->rxmode.mq_mode) {
	case RTE_ETH_MQ_RX_RSS:
		if (nb_rx_queues != 1) {
			sfcr_err(sr, "Rx RSS is not supported with %u queues",
				 nb_rx_queues);
			ret = -EINVAL;
			break;
		}

		rss_conf = &conf->rx_adv_conf.rss_conf;
		if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
		    rss_conf->rss_hf != 0) {
			sfcr_err(sr, "Rx RSS configuration is not supported");
			ret = -EINVAL;
		}
		break;
	case RTE_ETH_MQ_RX_NONE:
		break;
	default:
		sfcr_err(sr, "Rx multi-queue modes other than RSS are not supported");
		ret = -EINVAL;
		break;
	}

	if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
		sfcr_err(sr, "Tx multi-queue modes are not supported");
		ret = -EINVAL;
	}

	if (conf->lpbk_mode != 0) {
		sfcr_err(sr, "loopback not supported");
		ret = -EINVAL;
	}

	if (conf->dcb_capability_en != 0) {
		sfcr_err(sr, "priority-based flow control not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.lsc != 0) {
		sfcr_err(sr, "link status change interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		sfcr_err(sr, "receive queue interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rmv != 0) {
		sfcr_err(sr, "remove interrupt not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done %d", ret);

	return ret;
}


static int
sfc_repr_configure(struct sfc_repr *sr, uint16_t nb_rx_queues,
		   const struct rte_eth_conf *conf)
{
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	ret = sfc_repr_check_conf(sr, nb_rx_queues, conf);
	if (ret != 0)
		goto fail_check_conf;

	sr->state = SFC_ETHDEV_CONFIGURED;

	sfcr_info(sr, "done");

	return 0;

fail_check_conf:
	sfcr_info(sr, "failed %s", rte_strerror(-ret));
	return ret;
}

static int
sfc_repr_dev_configure(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct rte_eth_dev_data *dev_data = dev->data;
	int ret;

	sfcr_info(sr, "entry n_rxq=%u n_txq=%u",
		  dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_repr_lock(sr);
	switch (sr->state) {
	case SFC_ETHDEV_CONFIGURED:
		/* FALLTHROUGH */
	case SFC_ETHDEV_INITIALIZED:
		ret = sfc_repr_configure(sr, dev_data->nb_rx_queues,
					 &dev_data->dev_conf);
		break;
	default:
		sfcr_err(sr, "unexpected adapter state %u to configure",
			 sr->state);
		ret = -EINVAL;
		break;
	}
	sfc_repr_unlock(sr);

	sfcr_info(sr, "done %s", rte_strerror(-ret));

	return ret;
}

static int
sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);

	dev_info->device = dev->device;

	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
	dev_info->max_rx_queues = SFC_REPR_RXQ_MAX;
	dev_info->max_tx_queues = SFC_REPR_TXQ_MAX;
	dev_info->default_rxconf.rx_drop_en = 1;
	dev_info->switch_info.domain_id = srs->switch_domain_id;
	dev_info->switch_info.port_id = srs->switch_port_id;

	return 0;
}

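/*
 * A representor has no physical link. Report the link as up with
 * unknown speed while the device is started, and as unknown otherwise.
 */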
static int
sfc_repr_dev_link_update(struct rte_eth_dev *dev,
			 __rte_unused int wait_to_complete)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct rte_eth_link link;

	if (sr->state != SFC_ETHDEV_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
	} else {
		memset(&link, 0, sizeof(link));
		link.link_status = RTE_ETH_LINK_UP;
		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

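/*
 * Rings are named "sfc_<pf_port>_repr_<repr>_<rx|tx>q<qid>",
 * e.g. "sfc_0_repr_1_rxq0" (hypothetical IDs), which makes the name
 * unique per representor queue.
 */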
static int
sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
		     const char *type_name, uint16_t qid, uint16_t nb_desc,
		     unsigned int socket_id, struct rte_ring **ring)
{
	char ring_name[RTE_RING_NAMESIZE];
	int ret;

	ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
		       pf_port_id, repr_id, type_name, qid);
	if (ret >= (int)sizeof(ring_name))
		return -ENAMETOOLONG;

	/*
	 * Single producer/consumer rings are used since the Tx/Rx packet
	 * burst API for representors is guaranteed to be called from
	 * a single thread, and the user of the other end (representor proxy)
	 * is also single-threaded.
	 */
	*ring = rte_ring_create(ring_name, nb_desc, socket_id,
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (*ring == NULL)
		return -rte_errno;

	return 0;
}

static int
sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
			const struct rte_eth_rxconf *rx_conf)
{
	int ret = 0;

	sfcr_info(sr, "entry");

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfcr_warn(sr,
			"RxQ prefetch/host/writeback thresholds are not supported");
	}

	if (rx_conf->rx_free_thresh != 0)
		sfcr_warn(sr, "RxQ free threshold is not supported");

	if (rx_conf->rx_drop_en == 0)
		sfcr_warn(sr, "RxQ drop disable is not supported");

	if (rx_conf->rx_deferred_start) {
		sfcr_err(sr, "Deferred start is not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done: %s", rte_strerror(-ret));

	return ret;
}

static int
sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			uint16_t nb_rx_desc, unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mb_pool)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_rxq *rxq;
	int ret;

	sfcr_info(sr, "entry");

	ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
	if (ret != 0)
		goto fail_check_conf;

	ret = -ENOMEM;
	rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
		goto fail_rxq_alloc;
	}

	ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
				   "rx", rx_queue_id, nb_rx_desc,
				   socket_id, &rxq->ring);
	if (ret != 0) {
		sfcr_err(sr, "%s() failed to create ring", __func__);
		goto fail_ring_create;
	}

	ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
				     rx_queue_id, rxq->ring, mb_pool);
	if (ret != 0) {
		SFC_ASSERT(ret > 0);
		ret = -ret;
		sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
		goto fail_proxy_add_rxq;
	}

	dev->data->rx_queues[rx_queue_id] = rxq;

	sfcr_info(sr, "done");

	return 0;

fail_proxy_add_rxq:
	rte_ring_free(rxq->ring);

fail_ring_create:
	rte_free(rxq);

fail_rxq_alloc:
fail_check_conf:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

static void
sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];

	sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
	rte_ring_free(rxq->ring);
	rte_free(rxq);
}

static int
sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
			const struct rte_eth_txconf *tx_conf)
{
	int ret = 0;

	sfcr_info(sr, "entry");

	if (tx_conf->tx_rs_thresh != 0)
		sfcr_warn(sr, "RS bit in transmit descriptor is not supported");

	if (tx_conf->tx_free_thresh != 0)
		sfcr_warn(sr, "TxQ free threshold is not supported");

	if (tx_conf->tx_thresh.pthresh != 0 ||
	    tx_conf->tx_thresh.hthresh != 0 ||
	    tx_conf->tx_thresh.wthresh != 0) {
		sfcr_warn(sr,
			"prefetch/host/writeback thresholds are not supported");
	}

	if (tx_conf->tx_deferred_start) {
		sfcr_err(sr, "Deferred start is not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done: %s", rte_strerror(-ret));

	return ret;
}

static int
sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			uint16_t nb_tx_desc, unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_txq *txq;
	int ret;

	sfcr_info(sr, "entry");

	ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
	if (ret != 0)
		goto fail_check_conf;

	ret = -ENOMEM;
	txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
				   "tx", tx_queue_id, nb_tx_desc,
				   socket_id, &txq->ring);
	if (ret != 0)
		goto fail_ring_create;

	ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
				     tx_queue_id, txq->ring,
				     &txq->egress_mport);
	if (ret != 0)
		goto fail_proxy_add_txq;

	dev->data->tx_queues[tx_queue_id] = txq;

	sfcr_info(sr, "done");

	return 0;

fail_proxy_add_txq:
	rte_ring_free(txq->ring);

fail_ring_create:
	rte_free(txq);

fail_txq_alloc:
fail_check_conf:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

static void
sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];

	sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
	rte_ring_free(txq->ring);
	rte_free(txq);
}

static void
sfc_repr_close(struct sfc_repr *sr)
{
	SFC_ASSERT(sfc_repr_lock_is_locked(sr));
	SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);

	sr->state = SFC_ETHDEV_INITIALIZED;
}

static int
sfc_repr_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	unsigned int i;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);
	switch (sr->state) {
	case SFC_ETHDEV_STARTED:
		sfc_repr_stop(dev);
		SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ETHDEV_CONFIGURED:
		sfc_repr_close(sr);
		SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ETHDEV_INITIALIZED:
		break;
	default:
		sfcr_err(sr, "unexpected adapter state %u on close", sr->state);
		break;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		sfc_repr_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		sfc_repr_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}

	/*
	 * Clean up all resources.
	 * The steps below roll back primary process sfc_repr_eth_dev_init().
	 */

	(void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);

	sfc_mae_clear_switch_port(srs->switch_domain_id, srs->switch_port_id);

	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	dev->dev_ops = NULL;

	sfc_repr_unlock(sr);
	sfc_repr_lock_fini(sr);

	sfcr_info(sr, "done");

	free(sr);

	return 0;
}

static int
sfc_repr_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	int ret;

	ret = sfc_repr_proxy_repr_entity_mac_addr_set(srs->pf_port_id,
						      srs->repr_id, mac_addr);
	return -ret;
}

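/*
 * Sum up per-queue counters. The ethdev layer zero-initialises *stats
 * before invoking this callback, so plain accumulation is sufficient.
 */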
static int
sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	union sfc_pkts_bytes queue_stats;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct sfc_repr_rxq *rxq = dev->data->rx_queues[i];

		sfc_pkts_bytes_get(&rxq->stats.packets_bytes,
				   &queue_stats);

		stats->ipackets += queue_stats.pkts;
		stats->ibytes += queue_stats.bytes;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct sfc_repr_txq *txq = dev->data->tx_queues[i];

		sfc_pkts_bytes_get(&txq->stats.packets_bytes,
				   &queue_stats);

		stats->opackets += queue_stats.pkts;
		stats->obytes += queue_stats.bytes;
	}

	return 0;
}

static int
sfc_repr_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
				  uint16_t *transfer_proxy_port,
				  struct rte_flow_error *error)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);

	return rte_flow_pick_transfer_proxy(srs->pf_port_id,
					    transfer_proxy_port, error);
}

const struct rte_flow_ops sfc_repr_flow_ops = {
	.pick_transfer_proxy = sfc_repr_flow_pick_transfer_proxy,
};

static int
sfc_repr_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			  const struct rte_flow_ops **ops)
{
	*ops = &sfc_repr_flow_ops;
	return 0;
}

static const struct eth_dev_ops sfc_repr_dev_ops = {
	.dev_configure			= sfc_repr_dev_configure,
	.dev_start			= sfc_repr_dev_start,
	.dev_stop			= sfc_repr_dev_stop,
	.dev_close			= sfc_repr_dev_close,
	.dev_infos_get			= sfc_repr_dev_infos_get,
	.link_update			= sfc_repr_dev_link_update,
	.mac_addr_set			= sfc_repr_mac_addr_set,
	.stats_get			= sfc_repr_stats_get,
	.rx_queue_setup			= sfc_repr_rx_queue_setup,
	.rx_queue_release		= sfc_repr_rx_queue_release,
	.tx_queue_setup			= sfc_repr_tx_queue_setup,
	.tx_queue_release		= sfc_repr_tx_queue_release,
	.flow_ops_get			= sfc_repr_dev_flow_ops_get,
};


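/*
 * Parameters passed from sfc_repr_create() to sfc_repr_eth_dev_init()
 * via rte_eth_dev_create().
 */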
struct sfc_repr_init_data {
	uint16_t		pf_port_id;
	uint16_t		switch_domain_id;
	efx_mport_sel_t		mport_sel;
	efx_pcie_interface_t	intf;
	uint16_t		pf;
	uint16_t		vf;
};

static int
sfc_repr_assign_mae_switch_port(uint16_t switch_domain_id,
				const struct sfc_mae_switch_port_request *req,
				uint16_t *switch_port_id)
{
	int rc;

	rc = sfc_mae_assign_switch_port(switch_domain_id, req, switch_port_id);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_repr_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
{
	const struct sfc_repr_init_data *repr_data = init_params;
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_mae_switch_port_request switch_port_request;
	efx_mport_sel_t ethdev_mport_sel;
	efx_mport_id_t proxy_mport_id;
	struct sfc_repr *sr;
	int ret;

	/*
	 * For each representor, a driver-internal flow has to be installed
	 * in order to direct traffic coming from the represented entity to
	 * the "representor proxy". Such internal flows need to look up the
	 * ethdev m-port by the ethdev ID of the representor in question to
	 * use it in the delivery action. So set the representor ethdev's
	 * m-port to that of the "representor proxy" in the switch port
	 * request below.
	 */
	sfc_repr_proxy_mport_alias_get(repr_data->pf_port_id, &proxy_mport_id);

	ret = efx_mae_mport_by_id(&proxy_mport_id, &ethdev_mport_sel);
	if (ret != 0) {
		SFC_GENERIC_LOG(ERR,
			"%s() failed to get repr proxy mport by ID", __func__);
		goto fail_get_selector;
	}

	memset(&switch_port_request, 0, sizeof(switch_port_request));
	switch_port_request.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
	switch_port_request.ethdev_mportp = &ethdev_mport_sel;
	switch_port_request.entity_mportp = &repr_data->mport_sel;
	switch_port_request.ethdev_port_id = dev->data->port_id;
	switch_port_request.port_data.repr.intf = repr_data->intf;
	switch_port_request.port_data.repr.pf = repr_data->pf;
	switch_port_request.port_data.repr.vf = repr_data->vf;

	ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
					      &switch_port_request,
					      &srs->switch_port_id);
	if (ret != 0) {
		SFC_GENERIC_LOG(ERR,
			"%s() failed to assign MAE switch port (domain id %u)",
			__func__, repr_data->switch_domain_id);
		goto fail_mae_assign_switch_port;
	}

	ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
				      srs->switch_port_id, dev->data->port_id,
				      &repr_data->mport_sel, repr_data->intf,
				      repr_data->pf, repr_data->vf);
	if (ret != 0) {
		SFC_GENERIC_LOG(ERR, "%s() failed to add repr proxy port",
				__func__);
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_create_port;
	}

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sr = calloc(1, sizeof(*sr));
	if (sr == NULL) {
		ret = -ENOMEM;
		goto fail_alloc_sr;
	}

	sfc_repr_lock_init(sr);
	sfc_repr_lock(sr);

	dev->process_private = sr;

	srs->pf_port_id = repr_data->pf_port_id;
	srs->repr_id = srs->switch_port_id;
	srs->switch_domain_id = repr_data->switch_domain_id;

	dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	dev->data->representor_id = srs->repr_id;
	dev->data->backer_port_id = srs->pf_port_id;

	dev->data->mac_addrs = rte_zmalloc("sfcr", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		ret = -ENOMEM;
		goto fail_mac_addrs;
	}

	rte_eth_random_addr(dev->data->mac_addrs[0].addr_bytes);

	ret = sfc_repr_proxy_repr_entity_mac_addr_set(repr_data->pf_port_id,
						      srs->repr_id,
						      &dev->data->mac_addrs[0]);
	if (ret != 0) {
		ret = -ret;
		goto fail_mac_addr_set;
	}

	dev->rx_pkt_burst = sfc_repr_rx_burst;
	dev->tx_pkt_burst = sfc_repr_tx_burst;
	dev->dev_ops = &sfc_repr_dev_ops;

	sr->state = SFC_ETHDEV_INITIALIZED;
	sfc_repr_unlock(sr);

	return 0;

fail_mac_addr_set:
fail_mac_addrs:
	sfc_repr_unlock(sr);
	free(sr);

fail_alloc_sr:
	(void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
				      srs->switch_port_id);

fail_create_port:
fail_mae_assign_switch_port:
fail_get_selector:
	SFC_GENERIC_LOG(ERR, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

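/*
 * Representor ethdev names follow the pattern
 * "net_<parent>_representor_c<controller>pf<pf>[vf<vf>]",
 * e.g. "net_0000:01:00.0_representor_c0pf0vf2" (hypothetical IDs).
 */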
int
sfc_repr_create(struct rte_eth_dev *parent,
		struct sfc_repr_entity_info *entity,
		uint16_t switch_domain_id,
		const efx_mport_sel_t *mport_sel)
{
	struct sfc_repr_init_data repr_data;
	char name[RTE_ETH_NAME_MAX_LEN];
	int controller;
	int ret;
	int rc;
	struct rte_eth_dev *dev;

	controller = -1;
	rc = sfc_mae_switch_domain_get_controller(switch_domain_id,
						  entity->intf, &controller);
	if (rc != 0) {
		SFC_GENERIC_LOG(ERR, "%s() failed to get DPDK controller for %d",
				__func__, entity->intf);
		return -rc;
	}

	switch (entity->type) {
	case RTE_ETH_REPRESENTOR_VF:
		ret = snprintf(name, sizeof(name), "net_%s_representor_c%upf%uvf%u",
			       parent->device->name, controller, entity->pf,
			       entity->vf);
		break;
	case RTE_ETH_REPRESENTOR_PF:
		ret = snprintf(name, sizeof(name), "net_%s_representor_c%upf%u",
			       parent->device->name, controller, entity->pf);
		break;
	default:
		return -ENOTSUP;
	}

	if (ret >= (int)sizeof(name)) {
		SFC_GENERIC_LOG(ERR, "%s() failed: name too long", __func__);
		return -ENAMETOOLONG;
	}

	dev = rte_eth_dev_allocated(name);
	if (dev == NULL) {
		memset(&repr_data, 0, sizeof(repr_data));
		repr_data.pf_port_id = parent->data->port_id;
		repr_data.switch_domain_id = switch_domain_id;
		repr_data.mport_sel = *mport_sel;
		repr_data.intf = entity->intf;
		repr_data.pf = entity->pf;
		repr_data.vf = entity->vf;

		ret = rte_eth_dev_create(parent->device, name,
					 sizeof(struct sfc_repr_shared),
					 NULL, NULL,
					 sfc_repr_eth_dev_init, &repr_data);
		if (ret != 0) {
			SFC_GENERIC_LOG(ERR, "%s() failed to create device",
					__func__);
			return ret;
		}
	}

	return 0;
}
1125