xref: /dpdk/drivers/net/sfc/sfc_repr.c (revision 1dfb92a529722c87b251eaaf673c48b13d1a54cc)
1a62ec905SIgor Romanov /* SPDX-License-Identifier: BSD-3-Clause
2a62ec905SIgor Romanov  *
3a62ec905SIgor Romanov  * Copyright(c) 2019-2021 Xilinx, Inc.
4a62ec905SIgor Romanov  * Copyright(c) 2019 Solarflare Communications Inc.
5a62ec905SIgor Romanov  *
6a62ec905SIgor Romanov  * This software was jointly developed between OKTET Labs (under contract
7a62ec905SIgor Romanov  * for Solarflare) and Solarflare Communications, Inc.
8a62ec905SIgor Romanov  */
9a62ec905SIgor Romanov 
10a62ec905SIgor Romanov #include <stdint.h>
11a62ec905SIgor Romanov 
1275f080fdSIgor Romanov #include <rte_mbuf.h>
13a62ec905SIgor Romanov #include <rte_ethdev.h>
14a62ec905SIgor Romanov #include <rte_malloc.h>
15a62ec905SIgor Romanov #include <ethdev_driver.h>
16a62ec905SIgor Romanov 
17a62ec905SIgor Romanov #include "efx.h"
18a62ec905SIgor Romanov 
19a62ec905SIgor Romanov #include "sfc_log.h"
20a62ec905SIgor Romanov #include "sfc_debug.h"
21a62ec905SIgor Romanov #include "sfc_repr.h"
22a62ec905SIgor Romanov #include "sfc_ethdev_state.h"
23c377f1adSIgor Romanov #include "sfc_repr_proxy_api.h"
24a62ec905SIgor Romanov #include "sfc_switch.h"
2575f080fdSIgor Romanov #include "sfc_dp_tx.h"
26a62ec905SIgor Romanov 
/** Multi-process shared representor private data */
struct sfc_repr_shared {
	/* Ethdev port ID of the PF; used to reach the representor proxy */
	uint16_t		pf_port_id;
	/* Representor ID passed to the proxy API together with pf_port_id */
	uint16_t		repr_id;
	/* Switch domain reported via dev_info switch_info */
	uint16_t		switch_domain_id;
	/* Switch port within the domain, reported via dev_info switch_info */
	uint16_t		switch_port_id;
};
34a62ec905SIgor Romanov 
/* Representor receive queue; packets arrive via a ring filled by the proxy */
struct sfc_repr_rxq {
	/* Datapath members */
	struct rte_ring			*ring;
};
39155583abSIgor Romanov 
/* Representor transmit queue; packets are handed to the proxy via a ring */
struct sfc_repr_txq {
	/* Datapath members */
	struct rte_ring			*ring;
	/* Egress m-port assigned by the proxy when the queue is added */
	efx_mport_id_t			egress_mport;
};
45155583abSIgor Romanov 
/** Primary process representor private data */
struct sfc_repr {
	/**
	 * PMD setup and configuration is not thread safe. Since it is not
	 * performance sensitive, it is better to guarantee thread-safety
	 * and add device level lock. Adapter control operations which
	 * change its state should acquire the lock.
	 */
	rte_spinlock_t			lock;
	/* Ethdev state machine; transitions are done under the lock above */
	enum sfc_ethdev_state		state;
};
57a62ec905SIgor Romanov 
/*
 * Log an error for representor @sr. The _sr local exists only to
 * type-check the argument; it is deliberately unused (cast to void).
 */
#define sfcr_err(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(ERR, __VA_ARGS__);			\
	} while (0)
65a62ec905SIgor Romanov 
/*
 * Log a warning for representor @sr. The _sr local exists only to
 * type-check the argument; it is deliberately unused (cast to void).
 */
#define sfcr_warn(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(WARNING, __VA_ARGS__);			\
	} while (0)
73155583abSIgor Romanov 
/*
 * Log an info message for representor @sr, automatically prefixing the
 * caller's format string with "<function>() ". The _sr local exists only
 * to type-check the argument; it is deliberately unused (cast to void).
 */
#define sfcr_info(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(INFO,					\
				RTE_FMT("%s() "				\
				RTE_FMT_HEAD(__VA_ARGS__ ,),		\
				__func__,				\
				RTE_FMT_TAIL(__VA_ARGS__ ,)));		\
	} while (0)
85a62ec905SIgor Romanov 
86a62ec905SIgor Romanov static inline struct sfc_repr_shared *
87a62ec905SIgor Romanov sfc_repr_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
88a62ec905SIgor Romanov {
89a62ec905SIgor Romanov 	struct sfc_repr_shared *srs = eth_dev->data->dev_private;
90a62ec905SIgor Romanov 
91a62ec905SIgor Romanov 	return srs;
92a62ec905SIgor Romanov }
93a62ec905SIgor Romanov 
94a62ec905SIgor Romanov static inline struct sfc_repr *
95a62ec905SIgor Romanov sfc_repr_by_eth_dev(struct rte_eth_dev *eth_dev)
96a62ec905SIgor Romanov {
97a62ec905SIgor Romanov 	struct sfc_repr *sr = eth_dev->process_private;
98a62ec905SIgor Romanov 
99a62ec905SIgor Romanov 	return sr;
100a62ec905SIgor Romanov }
101a62ec905SIgor Romanov 
102a62ec905SIgor Romanov /*
103a62ec905SIgor Romanov  * Add wrapper functions to acquire/release lock to be able to remove or
104a62ec905SIgor Romanov  * change the lock in one place.
105a62ec905SIgor Romanov  */
106a62ec905SIgor Romanov 
/* Initialize the device-level lock; call once before first use */
static inline void
sfc_repr_lock_init(struct sfc_repr *sr)
{
	rte_spinlock_init(&sr->lock);
}
112a62ec905SIgor Romanov 
#if defined(RTE_LIBRTE_SFC_EFX_DEBUG) || defined(RTE_ENABLE_ASSERT)

/* Debug-only helper used in SFC_ASSERT() lock-held checks */
static inline int
sfc_repr_lock_is_locked(struct sfc_repr *sr)
{
	return rte_spinlock_is_locked(&sr->lock);
}

#endif
122a62ec905SIgor Romanov 
/* Acquire the device-level lock */
static inline void
sfc_repr_lock(struct sfc_repr *sr)
{
	rte_spinlock_lock(&sr->lock);
}
128a62ec905SIgor Romanov 
/* Release the device-level lock */
static inline void
sfc_repr_unlock(struct sfc_repr *sr)
{
	rte_spinlock_unlock(&sr->lock);
}
134a62ec905SIgor Romanov 
/* No-op: spinlocks need no teardown; kept for init/fini symmetry */
static inline void
sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
{
	/* Just for symmetry of the API */
}
140a62ec905SIgor Romanov 
14175f080fdSIgor Romanov static void
14275f080fdSIgor Romanov sfc_repr_rx_queue_stop(void *queue)
14375f080fdSIgor Romanov {
14475f080fdSIgor Romanov 	struct sfc_repr_rxq *rxq = queue;
14575f080fdSIgor Romanov 
14675f080fdSIgor Romanov 	if (rxq == NULL)
14775f080fdSIgor Romanov 		return;
14875f080fdSIgor Romanov 
14975f080fdSIgor Romanov 	rte_ring_reset(rxq->ring);
15075f080fdSIgor Romanov }
15175f080fdSIgor Romanov 
15275f080fdSIgor Romanov static void
15375f080fdSIgor Romanov sfc_repr_tx_queue_stop(void *queue)
15475f080fdSIgor Romanov {
15575f080fdSIgor Romanov 	struct sfc_repr_txq *txq = queue;
15675f080fdSIgor Romanov 
15775f080fdSIgor Romanov 	if (txq == NULL)
15875f080fdSIgor Romanov 		return;
15975f080fdSIgor Romanov 
16075f080fdSIgor Romanov 	rte_ring_reset(txq->ring);
16175f080fdSIgor Romanov }
16275f080fdSIgor Romanov 
/*
 * Start the representor: validate the state machine and ask the proxy
 * to start packet forwarding for this representor.
 *
 * Must be called with the device lock held. Returns 0 on success or a
 * negative errno. Starting an already-started representor succeeds.
 */
static int
sfc_repr_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_CONFIGURED:
		break;
	case SFC_ETHDEV_STARTED:
		/* Idempotent: already running */
		sfcr_info(sr, "already started");
		return 0;
	default:
		ret = -EINVAL;
		goto fail_bad_state;
	}

	sr->state = SFC_ETHDEV_STARTING;

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_start_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		/* Proxy API returns positive errno; convert for ethdev */
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_start;
	}

	sr->state = SFC_ETHDEV_STARTED;

	sfcr_info(sr, "done");

	return 0;

fail_start:
	/* Roll back to CONFIGURED so a later start attempt is valid */
	sr->state = SFC_ETHDEV_CONFIGURED;

fail_bad_state:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}
20875f080fdSIgor Romanov 
/* dev_ops dev_start callback: serialize and delegate to sfc_repr_start() */
static int
sfc_repr_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	int ret;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);
	ret = sfc_repr_start(dev);
	sfc_repr_unlock(sr);

	if (ret == 0) {
		sfcr_info(sr, "done");
		return 0;
	}

	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}
23275f080fdSIgor Romanov 
/*
 * Stop the representor: ask the proxy to stop forwarding, then drain
 * all Rx/Tx rings and return to the CONFIGURED state.
 *
 * Must be called with the device lock held. Returns 0 on success or a
 * negative errno. Stopping an already-stopped representor succeeds.
 */
static int
sfc_repr_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	unsigned int i;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_STARTED:
		break;
	case SFC_ETHDEV_CONFIGURED:
		/* Idempotent: nothing to stop */
		sfcr_info(sr, "already stopped");
		return 0;
	default:
		sfcr_err(sr, "stop in unexpected state %u", sr->state);
		SFC_ASSERT(B_FALSE);
		ret = -EINVAL;
		goto fail_bad_state;
	}

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_stop_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		/* Proxy API returns positive errno; convert for ethdev */
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_stop;
	}

	/* Proxy no longer feeds the rings; safe to reset them now */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		sfc_repr_rx_queue_stop(dev->data->rx_queues[i]);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		sfc_repr_tx_queue_stop(dev->data->tx_queues[i]);

	sr->state = SFC_ETHDEV_CONFIGURED;
	sfcr_info(sr, "done");

	return 0;

fail_bad_state:
fail_stop:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));

	return ret;
}
28375f080fdSIgor Romanov 
/* dev_ops dev_stop callback: serialize and delegate to sfc_repr_stop() */
static int
sfc_repr_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	int ret;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);

	ret = sfc_repr_stop(dev);
	if (ret != 0) {
		sfcr_err(sr, "%s() failed to stop representor", __func__);
		goto fail_stop;
	}

	sfc_repr_unlock(sr);

	sfcr_info(sr, "done");

	return 0;

fail_stop:
	sfc_repr_unlock(sr);

	sfcr_err(sr, "%s() failed %s", __func__, rte_strerror(-ret));

	return ret;
}
31375f080fdSIgor Romanov 
/*
 * Validate the requested ethdev configuration against representor
 * capabilities. Representors support no offloads/features beyond the
 * bare minimum, so almost everything non-default is rejected.
 *
 * All checks are performed (and logged) even after the first failure;
 * returns 0 if the configuration is acceptable, -EINVAL otherwise.
 */
static int
sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
		    const struct rte_eth_conf *conf)
{
	const struct rte_eth_rss_conf *rss_conf;
	int ret = 0;

	sfcr_info(sr, "entry");

	if (conf->link_speeds != 0) {
		sfcr_err(sr, "specific link speeds not supported");
		ret = -EINVAL;
	}

	switch (conf->rxmode.mq_mode) {
	case ETH_MQ_RX_RSS:
		/* RSS is tolerated only as a no-op: one queue, no config */
		if (nb_rx_queues != 1) {
			sfcr_err(sr, "Rx RSS is not supported with %u queues",
				 nb_rx_queues);
			ret = -EINVAL;
			break;
		}

		rss_conf = &conf->rx_adv_conf.rss_conf;
		if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
		    rss_conf->rss_hf != 0) {
			sfcr_err(sr, "Rx RSS configuration is not supported");
			ret = -EINVAL;
		}
		break;
	case ETH_MQ_RX_NONE:
		break;
	default:
		sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
		ret = -EINVAL;
		break;
	}

	if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
		sfcr_err(sr, "Tx mode MQ modes not supported");
		ret = -EINVAL;
	}

	if (conf->lpbk_mode != 0) {
		sfcr_err(sr, "loopback not supported");
		ret = -EINVAL;
	}

	if (conf->dcb_capability_en != 0) {
		sfcr_err(sr, "priority-based flow control not supported");
		ret = -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfcr_err(sr, "Flow Director not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.lsc != 0) {
		sfcr_err(sr, "link status change interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		sfcr_err(sr, "receive queue interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rmv != 0) {
		sfcr_err(sr, "remove interrupt not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done %d", ret);

	return ret;
}
391a62ec905SIgor Romanov 
392a62ec905SIgor Romanov 
393a62ec905SIgor Romanov static int
394a62ec905SIgor Romanov sfc_repr_configure(struct sfc_repr *sr, uint16_t nb_rx_queues,
395a62ec905SIgor Romanov 		   const struct rte_eth_conf *conf)
396a62ec905SIgor Romanov {
397a62ec905SIgor Romanov 	int ret;
398a62ec905SIgor Romanov 
399a62ec905SIgor Romanov 	sfcr_info(sr, "entry");
400a62ec905SIgor Romanov 
401a62ec905SIgor Romanov 	SFC_ASSERT(sfc_repr_lock_is_locked(sr));
402a62ec905SIgor Romanov 
403a62ec905SIgor Romanov 	ret = sfc_repr_check_conf(sr, nb_rx_queues, conf);
404a62ec905SIgor Romanov 	if (ret != 0)
405a62ec905SIgor Romanov 		goto fail_check_conf;
406a62ec905SIgor Romanov 
407a62ec905SIgor Romanov 	sr->state = SFC_ETHDEV_CONFIGURED;
408a62ec905SIgor Romanov 
409a62ec905SIgor Romanov 	sfcr_info(sr, "done");
410a62ec905SIgor Romanov 
411a62ec905SIgor Romanov 	return 0;
412a62ec905SIgor Romanov 
413a62ec905SIgor Romanov fail_check_conf:
414a62ec905SIgor Romanov 	sfcr_info(sr, "failed %s", rte_strerror(-ret));
415a62ec905SIgor Romanov 	return ret;
416a62ec905SIgor Romanov }
417a62ec905SIgor Romanov 
/*
 * dev_ops dev_configure callback: check the current state allows
 * (re)configuration and delegate to sfc_repr_configure() under the lock.
 */
static int
sfc_repr_dev_configure(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct rte_eth_dev_data *dev_data = dev->data;
	int ret;

	sfcr_info(sr, "entry n_rxq=%u n_txq=%u",
		  dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_repr_lock(sr);
	switch (sr->state) {
	case SFC_ETHDEV_CONFIGURED:
		/* FALLTHROUGH */
	case SFC_ETHDEV_INITIALIZED:
		ret = sfc_repr_configure(sr, dev_data->nb_rx_queues,
					 &dev_data->dev_conf);
		break;
	default:
		sfcr_err(sr, "unexpected adapter state %u to configure",
			 sr->state);
		ret = -EINVAL;
		break;
	}
	sfc_repr_unlock(sr);

	sfcr_info(sr, "done %s", rte_strerror(-ret));

	return ret;
}
448a62ec905SIgor Romanov 
449a62ec905SIgor Romanov static int
450a62ec905SIgor Romanov sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
451a62ec905SIgor Romanov 		       struct rte_eth_dev_info *dev_info)
452a62ec905SIgor Romanov {
453a62ec905SIgor Romanov 	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
454a62ec905SIgor Romanov 
455a62ec905SIgor Romanov 	dev_info->device = dev->device;
456a62ec905SIgor Romanov 
457a62ec905SIgor Romanov 	dev_info->max_rx_queues = SFC_REPR_RXQ_MAX;
458a62ec905SIgor Romanov 	dev_info->max_tx_queues = SFC_REPR_TXQ_MAX;
459a62ec905SIgor Romanov 	dev_info->default_rxconf.rx_drop_en = 1;
460a62ec905SIgor Romanov 	dev_info->switch_info.domain_id = srs->switch_domain_id;
461a62ec905SIgor Romanov 	dev_info->switch_info.port_id = srs->switch_port_id;
462a62ec905SIgor Romanov 
463a62ec905SIgor Romanov 	return 0;
464a62ec905SIgor Romanov }
465a62ec905SIgor Romanov 
/*
 * dev_ops link_update callback. Representors have no physical link:
 * report UP with unknown speed while started, unknown link otherwise.
 *
 * NOTE(review): sr->state is read without taking the device lock here —
 * presumably tolerable for a racy link snapshot, but worth confirming.
 */
static int
sfc_repr_dev_link_update(struct rte_eth_dev *dev,
			 __rte_unused int wait_to_complete)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct rte_eth_link link;

	if (sr->state != SFC_ETHDEV_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
	} else {
		memset(&link, 0, sizeof(link));
		link.link_status = ETH_LINK_UP;
		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
483*1dfb92a5SIgor Romanov 
484*1dfb92a5SIgor Romanov static int
485155583abSIgor Romanov sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
486155583abSIgor Romanov 		     const char *type_name, uint16_t qid, uint16_t nb_desc,
487155583abSIgor Romanov 		     unsigned int socket_id, struct rte_ring **ring)
488155583abSIgor Romanov {
489155583abSIgor Romanov 	char ring_name[RTE_RING_NAMESIZE];
490155583abSIgor Romanov 	int ret;
491155583abSIgor Romanov 
492155583abSIgor Romanov 	ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
493155583abSIgor Romanov 		       pf_port_id, repr_id, type_name, qid);
494155583abSIgor Romanov 	if (ret >= (int)sizeof(ring_name))
495155583abSIgor Romanov 		return -ENAMETOOLONG;
496155583abSIgor Romanov 
497155583abSIgor Romanov 	/*
498155583abSIgor Romanov 	 * Single producer/consumer rings are used since the API for Tx/Rx
499155583abSIgor Romanov 	 * packet burst for representors are guaranteed to be called from
500155583abSIgor Romanov 	 * a single thread, and the user of the other end (representor proxy)
501155583abSIgor Romanov 	 * is also single-threaded.
502155583abSIgor Romanov 	 */
503155583abSIgor Romanov 	*ring = rte_ring_create(ring_name, nb_desc, socket_id,
504155583abSIgor Romanov 			       RING_F_SP_ENQ | RING_F_SC_DEQ);
505155583abSIgor Romanov 	if (*ring == NULL)
506155583abSIgor Romanov 		return -rte_errno;
507155583abSIgor Romanov 
508155583abSIgor Romanov 	return 0;
509155583abSIgor Romanov }
510155583abSIgor Romanov 
511155583abSIgor Romanov static int
512155583abSIgor Romanov sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
513155583abSIgor Romanov 			const struct rte_eth_rxconf *rx_conf)
514155583abSIgor Romanov {
515155583abSIgor Romanov 	int ret = 0;
516155583abSIgor Romanov 
517155583abSIgor Romanov 	sfcr_info(sr, "entry");
518155583abSIgor Romanov 
519155583abSIgor Romanov 	if (rx_conf->rx_thresh.pthresh != 0 ||
520155583abSIgor Romanov 	    rx_conf->rx_thresh.hthresh != 0 ||
521155583abSIgor Romanov 	    rx_conf->rx_thresh.wthresh != 0) {
522155583abSIgor Romanov 		sfcr_warn(sr,
523155583abSIgor Romanov 			"RxQ prefetch/host/writeback thresholds are not supported");
524155583abSIgor Romanov 	}
525155583abSIgor Romanov 
526155583abSIgor Romanov 	if (rx_conf->rx_free_thresh != 0)
527155583abSIgor Romanov 		sfcr_warn(sr, "RxQ free threshold is not supported");
528155583abSIgor Romanov 
529155583abSIgor Romanov 	if (rx_conf->rx_drop_en == 0)
530155583abSIgor Romanov 		sfcr_warn(sr, "RxQ drop disable is not supported");
531155583abSIgor Romanov 
532155583abSIgor Romanov 	if (rx_conf->rx_deferred_start) {
533155583abSIgor Romanov 		sfcr_err(sr, "Deferred start is not supported");
534155583abSIgor Romanov 		ret = -EINVAL;
535155583abSIgor Romanov 	}
536155583abSIgor Romanov 
537155583abSIgor Romanov 	sfcr_info(sr, "done: %s", rte_strerror(-ret));
538155583abSIgor Romanov 
539155583abSIgor Romanov 	return ret;
540155583abSIgor Romanov }
541155583abSIgor Romanov 
542155583abSIgor Romanov static int
543155583abSIgor Romanov sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
544155583abSIgor Romanov 			uint16_t nb_rx_desc, unsigned int socket_id,
545155583abSIgor Romanov 			__rte_unused const struct rte_eth_rxconf *rx_conf,
546155583abSIgor Romanov 			struct rte_mempool *mb_pool)
547155583abSIgor Romanov {
548155583abSIgor Romanov 	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
549155583abSIgor Romanov 	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
550155583abSIgor Romanov 	struct sfc_repr_rxq *rxq;
551155583abSIgor Romanov 	int ret;
552155583abSIgor Romanov 
553155583abSIgor Romanov 	sfcr_info(sr, "entry");
554155583abSIgor Romanov 
555155583abSIgor Romanov 	ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
556155583abSIgor Romanov 	if (ret != 0)
557155583abSIgor Romanov 		goto fail_check_conf;
558155583abSIgor Romanov 
559155583abSIgor Romanov 	ret = -ENOMEM;
560155583abSIgor Romanov 	rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
561155583abSIgor Romanov 				 RTE_CACHE_LINE_SIZE, socket_id);
562155583abSIgor Romanov 	if (rxq == NULL) {
563155583abSIgor Romanov 		sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
564155583abSIgor Romanov 		goto fail_rxq_alloc;
565155583abSIgor Romanov 	}
566155583abSIgor Romanov 
567155583abSIgor Romanov 	ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
568155583abSIgor Romanov 				   "rx", rx_queue_id, nb_rx_desc,
569155583abSIgor Romanov 				   socket_id, &rxq->ring);
570155583abSIgor Romanov 	if (ret != 0) {
571155583abSIgor Romanov 		sfcr_err(sr, "%s() failed to create ring", __func__);
572155583abSIgor Romanov 		goto fail_ring_create;
573155583abSIgor Romanov 	}
574155583abSIgor Romanov 
575155583abSIgor Romanov 	ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
576155583abSIgor Romanov 				     rx_queue_id, rxq->ring, mb_pool);
577155583abSIgor Romanov 	if (ret != 0) {
578155583abSIgor Romanov 		SFC_ASSERT(ret > 0);
579155583abSIgor Romanov 		ret = -ret;
580155583abSIgor Romanov 		sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
581155583abSIgor Romanov 		goto fail_proxy_add_rxq;
582155583abSIgor Romanov 	}
583155583abSIgor Romanov 
584155583abSIgor Romanov 	dev->data->rx_queues[rx_queue_id] = rxq;
585155583abSIgor Romanov 
586155583abSIgor Romanov 	sfcr_info(sr, "done");
587155583abSIgor Romanov 
588155583abSIgor Romanov 	return 0;
589155583abSIgor Romanov 
590155583abSIgor Romanov fail_proxy_add_rxq:
591155583abSIgor Romanov 	rte_ring_free(rxq->ring);
592155583abSIgor Romanov 
593155583abSIgor Romanov fail_ring_create:
594155583abSIgor Romanov 	rte_free(rxq);
595155583abSIgor Romanov 
596155583abSIgor Romanov fail_rxq_alloc:
597155583abSIgor Romanov fail_check_conf:
598155583abSIgor Romanov 	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
599155583abSIgor Romanov 	return ret;
600155583abSIgor Romanov }
601155583abSIgor Romanov 
602155583abSIgor Romanov static void
603155583abSIgor Romanov sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
604155583abSIgor Romanov {
605155583abSIgor Romanov 	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
606155583abSIgor Romanov 	struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];
607155583abSIgor Romanov 
608155583abSIgor Romanov 	sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
609155583abSIgor Romanov 	rte_ring_free(rxq->ring);
610155583abSIgor Romanov 	rte_free(rxq);
611155583abSIgor Romanov }
612155583abSIgor Romanov 
613155583abSIgor Romanov static int
614155583abSIgor Romanov sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
615155583abSIgor Romanov 			const struct rte_eth_txconf *tx_conf)
616155583abSIgor Romanov {
617155583abSIgor Romanov 	int ret = 0;
618155583abSIgor Romanov 
619155583abSIgor Romanov 	sfcr_info(sr, "entry");
620155583abSIgor Romanov 
621155583abSIgor Romanov 	if (tx_conf->tx_rs_thresh != 0)
622155583abSIgor Romanov 		sfcr_warn(sr, "RS bit in transmit descriptor is not supported");
623155583abSIgor Romanov 
624155583abSIgor Romanov 	if (tx_conf->tx_free_thresh != 0)
625155583abSIgor Romanov 		sfcr_warn(sr, "TxQ free threshold is not supported");
626155583abSIgor Romanov 
627155583abSIgor Romanov 	if (tx_conf->tx_thresh.pthresh != 0 ||
628155583abSIgor Romanov 	    tx_conf->tx_thresh.hthresh != 0 ||
629155583abSIgor Romanov 	    tx_conf->tx_thresh.wthresh != 0) {
630155583abSIgor Romanov 		sfcr_warn(sr,
631155583abSIgor Romanov 			"prefetch/host/writeback thresholds are not supported");
632155583abSIgor Romanov 	}
633155583abSIgor Romanov 
634155583abSIgor Romanov 	if (tx_conf->tx_deferred_start) {
635155583abSIgor Romanov 		sfcr_err(sr, "Deferred start is not supported");
636155583abSIgor Romanov 		ret = -EINVAL;
637155583abSIgor Romanov 	}
638155583abSIgor Romanov 
639155583abSIgor Romanov 	sfcr_info(sr, "done: %s", rte_strerror(-ret));
640155583abSIgor Romanov 
641155583abSIgor Romanov 	return ret;
642155583abSIgor Romanov }
643155583abSIgor Romanov 
644155583abSIgor Romanov static int
645155583abSIgor Romanov sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
646155583abSIgor Romanov 			uint16_t nb_tx_desc, unsigned int socket_id,
647155583abSIgor Romanov 			const struct rte_eth_txconf *tx_conf)
648155583abSIgor Romanov {
649155583abSIgor Romanov 	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
650155583abSIgor Romanov 	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
651155583abSIgor Romanov 	struct sfc_repr_txq *txq;
652155583abSIgor Romanov 	int ret;
653155583abSIgor Romanov 
654155583abSIgor Romanov 	sfcr_info(sr, "entry");
655155583abSIgor Romanov 
656155583abSIgor Romanov 	ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
657155583abSIgor Romanov 	if (ret != 0)
658155583abSIgor Romanov 		goto fail_check_conf;
659155583abSIgor Romanov 
660155583abSIgor Romanov 	ret = -ENOMEM;
661155583abSIgor Romanov 	txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
662155583abSIgor Romanov 				 RTE_CACHE_LINE_SIZE, socket_id);
663155583abSIgor Romanov 	if (txq == NULL)
664155583abSIgor Romanov 		goto fail_txq_alloc;
665155583abSIgor Romanov 
666155583abSIgor Romanov 	ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
667155583abSIgor Romanov 				   "tx", tx_queue_id, nb_tx_desc,
668155583abSIgor Romanov 				   socket_id, &txq->ring);
669155583abSIgor Romanov 	if (ret != 0)
670155583abSIgor Romanov 		goto fail_ring_create;
671155583abSIgor Romanov 
672155583abSIgor Romanov 	ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
673155583abSIgor Romanov 				     tx_queue_id, txq->ring,
674155583abSIgor Romanov 				     &txq->egress_mport);
675155583abSIgor Romanov 	if (ret != 0)
676155583abSIgor Romanov 		goto fail_proxy_add_txq;
677155583abSIgor Romanov 
678155583abSIgor Romanov 	dev->data->tx_queues[tx_queue_id] = txq;
679155583abSIgor Romanov 
680155583abSIgor Romanov 	sfcr_info(sr, "done");
681155583abSIgor Romanov 
682155583abSIgor Romanov 	return 0;
683155583abSIgor Romanov 
684155583abSIgor Romanov fail_proxy_add_txq:
685155583abSIgor Romanov 	rte_ring_free(txq->ring);
686155583abSIgor Romanov 
687155583abSIgor Romanov fail_ring_create:
688155583abSIgor Romanov 	rte_free(txq);
689155583abSIgor Romanov 
690155583abSIgor Romanov fail_txq_alloc:
691155583abSIgor Romanov fail_check_conf:
692155583abSIgor Romanov 	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
693155583abSIgor Romanov 	return ret;
694155583abSIgor Romanov }
695155583abSIgor Romanov 
696155583abSIgor Romanov static void
697155583abSIgor Romanov sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
698155583abSIgor Romanov {
699155583abSIgor Romanov 	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
700155583abSIgor Romanov 	struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];
701155583abSIgor Romanov 
702155583abSIgor Romanov 	sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
703155583abSIgor Romanov 	rte_ring_free(txq->ring);
704155583abSIgor Romanov 	rte_free(txq);
705155583abSIgor Romanov }
706155583abSIgor Romanov 
707a62ec905SIgor Romanov static void
708a62ec905SIgor Romanov sfc_repr_close(struct sfc_repr *sr)
709a62ec905SIgor Romanov {
710a62ec905SIgor Romanov 	SFC_ASSERT(sfc_repr_lock_is_locked(sr));
711a62ec905SIgor Romanov 
712a62ec905SIgor Romanov 	SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
713a62ec905SIgor Romanov 	sr->state = SFC_ETHDEV_CLOSING;
714a62ec905SIgor Romanov 
715a62ec905SIgor Romanov 	/* Put representor close actions here */
716a62ec905SIgor Romanov 
717a62ec905SIgor Romanov 	sr->state = SFC_ETHDEV_INITIALIZED;
718a62ec905SIgor Romanov }
719a62ec905SIgor Romanov 
720a62ec905SIgor Romanov static int
721a62ec905SIgor Romanov sfc_repr_dev_close(struct rte_eth_dev *dev)
722a62ec905SIgor Romanov {
723a62ec905SIgor Romanov 	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
724c377f1adSIgor Romanov 	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
725155583abSIgor Romanov 	unsigned int i;
726a62ec905SIgor Romanov 
727a62ec905SIgor Romanov 	sfcr_info(sr, "entry");
728a62ec905SIgor Romanov 
729a62ec905SIgor Romanov 	sfc_repr_lock(sr);
730a62ec905SIgor Romanov 	switch (sr->state) {
73175f080fdSIgor Romanov 	case SFC_ETHDEV_STARTED:
73275f080fdSIgor Romanov 		sfc_repr_stop(dev);
73375f080fdSIgor Romanov 		SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
73475f080fdSIgor Romanov 		/* FALLTHROUGH */
735a62ec905SIgor Romanov 	case SFC_ETHDEV_CONFIGURED:
736a62ec905SIgor Romanov 		sfc_repr_close(sr);
737a62ec905SIgor Romanov 		SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
738a62ec905SIgor Romanov 		/* FALLTHROUGH */
739a62ec905SIgor Romanov 	case SFC_ETHDEV_INITIALIZED:
740a62ec905SIgor Romanov 		break;
741a62ec905SIgor Romanov 	default:
742a62ec905SIgor Romanov 		sfcr_err(sr, "unexpected adapter state %u on close", sr->state);
743a62ec905SIgor Romanov 		break;
744a62ec905SIgor Romanov 	}
745a62ec905SIgor Romanov 
746155583abSIgor Romanov 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
747155583abSIgor Romanov 		sfc_repr_rx_queue_release(dev, i);
748155583abSIgor Romanov 		dev->data->rx_queues[i] = NULL;
749155583abSIgor Romanov 	}
750155583abSIgor Romanov 
751155583abSIgor Romanov 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
752155583abSIgor Romanov 		sfc_repr_tx_queue_release(dev, i);
753155583abSIgor Romanov 		dev->data->tx_queues[i] = NULL;
754155583abSIgor Romanov 	}
755155583abSIgor Romanov 
756a62ec905SIgor Romanov 	/*
757a62ec905SIgor Romanov 	 * Cleanup all resources.
758a62ec905SIgor Romanov 	 * Rollback primary process sfc_repr_eth_dev_init() below.
759a62ec905SIgor Romanov 	 */
760a62ec905SIgor Romanov 
761c377f1adSIgor Romanov 	(void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);
762c377f1adSIgor Romanov 
763a62ec905SIgor Romanov 	dev->dev_ops = NULL;
764a62ec905SIgor Romanov 
765a62ec905SIgor Romanov 	sfc_repr_unlock(sr);
766a62ec905SIgor Romanov 	sfc_repr_lock_fini(sr);
767a62ec905SIgor Romanov 
768a62ec905SIgor Romanov 	sfcr_info(sr, "done");
769a62ec905SIgor Romanov 
770a62ec905SIgor Romanov 	free(sr);
771a62ec905SIgor Romanov 
772a62ec905SIgor Romanov 	return 0;
773a62ec905SIgor Romanov }
774a62ec905SIgor Romanov 
775a62ec905SIgor Romanov static const struct eth_dev_ops sfc_repr_dev_ops = {
776a62ec905SIgor Romanov 	.dev_configure			= sfc_repr_dev_configure,
77775f080fdSIgor Romanov 	.dev_start			= sfc_repr_dev_start,
77875f080fdSIgor Romanov 	.dev_stop			= sfc_repr_dev_stop,
779a62ec905SIgor Romanov 	.dev_close			= sfc_repr_dev_close,
780a62ec905SIgor Romanov 	.dev_infos_get			= sfc_repr_dev_infos_get,
781*1dfb92a5SIgor Romanov 	.link_update			= sfc_repr_dev_link_update,
782155583abSIgor Romanov 	.rx_queue_setup			= sfc_repr_rx_queue_setup,
783155583abSIgor Romanov 	.rx_queue_release		= sfc_repr_rx_queue_release,
784155583abSIgor Romanov 	.tx_queue_setup			= sfc_repr_tx_queue_setup,
785155583abSIgor Romanov 	.tx_queue_release		= sfc_repr_tx_queue_release,
786a62ec905SIgor Romanov };
787a62ec905SIgor Romanov 
788a62ec905SIgor Romanov 
789a62ec905SIgor Romanov struct sfc_repr_init_data {
790a62ec905SIgor Romanov 	uint16_t		pf_port_id;
791a62ec905SIgor Romanov 	uint16_t		repr_id;
792a62ec905SIgor Romanov 	uint16_t		switch_domain_id;
793a62ec905SIgor Romanov 	efx_mport_sel_t		mport_sel;
794a62ec905SIgor Romanov };
795a62ec905SIgor Romanov 
796a62ec905SIgor Romanov static int
797a62ec905SIgor Romanov sfc_repr_assign_mae_switch_port(uint16_t switch_domain_id,
798a62ec905SIgor Romanov 				const struct sfc_mae_switch_port_request *req,
799a62ec905SIgor Romanov 				uint16_t *switch_port_id)
800a62ec905SIgor Romanov {
801a62ec905SIgor Romanov 	int rc;
802a62ec905SIgor Romanov 
803a62ec905SIgor Romanov 	rc = sfc_mae_assign_switch_port(switch_domain_id, req, switch_port_id);
804a62ec905SIgor Romanov 
805a62ec905SIgor Romanov 	SFC_ASSERT(rc >= 0);
806a62ec905SIgor Romanov 	return -rc;
807a62ec905SIgor Romanov }
808a62ec905SIgor Romanov 
809a62ec905SIgor Romanov static int
810a62ec905SIgor Romanov sfc_repr_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
811a62ec905SIgor Romanov {
812a62ec905SIgor Romanov 	const struct sfc_repr_init_data *repr_data = init_params;
813a62ec905SIgor Romanov 	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
814a62ec905SIgor Romanov 	struct sfc_mae_switch_port_request switch_port_request;
815a62ec905SIgor Romanov 	efx_mport_sel_t ethdev_mport_sel;
816a62ec905SIgor Romanov 	struct sfc_repr *sr;
817a62ec905SIgor Romanov 	int ret;
818a62ec905SIgor Romanov 
819a62ec905SIgor Romanov 	/*
820a62ec905SIgor Romanov 	 * Currently there is no mport we can use for representor's
821a62ec905SIgor Romanov 	 * ethdev. Use an invalid one for now. This way representors
822a62ec905SIgor Romanov 	 * can be instantiated.
823a62ec905SIgor Romanov 	 */
824a62ec905SIgor Romanov 	efx_mae_mport_invalid(&ethdev_mport_sel);
825a62ec905SIgor Romanov 
826a62ec905SIgor Romanov 	memset(&switch_port_request, 0, sizeof(switch_port_request));
827a62ec905SIgor Romanov 	switch_port_request.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
828a62ec905SIgor Romanov 	switch_port_request.ethdev_mportp = &ethdev_mport_sel;
829a62ec905SIgor Romanov 	switch_port_request.entity_mportp = &repr_data->mport_sel;
830a62ec905SIgor Romanov 	switch_port_request.ethdev_port_id = dev->data->port_id;
831a62ec905SIgor Romanov 
832a62ec905SIgor Romanov 	ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
833a62ec905SIgor Romanov 					      &switch_port_request,
834a62ec905SIgor Romanov 					      &srs->switch_port_id);
835a62ec905SIgor Romanov 	if (ret != 0) {
836a62ec905SIgor Romanov 		SFC_GENERIC_LOG(ERR,
837a62ec905SIgor Romanov 			"%s() failed to assign MAE switch port (domain id %u)",
838a62ec905SIgor Romanov 			__func__, repr_data->switch_domain_id);
839a62ec905SIgor Romanov 		goto fail_mae_assign_switch_port;
840a62ec905SIgor Romanov 	}
841a62ec905SIgor Romanov 
842c377f1adSIgor Romanov 	ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
843c377f1adSIgor Romanov 				      repr_data->repr_id,
844c377f1adSIgor Romanov 				      dev->data->port_id,
845c377f1adSIgor Romanov 				      &repr_data->mport_sel);
846c377f1adSIgor Romanov 	if (ret != 0) {
847c377f1adSIgor Romanov 		SFC_GENERIC_LOG(ERR, "%s() failed to add repr proxy port",
848c377f1adSIgor Romanov 				__func__);
849c377f1adSIgor Romanov 		SFC_ASSERT(ret > 0);
850c377f1adSIgor Romanov 		ret = -ret;
851c377f1adSIgor Romanov 		goto fail_create_port;
852c377f1adSIgor Romanov 	}
853c377f1adSIgor Romanov 
854a62ec905SIgor Romanov 	/*
855a62ec905SIgor Romanov 	 * Allocate process private data from heap, since it should not
856a62ec905SIgor Romanov 	 * be located in shared memory allocated using rte_malloc() API.
857a62ec905SIgor Romanov 	 */
858a62ec905SIgor Romanov 	sr = calloc(1, sizeof(*sr));
859a62ec905SIgor Romanov 	if (sr == NULL) {
860a62ec905SIgor Romanov 		ret = -ENOMEM;
861a62ec905SIgor Romanov 		goto fail_alloc_sr;
862a62ec905SIgor Romanov 	}
863a62ec905SIgor Romanov 
864a62ec905SIgor Romanov 	sfc_repr_lock_init(sr);
865a62ec905SIgor Romanov 	sfc_repr_lock(sr);
866a62ec905SIgor Romanov 
867a62ec905SIgor Romanov 	dev->process_private = sr;
868a62ec905SIgor Romanov 
869a62ec905SIgor Romanov 	srs->pf_port_id = repr_data->pf_port_id;
870a62ec905SIgor Romanov 	srs->repr_id = repr_data->repr_id;
871a62ec905SIgor Romanov 	srs->switch_domain_id = repr_data->switch_domain_id;
872a62ec905SIgor Romanov 
873a62ec905SIgor Romanov 	dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
874a62ec905SIgor Romanov 	dev->data->representor_id = srs->repr_id;
875a62ec905SIgor Romanov 	dev->data->backer_port_id = srs->pf_port_id;
876a62ec905SIgor Romanov 
877a62ec905SIgor Romanov 	dev->data->mac_addrs = rte_zmalloc("sfcr", RTE_ETHER_ADDR_LEN, 0);
878a62ec905SIgor Romanov 	if (dev->data->mac_addrs == NULL) {
879a62ec905SIgor Romanov 		ret = -ENOMEM;
880a62ec905SIgor Romanov 		goto fail_mac_addrs;
881a62ec905SIgor Romanov 	}
882a62ec905SIgor Romanov 
883a62ec905SIgor Romanov 	dev->dev_ops = &sfc_repr_dev_ops;
884a62ec905SIgor Romanov 
885a62ec905SIgor Romanov 	sr->state = SFC_ETHDEV_INITIALIZED;
886a62ec905SIgor Romanov 	sfc_repr_unlock(sr);
887a62ec905SIgor Romanov 
888a62ec905SIgor Romanov 	return 0;
889a62ec905SIgor Romanov 
890a62ec905SIgor Romanov fail_mac_addrs:
891a62ec905SIgor Romanov 	sfc_repr_unlock(sr);
892a62ec905SIgor Romanov 	free(sr);
893a62ec905SIgor Romanov 
894a62ec905SIgor Romanov fail_alloc_sr:
895c377f1adSIgor Romanov 	(void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
896c377f1adSIgor Romanov 				      repr_data->repr_id);
897c377f1adSIgor Romanov 
898c377f1adSIgor Romanov fail_create_port:
899a62ec905SIgor Romanov fail_mae_assign_switch_port:
900a62ec905SIgor Romanov 	SFC_GENERIC_LOG(ERR, "%s() failed: %s", __func__, rte_strerror(-ret));
901a62ec905SIgor Romanov 	return ret;
902a62ec905SIgor Romanov }
903a62ec905SIgor Romanov 
904a62ec905SIgor Romanov int
905a62ec905SIgor Romanov sfc_repr_create(struct rte_eth_dev *parent, uint16_t representor_id,
906a62ec905SIgor Romanov 		uint16_t switch_domain_id, const efx_mport_sel_t *mport_sel)
907a62ec905SIgor Romanov {
908a62ec905SIgor Romanov 	struct sfc_repr_init_data repr_data;
909a62ec905SIgor Romanov 	char name[RTE_ETH_NAME_MAX_LEN];
910a62ec905SIgor Romanov 	int ret;
911a62ec905SIgor Romanov 
912a62ec905SIgor Romanov 	if (snprintf(name, sizeof(name), "net_%s_representor_%u",
913a62ec905SIgor Romanov 		     parent->device->name, representor_id) >=
914a62ec905SIgor Romanov 			(int)sizeof(name)) {
915a62ec905SIgor Romanov 		SFC_GENERIC_LOG(ERR, "%s() failed name too long", __func__);
916a62ec905SIgor Romanov 		return -ENAMETOOLONG;
917a62ec905SIgor Romanov 	}
918a62ec905SIgor Romanov 
919a62ec905SIgor Romanov 	memset(&repr_data, 0, sizeof(repr_data));
920a62ec905SIgor Romanov 	repr_data.pf_port_id = parent->data->port_id;
921a62ec905SIgor Romanov 	repr_data.repr_id = representor_id;
922a62ec905SIgor Romanov 	repr_data.switch_domain_id = switch_domain_id;
923a62ec905SIgor Romanov 	repr_data.mport_sel = *mport_sel;
924a62ec905SIgor Romanov 
925a62ec905SIgor Romanov 	ret = rte_eth_dev_create(parent->device, name,
926a62ec905SIgor Romanov 				  sizeof(struct sfc_repr_shared),
927a62ec905SIgor Romanov 				  NULL, NULL,
928a62ec905SIgor Romanov 				  sfc_repr_eth_dev_init, &repr_data);
929a62ec905SIgor Romanov 	if (ret != 0)
930a62ec905SIgor Romanov 		SFC_GENERIC_LOG(ERR, "%s() failed to create device", __func__);
931a62ec905SIgor Romanov 
932a62ec905SIgor Romanov 	SFC_GENERIC_LOG(INFO, "%s() done: %s", __func__, rte_strerror(-ret));
933a62ec905SIgor Romanov 
934a62ec905SIgor Romanov 	return ret;
935a62ec905SIgor Romanov }
936