/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_common.h>
#include <rte_spinlock.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"

/**
 * Switch port registry entry.
 *
 * Drivers aware of RTE switch domains also have to maintain RTE switch
 * port IDs for the RTE ethdev instances they operate. These IDs are
 * supposed to stand for physical interconnect entities, for example,
 * PCIe functions.
 *
 * In terms of MAE, a physical interconnect entity can be referred to using
 * an MPORT selector, that is, a 32-bit value. RTE switch port IDs, in turn,
 * are 16-bit values, so an indirect mapping has to be maintained:
 *
 * +--------------------+          +---------------------------------------+
 * | RTE switch port ID |  ------  |         MAE switch port entry         |
 * +--------------------+          |         ---------------------         |
 *                                 |                                       |
 *                                 | Entity (PCIe function) MPORT selector |
 *                                 |                   +                   |
 *                                 |  Port type (independent/representor)  |
 *                                 +---------------------------------------+
 *
 * This mapping includes a port type to ensure that the RTE switch port ID
 * of a represented entity and that of its representor differ in the case
 * when the entity is plugged into DPDK rather than into a guest.
 *
 * Entry data also comprises the RTE ethdev's own MPORT. This value
 * coincides with the entity MPORT in the case of independent ports.
 * In the case of representors, this ID is not a selector and refers
 * to an allocatable object (that is, it is likely to change on RTE
 * ethdev replug). The flow API backend must use this value rather
 * than entity_mport to support the flow rule action PORT_ID.
 */
struct sfc_mae_switch_port {
	TAILQ_ENTRY(sfc_mae_switch_port)	switch_domain_ports;

	/** RTE ethdev MPORT */
	efx_mport_sel_t				ethdev_mport;
	/** RTE ethdev port ID */
	uint16_t				ethdev_port_id;

	/** Entity (PCIe function) MPORT selector */
	efx_mport_sel_t				entity_mport;
	/** Port type (independent/representor) */
	enum sfc_mae_switch_port_type		type;
	/** RTE switch port ID */
	uint16_t				id;
};
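
/*
 * For illustration (not an excerpt from real data): if the PCIe function
 * behind a given entity MPORT is plugged into DPDK directly and is also
 * served by a representor ethdev, the registry ends up holding two
 * entries which share the entity MPORT selector but differ in port type,
 * and therefore carry two distinct RTE switch port IDs.
 */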

TAILQ_HEAD(sfc_mae_switch_ports, sfc_mae_switch_port);

/**
 * Switch domain registry entry.
 *
 * Even if an RTE ethdev instance gets unplugged, the corresponding
 * entry in the switch port registry will not be removed because the
 * entity (PCIe function) MPORT is static and cannot change. If this
 * RTE ethdev gets plugged back, the entry will be reused, and the
 * RTE switch port ID will be the same.
 */
struct sfc_mae_switch_domain {
	TAILQ_ENTRY(sfc_mae_switch_domain)	entries;

	/** HW switch ID */
	struct sfc_hw_switch_id			*hw_switch_id;
	/** The number of ports in the switch port registry */
	unsigned int				nb_ports;
	/** Switch port registry */
	struct sfc_mae_switch_ports		ports;
	/** RTE switch domain ID allocated for a group of devices */
	uint16_t				id;
	/** DPDK controller -> EFX interface mapping */
	efx_pcie_interface_t			*controllers;
	/** Number of DPDK controllers and EFX interfaces */
	size_t					nb_controllers;
};

TAILQ_HEAD(sfc_mae_switch_domains, sfc_mae_switch_domain);

/**
 * MAE representation of RTE switch infrastructure.
 *
 * It is possible that an RTE flow API client tries to insert a rule
 * referencing an RTE ethdev deployed on top of a different physical
 * device (it may or may not belong to the same vendor). This particular
 * driver/engine cannot support this and has to reject such rules.
 *
 * Technically, it is the HW switch identifier which, if queried for each
 * RTE ethdev instance, indicates the relationship between the instances.
 * At the same time, RTE flow API clients also need to somehow figure
 * out the relationship between RTE ethdev instances in advance.
 *
 * The concept of RTE switch domains resolves this issue. The driver
 * maintains a static list of switch domains which is easy to browse,
 * and each RTE ethdev fills in the RTE switch parameters in the device
 * information structure which is made available to clients.
 *
 * Even if all RTE ethdev instances belonging to a switch domain get
 * unplugged, the corresponding entry in the switch domain registry
 * will not be removed because the corresponding HW switch exists
 * regardless of whether its ports are plugged into DPDK or kept aside.
 * If a port gets plugged back into DPDK, the corresponding
 * RTE ethdev will indicate the same RTE switch domain ID.
 */
struct sfc_mae_switch {
	/** A lock to protect the whole structure */
	rte_spinlock_t			lock;
	/** Switch domain registry */
	struct sfc_mae_switch_domains	domains;
};

static struct sfc_mae_switch sfc_mae_switch = {
	.lock = RTE_SPINLOCK_INITIALIZER,
	.domains = TAILQ_HEAD_INITIALIZER(sfc_mae_switch.domains),
};


/* This function expects to be called only when the lock is held */
static struct sfc_mae_switch_domain *
sfc_mae_find_switch_domain_by_id(uint16_t switch_domain_id)
{
	struct sfc_mae_switch_domain *domain;

	SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

	TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
		if (domain->id == switch_domain_id)
			return domain;
	}

	return NULL;
}

/* This function expects to be called only when the lock is held */
static struct sfc_mae_switch_domain *
sfc_mae_find_switch_domain_by_hw_switch_id(const struct sfc_hw_switch_id *id)
{
	struct sfc_mae_switch_domain *domain;

	SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

	TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
		if (sfc_hw_switch_ids_equal(domain->hw_switch_id, id))
			return domain;
	}

	return NULL;
}

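/*
 * Find the switch domain which matches the HW switch ID of the adapter
 * or, if no such domain exists yet, allocate a new one together with an
 * RTE switch domain ID. The resulting ID is returned via
 * switch_domain_id. Returns 0 on success or a positive errno value.
 */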
int
sfc_mae_assign_switch_domain(struct sfc_adapter *sa,
			     uint16_t *switch_domain_id)
{
	struct sfc_hw_switch_id *hw_switch_id;
	struct sfc_mae_switch_domain *domain;
	int rc;

	rte_spinlock_lock(&sfc_mae_switch.lock);

	rc = sfc_hw_switch_id_init(sa, &hw_switch_id);
	if (rc != 0)
		goto fail_hw_switch_id_init;

	domain = sfc_mae_find_switch_domain_by_hw_switch_id(hw_switch_id);
	if (domain != NULL) {
		sfc_hw_switch_id_fini(sa, hw_switch_id);
		goto done;
	}

	domain = rte_zmalloc("sfc_mae_switch_domain", sizeof(*domain), 0);
	if (domain == NULL) {
		rc = ENOMEM;
		goto fail_mem_alloc;
	}

	/*
	 * This code belongs to the driver init path, that is, negation is
	 * done at the end of the path by sfc_eth_dev_init(). RTE APIs
	 * return negated error codes, so drop the negation here.
	 */
	rc = -rte_eth_switch_domain_alloc(&domain->id);
	if (rc != 0)
		goto fail_domain_alloc;

	domain->hw_switch_id = hw_switch_id;

	TAILQ_INIT(&domain->ports);

	TAILQ_INSERT_TAIL(&sfc_mae_switch.domains, domain, entries);

done:
	*switch_domain_id = domain->id;

	rte_spinlock_unlock(&sfc_mae_switch.lock);

	return 0;

fail_domain_alloc:
	rte_free(domain);

fail_mem_alloc:
	sfc_hw_switch_id_fini(sa, hw_switch_id);

fail_hw_switch_id_init:
	rte_spinlock_unlock(&sfc_mae_switch.lock);
	return rc;
}

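/*
 * Return the DPDK controller to EFX interface mapping recorded for the
 * given switch domain. Returns EINVAL if either output pointer is NULL
 * or the switch domain is unknown.
 */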
int
sfc_mae_switch_domain_controllers(uint16_t switch_domain_id,
				  const efx_pcie_interface_t **controllers,
				  size_t *nb_controllers)
{
	struct sfc_mae_switch_domain *domain;

	if (controllers == NULL || nb_controllers == NULL)
		return EINVAL;

	rte_spinlock_lock(&sfc_mae_switch.lock);

	domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
	if (domain == NULL) {
		rte_spinlock_unlock(&sfc_mae_switch.lock);
		return EINVAL;
	}

	*controllers = domain->controllers;
	*nb_controllers = domain->nb_controllers;

	rte_spinlock_unlock(&sfc_mae_switch.lock);
	return 0;
}

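/*
 * Install the DPDK controller to EFX interface mapping for the given
 * switch domain. The mapping may be set only once; EINVAL is returned
 * if the switch domain is unknown or a mapping is already in place.
 */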
int
sfc_mae_switch_domain_map_controllers(uint16_t switch_domain_id,
				      efx_pcie_interface_t *controllers,
				      size_t nb_controllers)
{
	struct sfc_mae_switch_domain *domain;

	rte_spinlock_lock(&sfc_mae_switch.lock);

	domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
	if (domain == NULL) {
		rte_spinlock_unlock(&sfc_mae_switch.lock);
		return EINVAL;
	}

	/* Controller mapping may be set only once */
	if (domain->controllers != NULL) {
		rte_spinlock_unlock(&sfc_mae_switch.lock);
		return EINVAL;
	}

	domain->controllers = controllers;
	domain->nb_controllers = nb_controllers;

	rte_spinlock_unlock(&sfc_mae_switch.lock);
	return 0;
}

/* This function expects to be called only when the lock is held */
static struct sfc_mae_switch_port *
sfc_mae_find_switch_port_by_entity(const struct sfc_mae_switch_domain *domain,
				   const efx_mport_sel_t *entity_mportp,
				   enum sfc_mae_switch_port_type type)
{
	struct sfc_mae_switch_port *port;

	SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

	TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
		if (port->entity_mport.sel == entity_mportp->sel &&
		    port->type == type)
			return port;
	}

	return NULL;
}

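/*
 * Find the switch port which is backed by the given entity MPORT and
 * port type in the given switch domain or, if no such entry exists,
 * add a new one with a freshly assigned RTE switch port ID. In either
 * case, refresh the ethdev MPORT and ethdev port ID of the entry from
 * the request and return the RTE switch port ID via switch_port_id.
 * Returns 0 on success or a positive errno value.
 *
 * A minimal caller sketch follows. The local variable names are purely
 * illustrative, only the request fields referenced by this function are
 * shown, and the port type enumerator is assumed to be the "independent"
 * member of enum sfc_mae_switch_port_type declared in sfc_switch.h:
 *
 *	efx_mport_sel_t entity_mport;
 *	efx_mport_sel_t ethdev_mport;
 *	struct sfc_mae_switch_port_request req;
 *	uint16_t switch_port_id;
 *	int rc;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
 *	req.entity_mportp = &entity_mport;
 *	req.ethdev_mportp = &ethdev_mport;
 *	req.ethdev_port_id = ethdev_port_id;
 *
 *	rc = sfc_mae_assign_switch_port(switch_domain_id, &req,
 *					&switch_port_id);
 */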
int
sfc_mae_assign_switch_port(uint16_t switch_domain_id,
			   const struct sfc_mae_switch_port_request *req,
			   uint16_t *switch_port_id)
{
	struct sfc_mae_switch_domain *domain;
	struct sfc_mae_switch_port *port;
	int rc;

	rte_spinlock_lock(&sfc_mae_switch.lock);

	domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
	if (domain == NULL) {
		rc = EINVAL;
		goto fail_find_switch_domain_by_id;
	}

	port = sfc_mae_find_switch_port_by_entity(domain, req->entity_mportp,
						  req->type);
	if (port != NULL)
		goto done;

	port = rte_zmalloc("sfc_mae_switch_port", sizeof(*port), 0);
	if (port == NULL) {
		rc = ENOMEM;
		goto fail_mem_alloc;
	}

	port->entity_mport.sel = req->entity_mportp->sel;
	port->type = req->type;

	port->id = (domain->nb_ports++);

	TAILQ_INSERT_TAIL(&domain->ports, port, switch_domain_ports);

done:
	port->ethdev_mport = *req->ethdev_mportp;
	port->ethdev_port_id = req->ethdev_port_id;

	*switch_port_id = port->id;

	rte_spinlock_unlock(&sfc_mae_switch.lock);

	return 0;

fail_mem_alloc:
fail_find_switch_domain_by_id:
	rte_spinlock_unlock(&sfc_mae_switch.lock);
	return rc;
}

/* This function expects to be called only when the lock is held */
static int
sfc_mae_find_switch_port_by_ethdev(uint16_t switch_domain_id,
				   uint16_t ethdev_port_id,
				   efx_mport_sel_t *mport_sel)
{
	struct sfc_mae_switch_domain *domain;
	struct sfc_mae_switch_port *port;

	SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

	if (ethdev_port_id == RTE_MAX_ETHPORTS)
		return EINVAL;

	domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
	if (domain == NULL)
		return EINVAL;

	TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
		if (port->ethdev_port_id == ethdev_port_id) {
			*mport_sel = port->ethdev_mport;
			return 0;
		}
	}

	return ENOENT;
}

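/*
 * Locked wrapper around the lookup above: translate an ethdev port ID
 * within the given switch domain into the corresponding ethdev MPORT.
 * Returns 0 on success, EINVAL for an unknown switch domain or an
 * invalid ethdev port ID, and ENOENT if no matching port is registered.
 */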
int
sfc_mae_switch_port_by_ethdev(uint16_t switch_domain_id,
			      uint16_t ethdev_port_id,
			      efx_mport_sel_t *mport_sel)
{
	int rc;

	rte_spinlock_lock(&sfc_mae_switch.lock);
	rc = sfc_mae_find_switch_port_by_ethdev(switch_domain_id,
						ethdev_port_id, mport_sel);
	rte_spinlock_unlock(&sfc_mae_switch.lock);

	return rc;
}