xref: /freebsd-src/sys/dev/mlx5/mlx5_core/mlx5_eswitch.c (revision 41df1d60e3237df2106dddf357e3205bf29f85d5)
1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include "opt_rss.h"
29 #include "opt_ratelimit.h"
30 
31 #include <linux/etherdevice.h>
32 #include <dev/mlx5/driver.h>
33 #include <dev/mlx5/mlx5_ifc.h>
34 #include <dev/mlx5/vport.h>
35 #include <dev/mlx5/fs.h>
36 #include <dev/mlx5/mpfs.h>
37 #include <dev/mlx5/mlx5_core/mlx5_core.h>
38 #include <dev/mlx5/mlx5_core/eswitch.h>
39 
/* Vport number used to address the uplink (physical port) in FDB rules. */
#define UPLINK_VPORT 0xFFFF

/* Bit in the core debug mask that gates esw_debug() output. */
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

/* Unconditional informational log, prefixed with the device name. */
#define esw_info(dev, format, ...)				\
	printf("mlx5_core: INFO: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

/* Unconditional warning log, prefixed with the device name. */
#define esw_warn(dev, format, ...)				\
	printf("mlx5_core: WARN: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

/* Debug log, emitted only when MLX5_DEBUG_ESWITCH_MASK is enabled in the
 * core debug mask. */
#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
52 
/* Pending reconciliation action recorded on a vport_addr node; consumed
 * by esw_apply_vport_addr_list(). */
enum {
	MLX5_ACTION_NONE = 0,	/* address already in sync with HW */
	MLX5_ACTION_ADD  = 1,	/* program address into HW tables */
	MLX5_ACTION_DEL  = 2,	/* remove address from HW tables */
};
58 
/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node;        /* hash linkage; holds the MAC key */
	u32                table_index; /* L2 table index from mlx5_mpfs_add_mac() */
	u32                vport;       /* vport that owns this UC MAC */
};
65 
/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;        /* hash linkage; holds the MAC key */
	struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;      /* number of vports using this MC MAC */
};
72 
/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;      /* hash linkage; holds the MAC key */
	u8                     action;    /* pending MLX5_ACTION_* */
	u32                    vport;     /* owning vport number */
	struct mlx5_flow_rule *flow_rule; /* SRIOV only */
};
80 
/* NIC vport context change events the E-Switch can subscribe to. */
enum {
	UC_ADDR_CHANGE = BIT(0), /* unicast MAC list changed */
	MC_ADDR_CHANGE = BIT(1), /* multicast MAC list changed */
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE)
89 
90 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
91 					u32 events_mask)
92 {
93 	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
94 	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
95 	void *nic_vport_ctx;
96 
97 	MLX5_SET(modify_nic_vport_context_in, in,
98 		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
99 	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
100 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
101 	if (vport)
102 		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
103 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
104 				     in, nic_vport_context);
105 
106 	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
107 
108 	if (events_mask & UC_ADDR_CHANGE)
109 		MLX5_SET(nic_vport_context, nic_vport_ctx,
110 			 event_on_uc_address_change, 1);
111 	if (events_mask & MC_ADDR_CHANGE)
112 		MLX5_SET(nic_vport_context, nic_vport_ctx,
113 			 event_on_mc_address_change, 1);
114 
115 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
116 }
117 
118 /* E-Switch vport context HW commands */
119 static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
120 				       u32 *out, int outlen)
121 {
122 	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};
123 
124 	MLX5_SET(query_nic_vport_context_in, in, opcode,
125 		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
126 
127 	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
128 	if (vport)
129 		MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
130 
131 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
132 }
133 
134 static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
135 				 u16 *vlan, u8 *qos)
136 {
137 	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
138 	int err;
139 	bool cvlan_strip;
140 	bool cvlan_insert;
141 
142 	*vlan = 0;
143 	*qos = 0;
144 
145 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
146 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
147 		return -ENOTSUPP;
148 
149 	err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
150 	if (err)
151 		goto out;
152 
153 	cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
154 			       esw_vport_context.vport_cvlan_strip);
155 
156 	cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
157 				esw_vport_context.vport_cvlan_insert);
158 
159 	if (cvlan_strip || cvlan_insert) {
160 		*vlan = MLX5_GET(query_esw_vport_context_out, out,
161 				 esw_vport_context.cvlan_id);
162 		*qos = MLX5_GET(query_esw_vport_context_out, out,
163 				esw_vport_context.cvlan_pcp);
164 	}
165 
166 	esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
167 		  vport, *vlan, *qos);
168 out:
169 	return err;
170 }
171 
172 static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
173 					void *in, int inlen)
174 {
175 	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
176 
177 	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
178 	if (vport)
179 		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
180 
181 	MLX5_SET(modify_esw_vport_context_in, in, opcode,
182 		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
183 
184 	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
185 }
186 
187 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
188 				  u16 vlan, u8 qos, bool set)
189 {
190 	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
191 
192 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
193 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
194 		return -ENOTSUPP;
195 
196 	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
197 		  vport, vlan, qos, set);
198 
199 	if (set) {
200 		MLX5_SET(modify_esw_vport_context_in, in,
201 			 esw_vport_context.vport_cvlan_strip, 1);
202 		/* insert only if no vlan in packet */
203 		MLX5_SET(modify_esw_vport_context_in, in,
204 			 esw_vport_context.vport_cvlan_insert, 1);
205 		MLX5_SET(modify_esw_vport_context_in, in,
206 			 esw_vport_context.cvlan_pcp, qos);
207 		MLX5_SET(modify_esw_vport_context_in, in,
208 			 esw_vport_context.cvlan_id, vlan);
209 	}
210 
211 	MLX5_SET(modify_esw_vport_context_in, in,
212 		 field_select.vport_cvlan_strip, 1);
213 	MLX5_SET(modify_esw_vport_context_in, in,
214 		 field_select.vport_cvlan_insert, 1);
215 
216 	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
217 }
218 
219 /* E-Switch FDB */
220 static struct mlx5_flow_rule *
221 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
222 {
223 	int match_header = MLX5_MATCH_OUTER_HEADERS;
224 	struct mlx5_flow_destination dest;
225 	struct mlx5_flow_rule *flow_rule = NULL;
226 	u32 *match_v;
227 	u32 *match_c;
228 	u8 *dmac_v;
229 	u8 *dmac_c;
230 
231 	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
232 	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
233 	if (!match_v || !match_c) {
234 		printf("mlx5_core: WARN: ""FDB: Failed to alloc match parameters\n");
235 		goto out;
236 	}
237 	dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
238 			      outer_headers.dmac_47_16);
239 	dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
240 			      outer_headers.dmac_47_16);
241 
242 	ether_addr_copy(dmac_v, mac);
243 	/* Match criteria mask */
244 	memset(dmac_c, 0xff, 6);
245 
246 	dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
247 	dest.vport_num = vport;
248 
249 	esw_debug(esw->dev,
250 		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
251 		  dmac_v, dmac_c, vport);
252 	flow_rule =
253 		mlx5_add_flow_rule(esw->fdb_table.fdb,
254 				   match_header,
255 				   match_c,
256 				   match_v,
257 				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
258 				   0, &dest);
259 	if (IS_ERR_OR_NULL(flow_rule)) {
260 		printf("mlx5_core: WARN: ""FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
261 		flow_rule = NULL;
262 	}
263 out:
264 	kfree(match_v);
265 	kfree(match_c);
266 	return flow_rule;
267 }
268 
269 static int esw_create_fdb_table(struct mlx5_eswitch *esw)
270 {
271 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
272 	struct mlx5_core_dev *dev = esw->dev;
273 	struct mlx5_flow_namespace *root_ns;
274 	struct mlx5_flow_table *fdb;
275 	struct mlx5_flow_group *g;
276 	void *match_criteria;
277 	int table_size;
278 	u32 *flow_group_in;
279 	u8 *dmac;
280 	int err = 0;
281 
282 	esw_debug(dev, "Create FDB log_max_size(%d)\n",
283 		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
284 
285 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
286 	if (!root_ns) {
287 		esw_warn(dev, "Failed to get FDB flow namespace\n");
288 		return -ENOMEM;
289 	}
290 
291 	flow_group_in = mlx5_vzalloc(inlen);
292 	if (!flow_group_in)
293 		return -ENOMEM;
294 	memset(flow_group_in, 0, inlen);
295 
296 	/* (-2) Since MaorG said so .. */
297 	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)) - 2;
298 
299 	fdb = mlx5_create_flow_table(root_ns, 0, "FDB", table_size);
300 	if (IS_ERR_OR_NULL(fdb)) {
301 		err = PTR_ERR(fdb);
302 		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
303 		goto out;
304 	}
305 
306 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
307 		 MLX5_MATCH_OUTER_HEADERS);
308 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
309 	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
310 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
311 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
312 	eth_broadcast_addr(dmac);
313 
314 	g = mlx5_create_flow_group(fdb, flow_group_in);
315 	if (IS_ERR_OR_NULL(g)) {
316 		err = PTR_ERR(g);
317 		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
318 		goto out;
319 	}
320 
321 	esw->fdb_table.addr_grp = g;
322 	esw->fdb_table.fdb = fdb;
323 out:
324 	kfree(flow_group_in);
325 	if (err && !IS_ERR_OR_NULL(fdb))
326 		mlx5_destroy_flow_table(fdb);
327 	return err;
328 }
329 
/*
 * Tear down the SRIOV FDB table created by esw_create_fdb_table().
 * No-op when the table was never created.  The flow group must be
 * destroyed before its owning table.
 */
static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.addr_grp = NULL;
}
341 
/* E-Switch vport UC/MC lists management */
/* Handler applied per vport_addr when reconciling a UC or MC address list
 * (one of esw_{add,del}_{uc,mc}_addr). */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);
345 
/*
 * Program a unicast MAC for a vport: claim it in the global L2 hash
 * (a UC MAC may belong to only one vport), add it to the HW MPFS L2
 * table, and — when SRIOV is active — install an FDB forwarding rule.
 *
 * Returns 0 on success, -EEXIST if another vport owns the MAC, -ENOMEM
 * on hash allocation failure, or the MPFS error.
 */
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	/* A UC MAC can be owned by at most one vport at a time */
	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (esw_uc) {
		esw_warn(esw->dev,
			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
			 mac, vport, esw_uc->vport);
		return -EEXIST;
	}

	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
	if (!esw_uc)
		return -ENOMEM;
	esw_uc->vport = vport;

	/* Program the MAC into the HW L2 (MPFS) table */
	err = mlx5_mpfs_add_mac(esw->dev, &esw_uc->table_index, mac, 0, 0);
	if (err)
		goto abort;

	if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
	return err;
abort:
	/* Undo the hash claim on MPFS failure */
	l2addr_hash_del(esw_uc);
	return err;
}
381 
/*
 * Remove a vport's unicast MAC: delete it from the HW MPFS L2 table,
 * delete its FDB forwarding rule (if any), and release the global L2
 * hash entry.
 *
 * Returns 0 on success or -EINVAL if the MAC is not owned by @vaddr's
 * vport.
 */
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	/* Only the owning vport may remove the MAC */
	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (!esw_uc || esw_uc->vport != vport) {
		esw_debug(esw->dev,
			  "MAC(%pM) doesn't belong to vport (%d)\n",
			  mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);

	mlx5_mpfs_del_mac(esw->dev, esw_uc->table_index);

	/* mlx5_del_flow_rule() handles a NULL rule and clears the pointer */
	mlx5_del_flow_rule(&vaddr->flow_rule);

	l2addr_hash_del(esw_uc);
	return 0;
}
406 
/*
 * Add a multicast MAC for a vport (SRIOV only; no-op without an FDB).
 * The first vport to register a MC MAC also installs the shared
 * "forward to uplink" rule; each vport then gets its own forwarding
 * rule and bumps the shared refcount.
 *
 * Returns 0 on success or -ENOMEM on hash allocation failure.
 */
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	/* MC handling only applies when SRIOV (FDB) is active */
	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
add:
	/* Count this vport as a user of the shared MC entry */
	esw_mc->refcnt++;
	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}
437 
/*
 * Remove a vport's multicast MAC (SRIOV only; no-op without an FDB).
 * Deletes the vport's own forwarding rule, drops the shared refcount,
 * and — when the last user goes away — removes the uplink rule and the
 * shared hash entry.
 *
 * Returns 0 on success or -EINVAL when the MAC is unknown.
 */
static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	/* Remove this vport's own forwarding rule */
	mlx5_del_flow_rule(&vaddr->flow_rule);

	/* Keep the shared entry while other vports still use the MAC */
	if (--esw_mc->refcnt)
		return 0;

	/* Last user: tear down the shared uplink rule and hash entry */
	mlx5_del_flow_rule(&esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}
470 
/* Apply vport UC/MC list to HW l2 table and FDB table */
/*
 * Walk the vport's UC or MC hash (selected by @list_type) and execute the
 * pending action on each node: ADD programs the address into HW and marks
 * the node in-sync; DEL removes it from HW and frees the node.  Nodes with
 * MLX5_ACTION_NONE are left untouched.
 */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	/* Pick the UC or MC handler pair */
	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	/* Safe iteration (tmp) because DEL removes nodes while walking */
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}
505 
506 /* Sync vport UC/MC list from vport context */
507 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
508 				       u32 vport_num, int list_type)
509 {
510 	struct mlx5_vport *vport = &esw->vports[vport_num];
511 	bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC;
512 	u8 (*mac_list)[ETH_ALEN];
513 	struct l2addr_node *node;
514 	struct vport_addr *addr;
515 	struct hlist_head *hash;
516 	struct hlist_node *tmp;
517 	int size;
518 	int err;
519 	int hi;
520 	int i;
521 
522 	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
523 		       MLX5_MAX_MC_PER_VPORT(esw->dev);
524 
525 	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
526 	if (!mac_list)
527 		return;
528 
529 	hash = is_uc ? vport->uc_list : vport->mc_list;
530 
531 	for_each_l2hash_node(node, tmp, hash, hi) {
532 		addr = container_of(node, struct vport_addr, node);
533 		addr->action = MLX5_ACTION_DEL;
534 	}
535 
536 	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
537 					    mac_list, &size);
538 	if (err)
539 		return;
540 	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
541 		  vport_num, is_uc ? "UC" : "MC", size);
542 
543 	for (i = 0; i < size; i++) {
544 		if (is_uc && !is_valid_ether_addr(mac_list[i]))
545 			continue;
546 
547 		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
548 			continue;
549 
550 		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
551 		if (addr) {
552 			addr->action = MLX5_ACTION_NONE;
553 			continue;
554 		}
555 
556 		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
557 				       GFP_KERNEL);
558 		if (!addr) {
559 			esw_warn(esw->dev,
560 				 "Failed to add MAC(%pM) to vport[%d] DB\n",
561 				 mac_list[i], vport_num);
562 			continue;
563 		}
564 		addr->vport = vport_num;
565 		addr->action = MLX5_ACTION_ADD;
566 	}
567 	kfree(mac_list);
568 }
569 
/*
 * Workqueue handler for a vport context change event.  Re-syncs the
 * vport's UC and/or MC address lists (per the enabled event bits) and
 * re-arms the one-shot change event if the vport is still enabled.
 * Also invoked directly from esw_enable_vport() for the initial sync.
 */
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NIC_VPORT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NIC_VPORT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NIC_VPORT_LIST_TYPE_MC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NIC_VPORT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	/* Event is one-shot in HW: re-arm only while the vport is enabled */
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}
601 
/*
 * Create the per-vport egress ACL table used for VST filtering:
 * a 2-entry table with a "allowed vlan" group (index 0, matching
 * cvlan_tag + first_vid) and a catch-all "drop" group (index 1).
 * On success stores the table/groups in vport->egress.
 *
 * Best-effort (void return): silently bails out when the capability,
 * namespace, or allocations are unavailable; partial objects are
 * unwound on error.
 */
static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	char table_name[32];
	u32 *flow_group_in;
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	snprintf(table_name, 32, "egress_%d", vport->vport);
	acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Group 0 (flow index 0): match vlan tag + vid for the allowed vlan */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Group 1 (flow index 1): no match criteria — catch-all drop */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kfree(flow_group_in);
	/* Unwind partially created objects on error */
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}
678 
/*
 * Delete the vport's egress ACL rules (allowed-vlan and drop).
 * mlx5_del_flow_rule() tolerates NULL and clears the pointers.
 */
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	mlx5_del_flow_rule(&vport->egress.allowed_vlan);
	mlx5_del_flow_rule(&vport->egress.drop_rule);
}
685 
/*
 * Destroy the vport's egress ACL: rules first, then both flow groups,
 * then the table (groups must be destroyed before their table).
 * No-op when the ACL was never created.
 */
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}
702 
/*
 * Create the per-vport ingress ACL table used for VST filtering:
 * a 1-entry table with a single group matching on cvlan_tag (used by
 * esw_vport_ingress_config() to drop already-tagged packets).
 * On success stores the table/group in vport->ingress.
 *
 * Best-effort (void return): silently bails out when the capability,
 * namespace, or allocations are unavailable; the table is unwound on
 * group-creation failure.
 */
static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	char table_name[32];
	u32 *flow_group_in;
	int table_size = 1;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	snprintf(table_name, 32, "ingress_%d", vport->vport);
	acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Single group (flow index 0) matching on the vlan-tag presence bit */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->ingress.acl = acl;
	vport->ingress.drop_grp = g;
out:
	kfree(flow_group_in);
	/* Unwind the table if group creation failed */
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}
763 
/*
 * Delete the vport's ingress ACL drop rule.  mlx5_del_flow_rule()
 * tolerates NULL and clears the pointer.
 */
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	mlx5_del_flow_rule(&vport->ingress.drop_rule);
}
769 
/*
 * Destroy the vport's ingress ACL: rule first, then the group, then the
 * table (the group must be destroyed before its table).  No-op when the
 * ACL was never created.
 */
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
}
784 
/*
 * (Re)program the vport's ingress VST rule: when a VLAN/qos is configured
 * on the vport, install a rule dropping any packet that already carries a
 * customer VLAN tag.  Existing rules are always removed first; when no
 * VLAN/qos is configured the function stops there.
 *
 * Returns 0 on success, -EPERM if the ingress ACL was never created,
 * -ENOMEM on allocation failure, or the rule-add error.
 */
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_flow_destination dest;
	u32 *match_v;
	u32 *match_c;
	int err = 0;

	if (IS_ERR_OR_NULL(vport->ingress.acl)) {
		esw_warn(esw->dev,
			 "vport[%d] configure ingress rules failed, ingress acl is not initialized!\n",
			 vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	/* No VST configured: nothing to filter */
	if (!vport->vlan && !vport->qos)
		return 0;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	/* Match any packet that carries a customer VLAN tag */
	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag);

	dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
	dest.vport_num = vport->vport;

	vport->ingress.drop_rule =
		mlx5_add_flow_rule(vport->ingress.acl,
				   MLX5_MATCH_OUTER_HEADERS,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_DROP,
				   0, &dest);
	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		printf("mlx5_core: WARN: ""vport[%d] configure ingress rules, err(%d)\n", vport->vport, err);
		vport->ingress.drop_rule = NULL;
	}
out:
	kfree(match_v);
	kfree(match_c);
	return err;
}
840 
841 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
842 				   struct mlx5_vport *vport)
843 {
844 	struct mlx5_flow_destination dest;
845 	u32 *match_v;
846 	u32 *match_c;
847 	int err = 0;
848 
849 	if (IS_ERR_OR_NULL(vport->egress.acl)) {
850 		esw_warn(esw->dev, "vport[%d] configure rgress rules failed, egress acl is not initialized!\n",
851 			 vport->vport);
852 		return -EPERM;
853 	}
854 
855 	esw_vport_cleanup_egress_rules(esw, vport);
856 
857 	if (!vport->vlan && !vport->qos)
858 		return 0;
859 
860 	esw_debug(esw->dev,
861 		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
862 		  vport->vport, vport->vlan, vport->qos);
863 
864 	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
865 	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
866 	if (!match_v || !match_c) {
867 		err = -ENOMEM;
868 		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
869 			 vport->vport, err);
870 		goto out;
871 	}
872 
873 	/* Allowed vlan rule */
874 	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag);
875 	MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag);
876 	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
877 	MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
878 
879 	dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
880 	dest.vport_num = vport->vport;
881 
882 	vport->egress.allowed_vlan =
883 		mlx5_add_flow_rule(vport->egress.acl,
884 				   MLX5_MATCH_OUTER_HEADERS,
885 				   match_c,
886 				   match_v,
887 				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
888 				   0, &dest);
889 	if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
890 		err = PTR_ERR(vport->egress.allowed_vlan);
891 		printf("mlx5_core: WARN: ""vport[%d] configure egress allowed vlan rule failed, err(%d)\n", vport->vport, err);
892 		vport->egress.allowed_vlan = NULL;
893 		goto out;
894 	}
895 
896 	/* Drop others rule (star rule) */
897 	memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
898 	memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
899 	vport->egress.drop_rule =
900 		mlx5_add_flow_rule(vport->egress.acl,
901 				   0,
902 				   match_c,
903 				   match_v,
904 				   MLX5_FLOW_CONTEXT_ACTION_DROP,
905 				   0, &dest);
906 	if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
907 		err = PTR_ERR(vport->egress.drop_rule);
908 		printf("mlx5_core: WARN: ""vport[%d] configure egress drop rule failed, err(%d)\n", vport->vport, err);
909 		vport->egress.drop_rule = NULL;
910 	}
911 out:
912 	kfree(match_v);
913 	kfree(match_c);
914 	return err;
915 }
916 
/*
 * Bring a vport under E-Switch management: set up its ACLs (VFs only),
 * restore its admin state, perform an initial address-list sync, mark it
 * enabled, and arm the requested vport context events (@enable_events).
 * Serialized against esw_disable_vport() by vport->state_lock.
 */
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	unsigned long flags;

	mutex_lock(&vport->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
		esw_vport_enable_ingress_acl(esw, vport);
		esw_vport_enable_egress_acl(esw, vport);
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_AUTO);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	/* Call the handler directly for the initial sync (not via the WQ) */
	esw_vport_change_handler(&vport->vport_change_handler);

	/* Publish the enabled flag under the event spinlock */
	spin_lock_irqsave(&vport->lock, flags);
	vport->enabled = true;
	spin_unlock_irqrestore(&vport->lock, flags);

	arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&vport->state_lock);
}
954 
955 static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
956 {
957 	struct mlx5_vport *vport = &esw->vports[vport_num];
958 	struct l2addr_node *node;
959 	struct vport_addr *addr;
960 	struct hlist_node *tmp;
961 	int hi;
962 
963 	for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
964 		addr = container_of(node, struct vport_addr, node);
965 		addr->action = MLX5_ACTION_DEL;
966 	}
967 	esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_UC);
968 
969 	for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
970 		addr = container_of(node, struct vport_addr, node);
971 		addr->action = MLX5_ACTION_DEL;
972 	}
973 	esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_MC);
974 }
975 
/*
 * Take a vport down: mark it disabled (so the EQ handler stops queueing
 * change work), set its ESW admin state to DOWN, drain already queued
 * work, disarm FW events, delete all tracked L2 addresses and (for VFs)
 * remove the ingress/egress ACLs.  No-op if the vport is not enabled.
 *
 * Serialized against esw_enable_vport() by vport->state_lock.
 */
static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	unsigned long flags;

	mutex_lock(&vport->state_lock);
	if (!vport->enabled) {
		mutex_unlock(&vport->state_lock);
		return;
	}

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	spin_lock_irqsave(&vport->lock, flags);
	vport->enabled = false;
	vport->enabled_events = 0;
	spin_unlock_irqrestore(&vport->lock, flags);

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	/* We don't assume VFs will cleanup after themselves */
	esw_cleanup_vport(esw, vport_num);
	if (vport_num) {
		/* Only VFs have ACLs (see esw_enable_vport()) */
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&vport->state_lock);
}
1011 
1012 /* Public E-Switch API */
1013 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
1014 {
1015 	int err;
1016 	int i;
1017 
1018 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1019 	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1020 		return 0;
1021 
1022 	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1023 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1024 		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1025 		return -ENOTSUPP;
1026 	}
1027 
1028 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
1029 		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
1030 
1031 	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1032 		esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
1033 
1034 	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
1035 
1036 	esw_disable_vport(esw, 0);
1037 
1038 	err = esw_create_fdb_table(esw);
1039 	if (err)
1040 		goto abort;
1041 
1042 	for (i = 0; i <= nvfs; i++)
1043 		esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);
1044 
1045 	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
1046 		 esw->enabled_vports);
1047 	return 0;
1048 
1049 abort:
1050 	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1051 	return err;
1052 }
1053 
1054 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1055 {
1056 	int i;
1057 
1058 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1059 	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1060 		return;
1061 
1062 	esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
1063 		 esw->enabled_vports);
1064 
1065 	for (i = 0; i < esw->total_vports; i++)
1066 		esw_disable_vport(esw, i);
1067 
1068 	esw_destroy_fdb_table(esw);
1069 
1070 	/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
1071 	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1072 }
1073 
/*
 * Allocate and initialize the E-Switch for @dev: the L2 table bitmap,
 * the event workqueue and one mlx5_vport per vport; attach the result
 * to dev->priv.eswitch and enable vport 0 (the PF) with the non-sriov
 * event mask.  VF vports are enabled later by
 * mlx5_eswitch_enable_sriov().
 *
 * Returns 0 on success (also when the device is not an ETH vport group
 * manager, in which case no eswitch is created) or -ENOMEM; on failure
 * all partial allocations are released.
 */
int mlx5_eswitch_init(struct mlx5_core_dev *dev, int total_vports)
{
	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
	struct mlx5_eswitch *esw;
	int vport_num;
	int err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	esw_info(dev,
		 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
		 total_vports, l2_table_size,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	/* kzalloc: the abort path below relies on unset members being NULL. */
	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;

	esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
				   sizeof(uintptr_t), GFP_KERNEL);
	if (!esw->l2_table.bitmap) {
		err = -ENOMEM;
		goto abort;
	}
	esw->l2_table.size = l2_table_size;

	/* Single-threaded: vport change events are serialized. */
	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	/* Per-vport state: number, back-pointer, work item and locks. */
	for (vport_num = 0; vport_num < total_vports; vport_num++) {
		struct mlx5_vport *vport = &esw->vports[vport_num];

		vport->vport = vport_num;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
		spin_lock_init(&vport->lock);
		mutex_init(&vport->state_lock);
	}

	esw->total_vports = total_vports;
	esw->enabled_vports = 0;

	dev->priv.eswitch = esw;
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	/* VF Vports will be enabled when SRIOV is enabled */
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->vports);
	kfree(esw);
	return err;
}
1144 
1145 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1146 {
1147 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1148 	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1149 		return;
1150 
1151 	esw_info(esw->dev, "cleanup\n");
1152 	esw_disable_vport(esw, 0);
1153 
1154 	esw->dev->priv.eswitch = NULL;
1155 	destroy_workqueue(esw->work_queue);
1156 	kfree(esw->l2_table.bitmap);
1157 	kfree(esw->vports);
1158 	kfree(esw);
1159 }
1160 
1161 void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
1162 {
1163 	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
1164 	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
1165 	struct mlx5_vport *vport;
1166 
1167 	if (!esw) {
1168 		printf("mlx5_core: WARN: ""MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n", vport_num);
1169 		return;
1170 	}
1171 
1172 	vport = &esw->vports[vport_num];
1173 	spin_lock(&vport->lock);
1174 	if (vport->enabled)
1175 		queue_work(esw->work_queue, &vport->vport_change_handler);
1176 	spin_unlock(&vport->lock);
1177 }
1178 
1179 /* Vport Administration */
/* True when @esw exists and this device may administer vports:
 * a PF with the vport_group_manager capability. */
#define ESW_ALLOWED(esw) \
	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
/* True when @vport indexes a valid entry of esw->vports[]. */
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
1183 
1184 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
1185 {
1186 	((u8 *)node_guid)[7] = mac[0];
1187 	((u8 *)node_guid)[6] = mac[1];
1188 	((u8 *)node_guid)[5] = mac[2];
1189 	((u8 *)node_guid)[4] = 0xff;
1190 	((u8 *)node_guid)[3] = 0xfe;
1191 	((u8 *)node_guid)[2] = mac[3];
1192 	((u8 *)node_guid)[1] = mac[4];
1193 	((u8 *)node_guid)[0] = mac[5];
1194 }
1195 
1196 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1197 			       int vport, u8 mac[ETH_ALEN])
1198 {
1199 	int err = 0;
1200 	u64 node_guid;
1201 
1202 	if (!ESW_ALLOWED(esw))
1203 		return -EPERM;
1204 	if (!LEGAL_VPORT(esw, vport))
1205 		return -EINVAL;
1206 
1207 	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1208 	if (err) {
1209 		mlx5_core_warn(esw->dev,
1210 			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1211 			       vport, err);
1212 		return err;
1213 	}
1214 
1215 	node_guid_gen_from_mac(&node_guid, mac);
1216 	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
1217 	if (err) {
1218 		mlx5_core_warn(esw->dev,
1219 			       "Failed to mlx5_modify_nic_vport_node_guid vport(%d) err=(%d)\n",
1220 			       vport, err);
1221 		return err;
1222 	}
1223 
1224 	return err;
1225 }
1226 
1227 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1228 				 int vport, int link_state)
1229 {
1230 	if (!ESW_ALLOWED(esw))
1231 		return -EPERM;
1232 	if (!LEGAL_VPORT(esw, vport))
1233 		return -EINVAL;
1234 
1235 	return mlx5_modify_vport_admin_state(esw->dev,
1236 					     MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1237 					     vport, link_state);
1238 }
1239 
1240 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1241 				  int vport, struct mlx5_esw_vport_info *ivi)
1242 {
1243 	u16 vlan;
1244 	u8 qos;
1245 
1246 	if (!ESW_ALLOWED(esw))
1247 		return -EPERM;
1248 	if (!LEGAL_VPORT(esw, vport))
1249 		return -EINVAL;
1250 
1251 	memset(ivi, 0, sizeof(*ivi));
1252 	ivi->vf = vport - 1;
1253 
1254 	mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
1255 	ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
1256 						      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1257 						      vport);
1258 	query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
1259 	ivi->vlan = vlan;
1260 	ivi->qos = qos;
1261 	ivi->spoofchk = 0;
1262 
1263 	return 0;
1264 }
1265 
1266 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1267 				int vport, u16 vlan, u8 qos)
1268 {
1269 	struct mlx5_vport *evport;
1270 	int err = 0;
1271 	int set = 0;
1272 
1273 	if (!ESW_ALLOWED(esw))
1274 		return -EPERM;
1275 	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
1276 		return -EINVAL;
1277 
1278 	if (vlan || qos)
1279 		set = 1;
1280 
1281 	evport = &esw->vports[vport];
1282 
1283 	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
1284 	if (err)
1285 		return err;
1286 
1287 	mutex_lock(&evport->state_lock);
1288 	evport->vlan = vlan;
1289 	evport->qos = qos;
1290 	if (evport->enabled) {
1291 		esw_vport_ingress_config(esw, evport);
1292 		esw_vport_egress_config(esw, evport);
1293 	}
1294 	mutex_unlock(&evport->state_lock);
1295 	return err;
1296 }
1297 
1298