/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"

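/*
 * The "entity" m-port is the m-port of the PCIe function (PF or VF)
 * backing this ethdev. Until representor support arrives, it doubles
 * as the ethdev m-port of the switch port (see sfc_mae_attach()).
 */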
static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
			    efx_mport_sel_t *mportp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
					      mportp);
}

int
sfc_mae_attach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae_switch_port_request switch_port_request = {0};
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t entity_mport;
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	efx_mae_limits_t limits;
	int rc;

	sfc_log_init(sa, "entry");

	if (!encp->enc_mae_supported) {
		mae->status = SFC_MAE_STATUS_UNSUPPORTED;
		return 0;
	}

	sfc_log_init(sa, "init MAE");
	rc = efx_mae_init(sa->nic);
	if (rc != 0)
		goto fail_mae_init;

	sfc_log_init(sa, "get MAE limits");
	rc = efx_mae_get_limits(sa->nic, &limits);
	if (rc != 0)
		goto fail_mae_get_limits;

	sfc_log_init(sa, "assign entity MPORT");
	rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
	if (rc != 0)
		goto fail_mae_assign_entity_mport;

	sfc_log_init(sa, "assign RTE switch domain");
	rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
	if (rc != 0)
		goto fail_mae_assign_switch_domain;

	sfc_log_init(sa, "assign RTE switch port");
	switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
	switch_port_request.entity_mportp = &entity_mport;
	/*
	 * As of now, the driver does not support representors, so
	 * RTE ethdev MPORT simply matches that of the entity.
	 */
	switch_port_request.ethdev_mportp = &entity_mport;
	switch_port_request.ethdev_port_id = sas->port_id;
	rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
					&switch_port_request,
					&mae->switch_port_id);
	if (rc != 0)
		goto fail_mae_assign_switch_port;

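	/*
	 * The bounce buffer stages the raw bytes of an encap. header
	 * during flow parsing before they are committed to a FW
	 * resource (see sfc_mae_encap_header_add()), so it is sized
	 * for the largest header the FW accepts.
	 */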
	sfc_log_init(sa, "allocate encap. header bounce buffer");
	bounce_eh->buf_size = limits.eml_encap_header_size_limit;
	bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
				    bounce_eh->buf_size, 0);
	if (bounce_eh->buf == NULL) {
		rc = ENOMEM;
		goto fail_mae_alloc_bounce_eh;
	}

	mae->status = SFC_MAE_STATUS_SUPPORTED;
	mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
	mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
	mae->encap_types_supported = limits.eml_encap_types_supported;
	TAILQ_INIT(&mae->outer_rules);
	TAILQ_INIT(&mae->encap_headers);
	TAILQ_INIT(&mae->action_sets);

	sfc_log_init(sa, "done");

	return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
fail_mae_get_limits:
	efx_mae_fini(sa->nic);

fail_mae_init:
	sfc_log_init(sa, "failed %d", rc);

	return rc;
}

void
sfc_mae_detach(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;
	enum sfc_mae_status status_prev = mae->status;

	sfc_log_init(sa, "entry");

	mae->nb_action_rule_prios_max = 0;
	mae->status = SFC_MAE_STATUS_UNKNOWN;

	if (status_prev != SFC_MAE_STATUS_SUPPORTED)
		return;

	rte_free(mae->bounce_eh.buf);

	efx_mae_fini(sa->nic);

	sfc_log_init(sa, "done");
}

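/*
 * MAE resource entries (outer rules, encap. headers, action sets) are
 * reference-counted at two levels: _attach()/_add()/_del() manage the
 * driver-level entries shared between flows with equal specifications,
 * whilst _enable()/_disable() count flow insertions and allocate (free)
 * the FW resource on the first (last) use; see the fw_rsrc fields.
 */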
static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
			  const efx_mae_match_spec_t *match_spec,
			  efx_tunnel_protocol_t encap_type)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
		if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
		    rule->encap_type == encap_type) {
			++(rule->refcnt);
			return rule;
		}
	}

	return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
		       efx_mae_match_spec_t *match_spec,
		       efx_tunnel_protocol_t encap_type,
		       struct sfc_mae_outer_rule **rulep)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
	if (rule == NULL)
		return ENOMEM;

	rule->refcnt = 1;
	rule->match_spec = match_spec;
	rule->encap_type = encap_type;

	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

	*rulep = rule;

	return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
		       struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(rule->refcnt != 0);

	--(rule->refcnt);

	if (rule->refcnt != 0)
		return;

	SFC_ASSERT(rule->fw_rsrc.rule_id.id == EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(rule->fw_rsrc.refcnt == 0);

	efx_mae_match_spec_fini(sa->nic, rule->match_spec);

	TAILQ_REMOVE(&mae->outer_rules, rule, entries);
	rte_free(rule);
}

static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
			  struct sfc_mae_outer_rule *rule,
			  efx_mae_match_spec_t *match_spec_action)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(rule->match_spec != NULL);

		rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
					       rule->encap_type,
					       &fw_rsrc->rule_id);
		if (rc != 0)
			return rc;
	}

	rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
						  &fw_rsrc->rule_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_outer_rule_remove(sa->nic,
							&fw_rsrc->rule_id);
		}
		return rc;
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static int
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
			   struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(fw_rsrc->refcnt != 0);

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
		if (rc != 0)
			return rc;

		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);

	return 0;
}

static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
			    const struct sfc_mae_bounce_eh *bounce_eh)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
		if (encap_header->size == bounce_eh->size &&
		    memcmp(encap_header->buf, bounce_eh->buf,
			   bounce_eh->size) == 0) {
			++(encap_header->refcnt);
			return encap_header;
		}
	}

	return NULL;
}

static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
			 const struct sfc_mae_bounce_eh *bounce_eh,
			 struct sfc_mae_encap_header **encap_headerp)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	encap_header = rte_zmalloc("sfc_mae_encap_header",
				   sizeof(*encap_header), 0);
	if (encap_header == NULL)
		return ENOMEM;

	encap_header->size = bounce_eh->size;

	encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
				       encap_header->size, 0);
	if (encap_header->buf == NULL) {
		rte_free(encap_header);
		return ENOMEM;
	}

	rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

	encap_header->refcnt = 1;
	encap_header->type = bounce_eh->type;
	encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

	*encap_headerp = encap_header;

	return 0;
}

static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
			 struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae *mae = &sa->mae;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(encap_header->refcnt != 0);

	--(encap_header->refcnt);

	if (encap_header->refcnt != 0)
		return;

	SFC_ASSERT(encap_header->fw_rsrc.eh_id.id == EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(encap_header->fw_rsrc.refcnt == 0);

	TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
	rte_free(encap_header->buf);
	rte_free(encap_header);
}

static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
			    struct sfc_mae_encap_header *encap_header,
			    efx_mae_actions_t *action_set_spec)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(encap_header->buf != NULL);
		SFC_ASSERT(encap_header->size != 0);

		rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
						encap_header->buf,
						encap_header->size,
						&fw_rsrc->eh_id);
		if (rc != 0)
			return rc;
	}

	rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
					      &fw_rsrc->eh_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_encap_header_free(sa->nic,
							&fw_rsrc->eh_id);
		}
		return rc;
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static int
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
			     struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	SFC_ASSERT(fw_rsrc->eh_id.id != EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(fw_rsrc->refcnt != 0);

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
		if (rc != 0)
			return rc;

		fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);

	return 0;
}

static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
			  const struct sfc_mae_encap_header *encap_header,
			  const efx_mae_actions_t *spec)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
		if (action_set->encap_header == encap_header &&
		    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
			++(action_set->refcnt);
			return action_set;
		}
	}

	return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
		       efx_mae_actions_t *spec,
		       struct sfc_mae_encap_header *encap_header,
		       struct sfc_mae_action_set **action_setp)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
	if (action_set == NULL)
		return ENOMEM;

	action_set->refcnt = 1;
	action_set->spec = spec;
	action_set->encap_header = encap_header;

	action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

	*action_setp = action_set;

	return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
		       struct sfc_mae_action_set *action_set)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(action_set->refcnt != 0);

	--(action_set->refcnt);

	if (action_set->refcnt != 0)
		return;

	SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);

	efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
	sfc_mae_encap_header_del(sa, action_set->encap_header);
	TAILQ_REMOVE(&mae->action_sets, action_set, entries);
	rte_free(action_set);
}

static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
			  struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_encap_header *encap_header = action_set->encap_header;
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(action_set->spec != NULL);

		rc = sfc_mae_encap_header_enable(sa, encap_header,
						 action_set->spec);
		if (rc != 0)
			return rc;

		rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
					      &fw_rsrc->aset_id);
		if (rc != 0) {
			(void)sfc_mae_encap_header_disable(sa, encap_header);

			return rc;
		}
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static int
sfc_mae_action_set_disable(struct sfc_adapter *sa,
			   struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(fw_rsrc->refcnt != 0);

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
		if (rc != 0)
			return rc;

		fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

		rc = sfc_mae_encap_header_disable(sa, action_set->encap_header);
		if (rc != 0)
			return rc;
	}

	--(fw_rsrc->refcnt);

	return 0;
}

void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec;
	struct sfc_flow_spec_mae *spec_mae;

	if (flow == NULL)
		return;

	spec = &flow->spec;

	if (spec == NULL)
		return;

	spec_mae = &spec->mae;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

	if (spec_mae->outer_rule != NULL)
		sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

	if (spec_mae->action_set != NULL)
		sfc_mae_action_set_del(sa, spec_mae->action_set);

	if (spec_mae->match_spec != NULL)
		efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

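/*
 * Write out the deferred EtherType values collected from items ETH and
 * VLAN. As an illustration, a double-tagged pattern along the lines of
 * "eth type is 0x88a8 / vlan inner_type is 0x8100 / vlan inner_type is
 * 0x0800" yields VLAN0_PROTO_BE = 0x88a8, VLAN1_PROTO_BE = 0x8100 and
 * ETHER_TYPE_BE = 0x0800 (IPv4).
 */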
static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	const efx_mae_field_id_t field_ids[] = {
		EFX_MAE_FIELD_VLAN0_PROTO_BE,
		EFX_MAE_FIELD_VLAN1_PROTO_BE,
	};
	const struct sfc_mae_ethertype *et;
	unsigned int i;
	int rc;

	/*
	 * In accordance with RTE flow API convention, the innermost L2
	 * item's "type" ("inner_type") is an L3 EtherType. If there is
	 * no L3 item, it's 0x0000/0x0000.
	 */
	et = &pdata->ethertypes[pdata->nb_vlan_tags];
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
					  sizeof(et->value),
					  (const uint8_t *)&et->value,
					  sizeof(et->mask),
					  (const uint8_t *)&et->mask);
	if (rc != 0)
		return rc;

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (i = 0; i < pdata->nb_vlan_tags; ++i) {
		et = &pdata->ethertypes[i];

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[field_ids[i]],
						  sizeof(et->value),
						  (const uint8_t *)&et->value,
						  sizeof(et->mask),
						  (const uint8_t *)&et->mask);
		if (rc != 0)
			return rc;
	}

	return 0;
}

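/*
 * Validate the deferred EtherType values and fold them into the match
 * specification. In a double-tagged pattern, only the outermost tag may
 * use a double-tagging TPID (QinQ and friends), whilst the innermost
 * tag must use the standard 0x8100 TPID; this is why the loop below
 * shrinks nb_supported_tpids to 1 after the outermost tag is checked.
 */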
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
				  struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
	const rte_be16_t supported_tpids[] = {
		/* VLAN standard TPID (always the first element) */
		RTE_BE16(RTE_ETHER_TYPE_VLAN),

		/* Double-tagging TPIDs */
		RTE_BE16(RTE_ETHER_TYPE_QINQ),
		RTE_BE16(RTE_ETHER_TYPE_QINQ1),
		RTE_BE16(RTE_ETHER_TYPE_QINQ2),
		RTE_BE16(RTE_ETHER_TYPE_QINQ3),
	};
	unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
	unsigned int ethertype_idx;
	const uint8_t *valuep;
	const uint8_t *maskp;
	int rc;

	if (pdata->innermost_ethertype_restriction.mask != 0 &&
	    pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		/*
		 * If a single VLAN item is followed by an L3 item, the
		 * value of "type" in item ETH can't be a double-tagging
		 * TPID.
		 */
		nb_supported_tpids = 1;
	}

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (ethertype_idx = 0;
	     ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
		unsigned int tpid_idx;

		/* Only exact match is supported. */
		if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
			rc = EINVAL;
			goto fail;
		}

		for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
		     tpid_idx < nb_supported_tpids; ++tpid_idx) {
			if (ethertypes[ethertype_idx].value ==
			    supported_tpids[tpid_idx])
				break;
		}

		if (tpid_idx == nb_supported_tpids) {
			rc = EINVAL;
			goto fail;
		}

		nb_supported_tpids = 1;
	}

	if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
		struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];

		if (et->mask == 0) {
			et->mask = RTE_BE16(0xffff);
			et->value =
			    pdata->innermost_ethertype_restriction.value;
		} else if (et->mask != RTE_BE16(0xffff) ||
			   et->value !=
			   pdata->innermost_ethertype_restriction.value) {
			rc = EINVAL;
			goto fail;
		}
	}

	/*
	 * Now that the number of VLAN tags is known, set fields
	 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
	 * one is a valid L3 EtherType (or 0x0000/0x0000) and the last
	 * two are valid TPIDs (or 0x0000/0x0000).
	 */
	rc = sfc_mae_set_ethertypes(ctx);
	if (rc != 0)
		goto fail;

	if (pdata->l3_next_proto_restriction_mask == 0xff) {
		if (pdata->l3_next_proto_mask == 0) {
			pdata->l3_next_proto_mask = 0xff;
			pdata->l3_next_proto_value =
			    pdata->l3_next_proto_restriction_value;
		} else if (pdata->l3_next_proto_mask != 0xff ||
			   pdata->l3_next_proto_value !=
			   pdata->l3_next_proto_restriction_value) {
			rc = EINVAL;
			goto fail;
		}
	}

	valuep = (const uint8_t *)&pdata->l3_next_proto_value;
	maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_IP_PROTO],
					  sizeof(pdata->l3_next_proto_value),
					  valuep,
					  sizeof(pdata->l3_next_proto_mask),
					  maskp);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				  "Failed to process pattern data");
}

static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
				struct sfc_flow_parse_ctx *ctx,
				struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_port_id supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_port_id_mask;
	const struct rte_flow_item_port_id *spec = NULL;
	const struct rte_flow_item_port_id *mask = NULL;
	efx_mport_sel_t mport_sel;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_port_id), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PORT_ID pattern item");
	}

	/* If "spec" is not set, could be any port ID */
	if (spec == NULL)
		return 0;

	if (spec->id > UINT16_MAX) {
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "The port ID is too large");
	}

	rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
					   spec->id, &mport_sel);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't find RTE ethdev by the port ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
					  &mport_sel, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the port ID");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
				 struct sfc_flow_parse_ctx *ctx,
				 struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_phy_port supp_mask = {
		.index = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_phy_port_mask;
	const struct rte_flow_item_phy_port *spec = NULL;
	const struct rte_flow_item_phy_port *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_phy_port), error);
	if (rc != 0)
		return rc;

	if (mask->index != supp_mask.index) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PHY_PORT pattern item");
	}

	/* If "spec" is not set, could be any physical port */
	if (spec == NULL)
		return 0;

	rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PHY_PORT index");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PHY_PORT");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
					    &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	const struct rte_flow_item_vf supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_vf_mask;
	const struct rte_flow_item_vf *spec = NULL;
	const struct rte_flow_item_vf *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_vf), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the VF pattern item");
	}

	/*
	 * If "spec" is not set, the item requests any VF related to the
	 * PF of the current DPDK port (but not the PF itself).
	 * Reject this match criterion as unsupported.
	 */
	if (spec == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad spec in the VF pattern item");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF + VF IDs");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF + VF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED	EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
	efx_mae_field_id_t		field_id;
	size_t				size;
	/* Field offset in the corresponding rte_flow_item_ struct */
	size_t				ofst;
};

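/*
 * Build the mask of fields which a parser can handle for a given item:
 * bytes covered by some field locator become 0xff, the rest stay zero.
 */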
static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
			     unsigned int nb_field_locators, void *mask_ptr,
			     size_t mask_size)
{
	unsigned int i;

	memset(mask_ptr, 0, mask_size);

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		SFC_ASSERT(fl->ofst + fl->size <= mask_size);
		memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
	}
}

static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
		   unsigned int nb_field_locators, const uint8_t *spec,
		   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
		   struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
			continue;

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[fl->field_id],
						  fl->size, spec + fl->ofst,
						  fl->size, mask + fl->ofst);
		if (rc != 0)
			break;
	}

	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return rc;
}

static const struct sfc_mae_field_locator flocs_eth[] = {
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
		offsetof(struct rte_flow_item_eth, type),
	},
	{
		EFX_MAE_FIELD_ETH_DADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
		offsetof(struct rte_flow_item_eth, dst),
	},
	{
		EFX_MAE_FIELD_ETH_SADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
		offsetof(struct rte_flow_item_eth, src),
	},
};

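/*
 * Item ETH does not write its "type" immediately: when VLAN tags are
 * present, the value is in fact a TPID, so it is stashed in the pattern
 * data and resolved by sfc_mae_rule_process_pattern_data() at the end
 * of parsing.
 */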
static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct rte_flow_item_eth supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
		struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
		const struct rte_flow_item_eth *item_spec;
		const struct rte_flow_item_eth *item_mask;

		item_spec = (const struct rte_flow_item_eth *)spec;
		item_mask = (const struct rte_flow_item_eth *)mask;

		ethertypes[0].value = item_spec->type;
		ethertypes[0].mask = item_mask->type;
	} else {
		/*
		 * The specification is empty. This is wrong in the case
		 * when there are more network patterns in line. Other
		 * than that, any Ethernet can match. All of that is
		 * checked at the end of parsing.
		 */
		return 0;
	}

	return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_vlan[] = {
	/* Outermost tag */
	{
		EFX_MAE_FIELD_VLAN0_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},

	/* Innermost tag */
	{
		EFX_MAE_FIELD_VLAN1_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},
};

static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	const struct sfc_mae_field_locator *flocs;
	struct rte_flow_item_vlan supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	unsigned int nb_flocs;
	int rc;

	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't match that many VLAN tags");
	}

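	/*
	 * flocs_vlan lays out one pair of locators (TCI plus deferred
	 * inner EtherType) per tag; pick the pair for the current tag.
	 */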
	nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
	flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

	/* If parsing fails, this can remain incremented. */
	++pdata->nb_vlan_tags;

	sfc_mae_item_build_supp_mask(flocs, nb_flocs,
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_vlan_mask,
				 sizeof(struct rte_flow_item_vlan), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
		const struct rte_flow_item_vlan *item_spec;
		const struct rte_flow_item_vlan *item_mask;

		item_spec = (const struct rte_flow_item_vlan *)spec;
		item_mask = (const struct rte_flow_item_vlan *)mask;

		ethertypes[pdata->nb_vlan_tags].value = item_spec->inner_type;
		ethertypes[pdata->nb_vlan_tags].mask = item_mask->inner_type;
	} else {
		/*
		 * The specification is empty. This is wrong in the case
		 * when there are more network patterns in line. Other
		 * than that, any VLAN tag can match. All of that is
		 * checked at the end of parsing.
		 */
		return 0;
	}

	return sfc_mae_parse_item(flocs, nb_flocs, spec, mask, ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv4[] = {
	{
		EFX_MAE_FIELD_SRC_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
		offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
	},
	{
		EFX_MAE_FIELD_IP_TOS,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
				 hdr.type_of_service),
		offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
		offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
	},
};

static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv4 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv4 *item_spec;
		const struct rte_flow_item_ipv4 *item_mask;

		item_spec = (const struct rte_flow_item_ipv4 *)spec;
		item_mask = (const struct rte_flow_item_ipv4 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
		pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
	} else {
		return 0;
	}

	return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv6[] = {
	{
		EFX_MAE_FIELD_SRC_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
		offsetof(struct rte_flow_item_ipv6, hdr.proto),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
		offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
	},
};

static int
sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv6 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	rte_be32_t vtc_flow_be;
	uint32_t vtc_flow;
	uint8_t tc_value;
	uint8_t tc_mask;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
				     &supp_mask, sizeof(supp_mask));

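	/*
	 * "vtc_flow" is the first field of both the IPv6 header and the
	 * item, so copying the TC mask to the start of supp_mask marks
	 * the Traffic Class bits of hdr.vtc_flow as supported.
	 */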
	vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
	memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv6 *item_spec;
		const struct rte_flow_item_ipv6 *item_mask;

		item_spec = (const struct rte_flow_item_ipv6 *)spec;
		item_mask = (const struct rte_flow_item_ipv6 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.proto;
		pdata->l3_next_proto_mask = item_mask->hdr.proto;
	} else {
		return 0;
	}

	rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
				ctx_mae, error);
	if (rc != 0)
		return rc;

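	/*
	 * Match on Traffic Class: extract the TC bits of "vtc_flow"
	 * from the beginning of the item spec and mask; the MAE field
	 * IP_TOS is a single octet, so no byte order concerns here.
	 */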
	memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  fremap[EFX_MAE_FIELD_IP_TOS],
					  sizeof(tc_value), &tc_value,
					  sizeof(tc_mask), &tc_mask);
	if (rc != 0) {
		return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return 0;
}

static const struct sfc_mae_field_locator flocs_tcp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
		offsetof(struct rte_flow_item_tcp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
		offsetof(struct rte_flow_item_tcp, hdr.dst_port),
	},
	{
		EFX_MAE_FIELD_TCP_FLAGS_BE,
		/*
		 * The values have been picked intentionally since the
		 * target MAE field is oversize (16 bit). This mapping
		 * relies on the fact that the MAE field is big-endian.
		 */
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
		offsetof(struct rte_flow_item_tcp, hdr.data_off),
	},
};

static int
sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_tcp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * When encountered among outermost items, item TCP is invalid.
	 * Check which match specification is being constructed now.
	 */
	if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "TCP in outer frame is invalid");
	}

	sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_udp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
		offsetof(struct rte_flow_item_udp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
		offsetof(struct rte_flow_item_udp, hdr.dst_port),
	},
};

static int
sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_udp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_tunnel[] = {
	{
		/*
		 * The size and offset values are relevant
		 * for Geneve and NVGRE, too.
		 */
		.size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
		.ofst = offsetof(struct rte_flow_item_vxlan, vni),
	},
};

/*
 * An auxiliary registry which allows using non-encap. field IDs
 * directly when building a match specification of type ACTION.
 *
 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
 */
static const efx_mae_field_id_t field_ids_no_remap[] = {
#define FIELD_ID_NO_REMAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field

	FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
	FIELD_ID_NO_REMAP(ETH_SADDR_BE),
	FIELD_ID_NO_REMAP(ETH_DADDR_BE),
	FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
	FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
	FIELD_ID_NO_REMAP(SRC_IP4_BE),
	FIELD_ID_NO_REMAP(DST_IP4_BE),
	FIELD_ID_NO_REMAP(IP_PROTO),
	FIELD_ID_NO_REMAP(IP_TOS),
	FIELD_ID_NO_REMAP(IP_TTL),
	FIELD_ID_NO_REMAP(SRC_IP6_BE),
	FIELD_ID_NO_REMAP(DST_IP6_BE),
	FIELD_ID_NO_REMAP(L4_SPORT_BE),
	FIELD_ID_NO_REMAP(L4_DPORT_BE),
	FIELD_ID_NO_REMAP(TCP_FLAGS_BE),

#undef FIELD_ID_NO_REMAP
};

/*
 * An auxiliary registry which allows using "ENC" field IDs
 * when building a match specification of type OUTER.
 *
 * See sfc_mae_rule_encap_parse_init().
 */
static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
#define FIELD_ID_REMAP_TO_ENCAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field

	FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
	FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
	FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),

#undef FIELD_ID_REMAP_TO_ENCAP
};

static int
sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
			       struct sfc_flow_parse_ctx *ctx,
			       struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
	uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
	const struct rte_flow_item_vxlan *vxp;
	uint8_t supp_mask[sizeof(uint64_t)];
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * We're about to start processing inner frame items.
	 * Process pattern data that has been deferred so far
	 * and reset pattern data storage.
	 */
	rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
	if (rc != 0)
		return rc;

	memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));

	sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
				     &supp_mask, sizeof(supp_mask));

	/*
	 * This tunnel item was preliminarily detected by
	 * sfc_mae_rule_encap_parse_init(). Default mask
	 * was also picked by that helper. Use it here.
	 */
	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 ctx_mae->tunnel_def_mask,
				 ctx_mae->tunnel_def_mask_size, error);
	if (rc != 0)
		return rc;

	/*
	 * This item and later ones comprise a
	 * match specification of type ACTION.
	 */
	ctx_mae->match_spec = ctx_mae->match_spec_action;

	/* This item and later ones use non-encap. EFX MAE field IDs. */
	ctx_mae->field_ids_remap = field_ids_no_remap;

	if (spec == NULL)
		return 0;

	/*
	 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
	 * Copy 24-bit VNI, which is BE, at offset 1 in it.
	 * The extra byte is 0 both in the mask and in the value.
	 */
	vxp = (const struct rte_flow_item_vxlan *)spec;
	memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));

	vxp = (const struct rte_flow_item_vxlan *)mask;
	memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  EFX_MAE_FIELD_ENC_VNET_ID_BE,
					  sizeof(vnet_id_v), vnet_id_v,
					  sizeof(vnet_id_m), vnet_id_m);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Failed to set VXLAN VNI");
	}

	return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_port_id,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_phy_port,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PF,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_pf,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VF,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_vf,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
};

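/*
 * Either reuse an existing outer rule entry with an equal match
 * specification or add a new one, and bind it to the action rule by
 * setting the outer rule ID field in the action rule match
 * specification.
 */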
1760 static int
1761 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
1762 			   struct sfc_mae_parse_ctx *ctx,
1763 			   struct sfc_mae_outer_rule **rulep,
1764 			   struct rte_flow_error *error)
1765 {
1766 	struct sfc_mae_outer_rule *rule;
1767 	int rc;
1768 
1769 	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
1770 		*rulep = NULL;
1771 		return 0;
1772 	}
1773 
1774 	SFC_ASSERT(ctx->match_spec_outer != NULL);
1775 
1776 	if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
1777 		return rte_flow_error_set(error, ENOTSUP,
1778 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1779 					  "Inconsistent pattern (outer)");
1780 	}
1781 
1782 	*rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
1783 					   ctx->encap_type);
1784 	if (*rulep != NULL) {
1785 		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1786 	} else {
1787 		rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
1788 					    ctx->encap_type, rulep);
1789 		if (rc != 0) {
1790 			return rte_flow_error_set(error, rc,
1791 					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1792 					"Failed to process the pattern");
1793 		}
1794 	}
1795 
1796 	/* The spec has now been tracked by the outer rule entry. */
1797 	ctx->match_spec_outer = NULL;
1798 
1799 	/*
1800 	 * Depending on whether we reuse an existing outer rule or create a
1801 	 * new one (see above), outer rule ID is either a valid value or
1802 	 * EFX_MAE_RSRC_ID_INVALID. Set it in the action rule match
1803 	 * specification (and the full mask, too) in order to have correct
1804 	 * class comparisons of the new rule with existing ones.
1805 	 * Also, action rule match specification will be validated shortly,
1806 	 * and having the full mask set for outer rule ID indicates that we
1807 	 * will use this field, and support for this field has to be checked.
1808 	 */
1809 	rule = *rulep;
1810 	rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
1811 						  &rule->fw_rsrc.rule_id);
1812 	if (rc != 0) {
1813 		sfc_mae_outer_rule_del(sa, *rulep);
1814 		*rulep = NULL;
1815 
1816 		return rte_flow_error_set(error, rc,
1817 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1818 					  "Failed to process the pattern");
1819 	}
1820 
1821 	return 0;
1822 }
1823 
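/*
 * Scan the pattern for a tunnel item. If one is found, set the encap.
 * type and the default tunnel item mask in the parse context and
 * initialise an outer rule match specification so that the outermost
 * items are collected into it using "ENC" EFX MAE field IDs.
 */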
1824 static int
1825 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
1826 			      const struct rte_flow_item pattern[],
1827 			      struct sfc_mae_parse_ctx *ctx,
1828 			      struct rte_flow_error *error)
1829 {
1830 	struct sfc_mae *mae = &sa->mae;
1831 	int rc;
1832 
1833 	if (pattern == NULL) {
1834 		rte_flow_error_set(error, EINVAL,
1835 				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1836 				   "NULL pattern");
1837 		return -rte_errno;
1838 	}
1839 
1840 	for (;;) {
1841 		switch (pattern->type) {
1842 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1843 			ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
1844 			ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
1845 			ctx->tunnel_def_mask_size =
1846 				sizeof(rte_flow_item_vxlan_mask);
1847 			break;
1848 		case RTE_FLOW_ITEM_TYPE_GENEVE:
1849 			ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
1850 			ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
1851 			ctx->tunnel_def_mask_size =
1852 				sizeof(rte_flow_item_geneve_mask);
1853 			break;
1854 		case RTE_FLOW_ITEM_TYPE_NVGRE:
1855 			ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1856 			ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
1857 			ctx->tunnel_def_mask_size =
1858 				sizeof(rte_flow_item_nvgre_mask);
1859 			break;
1860 		case RTE_FLOW_ITEM_TYPE_END:
1861 			break;
1862 		default:
1863 			++pattern;
1864 			continue;
1865 		}
1866 
1867 		break;
1868 	}
1869 
1870 	if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
1871 		return 0;
1872 
1873 	if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
1874 		return rte_flow_error_set(error, ENOTSUP,
1875 					  RTE_FLOW_ERROR_TYPE_ITEM,
1876 					  pattern, "Unsupported tunnel item");
1877 	}
1878 
1879 	if (ctx->priority >= mae->nb_outer_rule_prios_max) {
1880 		return rte_flow_error_set(error, ENOTSUP,
1881 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1882 					  NULL, "Unsupported priority level");
1883 	}
1884 
1885 	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
1886 				     &ctx->match_spec_outer);
1887 	if (rc != 0) {
1888 		return rte_flow_error_set(error, rc,
1889 			RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1890 			"Failed to initialise outer rule match specification");
1891 	}
1892 
1893 	/* Outermost items comprise a match specification of type OUTER. */
1894 	ctx->match_spec = ctx->match_spec_outer;
1895 
1896 	/* Outermost items use "ENC" EFX MAE field IDs. */
1897 	ctx->field_ids_remap = field_ids_remap_to_encap;
1898 
1899 	return 0;
1900 }
1901 
1902 static void
1903 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
1904 			      struct sfc_mae_parse_ctx *ctx)
1905 {
1906 	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1907 		return;
1908 
1909 	if (ctx->match_spec_outer != NULL)
1910 		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1911 }
1912 
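/*
 * Parse the pattern into an action rule match specification and, if
 * tunnel items are present, an outer rule match specification, too
 * (see sfc_mae_rule_encap_parse_init() above).
 */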
1913 int
1914 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
1915 			   const struct rte_flow_item pattern[],
1916 			   struct sfc_flow_spec_mae *spec,
1917 			   struct rte_flow_error *error)
1918 {
1919 	struct sfc_mae_parse_ctx ctx_mae;
1920 	struct sfc_flow_parse_ctx ctx;
1921 	int rc;
1922 
1923 	memset(&ctx_mae, 0, sizeof(ctx_mae));
1924 	ctx_mae.priority = spec->priority;
1925 	ctx_mae.sa = sa;
1926 
1927 	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
1928 				     spec->priority,
1929 				     &ctx_mae.match_spec_action);
1930 	if (rc != 0) {
1931 		rc = rte_flow_error_set(error, rc,
1932 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1933 			"Failed to initialise action rule match specification");
1934 		goto fail_init_match_spec_action;
1935 	}
1936 
1937 	/*
1938 	 * As a preliminary setting, assume that there is no encapsulation
1939 	 * in the pattern. That is, pattern items are about to comprise a
1940 	 * match specification of type ACTION and use non-encap. field IDs.
1941 	 *
1942 	 * sfc_mae_rule_encap_parse_init() below may override this.
1943 	 */
1944 	ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
1945 	ctx_mae.match_spec = ctx_mae.match_spec_action;
1946 	ctx_mae.field_ids_remap = field_ids_no_remap;
1947 
1948 	ctx.type = SFC_FLOW_PARSE_CTX_MAE;
1949 	ctx.mae = &ctx_mae;
1950 
1951 	rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
1952 	if (rc != 0)
1953 		goto fail_encap_parse_init;
1954 
1955 	rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
1956 				    pattern, &ctx, error);
1957 	if (rc != 0)
1958 		goto fail_parse_pattern;
1959 
1960 	rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
1961 	if (rc != 0)
1962 		goto fail_process_pattern_data;
1963 
1964 	rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
1965 	if (rc != 0)
1966 		goto fail_process_outer;
1967 
1968 	if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
1969 		rc = rte_flow_error_set(error, ENOTSUP,
1970 					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1971 					"Inconsistent pattern");
1972 		goto fail_validate_match_spec_action;
1973 	}
1974 
1975 	spec->match_spec = ctx_mae.match_spec_action;
1976 
1977 	return 0;
1978 
1979 fail_validate_match_spec_action:
1980 fail_process_outer:
1981 fail_process_pattern_data:
1982 fail_parse_pattern:
1983 	sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
1984 
1985 fail_encap_parse_init:
1986 	efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
1987 
1988 fail_init_match_spec_action:
1989 	return rc;
1990 }
1991 
1992 /*
1993  * An action supported by MAE may correspond to a bundle of RTE flow actions,
1994  * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
1995  * That is, related RTE flow actions need to be tracked as parts of a whole
1996  * so that they can be combined into a single action and submitted to the
1997  * MAE representation of a given rule's action set.
1998  *
1999  * Each RTE flow action provided by an application gets classified as
2000  * one belonging to some bundle type. If an action is not supposed to
2001  * belong to any bundle, or if this action is END, it is described as
2002  * one belonging to a dummy bundle of type EMPTY.
2003  *
2004  * A currently tracked bundle will be submitted if a repeating
2005  * The currently tracked bundle is submitted if a repeating action or
2006  * an action of a different bundle type follows.
2007 
2008 enum sfc_mae_actions_bundle_type {
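/*
 * For instance (illustrative), the action list
 *   OF_POP_VLAN / OF_PUSH_VLAN / OF_SET_VLAN_VID / OF_SET_VLAN_PCP / DROP
 * yields an EMPTY bundle (OF_POP_VLAN is self-sufficient), then a
 * VLAN_PUSH bundle covering the three VLAN push related actions
 * (submitted once DROP is seen), then an EMPTY bundle again.
 */
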
2009 	SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2010 	SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2011 };
2012 
2013 struct sfc_mae_actions_bundle {
2014 	enum sfc_mae_actions_bundle_type	type;
2015 
2016 	/* Indicates actions already tracked by the current bundle */
2017 	uint64_t				actions_mask;
2018 
2019 	/* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2020 	rte_be16_t				vlan_push_tpid;
2021 	rte_be16_t				vlan_push_tci;
2022 };
2023 
2024 /*
2025  * Combine configuration of RTE flow actions tracked by the bundle into a
2026  * single action and submit the result to MAE action set specification.
2027  * Do nothing in the case of dummy action bundle.
2028  */
2029 static int
2030 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2031 			      efx_mae_actions_t *spec)
2032 {
2033 	int rc = 0;
2034 
2035 	switch (bundle->type) {
2036 	case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2037 		break;
2038 	case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2039 		rc = efx_mae_action_set_populate_vlan_push(
2040 			spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2041 		break;
2042 	default:
2043 		SFC_ASSERT(B_FALSE);
2044 		break;
2045 	}
2046 
2047 	return rc;
2048 }
2049 
2050 /*
2051  * Given the type of the next RTE flow action in the line, decide
2052  * whether a new bundle is about to start, and, if this is the case,
2053  * submit and reset the current bundle.
2054  */
2055 static int
2056 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2057 			    struct sfc_mae_actions_bundle *bundle,
2058 			    efx_mae_actions_t *spec,
2059 			    struct rte_flow_error *error)
2060 {
2061 	enum sfc_mae_actions_bundle_type bundle_type_new;
2062 	int rc;
2063 
2064 	switch (action->type) {
2065 	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2066 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2067 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2068 		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2069 		break;
2070 	default:
2071 		/*
2072 		 * Self-sufficient actions, including END, are handled in this
2073 		 * case. No checks for unsupported actions are needed here
2074 		 * because parsing doesn't occur at this point.
2075 		 */
2076 		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2077 		break;
2078 	}
2079 
2080 	if (bundle_type_new != bundle->type ||
2081 	    (bundle->actions_mask & (1ULL << action->type)) != 0) {
2082 		rc = sfc_mae_actions_bundle_submit(bundle, spec);
2083 		if (rc != 0)
2084 			goto fail_submit;
2085 
2086 		memset(bundle, 0, sizeof(*bundle));
2087 	}
2088 
2089 	bundle->type = bundle_type_new;
2090 
2091 	return 0;
2092 
2093 fail_submit:
2094 	return rte_flow_error_set(error, rc,
2095 			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2096 			"Failed to request the (group of) action(s)");
2097 }
2098 
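/*
 * The three helpers below accumulate VLAN push parameters in the
 * bundle. The TCI layout is PCP (3 bits), DEI (1 bit) and VID
 * (12 bits), so the VID and PCP setters OR their fields into
 * bundle->vlan_push_tci.
 */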
2099 static void
2100 sfc_mae_rule_parse_action_of_push_vlan(
2101 			    const struct rte_flow_action_of_push_vlan *conf,
2102 			    struct sfc_mae_actions_bundle *bundle)
2103 {
2104 	bundle->vlan_push_tpid = conf->ethertype;
2105 }
2106 
2107 static void
2108 sfc_mae_rule_parse_action_of_set_vlan_vid(
2109 			    const struct rte_flow_action_of_set_vlan_vid *conf,
2110 			    struct sfc_mae_actions_bundle *bundle)
2111 {
2112 	bundle->vlan_push_tci |= (conf->vlan_vid &
2113 				  rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2114 }
2115 
2116 static void
2117 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2118 			    const struct rte_flow_action_of_set_vlan_pcp *conf,
2119 			    struct sfc_mae_actions_bundle *bundle)
2120 {
2121 	uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2122 					   RTE_LEN2MASK(3, uint8_t)) << 13;
2123 
2124 	bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
2125 }
2126 
2127 struct sfc_mae_parsed_item {
2128 	const struct rte_flow_item	*item;
2129 	size_t				proto_header_ofst;
2130 	size_t				proto_header_size;
2131 };
2132 
2133 /*
2134  * For each 16-bit word of the given header, overwrite the bits
2135  * covered by the corresponding 16-bit mask word with spec values.
2136  */
2137 static void
2138 sfc_mae_header_force_item_masks(uint8_t *header_buf,
2139 				const struct sfc_mae_parsed_item *parsed_items,
2140 				unsigned int nb_parsed_items)
2141 {
2142 	unsigned int item_idx;
2143 
2144 	for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2145 		const struct sfc_mae_parsed_item *parsed_item;
2146 		const struct rte_flow_item *item;
2147 		size_t proto_header_size;
2148 		size_t ofst;
2149 
2150 		parsed_item = &parsed_items[item_idx];
2151 		proto_header_size = parsed_item->proto_header_size;
2152 		item = parsed_item->item;
2153 
2154 		for (ofst = 0; ofst < proto_header_size;
2155 		     ofst += sizeof(rte_be16_t)) {
2156 			rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
2157 			const rte_be16_t *w_maskp;
2158 			const rte_be16_t *w_specp;
2159 
2160 			w_maskp = RTE_PTR_ADD(item->mask, ofst);
2161 			w_specp = RTE_PTR_ADD(item->spec, ofst);
2162 
2163 			*wp &= ~(*w_maskp);
2164 			*wp |= (*w_specp & *w_maskp);
2165 		}
2166 
2167 		header_buf += proto_header_size;
2168 	}
2169 }
2170 
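/*
 * Default values for encap. header fields not taken from pattern items:
 * IPv4 TTL 64; IPv6 version 6 with zero traffic class and flow label;
 * IPv6 hop limit 255; VXLAN flags with only the I bit set (VNI valid).
 */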
2171 #define SFC_IPV4_TTL_DEF	0x40
2172 #define SFC_IPV6_VTC_FLOW_DEF	0x60000000
2173 #define SFC_IPV6_HOP_LIMITS_DEF	0xff
2174 #define SFC_VXLAN_FLAGS_DEF	0x08000000
2175 
2176 static int
2177 sfc_mae_rule_parse_action_vxlan_encap(
2178 			    struct sfc_mae *mae,
2179 			    const struct rte_flow_action_vxlan_encap *conf,
2180 			    efx_mae_actions_t *spec,
2181 			    struct rte_flow_error *error)
2182 {
2183 	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
2184 	struct rte_flow_item *pattern = conf->definition;
2185 	uint8_t *buf = bounce_eh->buf;
2186 
2187 	/* This array will keep track of non-VOID pattern items. */
2188 	struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
2189 						2 /* VLAN tags */ +
2190 						1 /* IPv4 or IPv6 */ +
2191 						1 /* UDP */ +
2192 						1 /* VXLAN */];
2193 	unsigned int nb_parsed_items = 0;
2194 
2195 	size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
2196 	uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
2197 				  sizeof(struct rte_ipv6_hdr))];
2198 	struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
2199 	struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
2200 	struct rte_vxlan_hdr *vxlan = NULL;
2201 	struct rte_udp_hdr *udp = NULL;
2202 	unsigned int nb_vlan_tags = 0;
2203 	size_t next_proto_ofst = 0;
2204 	size_t ethertype_ofst = 0;
2205 	uint64_t exp_items;
2206 
2207 	if (pattern == NULL) {
2208 		return rte_flow_error_set(error, EINVAL,
2209 				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2210 				"The encap. header definition is NULL");
2211 	}
2212 
2213 	bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
2214 	bounce_eh->size = 0;
2215 
2216 	/*
2217 	 * Process pattern items and remember non-VOID ones.
2218 	 * Defer applying masks until after the complete header
2219 	 * has been built from the pattern items.
2220 	 */
2221 	exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
2222 
2223 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
2224 		struct sfc_mae_parsed_item *parsed_item;
2225 		const uint64_t exp_items_extra_vlan[] = {
2226 			RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
2227 		};
2228 		size_t proto_header_size;
2229 		rte_be16_t *ethertypep;
2230 		uint8_t *next_protop;
2231 		uint8_t *buf_cur;
2232 
2233 		if (pattern->spec == NULL) {
2234 			return rte_flow_error_set(error, EINVAL,
2235 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2236 					"NULL item spec in the encap. header");
2237 		}
2238 
2239 		if (pattern->mask == NULL) {
2240 			return rte_flow_error_set(error, EINVAL,
2241 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2242 					"NULL item mask in the encap. header");
2243 		}
2244 
2245 		if (pattern->last != NULL) {
2246 			/* This is not a match pattern, so disallow range. */
2247 			return rte_flow_error_set(error, EINVAL,
2248 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2249 					"Range item in the encap. header");
2250 		}
2251 
2252 		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
2253 			/* Handle VOID separately, for clarity. */
2254 			continue;
2255 		}
2256 
2257 		if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
2258 			return rte_flow_error_set(error, ENOTSUP,
2259 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2260 					"Unexpected item in the encap. header");
2261 		}
2262 
2263 		parsed_item = &parsed_items[nb_parsed_items];
2264 		buf_cur = buf + bounce_eh->size;
2265 
2266 		switch (pattern->type) {
2267 		case RTE_FLOW_ITEM_TYPE_ETH:
2268 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
2269 					       exp_items);
2270 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
2271 						  hdr) != 0);
2272 
2273 			proto_header_size = sizeof(struct rte_ether_hdr);
2274 
2275 			ethertype_ofst = eth_ethertype_ofst;
2276 
2277 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
2278 				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2279 				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2280 			break;
2281 		case RTE_FLOW_ITEM_TYPE_VLAN:
2282 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
2283 					       exp_items);
2284 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
2285 						  hdr) != 0);
2286 
2287 			proto_header_size = sizeof(struct rte_vlan_hdr);
2288 
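			/*
			 * Write the QinQ TPID to the outermost Ethernet
			 * header and the regular VLAN TPID to the header
			 * right before this tag. For the first tag both
			 * writes hit the same offset, so the outermost
			 * TPID becomes QinQ only once a second tag follows.
			 */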
2289 			ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
2290 			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
2291 
2292 			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2293 			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2294 
2295 			ethertype_ofst =
2296 			    bounce_eh->size +
2297 			    offsetof(struct rte_vlan_hdr, eth_proto);
2298 
2299 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2300 				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2301 			exp_items |= exp_items_extra_vlan[nb_vlan_tags];
2302 
2303 			++nb_vlan_tags;
2304 			break;
2305 		case RTE_FLOW_ITEM_TYPE_IPV4:
2306 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
2307 					       exp_items);
2308 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
2309 						  hdr) != 0);
2310 
2311 			proto_header_size = sizeof(struct rte_ipv4_hdr);
2312 
2313 			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2314 			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2315 
2316 			next_proto_ofst =
2317 			    bounce_eh->size +
2318 			    offsetof(struct rte_ipv4_hdr, next_proto_id);
2319 
2320 			ipv4 = (struct rte_ipv4_hdr *)buf_cur;
2321 
2322 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2323 			break;
2324 		case RTE_FLOW_ITEM_TYPE_IPV6:
2325 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
2326 					       exp_items);
2327 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
2328 						  hdr) != 0);
2329 
2330 			proto_header_size = sizeof(struct rte_ipv6_hdr);
2331 
2332 			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2333 			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2334 
2335 			next_proto_ofst = bounce_eh->size +
2336 					  offsetof(struct rte_ipv6_hdr, proto);
2337 
2338 			ipv6 = (struct rte_ipv6_hdr *)buf_cur;
2339 
2340 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2341 			break;
2342 		case RTE_FLOW_ITEM_TYPE_UDP:
2343 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
2344 					       exp_items);
2345 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
2346 						  hdr) != 0);
2347 
2348 			proto_header_size = sizeof(struct rte_udp_hdr);
2349 
2350 			next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
2351 			*next_protop = IPPROTO_UDP;
2352 
2353 			udp = (struct rte_udp_hdr *)buf_cur;
2354 
2355 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
2356 			break;
2357 		case RTE_FLOW_ITEM_TYPE_VXLAN:
2358 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
2359 					       exp_items);
2360 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
2361 						  hdr) != 0);
2362 
2363 			proto_header_size = sizeof(struct rte_vxlan_hdr);
2364 
2365 			vxlan = (struct rte_vxlan_hdr *)buf_cur;
2366 
2367 			udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
2368 			udp->dgram_len = RTE_BE16(sizeof(*udp) +
2369 						  sizeof(*vxlan));
2370 			udp->dgram_cksum = 0;
2371 
2372 			exp_items = 0;
2373 			break;
2374 		default:
2375 			return rte_flow_error_set(error, ENOTSUP,
2376 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2377 					"Unknown item in the encap. header");
2378 		}
2379 
2380 		if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
2381 			return rte_flow_error_set(error, E2BIG,
2382 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2383 					"The encap. header is too big");
2384 		}
2385 
2386 		if ((proto_header_size & 1) != 0) {
2387 			return rte_flow_error_set(error, EINVAL,
2388 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2389 					"Odd layer size in the encap. header");
2390 		}
2391 
2392 		rte_memcpy(buf_cur, pattern->spec, proto_header_size);
2393 		bounce_eh->size += proto_header_size;
2394 
2395 		parsed_item->item = pattern;
2396 		parsed_item->proto_header_size = proto_header_size;
2397 		++nb_parsed_items;
2398 	}
2399 
2400 	if (exp_items != 0) {
2401 		/* Parsing item VXLAN would have reset exp_items to 0. */
2402 		return rte_flow_error_set(error, ENOTSUP,
2403 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2404 					"No item VXLAN in the encap. header");
2405 	}
2406 
2407 	/* One of the pointers (ipv4, ipv6) refers to a dummy area. */
2408 	ipv4->version_ihl = RTE_IPV4_VHL_DEF;
2409 	ipv4->time_to_live = SFC_IPV4_TTL_DEF;
2410 	ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
2411 				      sizeof(*vxlan));
2412 	/* The HW cannot compute this checksum. */
2413 	ipv4->hdr_checksum = 0;
2414 	ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
2415 
2416 	ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
2417 	ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
2418 	ipv6->payload_len = udp->dgram_len;
2419 
2420 	vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
2421 
2422 	/* Take care of the masks. */
2423 	sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
2424 
2425 	return (spec != NULL) ? efx_mae_action_set_populate_encap(spec) : 0;
2426 }
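
/*
 * Illustrative encap. header definition accepted by the parser above:
 * ETH / IPV4 / UDP / VXLAN / END, each item with both spec and mask.
 * Bits left unspecified by the item masks keep the defaults computed
 * above (IPv4 TTL and checksum, UDP destination port 4789, lengths,
 * VXLAN flags).
 */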
2427 
2428 static int
2429 sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
2430 			       efx_mae_actions_t *spec)
2431 {
2432 	return efx_mae_action_set_populate_mark(spec, conf->id);
2433 }
2434 
2435 static int
2436 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
2437 				   const struct rte_flow_action_phy_port *conf,
2438 				   efx_mae_actions_t *spec)
2439 {
2440 	efx_mport_sel_t mport;
2441 	uint32_t phy_port;
2442 	int rc;
2443 
2444 	if (conf->original != 0)
2445 		phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
2446 	else
2447 		phy_port = conf->index;
2448 
2449 	rc = efx_mae_mport_by_phy_port(phy_port, &mport);
2450 	if (rc != 0)
2451 		return rc;
2452 
2453 	return efx_mae_action_set_populate_deliver(spec, &mport);
2454 }
2455 
2456 static int
2457 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
2458 				const struct rte_flow_action_vf *vf_conf,
2459 				efx_mae_actions_t *spec)
2460 {
2461 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
2462 	efx_mport_sel_t mport;
2463 	uint32_t vf;
2464 	int rc;
2465 
2466 	if (vf_conf == NULL)
2467 		vf = EFX_PCI_VF_INVALID;
2468 	else if (vf_conf->original != 0)
2469 		vf = encp->enc_vf;
2470 	else
2471 		vf = vf_conf->id;
2472 
2473 	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
2474 	if (rc != 0)
2475 		return rc;
2476 
2477 	return efx_mae_action_set_populate_deliver(spec, &mport);
2478 }
2479 
2480 static int
2481 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
2482 				  const struct rte_flow_action_port_id *conf,
2483 				  efx_mae_actions_t *spec)
2484 {
2485 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2486 	struct sfc_mae *mae = &sa->mae;
2487 	efx_mport_sel_t mport;
2488 	uint16_t port_id;
2489 	int rc;
2490 
2491 	port_id = (conf->original != 0) ? sas->port_id : conf->id;
2492 
2493 	rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
2494 					   port_id, &mport);
2495 	if (rc != 0)
2496 		return rc;
2497 
2498 	return efx_mae_action_set_populate_deliver(spec, &mport);
2499 }
2500 
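/*
 * Handle a single RTE flow action: either stash its parameters in the
 * current bundle (VLAN push fragments) or populate the MAE action set
 * specification right away. On success, mark the action as seen in
 * bundle->actions_mask so that a repeated action of the same bundle
 * type triggers a bundle submission first.
 */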
2501 static int
2502 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
2503 			  const struct rte_flow_action *action,
2504 			  const struct sfc_mae_outer_rule *outer_rule,
2505 			  struct sfc_mae_actions_bundle *bundle,
2506 			  efx_mae_actions_t *spec,
2507 			  struct rte_flow_error *error)
2508 {
2509 	bool custom_error = B_FALSE;
2510 	int rc = 0;
2511 
2512 	switch (action->type) {
2513 	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2514 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
2515 				       bundle->actions_mask);
2516 		if (outer_rule == NULL ||
2517 		    outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
2518 			rc = EINVAL;
2519 		else
2520 			rc = efx_mae_action_set_populate_decap(spec);
2521 		break;
2522 	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2523 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
2524 				       bundle->actions_mask);
2525 		rc = efx_mae_action_set_populate_vlan_pop(spec);
2526 		break;
2527 	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2528 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
2529 				       bundle->actions_mask);
2530 		sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
2531 		break;
2532 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2533 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
2534 				       bundle->actions_mask);
2535 		sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
2536 		break;
2537 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2538 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
2539 				       bundle->actions_mask);
2540 		sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
2541 		break;
2542 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2543 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
2544 				       bundle->actions_mask);
2545 		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
2546 							   action->conf,
2547 							   spec, error);
2548 		custom_error = B_TRUE;
2549 		break;
2550 	case RTE_FLOW_ACTION_TYPE_FLAG:
2551 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
2552 				       bundle->actions_mask);
2553 		rc = efx_mae_action_set_populate_flag(spec);
2554 		break;
2555 	case RTE_FLOW_ACTION_TYPE_MARK:
2556 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
2557 				       bundle->actions_mask);
2558 		rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
2559 		break;
2560 	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
2561 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
2562 				       bundle->actions_mask);
2563 		rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
2564 		break;
2565 	case RTE_FLOW_ACTION_TYPE_PF:
2566 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
2567 				       bundle->actions_mask);
2568 		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
2569 		break;
2570 	case RTE_FLOW_ACTION_TYPE_VF:
2571 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
2572 				       bundle->actions_mask);
2573 		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
2574 		break;
2575 	case RTE_FLOW_ACTION_TYPE_PORT_ID:
2576 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
2577 				       bundle->actions_mask);
2578 		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
2579 		break;
2580 	case RTE_FLOW_ACTION_TYPE_DROP:
2581 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
2582 				       bundle->actions_mask);
2583 		rc = efx_mae_action_set_populate_drop(spec);
2584 		break;
2585 	default:
2586 		return rte_flow_error_set(error, ENOTSUP,
2587 				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2588 				"Unsupported action");
2589 	}
2590 
2591 	if (rc == 0) {
2592 		bundle->actions_mask |= (1ULL << action->type);
2593 	} else if (!custom_error) {
2594 		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
2595 				NULL, "Failed to request the action");
2596 	}
2597 
2598 	return rc;
2599 }
2600 
2601 static void
2602 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
2603 {
2604 	bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
2605 }
2606 
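/*
 * Turn the bounce buffer contents into an encap. header entry: reuse
 * an existing entry with identical data or add a new one. If there is
 * no pending header in the bounce buffer, set *encap_headerp to NULL.
 */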
2607 static int
2608 sfc_mae_process_encap_header(struct sfc_adapter *sa,
2609 			     const struct sfc_mae_bounce_eh *bounce_eh,
2610 			     struct sfc_mae_encap_header **encap_headerp)
2611 {
2612 	if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
2613 		*encap_headerp = NULL;
2614 		return 0;
2615 	}
2616 
2617 	*encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
2618 	if (*encap_headerp != NULL)
2619 		return 0;
2620 
2621 	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
2622 }
2623 
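/*
 * Translate the RTE flow actions into an MAE action set specification
 * and attach to (or add) a matching action set entry. Note the extra
 * sfc_mae_actions_bundle_sync() call after the loop: it flushes the
 * bundle still tracked when the END action is reached.
 */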
2624 int
2625 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
2626 			   const struct rte_flow_action actions[],
2627 			   struct sfc_flow_spec_mae *spec_mae,
2628 			   struct rte_flow_error *error)
2629 {
2630 	struct sfc_mae_encap_header *encap_header = NULL;
2631 	struct sfc_mae_actions_bundle bundle = {0};
2632 	const struct rte_flow_action *action;
2633 	struct sfc_mae *mae = &sa->mae;
2634 	efx_mae_actions_t *spec;
2635 	int rc;
2636 
2637 	rte_errno = 0;
2638 
2639 	if (actions == NULL) {
2640 		return rte_flow_error_set(error, EINVAL,
2641 				RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
2642 				"NULL actions");
2643 	}
2644 
2645 	rc = efx_mae_action_set_spec_init(sa->nic, &spec);
2646 	if (rc != 0)
2647 		goto fail_action_set_spec_init;
2648 
2649 	/* Clean up after the previous encap. header bounce buffer usage. */
2650 	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
2651 
2652 	for (action = actions;
2653 	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
2654 		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2655 		if (rc != 0)
2656 			goto fail_rule_parse_action;
2657 
2658 		rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
2659 					       &bundle, spec, error);
2660 		if (rc != 0)
2661 			goto fail_rule_parse_action;
2662 	}
2663 
2664 	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2665 	if (rc != 0)
2666 		goto fail_rule_parse_action;
2667 
2668 	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
2669 	if (rc != 0)
2670 		goto fail_process_encap_header;
2671 
2672 	spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
2673 							 spec);
2674 	if (spec_mae->action_set != NULL) {
2675 		sfc_mae_encap_header_del(sa, encap_header);
2676 		efx_mae_action_set_spec_fini(sa->nic, spec);
2677 		return 0;
2678 	}
2679 
2680 	rc = sfc_mae_action_set_add(sa, spec, encap_header,
2681 				    &spec_mae->action_set);
2682 	if (rc != 0)
2683 		goto fail_action_set_add;
2684 
2685 	return 0;
2686 
2687 fail_action_set_add:
2688 	sfc_mae_encap_header_del(sa, encap_header);
2689 
2690 fail_process_encap_header:
2691 fail_rule_parse_action:
2692 	efx_mae_action_set_spec_fini(sa->nic, spec);
2693 
2694 fail_action_set_spec_init:
2695 	if (rc > 0 && rte_errno == 0) {
2696 		rc = rte_flow_error_set(error, rc,
2697 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2698 			NULL, "Failed to process the action");
2699 	}
2700 	return rc;
2701 }
2702 
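/*
 * The FW provides no rule validation. The class verifiers below
 * approximate it by comparing the class of a new rule with the
 * classes of the active rules, which the FW is known to accept.
 */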
2703 static bool
2704 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
2705 			const efx_mae_match_spec_t *left,
2706 			const efx_mae_match_spec_t *right)
2707 {
2708 	bool have_same_class;
2709 	int rc;
2710 
2711 	rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
2712 					   &have_same_class);
2713 
2714 	return (rc == 0) ? have_same_class : false;
2715 }
2716 
2717 static int
2718 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
2719 				struct sfc_mae_outer_rule *rule)
2720 {
2721 	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
2722 	struct sfc_mae_outer_rule *entry;
2723 	struct sfc_mae *mae = &sa->mae;
2724 
2725 	if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
2726 		/* An active rule is reused. Its class is known to be valid. */
2727 		return 0;
2728 	}
2729 
2730 	TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
2731 			      sfc_mae_outer_rules, entries) {
2732 		const efx_mae_match_spec_t *left = entry->match_spec;
2733 		const efx_mae_match_spec_t *right = rule->match_spec;
2734 
2735 		if (entry == rule)
2736 			continue;
2737 
2738 		if (sfc_mae_rules_class_cmp(sa, left, right))
2739 			return 0;
2740 	}
2741 
2742 	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2743 		 "support for outer frame pattern items is not guaranteed; "
2744 		 "other than that, the items are valid from a SW standpoint");
2745 	return 0;
2746 }
2747 
2748 static int
2749 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
2750 				 struct sfc_flow_spec_mae *spec)
2751 {
2752 	const struct rte_flow *entry;
2753 
2754 	TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
2755 		const struct sfc_flow_spec *entry_spec = &entry->spec;
2756 		const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
2757 		const efx_mae_match_spec_t *left = es_mae->match_spec;
2758 		const efx_mae_match_spec_t *right = spec->match_spec;
2759 
2760 		switch (entry_spec->type) {
2761 		case SFC_FLOW_SPEC_FILTER:
2762 			/* Ignore VNIC-level flows */
2763 			break;
2764 		case SFC_FLOW_SPEC_MAE:
2765 			if (sfc_mae_rules_class_cmp(sa, left, right))
2766 				return 0;
2767 			break;
2768 		default:
2769 			SFC_ASSERT(false);
2770 		}
2771 	}
2772 
2773 	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2774 		 "support for inner frame pattern items is not guaranteed; "
2775 		 "other than that, the items are valid from a SW standpoint");
2776 	return 0;
2777 }
2778 
2779 /**
2780  * Confirm that a given flow can be accepted by the FW.
2781  *
2782  * @param sa
2783  *   Software adapter context
2784  * @param flow
2785  *   Flow to be verified
2786  * @return
2787  *   Zero on success and non-zero in the case of error.
2788  *   A special value of EAGAIN indicates that the adapter is
2789  *   not in the started state. The started state is required because
2790  *   it only makes sense to compare the rule class of the flow
2791  *   being validated with the classes of the active rules.
2792  *   Such classes are known to be supported by the FW.
2793  */
2794 int
2795 sfc_mae_flow_verify(struct sfc_adapter *sa,
2796 		    struct rte_flow *flow)
2797 {
2798 	struct sfc_flow_spec *spec = &flow->spec;
2799 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2800 	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2801 	int rc;
2802 
2803 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2804 
2805 	if (sa->state != SFC_ADAPTER_STARTED)
2806 		return EAGAIN;
2807 
2808 	if (outer_rule != NULL) {
2809 		rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
2810 		if (rc != 0)
2811 			return rc;
2812 	}
2813 
2814 	return sfc_mae_action_rule_class_verify(sa, spec_mae);
2815 }
2816 
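/*
 * Enable the outer rule (if any) and the action set, then insert the
 * action rule itself. Unwind in reverse order on failure.
 */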
2817 int
2818 sfc_mae_flow_insert(struct sfc_adapter *sa,
2819 		    struct rte_flow *flow)
2820 {
2821 	struct sfc_flow_spec *spec = &flow->spec;
2822 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2823 	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2824 	struct sfc_mae_action_set *action_set = spec_mae->action_set;
2825 	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
2826 	int rc;
2827 
2828 	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
2829 	SFC_ASSERT(action_set != NULL);
2830 
2831 	if (outer_rule != NULL) {
2832 		rc = sfc_mae_outer_rule_enable(sa, outer_rule,
2833 					       spec_mae->match_spec);
2834 		if (rc != 0)
2835 			goto fail_outer_rule_enable;
2836 	}
2837 
2838 	rc = sfc_mae_action_set_enable(sa, action_set);
2839 	if (rc != 0)
2840 		goto fail_action_set_enable;
2841 
2842 	rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
2843 					NULL, &fw_rsrc->aset_id,
2844 					&spec_mae->rule_id);
2845 	if (rc != 0)
2846 		goto fail_action_rule_insert;
2847 
2848 	return 0;
2849 
2850 fail_action_rule_insert:
2851 	(void)sfc_mae_action_set_disable(sa, action_set);
2852 
2853 fail_action_set_enable:
2854 	if (outer_rule != NULL)
2855 		(void)sfc_mae_outer_rule_disable(sa, outer_rule);
2856 
2857 fail_outer_rule_enable:
2858 	return rc;
2859 }
2860 
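/*
 * Tear down in reverse order: remove the action rule first, then
 * disable the action set and, finally, the outer rule (if any).
 */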
2861 int
2862 sfc_mae_flow_remove(struct sfc_adapter *sa,
2863 		    struct rte_flow *flow)
2864 {
2865 	struct sfc_flow_spec *spec = &flow->spec;
2866 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2867 	struct sfc_mae_action_set *action_set = spec_mae->action_set;
2868 	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2869 	int rc;
2870 
2871 	SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
2872 	SFC_ASSERT(action_set != NULL);
2873 
2874 	rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
2875 	if (rc != 0)
2876 		return rc;
2877 
2878 	spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
2879 
2880 	rc = sfc_mae_action_set_disable(sa, action_set);
2881 	if (rc != 0) {
2882 		sfc_err(sa, "failed to disable the action set (rc = %d)", rc);
2883 		/* Despite the error, proceed with outer rule removal. */
2884 	}
2885 
2886 	if (outer_rule != NULL)
2887 		return sfc_mae_outer_rule_disable(sa, outer_rule);
2888 
2889 	return 0;
2890 }
2891