xref: /dpdk/lib/pipeline/rte_swx_pipeline.c (revision 99a2dd955fba6e4cc23b77d590a033650ced9c45)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4 #include <stdlib.h>
5 #include <string.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <inttypes.h>
9 #include <sys/queue.h>
10 #include <arpa/inet.h>
11 
12 #include <rte_common.h>
13 #include <rte_prefetch.h>
14 #include <rte_byteorder.h>
15 #include <rte_cycles.h>
16 #include <rte_meter.h>
17 
18 #include "rte_swx_pipeline.h"
19 #include "rte_swx_ctl.h"
20 
21 #define CHECK(condition, err_code)                                             \
22 do {                                                                           \
23 	if (!(condition))                                                      \
24 		return -(err_code);                                            \
25 } while (0)
26 
27 #define CHECK_NAME(name, err_code)                                             \
28 	CHECK((name) &&                                                        \
29 	      (name)[0] &&                                                     \
30 	      (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE),        \
31 	      err_code)
32 
33 #define CHECK_INSTRUCTION(instr, err_code)                                     \
34 	CHECK((instr) &&                                                       \
35 	      (instr)[0] &&                                                    \
36 	      (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) <                    \
37 	       RTE_SWX_INSTRUCTION_SIZE),                                      \
38 	      err_code)
39 
40 #ifndef TRACE_LEVEL
41 #define TRACE_LEVEL 0
42 #endif
43 
44 #if TRACE_LEVEL
45 #define TRACE(...) printf(__VA_ARGS__)
46 #else
47 #define TRACE(...)
48 #endif
49 
50 /*
51  * Environment.
52  */
53 #define ntoh64(x) rte_be_to_cpu_64(x)
54 #define hton64(x) rte_cpu_to_be_64(x)
55 
56 #ifndef RTE_SWX_PIPELINE_HUGE_PAGES_DISABLE
57 
58 #include <rte_malloc.h>
59 
60 static void *
61 env_malloc(size_t size, size_t alignment, int numa_node)
62 {
63 	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
64 }
65 
66 static void
67 env_free(void *start, size_t size __rte_unused)
68 {
69 	rte_free(start);
70 }
71 
72 #else
73 
74 #include <numa.h>
75 
76 static void *
77 env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
78 {
79 	void *start;
80 
81 	if (numa_available() == -1)
82 		return NULL;
83 
84 	start = numa_alloc_onnode(size, numa_node);
85 	if (!start)
86 		return NULL;
87 
88 	memset(start, 0, size);
89 	return start;
90 }
91 
92 static void
93 env_free(void *start, size_t size)
94 {
95 	if (numa_available() == -1)
96 		return;
97 
98 	numa_free(start, size);
99 }
100 
101 #endif
102 
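/* Note on the two env_malloc() flavors above: both return zero-initialized
 * memory. The default (huge-page) path relies on rte_zmalloc_socket(), while
 * the libnuma fallback, used when RTE_SWX_PIPELINE_HUGE_PAGES_DISABLE is
 * defined, clears the buffer explicitly with memset() after
 * numa_alloc_onnode().
 */
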
103 /*
104  * Struct.
105  */
106 struct field {
107 	char name[RTE_SWX_NAME_SIZE];
108 	uint32_t n_bits;
109 	uint32_t offset;
110 };
111 
112 struct struct_type {
113 	TAILQ_ENTRY(struct_type) node;
114 	char name[RTE_SWX_NAME_SIZE];
115 	struct field *fields;
116 	uint32_t n_fields;
117 	uint32_t n_bits;
118 };
119 
120 TAILQ_HEAD(struct_type_tailq, struct_type);
121 
122 /*
123  * Input port.
124  */
125 struct port_in_type {
126 	TAILQ_ENTRY(port_in_type) node;
127 	char name[RTE_SWX_NAME_SIZE];
128 	struct rte_swx_port_in_ops ops;
129 };
130 
131 TAILQ_HEAD(port_in_type_tailq, port_in_type);
132 
133 struct port_in {
134 	TAILQ_ENTRY(port_in) node;
135 	struct port_in_type *type;
136 	void *obj;
137 	uint32_t id;
138 };
139 
140 TAILQ_HEAD(port_in_tailq, port_in);
141 
142 struct port_in_runtime {
143 	rte_swx_port_in_pkt_rx_t pkt_rx;
144 	void *obj;
145 };
146 
147 /*
148  * Output port.
149  */
150 struct port_out_type {
151 	TAILQ_ENTRY(port_out_type) node;
152 	char name[RTE_SWX_NAME_SIZE];
153 	struct rte_swx_port_out_ops ops;
154 };
155 
156 TAILQ_HEAD(port_out_type_tailq, port_out_type);
157 
158 struct port_out {
159 	TAILQ_ENTRY(port_out) node;
160 	struct port_out_type *type;
161 	void *obj;
162 	uint32_t id;
163 };
164 
165 TAILQ_HEAD(port_out_tailq, port_out);
166 
167 struct port_out_runtime {
168 	rte_swx_port_out_pkt_tx_t pkt_tx;
169 	rte_swx_port_out_flush_t flush;
170 	void *obj;
171 };
172 
173 /*
174  * Extern object.
175  */
176 struct extern_type_member_func {
177 	TAILQ_ENTRY(extern_type_member_func) node;
178 	char name[RTE_SWX_NAME_SIZE];
179 	rte_swx_extern_type_member_func_t func;
180 	uint32_t id;
181 };
182 
183 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
184 
185 struct extern_type {
186 	TAILQ_ENTRY(extern_type) node;
187 	char name[RTE_SWX_NAME_SIZE];
188 	struct struct_type *mailbox_struct_type;
189 	rte_swx_extern_type_constructor_t constructor;
190 	rte_swx_extern_type_destructor_t destructor;
191 	struct extern_type_member_func_tailq funcs;
192 	uint32_t n_funcs;
193 };
194 
195 TAILQ_HEAD(extern_type_tailq, extern_type);
196 
197 struct extern_obj {
198 	TAILQ_ENTRY(extern_obj) node;
199 	char name[RTE_SWX_NAME_SIZE];
200 	struct extern_type *type;
201 	void *obj;
202 	uint32_t struct_id;
203 	uint32_t id;
204 };
205 
206 TAILQ_HEAD(extern_obj_tailq, extern_obj);
207 
208 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
209 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
210 #endif
211 
212 struct extern_obj_runtime {
213 	void *obj;
214 	uint8_t *mailbox;
215 	rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
216 };
217 
218 /*
219  * Extern function.
220  */
221 struct extern_func {
222 	TAILQ_ENTRY(extern_func) node;
223 	char name[RTE_SWX_NAME_SIZE];
224 	struct struct_type *mailbox_struct_type;
225 	rte_swx_extern_func_t func;
226 	uint32_t struct_id;
227 	uint32_t id;
228 };
229 
230 TAILQ_HEAD(extern_func_tailq, extern_func);
231 
232 struct extern_func_runtime {
233 	uint8_t *mailbox;
234 	rte_swx_extern_func_t func;
235 };
236 
237 /*
238  * Header.
239  */
240 struct header {
241 	TAILQ_ENTRY(header) node;
242 	char name[RTE_SWX_NAME_SIZE];
243 	struct struct_type *st;
244 	uint32_t struct_id;
245 	uint32_t id;
246 };
247 
248 TAILQ_HEAD(header_tailq, header);
249 
250 struct header_runtime {
251 	uint8_t *ptr0;
252 };
253 
254 struct header_out_runtime {
255 	uint8_t *ptr0;
256 	uint8_t *ptr;
257 	uint32_t n_bytes;
258 };
259 
260 /*
261  * Instruction.
262  */
263 
264 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
265  * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
266  * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
267  * when transferred to packet meta-data and in NBO when transferred to packet
268  * headers.
269  */
270 
271 /* Notation conventions:
272  *    -Header field: H = h.header.field (dst/src)
273  *    -Meta-data field: M = m.field (dst/src)
274  *    -Extern object mailbox field: E = e.field (dst/src)
275  *    -Extern function mailbox field: F = f.field (dst/src)
276  *    -Table action data field: T = t.field (src only)
277  *    -Immediate value: I = 32-bit unsigned value (src only)
278  */
279 
280 enum instruction_type {
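/* Illustrative examples of this notation (field names are made up, for
 * documentation purposes only):
 *    mov m.next_hop_id h.ipv4.dst_addr      dst = M, src = H
 *    add m.pkt_len 0x10                     dst = M, src = I
 */
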
281 	/* rx m.port_in */
282 	INSTR_RX,
283 
284 	/* tx port_out
285 	 * port_out = MI
286 	 */
287 	INSTR_TX,   /* port_out = M */
288 	INSTR_TX_I, /* port_out = I */
289 
290 	/* extract h.header */
291 	INSTR_HDR_EXTRACT,
292 	INSTR_HDR_EXTRACT2,
293 	INSTR_HDR_EXTRACT3,
294 	INSTR_HDR_EXTRACT4,
295 	INSTR_HDR_EXTRACT5,
296 	INSTR_HDR_EXTRACT6,
297 	INSTR_HDR_EXTRACT7,
298 	INSTR_HDR_EXTRACT8,
299 
300 	/* emit h.header */
301 	INSTR_HDR_EMIT,
302 	INSTR_HDR_EMIT_TX,
303 	INSTR_HDR_EMIT2_TX,
304 	INSTR_HDR_EMIT3_TX,
305 	INSTR_HDR_EMIT4_TX,
306 	INSTR_HDR_EMIT5_TX,
307 	INSTR_HDR_EMIT6_TX,
308 	INSTR_HDR_EMIT7_TX,
309 	INSTR_HDR_EMIT8_TX,
310 
311 	/* validate h.header */
312 	INSTR_HDR_VALIDATE,
313 
314 	/* invalidate h.header */
315 	INSTR_HDR_INVALIDATE,
316 
317 	/* mov dst src
318 	 * dst = src
319 	 * dst = HMEF, src = HMEFTI
320 	 */
321 	INSTR_MOV,    /* dst = MEF, src = MEFT */
322 	INSTR_MOV_MH, /* dst = MEF, src = H */
323 	INSTR_MOV_HM, /* dst = H, src = MEFT */
324 	INSTR_MOV_HH, /* dst = H, src = H */
325 	INSTR_MOV_I,  /* dst = HMEF, src = I */
326 
327 	/* dma h.header t.field
328 	 * memcpy(h.header, t.field, sizeof(h.header))
329 	 */
330 	INSTR_DMA_HT,
331 	INSTR_DMA_HT2,
332 	INSTR_DMA_HT3,
333 	INSTR_DMA_HT4,
334 	INSTR_DMA_HT5,
335 	INSTR_DMA_HT6,
336 	INSTR_DMA_HT7,
337 	INSTR_DMA_HT8,
338 
339 	/* add dst src
340 	 * dst += src
341 	 * dst = HMEF, src = HMEFTI
342 	 */
343 	INSTR_ALU_ADD,    /* dst = MEF, src = MEF */
344 	INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
345 	INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
346 	INSTR_ALU_ADD_HH, /* dst = H, src = H */
347 	INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
348 	INSTR_ALU_ADD_HI, /* dst = H, src = I */
349 
350 	/* sub dst src
351 	 * dst -= src
352 	 * dst = HMEF, src = HMEFTI
353 	 */
354 	INSTR_ALU_SUB,    /* dst = MEF, src = MEF */
355 	INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
356 	INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
357 	INSTR_ALU_SUB_HH, /* dst = H, src = H */
358 	INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
359 	INSTR_ALU_SUB_HI, /* dst = H, src = I */
360 
361 	/* ckadd dst src
362 	 * dst = dst '+ src[0:1] '+ src[2:3] '+ ...
363 	 * dst = H, src = {H, h.header}
364 	 */
365 	INSTR_ALU_CKADD_FIELD,    /* src = H */
366 	INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
367 	INSTR_ALU_CKADD_STRUCT,   /* src = h.header, with any sizeof(header) */
368 
369 	/* cksub dst src
370 	 * dst = dst '- src
371 	 * dst = H, src = H
372 	 */
373 	INSTR_ALU_CKSUB_FIELD,
374 
375 	/* and dst src
376 	 * dst &= src
377 	 * dst = HMEF, src = HMEFTI
378 	 */
379 	INSTR_ALU_AND,    /* dst = MEF, src = MEFT */
380 	INSTR_ALU_AND_MH, /* dst = MEF, src = H */
381 	INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
382 	INSTR_ALU_AND_HH, /* dst = H, src = H */
383 	INSTR_ALU_AND_I,  /* dst = HMEF, src = I */
384 
385 	/* or dst src
386 	 * dst |= src
387 	 * dst = HMEF, src = HMEFTI
388 	 */
389 	INSTR_ALU_OR,    /* dst = MEF, src = MEFT */
390 	INSTR_ALU_OR_MH, /* dst = MEF, src = H */
391 	INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
392 	INSTR_ALU_OR_HH, /* dst = H, src = H */
393 	INSTR_ALU_OR_I,  /* dst = HMEF, src = I */
394 
395 	/* xor dst src
396 	 * dst ^= src
397 	 * dst = HMEF, src = HMEFTI
398 	 */
399 	INSTR_ALU_XOR,    /* dst = MEF, src = MEFT */
400 	INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
401 	INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
402 	INSTR_ALU_XOR_HH, /* dst = H, src = H */
403 	INSTR_ALU_XOR_I,  /* dst = HMEF, src = I */
404 
405 	/* shl dst src
406 	 * dst <<= src
407 	 * dst = HMEF, src = HMEFTI
408 	 */
409 	INSTR_ALU_SHL,    /* dst = MEF, src = MEF */
410 	INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
411 	INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
412 	INSTR_ALU_SHL_HH, /* dst = H, src = H */
413 	INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
414 	INSTR_ALU_SHL_HI, /* dst = H, src = I */
415 
416 	/* shr dst src
417 	 * dst >>= src
418 	 * dst = HMEF, src = HMEFTI
419 	 */
420 	INSTR_ALU_SHR,    /* dst = MEF, src = MEF */
421 	INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
422 	INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
423 	INSTR_ALU_SHR_HH, /* dst = H, src = H */
424 	INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
425 	INSTR_ALU_SHR_HI, /* dst = H, src = I */
426 
427 	/* regprefetch REGARRAY index
428 	 * prefetch REGARRAY[index]
429 	 * index = HMEFTI
430 	 */
431 	INSTR_REGPREFETCH_RH, /* index = H */
432 	INSTR_REGPREFETCH_RM, /* index = MEFT */
433 	INSTR_REGPREFETCH_RI, /* index = I */
434 
435 	/* regrd dst REGARRAY index
436 	 * dst = REGARRAY[index]
437 	 * dst = HMEF, index = HMEFTI
438 	 */
439 	INSTR_REGRD_HRH, /* dst = H, index = H */
440 	INSTR_REGRD_HRM, /* dst = H, index = MEFT */
441 	INSTR_REGRD_HRI, /* dst = H, index = I */
442 	INSTR_REGRD_MRH, /* dst = MEF, index = H */
443 	INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
444 	INSTR_REGRD_MRI, /* dst = MEF, index = I */
445 
446 	/* regwr REGARRAY index src
447 	 * REGARRAY[index] = src
448 	 * index = HMEFTI, src = HMEFTI
449 	 */
450 	INSTR_REGWR_RHH, /* index = H, src = H */
451 	INSTR_REGWR_RHM, /* index = H, src = MEFT */
452 	INSTR_REGWR_RHI, /* index = H, src = I */
453 	INSTR_REGWR_RMH, /* index = MEFT, src = H */
454 	INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
455 	INSTR_REGWR_RMI, /* index = MEFT, src = I */
456 	INSTR_REGWR_RIH, /* index = I, src = H */
457 	INSTR_REGWR_RIM, /* index = I, src = MEFT */
458 	INSTR_REGWR_RII, /* index = I, src = I */
459 
460 	/* regadd REGARRAY index src
461 	 * REGARRAY[index] += src
462 	 * index = HMEFTI, src = HMEFTI
463 	 */
464 	INSTR_REGADD_RHH, /* index = H, src = H */
465 	INSTR_REGADD_RHM, /* index = H, src = MEFT */
466 	INSTR_REGADD_RHI, /* index = H, src = I */
467 	INSTR_REGADD_RMH, /* index = MEFT, src = H */
468 	INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
469 	INSTR_REGADD_RMI, /* index = MEFT, src = I */
470 	INSTR_REGADD_RIH, /* index = I, src = H */
471 	INSTR_REGADD_RIM, /* index = I, src = MEFT */
472 	INSTR_REGADD_RII, /* index = I, src = I */
473 
474 	/* metprefetch METARRAY index
475 	 * prefetch METARRAY[index]
476 	 * index = HMEFTI
477 	 */
478 	INSTR_METPREFETCH_H, /* index = H */
479 	INSTR_METPREFETCH_M, /* index = MEFT */
480 	INSTR_METPREFETCH_I, /* index = I */
481 
482 	/* meter METARRAY index length color_in color_out
483 	 * color_out = meter(METARRAY[index], length, color_in)
484 	 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
485 	 */
486 	INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
487 	INSTR_METER_HHI, /* index = H, length = H, color_in = I */
488 	INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
489 	INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
490 	INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
491 	INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
492 	INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
493 	INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
494 	INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
495 	INSTR_METER_IHI, /* index = I, length = H, color_in = I */
496 	INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
497 	INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */
498 
499 	/* table TABLE */
500 	INSTR_TABLE,
501 
502 	/* extern e.obj.func */
503 	INSTR_EXTERN_OBJ,
504 
505 	/* extern f.func */
506 	INSTR_EXTERN_FUNC,
507 
508 	/* jmp LABEL
509 	 * Unconditional jump
510 	 */
511 	INSTR_JMP,
512 
513 	/* jmpv LABEL h.header
514 	 * Jump if header is valid
515 	 */
516 	INSTR_JMP_VALID,
517 
518 	/* jmpnv LABEL h.header
519 	 * Jump if header is invalid
520 	 */
521 	INSTR_JMP_INVALID,
522 
523 	/* jmph LABEL
524 	 * Jump if table lookup hit
525 	 */
526 	INSTR_JMP_HIT,
527 
528 	/* jmpnh LABEL
529 	 * Jump if table lookup miss
530 	 */
531 	INSTR_JMP_MISS,
532 
533 	/* jmpa LABEL ACTION
534 	 * Jump if action run
535 	 */
536 	INSTR_JMP_ACTION_HIT,
537 
538 	/* jmpna LABEL ACTION
539 	 * Jump if action not run
540 	 */
541 	INSTR_JMP_ACTION_MISS,
542 
543 	/* jmpeq LABEL a b
544 	 * Jump if a is equal to b
545 	 * a = HMEFT, b = HMEFTI
546 	 */
547 	INSTR_JMP_EQ,    /* a = MEFT, b = MEFT */
548 	INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
549 	INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
550 	INSTR_JMP_EQ_HH, /* a = H, b = H */
551 	INSTR_JMP_EQ_I,  /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
552 
553 	/* jmpneq LABEL a b
554 	 * Jump if a is not equal to b
555 	 * a = HMEFT, b = HMEFTI
556 	 */
557 	INSTR_JMP_NEQ,    /* a = MEFT, b = MEFT */
558 	INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
559 	INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
560 	INSTR_JMP_NEQ_HH, /* a = H, b = H */
561 	INSTR_JMP_NEQ_I,  /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
562 
563 	/* jmplt LABEL a b
564 	 * Jump if a is less than b
565 	 * a = HMEFT, b = HMEFTI
566 	 */
567 	INSTR_JMP_LT,    /* a = MEFT, b = MEFT */
568 	INSTR_JMP_LT_MH, /* a = MEFT, b = H */
569 	INSTR_JMP_LT_HM, /* a = H, b = MEFT */
570 	INSTR_JMP_LT_HH, /* a = H, b = H */
571 	INSTR_JMP_LT_MI, /* a = MEFT, b = I */
572 	INSTR_JMP_LT_HI, /* a = H, b = I */
573 
574 	/* jmpgt LABEL a b
575 	 * Jump if a is greater than b
576 	 * a = HMEFT, b = HMEFTI
577 	 */
578 	INSTR_JMP_GT,    /* a = MEFT, b = MEFT */
579 	INSTR_JMP_GT_MH, /* a = MEFT, b = H */
580 	INSTR_JMP_GT_HM, /* a = H, b = MEFT */
581 	INSTR_JMP_GT_HH, /* a = H, b = H */
582 	INSTR_JMP_GT_MI, /* a = MEFT, b = I */
583 	INSTR_JMP_GT_HI, /* a = H, b = I */
584 
585 	/* return
586 	 * Return from action
587 	 */
588 	INSTR_RETURN,
589 };
590 
591 struct instr_operand {
592 	uint8_t struct_id;
593 	uint8_t n_bits;
594 	uint8_t offset;
595 	uint8_t pad;
596 };
597 
598 struct instr_io {
599 	struct {
600 		union {
601 			struct {
602 				uint8_t offset;
603 				uint8_t n_bits;
604 				uint8_t pad[2];
605 			};
606 
607 			uint32_t val;
608 		};
609 	} io;
610 
611 	struct {
612 		uint8_t header_id[8];
613 		uint8_t struct_id[8];
614 		uint8_t n_bytes[8];
615 	} hdr;
616 };
617 
618 struct instr_hdr_validity {
619 	uint8_t header_id;
620 };
621 
622 struct instr_table {
623 	uint8_t table_id;
624 };
625 
626 struct instr_extern_obj {
627 	uint8_t ext_obj_id;
628 	uint8_t func_id;
629 };
630 
631 struct instr_extern_func {
632 	uint8_t ext_func_id;
633 };
634 
635 struct instr_dst_src {
636 	struct instr_operand dst;
637 	union {
638 		struct instr_operand src;
639 		uint64_t src_val;
640 	};
641 };
642 
643 struct instr_regarray {
644 	uint8_t regarray_id;
645 	uint8_t pad[3];
646 
647 	union {
648 		struct instr_operand idx;
649 		uint32_t idx_val;
650 	};
651 
652 	union {
653 		struct instr_operand dstsrc;
654 		uint64_t dstsrc_val;
655 	};
656 };
657 
658 struct instr_meter {
659 	uint8_t metarray_id;
660 	uint8_t pad[3];
661 
662 	union {
663 		struct instr_operand idx;
664 		uint32_t idx_val;
665 	};
666 
667 	struct instr_operand length;
668 
669 	union {
670 		struct instr_operand color_in;
671 		uint32_t color_in_val;
672 	};
673 
674 	struct instr_operand color_out;
675 };
676 
677 struct instr_dma {
678 	struct {
679 		uint8_t header_id[8];
680 		uint8_t struct_id[8];
681 	} dst;
682 
683 	struct {
684 		uint8_t offset[8];
685 	} src;
686 
687 	uint16_t n_bytes[8];
688 };
689 
690 struct instr_jmp {
691 	struct instruction *ip;
692 
693 	union {
694 		struct instr_operand a;
695 		uint8_t header_id;
696 		uint8_t action_id;
697 	};
698 
699 	union {
700 		struct instr_operand b;
701 		uint64_t b_val;
702 	};
703 };
704 
705 struct instruction {
706 	enum instruction_type type;
707 	union {
708 		struct instr_io io;
709 		struct instr_hdr_validity valid;
710 		struct instr_dst_src mov;
711 		struct instr_regarray regarray;
712 		struct instr_meter meter;
713 		struct instr_dma dma;
714 		struct instr_dst_src alu;
715 		struct instr_table table;
716 		struct instr_extern_obj ext_obj;
717 		struct instr_extern_func ext_func;
718 		struct instr_jmp jmp;
719 	};
720 };
721 
722 struct instruction_data {
723 	char label[RTE_SWX_NAME_SIZE];
724 	char jmp_label[RTE_SWX_NAME_SIZE];
725 	uint32_t n_users; /* Number of jump instructions that point to this instruction. */
726 	int invalid;
727 };
728 
729 /*
730  * Action.
731  */
732 struct action {
733 	TAILQ_ENTRY(action) node;
734 	char name[RTE_SWX_NAME_SIZE];
735 	struct struct_type *st;
736 	int *args_endianness; /* 0 = Host Byte Order (HBO), non-zero = Network Byte Order (NBO). */
737 	struct instruction *instructions;
738 	uint32_t n_instructions;
739 	uint32_t id;
740 };
741 
742 TAILQ_HEAD(action_tailq, action);
743 
744 /*
745  * Table.
746  */
747 struct table_type {
748 	TAILQ_ENTRY(table_type) node;
749 	char name[RTE_SWX_NAME_SIZE];
750 	enum rte_swx_table_match_type match_type;
751 	struct rte_swx_table_ops ops;
752 };
753 
754 TAILQ_HEAD(table_type_tailq, table_type);
755 
756 struct match_field {
757 	enum rte_swx_table_match_type match_type;
758 	struct field *field;
759 };
760 
761 struct table {
762 	TAILQ_ENTRY(table) node;
763 	char name[RTE_SWX_NAME_SIZE];
764 	char args[RTE_SWX_NAME_SIZE];
765 	struct table_type *type; /* NULL when n_fields == 0. */
766 
767 	/* Match. */
768 	struct match_field *fields;
769 	uint32_t n_fields;
770 	struct header *header; /* Only valid when n_fields > 0. */
771 
772 	/* Action. */
773 	struct action **actions;
774 	struct action *default_action;
775 	uint8_t *default_action_data;
776 	uint32_t n_actions;
777 	int default_action_is_const;
778 	uint32_t action_data_size_max;
779 
780 	uint32_t size;
781 	uint32_t id;
782 };
783 
784 TAILQ_HEAD(table_tailq, table);
785 
786 struct table_runtime {
787 	rte_swx_table_lookup_t func;
788 	void *mailbox;
789 	uint8_t **key;
790 };
791 
792 struct table_statistics {
793 	uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
794 	uint64_t *n_pkts_action;
795 };
796 
797 /*
798  * Register array.
799  */
800 struct regarray {
801 	TAILQ_ENTRY(regarray) node;
802 	char name[RTE_SWX_NAME_SIZE];
803 	uint64_t init_val;
804 	uint32_t size;
805 	uint32_t id;
806 };
807 
808 TAILQ_HEAD(regarray_tailq, regarray);
809 
810 struct regarray_runtime {
811 	uint64_t *regarray;
812 	uint32_t size_mask;
813 };
814 
815 /*
816  * Meter array.
817  */
818 struct meter_profile {
819 	TAILQ_ENTRY(meter_profile) node;
820 	char name[RTE_SWX_NAME_SIZE];
821 	struct rte_meter_trtcm_params params;
822 	struct rte_meter_trtcm_profile profile;
823 	uint32_t n_users;
824 };
825 
826 TAILQ_HEAD(meter_profile_tailq, meter_profile);
827 
828 struct metarray {
829 	TAILQ_ENTRY(metarray) node;
830 	char name[RTE_SWX_NAME_SIZE];
831 	uint32_t size;
832 	uint32_t id;
833 };
834 
835 TAILQ_HEAD(metarray_tailq, metarray);
836 
837 struct meter {
838 	struct rte_meter_trtcm m;
839 	struct meter_profile *profile;
840 	enum rte_color color_mask;
841 	uint8_t pad[20];
842 
843 	uint64_t n_pkts[RTE_COLORS];
844 	uint64_t n_bytes[RTE_COLORS];
845 };
846 
847 struct metarray_runtime {
848 	struct meter *metarray;
849 	uint32_t size_mask;
850 };
851 
852 /*
853  * Pipeline.
854  */
855 struct thread {
856 	/* Packet. */
857 	struct rte_swx_pkt pkt;
858 	uint8_t *ptr;
859 
860 	/* Structures. */
861 	uint8_t **structs;
862 
863 	/* Packet headers. */
864 	struct header_runtime *headers; /* Extracted or generated headers. */
865 	struct header_out_runtime *headers_out; /* Emitted headers. */
866 	uint8_t *header_storage;
867 	uint8_t *header_out_storage;
868 	uint64_t valid_headers;
869 	uint32_t n_headers_out;
870 
871 	/* Packet meta-data. */
872 	uint8_t *metadata;
873 
874 	/* Tables. */
875 	struct table_runtime *tables;
876 	struct rte_swx_table_state *table_state;
877 	uint64_t action_id;
878 	int hit; /* 0 = Miss, 1 = Hit. */
879 
880 	/* Extern objects and functions. */
881 	struct extern_obj_runtime *extern_objs;
882 	struct extern_func_runtime *extern_funcs;
883 
884 	/* Instructions. */
885 	struct instruction *ip;
886 	struct instruction *ret;
887 };
888 
889 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
890 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
891 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
892 
893 #define HEADER_VALID(thread, header_id) \
894 	MASK64_BIT_GET((thread)->valid_headers, header_id)
895 
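/* The valid_headers member of struct thread is a 64-bit mask with one bit per
 * header, indexed by header_id, so HEADER_VALID() reduces to a single shift
 * and AND.
 */
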
896 #define ALU(thread, ip, operator)  \
897 {                                                                              \
898 	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
899 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
900 	uint64_t dst64 = *dst64_ptr;                                           \
901 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
902 	uint64_t dst = dst64 & dst64_mask;                                     \
903 									       \
904 	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];      \
905 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];   \
906 	uint64_t src64 = *src64_ptr;                                           \
907 	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);       \
908 	uint64_t src = src64 & src64_mask;                                     \
909 									       \
910 	uint64_t result = dst operator src;                                    \
911 									       \
912 	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);            \
913 }
914 
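/* The ALU macro above performs a masked read-modify-write: the destination
 * field (at most 64 bits wide, as enforced at struct type registration time)
 * is read as a full 64-bit word, its low n_bits are replaced with the result
 * of the operation and the remaining bits are preserved. The variants below
 * follow the same pattern, adding ntoh64()/hton64() conversions for header
 * (NBO) operands.
 */
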
915 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
916 
917 #define ALU_MH(thread, ip, operator)  \
918 {                                                                              \
919 	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
920 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
921 	uint64_t dst64 = *dst64_ptr;                                           \
922 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
923 	uint64_t dst = dst64 & dst64_mask;                                     \
924 									       \
925 	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];      \
926 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];   \
927 	uint64_t src64 = *src64_ptr;                                           \
928 	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits);           \
929 									       \
930 	uint64_t result = dst operator src;                                    \
931 									       \
932 	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);            \
933 }
934 
935 #define ALU_HM(thread, ip, operator)  \
936 {                                                                              \
937 	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
938 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
939 	uint64_t dst64 = *dst64_ptr;                                           \
940 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
941 	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);           \
942 									       \
943 	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];      \
944 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];   \
945 	uint64_t src64 = *src64_ptr;                                           \
946 	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);       \
947 	uint64_t src = src64 & src64_mask;                                     \
948 									       \
949 	uint64_t result = dst operator src;                                    \
950 	result = hton64(result << (64 - (ip)->alu.dst.n_bits));                \
951 									       \
952 	*dst64_ptr = (dst64 & ~dst64_mask) | result;                           \
953 }
954 
955 #define ALU_HM_FAST(thread, ip, operator)  \
956 {                                                                                 \
957 	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];         \
958 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];      \
959 	uint64_t dst64 = *dst64_ptr;                                              \
960 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);          \
961 	uint64_t dst = dst64 & dst64_mask;                                        \
962 										  \
963 	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];         \
964 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];      \
965 	uint64_t src64 = *src64_ptr;                                              \
966 	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);          \
967 	uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
968 										  \
969 	uint64_t result = dst operator src;                                       \
970 										  \
971 	*dst64_ptr = (dst64 & ~dst64_mask) | result;                              \
972 }
973 
974 #define ALU_HH(thread, ip, operator)  \
975 {                                                                              \
976 	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
977 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
978 	uint64_t dst64 = *dst64_ptr;                                           \
979 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
980 	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);           \
981 									       \
982 	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];      \
983 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];   \
984 	uint64_t src64 = *src64_ptr;                                           \
985 	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits);           \
986 									       \
987 	uint64_t result = dst operator src;                                    \
988 	result = hton64(result << (64 - (ip)->alu.dst.n_bits));                \
989 									       \
990 	*dst64_ptr = (dst64 & ~dst64_mask) | result;                           \
991 }
992 
993 #define ALU_HH_FAST(thread, ip, operator)  \
994 {                                                                                             \
995 	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];                     \
996 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];                  \
997 	uint64_t dst64 = *dst64_ptr;                                                          \
998 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);                      \
999 	uint64_t dst = dst64 & dst64_mask;                                                    \
1000 											      \
1001 	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];                     \
1002 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];                  \
1003 	uint64_t src64 = *src64_ptr;                                                          \
1004 	uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
1005 											      \
1006 	uint64_t result = dst operator src;                                                   \
1007 											      \
1008 	*dst64_ptr = (dst64 & ~dst64_mask) | result;                                          \
1009 }
1010 
1011 #else
1012 
1013 #define ALU_MH ALU
1014 #define ALU_HM ALU
1015 #define ALU_HM_FAST ALU
1016 #define ALU_HH ALU
1017 #define ALU_HH_FAST ALU
1018 
1019 #endif
1020 
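/* Suffix convention for the ALU/MOV/JMP_CMP macro variants in this file: the
 * first letter refers to the destination and the second to the source, with
 * H = header field (NBO), M = meta-data, extern mailbox or table action data
 * field (all HBO) and I = immediate. On big endian CPUs NBO and HBO coincide,
 * so the H variants simply alias the generic macro.
 */
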
1021 #define ALU_I(thread, ip, operator)  \
1022 {                                                                              \
1023 	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
1024 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
1025 	uint64_t dst64 = *dst64_ptr;                                           \
1026 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
1027 	uint64_t dst = dst64 & dst64_mask;                                     \
1028 									       \
1029 	uint64_t src = (ip)->alu.src_val;                                      \
1030 									       \
1031 	uint64_t result = dst operator src;                                    \
1032 									       \
1033 	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);            \
1034 }
1035 
1036 #define ALU_MI ALU_I
1037 
1038 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1039 
1040 #define ALU_HI(thread, ip, operator)  \
1041 {                                                                              \
1042 	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
1043 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
1044 	uint64_t dst64 = *dst64_ptr;                                           \
1045 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
1046 	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);           \
1047 									       \
1048 	uint64_t src = (ip)->alu.src_val;                                      \
1049 									       \
1050 	uint64_t result = dst operator src;                                    \
1051 	result = hton64(result << (64 - (ip)->alu.dst.n_bits));                \
1052 									       \
1053 	*dst64_ptr = (dst64 & ~dst64_mask) | result;                           \
1054 }
1055 
1056 #else
1057 
1058 #define ALU_HI ALU_I
1059 
1060 #endif
1061 
1062 #define MOV(thread, ip)  \
1063 {                                                                              \
1064 	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];      \
1065 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];   \
1066 	uint64_t dst64 = *dst64_ptr;                                           \
1067 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);       \
1068 									       \
1069 	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];      \
1070 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];   \
1071 	uint64_t src64 = *src64_ptr;                                           \
1072 	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits);       \
1073 	uint64_t src = src64 & src64_mask;                                     \
1074 									       \
1075 	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);               \
1076 }
1077 
1078 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1079 
1080 #define MOV_MH(thread, ip)  \
1081 {                                                                              \
1082 	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];      \
1083 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];   \
1084 	uint64_t dst64 = *dst64_ptr;                                           \
1085 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);       \
1086 									       \
1087 	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];      \
1088 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];   \
1089 	uint64_t src64 = *src64_ptr;                                           \
1090 	uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits);           \
1091 									       \
1092 	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);               \
1093 }
1094 
1095 #define MOV_HM(thread, ip)  \
1096 {                                                                              \
1097 	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];      \
1098 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];   \
1099 	uint64_t dst64 = *dst64_ptr;                                           \
1100 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);       \
1101 									       \
1102 	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];      \
1103 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];   \
1104 	uint64_t src64 = *src64_ptr;                                           \
1105 	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits);       \
1106 	uint64_t src = src64 & src64_mask;                                     \
1107 									       \
1108 	src = hton64(src) >> (64 - (ip)->mov.dst.n_bits);                      \
1109 	*dst64_ptr = (dst64 & ~dst64_mask) | src;                              \
1110 }
1111 
1112 #define MOV_HH(thread, ip)  \
1113 {                                                                              \
1114 	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];      \
1115 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];   \
1116 	uint64_t dst64 = *dst64_ptr;                                           \
1117 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);       \
1118 									       \
1119 	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];      \
1120 	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];   \
1121 	uint64_t src64 = *src64_ptr;                                           \
1122 									       \
1123 	uint64_t src = src64 << (64 - (ip)->mov.src.n_bits);                   \
1124 	src = src >> (64 - (ip)->mov.dst.n_bits);                              \
1125 	*dst64_ptr = (dst64 & ~dst64_mask) | src;                              \
1126 }
1127 
1128 #else
1129 
1130 #define MOV_MH MOV
1131 #define MOV_HM MOV
1132 #define MOV_HH MOV
1133 
1134 #endif
1135 
1136 #define MOV_I(thread, ip)  \
1137 {                                                                              \
1138 	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];      \
1139 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];   \
1140 	uint64_t dst64 = *dst64_ptr;                                           \
1141 	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);       \
1142 									       \
1143 	uint64_t src = (ip)->mov.src_val;                                      \
1144 									       \
1145 	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);               \
1146 }
1147 
1148 #define JMP_CMP(thread, ip, operator)  \
1149 {                                                                              \
1150 	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
1151 	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
1152 	uint64_t a64 = *a64_ptr;                                               \
1153 	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);           \
1154 	uint64_t a = a64 & a64_mask;                                           \
1155 									       \
1156 	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];          \
1157 	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];         \
1158 	uint64_t b64 = *b64_ptr;                                               \
1159 	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits);           \
1160 	uint64_t b = b64 & b64_mask;                                           \
1161 									       \
1162 	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
1163 }
1164 
1165 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1166 
1167 #define JMP_CMP_MH(thread, ip, operator)  \
1168 {                                                                              \
1169 	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
1170 	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
1171 	uint64_t a64 = *a64_ptr;                                               \
1172 	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);           \
1173 	uint64_t a = a64 & a64_mask;                                           \
1174 									       \
1175 	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];          \
1176 	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];         \
1177 	uint64_t b64 = *b64_ptr;                                               \
1178 	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits);                 \
1179 									       \
1180 	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
1181 }
1182 
1183 #define JMP_CMP_HM(thread, ip, operator)  \
1184 {                                                                              \
1185 	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
1186 	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
1187 	uint64_t a64 = *a64_ptr;                                               \
1188 	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                 \
1189 									       \
1190 	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];          \
1191 	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];         \
1192 	uint64_t b64 = *b64_ptr;                                               \
1193 	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits);           \
1194 	uint64_t b = b64 & b64_mask;                                           \
1195 									       \
1196 	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
1197 }
1198 
1199 #define JMP_CMP_HH(thread, ip, operator)  \
1200 {                                                                              \
1201 	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
1202 	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
1203 	uint64_t a64 = *a64_ptr;                                               \
1204 	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                 \
1205 									       \
1206 	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];          \
1207 	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];         \
1208 	uint64_t b64 = *b64_ptr;                                               \
1209 	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits);                 \
1210 									       \
1211 	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
1212 }
1213 
1214 #define JMP_CMP_HH_FAST(thread, ip, operator)  \
1215 {                                                                              \
1216 	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
1217 	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
1218 	uint64_t a64 = *a64_ptr;                                               \
1219 	uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits);                         \
1220 									       \
1221 	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];          \
1222 	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];         \
1223 	uint64_t b64 = *b64_ptr;                                               \
1224 	uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits);                         \
1225 									       \
1226 	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
1227 }
1228 
1229 #else
1230 
1231 #define JMP_CMP_MH JMP_CMP
1232 #define JMP_CMP_HM JMP_CMP
1233 #define JMP_CMP_HH JMP_CMP
1234 #define JMP_CMP_HH_FAST JMP_CMP
1235 
1236 #endif
1237 
1238 #define JMP_CMP_I(thread, ip, operator)  \
1239 {                                                                              \
1240 	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
1241 	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
1242 	uint64_t a64 = *a64_ptr;                                               \
1243 	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);           \
1244 	uint64_t a = a64 & a64_mask;                                           \
1245 									       \
1246 	uint64_t b = (ip)->jmp.b_val;                                          \
1247 									       \
1248 	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
1249 }
1250 
1251 #define JMP_CMP_MI JMP_CMP_I
1252 
1253 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1254 
1255 #define JMP_CMP_HI(thread, ip, operator)  \
1256 {                                                                              \
1257 	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
1258 	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
1259 	uint64_t a64 = *a64_ptr;                                               \
1260 	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                 \
1261 									       \
1262 	uint64_t b = (ip)->jmp.b_val;                                          \
1263 									       \
1264 	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
1265 }
1266 
1267 #else
1268 
1269 #define JMP_CMP_HI JMP_CMP_I
1270 
1271 #endif
1272 
1273 #define METADATA_READ(thread, offset, n_bits)                                  \
1274 ({                                                                             \
1275 	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset];           \
1276 	uint64_t m64 = *m64_ptr;                                               \
1277 	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits));                     \
1278 	(m64 & m64_mask);                                                      \
1279 })
1280 
1281 #define METADATA_WRITE(thread, offset, n_bits, value)                          \
1282 {                                                                              \
1283 	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset];           \
1284 	uint64_t m64 = *m64_ptr;                                               \
1285 	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits));                     \
1286 									       \
1287 	uint64_t m_new = value;                                                \
1288 									       \
1289 	*m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask);                     \
1290 }
1291 
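/* Minimal usage sketch for the meta-data accessors above (the offset and
 * n_bits values come from the meta-data struct type; the names shown here are
 * made up):
 *
 *     uint64_t port = METADATA_READ(t, offset_of_port_in, 32);
 *     METADATA_WRITE(t, offset_of_port_out, 32, port);
 */
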
1292 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
1293 #define RTE_SWX_PIPELINE_THREADS_MAX 16
1294 #endif
1295 
1296 struct rte_swx_pipeline {
1297 	struct struct_type_tailq struct_types;
1298 	struct port_in_type_tailq port_in_types;
1299 	struct port_in_tailq ports_in;
1300 	struct port_out_type_tailq port_out_types;
1301 	struct port_out_tailq ports_out;
1302 	struct extern_type_tailq extern_types;
1303 	struct extern_obj_tailq extern_objs;
1304 	struct extern_func_tailq extern_funcs;
1305 	struct header_tailq headers;
1306 	struct struct_type *metadata_st;
1307 	uint32_t metadata_struct_id;
1308 	struct action_tailq actions;
1309 	struct table_type_tailq table_types;
1310 	struct table_tailq tables;
1311 	struct regarray_tailq regarrays;
1312 	struct meter_profile_tailq meter_profiles;
1313 	struct metarray_tailq metarrays;
1314 
1315 	struct port_in_runtime *in;
1316 	struct port_out_runtime *out;
1317 	struct instruction **action_instructions;
1318 	struct rte_swx_table_state *table_state;
1319 	struct table_statistics *table_stats;
1320 	struct regarray_runtime *regarray_runtime;
1321 	struct metarray_runtime *metarray_runtime;
1322 	struct instruction *instructions;
1323 	struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1324 
1325 	uint32_t n_structs;
1326 	uint32_t n_ports_in;
1327 	uint32_t n_ports_out;
1328 	uint32_t n_extern_objs;
1329 	uint32_t n_extern_funcs;
1330 	uint32_t n_actions;
1331 	uint32_t n_tables;
1332 	uint32_t n_regarrays;
1333 	uint32_t n_metarrays;
1334 	uint32_t n_headers;
1335 	uint32_t thread_id;
1336 	uint32_t port_id;
1337 	uint32_t n_instructions;
1338 	int build_done;
1339 	int numa_node;
1340 };
1341 
1342 /*
1343  * Struct.
1344  */
1345 static struct struct_type *
1346 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1347 {
1348 	struct struct_type *elem;
1349 
1350 	TAILQ_FOREACH(elem, &p->struct_types, node)
1351 		if (strcmp(elem->name, name) == 0)
1352 			return elem;
1353 
1354 	return NULL;
1355 }
1356 
1357 static struct field *
1358 struct_type_field_find(struct struct_type *st, const char *name)
1359 {
1360 	uint32_t i;
1361 
1362 	for (i = 0; i < st->n_fields; i++) {
1363 		struct field *f = &st->fields[i];
1364 
1365 		if (strcmp(f->name, name) == 0)
1366 			return f;
1367 	}
1368 
1369 	return NULL;
1370 }
1371 
1372 int
1373 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1374 				      const char *name,
1375 				      struct rte_swx_field_params *fields,
1376 				      uint32_t n_fields)
1377 {
1378 	struct struct_type *st;
1379 	uint32_t i;
1380 
1381 	CHECK(p, EINVAL);
1382 	CHECK_NAME(name, EINVAL);
1383 	CHECK(fields, EINVAL);
1384 	CHECK(n_fields, EINVAL);
1385 
1386 	for (i = 0; i < n_fields; i++) {
1387 		struct rte_swx_field_params *f = &fields[i];
1388 		uint32_t j;
1389 
1390 		CHECK_NAME(f->name, EINVAL);
1391 		CHECK(f->n_bits, EINVAL);
1392 		CHECK(f->n_bits <= 64, EINVAL);
1393 		CHECK((f->n_bits & 7) == 0, EINVAL);
1394 
1395 		for (j = 0; j < i; j++) {
1396 			struct rte_swx_field_params *f_prev = &fields[j];
1397 
1398 			CHECK(strcmp(f->name, f_prev->name), EINVAL);
1399 		}
1400 	}
1401 
1402 	CHECK(!struct_type_find(p, name), EEXIST);
1403 
1404 	/* Node allocation. */
1405 	st = calloc(1, sizeof(struct struct_type));
1406 	CHECK(st, ENOMEM);
1407 
1408 	st->fields = calloc(n_fields, sizeof(struct field));
1409 	if (!st->fields) {
1410 		free(st);
1411 		CHECK(0, ENOMEM);
1412 	}
1413 
1414 	/* Node initialization. */
1415 	strcpy(st->name, name);
1416 	for (i = 0; i < n_fields; i++) {
1417 		struct field *dst = &st->fields[i];
1418 		struct rte_swx_field_params *src = &fields[i];
1419 
1420 		strcpy(dst->name, src->name);
1421 		dst->n_bits = src->n_bits;
1422 		dst->offset = st->n_bits;
1423 
1424 		st->n_bits += src->n_bits;
1425 	}
1426 	st->n_fields = n_fields;
1427 
1428 	/* Node add to tailq. */
1429 	TAILQ_INSERT_TAIL(&p->struct_types, st, node);
1430 
1431 	return 0;
1432 }
1433 
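/* Minimal usage sketch for struct type registration (field names and bit
 * widths are illustrative only):
 *
 *     struct rte_swx_field_params fields[] = {
 *         {.name = "ver_ihl", .n_bits = 8},
 *         {.name = "total_len", .n_bits = 16},
 *     };
 *     int status = rte_swx_pipeline_struct_type_register(p, "my_struct",
 *                                                        fields, 2);
 *
 * Each field must be a multiple of 8 bits and no wider than 64 bits, and
 * field names must be unique within the struct type.
 */
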
1434 static int
1435 struct_build(struct rte_swx_pipeline *p)
1436 {
1437 	uint32_t i;
1438 
1439 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1440 		struct thread *t = &p->threads[i];
1441 
1442 		t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1443 		CHECK(t->structs, ENOMEM);
1444 	}
1445 
1446 	return 0;
1447 }
1448 
1449 static void
1450 struct_build_free(struct rte_swx_pipeline *p)
1451 {
1452 	uint32_t i;
1453 
1454 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1455 		struct thread *t = &p->threads[i];
1456 
1457 		free(t->structs);
1458 		t->structs = NULL;
1459 	}
1460 }
1461 
1462 static void
1463 struct_free(struct rte_swx_pipeline *p)
1464 {
1465 	struct_build_free(p);
1466 
1467 	/* Struct types. */
1468 	for ( ; ; ) {
1469 		struct struct_type *elem;
1470 
1471 		elem = TAILQ_FIRST(&p->struct_types);
1472 		if (!elem)
1473 			break;
1474 
1475 		TAILQ_REMOVE(&p->struct_types, elem, node);
1476 		free(elem->fields);
1477 		free(elem);
1478 	}
1479 }
1480 
1481 /*
1482  * Input port.
1483  */
1484 static struct port_in_type *
1485 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1486 {
1487 	struct port_in_type *elem;
1488 
1489 	if (!name)
1490 		return NULL;
1491 
1492 	TAILQ_FOREACH(elem, &p->port_in_types, node)
1493 		if (strcmp(elem->name, name) == 0)
1494 			return elem;
1495 
1496 	return NULL;
1497 }
1498 
1499 int
1500 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
1501 				       const char *name,
1502 				       struct rte_swx_port_in_ops *ops)
1503 {
1504 	struct port_in_type *elem;
1505 
1506 	CHECK(p, EINVAL);
1507 	CHECK_NAME(name, EINVAL);
1508 	CHECK(ops, EINVAL);
1509 	CHECK(ops->create, EINVAL);
1510 	CHECK(ops->free, EINVAL);
1511 	CHECK(ops->pkt_rx, EINVAL);
1512 	CHECK(ops->stats_read, EINVAL);
1513 
1514 	CHECK(!port_in_type_find(p, name), EEXIST);
1515 
1516 	/* Node allocation. */
1517 	elem = calloc(1, sizeof(struct port_in_type));
1518 	CHECK(elem, ENOMEM);
1519 
1520 	/* Node initialization. */
1521 	strcpy(elem->name, name);
1522 	memcpy(&elem->ops, ops, sizeof(*ops));
1523 
1524 	/* Node add to tailq. */
1525 	TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
1526 
1527 	return 0;
1528 }
1529 
1530 static struct port_in *
1531 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1532 {
1533 	struct port_in *port;
1534 
1535 	TAILQ_FOREACH(port, &p->ports_in, node)
1536 		if (port->id == port_id)
1537 			return port;
1538 
1539 	return NULL;
1540 }
1541 
1542 int
1543 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1544 				uint32_t port_id,
1545 				const char *port_type_name,
1546 				void *args)
1547 {
1548 	struct port_in_type *type = NULL;
1549 	struct port_in *port = NULL;
1550 	void *obj = NULL;
1551 
1552 	CHECK(p, EINVAL);
1553 
1554 	CHECK(!port_in_find(p, port_id), EINVAL);
1555 
1556 	CHECK_NAME(port_type_name, EINVAL);
1557 	type = port_in_type_find(p, port_type_name);
1558 	CHECK(type, EINVAL);
1559 
1560 	obj = type->ops.create(args);
1561 	CHECK(obj, ENODEV);
1562 
1563 	/* Node allocation. */
1564 	port = calloc(1, sizeof(struct port_in));
1565 	CHECK(port, ENOMEM);
1566 
1567 	/* Node initialization. */
1568 	port->type = type;
1569 	port->obj = obj;
1570 	port->id = port_id;
1571 
1572 	/* Node add to tailq. */
1573 	TAILQ_INSERT_TAIL(&p->ports_in, port, node);
1574 	if (p->n_ports_in < port_id + 1)
1575 		p->n_ports_in = port_id + 1;
1576 
1577 	return 0;
1578 }
1579 
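/* Note: input port IDs must form a contiguous range starting at 0 and their
 * total count must be a power of two; both conditions are verified by
 * port_in_build() below at pipeline build time.
 */
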
1580 static int
1581 port_in_build(struct rte_swx_pipeline *p)
1582 {
1583 	struct port_in *port;
1584 	uint32_t i;
1585 
1586 	CHECK(p->n_ports_in, EINVAL);
1587 	CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
1588 
1589 	for (i = 0; i < p->n_ports_in; i++)
1590 		CHECK(port_in_find(p, i), EINVAL);
1591 
1592 	p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1593 	CHECK(p->in, ENOMEM);
1594 
1595 	TAILQ_FOREACH(port, &p->ports_in, node) {
1596 		struct port_in_runtime *in = &p->in[port->id];
1597 
1598 		in->pkt_rx = port->type->ops.pkt_rx;
1599 		in->obj = port->obj;
1600 	}
1601 
1602 	return 0;
1603 }
1604 
1605 static void
1606 port_in_build_free(struct rte_swx_pipeline *p)
1607 {
1608 	free(p->in);
1609 	p->in = NULL;
1610 }
1611 
1612 static void
1613 port_in_free(struct rte_swx_pipeline *p)
1614 {
1615 	port_in_build_free(p);
1616 
1617 	/* Input ports. */
1618 	for ( ; ; ) {
1619 		struct port_in *port;
1620 
1621 		port = TAILQ_FIRST(&p->ports_in);
1622 		if (!port)
1623 			break;
1624 
1625 		TAILQ_REMOVE(&p->ports_in, port, node);
1626 		port->type->ops.free(port->obj);
1627 		free(port);
1628 	}
1629 
1630 	/* Input port types. */
1631 	for ( ; ; ) {
1632 		struct port_in_type *elem;
1633 
1634 		elem = TAILQ_FIRST(&p->port_in_types);
1635 		if (!elem)
1636 			break;
1637 
1638 		TAILQ_REMOVE(&p->port_in_types, elem, node);
1639 		free(elem);
1640 	}
1641 }
1642 
1643 /*
1644  * Output port.
1645  */
1646 static struct port_out_type *
1647 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1648 {
1649 	struct port_out_type *elem;
1650 
1651 	if (!name)
1652 		return NULL;
1653 
1654 	TAILQ_FOREACH(elem, &p->port_out_types, node)
1655 		if (!strcmp(elem->name, name))
1656 			return elem;
1657 
1658 	return NULL;
1659 }
1660 
1661 int
1662 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1663 					const char *name,
1664 					struct rte_swx_port_out_ops *ops)
1665 {
1666 	struct port_out_type *elem;
1667 
1668 	CHECK(p, EINVAL);
1669 	CHECK_NAME(name, EINVAL);
1670 	CHECK(ops, EINVAL);
1671 	CHECK(ops->create, EINVAL);
1672 	CHECK(ops->free, EINVAL);
1673 	CHECK(ops->pkt_tx, EINVAL);
1674 	CHECK(ops->stats_read, EINVAL);
1675 
1676 	CHECK(!port_out_type_find(p, name), EEXIST);
1677 
1678 	/* Node allocation. */
1679 	elem = calloc(1, sizeof(struct port_out_type));
1680 	CHECK(elem, ENOMEM);
1681 
1682 	/* Node initialization. */
1683 	strcpy(elem->name, name);
1684 	memcpy(&elem->ops, ops, sizeof(*ops));
1685 
1686 	/* Node add to tailq. */
1687 	TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1688 
1689 	return 0;
1690 }
1691 
1692 static struct port_out *
1693 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1694 {
1695 	struct port_out *port;
1696 
1697 	TAILQ_FOREACH(port, &p->ports_out, node)
1698 		if (port->id == port_id)
1699 			return port;
1700 
1701 	return NULL;
1702 }
1703 
1704 int
1705 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1706 				 uint32_t port_id,
1707 				 const char *port_type_name,
1708 				 void *args)
1709 {
1710 	struct port_out_type *type = NULL;
1711 	struct port_out *port = NULL;
1712 	void *obj = NULL;
1713 
1714 	CHECK(p, EINVAL);
1715 
1716 	CHECK(!port_out_find(p, port_id), EINVAL);
1717 
1718 	CHECK_NAME(port_type_name, EINVAL);
1719 	type = port_out_type_find(p, port_type_name);
1720 	CHECK(type, EINVAL);
1721 
1722 	obj = type->ops.create(args);
1723 	CHECK(obj, ENODEV);
1724 
1725 	/* Node allocation. */
1726 	port = calloc(1, sizeof(struct port_out));
1727 	CHECK(port, ENOMEM);
1728 
1729 	/* Node initialization. */
1730 	port->type = type;
1731 	port->obj = obj;
1732 	port->id = port_id;
1733 
1734 	/* Node add to tailq. */
1735 	TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1736 	if (p->n_ports_out < port_id + 1)
1737 		p->n_ports_out = port_id + 1;
1738 
1739 	return 0;
1740 }
1741 
1742 static int
1743 port_out_build(struct rte_swx_pipeline *p)
1744 {
1745 	struct port_out *port;
1746 	uint32_t i;
1747 
1748 	CHECK(p->n_ports_out, EINVAL);
1749 
1750 	for (i = 0; i < p->n_ports_out; i++)
1751 		CHECK(port_out_find(p, i), EINVAL);
1752 
1753 	p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1754 	CHECK(p->out, ENOMEM);
1755 
1756 	TAILQ_FOREACH(port, &p->ports_out, node) {
1757 		struct port_out_runtime *out = &p->out[port->id];
1758 
1759 		out->pkt_tx = port->type->ops.pkt_tx;
1760 		out->flush = port->type->ops.flush;
1761 		out->obj = port->obj;
1762 	}
1763 
1764 	return 0;
1765 }
1766 
1767 static void
1768 port_out_build_free(struct rte_swx_pipeline *p)
1769 {
1770 	free(p->out);
1771 	p->out = NULL;
1772 }
1773 
1774 static void
1775 port_out_free(struct rte_swx_pipeline *p)
1776 {
1777 	port_out_build_free(p);
1778 
1779 	/* Output ports. */
1780 	for ( ; ; ) {
1781 		struct port_out *port;
1782 
1783 		port = TAILQ_FIRST(&p->ports_out);
1784 		if (!port)
1785 			break;
1786 
1787 		TAILQ_REMOVE(&p->ports_out, port, node);
1788 		port->type->ops.free(port->obj);
1789 		free(port);
1790 	}
1791 
1792 	/* Output port types. */
1793 	for ( ; ; ) {
1794 		struct port_out_type *elem;
1795 
1796 		elem = TAILQ_FIRST(&p->port_out_types);
1797 		if (!elem)
1798 			break;
1799 
1800 		TAILQ_REMOVE(&p->port_out_types, elem, node);
1801 		free(elem);
1802 	}
1803 }
1804 
1805 /*
1806  * Extern object.
1807  */
1808 static struct extern_type *
1809 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1810 {
1811 	struct extern_type *elem;
1812 
1813 	TAILQ_FOREACH(elem, &p->extern_types, node)
1814 		if (strcmp(elem->name, name) == 0)
1815 			return elem;
1816 
1817 	return NULL;
1818 }
1819 
1820 static struct extern_type_member_func *
1821 extern_type_member_func_find(struct extern_type *type, const char *name)
1822 {
1823 	struct extern_type_member_func *elem;
1824 
1825 	TAILQ_FOREACH(elem, &type->funcs, node)
1826 		if (strcmp(elem->name, name) == 0)
1827 			return elem;
1828 
1829 	return NULL;
1830 }
1831 
1832 static struct extern_obj *
1833 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1834 {
1835 	struct extern_obj *elem;
1836 
1837 	TAILQ_FOREACH(elem, &p->extern_objs, node)
1838 		if (strcmp(elem->name, name) == 0)
1839 			return elem;
1840 
1841 	return NULL;
1842 }
1843 
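/* Parse a "e.<object>.<member_func>" reference into the extern object and the
 * member function registered for its type. Returns the member function (and,
 * optionally, the object through *obj) on success, or NULL on failure.
 */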
1844 static struct extern_type_member_func *
1845 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1846 			     const char *name,
1847 			     struct extern_obj **obj)
1848 {
1849 	struct extern_obj *object;
1850 	struct extern_type_member_func *func;
1851 	char *object_name, *func_name;
1852 
1853 	if (name[0] != 'e' || name[1] != '.')
1854 		return NULL;
1855 
1856 	object_name = strdup(&name[2]);
1857 	if (!object_name)
1858 		return NULL;
1859 
1860 	func_name = strchr(object_name, '.');
1861 	if (!func_name) {
1862 		free(object_name);
1863 		return NULL;
1864 	}
1865 
1866 	*func_name = 0;
1867 	func_name++;
1868 
1869 	object = extern_obj_find(p, object_name);
1870 	if (!object) {
1871 		free(object_name);
1872 		return NULL;
1873 	}
1874 
1875 	func = extern_type_member_func_find(object->type, func_name);
1876 	if (!func) {
1877 		free(object_name);
1878 		return NULL;
1879 	}
1880 
1881 	if (obj)
1882 		*obj = object;
1883 
1884 	free(object_name);
1885 	return func;
1886 }
1887 
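/* Parse a "e.<object>.<field>" reference into a field of the extern object's
 * mailbox struct type. Returns the field (and, optionally, the object through
 * *object) on success, or NULL on failure.
 */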
1888 static struct field *
1889 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1890 			       const char *name,
1891 			       struct extern_obj **object)
1892 {
1893 	struct extern_obj *obj;
1894 	struct field *f;
1895 	char *obj_name, *field_name;
1896 
1897 	if ((name[0] != 'e') || (name[1] != '.'))
1898 		return NULL;
1899 
1900 	obj_name = strdup(&name[2]);
1901 	if (!obj_name)
1902 		return NULL;
1903 
1904 	field_name = strchr(obj_name, '.');
1905 	if (!field_name) {
1906 		free(obj_name);
1907 		return NULL;
1908 	}
1909 
1910 	*field_name = 0;
1911 	field_name++;
1912 
1913 	obj = extern_obj_find(p, obj_name);
1914 	if (!obj) {
1915 		free(obj_name);
1916 		return NULL;
1917 	}
1918 
1919 	f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1920 	if (!f) {
1921 		free(obj_name);
1922 		return NULL;
1923 	}
1924 
1925 	if (object)
1926 		*object = obj;
1927 
1928 	free(obj_name);
1929 	return f;
1930 }
1931 
1932 int
1933 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1934 	const char *name,
1935 	const char *mailbox_struct_type_name,
1936 	rte_swx_extern_type_constructor_t constructor,
1937 	rte_swx_extern_type_destructor_t destructor)
1938 {
1939 	struct extern_type *elem;
1940 	struct struct_type *mailbox_struct_type;
1941 
1942 	CHECK(p, EINVAL);
1943 
1944 	CHECK_NAME(name, EINVAL);
1945 	CHECK(!extern_type_find(p, name), EEXIST);
1946 
1947 	CHECK_NAME(mailbox_struct_type_name, EINVAL);
1948 	mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1949 	CHECK(mailbox_struct_type, EINVAL);
1950 
1951 	CHECK(constructor, EINVAL);
1952 	CHECK(destructor, EINVAL);
1953 
1954 	/* Node allocation. */
1955 	elem = calloc(1, sizeof(struct extern_type));
1956 	CHECK(elem, ENOMEM);
1957 
1958 	/* Node initialization. */
1959 	strcpy(elem->name, name);
1960 	elem->mailbox_struct_type = mailbox_struct_type;
1961 	elem->constructor = constructor;
1962 	elem->destructor = destructor;
1963 	TAILQ_INIT(&elem->funcs);
1964 
1965 	/* Node add to tailq. */
1966 	TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1967 
1968 	return 0;
1969 }
1970 
1971 int
1972 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1973 	const char *extern_type_name,
1974 	const char *name,
1975 	rte_swx_extern_type_member_func_t member_func)
1976 {
1977 	struct extern_type *type;
1978 	struct extern_type_member_func *type_member;
1979 
1980 	CHECK(p, EINVAL);
1981 
1982 	CHECK_NAME(extern_type_name, EINVAL);
1983 	type = extern_type_find(p, extern_type_name);
1984 	CHECK(type, EINVAL);
1985 	CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1986 
1987 	CHECK_NAME(name, EINVAL);
1988 	CHECK(!extern_type_member_func_find(type, name), EEXIST);
1989 
1990 	CHECK(member_func, EINVAL);
1991 
1992 	/* Node allocation. */
1993 	type_member = calloc(1, sizeof(struct extern_type_member_func));
1994 	CHECK(type_member, ENOMEM);
1995 
1996 	/* Node initialization. */
1997 	strcpy(type_member->name, name);
1998 	type_member->func = member_func;
1999 	type_member->id = type->n_funcs;
2000 
2001 	/* Node add to tailq. */
2002 	TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
2003 	type->n_funcs++;
2004 
2005 	return 0;
2006 }
2007 
2008 int
2009 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
2010 				      const char *extern_type_name,
2011 				      const char *name,
2012 				      const char *args)
2013 {
2014 	struct extern_type *type;
2015 	struct extern_obj *obj;
2016 	void *obj_handle;
2017 
2018 	CHECK(p, EINVAL);
2019 
2020 	CHECK_NAME(extern_type_name, EINVAL);
2021 	type = extern_type_find(p, extern_type_name);
2022 	CHECK(type, EINVAL);
2023 
2024 	CHECK_NAME(name, EINVAL);
2025 	CHECK(!extern_obj_find(p, name), EEXIST);
2026 
2027 	/* Node allocation. */
2028 	obj = calloc(1, sizeof(struct extern_obj));
2029 	CHECK(obj, ENOMEM);
2030 
2031 	/* Object construction. */
2032 	obj_handle = type->constructor(args);
2033 	if (!obj_handle) {
2034 		free(obj);
2035 		CHECK(0, ENODEV);
2036 	}
2037 
2038 	/* Node initialization. */
2039 	strcpy(obj->name, name);
2040 	obj->type = type;
2041 	obj->obj = obj_handle;
2042 	obj->struct_id = p->n_structs;
2043 	obj->id = p->n_extern_objs;
2044 
2045 	/* Node add to tailq. */
2046 	TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
2047 	p->n_extern_objs++;
2048 	p->n_structs++;
2049 
2050 	return 0;
2051 }
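
/* Illustrative registration order for an extern object (a sketch with
 * hypothetical names, not part of this file): the extern type and its member
 * functions are registered first, then object instances are configured
 * against the registered type. This assumes "counter_mailbox" was previously
 * registered as a struct type and that the constructor, destructor and member
 * function callbacks exist in the application:
 *
 *	rte_swx_pipeline_extern_type_register(p, "counter", "counter_mailbox",
 *					      counter_create, counter_free);
 *	rte_swx_pipeline_extern_type_member_func_register(p, "counter",
 *							  "count", counter_count);
 *	rte_swx_pipeline_extern_object_config(p, "counter", "c0", "size 1024");
 */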
2052 
2053 static int
2054 extern_obj_build(struct rte_swx_pipeline *p)
2055 {
2056 	uint32_t i;
2057 
2058 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2059 		struct thread *t = &p->threads[i];
2060 		struct extern_obj *obj;
2061 
2062 		t->extern_objs = calloc(p->n_extern_objs,
2063 					sizeof(struct extern_obj_runtime));
2064 		CHECK(t->extern_objs, ENOMEM);
2065 
2066 		TAILQ_FOREACH(obj, &p->extern_objs, node) {
2067 			struct extern_obj_runtime *r =
2068 				&t->extern_objs[obj->id];
2069 			struct extern_type_member_func *func;
2070 			uint32_t mailbox_size =
2071 				obj->type->mailbox_struct_type->n_bits / 8;
2072 
2073 			r->obj = obj->obj;
2074 
2075 			r->mailbox = calloc(1, mailbox_size);
2076 			CHECK(r->mailbox, ENOMEM);
2077 
2078 			TAILQ_FOREACH(func, &obj->type->funcs, node)
2079 				r->funcs[func->id] = func->func;
2080 
2081 			t->structs[obj->struct_id] = r->mailbox;
2082 		}
2083 	}
2084 
2085 	return 0;
2086 }
2087 
2088 static void
2089 extern_obj_build_free(struct rte_swx_pipeline *p)
2090 {
2091 	uint32_t i;
2092 
2093 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2094 		struct thread *t = &p->threads[i];
2095 		uint32_t j;
2096 
2097 		if (!t->extern_objs)
2098 			continue;
2099 
2100 		for (j = 0; j < p->n_extern_objs; j++) {
2101 			struct extern_obj_runtime *r = &t->extern_objs[j];
2102 
2103 			free(r->mailbox);
2104 		}
2105 
2106 		free(t->extern_objs);
2107 		t->extern_objs = NULL;
2108 	}
2109 }
2110 
2111 static void
2112 extern_obj_free(struct rte_swx_pipeline *p)
2113 {
2114 	extern_obj_build_free(p);
2115 
2116 	/* Extern objects. */
2117 	for ( ; ; ) {
2118 		struct extern_obj *elem;
2119 
2120 		elem = TAILQ_FIRST(&p->extern_objs);
2121 		if (!elem)
2122 			break;
2123 
2124 		TAILQ_REMOVE(&p->extern_objs, elem, node);
2125 		if (elem->obj)
2126 			elem->type->destructor(elem->obj);
2127 		free(elem);
2128 	}
2129 
2130 	/* Extern types. */
2131 	for ( ; ; ) {
2132 		struct extern_type *elem;
2133 
2134 		elem = TAILQ_FIRST(&p->extern_types);
2135 		if (!elem)
2136 			break;
2137 
2138 		TAILQ_REMOVE(&p->extern_types, elem, node);
2139 
2140 		for ( ; ; ) {
2141 			struct extern_type_member_func *func;
2142 
2143 			func = TAILQ_FIRST(&elem->funcs);
2144 			if (!func)
2145 				break;
2146 
2147 			TAILQ_REMOVE(&elem->funcs, func, node);
2148 			free(func);
2149 		}
2150 
2151 		free(elem);
2152 	}
2153 }
2154 
2155 /*
2156  * Extern function.
2157  */
2158 static struct extern_func *
2159 extern_func_find(struct rte_swx_pipeline *p, const char *name)
2160 {
2161 	struct extern_func *elem;
2162 
2163 	TAILQ_FOREACH(elem, &p->extern_funcs, node)
2164 		if (strcmp(elem->name, name) == 0)
2165 			return elem;
2166 
2167 	return NULL;
2168 }
2169 
2170 static struct extern_func *
2171 extern_func_parse(struct rte_swx_pipeline *p,
2172 		  const char *name)
2173 {
2174 	if (name[0] != 'f' || name[1] != '.')
2175 		return NULL;
2176 
2177 	return extern_func_find(p, &name[2]);
2178 }
2179 
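/* Parse a "f.<func>.<field>" reference into a field of the extern function's
 * mailbox struct type. Returns the field (and, optionally, the function
 * through *function) on success, or NULL on failure.
 */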
2180 static struct field *
2181 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
2182 				const char *name,
2183 				struct extern_func **function)
2184 {
2185 	struct extern_func *func;
2186 	struct field *f;
2187 	char *func_name, *field_name;
2188 
2189 	if ((name[0] != 'f') || (name[1] != '.'))
2190 		return NULL;
2191 
2192 	func_name = strdup(&name[2]);
2193 	if (!func_name)
2194 		return NULL;
2195 
2196 	field_name = strchr(func_name, '.');
2197 	if (!field_name) {
2198 		free(func_name);
2199 		return NULL;
2200 	}
2201 
2202 	*field_name = 0;
2203 	field_name++;
2204 
2205 	func = extern_func_find(p, func_name);
2206 	if (!func) {
2207 		free(func_name);
2208 		return NULL;
2209 	}
2210 
2211 	f = struct_type_field_find(func->mailbox_struct_type, field_name);
2212 	if (!f) {
2213 		free(func_name);
2214 		return NULL;
2215 	}
2216 
2217 	if (function)
2218 		*function = func;
2219 
2220 	free(func_name);
2221 	return f;
2222 }
2223 
2224 int
2225 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
2226 				      const char *name,
2227 				      const char *mailbox_struct_type_name,
2228 				      rte_swx_extern_func_t func)
2229 {
2230 	struct extern_func *f;
2231 	struct struct_type *mailbox_struct_type;
2232 
2233 	CHECK(p, EINVAL);
2234 
2235 	CHECK_NAME(name, EINVAL);
2236 	CHECK(!extern_func_find(p, name), EEXIST);
2237 
2238 	CHECK_NAME(mailbox_struct_type_name, EINVAL);
2239 	mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
2240 	CHECK(mailbox_struct_type, EINVAL);
2241 
2242 	CHECK(func, EINVAL);
2243 
2244 	/* Node allocation. */
2245 	f = calloc(1, sizeof(struct extern_func));
2246 	CHECK(f, ENOMEM);

2247 
2248 	/* Node initialization. */
2249 	strcpy(f->name, name);
2250 	f->mailbox_struct_type = mailbox_struct_type;
2251 	f->func = func;
2252 	f->struct_id = p->n_structs;
2253 	f->id = p->n_extern_funcs;
2254 
2255 	/* Node add to tailq. */
2256 	TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
2257 	p->n_extern_funcs++;
2258 	p->n_structs++;
2259 
2260 	return 0;
2261 }
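
/* Illustrative registration of an extern function (hypothetical names; a
 * sketch rather than part of this file), assuming "hash_mailbox" was
 * previously registered as a struct type and hash_func is an application
 * callback:
 *
 *	rte_swx_pipeline_extern_func_register(p, "hash", "hash_mailbox",
 *					      hash_func);
 */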
2262 
2263 static int
2264 extern_func_build(struct rte_swx_pipeline *p)
2265 {
2266 	uint32_t i;
2267 
2268 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2269 		struct thread *t = &p->threads[i];
2270 		struct extern_func *func;
2271 
2272 		/* Memory allocation. */
2273 		t->extern_funcs = calloc(p->n_extern_funcs,
2274 					 sizeof(struct extern_func_runtime));
2275 		CHECK(t->extern_funcs, ENOMEM);
2276 
2277 		/* Extern function. */
2278 		TAILQ_FOREACH(func, &p->extern_funcs, node) {
2279 			struct extern_func_runtime *r =
2280 				&t->extern_funcs[func->id];
2281 			uint32_t mailbox_size =
2282 				func->mailbox_struct_type->n_bits / 8;
2283 
2284 			r->func = func->func;
2285 
2286 			r->mailbox = calloc(1, mailbox_size);
2287 			CHECK(r->mailbox, ENOMEM);
2288 
2289 			t->structs[func->struct_id] = r->mailbox;
2290 		}
2291 	}
2292 
2293 	return 0;
2294 }
2295 
2296 static void
2297 extern_func_build_free(struct rte_swx_pipeline *p)
2298 {
2299 	uint32_t i;
2300 
2301 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2302 		struct thread *t = &p->threads[i];
2303 		uint32_t j;
2304 
2305 		if (!t->extern_funcs)
2306 			continue;
2307 
2308 		for (j = 0; j < p->n_extern_funcs; j++) {
2309 			struct extern_func_runtime *r = &t->extern_funcs[j];
2310 
2311 			free(r->mailbox);
2312 		}
2313 
2314 		free(t->extern_funcs);
2315 		t->extern_funcs = NULL;
2316 	}
2317 }
2318 
2319 static void
2320 extern_func_free(struct rte_swx_pipeline *p)
2321 {
2322 	extern_func_build_free(p);
2323 
2324 	for ( ; ; ) {
2325 		struct extern_func *elem;
2326 
2327 		elem = TAILQ_FIRST(&p->extern_funcs);
2328 		if (!elem)
2329 			break;
2330 
2331 		TAILQ_REMOVE(&p->extern_funcs, elem, node);
2332 		free(elem);
2333 	}
2334 }
2335 
2336 /*
2337  * Header.
2338  */
2339 static struct header *
2340 header_find(struct rte_swx_pipeline *p, const char *name)
2341 {
2342 	struct header *elem;
2343 
2344 	TAILQ_FOREACH(elem, &p->headers, node)
2345 		if (strcmp(elem->name, name) == 0)
2346 			return elem;
2347 
2348 	return NULL;
2349 }
2350 
2351 static struct header *
2352 header_find_by_struct_id(struct rte_swx_pipeline *p, uint32_t struct_id)
2353 {
2354 	struct header *elem;
2355 
2356 	TAILQ_FOREACH(elem, &p->headers, node)
2357 		if (elem->struct_id == struct_id)
2358 			return elem;
2359 
2360 	return NULL;
2361 }
2362 
2363 static struct header *
2364 header_parse(struct rte_swx_pipeline *p,
2365 	     const char *name)
2366 {
2367 	if (name[0] != 'h' || name[1] != '.')
2368 		return NULL;
2369 
2370 	return header_find(p, &name[2]);
2371 }
2372 
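/* Parse a "h.<header>.<field>" reference into a field of the header's struct
 * type. Returns the field (and, optionally, the header through *header) on
 * success, or NULL on failure.
 */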
2373 static struct field *
2374 header_field_parse(struct rte_swx_pipeline *p,
2375 		   const char *name,
2376 		   struct header **header)
2377 {
2378 	struct header *h;
2379 	struct field *f;
2380 	char *header_name, *field_name;
2381 
2382 	if ((name[0] != 'h') || (name[1] != '.'))
2383 		return NULL;
2384 
2385 	header_name = strdup(&name[2]);
2386 	if (!header_name)
2387 		return NULL;
2388 
2389 	field_name = strchr(header_name, '.');
2390 	if (!field_name) {
2391 		free(header_name);
2392 		return NULL;
2393 	}
2394 
2395 	*field_name = 0;
2396 	field_name++;
2397 
2398 	h = header_find(p, header_name);
2399 	if (!h) {
2400 		free(header_name);
2401 		return NULL;
2402 	}
2403 
2404 	f = struct_type_field_find(h->st, field_name);
2405 	if (!f) {
2406 		free(header_name);
2407 		return NULL;
2408 	}
2409 
2410 	if (header)
2411 		*header = h;
2412 
2413 	free(header_name);
2414 	return f;
2415 }
2416 
2417 int
2418 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
2419 					const char *name,
2420 					const char *struct_type_name)
2421 {
2422 	struct struct_type *st;
2423 	struct header *h;
2424 	size_t n_headers_max;
2425 
2426 	CHECK(p, EINVAL);
2427 	CHECK_NAME(name, EINVAL);
2428 	CHECK_NAME(struct_type_name, EINVAL);
2429 
2430 	CHECK(!header_find(p, name), EEXIST);
2431 
2432 	st = struct_type_find(p, struct_type_name);
2433 	CHECK(st, EINVAL);
2434 
2435 	n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
2436 	CHECK(p->n_headers < n_headers_max, ENOSPC);
2437 
2438 	/* Node allocation. */
2439 	h = calloc(1, sizeof(struct header));
2440 	CHECK(h, ENOMEM);
2441 
2442 	/* Node initialization. */
2443 	strcpy(h->name, name);
2444 	h->st = st;
2445 	h->struct_id = p->n_structs;
2446 	h->id = p->n_headers;
2447 
2448 	/* Node add to tailq. */
2449 	TAILQ_INSERT_TAIL(&p->headers, h, node);
2450 	p->n_headers++;
2451 	p->n_structs++;
2452 
2453 	return 0;
2454 }
2455 
2456 static int
2457 header_build(struct rte_swx_pipeline *p)
2458 {
2459 	struct header *h;
2460 	uint32_t n_bytes = 0, i;
2461 
2462 	TAILQ_FOREACH(h, &p->headers, node) {
2463 		n_bytes += h->st->n_bits / 8;
2464 	}
2465 
2466 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2467 		struct thread *t = &p->threads[i];
2468 		uint32_t offset = 0;
2469 
2470 		t->headers = calloc(p->n_headers,
2471 				    sizeof(struct header_runtime));
2472 		CHECK(t->headers, ENOMEM);
2473 
2474 		t->headers_out = calloc(p->n_headers,
2475 					sizeof(struct header_out_runtime));
2476 		CHECK(t->headers_out, ENOMEM);
2477 
2478 		t->header_storage = calloc(1, n_bytes);
2479 		CHECK(t->header_storage, ENOMEM);
2480 
2481 		t->header_out_storage = calloc(1, n_bytes);
2482 		CHECK(t->header_out_storage, ENOMEM);
2483 
2484 		TAILQ_FOREACH(h, &p->headers, node) {
2485 			uint8_t *header_storage;
2486 
2487 			header_storage = &t->header_storage[offset];
2488 			offset += h->st->n_bits / 8;
2489 
2490 			t->headers[h->id].ptr0 = header_storage;
2491 			t->structs[h->struct_id] = header_storage;
2492 		}
2493 	}
2494 
2495 	return 0;
2496 }
2497 
2498 static void
2499 header_build_free(struct rte_swx_pipeline *p)
2500 {
2501 	uint32_t i;
2502 
2503 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2504 		struct thread *t = &p->threads[i];
2505 
2506 		free(t->headers_out);
2507 		t->headers_out = NULL;
2508 
2509 		free(t->headers);
2510 		t->headers = NULL;
2511 
2512 		free(t->header_out_storage);
2513 		t->header_out_storage = NULL;
2514 
2515 		free(t->header_storage);
2516 		t->header_storage = NULL;
2517 	}
2518 }
2519 
2520 static void
2521 header_free(struct rte_swx_pipeline *p)
2522 {
2523 	header_build_free(p);
2524 
2525 	for ( ; ; ) {
2526 		struct header *elem;
2527 
2528 		elem = TAILQ_FIRST(&p->headers);
2529 		if (!elem)
2530 			break;
2531 
2532 		TAILQ_REMOVE(&p->headers, elem, node);
2533 		free(elem);
2534 	}
2535 }
2536 
2537 /*
2538  * Meta-data.
2539  */
2540 static struct field *
2541 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2542 {
2543 	if (!p->metadata_st)
2544 		return NULL;
2545 
2546 	if (name[0] != 'm' || name[1] != '.')
2547 		return NULL;
2548 
2549 	return struct_type_field_find(p->metadata_st, &name[2]);
2550 }
2551 
2552 int
2553 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
2554 					  const char *struct_type_name)
2555 {
2556 	struct struct_type *st = NULL;
2557 
2558 	CHECK(p, EINVAL);
2559 
2560 	CHECK_NAME(struct_type_name, EINVAL);
2561 	st = struct_type_find(p, struct_type_name);
2562 	CHECK(st, EINVAL);
2563 	CHECK(!p->metadata_st, EINVAL);
2564 
2565 	p->metadata_st = st;
2566 	p->metadata_struct_id = p->n_structs;
2567 
2568 	p->n_structs++;
2569 
2570 	return 0;
2571 }
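
/* Illustrative packet header and meta-data registration (a sketch with
 * hypothetical names, not part of this file), assuming "ethernet_h" and
 * "metadata_t" were previously registered as struct types:
 *
 *	rte_swx_pipeline_packet_header_register(p, "ethernet", "ethernet_h");
 *	rte_swx_pipeline_packet_metadata_register(p, "metadata_t");
 */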
2572 
2573 static int
2574 metadata_build(struct rte_swx_pipeline *p)
2575 {
2576 	uint32_t n_bytes = p->metadata_st->n_bits / 8;
2577 	uint32_t i;
2578 
2579 	/* Thread-level initialization. */
2580 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2581 		struct thread *t = &p->threads[i];
2582 		uint8_t *metadata;
2583 
2584 		metadata = calloc(1, n_bytes);
2585 		CHECK(metadata, ENOMEM);
2586 
2587 		t->metadata = metadata;
2588 		t->structs[p->metadata_struct_id] = metadata;
2589 	}
2590 
2591 	return 0;
2592 }
2593 
2594 static void
2595 metadata_build_free(struct rte_swx_pipeline *p)
2596 {
2597 	uint32_t i;
2598 
2599 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2600 		struct thread *t = &p->threads[i];
2601 
2602 		free(t->metadata);
2603 		t->metadata = NULL;
2604 	}
2605 }
2606 
2607 static void
2608 metadata_free(struct rte_swx_pipeline *p)
2609 {
2610 	metadata_build_free(p);
2611 }
2612 
2613 /*
2614  * Instruction.
2615  */
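/* Instruction type suffix conventions used below: H stands for a header field
 * operand (network byte order), M for a meta-data, action data or mailbox
 * field operand (host byte order), and I for an immediate value. The first
 * letter refers to the destination and the second to the source, e.g.
 * INSTR_MOV_HM moves a non-header field into a header field.
 */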
2616 static int
2617 instruction_is_tx(enum instruction_type type)
2618 {
2619 	switch (type) {
2620 	case INSTR_TX:
2621 	case INSTR_TX_I:
2622 		return 1;
2623 
2624 	default:
2625 		return 0;
2626 	}
2627 }
2628 
2629 static int
2630 instruction_is_jmp(struct instruction *instr)
2631 {
2632 	switch (instr->type) {
2633 	case INSTR_JMP:
2634 	case INSTR_JMP_VALID:
2635 	case INSTR_JMP_INVALID:
2636 	case INSTR_JMP_HIT:
2637 	case INSTR_JMP_MISS:
2638 	case INSTR_JMP_ACTION_HIT:
2639 	case INSTR_JMP_ACTION_MISS:
2640 	case INSTR_JMP_EQ:
2641 	case INSTR_JMP_EQ_MH:
2642 	case INSTR_JMP_EQ_HM:
2643 	case INSTR_JMP_EQ_HH:
2644 	case INSTR_JMP_EQ_I:
2645 	case INSTR_JMP_NEQ:
2646 	case INSTR_JMP_NEQ_MH:
2647 	case INSTR_JMP_NEQ_HM:
2648 	case INSTR_JMP_NEQ_HH:
2649 	case INSTR_JMP_NEQ_I:
2650 	case INSTR_JMP_LT:
2651 	case INSTR_JMP_LT_MH:
2652 	case INSTR_JMP_LT_HM:
2653 	case INSTR_JMP_LT_HH:
2654 	case INSTR_JMP_LT_MI:
2655 	case INSTR_JMP_LT_HI:
2656 	case INSTR_JMP_GT:
2657 	case INSTR_JMP_GT_MH:
2658 	case INSTR_JMP_GT_HM:
2659 	case INSTR_JMP_GT_HH:
2660 	case INSTR_JMP_GT_MI:
2661 	case INSTR_JMP_GT_HI:
2662 		return 1;
2663 
2664 	default:
2665 		return 0;
2666 	}
2667 }
2668 
2669 static struct field *
2670 action_field_parse(struct action *action, const char *name);
2671 
2672 static struct field *
2673 struct_field_parse(struct rte_swx_pipeline *p,
2674 		   struct action *action,
2675 		   const char *name,
2676 		   uint32_t *struct_id)
2677 {
2678 	struct field *f;
2679 
2680 	switch (name[0]) {
2681 	case 'h':
2682 	{
2683 		struct header *header;
2684 
2685 		f = header_field_parse(p, name, &header);
2686 		if (!f)
2687 			return NULL;
2688 
2689 		*struct_id = header->struct_id;
2690 		return f;
2691 	}
2692 
2693 	case 'm':
2694 	{
2695 		f = metadata_field_parse(p, name);
2696 		if (!f)
2697 			return NULL;
2698 
2699 		*struct_id = p->metadata_struct_id;
2700 		return f;
2701 	}
2702 
2703 	case 't':
2704 	{
2705 		if (!action)
2706 			return NULL;
2707 
2708 		f = action_field_parse(action, name);
2709 		if (!f)
2710 			return NULL;
2711 
2712 		*struct_id = 0;
2713 		return f;
2714 	}
2715 
2716 	case 'e':
2717 	{
2718 		struct extern_obj *obj;
2719 
2720 		f = extern_obj_mailbox_field_parse(p, name, &obj);
2721 		if (!f)
2722 			return NULL;
2723 
2724 		*struct_id = obj->struct_id;
2725 		return f;
2726 	}
2727 
2728 	case 'f':
2729 	{
2730 		struct extern_func *func;
2731 
2732 		f = extern_func_mailbox_field_parse(p, name, &func);
2733 		if (!f)
2734 			return NULL;
2735 
2736 		*struct_id = func->struct_id;
2737 		return f;
2738 	}
2739 
2740 	default:
2741 		return NULL;
2742 	}
2743 }
2744 
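/* Round-robin over the input ports and the threads. The bit-mask wrap-around
 * below relies on the number of input ports and on RTE_SWX_PIPELINE_THREADS_MAX
 * being powers of two.
 */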
2745 static inline void
2746 pipeline_port_inc(struct rte_swx_pipeline *p)
2747 {
2748 	p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
2749 }
2750 
2751 static inline void
2752 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2753 {
2754 	t->ip = p->instructions;
2755 }
2756 
2757 static inline void
2758 thread_ip_set(struct thread *t, struct instruction *ip)
2759 {
2760 	t->ip = ip;
2761 }
2762 
2763 static inline void
2764 thread_ip_action_call(struct rte_swx_pipeline *p,
2765 		      struct thread *t,
2766 		      uint32_t action_id)
2767 {
2768 	t->ret = t->ip + 1;
2769 	t->ip = p->action_instructions[action_id];
2770 }
2771 
2772 static inline void
2773 thread_ip_inc(struct rte_swx_pipeline *p);
2774 
2775 static inline void
2776 thread_ip_inc(struct rte_swx_pipeline *p)
2777 {
2778 	struct thread *t = &p->threads[p->thread_id];
2779 
2780 	t->ip++;
2781 }
2782 
2783 static inline void
2784 thread_ip_inc_cond(struct thread *t, int cond)
2785 {
2786 	t->ip += cond;
2787 }
2788 
2789 static inline void
2790 thread_yield(struct rte_swx_pipeline *p)
2791 {
2792 	p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
2793 }
2794 
2795 static inline void
2796 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2797 {
2798 	p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
2799 }
2800 
2801 /*
2802  * rx.
2803  */
2804 static int
2805 instr_rx_translate(struct rte_swx_pipeline *p,
2806 		   struct action *action,
2807 		   char **tokens,
2808 		   int n_tokens,
2809 		   struct instruction *instr,
2810 		   struct instruction_data *data __rte_unused)
2811 {
2812 	struct field *f;
2813 
2814 	CHECK(!action, EINVAL);
2815 	CHECK(n_tokens == 2, EINVAL);
2816 
2817 	f = metadata_field_parse(p, tokens[1]);
2818 	CHECK(f, EINVAL);
2819 
2820 	instr->type = INSTR_RX;
2821 	instr->io.io.offset = f->offset / 8;
2822 	instr->io.io.n_bits = f->n_bits;
2823 	return 0;
2824 }
2825 
2826 static inline void
2827 instr_rx_exec(struct rte_swx_pipeline *p);
2828 
2829 static inline void
2830 instr_rx_exec(struct rte_swx_pipeline *p)
2831 {
2832 	struct thread *t = &p->threads[p->thread_id];
2833 	struct instruction *ip = t->ip;
2834 	struct port_in_runtime *port = &p->in[p->port_id];
2835 	struct rte_swx_pkt *pkt = &t->pkt;
2836 	int pkt_received;
2837 
2838 	/* Packet. */
2839 	pkt_received = port->pkt_rx(port->obj, pkt);
2840 	t->ptr = &pkt->pkt[pkt->offset];
2841 	rte_prefetch0(t->ptr);
2842 
2843 	TRACE("[Thread %2u] rx %s from port %u\n",
2844 	      p->thread_id,
2845 	      pkt_received ? "1 pkt" : "0 pkts",
2846 	      p->port_id);
2847 
2848 	/* Headers. */
2849 	t->valid_headers = 0;
2850 	t->n_headers_out = 0;
2851 
2852 	/* Meta-data. */
2853 	METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2854 
2855 	/* Tables. */
2856 	t->table_state = p->table_state;
2857 
2858 	/* Thread. */
2859 	pipeline_port_inc(p);
2860 	thread_ip_inc_cond(t, pkt_received);
2861 	thread_yield(p);
2862 }
2863 
2864 /*
2865  * tx.
2866  */
2867 static int
2868 instr_tx_translate(struct rte_swx_pipeline *p,
2869 		   struct action *action __rte_unused,
2870 		   char **tokens,
2871 		   int n_tokens,
2872 		   struct instruction *instr,
2873 		   struct instruction_data *data __rte_unused)
2874 {
2875 	char *port = tokens[1];
2876 	struct field *f;
2877 	uint32_t port_val;
2878 
2879 	CHECK(n_tokens == 2, EINVAL);
2880 
2881 	f = metadata_field_parse(p, port);
2882 	if (f) {
2883 		instr->type = INSTR_TX;
2884 		instr->io.io.offset = f->offset / 8;
2885 		instr->io.io.n_bits = f->n_bits;
2886 		return 0;
2887 	}
2888 
2889 	/* TX_I. */
2890 	port_val = strtoul(port, &port, 0);
2891 	CHECK(!port[0], EINVAL);
2892 
2893 	instr->type = INSTR_TX_I;
2894 	instr->io.io.val = port_val;
2895 	return 0;
2896 }
2897 
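/* The drop instruction is translated to an immediate TX to the highest-index
 * output port, so the last output port is expected to act as the packet drop
 * port.
 */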
2898 static int
2899 instr_drop_translate(struct rte_swx_pipeline *p,
2900 		     struct action *action __rte_unused,
2901 		     char **tokens __rte_unused,
2902 		     int n_tokens,
2903 		     struct instruction *instr,
2904 		     struct instruction_data *data __rte_unused)
2905 {
2906 	CHECK(n_tokens == 1, EINVAL);
2907 
2908 	/* TX_I. */
2909 	instr->type = INSTR_TX_I;
2910 	instr->io.io.val = p->n_ports_out - 1;
2911 	return 0;
2912 }
2913 
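/* Adjust the packet headers at TX time based on the headers emitted by the
 * current thread: fast paths for the no-change/decapsulation and the
 * encapsulation cases, plus a generic copy through the header out storage for
 * everything else.
 */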
2914 static inline void
2915 emit_handler(struct thread *t)
2916 {
2917 	struct header_out_runtime *h0 = &t->headers_out[0];
2918 	struct header_out_runtime *h1 = &t->headers_out[1];
2919 	uint32_t offset = 0, i;
2920 
2921 	/* No header change or header decapsulation. */
2922 	if ((t->n_headers_out == 1) &&
2923 	    (h0->ptr + h0->n_bytes == t->ptr)) {
2924 		TRACE("Emit handler: no header change or header decap.\n");
2925 
2926 		t->pkt.offset -= h0->n_bytes;
2927 		t->pkt.length += h0->n_bytes;
2928 
2929 		return;
2930 	}
2931 
2932 	/* Header encapsulation (optionally, with prior header decapsulation). */
2933 	if ((t->n_headers_out == 2) &&
2934 	    (h1->ptr + h1->n_bytes == t->ptr) &&
2935 	    (h0->ptr == h0->ptr0)) {
2936 		uint32_t offset;
2937 
2938 		TRACE("Emit handler: header encapsulation.\n");
2939 
2940 		offset = h0->n_bytes + h1->n_bytes;
2941 		memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2942 		t->pkt.offset -= offset;
2943 		t->pkt.length += offset;
2944 
2945 		return;
2946 	}
2947 
2948 	/* Header insertion. */
2949 	/* TBD */
2950 
2951 	/* Header extraction. */
2952 	/* TBD */
2953 
2954 	/* For any other case. */
2955 	TRACE("Emit handler: complex case.\n");
2956 
2957 	for (i = 0; i < t->n_headers_out; i++) {
2958 		struct header_out_runtime *h = &t->headers_out[i];
2959 
2960 		memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2961 		offset += h->n_bytes;
2962 	}
2963 
2964 	if (offset) {
2965 		memcpy(t->ptr - offset, t->header_out_storage, offset);
2966 		t->pkt.offset -= offset;
2967 		t->pkt.length += offset;
2968 	}
2969 }
2970 
2971 static inline void
2972 instr_tx_exec(struct rte_swx_pipeline *p);
2973 
2974 static inline void
2975 instr_tx_exec(struct rte_swx_pipeline *p)
2976 {
2977 	struct thread *t = &p->threads[p->thread_id];
2978 	struct instruction *ip = t->ip;
2979 	uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2980 	struct port_out_runtime *port = &p->out[port_id];
2981 	struct rte_swx_pkt *pkt = &t->pkt;
2982 
2983 	TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2984 	      p->thread_id,
2985 	      (uint32_t)port_id);
2986 
2987 	/* Headers. */
2988 	emit_handler(t);
2989 
2990 	/* Packet. */
2991 	port->pkt_tx(port->obj, pkt);
2992 
2993 	/* Thread. */
2994 	thread_ip_reset(p, t);
2995 	instr_rx_exec(p);
2996 }
2997 
2998 static inline void
2999 instr_tx_i_exec(struct rte_swx_pipeline *p)
3000 {
3001 	struct thread *t = &p->threads[p->thread_id];
3002 	struct instruction *ip = t->ip;
3003 	uint64_t port_id = ip->io.io.val;
3004 	struct port_out_runtime *port = &p->out[port_id];
3005 	struct rte_swx_pkt *pkt = &t->pkt;
3006 
3007 	TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
3008 	      p->thread_id,
3009 	      (uint32_t)port_id);
3010 
3011 	/* Headers. */
3012 	emit_handler(t);
3013 
3014 	/* Packet. */
3015 	port->pkt_tx(port->obj, pkt);
3016 
3017 	/* Thread. */
3018 	thread_ip_reset(p, t);
3019 	instr_rx_exec(p);
3020 }
3021 
3022 /*
3023  * extract.
3024  */
3025 static int
3026 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
3027 			    struct action *action,
3028 			    char **tokens,
3029 			    int n_tokens,
3030 			    struct instruction *instr,
3031 			    struct instruction_data *data __rte_unused)
3032 {
3033 	struct header *h;
3034 
3035 	CHECK(!action, EINVAL);
3036 	CHECK(n_tokens == 2, EINVAL);
3037 
3038 	h = header_parse(p, tokens[1]);
3039 	CHECK(h, EINVAL);
3040 
3041 	instr->type = INSTR_HDR_EXTRACT;
3042 	instr->io.hdr.header_id[0] = h->id;
3043 	instr->io.hdr.struct_id[0] = h->struct_id;
3044 	instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
3045 	return 0;
3046 }
3047 
3048 static inline void
3049 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
3050 
3051 static inline void
3052 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
3053 {
3054 	struct thread *t = &p->threads[p->thread_id];
3055 	struct instruction *ip = t->ip;
3056 	uint64_t valid_headers = t->valid_headers;
3057 	uint8_t *ptr = t->ptr;
3058 	uint32_t offset = t->pkt.offset;
3059 	uint32_t length = t->pkt.length;
3060 	uint32_t i;
3061 
3062 	for (i = 0; i < n_extract; i++) {
3063 		uint32_t header_id = ip->io.hdr.header_id[i];
3064 		uint32_t struct_id = ip->io.hdr.struct_id[i];
3065 		uint32_t n_bytes = ip->io.hdr.n_bytes[i];
3066 
3067 		TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
3068 		      p->thread_id,
3069 		      header_id,
3070 		      n_bytes);
3071 
3072 		/* Headers. */
3073 		t->structs[struct_id] = ptr;
3074 		valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3075 
3076 		/* Packet. */
3077 		offset += n_bytes;
3078 		length -= n_bytes;
3079 		ptr += n_bytes;
3080 	}
3081 
3082 	/* Headers. */
3083 	t->valid_headers = valid_headers;
3084 
3085 	/* Packet. */
3086 	t->pkt.offset = offset;
3087 	t->pkt.length = length;
3088 	t->ptr = ptr;
3089 }
3090 
3091 static inline void
3092 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
3093 {
3094 	__instr_hdr_extract_exec(p, 1);
3095 
3096 	/* Thread. */
3097 	thread_ip_inc(p);
3098 }
3099 
3100 static inline void
3101 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
3102 {
3103 	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3104 	      p->thread_id);
3105 
3106 	__instr_hdr_extract_exec(p, 2);
3107 
3108 	/* Thread. */
3109 	thread_ip_inc(p);
3110 }
3111 
3112 static inline void
3113 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
3114 {
3115 	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3116 	      p->thread_id);
3117 
3118 	__instr_hdr_extract_exec(p, 3);
3119 
3120 	/* Thread. */
3121 	thread_ip_inc(p);
3122 }
3123 
3124 static inline void
3125 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
3126 {
3127 	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3128 	      p->thread_id);
3129 
3130 	__instr_hdr_extract_exec(p, 4);
3131 
3132 	/* Thread. */
3133 	thread_ip_inc(p);
3134 }
3135 
3136 static inline void
3137 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
3138 {
3139 	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3140 	      p->thread_id);
3141 
3142 	__instr_hdr_extract_exec(p, 5);
3143 
3144 	/* Thread. */
3145 	thread_ip_inc(p);
3146 }
3147 
3148 static inline void
3149 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
3150 {
3151 	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3152 	      p->thread_id);
3153 
3154 	__instr_hdr_extract_exec(p, 6);
3155 
3156 	/* Thread. */
3157 	thread_ip_inc(p);
3158 }
3159 
3160 static inline void
3161 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
3162 {
3163 	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3164 	      p->thread_id);
3165 
3166 	__instr_hdr_extract_exec(p, 7);
3167 
3168 	/* Thread. */
3169 	thread_ip_inc(p);
3170 }
3171 
3172 static inline void
3173 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
3174 {
3175 	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3176 	      p->thread_id);
3177 
3178 	__instr_hdr_extract_exec(p, 8);
3179 
3180 	/* Thread. */
3181 	thread_ip_inc(p);
3182 }
3183 
3184 /*
3185  * emit.
3186  */
3187 static int
3188 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
3189 			 struct action *action __rte_unused,
3190 			 char **tokens,
3191 			 int n_tokens,
3192 			 struct instruction *instr,
3193 			 struct instruction_data *data __rte_unused)
3194 {
3195 	struct header *h;
3196 
3197 	CHECK(n_tokens == 2, EINVAL);
3198 
3199 	h = header_parse(p, tokens[1]);
3200 	CHECK(h, EINVAL);
3201 
3202 	instr->type = INSTR_HDR_EMIT;
3203 	instr->io.hdr.header_id[0] = h->id;
3204 	instr->io.hdr.struct_id[0] = h->struct_id;
3205 	instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
3206 	return 0;
3207 }
3208 
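/* Append up to n_emit valid headers to the emit list of the current thread,
 * merging each header that is contiguous in memory with the previously
 * emitted one into a single headers_out entry.
 */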
3209 static inline void
3210 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
3211 
3212 static inline void
3213 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
3214 {
3215 	struct thread *t = &p->threads[p->thread_id];
3216 	struct instruction *ip = t->ip;
3217 	uint64_t valid_headers = t->valid_headers;
3218 	uint32_t n_headers_out = t->n_headers_out;
3219 	struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
3220 	uint8_t *ho_ptr = NULL;
3221 	uint32_t ho_nbytes = 0, first = 1, i;
3222 
3223 	for (i = 0; i < n_emit; i++) {
3224 		uint32_t header_id = ip->io.hdr.header_id[i];
3225 		uint32_t struct_id = ip->io.hdr.struct_id[i];
3226 		uint32_t n_bytes = ip->io.hdr.n_bytes[i];
3227 
3228 		struct header_runtime *hi = &t->headers[header_id];
3229 		uint8_t *hi_ptr = t->structs[struct_id];
3230 
3231 		if (!MASK64_BIT_GET(valid_headers, header_id))
3232 			continue;
3233 
3234 		TRACE("[Thread %2u]: emit header %u\n",
3235 		      p->thread_id,
3236 		      header_id);
3237 
3238 		/* Headers. */
3239 		if (first) {
3240 			first = 0;
3241 
3242 			if (!t->n_headers_out) {
3243 				ho = &t->headers_out[0];
3244 
3245 				ho->ptr0 = hi->ptr0;
3246 				ho->ptr = hi_ptr;
3247 
3248 				ho_ptr = hi_ptr;
3249 				ho_nbytes = n_bytes;
3250 
3251 				n_headers_out = 1;
3252 
3253 				continue;
3254 			} else {
3255 				ho_ptr = ho->ptr;
3256 				ho_nbytes = ho->n_bytes;
3257 			}
3258 		}
3259 
3260 		if (ho_ptr + ho_nbytes == hi_ptr) {
3261 			ho_nbytes += n_bytes;
3262 		} else {
3263 			ho->n_bytes = ho_nbytes;
3264 
3265 			ho++;
3266 			ho->ptr0 = hi->ptr0;
3267 			ho->ptr = hi_ptr;
3268 
3269 			ho_ptr = hi_ptr;
3270 			ho_nbytes = n_bytes;
3271 
3272 			n_headers_out++;
3273 		}
3274 	}
3275 
3276 	ho->n_bytes = ho_nbytes;
3277 	t->n_headers_out = n_headers_out;
3278 }
3279 
3280 static inline void
3281 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
3282 {
3283 	__instr_hdr_emit_exec(p, 1);
3284 
3285 	/* Thread. */
3286 	thread_ip_inc(p);
3287 }
3288 
3289 static inline void
3290 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
3291 {
3292 	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3293 	      p->thread_id);
3294 
3295 	__instr_hdr_emit_exec(p, 1);
3296 	instr_tx_exec(p);
3297 }
3298 
3299 static inline void
3300 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
3301 {
3302 	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3303 	      p->thread_id);
3304 
3305 	__instr_hdr_emit_exec(p, 2);
3306 	instr_tx_exec(p);
3307 }
3308 
3309 static inline void
3310 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
3311 {
3312 	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3313 	      p->thread_id);
3314 
3315 	__instr_hdr_emit_exec(p, 3);
3316 	instr_tx_exec(p);
3317 }
3318 
3319 static inline void
3320 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
3321 {
3322 	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3323 	      p->thread_id);
3324 
3325 	__instr_hdr_emit_exec(p, 4);
3326 	instr_tx_exec(p);
3327 }
3328 
3329 static inline void
3330 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
3331 {
3332 	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3333 	      p->thread_id);
3334 
3335 	__instr_hdr_emit_exec(p, 5);
3336 	instr_tx_exec(p);
3337 }
3338 
3339 static inline void
3340 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
3341 {
3342 	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3343 	      p->thread_id);
3344 
3345 	__instr_hdr_emit_exec(p, 6);
3346 	instr_tx_exec(p);
3347 }
3348 
3349 static inline void
3350 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
3351 {
3352 	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3353 	      p->thread_id);
3354 
3355 	__instr_hdr_emit_exec(p, 7);
3356 	instr_tx_exec(p);
3357 }
3358 
3359 static inline void
3360 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
3361 {
3362 	TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
3363 	      p->thread_id);
3364 
3365 	__instr_hdr_emit_exec(p, 8);
3366 	instr_tx_exec(p);
3367 }
3368 
3369 /*
3370  * validate.
3371  */
3372 static int
3373 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
3374 			     struct action *action __rte_unused,
3375 			     char **tokens,
3376 			     int n_tokens,
3377 			     struct instruction *instr,
3378 			     struct instruction_data *data __rte_unused)
3379 {
3380 	struct header *h;
3381 
3382 	CHECK(n_tokens == 2, EINVAL);
3383 
3384 	h = header_parse(p, tokens[1]);
3385 	CHECK(h, EINVAL);
3386 
3387 	instr->type = INSTR_HDR_VALIDATE;
3388 	instr->valid.header_id = h->id;
3389 	return 0;
3390 }
3391 
3392 static inline void
3393 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
3394 {
3395 	struct thread *t = &p->threads[p->thread_id];
3396 	struct instruction *ip = t->ip;
3397 	uint32_t header_id = ip->valid.header_id;
3398 
3399 	TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
3400 
3401 	/* Headers. */
3402 	t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
3403 
3404 	/* Thread. */
3405 	thread_ip_inc(p);
3406 }
3407 
3408 /*
3409  * invalidate.
3410  */
3411 static int
3412 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
3413 			       struct action *action __rte_unused,
3414 			       char **tokens,
3415 			       int n_tokens,
3416 			       struct instruction *instr,
3417 			       struct instruction_data *data __rte_unused)
3418 {
3419 	struct header *h;
3420 
3421 	CHECK(n_tokens == 2, EINVAL);
3422 
3423 	h = header_parse(p, tokens[1]);
3424 	CHECK(h, EINVAL);
3425 
3426 	instr->type = INSTR_HDR_INVALIDATE;
3427 	instr->valid.header_id = h->id;
3428 	return 0;
3429 }
3430 
3431 static inline void
3432 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
3433 {
3434 	struct thread *t = &p->threads[p->thread_id];
3435 	struct instruction *ip = t->ip;
3436 	uint32_t header_id = ip->valid.header_id;
3437 
3438 	TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
3439 
3440 	/* Headers. */
3441 	t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
3442 
3443 	/* Thread. */
3444 	thread_ip_inc(p);
3445 }
3446 
3447 /*
3448  * table.
3449  */
3450 static struct table *
3451 table_find(struct rte_swx_pipeline *p, const char *name);
3452 
3453 static int
3454 instr_table_translate(struct rte_swx_pipeline *p,
3455 		      struct action *action,
3456 		      char **tokens,
3457 		      int n_tokens,
3458 		      struct instruction *instr,
3459 		      struct instruction_data *data __rte_unused)
3460 {
3461 	struct table *t;
3462 
3463 	CHECK(!action, EINVAL);
3464 	CHECK(n_tokens == 2, EINVAL);
3465 
3466 	t = table_find(p, tokens[1]);
3467 	CHECK(t, EINVAL);
3468 
3469 	instr->type = INSTR_TABLE;
3470 	instr->table.table_id = t->id;
3471 	return 0;
3472 }
3473 
3474 static inline void
3475 instr_table_exec(struct rte_swx_pipeline *p)
3476 {
3477 	struct thread *t = &p->threads[p->thread_id];
3478 	struct instruction *ip = t->ip;
3479 	uint32_t table_id = ip->table.table_id;
3480 	struct rte_swx_table_state *ts = &t->table_state[table_id];
3481 	struct table_runtime *table = &t->tables[table_id];
3482 	struct table_statistics *stats = &p->table_stats[table_id];
3483 	uint64_t action_id, n_pkts_hit, n_pkts_action;
3484 	uint8_t *action_data;
3485 	int done, hit;
3486 
3487 	/* Table. */
3488 	done = table->func(ts->obj,
3489 			   table->mailbox,
3490 			   table->key,
3491 			   &action_id,
3492 			   &action_data,
3493 			   &hit);
3494 	if (!done) {
3495 		/* Thread. */
3496 		TRACE("[Thread %2u] table %u (not finalized)\n",
3497 		      p->thread_id,
3498 		      table_id);
3499 
3500 		thread_yield(p);
3501 		return;
3502 	}
3503 
3504 	action_id = hit ? action_id : ts->default_action_id;
3505 	action_data = hit ? action_data : ts->default_action_data;
3506 	n_pkts_hit = stats->n_pkts_hit[hit];
3507 	n_pkts_action = stats->n_pkts_action[action_id];
3508 
3509 	TRACE("[Thread %2u] table %u (%s, action %u)\n",
3510 	      p->thread_id,
3511 	      table_id,
3512 	      hit ? "hit" : "miss",
3513 	      (uint32_t)action_id);
3514 
3515 	t->action_id = action_id;
3516 	t->structs[0] = action_data;
3517 	t->hit = hit;
3518 	stats->n_pkts_hit[hit] = n_pkts_hit + 1;
3519 	stats->n_pkts_action[action_id] = n_pkts_action + 1;
3520 
3521 	/* Thread. */
3522 	thread_ip_action_call(p, t, action_id);
3523 }
3524 
3525 /*
3526  * extern.
3527  */
3528 static int
3529 instr_extern_translate(struct rte_swx_pipeline *p,
3530 		       struct action *action __rte_unused,
3531 		       char **tokens,
3532 		       int n_tokens,
3533 		       struct instruction *instr,
3534 		       struct instruction_data *data __rte_unused)
3535 {
3536 	char *token = tokens[1];
3537 
3538 	CHECK(n_tokens == 2, EINVAL);
3539 
3540 	if (token[0] == 'e') {
3541 		struct extern_obj *obj;
3542 		struct extern_type_member_func *func;
3543 
3544 		func = extern_obj_member_func_parse(p, token, &obj);
3545 		CHECK(func, EINVAL);
3546 
3547 		instr->type = INSTR_EXTERN_OBJ;
3548 		instr->ext_obj.ext_obj_id = obj->id;
3549 		instr->ext_obj.func_id = func->id;
3550 
3551 		return 0;
3552 	}
3553 
3554 	if (token[0] == 'f') {
3555 		struct extern_func *func;
3556 
3557 		func = extern_func_parse(p, token);
3558 		CHECK(func, EINVAL);
3559 
3560 		instr->type = INSTR_EXTERN_FUNC;
3561 		instr->ext_func.ext_func_id = func->id;
3562 
3563 		return 0;
3564 	}
3565 
3566 	CHECK(0, EINVAL);
3567 }
3568 
3569 static inline void
3570 instr_extern_obj_exec(struct rte_swx_pipeline *p)
3571 {
3572 	struct thread *t = &p->threads[p->thread_id];
3573 	struct instruction *ip = t->ip;
3574 	uint32_t obj_id = ip->ext_obj.ext_obj_id;
3575 	uint32_t func_id = ip->ext_obj.func_id;
3576 	struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
3577 	rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
3578 
3579 	TRACE("[Thread %2u] extern obj %u member func %u\n",
3580 	      p->thread_id,
3581 	      obj_id,
3582 	      func_id);
3583 
3584 	/* Extern object member function execute. */
3585 	uint32_t done = func(obj->obj, obj->mailbox);
3586 
3587 	/* Thread. */
3588 	thread_ip_inc_cond(t, done);
3589 	thread_yield_cond(p, done ^ 1);
3590 }
3591 
3592 static inline void
3593 instr_extern_func_exec(struct rte_swx_pipeline *p)
3594 {
3595 	struct thread *t = &p->threads[p->thread_id];
3596 	struct instruction *ip = t->ip;
3597 	uint32_t ext_func_id = ip->ext_func.ext_func_id;
3598 	struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
3599 	rte_swx_extern_func_t func = ext_func->func;
3600 
3601 	TRACE("[Thread %2u] extern func %u\n",
3602 	      p->thread_id,
3603 	      ext_func_id);
3604 
3605 	/* Extern function execute. */
3606 	uint32_t done = func(ext_func->mailbox);
3607 
3608 	/* Thread. */
3609 	thread_ip_inc_cond(t, done);
3610 	thread_yield_cond(p, done ^ 1);
3611 }
3612 
3613 /*
3614  * mov.
3615  */
3616 static int
3617 instr_mov_translate(struct rte_swx_pipeline *p,
3618 		    struct action *action,
3619 		    char **tokens,
3620 		    int n_tokens,
3621 		    struct instruction *instr,
3622 		    struct instruction_data *data __rte_unused)
3623 {
3624 	char *dst = tokens[1], *src = tokens[2];
3625 	struct field *fdst, *fsrc;
3626 	uint64_t src_val;
3627 	uint32_t dst_struct_id = 0, src_struct_id = 0;
3628 
3629 	CHECK(n_tokens == 3, EINVAL);
3630 
3631 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3632 	CHECK(fdst, EINVAL);
3633 
3634 	/* MOV, MOV_MH, MOV_HM or MOV_HH. */
3635 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
3636 	if (fsrc) {
3637 		instr->type = INSTR_MOV;
3638 		if (dst[0] != 'h' && src[0] == 'h')
3639 			instr->type = INSTR_MOV_MH;
3640 		if (dst[0] == 'h' && src[0] != 'h')
3641 			instr->type = INSTR_MOV_HM;
3642 		if (dst[0] == 'h' && src[0] == 'h')
3643 			instr->type = INSTR_MOV_HH;
3644 
3645 		instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3646 		instr->mov.dst.n_bits = fdst->n_bits;
3647 		instr->mov.dst.offset = fdst->offset / 8;
3648 		instr->mov.src.struct_id = (uint8_t)src_struct_id;
3649 		instr->mov.src.n_bits = fsrc->n_bits;
3650 		instr->mov.src.offset = fsrc->offset / 8;
3651 		return 0;
3652 	}
3653 
3654 	/* MOV_I. */
3655 	src_val = strtoull(src, &src, 0);
3656 	CHECK(!src[0], EINVAL);
3657 
3658 	if (dst[0] == 'h')
3659 		src_val = hton64(src_val) >> (64 - fdst->n_bits);
3660 
3661 	instr->type = INSTR_MOV_I;
3662 	instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3663 	instr->mov.dst.n_bits = fdst->n_bits;
3664 	instr->mov.dst.offset = fdst->offset / 8;
3665 	instr->mov.src_val = src_val;
3666 	return 0;
3667 }
3668 
3669 static inline void
3670 instr_mov_exec(struct rte_swx_pipeline *p)
3671 {
3672 	struct thread *t = &p->threads[p->thread_id];
3673 	struct instruction *ip = t->ip;
3674 
3675 	TRACE("[Thread %2u] mov\n",
3676 	      p->thread_id);
3677 
3678 	MOV(t, ip);
3679 
3680 	/* Thread. */
3681 	thread_ip_inc(p);
3682 }
3683 
3684 static inline void
3685 instr_mov_mh_exec(struct rte_swx_pipeline *p)
3686 {
3687 	struct thread *t = &p->threads[p->thread_id];
3688 	struct instruction *ip = t->ip;
3689 
3690 	TRACE("[Thread %2u] mov (mh)\n",
3691 	      p->thread_id);
3692 
3693 	MOV_MH(t, ip);
3694 
3695 	/* Thread. */
3696 	thread_ip_inc(p);
3697 }
3698 
3699 static inline void
3700 instr_mov_hm_exec(struct rte_swx_pipeline *p)
3701 {
3702 	struct thread *t = &p->threads[p->thread_id];
3703 	struct instruction *ip = t->ip;
3704 
3705 	TRACE("[Thread %2u] mov (hm)\n",
3706 	      p->thread_id);
3707 
3708 	MOV_HM(t, ip);
3709 
3710 	/* Thread. */
3711 	thread_ip_inc(p);
3712 }
3713 
3714 static inline void
3715 instr_mov_hh_exec(struct rte_swx_pipeline *p)
3716 {
3717 	struct thread *t = &p->threads[p->thread_id];
3718 	struct instruction *ip = t->ip;
3719 
3720 	TRACE("[Thread %2u] mov (hh)\n",
3721 	      p->thread_id);
3722 
3723 	MOV_HH(t, ip);
3724 
3725 	/* Thread. */
3726 	thread_ip_inc(p);
3727 }
3728 
3729 static inline void
3730 instr_mov_i_exec(struct rte_swx_pipeline *p)
3731 {
3732 	struct thread *t = &p->threads[p->thread_id];
3733 	struct instruction *ip = t->ip;
3734 
3735 	TRACE("[Thread %2u] mov m.f %" PRIx64 "\n",
3736 	      p->thread_id,
3737 	      ip->mov.src_val);
3738 
3739 	MOV_I(t, ip);
3740 
3741 	/* Thread. */
3742 	thread_ip_inc(p);
3743 }
3744 
3745 /*
3746  * dma.
3747  */
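/* The dma h.s t.f instruction copies the action data (table entry arguments)
 * of the current action into the destination header and marks that header as
 * valid.
 */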
3748 static inline void
3749 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
3750 
3751 static inline void
3752 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3753 {
3754 	struct thread *t = &p->threads[p->thread_id];
3755 	struct instruction *ip = t->ip;
3756 	uint8_t *action_data = t->structs[0];
3757 	uint64_t valid_headers = t->valid_headers;
3758 	uint32_t i;
3759 
3760 	for (i = 0; i < n_dma; i++) {
3761 		uint32_t header_id = ip->dma.dst.header_id[i];
3762 		uint32_t struct_id = ip->dma.dst.struct_id[i];
3763 		uint32_t offset = ip->dma.src.offset[i];
3764 		uint32_t n_bytes = ip->dma.n_bytes[i];
3765 
3766 		struct header_runtime *h = &t->headers[header_id];
3767 		uint8_t *h_ptr0 = h->ptr0;
3768 		uint8_t *h_ptr = t->structs[struct_id];
3769 
3770 		void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3771 			h_ptr : h_ptr0;
3772 		void *src = &action_data[offset];
3773 
3774 		TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
3775 
3776 		/* Headers. */
3777 		memcpy(dst, src, n_bytes);
3778 		t->structs[struct_id] = dst;
3779 		valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3780 	}
3781 
3782 	t->valid_headers = valid_headers;
3783 }
3784 
3785 static inline void
3786 instr_dma_ht_exec(struct rte_swx_pipeline *p)
3787 {
3788 	__instr_dma_ht_exec(p, 1);
3789 
3790 	/* Thread. */
3791 	thread_ip_inc(p);
3792 }
3793 
3794 static inline void
3795 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
3796 {
3797 	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3798 	      p->thread_id);
3799 
3800 	__instr_dma_ht_exec(p, 2);
3801 
3802 	/* Thread. */
3803 	thread_ip_inc(p);
3804 }
3805 
3806 static inline void
3807 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
3808 {
3809 	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3810 	      p->thread_id);
3811 
3812 	__instr_dma_ht_exec(p, 3);
3813 
3814 	/* Thread. */
3815 	thread_ip_inc(p);
3816 }
3817 
3818 static inline void
3819 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
3820 {
3821 	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3822 	      p->thread_id);
3823 
3824 	__instr_dma_ht_exec(p, 4);
3825 
3826 	/* Thread. */
3827 	thread_ip_inc(p);
3828 }
3829 
3830 static inline void
3831 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
3832 {
3833 	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3834 	      p->thread_id);
3835 
3836 	__instr_dma_ht_exec(p, 5);
3837 
3838 	/* Thread. */
3839 	thread_ip_inc(p);
3840 }
3841 
3842 static inline void
3843 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
3844 {
3845 	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3846 	      p->thread_id);
3847 
3848 	__instr_dma_ht_exec(p, 6);
3849 
3850 	/* Thread. */
3851 	thread_ip_inc(p);
3852 }
3853 
3854 static inline void
3855 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
3856 {
3857 	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3858 	      p->thread_id);
3859 
3860 	__instr_dma_ht_exec(p, 7);
3861 
3862 	/* Thread. */
3863 	thread_ip_inc(p);
3864 }
3865 
3866 static inline void
3867 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
3868 {
3869 	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3870 	      p->thread_id);
3871 
3872 	__instr_dma_ht_exec(p, 8);
3873 
3874 	/* Thread. */
3875 	thread_ip_inc(p);
3876 }
3877 
3878 /*
3879  * alu.
3880  */
3881 static int
3882 instr_alu_add_translate(struct rte_swx_pipeline *p,
3883 			struct action *action,
3884 			char **tokens,
3885 			int n_tokens,
3886 			struct instruction *instr,
3887 			struct instruction_data *data __rte_unused)
3888 {
3889 	char *dst = tokens[1], *src = tokens[2];
3890 	struct field *fdst, *fsrc;
3891 	uint64_t src_val;
3892 	uint32_t dst_struct_id = 0, src_struct_id = 0;
3893 
3894 	CHECK(n_tokens == 3, EINVAL);
3895 
3896 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3897 	CHECK(fdst, EINVAL);
3898 
3899 	/* ADD, ADD_HM, ADD_MH, ADD_HH. */
3900 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
3901 	if (fsrc) {
3902 		instr->type = INSTR_ALU_ADD;
3903 		if (dst[0] == 'h' && src[0] != 'h')
3904 			instr->type = INSTR_ALU_ADD_HM;
3905 		if (dst[0] != 'h' && src[0] == 'h')
3906 			instr->type = INSTR_ALU_ADD_MH;
3907 		if (dst[0] == 'h' && src[0] == 'h')
3908 			instr->type = INSTR_ALU_ADD_HH;
3909 
3910 		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3911 		instr->alu.dst.n_bits = fdst->n_bits;
3912 		instr->alu.dst.offset = fdst->offset / 8;
3913 		instr->alu.src.struct_id = (uint8_t)src_struct_id;
3914 		instr->alu.src.n_bits = fsrc->n_bits;
3915 		instr->alu.src.offset = fsrc->offset / 8;
3916 		return 0;
3917 	}
3918 
3919 	/* ADD_MI, ADD_HI. */
3920 	src_val = strtoull(src, &src, 0);
3921 	CHECK(!src[0], EINVAL);
3922 
3923 	instr->type = INSTR_ALU_ADD_MI;
3924 	if (dst[0] == 'h')
3925 		instr->type = INSTR_ALU_ADD_HI;
3926 
3927 	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3928 	instr->alu.dst.n_bits = fdst->n_bits;
3929 	instr->alu.dst.offset = fdst->offset / 8;
3930 	instr->alu.src_val = src_val;
3931 	return 0;
3932 }
3933 
3934 static int
3935 instr_alu_sub_translate(struct rte_swx_pipeline *p,
3936 			struct action *action,
3937 			char **tokens,
3938 			int n_tokens,
3939 			struct instruction *instr,
3940 			struct instruction_data *data __rte_unused)
3941 {
3942 	char *dst = tokens[1], *src = tokens[2];
3943 	struct field *fdst, *fsrc;
3944 	uint64_t src_val;
3945 	uint32_t dst_struct_id = 0, src_struct_id = 0;
3946 
3947 	CHECK(n_tokens == 3, EINVAL);
3948 
3949 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3950 	CHECK(fdst, EINVAL);
3951 
3952 	/* SUB, SUB_HM, SUB_MH, SUB_HH. */
3953 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
3954 	if (fsrc) {
3955 		instr->type = INSTR_ALU_SUB;
3956 		if (dst[0] == 'h' && src[0] != 'h')
3957 			instr->type = INSTR_ALU_SUB_HM;
3958 		if (dst[0] != 'h' && src[0] == 'h')
3959 			instr->type = INSTR_ALU_SUB_MH;
3960 		if (dst[0] == 'h' && src[0] == 'h')
3961 			instr->type = INSTR_ALU_SUB_HH;
3962 
3963 		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3964 		instr->alu.dst.n_bits = fdst->n_bits;
3965 		instr->alu.dst.offset = fdst->offset / 8;
3966 		instr->alu.src.struct_id = (uint8_t)src_struct_id;
3967 		instr->alu.src.n_bits = fsrc->n_bits;
3968 		instr->alu.src.offset = fsrc->offset / 8;
3969 		return 0;
3970 	}
3971 
3972 	/* SUB_MI, SUB_HI. */
3973 	src_val = strtoull(src, &src, 0);
3974 	CHECK(!src[0], EINVAL);
3975 
3976 	instr->type = INSTR_ALU_SUB_MI;
3977 	if (dst[0] == 'h')
3978 		instr->type = INSTR_ALU_SUB_HI;
3979 
3980 	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3981 	instr->alu.dst.n_bits = fdst->n_bits;
3982 	instr->alu.dst.offset = fdst->offset / 8;
3983 	instr->alu.src_val = src_val;
3984 	return 0;
3985 }
3986 
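/* Checksum add: the 16-bit destination header field accumulates either a
 * single source header field (CKADD_FIELD) or an entire source header
 * (CKADD_STRUCT), with a specialized variant (CKADD_STRUCT20) for 20-byte
 * headers such as the IPv4 header without options.
 */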
3987 static int
3988 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3989 			  struct action *action __rte_unused,
3990 			  char **tokens,
3991 			  int n_tokens,
3992 			  struct instruction *instr,
3993 			  struct instruction_data *data __rte_unused)
3994 {
3995 	char *dst = tokens[1], *src = tokens[2];
3996 	struct header *hdst, *hsrc;
3997 	struct field *fdst, *fsrc;
3998 
3999 	CHECK(n_tokens == 3, EINVAL);
4000 
4001 	fdst = header_field_parse(p, dst, &hdst);
4002 	CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4003 
4004 	/* CKADD_FIELD. */
4005 	fsrc = header_field_parse(p, src, &hsrc);
4006 	if (fsrc) {
4007 		instr->type = INSTR_ALU_CKADD_FIELD;
4008 		instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4009 		instr->alu.dst.n_bits = fdst->n_bits;
4010 		instr->alu.dst.offset = fdst->offset / 8;
4011 		instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4012 		instr->alu.src.n_bits = fsrc->n_bits;
4013 		instr->alu.src.offset = fsrc->offset / 8;
4014 		return 0;
4015 	}
4016 
4017 	/* CKADD_STRUCT, CKADD_STRUCT20. */
4018 	hsrc = header_parse(p, src);
4019 	CHECK(hsrc, EINVAL);
4020 
4021 	instr->type = INSTR_ALU_CKADD_STRUCT;
4022 	if ((hsrc->st->n_bits / 8) == 20)
4023 		instr->type = INSTR_ALU_CKADD_STRUCT20;
4024 
4025 	instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4026 	instr->alu.dst.n_bits = fdst->n_bits;
4027 	instr->alu.dst.offset = fdst->offset / 8;
4028 	instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4029 	instr->alu.src.n_bits = hsrc->st->n_bits;
4030 	instr->alu.src.offset = 0; /* Unused. */
4031 	return 0;
4032 }
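
/*
 * Illustrative usage (hypothetical header names): "ckadd h.ipv4.hdr_checksum
 * h.ipv4.new_field" folds a single header field into an existing 16-bit
 * checksum (CKADD_FIELD), while "ckadd h.ipv4.hdr_checksum h.ipv4" computes
 * the checksum over an entire header (CKADD_STRUCT), with a dedicated fast
 * path (CKADD_STRUCT20) when that header is exactly 20 bytes long, i.e. the
 * size of an IPv4 header without options. In all cases the destination must
 * be a 16-bit header field, as enforced above.
 */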
4033 
4034 static int
4035 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
4036 			  struct action *action __rte_unused,
4037 			  char **tokens,
4038 			  int n_tokens,
4039 			  struct instruction *instr,
4040 			  struct instruction_data *data __rte_unused)
4041 {
4042 	char *dst = tokens[1], *src = tokens[2];
4043 	struct header *hdst, *hsrc;
4044 	struct field *fdst, *fsrc;
4045 
4046 	CHECK(n_tokens == 3, EINVAL);
4047 
4048 	fdst = header_field_parse(p, dst, &hdst);
4049 	CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4050 
4051 	fsrc = header_field_parse(p, src, &hsrc);
4052 	CHECK(fsrc, EINVAL);
4053 
4054 	instr->type = INSTR_ALU_CKSUB_FIELD;
4055 	instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4056 	instr->alu.dst.n_bits = fdst->n_bits;
4057 	instr->alu.dst.offset = fdst->offset / 8;
4058 	instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4059 	instr->alu.src.n_bits = fsrc->n_bits;
4060 	instr->alu.src.offset = fsrc->offset / 8;
4061 	return 0;
4062 }
4063 
4064 static int
4065 instr_alu_shl_translate(struct rte_swx_pipeline *p,
4066 			struct action *action,
4067 			char **tokens,
4068 			int n_tokens,
4069 			struct instruction *instr,
4070 			struct instruction_data *data __rte_unused)
4071 {
4072 	char *dst = tokens[1], *src = tokens[2];
4073 	struct field *fdst, *fsrc;
4074 	uint64_t src_val;
4075 	uint32_t dst_struct_id = 0, src_struct_id = 0;
4076 
4077 	CHECK(n_tokens == 3, EINVAL);
4078 
4079 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4080 	CHECK(fdst, EINVAL);
4081 
4082 	/* SHL, SHL_HM, SHL_MH, SHL_HH. */
4083 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
4084 	if (fsrc) {
4085 		instr->type = INSTR_ALU_SHL;
4086 		if (dst[0] == 'h' && src[0] != 'h')
4087 			instr->type = INSTR_ALU_SHL_HM;
4088 		if (dst[0] != 'h' && src[0] == 'h')
4089 			instr->type = INSTR_ALU_SHL_MH;
4090 		if (dst[0] == 'h' && src[0] == 'h')
4091 			instr->type = INSTR_ALU_SHL_HH;
4092 
4093 		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4094 		instr->alu.dst.n_bits = fdst->n_bits;
4095 		instr->alu.dst.offset = fdst->offset / 8;
4096 		instr->alu.src.struct_id = (uint8_t)src_struct_id;
4097 		instr->alu.src.n_bits = fsrc->n_bits;
4098 		instr->alu.src.offset = fsrc->offset / 8;
4099 		return 0;
4100 	}
4101 
4102 	/* SHL_MI, SHL_HI. */
4103 	src_val = strtoull(src, &src, 0);
4104 	CHECK(!src[0], EINVAL);
4105 
4106 	instr->type = INSTR_ALU_SHL_MI;
4107 	if (dst[0] == 'h')
4108 		instr->type = INSTR_ALU_SHL_HI;
4109 
4110 	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4111 	instr->alu.dst.n_bits = fdst->n_bits;
4112 	instr->alu.dst.offset = fdst->offset / 8;
4113 	instr->alu.src_val = src_val;
4114 	return 0;
4115 }
4116 
4117 static int
4118 instr_alu_shr_translate(struct rte_swx_pipeline *p,
4119 			struct action *action,
4120 			char **tokens,
4121 			int n_tokens,
4122 			struct instruction *instr,
4123 			struct instruction_data *data __rte_unused)
4124 {
4125 	char *dst = tokens[1], *src = tokens[2];
4126 	struct field *fdst, *fsrc;
4127 	uint64_t src_val;
4128 	uint32_t dst_struct_id = 0, src_struct_id = 0;
4129 
4130 	CHECK(n_tokens == 3, EINVAL);
4131 
4132 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4133 	CHECK(fdst, EINVAL);
4134 
4135 	/* SHR, SHR_HM, SHR_MH, SHR_HH. */
4136 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
4137 	if (fsrc) {
4138 		instr->type = INSTR_ALU_SHR;
4139 		if (dst[0] == 'h' && src[0] != 'h')
4140 			instr->type = INSTR_ALU_SHR_HM;
4141 		if (dst[0] != 'h' && src[0] == 'h')
4142 			instr->type = INSTR_ALU_SHR_MH;
4143 		if (dst[0] == 'h' && src[0] == 'h')
4144 			instr->type = INSTR_ALU_SHR_HH;
4145 
4146 		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4147 		instr->alu.dst.n_bits = fdst->n_bits;
4148 		instr->alu.dst.offset = fdst->offset / 8;
4149 		instr->alu.src.struct_id = (uint8_t)src_struct_id;
4150 		instr->alu.src.n_bits = fsrc->n_bits;
4151 		instr->alu.src.offset = fsrc->offset / 8;
4152 		return 0;
4153 	}
4154 
4155 	/* SHR_MI, SHR_HI. */
4156 	src_val = strtoull(src, &src, 0);
4157 	CHECK(!src[0], EINVAL);
4158 
4159 	instr->type = INSTR_ALU_SHR_MI;
4160 	if (dst[0] == 'h')
4161 		instr->type = INSTR_ALU_SHR_HI;
4162 
4163 	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4164 	instr->alu.dst.n_bits = fdst->n_bits;
4165 	instr->alu.dst.offset = fdst->offset / 8;
4166 	instr->alu.src_val = src_val;
4167 	return 0;
4168 }
4169 
4170 static int
4171 instr_alu_and_translate(struct rte_swx_pipeline *p,
4172 			struct action *action,
4173 			char **tokens,
4174 			int n_tokens,
4175 			struct instruction *instr,
4176 			struct instruction_data *data __rte_unused)
4177 {
4178 	char *dst = tokens[1], *src = tokens[2];
4179 	struct field *fdst, *fsrc;
4180 	uint64_t src_val;
4181 	uint32_t dst_struct_id = 0, src_struct_id = 0;
4182 
4183 	CHECK(n_tokens == 3, EINVAL);
4184 
4185 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4186 	CHECK(fdst, EINVAL);
4187 
4188 	/* AND, AND_MH, AND_HM, AND_HH. */
4189 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
4190 	if (fsrc) {
4191 		instr->type = INSTR_ALU_AND;
4192 		if (dst[0] != 'h' && src[0] == 'h')
4193 			instr->type = INSTR_ALU_AND_MH;
4194 		if (dst[0] == 'h' && src[0] != 'h')
4195 			instr->type = INSTR_ALU_AND_HM;
4196 		if (dst[0] == 'h' && src[0] == 'h')
4197 			instr->type = INSTR_ALU_AND_HH;
4198 
4199 		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4200 		instr->alu.dst.n_bits = fdst->n_bits;
4201 		instr->alu.dst.offset = fdst->offset / 8;
4202 		instr->alu.src.struct_id = (uint8_t)src_struct_id;
4203 		instr->alu.src.n_bits = fsrc->n_bits;
4204 		instr->alu.src.offset = fsrc->offset / 8;
4205 		return 0;
4206 	}
4207 
4208 	/* AND_I. */
4209 	src_val = strtoull(src, &src, 0);
4210 	CHECK(!src[0], EINVAL);
4211 
4212 	if (dst[0] == 'h')
4213 		src_val = hton64(src_val) >> (64 - fdst->n_bits);
4214 
4215 	instr->type = INSTR_ALU_AND_I;
4216 	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4217 	instr->alu.dst.n_bits = fdst->n_bits;
4218 	instr->alu.dst.offset = fdst->offset / 8;
4219 	instr->alu.src_val = src_val;
4220 	return 0;
4221 }
4222 
4223 static int
4224 instr_alu_or_translate(struct rte_swx_pipeline *p,
4225 		       struct action *action,
4226 		       char **tokens,
4227 		       int n_tokens,
4228 		       struct instruction *instr,
4229 		       struct instruction_data *data __rte_unused)
4230 {
4231 	char *dst = tokens[1], *src = tokens[2];
4232 	struct field *fdst, *fsrc;
4233 	uint64_t src_val;
4234 	uint32_t dst_struct_id = 0, src_struct_id = 0;
4235 
4236 	CHECK(n_tokens == 3, EINVAL);
4237 
4238 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4239 	CHECK(fdst, EINVAL);
4240 
4241 	/* OR, OR_MH, OR_HM, OR_HH. */
4242 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
4243 	if (fsrc) {
4244 		instr->type = INSTR_ALU_OR;
4245 		if (dst[0] != 'h' && src[0] == 'h')
4246 			instr->type = INSTR_ALU_OR_MH;
4247 		if (dst[0] == 'h' && src[0] != 'h')
4248 			instr->type = INSTR_ALU_OR_HM;
4249 		if (dst[0] == 'h' && src[0] == 'h')
4250 			instr->type = INSTR_ALU_OR_HH;
4251 
4252 		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4253 		instr->alu.dst.n_bits = fdst->n_bits;
4254 		instr->alu.dst.offset = fdst->offset / 8;
4255 		instr->alu.src.struct_id = (uint8_t)src_struct_id;
4256 		instr->alu.src.n_bits = fsrc->n_bits;
4257 		instr->alu.src.offset = fsrc->offset / 8;
4258 		return 0;
4259 	}
4260 
4261 	/* OR_I. */
4262 	src_val = strtoull(src, &src, 0);
4263 	CHECK(!src[0], EINVAL);
4264 
4265 	if (dst[0] == 'h')
4266 		src_val = hton64(src_val) >> (64 - fdst->n_bits);
4267 
4268 	instr->type = INSTR_ALU_OR_I;
4269 	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4270 	instr->alu.dst.n_bits = fdst->n_bits;
4271 	instr->alu.dst.offset = fdst->offset / 8;
4272 	instr->alu.src_val = src_val;
4273 	return 0;
4274 }
4275 
4276 static int
4277 instr_alu_xor_translate(struct rte_swx_pipeline *p,
4278 			struct action *action,
4279 			char **tokens,
4280 			int n_tokens,
4281 			struct instruction *instr,
4282 			struct instruction_data *data __rte_unused)
4283 {
4284 	char *dst = tokens[1], *src = tokens[2];
4285 	struct field *fdst, *fsrc;
4286 	uint64_t src_val;
4287 	uint32_t dst_struct_id = 0, src_struct_id = 0;
4288 
4289 	CHECK(n_tokens == 3, EINVAL);
4290 
4291 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4292 	CHECK(fdst, EINVAL);
4293 
4294 	/* XOR, XOR_MH, XOR_HM, XOR_HH. */
4295 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
4296 	if (fsrc) {
4297 		instr->type = INSTR_ALU_XOR;
4298 		if (dst[0] != 'h' && src[0] == 'h')
4299 			instr->type = INSTR_ALU_XOR_MH;
4300 		if (dst[0] == 'h' && src[0] != 'h')
4301 			instr->type = INSTR_ALU_XOR_HM;
4302 		if (dst[0] == 'h' && src[0] == 'h')
4303 			instr->type = INSTR_ALU_XOR_HH;
4304 
4305 		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4306 		instr->alu.dst.n_bits = fdst->n_bits;
4307 		instr->alu.dst.offset = fdst->offset / 8;
4308 		instr->alu.src.struct_id = (uint8_t)src_struct_id;
4309 		instr->alu.src.n_bits = fsrc->n_bits;
4310 		instr->alu.src.offset = fsrc->offset / 8;
4311 		return 0;
4312 	}
4313 
4314 	/* XOR_I. */
4315 	src_val = strtoull(src, &src, 0);
4316 	CHECK(!src[0], EINVAL);
4317 
4318 	if (dst[0] == 'h')
4319 		src_val = hton64(src_val) >> (64 - fdst->n_bits);
4320 
4321 	instr->type = INSTR_ALU_XOR_I;
4322 	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4323 	instr->alu.dst.n_bits = fdst->n_bits;
4324 	instr->alu.dst.offset = fdst->offset / 8;
4325 	instr->alu.src_val = src_val;
4326 	return 0;
4327 }
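
/*
 * Note on the immediate forms of the bitwise instructions above (and/or/xor):
 * these operations are byte-order agnostic, so when the destination is a
 * header field (network byte order) the immediate is byte-swapped once at
 * translation time and a single generic _I opcode is emitted. Arithmetic and
 * shift operations cannot be applied directly to the network byte order
 * representation, which is why add/sub/shl/shr keep separate _MI and _HI
 * opcodes and leave the byte-order handling to their execution paths.
 */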
4328 
4329 static inline void
4330 instr_alu_add_exec(struct rte_swx_pipeline *p)
4331 {
4332 	struct thread *t = &p->threads[p->thread_id];
4333 	struct instruction *ip = t->ip;
4334 
4335 	TRACE("[Thread %2u] add\n", p->thread_id);
4336 
4337 	/* Structs. */
4338 	ALU(t, ip, +);
4339 
4340 	/* Thread. */
4341 	thread_ip_inc(p);
4342 }
4343 
4344 static inline void
4345 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
4346 {
4347 	struct thread *t = &p->threads[p->thread_id];
4348 	struct instruction *ip = t->ip;
4349 
4350 	TRACE("[Thread %2u] add (mh)\n", p->thread_id);
4351 
4352 	/* Structs. */
4353 	ALU_MH(t, ip, +);
4354 
4355 	/* Thread. */
4356 	thread_ip_inc(p);
4357 }
4358 
4359 static inline void
4360 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
4361 {
4362 	struct thread *t = &p->threads[p->thread_id];
4363 	struct instruction *ip = t->ip;
4364 
4365 	TRACE("[Thread %2u] add (hm)\n", p->thread_id);
4366 
4367 	/* Structs. */
4368 	ALU_HM(t, ip, +);
4369 
4370 	/* Thread. */
4371 	thread_ip_inc(p);
4372 }
4373 
4374 static inline void
4375 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
4376 {
4377 	struct thread *t = &p->threads[p->thread_id];
4378 	struct instruction *ip = t->ip;
4379 
4380 	TRACE("[Thread %2u] add (hh)\n", p->thread_id);
4381 
4382 	/* Structs. */
4383 	ALU_HH(t, ip, +);
4384 
4385 	/* Thread. */
4386 	thread_ip_inc(p);
4387 }
4388 
4389 static inline void
4390 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
4391 {
4392 	struct thread *t = &p->threads[p->thread_id];
4393 	struct instruction *ip = t->ip;
4394 
4395 	TRACE("[Thread %2u] add (mi)\n", p->thread_id);
4396 
4397 	/* Structs. */
4398 	ALU_MI(t, ip, +);
4399 
4400 	/* Thread. */
4401 	thread_ip_inc(p);
4402 }
4403 
4404 static inline void
4405 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
4406 {
4407 	struct thread *t = &p->threads[p->thread_id];
4408 	struct instruction *ip = t->ip;
4409 
4410 	TRACE("[Thread %2u] add (hi)\n", p->thread_id);
4411 
4412 	/* Structs. */
4413 	ALU_HI(t, ip, +);
4414 
4415 	/* Thread. */
4416 	thread_ip_inc(p);
4417 }
4418 
4419 static inline void
4420 instr_alu_sub_exec(struct rte_swx_pipeline *p)
4421 {
4422 	struct thread *t = &p->threads[p->thread_id];
4423 	struct instruction *ip = t->ip;
4424 
4425 	TRACE("[Thread %2u] sub\n", p->thread_id);
4426 
4427 	/* Structs. */
4428 	ALU(t, ip, -);
4429 
4430 	/* Thread. */
4431 	thread_ip_inc(p);
4432 }
4433 
4434 static inline void
4435 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
4436 {
4437 	struct thread *t = &p->threads[p->thread_id];
4438 	struct instruction *ip = t->ip;
4439 
4440 	TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
4441 
4442 	/* Structs. */
4443 	ALU_MH(t, ip, -);
4444 
4445 	/* Thread. */
4446 	thread_ip_inc(p);
4447 }
4448 
4449 static inline void
4450 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
4451 {
4452 	struct thread *t = &p->threads[p->thread_id];
4453 	struct instruction *ip = t->ip;
4454 
4455 	TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
4456 
4457 	/* Structs. */
4458 	ALU_HM(t, ip, -);
4459 
4460 	/* Thread. */
4461 	thread_ip_inc(p);
4462 }
4463 
4464 static inline void
4465 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
4466 {
4467 	struct thread *t = &p->threads[p->thread_id];
4468 	struct instruction *ip = t->ip;
4469 
4470 	TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
4471 
4472 	/* Structs. */
4473 	ALU_HH(t, ip, -);
4474 
4475 	/* Thread. */
4476 	thread_ip_inc(p);
4477 }
4478 
4479 static inline void
4480 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
4481 {
4482 	struct thread *t = &p->threads[p->thread_id];
4483 	struct instruction *ip = t->ip;
4484 
4485 	TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
4486 
4487 	/* Structs. */
4488 	ALU_MI(t, ip, -);
4489 
4490 	/* Thread. */
4491 	thread_ip_inc(p);
4492 }
4493 
4494 static inline void
4495 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
4496 {
4497 	struct thread *t = &p->threads[p->thread_id];
4498 	struct instruction *ip = t->ip;
4499 
4500 	TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
4501 
4502 	/* Structs. */
4503 	ALU_HI(t, ip, -);
4504 
4505 	/* Thread. */
4506 	thread_ip_inc(p);
4507 }
4508 
4509 static inline void
4510 instr_alu_shl_exec(struct rte_swx_pipeline *p)
4511 {
4512 	struct thread *t = &p->threads[p->thread_id];
4513 	struct instruction *ip = t->ip;
4514 
4515 	TRACE("[Thread %2u] shl\n", p->thread_id);
4516 
4517 	/* Structs. */
4518 	ALU(t, ip, <<);
4519 
4520 	/* Thread. */
4521 	thread_ip_inc(p);
4522 }
4523 
4524 static inline void
4525 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
4526 {
4527 	struct thread *t = &p->threads[p->thread_id];
4528 	struct instruction *ip = t->ip;
4529 
4530 	TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
4531 
4532 	/* Structs. */
4533 	ALU_MH(t, ip, <<);
4534 
4535 	/* Thread. */
4536 	thread_ip_inc(p);
4537 }
4538 
4539 static inline void
4540 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
4541 {
4542 	struct thread *t = &p->threads[p->thread_id];
4543 	struct instruction *ip = t->ip;
4544 
4545 	TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
4546 
4547 	/* Structs. */
4548 	ALU_HM(t, ip, <<);
4549 
4550 	/* Thread. */
4551 	thread_ip_inc(p);
4552 }
4553 
4554 static inline void
4555 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
4556 {
4557 	struct thread *t = &p->threads[p->thread_id];
4558 	struct instruction *ip = t->ip;
4559 
4560 	TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
4561 
4562 	/* Structs. */
4563 	ALU_HH(t, ip, <<);
4564 
4565 	/* Thread. */
4566 	thread_ip_inc(p);
4567 }
4568 
4569 static inline void
4570 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
4571 {
4572 	struct thread *t = &p->threads[p->thread_id];
4573 	struct instruction *ip = t->ip;
4574 
4575 	TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
4576 
4577 	/* Structs. */
4578 	ALU_MI(t, ip, <<);
4579 
4580 	/* Thread. */
4581 	thread_ip_inc(p);
4582 }
4583 
4584 static inline void
4585 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
4586 {
4587 	struct thread *t = &p->threads[p->thread_id];
4588 	struct instruction *ip = t->ip;
4589 
4590 	TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
4591 
4592 	/* Structs. */
4593 	ALU_HI(t, ip, <<);
4594 
4595 	/* Thread. */
4596 	thread_ip_inc(p);
4597 }
4598 
4599 static inline void
4600 instr_alu_shr_exec(struct rte_swx_pipeline *p)
4601 {
4602 	struct thread *t = &p->threads[p->thread_id];
4603 	struct instruction *ip = t->ip;
4604 
4605 	TRACE("[Thread %2u] shr\n", p->thread_id);
4606 
4607 	/* Structs. */
4608 	ALU(t, ip, >>);
4609 
4610 	/* Thread. */
4611 	thread_ip_inc(p);
4612 }
4613 
4614 static inline void
4615 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
4616 {
4617 	struct thread *t = &p->threads[p->thread_id];
4618 	struct instruction *ip = t->ip;
4619 
4620 	TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
4621 
4622 	/* Structs. */
4623 	ALU_MH(t, ip, >>);
4624 
4625 	/* Thread. */
4626 	thread_ip_inc(p);
4627 }
4628 
4629 static inline void
4630 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
4631 {
4632 	struct thread *t = &p->threads[p->thread_id];
4633 	struct instruction *ip = t->ip;
4634 
4635 	TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
4636 
4637 	/* Structs. */
4638 	ALU_HM(t, ip, >>);
4639 
4640 	/* Thread. */
4641 	thread_ip_inc(p);
4642 }
4643 
4644 static inline void
4645 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
4646 {
4647 	struct thread *t = &p->threads[p->thread_id];
4648 	struct instruction *ip = t->ip;
4649 
4650 	TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
4651 
4652 	/* Structs. */
4653 	ALU_HH(t, ip, >>);
4654 
4655 	/* Thread. */
4656 	thread_ip_inc(p);
4657 }
4658 
4659 static inline void
4660 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
4661 {
4662 	struct thread *t = &p->threads[p->thread_id];
4663 	struct instruction *ip = t->ip;
4664 
4665 	TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
4666 
4667 	/* Structs. */
4668 	ALU_MI(t, ip, >>);
4669 
4670 	/* Thread. */
4671 	thread_ip_inc(p);
4672 }
4673 
4674 static inline void
4675 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
4676 {
4677 	struct thread *t = &p->threads[p->thread_id];
4678 	struct instruction *ip = t->ip;
4679 
4680 	TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
4681 
4682 	/* Structs. */
4683 	ALU_HI(t, ip, >>);
4684 
4685 	/* Thread. */
4686 	thread_ip_inc(p);
4687 }
4688 
4689 static inline void
4690 instr_alu_and_exec(struct rte_swx_pipeline *p)
4691 {
4692 	struct thread *t = &p->threads[p->thread_id];
4693 	struct instruction *ip = t->ip;
4694 
4695 	TRACE("[Thread %2u] and\n", p->thread_id);
4696 
4697 	/* Structs. */
4698 	ALU(t, ip, &);
4699 
4700 	/* Thread. */
4701 	thread_ip_inc(p);
4702 }
4703 
4704 static inline void
4705 instr_alu_and_mh_exec(struct rte_swx_pipeline *p)
4706 {
4707 	struct thread *t = &p->threads[p->thread_id];
4708 	struct instruction *ip = t->ip;
4709 
4710 	TRACE("[Thread %2u] and (mh)\n", p->thread_id);
4711 
4712 	/* Structs. */
4713 	ALU_MH(t, ip, &);
4714 
4715 	/* Thread. */
4716 	thread_ip_inc(p);
4717 }
4718 
4719 static inline void
4720 instr_alu_and_hm_exec(struct rte_swx_pipeline *p)
4721 {
4722 	struct thread *t = &p->threads[p->thread_id];
4723 	struct instruction *ip = t->ip;
4724 
4725 	TRACE("[Thread %2u] and (hm)\n", p->thread_id);
4726 
4727 	/* Structs. */
4728 	ALU_HM_FAST(t, ip, &);
4729 
4730 	/* Thread. */
4731 	thread_ip_inc(p);
4732 }
4733 
4734 static inline void
4735 instr_alu_and_hh_exec(struct rte_swx_pipeline *p)
4736 {
4737 	struct thread *t = &p->threads[p->thread_id];
4738 	struct instruction *ip = t->ip;
4739 
4740 	TRACE("[Thread %2u] and (hh)\n", p->thread_id);
4741 
4742 	/* Structs. */
4743 	ALU_HH_FAST(t, ip, &);
4744 
4745 	/* Thread. */
4746 	thread_ip_inc(p);
4747 }
4748 
4749 static inline void
4750 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
4751 {
4752 	struct thread *t = &p->threads[p->thread_id];
4753 	struct instruction *ip = t->ip;
4754 
4755 	TRACE("[Thread %2u] and (i)\n", p->thread_id);
4756 
4757 	/* Structs. */
4758 	ALU_I(t, ip, &);
4759 
4760 	/* Thread. */
4761 	thread_ip_inc(p);
4762 }
4763 
4764 static inline void
4765 instr_alu_or_exec(struct rte_swx_pipeline *p)
4766 {
4767 	struct thread *t = &p->threads[p->thread_id];
4768 	struct instruction *ip = t->ip;
4769 
4770 	TRACE("[Thread %2u] or\n", p->thread_id);
4771 
4772 	/* Structs. */
4773 	ALU(t, ip, |);
4774 
4775 	/* Thread. */
4776 	thread_ip_inc(p);
4777 }
4778 
4779 static inline void
4780 instr_alu_or_mh_exec(struct rte_swx_pipeline *p)
4781 {
4782 	struct thread *t = &p->threads[p->thread_id];
4783 	struct instruction *ip = t->ip;
4784 
4785 	TRACE("[Thread %2u] or (mh)\n", p->thread_id);
4786 
4787 	/* Structs. */
4788 	ALU_MH(t, ip, |);
4789 
4790 	/* Thread. */
4791 	thread_ip_inc(p);
4792 }
4793 
4794 static inline void
4795 instr_alu_or_hm_exec(struct rte_swx_pipeline *p)
4796 {
4797 	struct thread *t = &p->threads[p->thread_id];
4798 	struct instruction *ip = t->ip;
4799 
4800 	TRACE("[Thread %2u] or (hm)\n", p->thread_id);
4801 
4802 	/* Structs. */
4803 	ALU_HM_FAST(t, ip, |);
4804 
4805 	/* Thread. */
4806 	thread_ip_inc(p);
4807 }
4808 
4809 static inline void
4810 instr_alu_or_hh_exec(struct rte_swx_pipeline *p)
4811 {
4812 	struct thread *t = &p->threads[p->thread_id];
4813 	struct instruction *ip = t->ip;
4814 
4815 	TRACE("[Thread %2u] or (hh)\n", p->thread_id);
4816 
4817 	/* Structs. */
4818 	ALU_HH_FAST(t, ip, |);
4819 
4820 	/* Thread. */
4821 	thread_ip_inc(p);
4822 }
4823 
4824 static inline void
4825 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
4826 {
4827 	struct thread *t = &p->threads[p->thread_id];
4828 	struct instruction *ip = t->ip;
4829 
4830 	TRACE("[Thread %2u] or (i)\n", p->thread_id);
4831 
4832 	/* Structs. */
4833 	ALU_I(t, ip, |);
4834 
4835 	/* Thread. */
4836 	thread_ip_inc(p);
4837 }
4838 
4839 static inline void
4840 instr_alu_xor_exec(struct rte_swx_pipeline *p)
4841 {
4842 	struct thread *t = &p->threads[p->thread_id];
4843 	struct instruction *ip = t->ip;
4844 
4845 	TRACE("[Thread %2u] xor\n", p->thread_id);
4846 
4847 	/* Structs. */
4848 	ALU(t, ip, ^);
4849 
4850 	/* Thread. */
4851 	thread_ip_inc(p);
4852 }
4853 
4854 static inline void
4855 instr_alu_xor_mh_exec(struct rte_swx_pipeline *p)
4856 {
4857 	struct thread *t = &p->threads[p->thread_id];
4858 	struct instruction *ip = t->ip;
4859 
4860 	TRACE("[Thread %2u] xor (mh)\n", p->thread_id);
4861 
4862 	/* Structs. */
4863 	ALU_MH(t, ip, ^);
4864 
4865 	/* Thread. */
4866 	thread_ip_inc(p);
4867 }
4868 
4869 static inline void
4870 instr_alu_xor_hm_exec(struct rte_swx_pipeline *p)
4871 {
4872 	struct thread *t = &p->threads[p->thread_id];
4873 	struct instruction *ip = t->ip;
4874 
4875 	TRACE("[Thread %2u] xor (hm)\n", p->thread_id);
4876 
4877 	/* Structs. */
4878 	ALU_HM_FAST(t, ip, ^);
4879 
4880 	/* Thread. */
4881 	thread_ip_inc(p);
4882 }
4883 
4884 static inline void
4885 instr_alu_xor_hh_exec(struct rte_swx_pipeline *p)
4886 {
4887 	struct thread *t = &p->threads[p->thread_id];
4888 	struct instruction *ip = t->ip;
4889 
4890 	TRACE("[Thread %2u] xor (hh)\n", p->thread_id);
4891 
4892 	/* Structs. */
4893 	ALU_HH_FAST(t, ip, ^);
4894 
4895 	/* Thread. */
4896 	thread_ip_inc(p);
4897 }
4898 
4899 static inline void
4900 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
4901 {
4902 	struct thread *t = &p->threads[p->thread_id];
4903 	struct instruction *ip = t->ip;
4904 
4905 	TRACE("[Thread %2u] xor (i)\n", p->thread_id);
4906 
4907 	/* Structs. */
4908 	ALU_I(t, ip, ^);
4909 
4910 	/* Thread. */
4911 	thread_ip_inc(p);
4912 }
4913 
4914 static inline void
4915 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
4916 {
4917 	struct thread *t = &p->threads[p->thread_id];
4918 	struct instruction *ip = t->ip;
4919 	uint8_t *dst_struct, *src_struct;
4920 	uint16_t *dst16_ptr, dst;
4921 	uint64_t *src64_ptr, src64, src64_mask, src;
4922 	uint64_t r;
4923 
4924 	TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
4925 
4926 	/* Structs. */
4927 	dst_struct = t->structs[ip->alu.dst.struct_id];
4928 	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4929 	dst = *dst16_ptr;
4930 
4931 	src_struct = t->structs[ip->alu.src.struct_id];
4932 	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4933 	src64 = *src64_ptr;
4934 	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4935 	src = src64 & src64_mask;
4936 
4937 	r = dst;
4938 	r = ~r & 0xFFFF;
4939 
4940 	/* The first input (r) is a 16-bit number. The second and the third
4941 	 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
4942 	 * three numbers (output r) is a 34-bit number.
4943 	 */
4944 	r += (src >> 32) + (src & 0xFFFFFFFF);
4945 
4946 	/* The first input is a 16-bit number. The second input is an 18-bit
4947 	 * number. In the worst case scenario, the sum of the two numbers is a
4948 	 * 19-bit number.
4949 	 */
4950 	r = (r & 0xFFFF) + (r >> 16);
4951 
4952 	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4953 	 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
4954 	 */
4955 	r = (r & 0xFFFF) + (r >> 16);
4956 
4957 	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4958 	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4959 	 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
4960 	 * therefore the output r is always a 16-bit number.
4961 	 */
4962 	r = (r & 0xFFFF) + (r >> 16);
4963 
4964 	r = ~r & 0xFFFF;
4965 	r = r ? r : 0xFFFF;
4966 
4967 	*dst16_ptr = (uint16_t)r;
4968 
4969 	/* Thread. */
4970 	thread_ip_inc(p);
4971 }
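
/*
 * Worked example of the fold sequence above (values are hypothetical): with a
 * current checksum dst = 0xBEEF and a 16-bit source field src = 0x1234:
 *
 *    r = ~0xBEEF & 0xFFFF = 0x4110   (one's complement sum so far)
 *    r += 0x1234          = 0x5344   (no carry, the folds leave r unchanged)
 *    r = ~0x5344 & 0xFFFF = 0xACBB   (new checksum)
 *
 * With a carry, e.g. dst = 0x1000 and src = 0xF234: r = 0xEFFF + 0xF234 =
 * 0x1E233, the first fold gives 0xE233 + 0x1 = 0xE234, and the final result
 * is ~0xE234 & 0xFFFF = 0x1DCB.
 */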
4972 
4973 static inline void
4974 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
4975 {
4976 	struct thread *t = &p->threads[p->thread_id];
4977 	struct instruction *ip = t->ip;
4978 	uint8_t *dst_struct, *src_struct;
4979 	uint16_t *dst16_ptr, dst;
4980 	uint64_t *src64_ptr, src64, src64_mask, src;
4981 	uint64_t r;
4982 
4983 	TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
4984 
4985 	/* Structs. */
4986 	dst_struct = t->structs[ip->alu.dst.struct_id];
4987 	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4988 	dst = *dst16_ptr;
4989 
4990 	src_struct = t->structs[ip->alu.src.struct_id];
4991 	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4992 	src64 = *src64_ptr;
4993 	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4994 	src = src64 & src64_mask;
4995 
4996 	r = dst;
4997 	r = ~r & 0xFFFF;
4998 
4999 	/* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
5000 	 * the following sequence of operations in 2's complement arithmetic:
5001 	 *    a '- b = (a - b) % 0xFFFF.
5002 	 *
5003 	 * In order to prevent an underflow for the below subtraction, in which
5004 	 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
5005 	 * minuend), we first add a multiple of the 0xFFFF modulus to the
5006 	 * minuend. The number we add to the minuend needs to be a 34-bit number
5007 	 * or higher, so for readability reasons we picked the 36-bit multiple.
5008 	 * We are effectively turning the 16-bit minuend into a 36-bit number:
5009 	 *    (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
5010 	 */
5011 	r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
5012 
5013 	/* A 33-bit number is subtracted from a 36-bit number (the input r). The
5014 	 * result (the output r) is a 36-bit number.
5015 	 */
5016 	r -= (src >> 32) + (src & 0xFFFFFFFF);
5017 
5018 	/* The first input is a 16-bit number. The second input is a 20-bit
5019 	 * number. Their sum is a 21-bit number.
5020 	 */
5021 	r = (r & 0xFFFF) + (r >> 16);
5022 
5023 	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5024 	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
5025 	 */
5026 	r = (r & 0xFFFF) + (r >> 16);
5027 
5028 	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5029 	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5030 	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5031 	 * generated, therefore the output r is always a 16-bit number.
5032 	 */
5033 	r = (r & 0xFFFF) + (r >> 16);
5034 
5035 	r = ~r & 0xFFFF;
5036 	r = r ? r : 0xFFFF;
5037 
5038 	*dst16_ptr = (uint16_t)r;
5039 
5040 	/* Thread. */
5041 	thread_ip_inc(p);
5042 }
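
/*
 * Worked example (hypothetical values), undoing the ckadd example above: with
 * dst = 0xACBB and the same 16-bit source field src = 0x1234:
 *
 *    r = ~0xACBB & 0xFFFF   = 0x5344
 *    r += 0xFFFF00000       = 0xFFFF05344
 *    r -= 0x1234            = 0xFFFF04110
 *    fold: 0x4110 + 0xFFFF0 = 0x104100
 *    fold: 0x4100 + 0x10    = 0x4110      (the third fold is a no-op here)
 *    r = ~0x4110 & 0xFFFF   = 0xBEEF      (original checksum recovered)
 */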
5043 
5044 static inline void
5045 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
5046 {
5047 	struct thread *t = &p->threads[p->thread_id];
5048 	struct instruction *ip = t->ip;
5049 	uint8_t *dst_struct, *src_struct;
5050 	uint16_t *dst16_ptr;
5051 	uint32_t *src32_ptr;
5052 	uint64_t r0, r1;
5053 
5054 	TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
5055 
5056 	/* Structs. */
5057 	dst_struct = t->structs[ip->alu.dst.struct_id];
5058 	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5059 
5060 	src_struct = t->structs[ip->alu.src.struct_id];
5061 	src32_ptr = (uint32_t *)&src_struct[0];
5062 
5063 	r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
5064 	r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
5065 	r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
5066 	r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
5067 	r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
5068 
5069 	/* The first input is a 16-bit number. The second input is a 19-bit
5070 	 * number. Their sum is a 20-bit number.
5071 	 */
5072 	r0 = (r0 & 0xFFFF) + (r0 >> 16);
5073 
5074 	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5075 	 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
5076 	 */
5077 	r0 = (r0 & 0xFFFF) + (r0 >> 16);
5078 
5079 	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5080 	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5081 	 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
5082 	 * generated, therefore the output r is always a 16-bit number.
5083 	 */
5084 	r0 = (r0 & 0xFFFF) + (r0 >> 16);
5085 
5086 	r0 = ~r0 & 0xFFFF;
5087 	r0 = r0 ? r0 : 0xFFFF;
5088 
5089 	*dst16_ptr = (uint16_t)r0;
5090 
5091 	/* Thread. */
5092 	thread_ip_inc(p);
5093 }
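
/*
 * This is the classic IPv4-style checksum: the 16-bit one's complement of the
 * one's complement sum of the five 32-bit words of a 20-byte header (RFC 791,
 * RFC 1071). The previous value of the destination field is not read, so when
 * generating a fresh checksum the checksum field inside the header is
 * typically zeroed before this instruction executes.
 */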
5094 
5095 static inline void
5096 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
5097 {
5098 	struct thread *t = &p->threads[p->thread_id];
5099 	struct instruction *ip = t->ip;
5100 	uint8_t *dst_struct, *src_struct;
5101 	uint16_t *dst16_ptr;
5102 	uint32_t *src32_ptr;
5103 	uint64_t r = 0;
5104 	uint32_t i;
5105 
5106 	TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
5107 
5108 	/* Structs. */
5109 	dst_struct = t->structs[ip->alu.dst.struct_id];
5110 	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5111 
5112 	src_struct = t->structs[ip->alu.src.struct_id];
5113 	src32_ptr = (uint32_t *)&src_struct[0];
5114 
5115 	/* The max number of 32-bit words in a 32-byte (256-bit) header is 8 = 2^3.
5116 	 * Therefore, in the worst case scenario, a 35-bit number is added to a
5117 	 * 16-bit number (the input r), so the output r is a 36-bit number.
5118 	 */
5119 	for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
5120 		r += *src32_ptr;
5121 
5122 	/* The first input is a 16-bit number. The second input is a 20-bit
5123 	 * number. Their sum is a 21-bit number.
5124 	 */
5125 	r = (r & 0xFFFF) + (r >> 16);
5126 
5127 	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5128 	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
5129 	 */
5130 	r = (r & 0xFFFF) + (r >> 16);
5131 
5132 	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5133 	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5134 	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5135 	 * generated, therefore the output r is always a 16-bit number.
5136 	 */
5137 	r = (r & 0xFFFF) + (r >> 16);
5138 
5139 	r = ~r & 0xFFFF;
5140 	r = r ? r : 0xFFFF;
5141 
5142 	*dst16_ptr = (uint16_t)r;
5143 
5144 	/* Thread. */
5145 	thread_ip_inc(p);
5146 }
5147 
5148 /*
5149  * Register array.
5150  */
5151 static struct regarray *
5152 regarray_find(struct rte_swx_pipeline *p, const char *name);
5153 
5154 static int
5155 instr_regprefetch_translate(struct rte_swx_pipeline *p,
5156 		      struct action *action,
5157 		      char **tokens,
5158 		      int n_tokens,
5159 		      struct instruction *instr,
5160 		      struct instruction_data *data __rte_unused)
5161 {
5162 	char *regarray = tokens[1], *idx = tokens[2];
5163 	struct regarray *r;
5164 	struct field *fidx;
5165 	uint32_t idx_struct_id, idx_val;
5166 
5167 	CHECK(n_tokens == 3, EINVAL);
5168 
5169 	r = regarray_find(p, regarray);
5170 	CHECK(r, EINVAL);
5171 
5172 	/* REGPREFETCH_RH, REGPREFETCH_RM. */
5173 	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5174 	if (fidx) {
5175 		instr->type = INSTR_REGPREFETCH_RM;
5176 		if (idx[0] == 'h')
5177 			instr->type = INSTR_REGPREFETCH_RH;
5178 
5179 		instr->regarray.regarray_id = r->id;
5180 		instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5181 		instr->regarray.idx.n_bits = fidx->n_bits;
5182 		instr->regarray.idx.offset = fidx->offset / 8;
5183 		instr->regarray.dstsrc_val = 0; /* Unused. */
5184 		return 0;
5185 	}
5186 
5187 	/* REGPREFETCH_RI. */
5188 	idx_val = strtoul(idx, &idx, 0);
5189 	CHECK(!idx[0], EINVAL);
5190 
5191 	instr->type = INSTR_REGPREFETCH_RI;
5192 	instr->regarray.regarray_id = r->id;
5193 	instr->regarray.idx_val = idx_val;
5194 	instr->regarray.dstsrc_val = 0; /* Unused. */
5195 	return 0;
5196 }
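
/*
 * The regprefetch instruction only issues a cache prefetch (rte_prefetch0) on
 * the selected register array cell, so that a later regrd/regwr/regadd using
 * the same index is more likely to hit the cache. Typical usage sketch
 * (array and field names are hypothetical):
 *
 *    regprefetch pkt_cnt m.flow_id
 *    ... other instructions ...
 *    regadd pkt_cnt m.flow_id 1
 */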
5197 
5198 static int
5199 instr_regrd_translate(struct rte_swx_pipeline *p,
5200 		      struct action *action,
5201 		      char **tokens,
5202 		      int n_tokens,
5203 		      struct instruction *instr,
5204 		      struct instruction_data *data __rte_unused)
5205 {
5206 	char *dst = tokens[1], *regarray = tokens[2], *idx = tokens[3];
5207 	struct regarray *r;
5208 	struct field *fdst, *fidx;
5209 	uint32_t dst_struct_id, idx_struct_id, idx_val;
5210 
5211 	CHECK(n_tokens == 4, EINVAL);
5212 
5213 	r = regarray_find(p, regarray);
5214 	CHECK(r, EINVAL);
5215 
5216 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
5217 	CHECK(fdst, EINVAL);
5218 
5219 	/* REGRD_HRH, REGRD_HRM, REGRD_MRH, REGRD_MRM. */
5220 	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5221 	if (fidx) {
5222 		instr->type = INSTR_REGRD_MRM;
5223 		if (dst[0] == 'h' && idx[0] != 'h')
5224 			instr->type = INSTR_REGRD_HRM;
5225 		if (dst[0] != 'h' && idx[0] == 'h')
5226 			instr->type = INSTR_REGRD_MRH;
5227 		if (dst[0] == 'h' && idx[0] == 'h')
5228 			instr->type = INSTR_REGRD_HRH;
5229 
5230 		instr->regarray.regarray_id = r->id;
5231 		instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5232 		instr->regarray.idx.n_bits = fidx->n_bits;
5233 		instr->regarray.idx.offset = fidx->offset / 8;
5234 		instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5235 		instr->regarray.dstsrc.n_bits = fdst->n_bits;
5236 		instr->regarray.dstsrc.offset = fdst->offset / 8;
5237 		return 0;
5238 	}
5239 
5240 	/* REGRD_MRI, REGRD_HRI. */
5241 	idx_val = strtoul(idx, &idx, 0);
5242 	CHECK(!idx[0], EINVAL);
5243 
5244 	instr->type = INSTR_REGRD_MRI;
5245 	if (dst[0] == 'h')
5246 		instr->type = INSTR_REGRD_HRI;
5247 
5248 	instr->regarray.regarray_id = r->id;
5249 	instr->regarray.idx_val = idx_val;
5250 	instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5251 	instr->regarray.dstsrc.n_bits = fdst->n_bits;
5252 	instr->regarray.dstsrc.offset = fdst->offset / 8;
5253 	return 0;
5254 }
5255 
5256 static int
5257 instr_regwr_translate(struct rte_swx_pipeline *p,
5258 		      struct action *action,
5259 		      char **tokens,
5260 		      int n_tokens,
5261 		      struct instruction *instr,
5262 		      struct instruction_data *data __rte_unused)
5263 {
5264 	char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5265 	struct regarray *r;
5266 	struct field *fidx, *fsrc;
5267 	uint64_t src_val;
5268 	uint32_t idx_struct_id, idx_val, src_struct_id;
5269 
5270 	CHECK(n_tokens == 4, EINVAL);
5271 
5272 	r = regarray_find(p, regarray);
5273 	CHECK(r, EINVAL);
5274 
5275 	/* REGWR_RHH, REGWR_RHM, REGWR_RMH, REGWR_RMM. */
5276 	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5277 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
5278 	if (fidx && fsrc) {
5279 		instr->type = INSTR_REGWR_RMM;
5280 		if (idx[0] == 'h' && src[0] != 'h')
5281 			instr->type = INSTR_REGWR_RHM;
5282 		if (idx[0] != 'h' && src[0] == 'h')
5283 			instr->type = INSTR_REGWR_RMH;
5284 		if (idx[0] == 'h' && src[0] == 'h')
5285 			instr->type = INSTR_REGWR_RHH;
5286 
5287 		instr->regarray.regarray_id = r->id;
5288 		instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5289 		instr->regarray.idx.n_bits = fidx->n_bits;
5290 		instr->regarray.idx.offset = fidx->offset / 8;
5291 		instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5292 		instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5293 		instr->regarray.dstsrc.offset = fsrc->offset / 8;
5294 		return 0;
5295 	}
5296 
5297 	/* REGWR_RHI, REGWR_RMI. */
5298 	if (fidx && !fsrc) {
5299 		src_val = strtoull(src, &src, 0);
5300 		CHECK(!src[0], EINVAL);
5301 
5302 		instr->type = INSTR_REGWR_RMI;
5303 		if (idx[0] == 'h')
5304 			instr->type = INSTR_REGWR_RHI;
5305 
5306 		instr->regarray.regarray_id = r->id;
5307 		instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5308 		instr->regarray.idx.n_bits = fidx->n_bits;
5309 		instr->regarray.idx.offset = fidx->offset / 8;
5310 		instr->regarray.dstsrc_val = src_val;
5311 		return 0;
5312 	}
5313 
5314 	/* REGWR_RIH, REGWR_RIM. */
5315 	if (!fidx && fsrc) {
5316 		idx_val = strtoul(idx, &idx, 0);
5317 		CHECK(!idx[0], EINVAL);
5318 
5319 		instr->type = INSTR_REGWR_RIM;
5320 		if (src[0] == 'h')
5321 			instr->type = INSTR_REGWR_RIH;
5322 
5323 		instr->regarray.regarray_id = r->id;
5324 		instr->regarray.idx_val = idx_val;
5325 		instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5326 		instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5327 		instr->regarray.dstsrc.offset = fsrc->offset / 8;
5328 		return 0;
5329 	}
5330 
5331 	/* REGWR_RII. */
5332 	src_val = strtoull(src, &src, 0);
5333 	CHECK(!src[0], EINVAL);
5334 
5335 	idx_val = strtoul(idx, &idx, 0);
5336 	CHECK(!idx[0], EINVAL);
5337 
5338 	instr->type = INSTR_REGWR_RII;
	instr->regarray.regarray_id = r->id;
5339 	instr->regarray.idx_val = idx_val;
5340 	instr->regarray.dstsrc_val = src_val;
5341 
5342 	return 0;
5343 }
5344 
5345 static int
5346 instr_regadd_translate(struct rte_swx_pipeline *p,
5347 		       struct action *action,
5348 		       char **tokens,
5349 		       int n_tokens,
5350 		       struct instruction *instr,
5351 		       struct instruction_data *data __rte_unused)
5352 {
5353 	char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5354 	struct regarray *r;
5355 	struct field *fidx, *fsrc;
5356 	uint64_t src_val;
5357 	uint32_t idx_struct_id, idx_val, src_struct_id;
5358 
5359 	CHECK(n_tokens == 4, EINVAL);
5360 
5361 	r = regarray_find(p, regarray);
5362 	CHECK(r, EINVAL);
5363 
5364 	/* REGADD_RHH, REGADD_RHM, REGADD_RMH, REGADD_RMM. */
5365 	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5366 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
5367 	if (fidx && fsrc) {
5368 		instr->type = INSTR_REGADD_RMM;
5369 		if (idx[0] == 'h' && src[0] != 'h')
5370 			instr->type = INSTR_REGADD_RHM;
5371 		if (idx[0] != 'h' && src[0] == 'h')
5372 			instr->type = INSTR_REGADD_RMH;
5373 		if (idx[0] == 'h' && src[0] == 'h')
5374 			instr->type = INSTR_REGADD_RHH;
5375 
5376 		instr->regarray.regarray_id = r->id;
5377 		instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5378 		instr->regarray.idx.n_bits = fidx->n_bits;
5379 		instr->regarray.idx.offset = fidx->offset / 8;
5380 		instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5381 		instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5382 		instr->regarray.dstsrc.offset = fsrc->offset / 8;
5383 		return 0;
5384 	}
5385 
5386 	/* REGADD_RHI, REGADD_RMI. */
5387 	if (fidx && !fsrc) {
5388 		src_val = strtoull(src, &src, 0);
5389 		CHECK(!src[0], EINVAL);
5390 
5391 		instr->type = INSTR_REGADD_RMI;
5392 		if (idx[0] == 'h')
5393 			instr->type = INSTR_REGADD_RHI;
5394 
5395 		instr->regarray.regarray_id = r->id;
5396 		instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5397 		instr->regarray.idx.n_bits = fidx->n_bits;
5398 		instr->regarray.idx.offset = fidx->offset / 8;
5399 		instr->regarray.dstsrc_val = src_val;
5400 		return 0;
5401 	}
5402 
5403 	/* REGADD_RIH, REGADD_RIM. */
5404 	if (!fidx && fsrc) {
5405 		idx_val = strtoul(idx, &idx, 0);
5406 		CHECK(!idx[0], EINVAL);
5407 
5408 		instr->type = INSTR_REGADD_RIM;
5409 		if (src[0] == 'h')
5410 			instr->type = INSTR_REGADD_RIH;
5411 
5412 		instr->regarray.regarray_id = r->id;
5413 		instr->regarray.idx_val = idx_val;
5414 		instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5415 		instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5416 		instr->regarray.dstsrc.offset = fsrc->offset / 8;
5417 		return 0;
5418 	}
5419 
5420 	/* REGADD_RII. */
5421 	src_val = strtoull(src, &src, 0);
5422 	CHECK(!src[0], EINVAL);
5423 
5424 	idx_val = strtoul(idx, &idx, 0);
5425 	CHECK(!idx[0], EINVAL);
5426 
5427 	instr->type = INSTR_REGADD_RII;
	instr->regarray.regarray_id = r->id;
5428 	instr->regarray.idx_val = idx_val;
5429 	instr->regarray.dstsrc_val = src_val;
5430 	return 0;
5431 }
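
/*
 * Illustrative mapping for the register array translators above (array and
 * field names are hypothetical):
 *
 *    regadd pkt_cnt m.flow_id 1   -> INSTR_REGADD_RMI (meta index, imm src)
 *    regadd pkt_cnt h.ipv4.ttl 1  -> INSTR_REGADD_RHI (header index, imm src)
 *    regadd pkt_cnt 0 m.pkt_len   -> INSTR_REGADD_RIM (imm index, meta src)
 *    regadd pkt_cnt 7 100         -> INSTR_REGADD_RII (imm index, imm src)
 *
 * regwr follows the same R<index><source> suffix pattern, while regrd encodes
 * the destination first (<dst>R<index>, e.g. INSTR_REGRD_MRI).
 */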
5432 
5433 static inline uint64_t *
5434 instr_regarray_regarray(struct rte_swx_pipeline *p, struct instruction *ip)
5435 {
5436 	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5437 	return r->regarray;
5438 }
5439 
5440 static inline uint64_t
5441 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5442 {
5443 	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5444 
5445 	uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5446 	uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5447 	uint64_t idx64 = *idx64_ptr;
5448 	uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
5449 	uint64_t idx = idx64 & idx64_mask & r->size_mask;
5450 
5451 	return idx;
5452 }
5453 
5454 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5455 
5456 static inline uint64_t
5457 instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5458 {
5459 	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5460 
5461 	uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5462 	uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5463 	uint64_t idx64 = *idx64_ptr;
5464 	uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
5465 
5466 	return idx;
5467 }
5468 
5469 #else
5470 
5471 #define instr_regarray_idx_nbo instr_regarray_idx_hbo
5472 
5473 #endif
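
/*
 * Example for the network byte order index above (hypothetical 16-bit header
 * field holding the bytes 0x01 0x02): on a little-endian CPU the unaligned
 * 64-bit load puts 0x01 into the least significant byte of idx64; ntoh64()
 * moves it to the most significant byte, and the right shift by (64 - 16)
 * bits leaves the host order value 0x0102 = 258, which size_mask then wraps
 * into the register array bounds (the array size being a power of two).
 */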
5474 
5475 static inline uint64_t
5476 instr_regarray_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
5477 {
5478 	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5479 
5480 	uint64_t idx = ip->regarray.idx_val & r->size_mask;
5481 
5482 	return idx;
5483 }
5484 
5485 static inline uint64_t
5486 instr_regarray_src_hbo(struct thread *t, struct instruction *ip)
5487 {
5488 	uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5489 	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5490 	uint64_t src64 = *src64_ptr;
5491 	uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5492 	uint64_t src = src64 & src64_mask;
5493 
5494 	return src;
5495 }
5496 
5497 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5498 
5499 static inline uint64_t
5500 instr_regarray_src_nbo(struct thread *t, struct instruction *ip)
5501 {
5502 	uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5503 	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5504 	uint64_t src64 = *src64_ptr;
5505 	uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
5506 
5507 	return src;
5508 }
5509 
5510 #else
5511 
5512 #define instr_regarray_src_nbo instr_regarray_src_hbo
5513 
5514 #endif
5515 
5516 static inline void
5517 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5518 {
5519 	uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5520 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5521 	uint64_t dst64 = *dst64_ptr;
5522 	uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5523 
5524 	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
5525 
5526 }
5527 
5528 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5529 
5530 static inline void
5531 instr_regarray_dst_nbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5532 {
5533 	uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5534 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5535 	uint64_t dst64 = *dst64_ptr;
5536 	uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5537 
5538 	src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
5539 	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
5540 }
5541 
5542 #else
5543 
5544 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
5545 
5546 #endif
5547 
5548 static inline void
5549 instr_regprefetch_rh_exec(struct rte_swx_pipeline *p)
5550 {
5551 	struct thread *t = &p->threads[p->thread_id];
5552 	struct instruction *ip = t->ip;
5553 	uint64_t *regarray, idx;
5554 
5555 	TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);
5556 
5557 	/* Structs. */
5558 	regarray = instr_regarray_regarray(p, ip);
5559 	idx = instr_regarray_idx_nbo(p, t, ip);
5560 	rte_prefetch0(&regarray[idx]);
5561 
5562 	/* Thread. */
5563 	thread_ip_inc(p);
5564 }
5565 
5566 static inline void
5567 instr_regprefetch_rm_exec(struct rte_swx_pipeline *p)
5568 {
5569 	struct thread *t = &p->threads[p->thread_id];
5570 	struct instruction *ip = t->ip;
5571 	uint64_t *regarray, idx;
5572 
5573 	TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);
5574 
5575 	/* Structs. */
5576 	regarray = instr_regarray_regarray(p, ip);
5577 	idx = instr_regarray_idx_hbo(p, t, ip);
5578 	rte_prefetch0(&regarray[idx]);
5579 
5580 	/* Thread. */
5581 	thread_ip_inc(p);
5582 }
5583 
5584 static inline void
5585 instr_regprefetch_ri_exec(struct rte_swx_pipeline *p)
5586 {
5587 	struct thread *t = &p->threads[p->thread_id];
5588 	struct instruction *ip = t->ip;
5589 	uint64_t *regarray, idx;
5590 
5591 	TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);
5592 
5593 	/* Structs. */
5594 	regarray = instr_regarray_regarray(p, ip);
5595 	idx = instr_regarray_idx_imm(p, ip);
5596 	rte_prefetch0(&regarray[idx]);
5597 
5598 	/* Thread. */
5599 	thread_ip_inc(p);
5600 }
5601 
5602 static inline void
5603 instr_regrd_hrh_exec(struct rte_swx_pipeline *p)
5604 {
5605 	struct thread *t = &p->threads[p->thread_id];
5606 	struct instruction *ip = t->ip;
5607 	uint64_t *regarray, idx;
5608 
5609 	TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);
5610 
5611 	/* Structs. */
5612 	regarray = instr_regarray_regarray(p, ip);
5613 	idx = instr_regarray_idx_nbo(p, t, ip);
5614 	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5615 
5616 	/* Thread. */
5617 	thread_ip_inc(p);
5618 }
5619 
5620 static inline void
5621 instr_regrd_hrm_exec(struct rte_swx_pipeline *p)
5622 {
5623 	struct thread *t = &p->threads[p->thread_id];
5624 	struct instruction *ip = t->ip;
5625 	uint64_t *regarray, idx;
5626 
5627 	TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);
5628 
5629 	/* Structs. */
5630 	regarray = instr_regarray_regarray(p, ip);
5631 	idx = instr_regarray_idx_hbo(p, t, ip);
5632 	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5633 
5634 	/* Thread. */
5635 	thread_ip_inc(p);
5636 }
5637 
5638 static inline void
5639 instr_regrd_mrh_exec(struct rte_swx_pipeline *p)
5640 {
5641 	struct thread *t = &p->threads[p->thread_id];
5642 	struct instruction *ip = t->ip;
5643 	uint64_t *regarray, idx;
5644 
5645 	TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);
5646 
5647 	/* Structs. */
5648 	regarray = instr_regarray_regarray(p, ip);
5649 	idx = instr_regarray_idx_nbo(p, t, ip);
5650 	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5651 
5652 	/* Thread. */
5653 	thread_ip_inc(p);
5654 }
5655 
5656 static inline void
5657 instr_regrd_mrm_exec(struct rte_swx_pipeline *p)
5658 {
5659 	struct thread *t = &p->threads[p->thread_id];
5660 	struct instruction *ip = t->ip;
5661 	uint64_t *regarray, idx;
5662 
	TRACE("[Thread %2u] regrd (m = r[m])\n", p->thread_id);

5663 	/* Structs. */
5664 	regarray = instr_regarray_regarray(p, ip);
5665 	idx = instr_regarray_idx_hbo(p, t, ip);
5666 	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5667 
5668 	/* Thread. */
5669 	thread_ip_inc(p);
5670 }
5671 
5672 static inline void
5673 instr_regrd_hri_exec(struct rte_swx_pipeline *p)
5674 {
5675 	struct thread *t = &p->threads[p->thread_id];
5676 	struct instruction *ip = t->ip;
5677 	uint64_t *regarray, idx;
5678 
5679 	TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);
5680 
5681 	/* Structs. */
5682 	regarray = instr_regarray_regarray(p, ip);
5683 	idx = instr_regarray_idx_imm(p, ip);
5684 	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5685 
5686 	/* Thread. */
5687 	thread_ip_inc(p);
5688 }
5689 
5690 static inline void
5691 instr_regrd_mri_exec(struct rte_swx_pipeline *p)
5692 {
5693 	struct thread *t = &p->threads[p->thread_id];
5694 	struct instruction *ip = t->ip;
5695 	uint64_t *regarray, idx;
5696 
5697 	TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);
5698 
5699 	/* Structs. */
5700 	regarray = instr_regarray_regarray(p, ip);
5701 	idx = instr_regarray_idx_imm(p, ip);
5702 	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5703 
5704 	/* Thread. */
5705 	thread_ip_inc(p);
5706 }
5707 
5708 static inline void
5709 instr_regwr_rhh_exec(struct rte_swx_pipeline *p)
5710 {
5711 	struct thread *t = &p->threads[p->thread_id];
5712 	struct instruction *ip = t->ip;
5713 	uint64_t *regarray, idx, src;
5714 
5715 	TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
5716 
5717 	/* Structs. */
5718 	regarray = instr_regarray_regarray(p, ip);
5719 	idx = instr_regarray_idx_nbo(p, t, ip);
5720 	src = instr_regarray_src_nbo(t, ip);
5721 	regarray[idx] = src;
5722 
5723 	/* Thread. */
5724 	thread_ip_inc(p);
5725 }
5726 
5727 static inline void
5728 instr_regwr_rhm_exec(struct rte_swx_pipeline *p)
5729 {
5730 	struct thread *t = &p->threads[p->thread_id];
5731 	struct instruction *ip = t->ip;
5732 	uint64_t *regarray, idx, src;
5733 
5734 	TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
5735 
5736 	/* Structs. */
5737 	regarray = instr_regarray_regarray(p, ip);
5738 	idx = instr_regarray_idx_nbo(p, t, ip);
5739 	src = instr_regarray_src_hbo(t, ip);
5740 	regarray[idx] = src;
5741 
5742 	/* Thread. */
5743 	thread_ip_inc(p);
5744 }
5745 
5746 static inline void
5747 instr_regwr_rmh_exec(struct rte_swx_pipeline *p)
5748 {
5749 	struct thread *t = &p->threads[p->thread_id];
5750 	struct instruction *ip = t->ip;
5751 	uint64_t *regarray, idx, src;
5752 
5753 	TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
5754 
5755 	/* Structs. */
5756 	regarray = instr_regarray_regarray(p, ip);
5757 	idx = instr_regarray_idx_hbo(p, t, ip);
5758 	src = instr_regarray_src_nbo(t, ip);
5759 	regarray[idx] = src;
5760 
5761 	/* Thread. */
5762 	thread_ip_inc(p);
5763 }
5764 
5765 static inline void
5766 instr_regwr_rmm_exec(struct rte_swx_pipeline *p)
5767 {
5768 	struct thread *t = &p->threads[p->thread_id];
5769 	struct instruction *ip = t->ip;
5770 	uint64_t *regarray, idx, src;
5771 
5772 	TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
5773 
5774 	/* Structs. */
5775 	regarray = instr_regarray_regarray(p, ip);
5776 	idx = instr_regarray_idx_hbo(p, t, ip);
5777 	src = instr_regarray_src_hbo(t, ip);
5778 	regarray[idx] = src;
5779 
5780 	/* Thread. */
5781 	thread_ip_inc(p);
5782 }
5783 
5784 static inline void
5785 instr_regwr_rhi_exec(struct rte_swx_pipeline *p)
5786 {
5787 	struct thread *t = &p->threads[p->thread_id];
5788 	struct instruction *ip = t->ip;
5789 	uint64_t *regarray, idx, src;
5790 
5791 	TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
5792 
5793 	/* Structs. */
5794 	regarray = instr_regarray_regarray(p, ip);
5795 	idx = instr_regarray_idx_nbo(p, t, ip);
5796 	src = ip->regarray.dstsrc_val;
5797 	regarray[idx] = src;
5798 
5799 	/* Thread. */
5800 	thread_ip_inc(p);
5801 }
5802 
5803 static inline void
5804 instr_regwr_rmi_exec(struct rte_swx_pipeline *p)
5805 {
5806 	struct thread *t = &p->threads[p->thread_id];
5807 	struct instruction *ip = t->ip;
5808 	uint64_t *regarray, idx, src;
5809 
5810 	TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
5811 
5812 	/* Structs. */
5813 	regarray = instr_regarray_regarray(p, ip);
5814 	idx = instr_regarray_idx_hbo(p, t, ip);
5815 	src = ip->regarray.dstsrc_val;
5816 	regarray[idx] = src;
5817 
5818 	/* Thread. */
5819 	thread_ip_inc(p);
5820 }
5821 
5822 static inline void
5823 instr_regwr_rih_exec(struct rte_swx_pipeline *p)
5824 {
5825 	struct thread *t = &p->threads[p->thread_id];
5826 	struct instruction *ip = t->ip;
5827 	uint64_t *regarray, idx, src;
5828 
5829 	TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
5830 
5831 	/* Structs. */
5832 	regarray = instr_regarray_regarray(p, ip);
5833 	idx = instr_regarray_idx_imm(p, ip);
5834 	src = instr_regarray_src_nbo(t, ip);
5835 	regarray[idx] = src;
5836 
5837 	/* Thread. */
5838 	thread_ip_inc(p);
5839 }
5840 
5841 static inline void
5842 instr_regwr_rim_exec(struct rte_swx_pipeline *p)
5843 {
5844 	struct thread *t = &p->threads[p->thread_id];
5845 	struct instruction *ip = t->ip;
5846 	uint64_t *regarray, idx, src;
5847 
5848 	TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
5849 
5850 	/* Structs. */
5851 	regarray = instr_regarray_regarray(p, ip);
5852 	idx = instr_regarray_idx_imm(p, ip);
5853 	src = instr_regarray_src_hbo(t, ip);
5854 	regarray[idx] = src;
5855 
5856 	/* Thread. */
5857 	thread_ip_inc(p);
5858 }
5859 
5860 static inline void
5861 instr_regwr_rii_exec(struct rte_swx_pipeline *p)
5862 {
5863 	struct thread *t = &p->threads[p->thread_id];
5864 	struct instruction *ip = t->ip;
5865 	uint64_t *regarray, idx, src;
5866 
5867 	TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
5868 
5869 	/* Structs. */
5870 	regarray = instr_regarray_regarray(p, ip);
5871 	idx = instr_regarray_idx_imm(p, ip);
5872 	src = ip->regarray.dstsrc_val;
5873 	regarray[idx] = src;
5874 
5875 	/* Thread. */
5876 	thread_ip_inc(p);
5877 }
5878 
5879 static inline void
5880 instr_regadd_rhh_exec(struct rte_swx_pipeline *p)
5881 {
5882 	struct thread *t = &p->threads[p->thread_id];
5883 	struct instruction *ip = t->ip;
5884 	uint64_t *regarray, idx, src;
5885 
5886 	TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
5887 
5888 	/* Structs. */
5889 	regarray = instr_regarray_regarray(p, ip);
5890 	idx = instr_regarray_idx_nbo(p, t, ip);
5891 	src = instr_regarray_src_nbo(t, ip);
5892 	regarray[idx] += src;
5893 
5894 	/* Thread. */
5895 	thread_ip_inc(p);
5896 }
5897 
5898 static inline void
5899 instr_regadd_rhm_exec(struct rte_swx_pipeline *p)
5900 {
5901 	struct thread *t = &p->threads[p->thread_id];
5902 	struct instruction *ip = t->ip;
5903 	uint64_t *regarray, idx, src;
5904 
5905 	TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
5906 
5907 	/* Structs. */
5908 	regarray = instr_regarray_regarray(p, ip);
5909 	idx = instr_regarray_idx_nbo(p, t, ip);
5910 	src = instr_regarray_src_hbo(t, ip);
5911 	regarray[idx] += src;
5912 
5913 	/* Thread. */
5914 	thread_ip_inc(p);
5915 }
5916 
5917 static inline void
5918 instr_regadd_rmh_exec(struct rte_swx_pipeline *p)
5919 {
5920 	struct thread *t = &p->threads[p->thread_id];
5921 	struct instruction *ip = t->ip;
5922 	uint64_t *regarray, idx, src;
5923 
5924 	TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
5925 
5926 	/* Structs. */
5927 	regarray = instr_regarray_regarray(p, ip);
5928 	idx = instr_regarray_idx_hbo(p, t, ip);
5929 	src = instr_regarray_src_nbo(t, ip);
5930 	regarray[idx] += src;
5931 
5932 	/* Thread. */
5933 	thread_ip_inc(p);
5934 }
5935 
5936 static inline void
5937 instr_regadd_rmm_exec(struct rte_swx_pipeline *p)
5938 {
5939 	struct thread *t = &p->threads[p->thread_id];
5940 	struct instruction *ip = t->ip;
5941 	uint64_t *regarray, idx, src;
5942 
5943 	TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
5944 
5945 	/* Structs. */
5946 	regarray = instr_regarray_regarray(p, ip);
5947 	idx = instr_regarray_idx_hbo(p, t, ip);
5948 	src = instr_regarray_src_hbo(t, ip);
5949 	regarray[idx] += src;
5950 
5951 	/* Thread. */
5952 	thread_ip_inc(p);
5953 }
5954 
5955 static inline void
5956 instr_regadd_rhi_exec(struct rte_swx_pipeline *p)
5957 {
5958 	struct thread *t = &p->threads[p->thread_id];
5959 	struct instruction *ip = t->ip;
5960 	uint64_t *regarray, idx, src;
5961 
5962 	TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
5963 
5964 	/* Structs. */
5965 	regarray = instr_regarray_regarray(p, ip);
5966 	idx = instr_regarray_idx_nbo(p, t, ip);
5967 	src = ip->regarray.dstsrc_val;
5968 	regarray[idx] += src;
5969 
5970 	/* Thread. */
5971 	thread_ip_inc(p);
5972 }
5973 
5974 static inline void
5975 instr_regadd_rmi_exec(struct rte_swx_pipeline *p)
5976 {
5977 	struct thread *t = &p->threads[p->thread_id];
5978 	struct instruction *ip = t->ip;
5979 	uint64_t *regarray, idx, src;
5980 
5981 	TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
5982 
5983 	/* Structs. */
5984 	regarray = instr_regarray_regarray(p, ip);
5985 	idx = instr_regarray_idx_hbo(p, t, ip);
5986 	src = ip->regarray.dstsrc_val;
5987 	regarray[idx] += src;
5988 
5989 	/* Thread. */
5990 	thread_ip_inc(p);
5991 }
5992 
5993 static inline void
5994 instr_regadd_rih_exec(struct rte_swx_pipeline *p)
5995 {
5996 	struct thread *t = &p->threads[p->thread_id];
5997 	struct instruction *ip = t->ip;
5998 	uint64_t *regarray, idx, src;
5999 
6000 	TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
6001 
6002 	/* Structs. */
6003 	regarray = instr_regarray_regarray(p, ip);
6004 	idx = instr_regarray_idx_imm(p, ip);
6005 	src = instr_regarray_src_nbo(t, ip);
6006 	regarray[idx] += src;
6007 
6008 	/* Thread. */
6009 	thread_ip_inc(p);
6010 }
6011 
6012 static inline void
6013 instr_regadd_rim_exec(struct rte_swx_pipeline *p)
6014 {
6015 	struct thread *t = &p->threads[p->thread_id];
6016 	struct instruction *ip = t->ip;
6017 	uint64_t *regarray, idx, src;
6018 
6019 	TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
6020 
6021 	/* Structs. */
6022 	regarray = instr_regarray_regarray(p, ip);
6023 	idx = instr_regarray_idx_imm(p, ip);
6024 	src = instr_regarray_src_hbo(t, ip);
6025 	regarray[idx] += src;
6026 
6027 	/* Thread. */
6028 	thread_ip_inc(p);
6029 }
6030 
6031 static inline void
6032 instr_regadd_rii_exec(struct rte_swx_pipeline *p)
6033 {
6034 	struct thread *t = &p->threads[p->thread_id];
6035 	struct instruction *ip = t->ip;
6036 	uint64_t *regarray, idx, src;
6037 
6038 	TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
6039 
6040 	/* Structs. */
6041 	regarray = instr_regarray_regarray(p, ip);
6042 	idx = instr_regarray_idx_imm(p, ip);
6043 	src = ip->regarray.dstsrc_val;
6044 	regarray[idx] += src;
6045 
6046 	/* Thread. */
6047 	thread_ip_inc(p);
6048 }
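
/*
 * Naming convention for the regwr/regadd execution variants above: after the
 * leading 'r' (register array), the first letter is the source of the array
 * index and the second letter the source of the value. 'h' is a header field
 * (network byte order, read through the _nbo helpers), 'm' is any non-header
 * struct field such as meta-data or action data (host byte order, read
 * through the _hbo helpers) and 'i' is an immediate pre-computed at
 * translation time (ip->regarray.dstsrc_val). For example, regadd_rmi adds
 * an immediate value to the register selected by a host byte order index
 * field.
 */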
6049 
6050 /*
6051  * metarray.
6052  */
6053 static struct metarray *
6054 metarray_find(struct rte_swx_pipeline *p, const char *name);
6055 
6056 static int
6057 instr_metprefetch_translate(struct rte_swx_pipeline *p,
6058 			    struct action *action,
6059 			    char **tokens,
6060 			    int n_tokens,
6061 			    struct instruction *instr,
6062 			    struct instruction_data *data __rte_unused)
6063 {
6064 	char *metarray = tokens[1], *idx = tokens[2];
6065 	struct metarray *m;
6066 	struct field *fidx;
6067 	uint32_t idx_struct_id, idx_val;
6068 
6069 	CHECK(n_tokens == 3, EINVAL);
6070 
6071 	m = metarray_find(p, metarray);
6072 	CHECK(m, EINVAL);
6073 
6074 	/* METPREFETCH_H, METPREFETCH_M. */
6075 	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6076 	if (fidx) {
6077 		instr->type = INSTR_METPREFETCH_M;
6078 		if (idx[0] == 'h')
6079 			instr->type = INSTR_METPREFETCH_H;
6080 
6081 		instr->meter.metarray_id = m->id;
6082 		instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6083 		instr->meter.idx.n_bits = fidx->n_bits;
6084 		instr->meter.idx.offset = fidx->offset / 8;
6085 		return 0;
6086 	}
6087 
6088 	/* METPREFETCH_I. */
6089 	idx_val = strtoul(idx, &idx, 0);
6090 	CHECK(!idx[0], EINVAL);
6091 
6092 	instr->type = INSTR_METPREFETCH_I;
6093 	instr->meter.metarray_id = m->id;
6094 	instr->meter.idx_val = idx_val;
6095 	return 0;
6096 }
6097 
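/*
 * The metprefetch instruction takes the meter array name followed by the
 * meter index, where the index is either a struct field or an immediate
 * value, e.g. (array and field names illustrative only):
 *
 *	metprefetch meters m.meter_idx
 *	metprefetch meters 0
 */
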
6098 static int
6099 instr_meter_translate(struct rte_swx_pipeline *p,
6100 		      struct action *action,
6101 		      char **tokens,
6102 		      int n_tokens,
6103 		      struct instruction *instr,
6104 		      struct instruction_data *data __rte_unused)
6105 {
6106 	char *metarray = tokens[1], *idx = tokens[2], *length = tokens[3];
6107 	char *color_in = tokens[4], *color_out = tokens[5];
6108 	struct metarray *m;
6109 	struct field *fidx, *flength, *fcin, *fcout;
6110 	uint32_t idx_struct_id, length_struct_id;
6111 	uint32_t color_in_struct_id, color_out_struct_id;
6112 
6113 	CHECK(n_tokens == 6, EINVAL);
6114 
6115 	m = metarray_find(p, metarray);
6116 	CHECK(m, EINVAL);
6117 
6118 	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6119 
6120 	flength = struct_field_parse(p, action, length, &length_struct_id);
6121 	CHECK(flength, EINVAL);
6122 
6123 	fcin = struct_field_parse(p, action, color_in, &color_in_struct_id);
6124 
6125 	fcout = struct_field_parse(p, NULL, color_out, &color_out_struct_id);
6126 	CHECK(fcout, EINVAL);
6127 
6128 	/* index = HMEFT, length = HMEFT, color_in = MEFT, color_out = MEF. */
6129 	if (fidx && fcin) {
6130 		instr->type = INSTR_METER_MMM;
6131 		if (idx[0] == 'h' && length[0] == 'h')
6132 			instr->type = INSTR_METER_HHM;
6133 		if (idx[0] == 'h' && length[0] != 'h')
6134 			instr->type = INSTR_METER_HMM;
6135 		if (idx[0] != 'h' && length[0] == 'h')
6136 			instr->type = INSTR_METER_MHM;
6137 
6138 		instr->meter.metarray_id = m->id;
6139 
6140 		instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6141 		instr->meter.idx.n_bits = fidx->n_bits;
6142 		instr->meter.idx.offset = fidx->offset / 8;
6143 
6144 		instr->meter.length.struct_id = (uint8_t)length_struct_id;
6145 		instr->meter.length.n_bits = flength->n_bits;
6146 		instr->meter.length.offset = flength->offset / 8;
6147 
6148 		instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6149 		instr->meter.color_in.n_bits = fcin->n_bits;
6150 		instr->meter.color_in.offset = fcin->offset / 8;
6151 
6152 		instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6153 		instr->meter.color_out.n_bits = fcout->n_bits;
6154 		instr->meter.color_out.offset = fcout->offset / 8;
6155 
6156 		return 0;
6157 	}
6158 
6159 	/* index = HMEFT, length = HMEFT, color_in = I, color_out = MEF. */
6160 	if (fidx && !fcin) {
6161 		uint32_t color_in_val = strtoul(color_in, &color_in, 0);
6162 		CHECK(!color_in[0], EINVAL);
6163 
6164 		instr->type = INSTR_METER_MMI;
6165 		if (idx[0] == 'h' && length[0] == 'h')
6166 			instr->type = INSTR_METER_HHI;
6167 		if (idx[0] == 'h' && length[0] != 'h')
6168 			instr->type = INSTR_METER_HMI;
6169 		if (idx[0] != 'h' && length[0] == 'h')
6170 			instr->type = INSTR_METER_MHI;
6171 
6172 		instr->meter.metarray_id = m->id;
6173 
6174 		instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6175 		instr->meter.idx.n_bits = fidx->n_bits;
6176 		instr->meter.idx.offset = fidx->offset / 8;
6177 
6178 		instr->meter.length.struct_id = (uint8_t)length_struct_id;
6179 		instr->meter.length.n_bits = flength->n_bits;
6180 		instr->meter.length.offset = flength->offset / 8;
6181 
6182 		instr->meter.color_in_val = color_in_val;
6183 
6184 		instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6185 		instr->meter.color_out.n_bits = fcout->n_bits;
6186 		instr->meter.color_out.offset = fcout->offset / 8;
6187 
6188 		return 0;
6189 	}
6190 
6191 	/* index = I, length = HMEFT, color_in = MEFT, color_out = MEF. */
6192 	if (!fidx && fcin) {
6193 		uint32_t idx_val;
6194 
6195 		idx_val = strtoul(idx, &idx, 0);
6196 		CHECK(!idx[0], EINVAL);
6197 
6198 		instr->type = INSTR_METER_IMM;
6199 		if (length[0] == 'h')
6200 			instr->type = INSTR_METER_IHM;
6201 
6202 		instr->meter.metarray_id = m->id;
6203 
6204 		instr->meter.idx_val = idx_val;
6205 
6206 		instr->meter.length.struct_id = (uint8_t)length_struct_id;
6207 		instr->meter.length.n_bits = flength->n_bits;
6208 		instr->meter.length.offset = flength->offset / 8;
6209 
6210 		instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6211 		instr->meter.color_in.n_bits = fcin->n_bits;
6212 		instr->meter.color_in.offset = fcin->offset / 8;
6213 
6214 		instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6215 		instr->meter.color_out.n_bits = fcout->n_bits;
6216 		instr->meter.color_out.offset = fcout->offset / 8;
6217 
6218 		return 0;
6219 	}
6220 
6221 	/* index = I, length = HMEFT, color_in = I, color_out = MEF. */
6222 	if (!fidx && !fcin) {
6223 		uint32_t idx_val, color_in_val;
6224 
6225 		idx_val = strtoul(idx, &idx, 0);
6226 		CHECK(!idx[0], EINVAL);
6227 
6228 		color_in_val = strtoul(color_in, &color_in, 0);
6229 		CHECK(!color_in[0], EINVAL);
6230 
6231 		instr->type = INSTR_METER_IMI;
6232 		if (length[0] == 'h')
6233 			instr->type = INSTR_METER_IHI;
6234 
6235 		instr->meter.metarray_id = m->id;
6236 
6237 		instr->meter.idx_val = idx_val;
6238 
6239 		instr->meter.length.struct_id = (uint8_t)length_struct_id;
6240 		instr->meter.length.n_bits = flength->n_bits;
6241 		instr->meter.length.offset = flength->offset / 8;
6242 
6243 		instr->meter.color_in_val = color_in_val;
6244 
6245 		instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6246 		instr->meter.color_out.n_bits = fcout->n_bits;
6247 		instr->meter.color_out.offset = fcout->offset / 8;
6248 
6249 		return 0;
6250 	}
6251 
6252 	CHECK(0, EINVAL);
6253 }
6254 
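/*
 * The meter instruction is parsed as:
 *
 *	meter METARRAY INDEX LENGTH COLOR_IN COLOR_OUT
 *
 * e.g. (array and field names illustrative only):
 *
 *	meter meters m.meter_idx h.ipv4.total_len m.color_in m.color_out
 *
 * LENGTH and COLOR_OUT must be struct fields, while INDEX and COLOR_IN may
 * each be either a struct field or an immediate value; this choice, together
 * with the header vs non-header distinction for INDEX and LENGTH, selects
 * one of the INSTR_METER_* opcodes handled below.
 */
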
6255 static inline struct meter *
6256 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6257 {
6258 	struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6259 
6260 	uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6261 	uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6262 	uint64_t idx64 = *idx64_ptr;
6263 	uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
6264 	uint64_t idx = idx64 & idx64_mask & r->size_mask;
6265 
6266 	return &r->metarray[idx];
6267 }
6268 
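/*
 * The _hbo index helper masks out the low n_bits of the 64-bit word read at
 * the field offset (host byte order), while the _nbo helper below byte-swaps
 * that word and shifts it right so that the value of a big-endian header
 * field narrower than 64 bits lands in the least significant bits. On
 * big-endian hosts _nbo is simply aliased to _hbo. The meter length helpers
 * further down follow the same pattern.
 */
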
6269 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6270 
6271 static inline struct meter *
6272 instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6273 {
6274 	struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6275 
6276 	uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6277 	uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6278 	uint64_t idx64 = *idx64_ptr;
6279 	uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
6280 
6281 	return &r->metarray[idx];
6282 }
6283 
6284 #else
6285 
6286 #define instr_meter_idx_nbo instr_meter_idx_hbo
6287 
6288 #endif
6289 
6290 static inline struct meter *
6291 instr_meter_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
6292 {
6293 	struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6294 
6295 	uint64_t idx = ip->meter.idx_val & r->size_mask;
6296 
6297 	return &r->metarray[idx];
6298 }
6299 
6300 static inline uint32_t
6301 instr_meter_length_hbo(struct thread *t, struct instruction *ip)
6302 {
6303 	uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6304 	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6305 	uint64_t src64 = *src64_ptr;
6306 	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
6307 	uint64_t src = src64 & src64_mask;
6308 
6309 	return (uint32_t)src;
6310 }
6311 
6312 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6313 
6314 static inline uint32_t
6315 instr_meter_length_nbo(struct thread *t, struct instruction *ip)
6316 {
6317 	uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6318 	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6319 	uint64_t src64 = *src64_ptr;
6320 	uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
6321 
6322 	return (uint32_t)src;
6323 }
6324 
6325 #else
6326 
6327 #define instr_meter_length_nbo instr_meter_length_hbo
6328 
6329 #endif
6330 
6331 static inline enum rte_color
6332 instr_meter_color_in_hbo(struct thread *t, struct instruction *ip)
6333 {
6334 	uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
6335 	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
6336 	uint64_t src64 = *src64_ptr;
6337 	uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
6338 	uint64_t src = src64 & src64_mask;
6339 
6340 	return (enum rte_color)src;
6341 }
6342 
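/*
 * Store the output color into the destination field with a read-modify-write
 * so that only the low n_bits of the destination 64-bit word are changed and
 * all neighbouring bits are preserved.
 */
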
6343 static inline void
6344 instr_meter_color_out_hbo_set(struct thread *t, struct instruction *ip, enum rte_color color_out)
6345 {
6346 	uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
6347 	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
6348 	uint64_t dst64 = *dst64_ptr;
6349 	uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
6350 
6351 	uint64_t src = (uint64_t)color_out;
6352 
6353 	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
6354 }
6355 
6356 static inline void
6357 instr_metprefetch_h_exec(struct rte_swx_pipeline *p)
6358 {
6359 	struct thread *t = &p->threads[p->thread_id];
6360 	struct instruction *ip = t->ip;
6361 	struct meter *m;
6362 
6363 	TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
6364 
6365 	/* Structs. */
6366 	m = instr_meter_idx_nbo(p, t, ip);
6367 	rte_prefetch0(m);
6368 
6369 	/* Thread. */
6370 	thread_ip_inc(p);
6371 }
6372 
6373 static inline void
6374 instr_metprefetch_m_exec(struct rte_swx_pipeline *p)
6375 {
6376 	struct thread *t = &p->threads[p->thread_id];
6377 	struct instruction *ip = t->ip;
6378 	struct meter *m;
6379 
6380 	TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
6381 
6382 	/* Structs. */
6383 	m = instr_meter_idx_hbo(p, t, ip);
6384 	rte_prefetch0(m);
6385 
6386 	/* Thread. */
6387 	thread_ip_inc(p);
6388 }
6389 
6390 static inline void
6391 instr_metprefetch_i_exec(struct rte_swx_pipeline *p)
6392 {
6393 	struct thread *t = &p->threads[p->thread_id];
6394 	struct instruction *ip = t->ip;
6395 	struct meter *m;
6396 
6397 	TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
6398 
6399 	/* Structs. */
6400 	m = instr_meter_idx_imm(p, ip);
6401 	rte_prefetch0(m);
6402 
6403 	/* Thread. */
6404 	thread_ip_inc(p);
6405 }
6406 
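/*
 * All instr_meter_*_exec variants below share the same body: prefetch the
 * per-color statistics, read the current TSC timestamp, run the color-aware
 * trTCM check, mask the result with the meter color mask, update the packet
 * and byte counters of the output color and write the output color back to
 * the destination field. They differ only in where the meter index, packet
 * length and input color come from (header field, other struct field or
 * immediate), as encoded by the three-letter suffix.
 */
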
6407 static inline void
6408 instr_meter_hhm_exec(struct rte_swx_pipeline *p)
6409 {
6410 	struct thread *t = &p->threads[p->thread_id];
6411 	struct instruction *ip = t->ip;
6412 	struct meter *m;
6413 	uint64_t time, n_pkts, n_bytes;
6414 	uint32_t length;
6415 	enum rte_color color_in, color_out;
6416 
6417 	TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);
6418 
6419 	/* Structs. */
6420 	m = instr_meter_idx_nbo(p, t, ip);
6421 	rte_prefetch0(m->n_pkts);
6422 	time = rte_get_tsc_cycles();
6423 	length = instr_meter_length_nbo(t, ip);
6424 	color_in = instr_meter_color_in_hbo(t, ip);
6425 
6426 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6427 		&m->profile->profile,
6428 		time,
6429 		length,
6430 		color_in);
6431 
6432 	color_out &= m->color_mask;
6433 
6434 	n_pkts = m->n_pkts[color_out];
6435 	n_bytes = m->n_bytes[color_out];
6436 
6437 	instr_meter_color_out_hbo_set(t, ip, color_out);
6438 
6439 	m->n_pkts[color_out] = n_pkts + 1;
6440 	m->n_bytes[color_out] = n_bytes + length;
6441 
6442 	/* Thread. */
6443 	thread_ip_inc(p);
6444 }
6445 
6446 static inline void
6447 instr_meter_hhi_exec(struct rte_swx_pipeline *p)
6448 {
6449 	struct thread *t = &p->threads[p->thread_id];
6450 	struct instruction *ip = t->ip;
6451 	struct meter *m;
6452 	uint64_t time, n_pkts, n_bytes;
6453 	uint32_t length;
6454 	enum rte_color color_in, color_out;
6455 
6456 	TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);
6457 
6458 	/* Structs. */
6459 	m = instr_meter_idx_nbo(p, t, ip);
6460 	rte_prefetch0(m->n_pkts);
6461 	time = rte_get_tsc_cycles();
6462 	length = instr_meter_length_nbo(t, ip);
6463 	color_in = (enum rte_color)ip->meter.color_in_val;
6464 
6465 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6466 		&m->profile->profile,
6467 		time,
6468 		length,
6469 		color_in);
6470 
6471 	color_out &= m->color_mask;
6472 
6473 	n_pkts = m->n_pkts[color_out];
6474 	n_bytes = m->n_bytes[color_out];
6475 
6476 	instr_meter_color_out_hbo_set(t, ip, color_out);
6477 
6478 	m->n_pkts[color_out] = n_pkts + 1;
6479 	m->n_bytes[color_out] = n_bytes + length;
6480 
6481 	/* Thread. */
6482 	thread_ip_inc(p);
6483 }
6484 
6485 static inline void
6486 instr_meter_hmm_exec(struct rte_swx_pipeline *p)
6487 {
6488 	struct thread *t = &p->threads[p->thread_id];
6489 	struct instruction *ip = t->ip;
6490 	struct meter *m;
6491 	uint64_t time, n_pkts, n_bytes;
6492 	uint32_t length;
6493 	enum rte_color color_in, color_out;
6494 
6495 	TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);
6496 
6497 	/* Structs. */
6498 	m = instr_meter_idx_nbo(p, t, ip);
6499 	rte_prefetch0(m->n_pkts);
6500 	time = rte_get_tsc_cycles();
6501 	length = instr_meter_length_hbo(t, ip);
6502 	color_in = instr_meter_color_in_hbo(t, ip);
6503 
6504 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6505 		&m->profile->profile,
6506 		time,
6507 		length,
6508 		color_in);
6509 
6510 	color_out &= m->color_mask;
6511 
6512 	n_pkts = m->n_pkts[color_out];
6513 	n_bytes = m->n_bytes[color_out];
6514 
6515 	instr_meter_color_out_hbo_set(t, ip, color_out);
6516 
6517 	m->n_pkts[color_out] = n_pkts + 1;
6518 	m->n_bytes[color_out] = n_bytes + length;
6519 
6520 	/* Thread. */
6521 	thread_ip_inc(p);
6522 }
6523 static inline void
6524 instr_meter_hmi_exec(struct rte_swx_pipeline *p)
6525 {
6526 	struct thread *t = &p->threads[p->thread_id];
6527 	struct instruction *ip = t->ip;
6528 	struct meter *m;
6529 	uint64_t time, n_pkts, n_bytes;
6530 	uint32_t length;
6531 	enum rte_color color_in, color_out;
6532 
6533 	TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);
6534 
6535 	/* Structs. */
6536 	m = instr_meter_idx_nbo(p, t, ip);
6537 	rte_prefetch0(m->n_pkts);
6538 	time = rte_get_tsc_cycles();
6539 	length = instr_meter_length_hbo(t, ip);
6540 	color_in = (enum rte_color)ip->meter.color_in_val;
6541 
6542 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6543 		&m->profile->profile,
6544 		time,
6545 		length,
6546 		color_in);
6547 
6548 	color_out &= m->color_mask;
6549 
6550 	n_pkts = m->n_pkts[color_out];
6551 	n_bytes = m->n_bytes[color_out];
6552 
6553 	instr_meter_color_out_hbo_set(t, ip, color_out);
6554 
6555 	m->n_pkts[color_out] = n_pkts + 1;
6556 	m->n_bytes[color_out] = n_bytes + length;
6557 
6558 	/* Thread. */
6559 	thread_ip_inc(p);
6560 }
6561 
6562 static inline void
6563 instr_meter_mhm_exec(struct rte_swx_pipeline *p)
6564 {
6565 	struct thread *t = &p->threads[p->thread_id];
6566 	struct instruction *ip = t->ip;
6567 	struct meter *m;
6568 	uint64_t time, n_pkts, n_bytes;
6569 	uint32_t length;
6570 	enum rte_color color_in, color_out;
6571 
6572 	TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);
6573 
6574 	/* Structs. */
6575 	m = instr_meter_idx_hbo(p, t, ip);
6576 	rte_prefetch0(m->n_pkts);
6577 	time = rte_get_tsc_cycles();
6578 	length = instr_meter_length_nbo(t, ip);
6579 	color_in = instr_meter_color_in_hbo(t, ip);
6580 
6581 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6582 		&m->profile->profile,
6583 		time,
6584 		length,
6585 		color_in);
6586 
6587 	color_out &= m->color_mask;
6588 
6589 	n_pkts = m->n_pkts[color_out];
6590 	n_bytes = m->n_bytes[color_out];
6591 
6592 	instr_meter_color_out_hbo_set(t, ip, color_out);
6593 
6594 	m->n_pkts[color_out] = n_pkts + 1;
6595 	m->n_bytes[color_out] = n_bytes + length;
6596 
6597 	/* Thread. */
6598 	thread_ip_inc(p);
6599 }
6600 
6601 static inline void
6602 instr_meter_mhi_exec(struct rte_swx_pipeline *p)
6603 {
6604 	struct thread *t = &p->threads[p->thread_id];
6605 	struct instruction *ip = t->ip;
6606 	struct meter *m;
6607 	uint64_t time, n_pkts, n_bytes;
6608 	uint32_t length;
6609 	enum rte_color color_in, color_out;
6610 
6611 	TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);
6612 
6613 	/* Structs. */
6614 	m = instr_meter_idx_hbo(p, t, ip);
6615 	rte_prefetch0(m->n_pkts);
6616 	time = rte_get_tsc_cycles();
6617 	length = instr_meter_length_nbo(t, ip);
6618 	color_in = (enum rte_color)ip->meter.color_in_val;
6619 
6620 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6621 		&m->profile->profile,
6622 		time,
6623 		length,
6624 		color_in);
6625 
6626 	color_out &= m->color_mask;
6627 
6628 	n_pkts = m->n_pkts[color_out];
6629 	n_bytes = m->n_bytes[color_out];
6630 
6631 	instr_meter_color_out_hbo_set(t, ip, color_out);
6632 
6633 	m->n_pkts[color_out] = n_pkts + 1;
6634 	m->n_bytes[color_out] = n_bytes + length;
6635 
6636 	/* Thread. */
6637 	thread_ip_inc(p);
6638 }
6639 
6640 static inline void
6641 instr_meter_mmm_exec(struct rte_swx_pipeline *p)
6642 {
6643 	struct thread *t = &p->threads[p->thread_id];
6644 	struct instruction *ip = t->ip;
6645 	struct meter *m;
6646 	uint64_t time, n_pkts, n_bytes;
6647 	uint32_t length;
6648 	enum rte_color color_in, color_out;
6649 
6650 	TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);
6651 
6652 	/* Structs. */
6653 	m = instr_meter_idx_hbo(p, t, ip);
6654 	rte_prefetch0(m->n_pkts);
6655 	time = rte_get_tsc_cycles();
6656 	length = instr_meter_length_hbo(t, ip);
6657 	color_in = instr_meter_color_in_hbo(t, ip);
6658 
6659 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6660 		&m->profile->profile,
6661 		time,
6662 		length,
6663 		color_in);
6664 
6665 	color_out &= m->color_mask;
6666 
6667 	n_pkts = m->n_pkts[color_out];
6668 	n_bytes = m->n_bytes[color_out];
6669 
6670 	instr_meter_color_out_hbo_set(t, ip, color_out);
6671 
6672 	m->n_pkts[color_out] = n_pkts + 1;
6673 	m->n_bytes[color_out] = n_bytes + length;
6674 
6675 	/* Thread. */
6676 	thread_ip_inc(p);
6677 }
6678 
6679 static inline void
6680 instr_meter_mmi_exec(struct rte_swx_pipeline *p)
6681 {
6682 	struct thread *t = &p->threads[p->thread_id];
6683 	struct instruction *ip = t->ip;
6684 	struct meter *m;
6685 	uint64_t time, n_pkts, n_bytes;
6686 	uint32_t length;
6687 	enum rte_color color_in, color_out;
6688 
6689 	TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);
6690 
6691 	/* Structs. */
6692 	m = instr_meter_idx_hbo(p, t, ip);
6693 	rte_prefetch0(m->n_pkts);
6694 	time = rte_get_tsc_cycles();
6695 	length = instr_meter_length_hbo(t, ip);
6696 	color_in = (enum rte_color)ip->meter.color_in_val;
6697 
6698 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6699 		&m->profile->profile,
6700 		time,
6701 		length,
6702 		color_in);
6703 
6704 	color_out &= m->color_mask;
6705 
6706 	n_pkts = m->n_pkts[color_out];
6707 	n_bytes = m->n_bytes[color_out];
6708 
6709 	instr_meter_color_out_hbo_set(t, ip, color_out);
6710 
6711 	m->n_pkts[color_out] = n_pkts + 1;
6712 	m->n_bytes[color_out] = n_bytes + length;
6713 
6714 	/* Thread. */
6715 	thread_ip_inc(p);
6716 }
6717 
6718 static inline void
6719 instr_meter_ihm_exec(struct rte_swx_pipeline *p)
6720 {
6721 	struct thread *t = &p->threads[p->thread_id];
6722 	struct instruction *ip = t->ip;
6723 	struct meter *m;
6724 	uint64_t time, n_pkts, n_bytes;
6725 	uint32_t length;
6726 	enum rte_color color_in, color_out;
6727 
6728 	TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);
6729 
6730 	/* Structs. */
6731 	m = instr_meter_idx_imm(p, ip);
6732 	rte_prefetch0(m->n_pkts);
6733 	time = rte_get_tsc_cycles();
6734 	length = instr_meter_length_nbo(t, ip);
6735 	color_in = instr_meter_color_in_hbo(t, ip);
6736 
6737 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6738 		&m->profile->profile,
6739 		time,
6740 		length,
6741 		color_in);
6742 
6743 	color_out &= m->color_mask;
6744 
6745 	n_pkts = m->n_pkts[color_out];
6746 	n_bytes = m->n_bytes[color_out];
6747 
6748 	instr_meter_color_out_hbo_set(t, ip, color_out);
6749 
6750 	m->n_pkts[color_out] = n_pkts + 1;
6751 	m->n_bytes[color_out] = n_bytes + length;
6752 
6753 	/* Thread. */
6754 	thread_ip_inc(p);
6755 }
6756 
6757 static inline void
6758 instr_meter_ihi_exec(struct rte_swx_pipeline *p)
6759 {
6760 	struct thread *t = &p->threads[p->thread_id];
6761 	struct instruction *ip = t->ip;
6762 	struct meter *m;
6763 	uint64_t time, n_pkts, n_bytes;
6764 	uint32_t length;
6765 	enum rte_color color_in, color_out;
6766 
6767 	TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);
6768 
6769 	/* Structs. */
6770 	m = instr_meter_idx_imm(p, ip);
6771 	rte_prefetch0(m->n_pkts);
6772 	time = rte_get_tsc_cycles();
6773 	length = instr_meter_length_nbo(t, ip);
6774 	color_in = (enum rte_color)ip->meter.color_in_val;
6775 
6776 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6777 		&m->profile->profile,
6778 		time,
6779 		length,
6780 		color_in);
6781 
6782 	color_out &= m->color_mask;
6783 
6784 	n_pkts = m->n_pkts[color_out];
6785 	n_bytes = m->n_bytes[color_out];
6786 
6787 	instr_meter_color_out_hbo_set(t, ip, color_out);
6788 
6789 	m->n_pkts[color_out] = n_pkts + 1;
6790 	m->n_bytes[color_out] = n_bytes + length;
6791 
6792 	/* Thread. */
6793 	thread_ip_inc(p);
6794 }
6795 
6796 static inline void
6797 instr_meter_imm_exec(struct rte_swx_pipeline *p)
6798 {
6799 	struct thread *t = &p->threads[p->thread_id];
6800 	struct instruction *ip = t->ip;
6801 	struct meter *m;
6802 	uint64_t time, n_pkts, n_bytes;
6803 	uint32_t length;
6804 	enum rte_color color_in, color_out;
6805 
6806 	TRACE("[Thread %2u] meter (imm)\n", p->thread_id);
6807 
6808 	/* Structs. */
6809 	m = instr_meter_idx_imm(p, ip);
6810 	rte_prefetch0(m->n_pkts);
6811 	time = rte_get_tsc_cycles();
6812 	length = instr_meter_length_hbo(t, ip);
6813 	color_in = instr_meter_color_in_hbo(t, ip);
6814 
6815 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6816 		&m->profile->profile,
6817 		time,
6818 		length,
6819 		color_in);
6820 
6821 	color_out &= m->color_mask;
6822 
6823 	n_pkts = m->n_pkts[color_out];
6824 	n_bytes = m->n_bytes[color_out];
6825 
6826 	instr_meter_color_out_hbo_set(t, ip, color_out);
6827 
6828 	m->n_pkts[color_out] = n_pkts + 1;
6829 	m->n_bytes[color_out] = n_bytes + length;
6830 
6831 	/* Thread. */
6832 	thread_ip_inc(p);
6833 }

6834 static inline void
6835 instr_meter_imi_exec(struct rte_swx_pipeline *p)
6836 {
6837 	struct thread *t = &p->threads[p->thread_id];
6838 	struct instruction *ip = t->ip;
6839 	struct meter *m;
6840 	uint64_t time, n_pkts, n_bytes;
6841 	uint32_t length;
6842 	enum rte_color color_in, color_out;
6843 
6844 	TRACE("[Thread %2u] meter (imi)\n", p->thread_id);
6845 
6846 	/* Structs. */
6847 	m = instr_meter_idx_imm(p, ip);
6848 	rte_prefetch0(m->n_pkts);
6849 	time = rte_get_tsc_cycles();
6850 	length = instr_meter_length_hbo(t, ip);
6851 	color_in = (enum rte_color)ip->meter.color_in_val;
6852 
6853 	color_out = rte_meter_trtcm_color_aware_check(&m->m,
6854 		&m->profile->profile,
6855 		time,
6856 		length,
6857 		color_in);
6858 
6859 	color_out &= m->color_mask;
6860 
6861 	n_pkts = m->n_pkts[color_out];
6862 	n_bytes = m->n_bytes[color_out];
6863 
6864 	instr_meter_color_out_hbo_set(t, ip, color_out);
6865 
6866 	m->n_pkts[color_out] = n_pkts + 1;
6867 	m->n_bytes[color_out] = n_bytes + length;
6868 
6869 	/* Thread. */
6870 	thread_ip_inc(p);
6871 }
6872 
6873 /*
6874  * jmp.
6875  */
6876 static struct action *
6877 action_find(struct rte_swx_pipeline *p, const char *name);
6878 
6879 static int
6880 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
6881 		    struct action *action __rte_unused,
6882 		    char **tokens,
6883 		    int n_tokens,
6884 		    struct instruction *instr,
6885 		    struct instruction_data *data)
6886 {
6887 	CHECK(n_tokens == 2, EINVAL);
6888 
6889 	strcpy(data->jmp_label, tokens[1]);
6890 
6891 	instr->type = INSTR_JMP;
6892 	instr->jmp.ip = NULL; /* Resolved later. */
6893 	return 0;
6894 }
6895 
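/*
 * A jump target is referenced by label, e.g. (label and field names
 * illustrative only):
 *
 *	jmp LABEL_END
 *	...
 *	LABEL_END : tx m.port_out
 *
 * Only the label text is recorded here (data->jmp_label); it is resolved to
 * an instruction pointer by instr_jmp_resolve() once all the instructions
 * have been translated, so forward references are allowed.
 */
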
6896 static int
6897 instr_jmp_valid_translate(struct rte_swx_pipeline *p,
6898 			  struct action *action __rte_unused,
6899 			  char **tokens,
6900 			  int n_tokens,
6901 			  struct instruction *instr,
6902 			  struct instruction_data *data)
6903 {
6904 	struct header *h;
6905 
6906 	CHECK(n_tokens == 3, EINVAL);
6907 
6908 	strcpy(data->jmp_label, tokens[1]);
6909 
6910 	h = header_parse(p, tokens[2]);
6911 	CHECK(h, EINVAL);
6912 
6913 	instr->type = INSTR_JMP_VALID;
6914 	instr->jmp.ip = NULL; /* Resolved later. */
6915 	instr->jmp.header_id = h->id;
6916 	return 0;
6917 }
6918 
6919 static int
6920 instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
6921 			    struct action *action __rte_unused,
6922 			    char **tokens,
6923 			    int n_tokens,
6924 			    struct instruction *instr,
6925 			    struct instruction_data *data)
6926 {
6927 	struct header *h;
6928 
6929 	CHECK(n_tokens == 3, EINVAL);
6930 
6931 	strcpy(data->jmp_label, tokens[1]);
6932 
6933 	h = header_parse(p, tokens[2]);
6934 	CHECK(h, EINVAL);
6935 
6936 	instr->type = INSTR_JMP_INVALID;
6937 	instr->jmp.ip = NULL; /* Resolved later. */
6938 	instr->jmp.header_id = h->id;
6939 	return 0;
6940 }
6941 
6942 static int
6943 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
6944 			struct action *action,
6945 			char **tokens,
6946 			int n_tokens,
6947 			struct instruction *instr,
6948 			struct instruction_data *data)
6949 {
6950 	CHECK(!action, EINVAL);
6951 	CHECK(n_tokens == 2, EINVAL);
6952 
6953 	strcpy(data->jmp_label, tokens[1]);
6954 
6955 	instr->type = INSTR_JMP_HIT;
6956 	instr->jmp.ip = NULL; /* Resolved later. */
6957 	return 0;
6958 }
6959 
6960 static int
6961 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
6962 			 struct action *action,
6963 			 char **tokens,
6964 			 int n_tokens,
6965 			 struct instruction *instr,
6966 			 struct instruction_data *data)
6967 {
6968 	CHECK(!action, EINVAL);
6969 	CHECK(n_tokens == 2, EINVAL);
6970 
6971 	strcpy(data->jmp_label, tokens[1]);
6972 
6973 	instr->type = INSTR_JMP_MISS;
6974 	instr->jmp.ip = NULL; /* Resolved later. */
6975 	return 0;
6976 }
6977 
6978 static int
6979 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
6980 			       struct action *action,
6981 			       char **tokens,
6982 			       int n_tokens,
6983 			       struct instruction *instr,
6984 			       struct instruction_data *data)
6985 {
6986 	struct action *a;
6987 
6988 	CHECK(!action, EINVAL);
6989 	CHECK(n_tokens == 3, EINVAL);
6990 
6991 	strcpy(data->jmp_label, tokens[1]);
6992 
6993 	a = action_find(p, tokens[2]);
6994 	CHECK(a, EINVAL);
6995 
6996 	instr->type = INSTR_JMP_ACTION_HIT;
6997 	instr->jmp.ip = NULL; /* Resolved later. */
6998 	instr->jmp.action_id = a->id;
6999 	return 0;
7000 }
7001 
7002 static int
7003 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
7004 				struct action *action,
7005 				char **tokens,
7006 				int n_tokens,
7007 				struct instruction *instr,
7008 				struct instruction_data *data)
7009 {
7010 	struct action *a;
7011 
7012 	CHECK(!action, EINVAL);
7013 	CHECK(n_tokens == 3, EINVAL);
7014 
7015 	strcpy(data->jmp_label, tokens[1]);
7016 
7017 	a = action_find(p, tokens[2]);
7018 	CHECK(a, EINVAL);
7019 
7020 	instr->type = INSTR_JMP_ACTION_MISS;
7021 	instr->jmp.ip = NULL; /* Resolved later. */
7022 	instr->jmp.action_id = a->id;
7023 	return 0;
7024 }
7025 
7026 static int
7027 instr_jmp_eq_translate(struct rte_swx_pipeline *p,
7028 		       struct action *action,
7029 		       char **tokens,
7030 		       int n_tokens,
7031 		       struct instruction *instr,
7032 		       struct instruction_data *data)
7033 {
7034 	char *a = tokens[2], *b = tokens[3];
7035 	struct field *fa, *fb;
7036 	uint64_t b_val;
7037 	uint32_t a_struct_id, b_struct_id;
7038 
7039 	CHECK(n_tokens == 4, EINVAL);
7040 
7041 	strcpy(data->jmp_label, tokens[1]);
7042 
7043 	fa = struct_field_parse(p, action, a, &a_struct_id);
7044 	CHECK(fa, EINVAL);
7045 
7046 	/* JMP_EQ, JMP_EQ_MH, JMP_EQ_HM, JMP_EQ_HH. */
7047 	fb = struct_field_parse(p, action, b, &b_struct_id);
7048 	if (fb) {
7049 		instr->type = INSTR_JMP_EQ;
7050 		if (a[0] != 'h' && b[0] == 'h')
7051 			instr->type = INSTR_JMP_EQ_MH;
7052 		if (a[0] == 'h' && b[0] != 'h')
7053 			instr->type = INSTR_JMP_EQ_HM;
7054 		if (a[0] == 'h' && b[0] == 'h')
7055 			instr->type = INSTR_JMP_EQ_HH;
7056 		instr->jmp.ip = NULL; /* Resolved later. */
7057 
7058 		instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7059 		instr->jmp.a.n_bits = fa->n_bits;
7060 		instr->jmp.a.offset = fa->offset / 8;
7061 		instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7062 		instr->jmp.b.n_bits = fb->n_bits;
7063 		instr->jmp.b.offset = fb->offset / 8;
7064 		return 0;
7065 	}
7066 
7067 	/* JMP_EQ_I. */
7068 	b_val = strtoull(b, &b, 0);
7069 	CHECK(!b[0], EINVAL);
7070 
7071 	if (a[0] == 'h')
7072 		b_val = hton64(b_val) >> (64 - fa->n_bits);
7073 
7074 	instr->type = INSTR_JMP_EQ_I;
7075 	instr->jmp.ip = NULL; /* Resolved later. */
7076 	instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7077 	instr->jmp.a.n_bits = fa->n_bits;
7078 	instr->jmp.a.offset = fa->offset / 8;
7079 	instr->jmp.b_val = b_val;
7080 	return 0;
7081 }
7082 
7083 static int
7084 instr_jmp_neq_translate(struct rte_swx_pipeline *p,
7085 			struct action *action,
7086 			char **tokens,
7087 			int n_tokens,
7088 			struct instruction *instr,
7089 			struct instruction_data *data)
7090 {
7091 	char *a = tokens[2], *b = tokens[3];
7092 	struct field *fa, *fb;
7093 	uint64_t b_val;
7094 	uint32_t a_struct_id, b_struct_id;
7095 
7096 	CHECK(n_tokens == 4, EINVAL);
7097 
7098 	strcpy(data->jmp_label, tokens[1]);
7099 
7100 	fa = struct_field_parse(p, action, a, &a_struct_id);
7101 	CHECK(fa, EINVAL);
7102 
7103 	/* JMP_NEQ, JMP_NEQ_MH, JMP_NEQ_HM, JMP_NEQ_HH. */
7104 	fb = struct_field_parse(p, action, b, &b_struct_id);
7105 	if (fb) {
7106 		instr->type = INSTR_JMP_NEQ;
7107 		if (a[0] != 'h' && b[0] == 'h')
7108 			instr->type = INSTR_JMP_NEQ_MH;
7109 		if (a[0] == 'h' && b[0] != 'h')
7110 			instr->type = INSTR_JMP_NEQ_HM;
7111 		if (a[0] == 'h' && b[0] == 'h')
7112 			instr->type = INSTR_JMP_NEQ_HH;
7113 		instr->jmp.ip = NULL; /* Resolved later. */
7114 
7115 		instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7116 		instr->jmp.a.n_bits = fa->n_bits;
7117 		instr->jmp.a.offset = fa->offset / 8;
7118 		instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7119 		instr->jmp.b.n_bits = fb->n_bits;
7120 		instr->jmp.b.offset = fb->offset / 8;
7121 		return 0;
7122 	}
7123 
7124 	/* JMP_NEQ_I. */
7125 	b_val = strtoull(b, &b, 0);
7126 	CHECK(!b[0], EINVAL);
7127 
7128 	if (a[0] == 'h')
7129 		b_val = hton64(b_val) >> (64 - fa->n_bits);
7130 
7131 	instr->type = INSTR_JMP_NEQ_I;
7132 	instr->jmp.ip = NULL; /* Resolved later. */
7133 	instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7134 	instr->jmp.a.n_bits = fa->n_bits;
7135 	instr->jmp.a.offset = fa->offset / 8;
7136 	instr->jmp.b_val = b_val;
7137 	return 0;
7138 }
7139 
7140 static int
7141 instr_jmp_lt_translate(struct rte_swx_pipeline *p,
7142 		       struct action *action,
7143 		       char **tokens,
7144 		       int n_tokens,
7145 		       struct instruction *instr,
7146 		       struct instruction_data *data)
7147 {
7148 	char *a = tokens[2], *b = tokens[3];
7149 	struct field *fa, *fb;
7150 	uint64_t b_val;
7151 	uint32_t a_struct_id, b_struct_id;
7152 
7153 	CHECK(n_tokens == 4, EINVAL);
7154 
7155 	strcpy(data->jmp_label, tokens[1]);
7156 
7157 	fa = struct_field_parse(p, action, a, &a_struct_id);
7158 	CHECK(fa, EINVAL);
7159 
7160 	/* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
7161 	fb = struct_field_parse(p, action, b, &b_struct_id);
7162 	if (fb) {
7163 		instr->type = INSTR_JMP_LT;
7164 		if (a[0] == 'h' && b[0] != 'h')
7165 			instr->type = INSTR_JMP_LT_HM;
7166 		if (a[0] != 'h' && b[0] == 'h')
7167 			instr->type = INSTR_JMP_LT_MH;
7168 		if (a[0] == 'h' && b[0] == 'h')
7169 			instr->type = INSTR_JMP_LT_HH;
7170 		instr->jmp.ip = NULL; /* Resolved later. */
7171 
7172 		instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7173 		instr->jmp.a.n_bits = fa->n_bits;
7174 		instr->jmp.a.offset = fa->offset / 8;
7175 		instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7176 		instr->jmp.b.n_bits = fb->n_bits;
7177 		instr->jmp.b.offset = fb->offset / 8;
7178 		return 0;
7179 	}
7180 
7181 	/* JMP_LT_MI, JMP_LT_HI. */
7182 	b_val = strtoull(b, &b, 0);
7183 	CHECK(!b[0], EINVAL);
7184 
7185 	instr->type = INSTR_JMP_LT_MI;
7186 	if (a[0] == 'h')
7187 		instr->type = INSTR_JMP_LT_HI;
7188 	instr->jmp.ip = NULL; /* Resolved later. */
7189 
7190 	instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7191 	instr->jmp.a.n_bits = fa->n_bits;
7192 	instr->jmp.a.offset = fa->offset / 8;
7193 	instr->jmp.b_val = b_val;
7194 	return 0;
7195 }
7196 
7197 static int
7198 instr_jmp_gt_translate(struct rte_swx_pipeline *p,
7199 		       struct action *action,
7200 		       char **tokens,
7201 		       int n_tokens,
7202 		       struct instruction *instr,
7203 		       struct instruction_data *data)
7204 {
7205 	char *a = tokens[2], *b = tokens[3];
7206 	struct field *fa, *fb;
7207 	uint64_t b_val;
7208 	uint32_t a_struct_id, b_struct_id;
7209 
7210 	CHECK(n_tokens == 4, EINVAL);
7211 
7212 	strcpy(data->jmp_label, tokens[1]);
7213 
7214 	fa = struct_field_parse(p, action, a, &a_struct_id);
7215 	CHECK(fa, EINVAL);
7216 
7217 	/* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
7218 	fb = struct_field_parse(p, action, b, &b_struct_id);
7219 	if (fb) {
7220 		instr->type = INSTR_JMP_GT;
7221 		if (a[0] == 'h' && b[0] != 'h')
7222 			instr->type = INSTR_JMP_GT_HM;
7223 		if (a[0] != 'h' && b[0] == 'h')
7224 			instr->type = INSTR_JMP_GT_MH;
7225 		if (a[0] == 'h' && b[0] == 'h')
7226 			instr->type = INSTR_JMP_GT_HH;
7227 		instr->jmp.ip = NULL; /* Resolved later. */
7228 
7229 		instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7230 		instr->jmp.a.n_bits = fa->n_bits;
7231 		instr->jmp.a.offset = fa->offset / 8;
7232 		instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7233 		instr->jmp.b.n_bits = fb->n_bits;
7234 		instr->jmp.b.offset = fb->offset / 8;
7235 		return 0;
7236 	}
7237 
7238 	/* JMP_GT_MI, JMP_GT_HI. */
7239 	b_val = strtoull(b, &b, 0);
7240 	CHECK(!b[0], EINVAL);
7241 
7242 	instr->type = INSTR_JMP_GT_MI;
7243 	if (a[0] == 'h')
7244 		instr->type = INSTR_JMP_GT_HI;
7245 	instr->jmp.ip = NULL; /* Resolved later. */
7246 
7247 	instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7248 	instr->jmp.a.n_bits = fa->n_bits;
7249 	instr->jmp.a.offset = fa->offset / 8;
7250 	instr->jmp.b_val = b_val;
7251 	return 0;
7252 }
7253 
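/*
 * Note on the immediate flavours above: jmpeq/jmpneq with a header first
 * operand convert the immediate to network byte order at translation time
 * (hton64() shifted down to the field width), so a single INSTR_JMP_EQ_I or
 * INSTR_JMP_NEQ_I opcode can compare against the raw field load at run time
 * without a per-packet byte swap. Ordering comparisons cannot be
 * pre-converted this way, which is why jmplt/jmpgt keep separate _MI and
 * _HI opcodes.
 */
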
7254 static inline void
7255 instr_jmp_exec(struct rte_swx_pipeline *p)
7256 {
7257 	struct thread *t = &p->threads[p->thread_id];
7258 	struct instruction *ip = t->ip;
7259 
7260 	TRACE("[Thread %2u] jmp\n", p->thread_id);
7261 
7262 	thread_ip_set(t, ip->jmp.ip);
7263 }
7264 
7265 static inline void
7266 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
7267 {
7268 	struct thread *t = &p->threads[p->thread_id];
7269 	struct instruction *ip = t->ip;
7270 	uint32_t header_id = ip->jmp.header_id;
7271 
7272 	TRACE("[Thread %2u] jmpv\n", p->thread_id);
7273 
7274 	t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
7275 }
7276 
7277 static inline void
7278 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
7279 {
7280 	struct thread *t = &p->threads[p->thread_id];
7281 	struct instruction *ip = t->ip;
7282 	uint32_t header_id = ip->jmp.header_id;
7283 
7284 	TRACE("[Thread %2u] jmpnv\n", p->thread_id);
7285 
7286 	t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
7287 }
7288 
7289 static inline void
7290 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
7291 {
7292 	struct thread *t = &p->threads[p->thread_id];
7293 	struct instruction *ip = t->ip;
7294 	struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
7295 
7296 	TRACE("[Thread %2u] jmph\n", p->thread_id);
7297 
7298 	t->ip = ip_next[t->hit];
7299 }
7300 
7301 static inline void
7302 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
7303 {
7304 	struct thread *t = &p->threads[p->thread_id];
7305 	struct instruction *ip = t->ip;
7306 	struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
7307 
7308 	TRACE("[Thread %2u] jmpnh\n", p->thread_id);
7309 
7310 	t->ip = ip_next[t->hit];
7311 }
7312 
7313 static inline void
7314 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
7315 {
7316 	struct thread *t = &p->threads[p->thread_id];
7317 	struct instruction *ip = t->ip;
7318 
7319 	TRACE("[Thread %2u] jmpa\n", p->thread_id);
7320 
7321 	t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
7322 }
7323 
7324 static inline void
7325 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
7326 {
7327 	struct thread *t = &p->threads[p->thread_id];
7328 	struct instruction *ip = t->ip;
7329 
7330 	TRACE("[Thread %2u] jmpna\n", p->thread_id);
7331 
7332 	t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
7333 }
7334 
7335 static inline void
7336 instr_jmp_eq_exec(struct rte_swx_pipeline *p)
7337 {
7338 	struct thread *t = &p->threads[p->thread_id];
7339 	struct instruction *ip = t->ip;
7340 
7341 	TRACE("[Thread %2u] jmpeq\n", p->thread_id);
7342 
7343 	JMP_CMP(t, ip, ==);
7344 }
7345 
7346 static inline void
7347 instr_jmp_eq_mh_exec(struct rte_swx_pipeline *p)
7348 {
7349 	struct thread *t = &p->threads[p->thread_id];
7350 	struct instruction *ip = t->ip;
7351 
7352 	TRACE("[Thread %2u] jmpeq (mh)\n", p->thread_id);
7353 
7354 	JMP_CMP_MH(t, ip, ==);
7355 }
7356 
7357 static inline void
7358 instr_jmp_eq_hm_exec(struct rte_swx_pipeline *p)
7359 {
7360 	struct thread *t = &p->threads[p->thread_id];
7361 	struct instruction *ip = t->ip;
7362 
7363 	TRACE("[Thread %2u] jmpeq (hm)\n", p->thread_id);
7364 
7365 	JMP_CMP_HM(t, ip, ==);
7366 }
7367 
7368 static inline void
7369 instr_jmp_eq_hh_exec(struct rte_swx_pipeline *p)
7370 {
7371 	struct thread *t = &p->threads[p->thread_id];
7372 	struct instruction *ip = t->ip;
7373 
7374 	TRACE("[Thread %2u] jmpeq (hh)\n", p->thread_id);
7375 
7376 	JMP_CMP_HH_FAST(t, ip, ==);
7377 }
7378 
7379 static inline void
7380 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
7381 {
7382 	struct thread *t = &p->threads[p->thread_id];
7383 	struct instruction *ip = t->ip;
7384 
7385 	TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
7386 
7387 	JMP_CMP_I(t, ip, ==);
7388 }
7389 
7390 static inline void
7391 instr_jmp_neq_exec(struct rte_swx_pipeline *p)
7392 {
7393 	struct thread *t = &p->threads[p->thread_id];
7394 	struct instruction *ip = t->ip;
7395 
7396 	TRACE("[Thread %2u] jmpneq\n", p->thread_id);
7397 
7398 	JMP_CMP(t, ip, !=);
7399 }
7400 
7401 static inline void
7402 instr_jmp_neq_mh_exec(struct rte_swx_pipeline *p)
7403 {
7404 	struct thread *t = &p->threads[p->thread_id];
7405 	struct instruction *ip = t->ip;
7406 
7407 	TRACE("[Thread %2u] jmpneq (mh)\n", p->thread_id);
7408 
7409 	JMP_CMP_MH(t, ip, !=);
7410 }
7411 
7412 static inline void
7413 instr_jmp_neq_hm_exec(struct rte_swx_pipeline *p)
7414 {
7415 	struct thread *t = &p->threads[p->thread_id];
7416 	struct instruction *ip = t->ip;
7417 
7418 	TRACE("[Thread %2u] jmpneq (hm)\n", p->thread_id);
7419 
7420 	JMP_CMP_HM(t, ip, !=);
7421 }
7422 
7423 static inline void
7424 instr_jmp_neq_hh_exec(struct rte_swx_pipeline *p)
7425 {
7426 	struct thread *t = &p->threads[p->thread_id];
7427 	struct instruction *ip = t->ip;
7428 
7429 	TRACE("[Thread %2u] jmpneq (hh)\n", p->thread_id);
7430 
7431 	JMP_CMP_HH_FAST(t, ip, !=);
7432 }
7433 
7434 static inline void
7435 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
7436 {
7437 	struct thread *t = &p->threads[p->thread_id];
7438 	struct instruction *ip = t->ip;
7439 
7440 	TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
7441 
7442 	JMP_CMP_I(t, ip, !=);
7443 }
7444 
7445 static inline void
7446 instr_jmp_lt_exec(struct rte_swx_pipeline *p)
7447 {
7448 	struct thread *t = &p->threads[p->thread_id];
7449 	struct instruction *ip = t->ip;
7450 
7451 	TRACE("[Thread %2u] jmplt\n", p->thread_id);
7452 
7453 	JMP_CMP(t, ip, <);
7454 }
7455 
7456 static inline void
7457 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
7458 {
7459 	struct thread *t = &p->threads[p->thread_id];
7460 	struct instruction *ip = t->ip;
7461 
7462 	TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
7463 
7464 	JMP_CMP_MH(t, ip, <);
7465 }
7466 
7467 static inline void
7468 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
7469 {
7470 	struct thread *t = &p->threads[p->thread_id];
7471 	struct instruction *ip = t->ip;
7472 
7473 	TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
7474 
7475 	JMP_CMP_HM(t, ip, <);
7476 }
7477 
7478 static inline void
7479 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
7480 {
7481 	struct thread *t = &p->threads[p->thread_id];
7482 	struct instruction *ip = t->ip;
7483 
7484 	TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
7485 
7486 	JMP_CMP_HH(t, ip, <);
7487 }
7488 
7489 static inline void
7490 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
7491 {
7492 	struct thread *t = &p->threads[p->thread_id];
7493 	struct instruction *ip = t->ip;
7494 
7495 	TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
7496 
7497 	JMP_CMP_MI(t, ip, <);
7498 }
7499 
7500 static inline void
7501 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
7502 {
7503 	struct thread *t = &p->threads[p->thread_id];
7504 	struct instruction *ip = t->ip;
7505 
7506 	TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
7507 
7508 	JMP_CMP_HI(t, ip, <);
7509 }
7510 
7511 static inline void
7512 instr_jmp_gt_exec(struct rte_swx_pipeline *p)
7513 {
7514 	struct thread *t = &p->threads[p->thread_id];
7515 	struct instruction *ip = t->ip;
7516 
7517 	TRACE("[Thread %2u] jmpgt\n", p->thread_id);
7518 
7519 	JMP_CMP(t, ip, >);
7520 }
7521 
7522 static inline void
7523 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
7524 {
7525 	struct thread *t = &p->threads[p->thread_id];
7526 	struct instruction *ip = t->ip;
7527 
7528 	TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
7529 
7530 	JMP_CMP_MH(t, ip, >);
7531 }
7532 
7533 static inline void
7534 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
7535 {
7536 	struct thread *t = &p->threads[p->thread_id];
7537 	struct instruction *ip = t->ip;
7538 
7539 	TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
7540 
7541 	JMP_CMP_HM(t, ip, >);
7542 }
7543 
7544 static inline void
7545 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
7546 {
7547 	struct thread *t = &p->threads[p->thread_id];
7548 	struct instruction *ip = t->ip;
7549 
7550 	TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
7551 
7552 	JMP_CMP_HH(t, ip, >);
7553 }
7554 
7555 static inline void
7556 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
7557 {
7558 	struct thread *t = &p->threads[p->thread_id];
7559 	struct instruction *ip = t->ip;
7560 
7561 	TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
7562 
7563 	JMP_CMP_MI(t, ip, >);
7564 }
7565 
7566 static inline void
7567 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
7568 {
7569 	struct thread *t = &p->threads[p->thread_id];
7570 	struct instruction *ip = t->ip;
7571 
7572 	TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
7573 
7574 	JMP_CMP_HI(t, ip, >);
7575 }
7576 
7577 /*
7578  * return.
7579  */
7580 static int
7581 instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
7582 		       struct action *action,
7583 		       char **tokens __rte_unused,
7584 		       int n_tokens,
7585 		       struct instruction *instr,
7586 		       struct instruction_data *data __rte_unused)
7587 {
7588 	CHECK(action, EINVAL);
7589 	CHECK(n_tokens == 1, EINVAL);
7590 
7591 	instr->type = INSTR_RETURN;
7592 	return 0;
7593 }
7594 
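/*
 * The return instruction is only accepted inside an action (CHECK(action)
 * above); at run time it simply restores the instruction pointer previously
 * saved in t->ret, handing control back to the code that invoked the action.
 */
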
7595 static inline void
7596 instr_return_exec(struct rte_swx_pipeline *p)
7597 {
7598 	struct thread *t = &p->threads[p->thread_id];
7599 
7600 	TRACE("[Thread %2u] return\n", p->thread_id);
7601 
7602 	t->ip = t->ret;
7603 }
7604 
7605 static int
7606 instr_translate(struct rte_swx_pipeline *p,
7607 		struct action *action,
7608 		char *string,
7609 		struct instruction *instr,
7610 		struct instruction_data *data)
7611 {
7612 	char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
7613 	int n_tokens = 0, tpos = 0;
7614 
7615 	/* Parse the instruction string into tokens. */
7616 	for ( ; ; ) {
7617 		char *token;
7618 
7619 		token = strtok_r(string, " \t\v", &string);
7620 		if (!token)
7621 			break;
7622 
7623 		CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
7624 		CHECK_NAME(token, EINVAL);
7625 
7626 		tokens[n_tokens] = token;
7627 		n_tokens++;
7628 	}
7629 
7630 	CHECK(n_tokens, EINVAL);
7631 
7632 	/* Handle the optional instruction label. */
7633 	if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
7634 		strcpy(data->label, tokens[0]);
7635 
7636 		tpos += 2;
7637 		CHECK(n_tokens - tpos, EINVAL);
7638 	}
7639 
7640 	/* Identify the instruction type. */
7641 	if (!strcmp(tokens[tpos], "rx"))
7642 		return instr_rx_translate(p,
7643 					  action,
7644 					  &tokens[tpos],
7645 					  n_tokens - tpos,
7646 					  instr,
7647 					  data);
7648 
7649 	if (!strcmp(tokens[tpos], "tx"))
7650 		return instr_tx_translate(p,
7651 					  action,
7652 					  &tokens[tpos],
7653 					  n_tokens - tpos,
7654 					  instr,
7655 					  data);
7656 
7657 	if (!strcmp(tokens[tpos], "drop"))
7658 		return instr_drop_translate(p,
7659 					    action,
7660 					    &tokens[tpos],
7661 					    n_tokens - tpos,
7662 					    instr,
7663 					    data);
7664 
7665 	if (!strcmp(tokens[tpos], "extract"))
7666 		return instr_hdr_extract_translate(p,
7667 						   action,
7668 						   &tokens[tpos],
7669 						   n_tokens - tpos,
7670 						   instr,
7671 						   data);
7672 
7673 	if (!strcmp(tokens[tpos], "emit"))
7674 		return instr_hdr_emit_translate(p,
7675 						action,
7676 						&tokens[tpos],
7677 						n_tokens - tpos,
7678 						instr,
7679 						data);
7680 
7681 	if (!strcmp(tokens[tpos], "validate"))
7682 		return instr_hdr_validate_translate(p,
7683 						    action,
7684 						    &tokens[tpos],
7685 						    n_tokens - tpos,
7686 						    instr,
7687 						    data);
7688 
7689 	if (!strcmp(tokens[tpos], "invalidate"))
7690 		return instr_hdr_invalidate_translate(p,
7691 						      action,
7692 						      &tokens[tpos],
7693 						      n_tokens - tpos,
7694 						      instr,
7695 						      data);
7696 
7697 	if (!strcmp(tokens[tpos], "mov"))
7698 		return instr_mov_translate(p,
7699 					   action,
7700 					   &tokens[tpos],
7701 					   n_tokens - tpos,
7702 					   instr,
7703 					   data);
7704 
7705 	if (!strcmp(tokens[tpos], "add"))
7706 		return instr_alu_add_translate(p,
7707 					       action,
7708 					       &tokens[tpos],
7709 					       n_tokens - tpos,
7710 					       instr,
7711 					       data);
7712 
7713 	if (!strcmp(tokens[tpos], "sub"))
7714 		return instr_alu_sub_translate(p,
7715 					       action,
7716 					       &tokens[tpos],
7717 					       n_tokens - tpos,
7718 					       instr,
7719 					       data);
7720 
7721 	if (!strcmp(tokens[tpos], "ckadd"))
7722 		return instr_alu_ckadd_translate(p,
7723 						 action,
7724 						 &tokens[tpos],
7725 						 n_tokens - tpos,
7726 						 instr,
7727 						 data);
7728 
7729 	if (!strcmp(tokens[tpos], "cksub"))
7730 		return instr_alu_cksub_translate(p,
7731 						 action,
7732 						 &tokens[tpos],
7733 						 n_tokens - tpos,
7734 						 instr,
7735 						 data);
7736 
7737 	if (!strcmp(tokens[tpos], "and"))
7738 		return instr_alu_and_translate(p,
7739 					       action,
7740 					       &tokens[tpos],
7741 					       n_tokens - tpos,
7742 					       instr,
7743 					       data);
7744 
7745 	if (!strcmp(tokens[tpos], "or"))
7746 		return instr_alu_or_translate(p,
7747 					      action,
7748 					      &tokens[tpos],
7749 					      n_tokens - tpos,
7750 					      instr,
7751 					      data);
7752 
7753 	if (!strcmp(tokens[tpos], "xor"))
7754 		return instr_alu_xor_translate(p,
7755 					       action,
7756 					       &tokens[tpos],
7757 					       n_tokens - tpos,
7758 					       instr,
7759 					       data);
7760 
7761 	if (!strcmp(tokens[tpos], "shl"))
7762 		return instr_alu_shl_translate(p,
7763 					       action,
7764 					       &tokens[tpos],
7765 					       n_tokens - tpos,
7766 					       instr,
7767 					       data);
7768 
7769 	if (!strcmp(tokens[tpos], "shr"))
7770 		return instr_alu_shr_translate(p,
7771 					       action,
7772 					       &tokens[tpos],
7773 					       n_tokens - tpos,
7774 					       instr,
7775 					       data);
7776 
7777 	if (!strcmp(tokens[tpos], "regprefetch"))
7778 		return instr_regprefetch_translate(p,
7779 						   action,
7780 						   &tokens[tpos],
7781 						   n_tokens - tpos,
7782 						   instr,
7783 						   data);
7784 
7785 	if (!strcmp(tokens[tpos], "regrd"))
7786 		return instr_regrd_translate(p,
7787 					     action,
7788 					     &tokens[tpos],
7789 					     n_tokens - tpos,
7790 					     instr,
7791 					     data);
7792 
7793 	if (!strcmp(tokens[tpos], "regwr"))
7794 		return instr_regwr_translate(p,
7795 					     action,
7796 					     &tokens[tpos],
7797 					     n_tokens - tpos,
7798 					     instr,
7799 					     data);
7800 
7801 	if (!strcmp(tokens[tpos], "regadd"))
7802 		return instr_regadd_translate(p,
7803 					      action,
7804 					      &tokens[tpos],
7805 					      n_tokens - tpos,
7806 					      instr,
7807 					      data);
7808 
7809 	if (!strcmp(tokens[tpos], "metprefetch"))
7810 		return instr_metprefetch_translate(p,
7811 						   action,
7812 						   &tokens[tpos],
7813 						   n_tokens - tpos,
7814 						   instr,
7815 						   data);
7816 
7817 	if (!strcmp(tokens[tpos], "meter"))
7818 		return instr_meter_translate(p,
7819 					     action,
7820 					     &tokens[tpos],
7821 					     n_tokens - tpos,
7822 					     instr,
7823 					     data);
7824 
7825 	if (!strcmp(tokens[tpos], "table"))
7826 		return instr_table_translate(p,
7827 					     action,
7828 					     &tokens[tpos],
7829 					     n_tokens - tpos,
7830 					     instr,
7831 					     data);
7832 
7833 	if (!strcmp(tokens[tpos], "extern"))
7834 		return instr_extern_translate(p,
7835 					      action,
7836 					      &tokens[tpos],
7837 					      n_tokens - tpos,
7838 					      instr,
7839 					      data);
7840 
7841 	if (!strcmp(tokens[tpos], "jmp"))
7842 		return instr_jmp_translate(p,
7843 					   action,
7844 					   &tokens[tpos],
7845 					   n_tokens - tpos,
7846 					   instr,
7847 					   data);
7848 
7849 	if (!strcmp(tokens[tpos], "jmpv"))
7850 		return instr_jmp_valid_translate(p,
7851 						 action,
7852 						 &tokens[tpos],
7853 						 n_tokens - tpos,
7854 						 instr,
7855 						 data);
7856 
7857 	if (!strcmp(tokens[tpos], "jmpnv"))
7858 		return instr_jmp_invalid_translate(p,
7859 						   action,
7860 						   &tokens[tpos],
7861 						   n_tokens - tpos,
7862 						   instr,
7863 						   data);
7864 
7865 	if (!strcmp(tokens[tpos], "jmph"))
7866 		return instr_jmp_hit_translate(p,
7867 					       action,
7868 					       &tokens[tpos],
7869 					       n_tokens - tpos,
7870 					       instr,
7871 					       data);
7872 
7873 	if (!strcmp(tokens[tpos], "jmpnh"))
7874 		return instr_jmp_miss_translate(p,
7875 						action,
7876 						&tokens[tpos],
7877 						n_tokens - tpos,
7878 						instr,
7879 						data);
7880 
7881 	if (!strcmp(tokens[tpos], "jmpa"))
7882 		return instr_jmp_action_hit_translate(p,
7883 						      action,
7884 						      &tokens[tpos],
7885 						      n_tokens - tpos,
7886 						      instr,
7887 						      data);
7888 
7889 	if (!strcmp(tokens[tpos], "jmpna"))
7890 		return instr_jmp_action_miss_translate(p,
7891 						       action,
7892 						       &tokens[tpos],
7893 						       n_tokens - tpos,
7894 						       instr,
7895 						       data);
7896 
7897 	if (!strcmp(tokens[tpos], "jmpeq"))
7898 		return instr_jmp_eq_translate(p,
7899 					      action,
7900 					      &tokens[tpos],
7901 					      n_tokens - tpos,
7902 					      instr,
7903 					      data);
7904 
7905 	if (!strcmp(tokens[tpos], "jmpneq"))
7906 		return instr_jmp_neq_translate(p,
7907 					       action,
7908 					       &tokens[tpos],
7909 					       n_tokens - tpos,
7910 					       instr,
7911 					       data);
7912 
7913 	if (!strcmp(tokens[tpos], "jmplt"))
7914 		return instr_jmp_lt_translate(p,
7915 					      action,
7916 					      &tokens[tpos],
7917 					      n_tokens - tpos,
7918 					      instr,
7919 					      data);
7920 
7921 	if (!strcmp(tokens[tpos], "jmpgt"))
7922 		return instr_jmp_gt_translate(p,
7923 					      action,
7924 					      &tokens[tpos],
7925 					      n_tokens - tpos,
7926 					      instr,
7927 					      data);
7928 
7929 	if (!strcmp(tokens[tpos], "return"))
7930 		return instr_return_translate(p,
7931 					      action,
7932 					      &tokens[tpos],
7933 					      n_tokens - tpos,
7934 					      instr,
7935 					      data);
7936 
7937 	CHECK(0, EINVAL);
7938 }
7939 
7940 static struct instruction_data *
7941 label_find(struct instruction_data *data, uint32_t n, const char *label)
7942 {
7943 	uint32_t i;
7944 
7945 	for (i = 0; i < n; i++)
7946 		if (!strcmp(label, data[i].label))
7947 			return &data[i];
7948 
7949 	return NULL;
7950 }
7951 
7952 static uint32_t
7953 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
7954 {
7955 	uint32_t count = 0, i;
7956 
7957 	if (!label[0])
7958 		return 0;
7959 
7960 	for (i = 0; i < n; i++)
7961 		if (!strcmp(label, data[i].jmp_label))
7962 			count++;
7963 
7964 	return count;
7965 }
7966 
7967 static int
7968 instr_label_check(struct instruction_data *instruction_data,
7969 		  uint32_t n_instructions)
7970 {
7971 	uint32_t i;
7972 
7973 	/* Check that all instruction labels are unique. */
7974 	for (i = 0; i < n_instructions; i++) {
7975 		struct instruction_data *data = &instruction_data[i];
7976 		char *label = data->label;
7977 		uint32_t j;
7978 
7979 		if (!label[0])
7980 			continue;
7981 
7982 		for (j = i + 1; j < n_instructions; j++)
7983 			CHECK(strcmp(label, instruction_data[j].label), EINVAL);
7984 	}
7985 
7986 	/* Get users for each instruction label. */
7987 	for (i = 0; i < n_instructions; i++) {
7988 		struct instruction_data *data = &instruction_data[i];
7989 		char *label = data->label;
7990 
7991 		data->n_users = label_is_used(instruction_data,
7992 					      n_instructions,
7993 					      label);
7994 	}
7995 
7996 	return 0;
7997 }
7998 
7999 static int
8000 instr_jmp_resolve(struct instruction *instructions,
8001 		  struct instruction_data *instruction_data,
8002 		  uint32_t n_instructions)
8003 {
8004 	uint32_t i;
8005 
8006 	for (i = 0; i < n_instructions; i++) {
8007 		struct instruction *instr = &instructions[i];
8008 		struct instruction_data *data = &instruction_data[i];
8009 		struct instruction_data *found;
8010 
8011 		if (!instruction_is_jmp(instr))
8012 			continue;
8013 
8014 		found = label_find(instruction_data,
8015 				   n_instructions,
8016 				   data->jmp_label);
8017 		CHECK(found, EINVAL);
8018 
8019 		instr->jmp.ip = &instructions[found - instruction_data];
8020 	}
8021 
8022 	return 0;
8023 }
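/*
 * Illustration (not part of the build flow): after translation, a jump
 * instruction still refers to its target through the label name saved in
 * instruction_data[i].jmp_label; instr_jmp_resolve() patches instr->jmp.ip to
 * point at the instruction that carries the matching label. Assuming a
 * hypothetical program fragment such as:
 *
 *	       jmpnv LABEL_DROP h.ipv4
 *	       ...
 *	LABEL_DROP : mov m.port_out 4
 *	             tx m.port_out
 *
 * the jmpnv instruction ends up holding a direct pointer to the instruction
 * labelled LABEL_DROP, so no name lookup takes place at run-time.
 */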
8024 
8025 static int
8026 instr_verify(struct rte_swx_pipeline *p __rte_unused,
8027 	     struct action *a,
8028 	     struct instruction *instr,
8029 	     struct instruction_data *data __rte_unused,
8030 	     uint32_t n_instructions)
8031 {
8032 	if (!a) {
8033 		enum instruction_type type;
8034 		uint32_t i;
8035 
8036 		/* Check that the first instruction is rx. */
8037 		CHECK(instr[0].type == INSTR_RX, EINVAL);
8038 
8039 		/* Check that there is at least one tx instruction. */
8040 		for (i = 0; i < n_instructions; i++) {
8041 			type = instr[i].type;
8042 
8043 			if (instruction_is_tx(type))
8044 				break;
8045 		}
8046 		CHECK(i < n_instructions, EINVAL);
8047 
8048 		/* Check that the last instruction is either tx or unconditional
8049 		 * jump.
8050 		 */
8051 		type = instr[n_instructions - 1].type;
8052 		CHECK(instruction_is_tx(type) || (type == INSTR_JMP), EINVAL);
8053 	}
8054 
8055 	if (a) {
8056 		enum instruction_type type;
8057 		uint32_t i;
8058 
8059 		/* Check that there is at least one return or tx instruction. */
8060 		for (i = 0; i < n_instructions; i++) {
8061 			type = instr[i].type;
8062 
8063 			if ((type == INSTR_RETURN) || instruction_is_tx(type))
8064 				break;
8065 		}
8066 		CHECK(i < n_instructions, EINVAL);
8067 	}
8068 
8069 	return 0;
8070 }
8071 
8072 static uint32_t
8073 instr_compact(struct instruction *instructions,
8074 	      struct instruction_data *instruction_data,
8075 	      uint32_t n_instructions)
8076 {
8077 	uint32_t i, pos = 0;
8078 
8079 	/* Eliminate the invalid instructions that have been optimized out. */
8080 	for (i = 0; i < n_instructions; i++) {
8081 		struct instruction *instr = &instructions[i];
8082 		struct instruction_data *data = &instruction_data[i];
8083 
8084 		if (data->invalid)
8085 			continue;
8086 
8087 		if (i != pos) {
8088 			memcpy(&instructions[pos], instr, sizeof(*instr));
8089 			memcpy(&instruction_data[pos], data, sizeof(*data));
8090 		}
8091 
8092 		pos++;
8093 	}
8094 
8095 	return pos;
8096 }
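/*
 * Illustration: instr_compact() removes in place the slots that a pattern
 * replacement marked as invalid. For a hypothetical three-entry array where
 * the middle entry was folded into the first one:
 *
 *	before: [ extract2 (valid), extract (invalid), mov (valid) ]   n = 3
 *	after:  [ extract2 (valid), mov (valid) ]                      n = 2
 *
 * The returned value is the new instruction count; entries past it are stale.
 */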
8097 
8098 static int
8099 instr_pattern_extract_many_search(struct instruction *instr,
8100 				  struct instruction_data *data,
8101 				  uint32_t n_instr,
8102 				  uint32_t *n_pattern_instr)
8103 {
8104 	uint32_t i;
8105 
8106 	for (i = 0; i < n_instr; i++) {
8107 		if (data[i].invalid)
8108 			break;
8109 
8110 		if (instr[i].type != INSTR_HDR_EXTRACT)
8111 			break;
8112 
8113 		if (i == RTE_DIM(instr->io.hdr.header_id))
8114 			break;
8115 
8116 		if (i && data[i].n_users)
8117 			break;
8118 	}
8119 
8120 	if (i < 2)
8121 		return 0;
8122 
8123 	*n_pattern_instr = i;
8124 	return 1;
8125 }
8126 
8127 static void
8128 instr_pattern_extract_many_replace(struct instruction *instr,
8129 				   struct instruction_data *data,
8130 				   uint32_t n_instr)
8131 {
8132 	uint32_t i;
8133 
8134 	for (i = 1; i < n_instr; i++) {
8135 		instr[0].type++;
8136 		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8137 		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8138 		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8139 
8140 		data[i].invalid = 1;
8141 	}
8142 }
8143 
8144 static uint32_t
8145 instr_pattern_extract_many_optimize(struct instruction *instructions,
8146 				    struct instruction_data *instruction_data,
8147 				    uint32_t n_instructions)
8148 {
8149 	uint32_t i;
8150 
8151 	for (i = 0; i < n_instructions; ) {
8152 		struct instruction *instr = &instructions[i];
8153 		struct instruction_data *data = &instruction_data[i];
8154 		uint32_t n_instr = 0;
8155 		int detected;
8156 
8157 		/* Extract many. */
8158 		detected = instr_pattern_extract_many_search(instr,
8159 							     data,
8160 							     n_instructions - i,
8161 							     &n_instr);
8162 		if (detected) {
8163 			instr_pattern_extract_many_replace(instr,
8164 							   data,
8165 							   n_instr);
8166 			i += n_instr;
8167 			continue;
8168 		}
8169 
8170 		/* No pattern starting at the current instruction. */
8171 		i++;
8172 	}
8173 
8174 	/* Eliminate the invalid instructions that have been optimized out. */
8175 	n_instructions = instr_compact(instructions,
8176 				       instruction_data,
8177 				       n_instructions);
8178 
8179 	return n_instructions;
8180 }
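/*
 * Example (hypothetical header names): a parser that runs
 *
 *	"extract h.ethernet"
 *	"extract h.ipv4"
 *	"extract h.udp"
 *
 * initially translates to three INSTR_HDR_EXTRACT instructions. Since they
 * are consecutive, are not jump targets and fit within the 8-entry limit of
 * instr->io.hdr.header_id[], the search/replace pair above folds them into a
 * single INSTR_HDR_EXTRACT3 instruction that pulls all three headers out of
 * the packet in one step, and instr_compact() then drops the two leftover
 * slots.
 */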
8181 
8182 static int
8183 instr_pattern_emit_many_tx_search(struct instruction *instr,
8184 				  struct instruction_data *data,
8185 				  uint32_t n_instr,
8186 				  uint32_t *n_pattern_instr)
8187 {
8188 	uint32_t i;
8189 
8190 	for (i = 0; i < n_instr; i++) {
8191 		if (data[i].invalid)
8192 			break;
8193 
8194 		if (instr[i].type != INSTR_HDR_EMIT)
8195 			break;
8196 
8197 		if (i == RTE_DIM(instr->io.hdr.header_id))
8198 			break;
8199 
8200 		if (i && data[i].n_users)
8201 			break;
8202 	}
8203 
8204 	if (!i)
8205 		return 0;
8206 
8207 	if (!instruction_is_tx(instr[i].type))
8208 		return 0;
8209 
8210 	if (data[i].n_users)
8211 		return 0;
8212 
8213 	i++;
8214 
8215 	*n_pattern_instr = i;
8216 	return 1;
8217 }
8218 
8219 static void
8220 instr_pattern_emit_many_tx_replace(struct instruction *instr,
8221 				   struct instruction_data *data,
8222 				   uint32_t n_instr)
8223 {
8224 	uint32_t i;
8225 
8226 	/* Fold each emit instruction after the first one into the first. */
8227 	for (i = 1; i < n_instr - 1; i++) {
8228 		instr[0].type++;
8229 		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8230 		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8231 		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8232 
8233 		data[i].invalid = 1;
8234 	}
8235 
8236 	/* The TX instruction is the last one in the pattern. */
8237 	instr[0].type++;
8238 	instr[0].io.io.offset = instr[i].io.io.offset;
8239 	instr[0].io.io.n_bits = instr[i].io.io.n_bits;
8240 	data[i].invalid = 1;
8241 }
8242 
8243 static uint32_t
8244 instr_pattern_emit_many_tx_optimize(struct instruction *instructions,
8245 				    struct instruction_data *instruction_data,
8246 				    uint32_t n_instructions)
8247 {
8248 	uint32_t i;
8249 
8250 	for (i = 0; i < n_instructions; ) {
8251 		struct instruction *instr = &instructions[i];
8252 		struct instruction_data *data = &instruction_data[i];
8253 		uint32_t n_instr = 0;
8254 		int detected;
8255 
8256 		/* Emit many + TX. */
8257 		detected = instr_pattern_emit_many_tx_search(instr,
8258 							     data,
8259 							     n_instructions - i,
8260 							     &n_instr);
8261 		if (detected) {
8262 			instr_pattern_emit_many_tx_replace(instr,
8263 							   data,
8264 							   n_instr);
8265 			i += n_instr;
8266 			continue;
8267 		}
8268 
8269 		/* No pattern starting at the current instruction. */
8270 		i++;
8271 	}
8272 
8273 	/* Eliminate the invalid instructions that have been optimized out. */
8274 	n_instructions = instr_compact(instructions,
8275 				       instruction_data,
8276 				       n_instructions);
8277 
8278 	return n_instructions;
8279 }
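/*
 * Example (hypothetical names): a deparser sequence such as
 *
 *	"emit h.ethernet"
 *	"emit h.ipv4"
 *	"tx m.port_out"
 *
 * is folded into a single instruction: the second and any further emits each
 * bump instr[0].type by one step and the trailing tx bumps it once more
 * (INSTR_HDR_EMIT -> INSTR_HDR_EMIT_TX -> INSTR_HDR_EMIT2_TX), with the tx
 * contributing its output port operand before its slot is invalidated.
 */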
8280 
8281 static uint32_t
8282 action_arg_src_mov_count(struct action *a,
8283 			 uint32_t arg_id,
8284 			 struct instruction *instructions,
8285 			 struct instruction_data *instruction_data,
8286 			 uint32_t n_instructions);
8287 
8288 static int
8289 instr_pattern_mov_all_validate_search(struct rte_swx_pipeline *p,
8290 				      struct action *a,
8291 				      struct instruction *instr,
8292 				      struct instruction_data *data,
8293 				      uint32_t n_instr,
8294 				      struct instruction *instructions,
8295 				      struct instruction_data *instruction_data,
8296 				      uint32_t n_instructions,
8297 				      uint32_t *n_pattern_instr)
8298 {
8299 	struct header *h;
8300 	uint32_t src_field_id, i, j;
8301 
8302 	/* Prerequisites. */
8303 	if (!a || !a->st)
8304 		return 0;
8305 
8306 	/* First instruction: MOV_HM. */
8307 	if (data[0].invalid || (instr[0].type != INSTR_MOV_HM))
8308 		return 0;
8309 
8310 	h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
8311 	if (!h)
8312 		return 0;
8313 
8314 	for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8315 		if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8316 			break;
8317 
8318 	if (src_field_id == a->st->n_fields)
8319 		return 0;
8320 
8321 	if (instr[0].mov.dst.offset ||
8322 	    (instr[0].mov.dst.n_bits != h->st->fields[0].n_bits) ||
8323 	    instr[0].mov.src.struct_id ||
8324 	    (instr[0].mov.src.n_bits != a->st->fields[src_field_id].n_bits) ||
8325 	    (instr[0].mov.dst.n_bits != instr[0].mov.src.n_bits))
8326 		return 0;
8327 
8328 	if ((n_instr < h->st->n_fields + 1) ||
8329 	     (a->st->n_fields < src_field_id + h->st->n_fields + 1))
8330 		return 0;
8331 
8332 	/* Subsequent instructions: MOV_HM. */
8333 	for (i = 1; i < h->st->n_fields; i++)
8334 		if (data[i].invalid ||
8335 		    data[i].n_users ||
8336 		    (instr[i].type != INSTR_MOV_HM) ||
8337 		    (instr[i].mov.dst.struct_id != h->struct_id) ||
8338 		    (instr[i].mov.dst.offset != h->st->fields[i].offset / 8) ||
8339 		    (instr[i].mov.dst.n_bits != h->st->fields[i].n_bits) ||
8340 		    instr[i].mov.src.struct_id ||
8341 		    (instr[i].mov.src.offset != a->st->fields[src_field_id + i].offset / 8) ||
8342 		    (instr[i].mov.src.n_bits != a->st->fields[src_field_id + i].n_bits) ||
8343 		    (instr[i].mov.dst.n_bits != instr[i].mov.src.n_bits))
8344 			return 0;
8345 
8346 	/* Last instruction: HDR_VALIDATE. */
8347 	if ((instr[i].type != INSTR_HDR_VALIDATE) ||
8348 	    (instr[i].valid.header_id != h->id))
8349 		return 0;
8350 
8351 	/* Check that none of the action args that are used as source for this
8352 	 * DMA transfer is also used as source in any other mov instruction.
8353 	 */
8354 	for (j = src_field_id; j < src_field_id + h->st->n_fields; j++) {
8355 		uint32_t n_users;
8356 
8357 		n_users = action_arg_src_mov_count(a,
8358 						   j,
8359 						   instructions,
8360 						   instruction_data,
8361 						   n_instructions);
8362 		if (n_users > 1)
8363 			return 0;
8364 	}
8365 
8366 	*n_pattern_instr = 1 + i;
8367 	return 1;
8368 }
8369 
8370 static void
8371 instr_pattern_mov_all_validate_replace(struct rte_swx_pipeline *p,
8372 				       struct action *a,
8373 				       struct instruction *instr,
8374 				       struct instruction_data *data,
8375 				       uint32_t n_instr)
8376 {
8377 	struct header *h;
8378 	uint32_t src_field_id, src_offset, i;
8379 
8380 	/* Read from the instructions before they are modified. */
8381 	h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
8382 	if (!h)
8383 		return;
8384 
8385 	for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8386 		if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8387 			break;
8388 
8389 	if (src_field_id == a->st->n_fields)
8390 		return;
8391 
8392 	src_offset = instr[0].mov.src.offset;
8393 
8394 	/* Modify the instructions. */
8395 	instr[0].type = INSTR_DMA_HT;
8396 	instr[0].dma.dst.header_id[0] = h->id;
8397 	instr[0].dma.dst.struct_id[0] = h->struct_id;
8398 	instr[0].dma.src.offset[0] = (uint8_t)src_offset;
8399 	instr[0].dma.n_bytes[0] = h->st->n_bits / 8;
8400 
8401 	for (i = 1; i < n_instr; i++)
8402 		data[i].invalid = 1;
8403 
8404 	/* Update the endianness of the action arguments to header endianness. */
8405 	for (i = 0; i < h->st->n_fields; i++)
8406 		a->args_endianness[src_field_id + i] = 1;
8407 }
8408 
8409 static uint32_t
8410 instr_pattern_mov_all_validate_optimize(struct rte_swx_pipeline *p,
8411 					struct action *a,
8412 					struct instruction *instructions,
8413 					struct instruction_data *instruction_data,
8414 					uint32_t n_instructions)
8415 {
8416 	uint32_t i;
8417 
8418 	if (!a || !a->st)
8419 		return n_instructions;
8420 
8421 	for (i = 0; i < n_instructions; ) {
8422 		struct instruction *instr = &instructions[i];
8423 		struct instruction_data *data = &instruction_data[i];
8424 		uint32_t n_instr = 0;
8425 		int detected;
8426 
8427 		/* Mov all + validate. */
8428 		detected = instr_pattern_mov_all_validate_search(p,
8429 								 a,
8430 								 instr,
8431 								 data,
8432 								 n_instructions - i,
8433 								 instructions,
8434 								 instruction_data,
8435 								 n_instructions,
8436 								 &n_instr);
8437 		if (detected) {
8438 			instr_pattern_mov_all_validate_replace(p, a, instr, data, n_instr);
8439 			i += n_instr;
8440 			continue;
8441 		}
8442 
8443 		/* No pattern starting at the current instruction. */
8444 		i++;
8445 	}
8446 
8447 	/* Eliminate the invalid instructions that have been optimized out. */
8448 	n_instructions = instr_compact(instructions,
8449 				       instruction_data,
8450 				       n_instructions);
8451 
8452 	return n_instructions;
8453 }
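/*
 * Example (hypothetical action and header names): inside an action whose
 * argument struct provides one field per header field, the sequence
 *
 *	"mov h.ethernet.dst_addr t.dmac"
 *	"mov h.ethernet.src_addr t.smac"
 *	"mov h.ethernet.ethertype t.ethertype"
 *	"validate h.ethernet"
 *
 * matches the pattern above: the movs write every field of h.ethernet, in
 * field order, from consecutive action arguments. The whole block is replaced
 * by a single INSTR_DMA_HT instruction that copies h->st->n_bits / 8 bytes of
 * action data straight into the header, and the corresponding action
 * arguments are flagged through args_endianness[] so that their bytes are
 * laid out in header (network) byte order and can be copied without any
 * per-field swapping.
 */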
8454 
8455 static int
8456 instr_pattern_dma_many_search(struct instruction *instr,
8457 			      struct instruction_data *data,
8458 			      uint32_t n_instr,
8459 			      uint32_t *n_pattern_instr)
8460 {
8461 	uint32_t i;
8462 
8463 	for (i = 0; i < n_instr; i++) {
8464 		if (data[i].invalid)
8465 			break;
8466 
8467 		if (instr[i].type != INSTR_DMA_HT)
8468 			break;
8469 
8470 		if (i == RTE_DIM(instr->dma.dst.header_id))
8471 			break;
8472 
8473 		if (i && data[i].n_users)
8474 			break;
8475 	}
8476 
8477 	if (i < 2)
8478 		return 0;
8479 
8480 	*n_pattern_instr = i;
8481 	return 1;
8482 }
8483 
8484 static void
8485 instr_pattern_dma_many_replace(struct instruction *instr,
8486 			       struct instruction_data *data,
8487 			       uint32_t n_instr)
8488 {
8489 	uint32_t i;
8490 
8491 	for (i = 1; i < n_instr; i++) {
8492 		instr[0].type++;
8493 		instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
8494 		instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
8495 		instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
8496 		instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
8497 
8498 		data[i].invalid = 1;
8499 	}
8500 }
8501 
8502 static uint32_t
8503 instr_pattern_dma_many_optimize(struct instruction *instructions,
8504 	       struct instruction_data *instruction_data,
8505 	       uint32_t n_instructions)
8506 {
8507 	uint32_t i;
8508 
8509 	for (i = 0; i < n_instructions; ) {
8510 		struct instruction *instr = &instructions[i];
8511 		struct instruction_data *data = &instruction_data[i];
8512 		uint32_t n_instr = 0;
8513 		int detected;
8514 
8515 		/* DMA many. */
8516 		detected = instr_pattern_dma_many_search(instr,
8517 							 data,
8518 							 n_instructions - i,
8519 							 &n_instr);
8520 		if (detected) {
8521 			instr_pattern_dma_many_replace(instr, data, n_instr);
8522 			i += n_instr;
8523 			continue;
8524 		}
8525 
8526 		/* No pattern starting at the current instruction. */
8527 		i++;
8528 	}
8529 
8530 	/* Eliminate the invalid instructions that have been optimized out. */
8531 	n_instructions = instr_compact(instructions,
8532 				       instruction_data,
8533 				       n_instructions);
8534 
8535 	return n_instructions;
8536 }
8537 
8538 static uint32_t
8539 instr_optimize(struct rte_swx_pipeline *p,
8540 	       struct action *a,
8541 	       struct instruction *instructions,
8542 	       struct instruction_data *instruction_data,
8543 	       uint32_t n_instructions)
8544 {
8545 	/* Extract many. */
8546 	n_instructions = instr_pattern_extract_many_optimize(instructions,
8547 							     instruction_data,
8548 							     n_instructions);
8549 
8550 	/* Emit many + TX. */
8551 	n_instructions = instr_pattern_emit_many_tx_optimize(instructions,
8552 							     instruction_data,
8553 							     n_instructions);
8554 
8555 	/* Mov all + validate. */
8556 	n_instructions = instr_pattern_mov_all_validate_optimize(p,
8557 								 a,
8558 								 instructions,
8559 								 instruction_data,
8560 								 n_instructions);
8561 
8562 	/* DMA many. */
8563 	n_instructions = instr_pattern_dma_many_optimize(instructions,
8564 							 instruction_data,
8565 							 n_instructions);
8566 
8567 	return n_instructions;
8568 }
8569 
8570 static int
8571 instruction_config(struct rte_swx_pipeline *p,
8572 		   struct action *a,
8573 		   const char **instructions,
8574 		   uint32_t n_instructions)
8575 {
8576 	struct instruction *instr = NULL;
8577 	struct instruction_data *data = NULL;
8578 	int err = 0;
8579 	uint32_t i;
8580 
8581 	CHECK(n_instructions, EINVAL);
8582 	CHECK(instructions, EINVAL);
8583 	for (i = 0; i < n_instructions; i++)
8584 		CHECK_INSTRUCTION(instructions[i], EINVAL);
8585 
8586 	/* Memory allocation. */
8587 	instr = calloc(n_instructions, sizeof(struct instruction));
8588 	if (!instr) {
8589 		err = -ENOMEM;
8590 		goto error;
8591 	}
8592 
8593 	data = calloc(n_instructions, sizeof(struct instruction_data));
8594 	if (!data) {
8595 		err = -ENOMEM;
8596 		goto error;
8597 	}
8598 
8599 	for (i = 0; i < n_instructions; i++) {
8600 		char *string = strdup(instructions[i]);
8601 		if (!string) {
8602 			err = -ENOMEM;
8603 			goto error;
8604 		}
8605 
8606 		err = instr_translate(p, a, string, &instr[i], &data[i]);
8607 		if (err) {
8608 			free(string);
8609 			goto error;
8610 		}
8611 
8612 		free(string);
8613 	}
8614 
8615 	err = instr_label_check(data, n_instructions);
8616 	if (err)
8617 		goto error;
8618 
8619 	err = instr_verify(p, a, instr, data, n_instructions);
8620 	if (err)
8621 		goto error;
8622 
8623 	n_instructions = instr_optimize(p, a, instr, data, n_instructions);
8624 
8625 	err = instr_jmp_resolve(instr, data, n_instructions);
8626 	if (err)
8627 		goto error;
8628 
8629 	if (a) {
8630 		a->instructions = instr;
8631 		a->n_instructions = n_instructions;
8632 	} else {
8633 		p->instructions = instr;
8634 		p->n_instructions = n_instructions;
8635 	}
8636 
8637 	free(data);
8638 	return 0;
8639 
8640 error:
8641 	free(data);
8642 	free(instr);
8643 	return err;
8644 }
8645 
8646 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
8647 
8648 static instr_exec_t instruction_table[] = {
8649 	[INSTR_RX] = instr_rx_exec,
8650 	[INSTR_TX] = instr_tx_exec,
8651 	[INSTR_TX_I] = instr_tx_i_exec,
8652 
8653 	[INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
8654 	[INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
8655 	[INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
8656 	[INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
8657 	[INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
8658 	[INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
8659 	[INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
8660 	[INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
8661 
8662 	[INSTR_HDR_EMIT] = instr_hdr_emit_exec,
8663 	[INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
8664 	[INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
8665 	[INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
8666 	[INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
8667 	[INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
8668 	[INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
8669 	[INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
8670 	[INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
8671 
8672 	[INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
8673 	[INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
8674 
8675 	[INSTR_MOV] = instr_mov_exec,
8676 	[INSTR_MOV_MH] = instr_mov_mh_exec,
8677 	[INSTR_MOV_HM] = instr_mov_hm_exec,
8678 	[INSTR_MOV_HH] = instr_mov_hh_exec,
8679 	[INSTR_MOV_I] = instr_mov_i_exec,
8680 
8681 	[INSTR_DMA_HT] = instr_dma_ht_exec,
8682 	[INSTR_DMA_HT2] = instr_dma_ht2_exec,
8683 	[INSTR_DMA_HT3] = instr_dma_ht3_exec,
8684 	[INSTR_DMA_HT4] = instr_dma_ht4_exec,
8685 	[INSTR_DMA_HT5] = instr_dma_ht5_exec,
8686 	[INSTR_DMA_HT6] = instr_dma_ht6_exec,
8687 	[INSTR_DMA_HT7] = instr_dma_ht7_exec,
8688 	[INSTR_DMA_HT8] = instr_dma_ht8_exec,
8689 
8690 	[INSTR_ALU_ADD] = instr_alu_add_exec,
8691 	[INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
8692 	[INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
8693 	[INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
8694 	[INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
8695 	[INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
8696 
8697 	[INSTR_ALU_SUB] = instr_alu_sub_exec,
8698 	[INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
8699 	[INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
8700 	[INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
8701 	[INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
8702 	[INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
8703 
8704 	[INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
8705 	[INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
8706 	[INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
8707 	[INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
8708 
8709 	[INSTR_ALU_AND] = instr_alu_and_exec,
8710 	[INSTR_ALU_AND_MH] = instr_alu_and_mh_exec,
8711 	[INSTR_ALU_AND_HM] = instr_alu_and_hm_exec,
8712 	[INSTR_ALU_AND_HH] = instr_alu_and_hh_exec,
8713 	[INSTR_ALU_AND_I] = instr_alu_and_i_exec,
8714 
8715 	[INSTR_ALU_OR] = instr_alu_or_exec,
8716 	[INSTR_ALU_OR_MH] = instr_alu_or_mh_exec,
8717 	[INSTR_ALU_OR_HM] = instr_alu_or_hm_exec,
8718 	[INSTR_ALU_OR_HH] = instr_alu_or_hh_exec,
8719 	[INSTR_ALU_OR_I] = instr_alu_or_i_exec,
8720 
8721 	[INSTR_ALU_XOR] = instr_alu_xor_exec,
8722 	[INSTR_ALU_XOR_MH] = instr_alu_xor_mh_exec,
8723 	[INSTR_ALU_XOR_HM] = instr_alu_xor_hm_exec,
8724 	[INSTR_ALU_XOR_HH] = instr_alu_xor_hh_exec,
8725 	[INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
8726 
8727 	[INSTR_ALU_SHL] = instr_alu_shl_exec,
8728 	[INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
8729 	[INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
8730 	[INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
8731 	[INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
8732 	[INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
8733 
8734 	[INSTR_ALU_SHR] = instr_alu_shr_exec,
8735 	[INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
8736 	[INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
8737 	[INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
8738 	[INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
8739 	[INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
8740 
8741 	[INSTR_REGPREFETCH_RH] = instr_regprefetch_rh_exec,
8742 	[INSTR_REGPREFETCH_RM] = instr_regprefetch_rm_exec,
8743 	[INSTR_REGPREFETCH_RI] = instr_regprefetch_ri_exec,
8744 
8745 	[INSTR_REGRD_HRH] = instr_regrd_hrh_exec,
8746 	[INSTR_REGRD_HRM] = instr_regrd_hrm_exec,
8747 	[INSTR_REGRD_MRH] = instr_regrd_mrh_exec,
8748 	[INSTR_REGRD_MRM] = instr_regrd_mrm_exec,
8749 	[INSTR_REGRD_HRI] = instr_regrd_hri_exec,
8750 	[INSTR_REGRD_MRI] = instr_regrd_mri_exec,
8751 
8752 	[INSTR_REGWR_RHH] = instr_regwr_rhh_exec,
8753 	[INSTR_REGWR_RHM] = instr_regwr_rhm_exec,
8754 	[INSTR_REGWR_RMH] = instr_regwr_rmh_exec,
8755 	[INSTR_REGWR_RMM] = instr_regwr_rmm_exec,
8756 	[INSTR_REGWR_RHI] = instr_regwr_rhi_exec,
8757 	[INSTR_REGWR_RMI] = instr_regwr_rmi_exec,
8758 	[INSTR_REGWR_RIH] = instr_regwr_rih_exec,
8759 	[INSTR_REGWR_RIM] = instr_regwr_rim_exec,
8760 	[INSTR_REGWR_RII] = instr_regwr_rii_exec,
8761 
8762 	[INSTR_REGADD_RHH] = instr_regadd_rhh_exec,
8763 	[INSTR_REGADD_RHM] = instr_regadd_rhm_exec,
8764 	[INSTR_REGADD_RMH] = instr_regadd_rmh_exec,
8765 	[INSTR_REGADD_RMM] = instr_regadd_rmm_exec,
8766 	[INSTR_REGADD_RHI] = instr_regadd_rhi_exec,
8767 	[INSTR_REGADD_RMI] = instr_regadd_rmi_exec,
8768 	[INSTR_REGADD_RIH] = instr_regadd_rih_exec,
8769 	[INSTR_REGADD_RIM] = instr_regadd_rim_exec,
8770 	[INSTR_REGADD_RII] = instr_regadd_rii_exec,
8771 
8772 	[INSTR_METPREFETCH_H] = instr_metprefetch_h_exec,
8773 	[INSTR_METPREFETCH_M] = instr_metprefetch_m_exec,
8774 	[INSTR_METPREFETCH_I] = instr_metprefetch_i_exec,
8775 
8776 	[INSTR_METER_HHM] = instr_meter_hhm_exec,
8777 	[INSTR_METER_HHI] = instr_meter_hhi_exec,
8778 	[INSTR_METER_HMM] = instr_meter_hmm_exec,
8779 	[INSTR_METER_HMI] = instr_meter_hmi_exec,
8780 	[INSTR_METER_MHM] = instr_meter_mhm_exec,
8781 	[INSTR_METER_MHI] = instr_meter_mhi_exec,
8782 	[INSTR_METER_MMM] = instr_meter_mmm_exec,
8783 	[INSTR_METER_MMI] = instr_meter_mmi_exec,
8784 	[INSTR_METER_IHM] = instr_meter_ihm_exec,
8785 	[INSTR_METER_IHI] = instr_meter_ihi_exec,
8786 	[INSTR_METER_IMM] = instr_meter_imm_exec,
8787 	[INSTR_METER_IMI] = instr_meter_imi_exec,
8788 
8789 	[INSTR_TABLE] = instr_table_exec,
8790 	[INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
8791 	[INSTR_EXTERN_FUNC] = instr_extern_func_exec,
8792 
8793 	[INSTR_JMP] = instr_jmp_exec,
8794 	[INSTR_JMP_VALID] = instr_jmp_valid_exec,
8795 	[INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
8796 	[INSTR_JMP_HIT] = instr_jmp_hit_exec,
8797 	[INSTR_JMP_MISS] = instr_jmp_miss_exec,
8798 	[INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
8799 	[INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,
8800 
8801 	[INSTR_JMP_EQ] = instr_jmp_eq_exec,
8802 	[INSTR_JMP_EQ_MH] = instr_jmp_eq_mh_exec,
8803 	[INSTR_JMP_EQ_HM] = instr_jmp_eq_hm_exec,
8804 	[INSTR_JMP_EQ_HH] = instr_jmp_eq_hh_exec,
8805 	[INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,
8806 
8807 	[INSTR_JMP_NEQ] = instr_jmp_neq_exec,
8808 	[INSTR_JMP_NEQ_MH] = instr_jmp_neq_mh_exec,
8809 	[INSTR_JMP_NEQ_HM] = instr_jmp_neq_hm_exec,
8810 	[INSTR_JMP_NEQ_HH] = instr_jmp_neq_hh_exec,
8811 	[INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,
8812 
8813 	[INSTR_JMP_LT] = instr_jmp_lt_exec,
8814 	[INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
8815 	[INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
8816 	[INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
8817 	[INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
8818 	[INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,
8819 
8820 	[INSTR_JMP_GT] = instr_jmp_gt_exec,
8821 	[INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
8822 	[INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
8823 	[INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
8824 	[INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
8825 	[INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,
8826 
8827 	[INSTR_RETURN] = instr_return_exec,
8828 };
8829 
8830 static inline void
8831 instr_exec(struct rte_swx_pipeline *p)
8832 {
8833 	struct thread *t = &p->threads[p->thread_id];
8834 	struct instruction *ip = t->ip;
8835 	instr_exec_t instr = instruction_table[ip->type];
8836 
8837 	instr(p);
8838 }
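/*
 * The dispatch above is a plain jump table: instruction_table[] is indexed by
 * the enum instruction_type value stored in the instruction itself, so
 * instr_exec() is roughly equivalent to a large switch statement such as
 *
 *	switch (ip->type) {
 *	case INSTR_RX: instr_rx_exec(p); break;
 *	case INSTR_TX: instr_tx_exec(p); break;
 *	...
 *	}
 *
 * but with a single indexed call. Each handler is responsible for advancing
 * (or, for jumps, redirecting) the per-thread instruction pointer t->ip.
 */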
8839 
8840 /*
8841  * Action.
8842  */
8843 static struct action *
8844 action_find(struct rte_swx_pipeline *p, const char *name)
8845 {
8846 	struct action *elem;
8847 
8848 	if (!name)
8849 		return NULL;
8850 
8851 	TAILQ_FOREACH(elem, &p->actions, node)
8852 		if (strcmp(elem->name, name) == 0)
8853 			return elem;
8854 
8855 	return NULL;
8856 }
8857 
8858 static struct action *
8859 action_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
8860 {
8861 	struct action *action = NULL;
8862 
8863 	TAILQ_FOREACH(action, &p->actions, node)
8864 		if (action->id == id)
8865 			return action;
8866 
8867 	return NULL;
8868 }
8869 
8870 static struct field *
8871 action_field_find(struct action *a, const char *name)
8872 {
8873 	return a->st ? struct_type_field_find(a->st, name) : NULL;
8874 }
8875 
8876 static struct field *
8877 action_field_parse(struct action *action, const char *name)
8878 {
8879 	if (name[0] != 't' || name[1] != '.')
8880 		return NULL;
8881 
8882 	return action_field_find(action, &name[2]);
8883 }
8884 
8885 int
8886 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
8887 			       const char *name,
8888 			       const char *args_struct_type_name,
8889 			       const char **instructions,
8890 			       uint32_t n_instructions)
8891 {
8892 	struct struct_type *args_struct_type;
8893 	struct action *a;
8894 	int err;
8895 
8896 	CHECK(p, EINVAL);
8897 
8898 	CHECK_NAME(name, EINVAL);
8899 	CHECK(!action_find(p, name), EEXIST);
8900 
8901 	if (args_struct_type_name) {
8902 		CHECK_NAME(args_struct_type_name, EINVAL);
8903 		args_struct_type = struct_type_find(p, args_struct_type_name);
8904 		CHECK(args_struct_type, EINVAL);
8905 	} else {
8906 		args_struct_type = NULL;
8907 	}
8908 
8909 	/* Node allocation. */
8910 	a = calloc(1, sizeof(struct action));
8911 	CHECK(a, ENOMEM);
8912 	if (args_struct_type) {
8913 		a->args_endianness = calloc(args_struct_type->n_fields, sizeof(int));
8914 		if (!a->args_endianness) {
8915 			free(a);
8916 			CHECK(0, ENOMEM);
8917 		}
8918 	}
8919 
8920 	/* Node initialization. */
8921 	strcpy(a->name, name);
8922 	a->st = args_struct_type;
8923 	a->id = p->n_actions;
8924 
8925 	/* Instruction translation. */
8926 	err = instruction_config(p, a, instructions, n_instructions);
8927 	if (err) {
8928 		free(a->args_endianness);
8929 		free(a);
8930 		return err;
8931 	}
8932 
8933 	/* Node add to tailq. */
8934 	TAILQ_INSERT_TAIL(&p->actions, a, node);
8935 	p->n_actions++;
8936 
8937 	return 0;
8938 }
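/*
 * Usage sketch (all names below are hypothetical and must have been
 * registered beforehand: a struct type "next_hop_args_t" with fields "dmac"
 * and "port", an "ethernet" header and a "port_out" metadata field):
 *
 *	static const char *next_hop_instr[] = {
 *		"mov h.ethernet.dst_addr t.dmac",
 *		"mov m.port_out t.port",
 *		"return",
 *	};
 *
 *	status = rte_swx_pipeline_action_config(p,
 *						"next_hop",
 *						"next_hop_args_t",
 *						next_hop_instr,
 *						RTE_DIM(next_hop_instr));
 *
 * Per instr_verify(), an action body must contain at least one return or tx
 * instruction; actions without arguments pass NULL as the struct type name.
 */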
8939 
8940 static int
8941 action_build(struct rte_swx_pipeline *p)
8942 {
8943 	struct action *action;
8944 
8945 	p->action_instructions = calloc(p->n_actions,
8946 					sizeof(struct instruction *));
8947 	CHECK(p->action_instructions, ENOMEM);
8948 
8949 	TAILQ_FOREACH(action, &p->actions, node)
8950 		p->action_instructions[action->id] = action->instructions;
8951 
8952 	return 0;
8953 }
8954 
8955 static void
8956 action_build_free(struct rte_swx_pipeline *p)
8957 {
8958 	free(p->action_instructions);
8959 	p->action_instructions = NULL;
8960 }
8961 
8962 static void
8963 action_free(struct rte_swx_pipeline *p)
8964 {
8965 	action_build_free(p);
8966 
8967 	for ( ; ; ) {
8968 		struct action *action;
8969 
8970 		action = TAILQ_FIRST(&p->actions);
8971 		if (!action)
8972 			break;
8973 
8974 		TAILQ_REMOVE(&p->actions, action, node);
8975 		free(action->instructions);
8976 		free(action);
8977 	}
8978 }
8979 
8980 static uint32_t
8981 action_arg_src_mov_count(struct action *a,
8982 			 uint32_t arg_id,
8983 			 struct instruction *instructions,
8984 			 struct instruction_data *instruction_data,
8985 			 uint32_t n_instructions)
8986 {
8987 	uint32_t offset, n_users = 0, i;
8988 
8989 	if (!a->st ||
8990 	    (arg_id >= a->st->n_fields) ||
8991 	    !instructions ||
8992 	    !instruction_data ||
8993 	    !n_instructions)
8994 		return 0;
8995 
8996 	offset = a->st->fields[arg_id].offset / 8;
8997 
8998 	for (i = 0; i < n_instructions; i++) {
8999 		struct instruction *instr = &instructions[i];
9000 		struct instruction_data *data = &instruction_data[i];
9001 
9002 		if (data->invalid ||
9003 		    ((instr->type != INSTR_MOV) && (instr->type != INSTR_MOV_HM)) ||
9004 		    instr->mov.src.struct_id ||
9005 		    (instr->mov.src.offset != offset))
9006 			continue;
9007 
9008 		n_users++;
9009 	}
9010 
9011 	return n_users;
9012 }
9013 
9014 /*
9015  * Table.
9016  */
9017 static struct table_type *
9018 table_type_find(struct rte_swx_pipeline *p, const char *name)
9019 {
9020 	struct table_type *elem;
9021 
9022 	TAILQ_FOREACH(elem, &p->table_types, node)
9023 		if (strcmp(elem->name, name) == 0)
9024 			return elem;
9025 
9026 	return NULL;
9027 }
9028 
9029 static struct table_type *
9030 table_type_resolve(struct rte_swx_pipeline *p,
9031 		   const char *recommended_type_name,
9032 		   enum rte_swx_table_match_type match_type)
9033 {
9034 	struct table_type *elem;
9035 
9036 	/* Only consider the recommended type if the match type is correct. */
9037 	if (recommended_type_name)
9038 		TAILQ_FOREACH(elem, &p->table_types, node)
9039 			if (!strcmp(elem->name, recommended_type_name) &&
9040 			    (elem->match_type == match_type))
9041 				return elem;
9042 
9043 	/* Ignore the recommended type and get the first element with this match
9044 	 * type.
9045 	 */
9046 	TAILQ_FOREACH(elem, &p->table_types, node)
9047 		if (elem->match_type == match_type)
9048 			return elem;
9049 
9050 	return NULL;
9051 }
9052 
9053 static struct table *
9054 table_find(struct rte_swx_pipeline *p, const char *name)
9055 {
9056 	struct table *elem;
9057 
9058 	TAILQ_FOREACH(elem, &p->tables, node)
9059 		if (strcmp(elem->name, name) == 0)
9060 			return elem;
9061 
9062 	return NULL;
9063 }
9064 
9065 static struct table *
9066 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9067 {
9068 	struct table *table = NULL;
9069 
9070 	TAILQ_FOREACH(table, &p->tables, node)
9071 		if (table->id == id)
9072 			return table;
9073 
9074 	return NULL;
9075 }
9076 
9077 int
9078 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
9079 				     const char *name,
9080 				     enum rte_swx_table_match_type match_type,
9081 				     struct rte_swx_table_ops *ops)
9082 {
9083 	struct table_type *elem;
9084 
9085 	CHECK(p, EINVAL);
9086 
9087 	CHECK_NAME(name, EINVAL);
9088 	CHECK(!table_type_find(p, name), EEXIST);
9089 
9090 	CHECK(ops, EINVAL);
9091 	CHECK(ops->create, EINVAL);
9092 	CHECK(ops->lkp, EINVAL);
9093 	CHECK(ops->free, EINVAL);
9094 
9095 	/* Node allocation. */
9096 	elem = calloc(1, sizeof(struct table_type));
9097 	CHECK(elem, ENOMEM);
9098 
9099 	/* Node initialization. */
9100 	strcpy(elem->name, name);
9101 	elem->match_type = match_type;
9102 	memcpy(&elem->ops, ops, sizeof(*ops));
9103 
9104 	/* Node add to tailq. */
9105 	TAILQ_INSERT_TAIL(&p->table_types, elem, node);
9106 
9107 	return 0;
9108 }
9109 
9110 static enum rte_swx_table_match_type
9111 table_match_type_resolve(struct rte_swx_match_field_params *fields,
9112 			 uint32_t n_fields,
9113 			 uint32_t max_offset_field_id)
9114 {
9115 	uint32_t n_fields_em = 0, i;
9116 
9117 	for (i = 0; i < n_fields; i++)
9118 		if (fields[i].match_type == RTE_SWX_TABLE_MATCH_EXACT)
9119 			n_fields_em++;
9120 
9121 	if (n_fields_em == n_fields)
9122 		return RTE_SWX_TABLE_MATCH_EXACT;
9123 
9124 	if ((n_fields_em == n_fields - 1) &&
9125 	    (fields[max_offset_field_id].match_type == RTE_SWX_TABLE_MATCH_LPM))
9126 		return RTE_SWX_TABLE_MATCH_LPM;
9127 
9128 	return RTE_SWX_TABLE_MATCH_WILDCARD;
9129 }
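/*
 * Illustration (hypothetical field names): for a two-field key made of
 * "h.ipv4.dst_addr" (LPM) and "h.ipv4.protocol" (EXACT), where dst_addr is
 * the field with the largest offset, the function above returns
 * RTE_SWX_TABLE_MATCH_LPM; if both fields were EXACT it would return
 * RTE_SWX_TABLE_MATCH_EXACT, and any other combination falls back to
 * RTE_SWX_TABLE_MATCH_WILDCARD.
 */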
9130 
9131 static int
9132 table_match_fields_check(struct rte_swx_pipeline *p,
9133 			 struct rte_swx_pipeline_table_params *params,
9134 			 struct header **header,
9135 			 uint32_t *min_offset_field_id,
9136 			 uint32_t *max_offset_field_id)
9137 {
9138 	struct header *h0 = NULL;
9139 	struct field *hf, *mf;
9140 	uint32_t *offset = NULL, min_offset, max_offset, min_offset_pos, max_offset_pos, i;
9141 	int status = 0;
9142 
9143 	/* Return if no match fields. */
9144 	if (!params->n_fields) {
9145 		if (params->fields) {
9146 			status = -EINVAL;
9147 			goto end;
9148 		}
9149 
9150 		return 0;
9151 	}
9152 
9153 	/* Memory allocation. */
9154 	offset = calloc(params->n_fields, sizeof(uint32_t));
9155 	if (!offset) {
9156 		status = -ENOMEM;
9157 		goto end;
9158 	}
9159 
9160 	/* Check that all the match fields belong to either the same header or
9161 	 * to the meta-data.
9162 	 */
9163 	hf = header_field_parse(p, params->fields[0].name, &h0);
9164 	mf = metadata_field_parse(p, params->fields[0].name);
9165 	if (!hf && !mf) {
9166 		status = -EINVAL;
9167 		goto end;
9168 	}
9169 
9170 	offset[0] = h0 ? hf->offset : mf->offset;
9171 
9172 	for (i = 1; i < params->n_fields; i++)
9173 		if (h0) {
9174 			struct header *h;
9175 
9176 			hf = header_field_parse(p, params->fields[i].name, &h);
9177 			if (!hf || (h->id != h0->id)) {
9178 				status = -EINVAL;
9179 				goto end;
9180 			}
9181 
9182 			offset[i] = hf->offset;
9183 		} else {
9184 			mf = metadata_field_parse(p, params->fields[i].name);
9185 			if (!mf) {
9186 				status = -EINVAL;
9187 				goto end;
9188 			}
9189 
9190 			offset[i] = mf->offset;
9191 		}
9192 
9193 	/* Check that there are no duplicated match fields. */
9194 	for (i = 0; i < params->n_fields; i++) {
9195 		uint32_t j;
9196 
9197 		for (j = 0; j < i; j++)
9198 			if (offset[j] == offset[i]) {
9199 				status = -EINVAL;
9200 				goto end;
9201 			}
9202 	}
9203 
9204 	/* Find the min and max offset fields. */
9205 	min_offset = offset[0];
9206 	max_offset = offset[0];
9207 	min_offset_pos = 0;
9208 	max_offset_pos = 0;
9209 
9210 	for (i = 1; i < params->n_fields; i++) {
9211 		if (offset[i] < min_offset) {
9212 			min_offset = offset[i];
9213 			min_offset_pos = i;
9214 		}
9215 
9216 		if (offset[i] > max_offset) {
9217 			max_offset = offset[i];
9218 			max_offset_pos = i;
9219 		}
9220 	}
9221 
9222 	/* Return. */
9223 	if (header)
9224 		*header = h0;
9225 
9226 	if (min_offset_field_id)
9227 		*min_offset_field_id = min_offset_pos;
9228 
9229 	if (max_offset_field_id)
9230 		*max_offset_field_id = max_offset_pos;
9231 
9232 end:
9233 	free(offset);
9234 	return status;
9235 }
9236 
9237 int
9238 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
9239 			      const char *name,
9240 			      struct rte_swx_pipeline_table_params *params,
9241 			      const char *recommended_table_type_name,
9242 			      const char *args,
9243 			      uint32_t size)
9244 {
9245 	struct table_type *type;
9246 	struct table *t;
9247 	struct action *default_action;
9248 	struct header *header = NULL;
9249 	uint32_t action_data_size_max = 0, min_offset_field_id = 0, max_offset_field_id = 0, i;
9250 	int status = 0;
9251 
9252 	CHECK(p, EINVAL);
9253 
9254 	CHECK_NAME(name, EINVAL);
9255 	CHECK(!table_find(p, name), EEXIST);
9256 
9257 	CHECK(params, EINVAL);
9258 
9259 	/* Match checks. */
9260 	status = table_match_fields_check(p,
9261 					  params,
9262 					  &header,
9263 					  &min_offset_field_id,
9264 					  &max_offset_field_id);
9265 	if (status)
9266 		return status;
9267 
9268 	/* Action checks. */
9269 	CHECK(params->n_actions, EINVAL);
9270 	CHECK(params->action_names, EINVAL);
9271 	for (i = 0; i < params->n_actions; i++) {
9272 		const char *action_name = params->action_names[i];
9273 		struct action *a;
9274 		uint32_t action_data_size;
9275 
9276 		CHECK_NAME(action_name, EINVAL);
9277 
9278 		a = action_find(p, action_name);
9279 		CHECK(a, EINVAL);
9280 
9281 		action_data_size = a->st ? a->st->n_bits / 8 : 0;
9282 		if (action_data_size > action_data_size_max)
9283 			action_data_size_max = action_data_size;
9284 	}
9285 
9286 	CHECK_NAME(params->default_action_name, EINVAL);
9287 	for (i = 0; i < params->n_actions; i++)
9288 		if (!strcmp(params->action_names[i],
9289 			    params->default_action_name))
9290 			break;
9291 	CHECK(i < params->n_actions, EINVAL);
9292 	default_action = action_find(p, params->default_action_name);
9293 	CHECK((default_action->st && params->default_action_data) ||
9294 	      !params->default_action_data, EINVAL);
9295 
9296 	/* Table type checks. */
9297 	if (recommended_table_type_name)
9298 		CHECK_NAME(recommended_table_type_name, EINVAL);
9299 
9300 	if (params->n_fields) {
9301 		enum rte_swx_table_match_type match_type;
9302 
9303 		match_type = table_match_type_resolve(params->fields,
9304 						      params->n_fields,
9305 						      max_offset_field_id);
9306 		type = table_type_resolve(p,
9307 					  recommended_table_type_name,
9308 					  match_type);
9309 		CHECK(type, EINVAL);
9310 	} else {
9311 		type = NULL;
9312 	}
9313 
9314 	/* Memory allocation. */
9315 	t = calloc(1, sizeof(struct table));
9316 	CHECK(t, ENOMEM);
9317 
9318 	t->fields = calloc(params->n_fields, sizeof(struct match_field));
9319 	if (!t->fields) {
9320 		free(t);
9321 		CHECK(0, ENOMEM);
9322 	}
9323 
9324 	t->actions = calloc(params->n_actions, sizeof(struct action *));
9325 	if (!t->actions) {
9326 		free(t->fields);
9327 		free(t);
9328 		CHECK(0, ENOMEM);
9329 	}
9330 
9331 	if (action_data_size_max) {
9332 		t->default_action_data = calloc(1, action_data_size_max);
9333 		if (!t->default_action_data) {
9334 			free(t->actions);
9335 			free(t->fields);
9336 			free(t);
9337 			CHECK(0, ENOMEM);
9338 		}
9339 	}
9340 
9341 	/* Node initialization. */
9342 	strcpy(t->name, name);
9343 	if (args && args[0])
9344 		strcpy(t->args, args);
9345 	t->type = type;
9346 
9347 	for (i = 0; i < params->n_fields; i++) {
9348 		struct rte_swx_match_field_params *field = &params->fields[i];
9349 		struct match_field *f = &t->fields[i];
9350 
9351 		f->match_type = field->match_type;
9352 		f->field = header ?
9353 			header_field_parse(p, field->name, NULL) :
9354 			metadata_field_parse(p, field->name);
9355 	}
9356 	t->n_fields = params->n_fields;
9357 	t->header = header;
9358 
9359 	for (i = 0; i < params->n_actions; i++)
9360 		t->actions[i] = action_find(p, params->action_names[i]);
9361 	t->default_action = default_action;
9362 	if (default_action->st)
9363 		memcpy(t->default_action_data,
9364 		       params->default_action_data,
9365 		       default_action->st->n_bits / 8);
9366 	t->n_actions = params->n_actions;
9367 	t->default_action_is_const = params->default_action_is_const;
9368 	t->action_data_size_max = action_data_size_max;
9369 
9370 	t->size = size;
9371 	t->id = p->n_tables;
9372 
9373 	/* Node add to tailq. */
9374 	TAILQ_INSERT_TAIL(&p->tables, t, node);
9375 	p->n_tables++;
9376 
9377 	return 0;
9378 }
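/*
 * Usage sketch (hypothetical names; the "lookup" and "drop" actions, the
 * "ipv4" header and its fields must already have been configured):
 *
 *	struct rte_swx_match_field_params fields[] = {
 *		[0] = {
 *			.name = "h.ipv4.dst_addr",
 *			.match_type = RTE_SWX_TABLE_MATCH_EXACT,
 *		},
 *	};
 *	const char *actions[] = {"lookup", "drop"};
 *	struct rte_swx_pipeline_table_params params = {
 *		.fields = fields,
 *		.n_fields = RTE_DIM(fields),
 *		.action_names = actions,
 *		.n_actions = RTE_DIM(actions),
 *		.default_action_name = "drop",
 *		.default_action_data = NULL,
 *		.default_action_is_const = 0,
 *	};
 *
 *	status = rte_swx_pipeline_table_config(p, "routing", &params,
 *					       NULL, NULL, 4096);
 *
 * Passing NULL as the recommended table type name lets table_type_resolve()
 * pick the first registered table type whose match type fits the key.
 */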
9379 
9380 static struct rte_swx_table_params *
9381 table_params_get(struct table *table)
9382 {
9383 	struct rte_swx_table_params *params;
9384 	struct field *first, *last;
9385 	uint8_t *key_mask;
9386 	uint32_t key_size, key_offset, action_data_size, i;
9387 
9388 	/* Memory allocation. */
9389 	params = calloc(1, sizeof(struct rte_swx_table_params));
9390 	if (!params)
9391 		return NULL;
9392 
9393 	/* Find first (smallest offset) and last (largest offset) match fields. */
9394 	first = table->fields[0].field;
9395 	last = table->fields[0].field;
9396 
9397 	for (i = 0; i < table->n_fields; i++) {
9398 		struct field *f = table->fields[i].field;
9399 
9400 		if (f->offset < first->offset)
9401 			first = f;
9402 
9403 		if (f->offset > last->offset)
9404 			last = f;
9405 	}
9406 
9407 	/* Key offset and size. */
9408 	key_offset = first->offset / 8;
9409 	key_size = (last->offset + last->n_bits - first->offset) / 8;
9410 
9411 	/* Memory allocation. */
9412 	key_mask = calloc(1, key_size);
9413 	if (!key_mask) {
9414 		free(params);
9415 		return NULL;
9416 	}
9417 
9418 	/* Key mask. */
9419 	for (i = 0; i < table->n_fields; i++) {
9420 		struct field *f = table->fields[i].field;
9421 		uint32_t start = (f->offset - first->offset) / 8;
9422 		size_t size = f->n_bits / 8;
9423 
9424 		memset(&key_mask[start], 0xFF, size);
9425 	}
9426 
9427 	/* Action data size. */
9428 	action_data_size = 0;
9429 	for (i = 0; i < table->n_actions; i++) {
9430 		struct action *action = table->actions[i];
9431 		uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
9432 
9433 		if (ads > action_data_size)
9434 			action_data_size = ads;
9435 	}
9436 
9437 	/* Fill in. */
9438 	params->match_type = table->type->match_type;
9439 	params->key_size = key_size;
9440 	params->key_offset = key_offset;
9441 	params->key_mask0 = key_mask;
9442 	params->action_data_size = action_data_size;
9443 	params->n_keys_max = table->size;
9444 
9445 	return params;
9446 }
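/*
 * Worked example: for a key made of the IPv4 source and destination address
 * fields (32 bits each, at bit offsets 96 and 128 within the header), the
 * computation above yields:
 *
 *	key_offset = 96 / 8              = 12 bytes into the header
 *	key_size   = (128 + 32 - 96) / 8 =  8 bytes
 *	key_mask0  = 0xFF for all 8 bytes (both fields fully matched)
 *
 * A field that sits between the first and last key fields but is not part of
 * the key simply leaves its bytes as 0x00 in key_mask0.
 */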
9447 
9448 static void
9449 table_params_free(struct rte_swx_table_params *params)
9450 {
9451 	if (!params)
9452 		return;
9453 
9454 	free(params->key_mask0);
9455 	free(params);
9456 }
9457 
9458 static int
9459 table_state_build(struct rte_swx_pipeline *p)
9460 {
9461 	struct table *table;
9462 
9463 	p->table_state = calloc(p->n_tables,
9464 				sizeof(struct rte_swx_table_state));
9465 	CHECK(p->table_state, ENOMEM);
9466 
9467 	TAILQ_FOREACH(table, &p->tables, node) {
9468 		struct rte_swx_table_state *ts = &p->table_state[table->id];
9469 
9470 		if (table->type) {
9471 			struct rte_swx_table_params *params;
9472 
9473 			/* ts->obj. */
9474 			params = table_params_get(table);
9475 			CHECK(params, ENOMEM);
9476 
9477 			ts->obj = table->type->ops.create(params,
9478 				NULL,
9479 				table->args,
9480 				p->numa_node);
9481 
9482 			table_params_free(params);
9483 			CHECK(ts->obj, ENODEV);
9484 		}
9485 
9486 		/* ts->default_action_data. */
9487 		if (table->action_data_size_max) {
9488 			ts->default_action_data =
9489 				malloc(table->action_data_size_max);
9490 			CHECK(ts->default_action_data, ENOMEM);
9491 
9492 			memcpy(ts->default_action_data,
9493 			       table->default_action_data,
9494 			       table->action_data_size_max);
9495 		}
9496 
9497 		/* ts->default_action_id. */
9498 		ts->default_action_id = table->default_action->id;
9499 	}
9500 
9501 	return 0;
9502 }
9503 
9504 static void
9505 table_state_build_free(struct rte_swx_pipeline *p)
9506 {
9507 	uint32_t i;
9508 
9509 	if (!p->table_state)
9510 		return;
9511 
9512 	for (i = 0; i < p->n_tables; i++) {
9513 		struct rte_swx_table_state *ts = &p->table_state[i];
9514 		struct table *table = table_find_by_id(p, i);
9515 
9516 		/* ts->obj. */
9517 		if (table->type && ts->obj)
9518 			table->type->ops.free(ts->obj);
9519 
9520 		/* ts->default_action_data. */
9521 		free(ts->default_action_data);
9522 	}
9523 
9524 	free(p->table_state);
9525 	p->table_state = NULL;
9526 }
9527 
9528 static void
9529 table_state_free(struct rte_swx_pipeline *p)
9530 {
9531 	table_state_build_free(p);
9532 }
9533 
9534 static int
9535 table_stub_lkp(void *table __rte_unused,
9536 	       void *mailbox __rte_unused,
9537 	       uint8_t **key __rte_unused,
9538 	       uint64_t *action_id __rte_unused,
9539 	       uint8_t **action_data __rte_unused,
9540 	       int *hit)
9541 {
9542 	*hit = 0;
9543 	return 1; /* DONE. */
9544 }
9545 
9546 static int
9547 table_build(struct rte_swx_pipeline *p)
9548 {
9549 	uint32_t i;
9550 
9551 	/* Per pipeline: table statistics. */
9552 	p->table_stats = calloc(p->n_tables, sizeof(struct table_statistics));
9553 	CHECK(p->table_stats, ENOMEM);
9554 
9555 	for (i = 0; i < p->n_tables; i++) {
9556 		p->table_stats[i].n_pkts_action = calloc(p->n_actions, sizeof(uint64_t));
9557 		CHECK(p->table_stats[i].n_pkts_action, ENOMEM);
9558 	}
9559 
9560 	/* Per thread: table run-time. */
9561 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9562 		struct thread *t = &p->threads[i];
9563 		struct table *table;
9564 
9565 		t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
9566 		CHECK(t->tables, ENOMEM);
9567 
9568 		TAILQ_FOREACH(table, &p->tables, node) {
9569 			struct table_runtime *r = &t->tables[table->id];
9570 
9571 			if (table->type) {
9572 				uint64_t size;
9573 
9574 				size = table->type->ops.mailbox_size_get();
9575 
9576 				/* r->func. */
9577 				r->func = table->type->ops.lkp;
9578 
9579 				/* r->mailbox. */
9580 				if (size) {
9581 					r->mailbox = calloc(1, size);
9582 					CHECK(r->mailbox, ENOMEM);
9583 				}
9584 
9585 				/* r->key. */
9586 				r->key = table->header ?
9587 					&t->structs[table->header->struct_id] :
9588 					&t->structs[p->metadata_struct_id];
9589 			} else {
9590 				r->func = table_stub_lkp;
9591 			}
9592 		}
9593 	}
9594 
9595 	return 0;
9596 }
9597 
9598 static void
9599 table_build_free(struct rte_swx_pipeline *p)
9600 {
9601 	uint32_t i;
9602 
9603 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9604 		struct thread *t = &p->threads[i];
9605 		uint32_t j;
9606 
9607 		if (!t->tables)
9608 			continue;
9609 
9610 		for (j = 0; j < p->n_tables; j++) {
9611 			struct table_runtime *r = &t->tables[j];
9612 
9613 			free(r->mailbox);
9614 		}
9615 
9616 		free(t->tables);
9617 		t->tables = NULL;
9618 	}
9619 
9620 	if (p->table_stats) {
9621 		for (i = 0; i < p->n_tables; i++)
9622 			free(p->table_stats[i].n_pkts_action);
9623 
9624 		free(p->table_stats);
9625 	}
9626 }
9627 
9628 static void
9629 table_free(struct rte_swx_pipeline *p)
9630 {
9631 	table_build_free(p);
9632 
9633 	/* Tables. */
9634 	for ( ; ; ) {
9635 		struct table *elem;
9636 
9637 		elem = TAILQ_FIRST(&p->tables);
9638 		if (!elem)
9639 			break;
9640 
9641 		TAILQ_REMOVE(&p->tables, elem, node);
9642 		free(elem->fields);
9643 		free(elem->actions);
9644 		free(elem->default_action_data);
9645 		free(elem);
9646 	}
9647 
9648 	/* Table types. */
9649 	for ( ; ; ) {
9650 		struct table_type *elem;
9651 
9652 		elem = TAILQ_FIRST(&p->table_types);
9653 		if (!elem)
9654 			break;
9655 
9656 		TAILQ_REMOVE(&p->table_types, elem, node);
9657 		free(elem);
9658 	}
9659 }
9660 
9661 /*
9662  * Register array.
9663  */
9664 static struct regarray *
9665 regarray_find(struct rte_swx_pipeline *p, const char *name)
9666 {
9667 	struct regarray *elem;
9668 
9669 	TAILQ_FOREACH(elem, &p->regarrays, node)
9670 		if (!strcmp(elem->name, name))
9671 			return elem;
9672 
9673 	return NULL;
9674 }
9675 
9676 static struct regarray *
9677 regarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9678 {
9679 	struct regarray *elem = NULL;
9680 
9681 	TAILQ_FOREACH(elem, &p->regarrays, node)
9682 		if (elem->id == id)
9683 			return elem;
9684 
9685 	return NULL;
9686 }
9687 
9688 int
9689 rte_swx_pipeline_regarray_config(struct rte_swx_pipeline *p,
9690 			      const char *name,
9691 			      uint32_t size,
9692 			      uint64_t init_val)
9693 {
9694 	struct regarray *r;
9695 
9696 	CHECK(p, EINVAL);
9697 
9698 	CHECK_NAME(name, EINVAL);
9699 	CHECK(!regarray_find(p, name), EEXIST);
9700 
9701 	CHECK(size, EINVAL);
9702 	size = rte_align32pow2(size);
9703 
9704 	/* Memory allocation. */
9705 	r = calloc(1, sizeof(struct regarray));
9706 	CHECK(r, ENOMEM);
9707 
9708 	/* Node initialization. */
9709 	strcpy(r->name, name);
9710 	r->init_val = init_val;
9711 	r->size = size;
9712 	r->id = p->n_regarrays;
9713 
9714 	/* Node add to tailq. */
9715 	TAILQ_INSERT_TAIL(&p->regarrays, r, node);
9716 	p->n_regarrays++;
9717 
9718 	return 0;
9719 }
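/*
 * Usage sketch: the array size is rounded up to the next power of two so that
 * regarray_build() can mask indices with size_mask instead of a modulo, e.g.
 *
 *	rte_swx_pipeline_regarray_config(p, "pkt_count", 1000, 0);
 *
 * creates a 1024-entry array of 64-bit registers initialized to 0. The array
 * is then referenced by name from the regrd/regwr/regadd/regprefetch
 * instructions, for instance "regadd pkt_count m.port_in 1" (the metadata
 * field name here is hypothetical).
 */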
9720 
9721 static int
9722 regarray_build(struct rte_swx_pipeline *p)
9723 {
9724 	struct regarray *regarray;
9725 
9726 	if (!p->n_regarrays)
9727 		return 0;
9728 
9729 	p->regarray_runtime = calloc(p->n_regarrays, sizeof(struct regarray_runtime));
9730 	CHECK(p->regarray_runtime, ENOMEM);
9731 
9732 	TAILQ_FOREACH(regarray, &p->regarrays, node) {
9733 		struct regarray_runtime *r = &p->regarray_runtime[regarray->id];
9734 		uint32_t i;
9735 
9736 		r->regarray = env_malloc(regarray->size * sizeof(uint64_t),
9737 					 RTE_CACHE_LINE_SIZE,
9738 					 p->numa_node);
9739 		CHECK(r->regarray, ENOMEM);
9740 
9741 		if (regarray->init_val)
9742 			for (i = 0; i < regarray->size; i++)
9743 				r->regarray[i] = regarray->init_val;
9744 
9745 		r->size_mask = regarray->size - 1;
9746 	}
9747 
9748 	return 0;
9749 }
9750 
9751 static void
9752 regarray_build_free(struct rte_swx_pipeline *p)
9753 {
9754 	uint32_t i;
9755 
9756 	if (!p->regarray_runtime)
9757 		return;
9758 
9759 	for (i = 0; i < p->n_regarrays; i++) {
9760 		struct regarray *regarray = regarray_find_by_id(p, i);
9761 		struct regarray_runtime *r = &p->regarray_runtime[i];
9762 
9763 		env_free(r->regarray, regarray->size * sizeof(uint64_t));
9764 	}
9765 
9766 	free(p->regarray_runtime);
9767 	p->regarray_runtime = NULL;
9768 }
9769 
9770 static void
9771 regarray_free(struct rte_swx_pipeline *p)
9772 {
9773 	regarray_build_free(p);
9774 
9775 	for ( ; ; ) {
9776 		struct regarray *elem;
9777 
9778 		elem = TAILQ_FIRST(&p->regarrays);
9779 		if (!elem)
9780 			break;
9781 
9782 		TAILQ_REMOVE(&p->regarrays, elem, node);
9783 		free(elem);
9784 	}
9785 }
9786 
9787 /*
9788  * Meter array.
9789  */
9790 static struct meter_profile *
9791 meter_profile_find(struct rte_swx_pipeline *p, const char *name)
9792 {
9793 	struct meter_profile *elem;
9794 
9795 	TAILQ_FOREACH(elem, &p->meter_profiles, node)
9796 		if (!strcmp(elem->name, name))
9797 			return elem;
9798 
9799 	return NULL;
9800 }
9801 
9802 static struct metarray *
9803 metarray_find(struct rte_swx_pipeline *p, const char *name)
9804 {
9805 	struct metarray *elem;
9806 
9807 	TAILQ_FOREACH(elem, &p->metarrays, node)
9808 		if (!strcmp(elem->name, name))
9809 			return elem;
9810 
9811 	return NULL;
9812 }
9813 
9814 static struct metarray *
9815 metarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9816 {
9817 	struct metarray *elem = NULL;
9818 
9819 	TAILQ_FOREACH(elem, &p->metarrays, node)
9820 		if (elem->id == id)
9821 			return elem;
9822 
9823 	return NULL;
9824 }
9825 
9826 int
9827 rte_swx_pipeline_metarray_config(struct rte_swx_pipeline *p,
9828 				 const char *name,
9829 				 uint32_t size)
9830 {
9831 	struct metarray *m;
9832 
9833 	CHECK(p, EINVAL);
9834 
9835 	CHECK_NAME(name, EINVAL);
9836 	CHECK(!metarray_find(p, name), EEXIST);
9837 
9838 	CHECK(size, EINVAL);
9839 	size = rte_align32pow2(size);
9840 
9841 	/* Memory allocation. */
9842 	m = calloc(1, sizeof(struct metarray));
9843 	CHECK(m, ENOMEM);
9844 
9845 	/* Node initialization. */
9846 	strcpy(m->name, name);
9847 	m->size = size;
9848 	m->id = p->n_metarrays;
9849 
9850 	/* Node add to tailq. */
9851 	TAILQ_INSERT_TAIL(&p->metarrays, m, node);
9852 	p->n_metarrays++;
9853 
9854 	return 0;
9855 }
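/*
 * Usage sketch: like register arrays, meter arrays are rounded up to a power
 * of two and indexed with a mask, e.g.
 *
 *	rte_swx_pipeline_metarray_config(p, "flow_meters", 65536);
 *
 * creates 65536 trTCM meters, each initially bound to the built-in
 * meter_profile_default below. Individual meters are expected to be
 * reconfigured later through the control API and driven from the "meter"
 * instruction.
 */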
9856 
9857 struct meter_profile meter_profile_default = {
9858 	.node = {0},
9859 	.name = "",
9860 	.params = {0},
9861 
9862 	.profile = {
9863 		.cbs = 10000,
9864 		.pbs = 10000,
9865 		.cir_period = 1,
9866 		.cir_bytes_per_period = 1,
9867 		.pir_period = 1,
9868 		.pir_bytes_per_period = 1,
9869 	},
9870 
9871 	.n_users = 0,
9872 };
9873 
9874 static void
9875 meter_init(struct meter *m)
9876 {
9877 	memset(m, 0, sizeof(struct meter));
9878 	rte_meter_trtcm_config(&m->m, &meter_profile_default.profile);
9879 	m->profile = &meter_profile_default;
9880 	m->color_mask = RTE_COLOR_GREEN;
9881 
9882 	meter_profile_default.n_users++;
9883 }
9884 
9885 static int
9886 metarray_build(struct rte_swx_pipeline *p)
9887 {
9888 	struct metarray *m;
9889 
9890 	if (!p->n_metarrays)
9891 		return 0;
9892 
9893 	p->metarray_runtime = calloc(p->n_metarrays, sizeof(struct metarray_runtime));
9894 	CHECK(p->metarray_runtime, ENOMEM);
9895 
9896 	TAILQ_FOREACH(m, &p->metarrays, node) {
9897 		struct metarray_runtime *r = &p->metarray_runtime[m->id];
9898 		uint32_t i;
9899 
9900 		r->metarray = env_malloc(m->size * sizeof(struct meter),
9901 					 RTE_CACHE_LINE_SIZE,
9902 					 p->numa_node);
9903 		CHECK(r->metarray, ENOMEM);
9904 
9905 		for (i = 0; i < m->size; i++)
9906 			meter_init(&r->metarray[i]);
9907 
9908 		r->size_mask = m->size - 1;
9909 	}
9910 
9911 	return 0;
9912 }
9913 
9914 static void
9915 metarray_build_free(struct rte_swx_pipeline *p)
9916 {
9917 	uint32_t i;
9918 
9919 	if (!p->metarray_runtime)
9920 		return;
9921 
9922 	for (i = 0; i < p->n_metarrays; i++) {
9923 		struct metarray *m = metarray_find_by_id(p, i);
9924 		struct metarray_runtime *r = &p->metarray_runtime[i];
9925 
9926 		env_free(r->metarray, m->size * sizeof(struct meter));
9927 	}
9928 
9929 	free(p->metarray_runtime);
9930 	p->metarray_runtime = NULL;
9931 }
9932 
9933 static void
9934 metarray_free(struct rte_swx_pipeline *p)
9935 {
9936 	metarray_build_free(p);
9937 
9938 	/* Meter arrays. */
9939 	for ( ; ; ) {
9940 		struct metarray *elem;
9941 
9942 		elem = TAILQ_FIRST(&p->metarrays);
9943 		if (!elem)
9944 			break;
9945 
9946 		TAILQ_REMOVE(&p->metarrays, elem, node);
9947 		free(elem);
9948 	}
9949 
9950 	/* Meter profiles. */
9951 	for ( ; ; ) {
9952 		struct meter_profile *elem;
9953 
9954 		elem = TAILQ_FIRST(&p->meter_profiles);
9955 		if (!elem)
9956 			break;
9957 
9958 		TAILQ_REMOVE(&p->meter_profiles, elem, node);
9959 		free(elem);
9960 	}
9961 }
9962 
9963 /*
9964  * Pipeline.
9965  */
9966 int
9967 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
9968 {
9969 	struct rte_swx_pipeline *pipeline;
9970 
9971 	/* Check input parameters. */
9972 	CHECK(p, EINVAL);
9973 
9974 	/* Memory allocation. */
9975 	pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
9976 	CHECK(pipeline, ENOMEM);
9977 
9978 	/* Initialization. */
9979 	TAILQ_INIT(&pipeline->struct_types);
9980 	TAILQ_INIT(&pipeline->port_in_types);
9981 	TAILQ_INIT(&pipeline->ports_in);
9982 	TAILQ_INIT(&pipeline->port_out_types);
9983 	TAILQ_INIT(&pipeline->ports_out);
9984 	TAILQ_INIT(&pipeline->extern_types);
9985 	TAILQ_INIT(&pipeline->extern_objs);
9986 	TAILQ_INIT(&pipeline->extern_funcs);
9987 	TAILQ_INIT(&pipeline->headers);
9988 	TAILQ_INIT(&pipeline->actions);
9989 	TAILQ_INIT(&pipeline->table_types);
9990 	TAILQ_INIT(&pipeline->tables);
9991 	TAILQ_INIT(&pipeline->regarrays);
9992 	TAILQ_INIT(&pipeline->meter_profiles);
9993 	TAILQ_INIT(&pipeline->metarrays);
9994 
9995 	pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
9996 	pipeline->numa_node = numa_node;
9997 
9998 	*p = pipeline;
9999 	return 0;
10000 }
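
/*
 * Usage sketch (NUMA node 0 is illustrative): run-time structures allocated
 * through env_malloc() during the build step, such as the meter arrays set up
 * by metarray_build() above, are placed on the NUMA node requested here.
 *
 *    struct rte_swx_pipeline *p = NULL;
 *    int status;
 *
 *    status = rte_swx_pipeline_config(&p, 0);
 *    if (status)
 *            return status;
 */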
10001 
10002 void
10003 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
10004 {
10005 	if (!p)
10006 		return;
10007 
10008 	free(p->instructions);
10009 
10010 	metarray_free(p);
10011 	regarray_free(p);
10012 	table_state_free(p);
10013 	table_free(p);
10014 	action_free(p);
10015 	metadata_free(p);
10016 	header_free(p);
10017 	extern_func_free(p);
10018 	extern_obj_free(p);
10019 	port_out_free(p);
10020 	port_in_free(p);
10021 	struct_free(p);
10022 
10023 	free(p);
10024 }
10025 
10026 int
10027 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
10028 				     const char **instructions,
10029 				     uint32_t n_instructions)
10030 {
10031 	int err;
10032 	uint32_t i;
10033 
10034 	err = instruction_config(p, NULL, instructions, n_instructions);
10035 	if (err)
10036 		return err;
10037 
10038 	/* Thread instruction pointer reset. */
10039 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
10040 		struct thread *t = &p->threads[i];
10041 
10042 		thread_ip_reset(p, t);
10043 	}
10044 
10045 	return 0;
10046 }
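
/*
 * Usage sketch: the pipeline program is an array of instruction strings. The
 * mnemonics and the metadata field names below ("m.port_in", "m.port_out")
 * are illustrative and assume a matching metadata struct was registered
 * earlier in the configuration sequence.
 *
 *    static const char *program[] = {
 *            "rx m.port_in",
 *            "tx m.port_out",
 *    };
 *
 *    status = rte_swx_pipeline_instructions_config(p, program,
 *                                                  RTE_DIM(program));
 */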
10047 
10048 int
10049 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
10050 {
10051 	int status;
10052 
10053 	CHECK(p, EINVAL);
10054 	CHECK(p->build_done == 0, EEXIST);
10055 
10056 	status = port_in_build(p);
10057 	if (status)
10058 		goto error;
10059 
10060 	status = port_out_build(p);
10061 	if (status)
10062 		goto error;
10063 
10064 	status = struct_build(p);
10065 	if (status)
10066 		goto error;
10067 
10068 	status = extern_obj_build(p);
10069 	if (status)
10070 		goto error;
10071 
10072 	status = extern_func_build(p);
10073 	if (status)
10074 		goto error;
10075 
10076 	status = header_build(p);
10077 	if (status)
10078 		goto error;
10079 
10080 	status = metadata_build(p);
10081 	if (status)
10082 		goto error;
10083 
10084 	status = action_build(p);
10085 	if (status)
10086 		goto error;
10087 
10088 	status = table_build(p);
10089 	if (status)
10090 		goto error;
10091 
10092 	status = table_state_build(p);
10093 	if (status)
10094 		goto error;
10095 
10096 	status = regarray_build(p);
10097 	if (status)
10098 		goto error;
10099 
10100 	status = metarray_build(p);
10101 	if (status)
10102 		goto error;
10103 
10104 	p->build_done = 1;
10105 	return 0;
10106 
10107 error:
10108 	metarray_build_free(p);
10109 	regarray_build_free(p);
10110 	table_state_build_free(p);
10111 	table_build_free(p);
10112 	action_build_free(p);
10113 	metadata_build_free(p);
10114 	header_build_free(p);
10115 	extern_func_build_free(p);
10116 	extern_obj_build_free(p);
10117 	port_out_build_free(p);
10118 	port_in_build_free(p);
10119 	struct_build_free(p);
10120 
10121 	return status;
10122 }
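
/*
 * Build is a one-shot step (guarded by build_done) that must follow the
 * configuration of all ports, actions, tables, register/meter arrays and the
 * instruction program. On failure the partial build is torn down by the
 * *_build_free() handlers above, so the pipeline can still be destroyed:
 *
 *    status = rte_swx_pipeline_build(p);
 *    if (status) {
 *            rte_swx_pipeline_free(p);
 *            return status;
 *    }
 */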
10123 
10124 void
10125 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
10126 {
10127 	uint32_t i;
10128 
10129 	for (i = 0; i < n_instructions; i++)
10130 		instr_exec(p);
10131 }
10132 
10133 void
10134 rte_swx_pipeline_flush(struct rte_swx_pipeline *p)
10135 {
10136 	uint32_t i;
10137 
10138 	for (i = 0; i < p->n_ports_out; i++) {
10139 		struct port_out_runtime *port = &p->out[i];
10140 
10141 		if (port->flush)
10142 			port->flush(port->obj);
10143 	}
10144 }
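
/*
 * Data plane sketch: rte_swx_pipeline_run() executes n_instructions pipeline
 * instructions per call, while rte_swx_pipeline_flush() pushes out any
 * packets buffered by the output ports. The instruction budget (1024), the
 * flush period (every 64 iterations) and the quit flag are illustrative,
 * application-defined choices.
 *
 *    for (uint32_t iter = 0; !quit; iter++) {
 *            rte_swx_pipeline_run(p, 1024);
 *
 *            if ((iter & 63) == 0)
 *                    rte_swx_pipeline_flush(p);
 *    }
 */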
10145 
10146 /*
10147  * Control.
10148  */
10149 int
10150 rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p,
10151 			      struct rte_swx_ctl_pipeline_info *pipeline)
10152 {
10153 	struct action *action;
10154 	struct table *table;
10155 	uint32_t n_actions = 0, n_tables = 0;
10156 
10157 	if (!p || !pipeline)
10158 		return -EINVAL;
10159 
10160 	TAILQ_FOREACH(action, &p->actions, node)
10161 		n_actions++;
10162 
10163 	TAILQ_FOREACH(table, &p->tables, node)
10164 		n_tables++;
10165 
10166 	pipeline->n_ports_in = p->n_ports_in;
10167 	pipeline->n_ports_out = p->n_ports_out;
10168 	pipeline->n_actions = n_actions;
10169 	pipeline->n_tables = n_tables;
10170 	pipeline->n_regarrays = p->n_regarrays;
10171 	pipeline->n_metarrays = p->n_metarrays;
10172 
10173 	return 0;
10174 }
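
/*
 * Usage sketch: read the global object counts first, then walk the per-object
 * info calls. The printf() reporting is illustrative.
 *
 *    struct rte_swx_ctl_pipeline_info info;
 *    uint32_t i;
 *
 *    rte_swx_ctl_pipeline_info_get(p, &info);
 *
 *    for (i = 0; i < info.n_actions; i++) {
 *            struct rte_swx_ctl_action_info ai;
 *
 *            rte_swx_ctl_action_info_get(p, i, &ai);
 *            printf("action %s: %u args\n", ai.name, ai.n_args);
 *    }
 */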
10175 
10176 int
10177 rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node)
10178 {
10179 	if (!p || !numa_node)
10180 		return -EINVAL;
10181 
10182 	*numa_node = p->numa_node;
10183 	return 0;
10184 }
10185 
10186 int
10187 rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p,
10188 			    uint32_t action_id,
10189 			    struct rte_swx_ctl_action_info *action)
10190 {
10191 	struct action *a = NULL;
10192 
10193 	if (!p || (action_id >= p->n_actions) || !action)
10194 		return -EINVAL;
10195 
10196 	a = action_find_by_id(p, action_id);
10197 	if (!a)
10198 		return -EINVAL;
10199 
10200 	strcpy(action->name, a->name);
10201 	action->n_args = a->st ? a->st->n_fields : 0;
10202 	return 0;
10203 }
10204 
10205 int
10206 rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p,
10207 				uint32_t action_id,
10208 				uint32_t action_arg_id,
10209 				struct rte_swx_ctl_action_arg_info *action_arg)
10210 {
10211 	struct action *a = NULL;
10212 	struct field *arg = NULL;
10213 
10214 	if (!p || (action_id >= p->n_actions) || !action_arg)
10215 		return -EINVAL;
10216 
10217 	a = action_find_by_id(p, action_id);
10218 	if (!a || !a->st || (action_arg_id >= a->st->n_fields))
10219 		return -EINVAL;
10220 
10221 	arg = &a->st->fields[action_arg_id];
10222 	strcpy(action_arg->name, arg->name);
10223 	action_arg->n_bits = arg->n_bits;
10224 	action_arg->is_network_byte_order = a->args_endianness[action_arg_id];
10225 
10226 	return 0;
10227 }
10228 
10229 int
10230 rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p,
10231 			   uint32_t table_id,
10232 			   struct rte_swx_ctl_table_info *table)
10233 {
10234 	struct table *t = NULL;
10235 
10236 	if (!p || !table)
10237 		return -EINVAL;
10238 
10239 	t = table_find_by_id(p, table_id);
10240 	if (!t)
10241 		return -EINVAL;
10242 
10243 	strcpy(table->name, t->name);
10244 	strcpy(table->args, t->args);
10245 	table->n_match_fields = t->n_fields;
10246 	table->n_actions = t->n_actions;
10247 	table->default_action_is_const = t->default_action_is_const;
10248 	table->size = t->size;
10249 	return 0;
10250 }
10251 
10252 int
10253 rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p,
10254 	uint32_t table_id,
10255 	uint32_t match_field_id,
10256 	struct rte_swx_ctl_table_match_field_info *match_field)
10257 {
10258 	struct table *t;
10259 	struct match_field *f;
10260 
10261 	if (!p || (table_id >= p->n_tables) || !match_field)
10262 		return -EINVAL;
10263 
10264 	t = table_find_by_id(p, table_id);
10265 	if (!t || (match_field_id >= t->n_fields))
10266 		return -EINVAL;
10267 
10268 	f = &t->fields[match_field_id];
10269 	match_field->match_type = f->match_type;
10270 	match_field->is_header = t->header ? 1 : 0;
10271 	match_field->n_bits = f->field->n_bits;
10272 	match_field->offset = f->field->offset;
10273 
10274 	return 0;
10275 }
10276 
10277 int
10278 rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p,
10279 	uint32_t table_id,
10280 	uint32_t table_action_id,
10281 	struct rte_swx_ctl_table_action_info *table_action)
10282 {
10283 	struct table *t;
10284 
10285 	if (!p || (table_id >= p->n_tables) || !table_action)
10286 		return -EINVAL;
10287 
10288 	t = table_find_by_id(p, table_id);
10289 	if (!t || (table_action_id >= t->n_actions))
10290 		return -EINVAL;
10291 
10292 	table_action->action_id = t->actions[table_action_id]->id;
10293 
10294 	return 0;
10295 }
10296 
10297 int
10298 rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p,
10299 			  uint32_t table_id,
10300 			  struct rte_swx_table_ops *table_ops,
10301 			  int *is_stub)
10302 {
10303 	struct table *t;
10304 
10305 	if (!p || (table_id >= p->n_tables) || !is_stub)
10306 		return -EINVAL;
10307 
10308 	t = table_find_by_id(p, table_id);
10309 	if (!t)
10310 		return -EINVAL;
10311 
10312 	if (t->type) {
10313 		if (table_ops)
10314 			memcpy(table_ops, &t->type->ops, sizeof(*table_ops));
10315 		*is_stub = 0;
10316 	} else {
10317 		*is_stub = 1;
10318 	}
10319 
10320 	return 0;
10321 }
10322 
10323 int
10324 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
10325 				 struct rte_swx_table_state **table_state)
10326 {
10327 	if (!p || !table_state || !p->build_done)
10328 		return -EINVAL;
10329 
10330 	*table_state = p->table_state;
10331 	return 0;
10332 }
10333 
10334 int
10335 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
10336 				 struct rte_swx_table_state *table_state)
10337 {
10338 	if (!p || !table_state || !p->build_done)
10339 		return -EINVAL;
10340 
10341 	p->table_state = table_state;
10342 	return 0;
10343 }
10344 
10345 int
10346 rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p,
10347 					uint32_t port_id,
10348 					struct rte_swx_port_in_stats *stats)
10349 {
10350 	struct port_in *port;
10351 
10352 	if (!p || !stats)
10353 		return -EINVAL;
10354 
10355 	port = port_in_find(p, port_id);
10356 	if (!port)
10357 		return -EINVAL;
10358 
10359 	port->type->ops.stats_read(port->obj, stats);
10360 	return 0;
10361 }
10362 
10363 int
10364 rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p,
10365 					 uint32_t port_id,
10366 					 struct rte_swx_port_out_stats *stats)
10367 {
10368 	struct port_out *port;
10369 
10370 	if (!p || !stats)
10371 		return -EINVAL;
10372 
10373 	port = port_out_find(p, port_id);
10374 	if (!port)
10375 		return -EINVAL;
10376 
10377 	port->type->ops.stats_read(port->obj, stats);
10378 	return 0;
10379 }
10380 
10381 int
10382 rte_swx_ctl_pipeline_table_stats_read(struct rte_swx_pipeline *p,
10383 				      const char *table_name,
10384 				      struct rte_swx_table_stats *stats)
10385 {
10386 	struct table *table;
10387 	struct table_statistics *table_stats;
10388 
10389 	if (!p || !table_name || !table_name[0] || !stats || !stats->n_pkts_action)
10390 		return -EINVAL;
10391 
10392 	table = table_find(p, table_name);
10393 	if (!table)
10394 		return -EINVAL;
10395 
10396 	table_stats = &p->table_stats[table->id];
10397 
10398 	memcpy(&stats->n_pkts_action,
10399 	       &table_stats->n_pkts_action,
10400 	       p->n_actions * sizeof(uint64_t));
10401 
10402 	stats->n_pkts_hit = table_stats->n_pkts_hit[1];
10403 	stats->n_pkts_miss = table_stats->n_pkts_hit[0];
10404 
10405 	return 0;
10406 }
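
/*
 * Usage sketch: stats->n_pkts_action must point to a caller-provided array
 * with one counter per pipeline action (the memcpy above copies p->n_actions
 * entries), otherwise the call is rejected with -EINVAL. The table name
 * "routing" is illustrative.
 *
 *    struct rte_swx_ctl_pipeline_info info;
 *    struct rte_swx_table_stats stats;
 *
 *    rte_swx_ctl_pipeline_info_get(p, &info);
 *    stats.n_pkts_action = calloc(info.n_actions, sizeof(uint64_t));
 *
 *    rte_swx_ctl_pipeline_table_stats_read(p, "routing", &stats);
 *    printf("hit %" PRIu64 ", miss %" PRIu64 "\n",
 *           stats.n_pkts_hit, stats.n_pkts_miss);
 */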
10407 
10408 int
10409 rte_swx_ctl_regarray_info_get(struct rte_swx_pipeline *p,
10410 			      uint32_t regarray_id,
10411 			      struct rte_swx_ctl_regarray_info *regarray)
10412 {
10413 	struct regarray *r;
10414 
10415 	if (!p || !regarray)
10416 		return -EINVAL;
10417 
10418 	r = regarray_find_by_id(p, regarray_id);
10419 	if (!r)
10420 		return -EINVAL;
10421 
10422 	strcpy(regarray->name, r->name);
10423 	regarray->size = r->size;
10424 	return 0;
10425 }
10426 
10427 int
10428 rte_swx_ctl_pipeline_regarray_read(struct rte_swx_pipeline *p,
10429 				   const char *regarray_name,
10430 				   uint32_t regarray_index,
10431 				   uint64_t *value)
10432 {
10433 	struct regarray *regarray;
10434 	struct regarray_runtime *r;
10435 
10436 	if (!p || !regarray_name || !value)
10437 		return -EINVAL;
10438 
10439 	regarray = regarray_find(p, regarray_name);
10440 	if (!regarray || (regarray_index >= regarray->size))
10441 		return -EINVAL;
10442 
10443 	r = &p->regarray_runtime[regarray->id];
10444 	*value = r->regarray[regarray_index];
10445 	return 0;
10446 }
10447 
10448 int
10449 rte_swx_ctl_pipeline_regarray_write(struct rte_swx_pipeline *p,
10450 				   const char *regarray_name,
10451 				   uint32_t regarray_index,
10452 				   uint64_t value)
10453 {
10454 	struct regarray *regarray;
10455 	struct regarray_runtime *r;
10456 
10457 	if (!p || !regarray_name)
10458 		return -EINVAL;
10459 
10460 	regarray = regarray_find(p, regarray_name);
10461 	if (!regarray || (regarray_index >= regarray->size))
10462 		return -EINVAL;
10463 
10464 	r = &p->regarray_runtime[regarray->id];
10465 	r->regarray[regarray_index] = value;
10466 	return 0;
10467 }
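
/*
 * Usage sketch: registers are plain 64-bit cells addressed by array name and
 * index. The array name "counters" and index 0 are illustrative; note that a
 * read-modify-write done from the control path this way is not synchronized
 * with the data plane.
 *
 *    uint64_t value;
 *
 *    rte_swx_ctl_pipeline_regarray_read(p, "counters", 0, &value);
 *    rte_swx_ctl_pipeline_regarray_write(p, "counters", 0, value + 1);
 */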
10468 
10469 int
10470 rte_swx_ctl_metarray_info_get(struct rte_swx_pipeline *p,
10471 			      uint32_t metarray_id,
10472 			      struct rte_swx_ctl_metarray_info *metarray)
10473 {
10474 	struct metarray *m;
10475 
10476 	if (!p || !metarray)
10477 		return -EINVAL;
10478 
10479 	m = metarray_find_by_id(p, metarray_id);
10480 	if (!m)
10481 		return -EINVAL;
10482 
10483 	strcpy(metarray->name, m->name);
10484 	metarray->size = m->size;
10485 	return 0;
10486 }
10487 
10488 int
10489 rte_swx_ctl_meter_profile_add(struct rte_swx_pipeline *p,
10490 			      const char *name,
10491 			      struct rte_meter_trtcm_params *params)
10492 {
10493 	struct meter_profile *mp;
10494 	int status;
10495 
10496 	CHECK(p, EINVAL);
10497 	CHECK_NAME(name, EINVAL);
10498 	CHECK(params, EINVAL);
10499 	CHECK(!meter_profile_find(p, name), EEXIST);
10500 
10501 	/* Node allocation. */
10502 	mp = calloc(1, sizeof(struct meter_profile));
10503 	CHECK(mp, ENOMEM);
10504 
10505 	/* Node initialization. */
10506 	strcpy(mp->name, name);
10507 	memcpy(&mp->params, params, sizeof(struct rte_meter_trtcm_params));
10508 	status = rte_meter_trtcm_profile_config(&mp->profile, params);
10509 	if (status) {
10510 		free(mp);
10511 		CHECK(0, EINVAL);
10512 	}
10513 
10514 	/* Node add to tailq. */
10515 	TAILQ_INSERT_TAIL(&p->meter_profiles, mp, node);
10516 
10517 	return 0;
10518 }
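
/*
 * Usage sketch: a trTCM profile is defined by its committed and peak
 * information rates (cir, pir, in bytes per second) and burst sizes (cbs,
 * pbs, in bytes). The values and the profile name "platinum" are
 * illustrative.
 *
 *    struct rte_meter_trtcm_params params = {
 *            .cir = 1250000,
 *            .pir = 12500000,
 *            .cbs = 10000,
 *            .pbs = 10000,
 *    };
 *
 *    status = rte_swx_ctl_meter_profile_add(p, "platinum", &params);
 */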
10519 
10520 int
10521 rte_swx_ctl_meter_profile_delete(struct rte_swx_pipeline *p,
10522 				 const char *name)
10523 {
10524 	struct meter_profile *mp;
10525 
10526 	CHECK(p, EINVAL);
10527 	CHECK_NAME(name, EINVAL);
10528 
10529 	mp = meter_profile_find(p, name);
10530 	CHECK(mp, EINVAL);
10531 	CHECK(!mp->n_users, EBUSY);
10532 
10533 	/* Remove node from tailq. */
10534 	TAILQ_REMOVE(&p->meter_profiles, mp, node);
10535 	free(mp);
10536 
10537 	return 0;
10538 }
10539 
10540 int
10541 rte_swx_ctl_meter_reset(struct rte_swx_pipeline *p,
10542 			const char *metarray_name,
10543 			uint32_t metarray_index)
10544 {
10545 	struct meter_profile *mp_old;
10546 	struct metarray *metarray;
10547 	struct metarray_runtime *metarray_runtime;
10548 	struct meter *m;
10549 
10550 	CHECK(p, EINVAL);
10551 	CHECK_NAME(metarray_name, EINVAL);
10552 
10553 	metarray = metarray_find(p, metarray_name);
10554 	CHECK(metarray, EINVAL);
10555 	CHECK(metarray_index < metarray->size, EINVAL);
10556 
10557 	metarray_runtime = &p->metarray_runtime[metarray->id];
10558 	m = &metarray_runtime->metarray[metarray_index];
10559 	mp_old = m->profile;
10560 
10561 	meter_init(m);
10562 
10563 	mp_old->n_users--;
10564 
10565 	return 0;
10566 }
10567 
10568 int
10569 rte_swx_ctl_meter_set(struct rte_swx_pipeline *p,
10570 		      const char *metarray_name,
10571 		      uint32_t metarray_index,
10572 		      const char *profile_name)
10573 {
10574 	struct meter_profile *mp, *mp_old;
10575 	struct metarray *metarray;
10576 	struct metarray_runtime *metarray_runtime;
10577 	struct meter *m;
10578 
10579 	CHECK(p, EINVAL);
10580 	CHECK_NAME(metarray_name, EINVAL);
	CHECK_NAME(profile_name, EINVAL);
10581 
10582 	metarray = metarray_find(p, metarray_name);
10583 	CHECK(metarray, EINVAL);
10584 	CHECK(metarray_index < metarray->size, EINVAL);
10585 
10586 	mp = meter_profile_find(p, profile_name);
10587 	CHECK(mp, EINVAL);
10588 
10589 	metarray_runtime = &p->metarray_runtime[metarray->id];
10590 	m = &metarray_runtime->metarray[metarray_index];
10591 	mp_old = m->profile;
10592 
10593 	memset(m, 0, sizeof(struct meter));
10594 	rte_meter_trtcm_config(&m->m, &mp->profile);
10595 	m->profile = mp;
10596 	m->color_mask = RTE_COLORS;
10597 
10598 	mp->n_users++;
10599 	mp_old->n_users--;
10600 
10601 	return 0;
10602 }
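
/*
 * Usage sketch: attach meter 5 of array "meters" to the "platinum" profile
 * added above, then later return it to the default profile. The names and
 * the index are illustrative.
 *
 *    rte_swx_ctl_meter_set(p, "meters", 5, "platinum");
 *    ...
 *    rte_swx_ctl_meter_reset(p, "meters", 5);
 */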
10603 
10604 int
10605 rte_swx_ctl_meter_stats_read(struct rte_swx_pipeline *p,
10606 			     const char *metarray_name,
10607 			     uint32_t metarray_index,
10608 			     struct rte_swx_ctl_meter_stats *stats)
10609 {
10610 	struct metarray *metarray;
10611 	struct metarray_runtime *metarray_runtime;
10612 	struct meter *m;
10613 
10614 	CHECK(p, EINVAL);
10615 	CHECK_NAME(metarray_name, EINVAL);
10616 
10617 	metarray = metarray_find(p, metarray_name);
10618 	CHECK(metarray, EINVAL);
10619 	CHECK(metarray_index < metarray->size, EINVAL);
10620 
10621 	CHECK(stats, EINVAL);
10622 
10623 	metarray_runtime = &p->metarray_runtime[metarray->id];
10624 	m = &metarray_runtime->metarray[metarray_index];
10625 
10626 	memcpy(stats->n_pkts, m->n_pkts, sizeof(m->n_pkts));
10627 	memcpy(stats->n_bytes, m->n_bytes, sizeof(m->n_bytes));
10628 
10629 	return 0;
10630 }
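
/*
 * Usage sketch: the meter statistics are kept per output color, so they can
 * be indexed with the rte_color enumeration. The array name and index are
 * illustrative.
 *
 *    struct rte_swx_ctl_meter_stats stats;
 *
 *    rte_swx_ctl_meter_stats_read(p, "meters", 5, &stats);
 *    printf("green %" PRIu64 ", yellow %" PRIu64 ", red %" PRIu64 "\n",
 *           stats.n_pkts[RTE_COLOR_GREEN],
 *           stats.n_pkts[RTE_COLOR_YELLOW],
 *           stats.n_pkts[RTE_COLOR_RED]);
 */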
10631