/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 */

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_gre.h>
#include <rte_mpls.h>
#include <rte_net.h>
#include <rte_os_shim.h>

/* get l3 packet type from ip6 next protocol */
static uint32_t
ptype_l3_ip6(uint8_t ip6_proto)
{
	static const uint32_t ip6_ext_proto_map[256] = {
		[IPPROTO_HOPOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_ROUTING] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_FRAGMENT] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_ESP] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_AH] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_DSTOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
	};
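	/*
	 * Note: each initialized entry holds the difference
	 * RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6, so adding
	 * RTE_PTYPE_L3_IPV6 below yields IPV6_EXT for extension headers
	 * and plain IPV6 for every other next-protocol value.
	 */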

	return RTE_PTYPE_L3_IPV6 + ip6_ext_proto_map[ip6_proto];
}

/* get l3 packet type from ip version and header length */
static uint32_t
ptype_l3_ip(uint8_t ipv_ihl)
{
	static const uint32_t ptype_l3_ip_proto_map[256] = {
		[0x45] = RTE_PTYPE_L3_IPV4,
		[0x46] = RTE_PTYPE_L3_IPV4_EXT,
		[0x47] = RTE_PTYPE_L3_IPV4_EXT,
		[0x48] = RTE_PTYPE_L3_IPV4_EXT,
		[0x49] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4A] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4B] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4C] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4D] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4E] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4F] = RTE_PTYPE_L3_IPV4_EXT,
	};
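	/*
	 * Note: the index is the raw version_ihl byte: the high nibble is
	 * the IP version and the low nibble the header length in 32-bit
	 * words. 0x45 is a plain 20-byte IPv4 header, 0x46..0x4F are IPv4
	 * headers carrying options; any other value is left at 0, i.e. an
	 * unknown L3 type.
	 */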

	return ptype_l3_ip_proto_map[ipv_ihl];
}

/* get l4 packet type from proto */
static uint32_t
ptype_l4(uint8_t proto)
{
	static const uint32_t ptype_l4_proto[256] = {
		[IPPROTO_UDP] = RTE_PTYPE_L4_UDP,
		[IPPROTO_TCP] = RTE_PTYPE_L4_TCP,
		[IPPROTO_SCTP] = RTE_PTYPE_L4_SCTP,
	};

	return ptype_l4_proto[proto];
}

/* get inner l3 packet type from ip6 next protocol */
static uint32_t
ptype_inner_l3_ip6(uint8_t ip6_proto)
{
	static const uint32_t ptype_inner_ip6_ext_proto_map[256] = {
		[IPPROTO_HOPOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_ROUTING] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_FRAGMENT] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_ESP] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_AH] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_DSTOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
	};

	return RTE_PTYPE_INNER_L3_IPV6 +
		ptype_inner_ip6_ext_proto_map[ip6_proto];
}

/* get inner l3 packet type from ip version and header length */
static uint32_t
ptype_inner_l3_ip(uint8_t ipv_ihl)
{
	static const uint32_t ptype_inner_l3_ip_proto_map[256] = {
		[0x45] = RTE_PTYPE_INNER_L3_IPV4,
		[0x46] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x47] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x48] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x49] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4A] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4B] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4C] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4D] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4E] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4F] = RTE_PTYPE_INNER_L3_IPV4_EXT,
	};

	return ptype_inner_l3_ip_proto_map[ipv_ihl];
}

/* get inner l4 packet type from proto */
static uint32_t
ptype_inner_l4(uint8_t proto)
{
	static const uint32_t ptype_inner_l4_proto[256] = {
		[IPPROTO_UDP] = RTE_PTYPE_INNER_L4_UDP,
		[IPPROTO_TCP] = RTE_PTYPE_INNER_L4_TCP,
		[IPPROTO_SCTP] = RTE_PTYPE_INNER_L4_SCTP,
	};

	return ptype_inner_l4_proto[proto];
}

/* get the tunnel packet type if any, update proto and off. */
static uint32_t
ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,
	uint32_t *off)
{
	switch (*proto) {
	case IPPROTO_GRE: {
		static const uint8_t opt_len[16] = {
			[0x0] = 4,
			[0x1] = 8,
			[0x2] = 8,
			[0x8] = 8,
			[0x3] = 12,
			[0x9] = 12,
			[0xa] = 12,
			[0xb] = 16,
		};
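		/*
		 * Note: opt_len[] is indexed by the four most significant
		 * bits of the first GRE word, i.e. the C, R, K and S flags.
		 * The base header is 4 bytes and each of the checksum (C),
		 * key (K) and sequence number (S) fields adds 4 bytes.
		 * Combinations left at 0 (any with the legacy routing bit R
		 * set) are treated as unsupported below.
		 */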
		const struct rte_gre_hdr *gh;
		struct rte_gre_hdr gh_copy;
		uint16_t flags;

		gh = rte_pktmbuf_read(m, *off, sizeof(*gh), &gh_copy);
		if (unlikely(gh == NULL))
			return 0;

		flags = rte_be_to_cpu_16(*(const uint16_t *)gh);
		flags >>= 12;
		if (opt_len[flags] == 0)
			return 0;

		*off += opt_len[flags];
		*proto = gh->proto;
		if (*proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB))
			return RTE_PTYPE_TUNNEL_NVGRE;
		else
			return RTE_PTYPE_TUNNEL_GRE;
	}
	case IPPROTO_IPIP:
		*proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		return RTE_PTYPE_TUNNEL_IP;
	case IPPROTO_IPV6:
		*proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		return RTE_PTYPE_TUNNEL_IP; /* IP is also valid for IPv6 */
	default:
		return 0;
	}
}

/* parse ipv6 extended headers, update offset and return next proto */
int
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
	int *frag)
{
	struct ext_hdr {
		uint8_t next_hdr;
		uint8_t len;
	};
	const struct ext_hdr *xh;
	struct ext_hdr xh_copy;
	unsigned int i;

	*frag = 0;

#define MAX_EXT_HDRS 5
	for (i = 0; i < MAX_EXT_HDRS; i++) {
		switch (proto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
				&xh_copy);
			if (xh == NULL)
				return -1;
			*off += (xh->len + 1) * 8;
			proto = xh->next_hdr;
			break;
		case IPPROTO_FRAGMENT:
			xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
				&xh_copy);
			if (xh == NULL)
				return -1;
			*off += 8;
			proto = xh->next_hdr;
			*frag = 1;
			return proto; /* this is always the last ext hdr */
		case IPPROTO_NONE:
			return 0;
		default:
			return proto;
		}
	}
	return -1;
}
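
/*
 * A minimal usage sketch for rte_net_skip_ip6_ext() (illustration only, not
 * part of the library). The mbuf "m" and the offset "l3_off" of the IPv6
 * fixed header are assumed to come from the caller:
 *
 *	struct rte_ipv6_hdr ip6_copy;
 *	const struct rte_ipv6_hdr *ip6h;
 *	uint32_t off;
 *	int frag = 0;
 *	int proto;
 *
 *	ip6h = rte_pktmbuf_read(m, l3_off, sizeof(*ip6h), &ip6_copy);
 *	if (ip6h == NULL)
 *		goto truncated;
 *	off = l3_off + sizeof(*ip6h);
 *	proto = rte_net_skip_ip6_ext(ip6h->proto, m, &off, &frag);
 *	if (proto < 0)
 *		goto malformed;
 *
 * On success "proto" is the upper-layer protocol (e.g. IPPROTO_TCP, or 0
 * when the chain ends with IPPROTO_NONE), "off" points to its header and
 * "frag" is set if a fragment header was crossed.
 */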

/* parse mbuf data to get packet type */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
	struct rte_net_hdr_lens *hdr_lens, uint32_t layers)
{
	struct rte_net_hdr_lens local_hdr_lens;
	const struct rte_ether_hdr *eh;
	struct rte_ether_hdr eh_copy;
	uint32_t pkt_type = RTE_PTYPE_L2_ETHER;
	uint32_t off = 0;
	uint16_t proto;
	int ret;

	if (hdr_lens == NULL)
		hdr_lens = &local_hdr_lens;

	eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
	if (unlikely(eh == NULL))
		return 0;
	proto = eh->ether_type;
	off = sizeof(*eh);
	hdr_lens->l2_len = off;

	if ((layers & RTE_PTYPE_L2_MASK) == 0)
		return 0;

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		goto l3; /* fast path if packet is IPv4 */

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		const struct rte_vlan_hdr *vh;
		struct rte_vlan_hdr vh_copy;

		pkt_type = RTE_PTYPE_L2_ETHER_VLAN;
		vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += sizeof(*vh);
		hdr_lens->l2_len += sizeof(*vh);
		proto = vh->eth_proto;
	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
		const struct rte_vlan_hdr *vh;
		struct rte_vlan_hdr vh_copy;

		pkt_type = RTE_PTYPE_L2_ETHER_QINQ;
		vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
			&vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += 2 * sizeof(*vh);
		hdr_lens->l2_len += 2 * sizeof(*vh);
		proto = vh->eth_proto;
	} else if ((proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLS)) ||
		(proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLSM))) {
		unsigned int i;
		const struct rte_mpls_hdr *mh;
		struct rte_mpls_hdr mh_copy;

#define MAX_MPLS_HDR 5
		for (i = 0; i < MAX_MPLS_HDR; i++) {
			mh = rte_pktmbuf_read(m, off + (i * sizeof(*mh)),
				sizeof(*mh), &mh_copy);
			if (unlikely(mh == NULL))
				return pkt_type;
			if (mh->bs)
				break; /* bottom of the MPLS stack */
		}
		if (i == MAX_MPLS_HDR)
			return pkt_type;
		pkt_type = RTE_PTYPE_L2_ETHER_MPLS;
		hdr_lens->l2_len += (sizeof(*mh) * (i + 1));
		return pkt_type;
	}

l3:
	if ((layers & RTE_PTYPE_L3_MASK) == 0)
		return pkt_type;

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		const struct rte_ipv4_hdr *ip4h;
		struct rte_ipv4_hdr ip4h_copy;

		ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
		if (unlikely(ip4h == NULL))
			return pkt_type;

		pkt_type |= ptype_l3_ip(ip4h->version_ihl);
		hdr_lens->l3_len = rte_ipv4_hdr_len(ip4h);
		off += hdr_lens->l3_len;

		if ((layers & RTE_PTYPE_L4_MASK) == 0)
			return pkt_type;

		if (ip4h->fragment_offset & rte_cpu_to_be_16(
				RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)) {
			pkt_type |= RTE_PTYPE_L4_FRAG;
			hdr_lens->l4_len = 0;
			return pkt_type;
		}
		proto = ip4h->next_proto_id;
		pkt_type |= ptype_l4(proto);
	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		const struct rte_ipv6_hdr *ip6h;
		struct rte_ipv6_hdr ip6h_copy;
		int frag = 0;

		ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
		if (unlikely(ip6h == NULL))
			return pkt_type;

		proto = ip6h->proto;
		hdr_lens->l3_len = sizeof(*ip6h);
		off += hdr_lens->l3_len;
		pkt_type |= ptype_l3_ip6(proto);
		if ((pkt_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6_EXT) {
			ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
			if (ret < 0)
				return pkt_type;
			proto = ret;
			hdr_lens->l3_len = off - hdr_lens->l2_len;
		}
		if (proto == 0)
			return pkt_type;

		if ((layers & RTE_PTYPE_L4_MASK) == 0)
			return pkt_type;

		if (frag) {
			pkt_type |= RTE_PTYPE_L4_FRAG;
			hdr_lens->l4_len = 0;
			return pkt_type;
		}
		pkt_type |= ptype_l4(proto);
	}

	if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) {
		hdr_lens->l4_len = sizeof(struct rte_udp_hdr);
		return pkt_type;
	} else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		const struct rte_tcp_hdr *th;
		struct rte_tcp_hdr th_copy;

		th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
		if (unlikely(th == NULL))
			return pkt_type & (RTE_PTYPE_L2_MASK |
				RTE_PTYPE_L3_MASK);
		/* TCP data offset is in the high nibble, in 32-bit words */
		hdr_lens->l4_len = (th->data_off & 0xf0) >> 2;
		return pkt_type;
	} else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP) {
		hdr_lens->l4_len = sizeof(struct rte_sctp_hdr);
		return pkt_type;
	} else {
		uint32_t prev_off = off;

		hdr_lens->l4_len = 0;

		if ((layers & RTE_PTYPE_TUNNEL_MASK) == 0)
			return pkt_type;

		pkt_type |= ptype_tunnel(&proto, m, &off);
		hdr_lens->tunnel_len = off - prev_off;
	}

	/* same job for inner header: we need to duplicate the code
	 * because the packet types do not have the same value.
	 */
	if ((layers & RTE_PTYPE_INNER_L2_MASK) == 0)
		return pkt_type;

	hdr_lens->inner_l2_len = 0;
	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
		eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
		if (unlikely(eh == NULL))
			return pkt_type;
		pkt_type |= RTE_PTYPE_INNER_L2_ETHER;
		proto = eh->ether_type;
		off += sizeof(*eh);
		hdr_lens->inner_l2_len = sizeof(*eh);
	}

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		const struct rte_vlan_hdr *vh;
		struct rte_vlan_hdr vh_copy;

		pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
		pkt_type |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
		vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += sizeof(*vh);
		hdr_lens->inner_l2_len += sizeof(*vh);
		proto = vh->eth_proto;
	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
		const struct rte_vlan_hdr *vh;
		struct rte_vlan_hdr vh_copy;

		pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
		pkt_type |= RTE_PTYPE_INNER_L2_ETHER_QINQ;
		vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
			&vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += 2 * sizeof(*vh);
		hdr_lens->inner_l2_len += 2 * sizeof(*vh);
		proto = vh->eth_proto;
	}

	if ((layers & RTE_PTYPE_INNER_L3_MASK) == 0)
		return pkt_type;

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		const struct rte_ipv4_hdr *ip4h;
		struct rte_ipv4_hdr ip4h_copy;

		ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
		if (unlikely(ip4h == NULL))
			return pkt_type;

		pkt_type |= ptype_inner_l3_ip(ip4h->version_ihl);
		hdr_lens->inner_l3_len = rte_ipv4_hdr_len(ip4h);
		off += hdr_lens->inner_l3_len;

		if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
			return pkt_type;
		if (ip4h->fragment_offset &
				rte_cpu_to_be_16(RTE_IPV4_HDR_OFFSET_MASK |
					RTE_IPV4_HDR_MF_FLAG)) {
			pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
			hdr_lens->inner_l4_len = 0;
			return pkt_type;
		}
		proto = ip4h->next_proto_id;
		pkt_type |= ptype_inner_l4(proto);
	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		const struct rte_ipv6_hdr *ip6h;
		struct rte_ipv6_hdr ip6h_copy;
		int frag = 0;

		ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
		if (unlikely(ip6h == NULL))
			return pkt_type;

		proto = ip6h->proto;
		hdr_lens->inner_l3_len = sizeof(*ip6h);
		off += hdr_lens->inner_l3_len;
		pkt_type |= ptype_inner_l3_ip6(proto);
		if ((pkt_type & RTE_PTYPE_INNER_L3_MASK) ==
				RTE_PTYPE_INNER_L3_IPV6_EXT) {
			uint32_t prev_off;

			prev_off = off;
			ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
			if (ret < 0)
				return pkt_type;
			proto = ret;
			hdr_lens->inner_l3_len += off - prev_off;
		}
		if (proto == 0)
			return pkt_type;

		if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
			return pkt_type;

		if (frag) {
			pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
			hdr_lens->inner_l4_len = 0;
			return pkt_type;
		}
		pkt_type |= ptype_inner_l4(proto);
	}

	if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP) {
		hdr_lens->inner_l4_len = sizeof(struct rte_udp_hdr);
	} else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
			RTE_PTYPE_INNER_L4_TCP) {
		const struct rte_tcp_hdr *th;
		struct rte_tcp_hdr th_copy;

		th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
		if (unlikely(th == NULL))
			return pkt_type & (RTE_PTYPE_INNER_L2_MASK |
				RTE_PTYPE_INNER_L3_MASK);
		hdr_lens->inner_l4_len = (th->data_off & 0xf0) >> 2;
	} else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
			RTE_PTYPE_INNER_L4_SCTP) {
		hdr_lens->inner_l4_len = sizeof(struct rte_sctp_hdr);
	} else {
		hdr_lens->inner_l4_len = 0;
	}

	return pkt_type;
}
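
/*
 * A minimal usage sketch for rte_net_get_ptype() (illustration only, not
 * part of the library). It assumes an mbuf "m" whose parsed headers sit in
 * the first, contiguous segment:
 *
 *	struct rte_net_hdr_lens hdr_lens;
 *	uint32_t ptype;
 *
 *	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
 *	if (RTE_ETH_IS_IPV4_HDR(ptype) &&
 *	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
 *		struct rte_tcp_hdr *th;
 *
 *		th = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
 *			hdr_lens.l2_len + hdr_lens.l3_len);
 *		// th now points to the TCP header of the outer packet
 *	}
 */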