/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stddef.h>
#include <errno.h>

#include <rte_ether.h>

#include "ip_frag_common.h"

/* Fragment Offset */
#define RTE_IPV4_HDR_DF_SHIFT	14
#define RTE_IPV4_HDR_MF_SHIFT	13
#define RTE_IPV4_HDR_FO_SHIFT	3

#define IPV4_HDR_DF_MASK	(1 << RTE_IPV4_HDR_DF_SHIFT)
#define IPV4_HDR_MF_MASK	(1 << RTE_IPV4_HDR_MF_SHIFT)

#define IPV4_HDR_FO_ALIGN	(1 << RTE_IPV4_HDR_FO_SHIFT)

#define IPV4_HDR_MAX_LEN	60

/*
 * Write the IPv4 header of one output fragment: copy the (possibly
 * option-carrying) header from src, then patch the fragment offset,
 * the MF flag and the total length, and reset the header checksum.
 */
static inline void __fill_ipv4hdr_frag(struct rte_ipv4_hdr *dst,
	const struct rte_ipv4_hdr *src, uint16_t header_len,
	uint16_t len, uint16_t fofs, uint16_t dofs, uint32_t mf)
{
	memcpy(dst, src, header_len);
	fofs = (uint16_t)(fofs + (dofs >> RTE_IPV4_HDR_FO_SHIFT));
	fofs = (uint16_t)(fofs | mf << RTE_IPV4_HDR_MF_SHIFT);
	dst->fragment_offset = rte_cpu_to_be_16(fofs);
	dst->total_length = rte_cpu_to_be_16(len);
	dst->hdr_checksum = 0;
}

static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num)
{
	uint32_t i;
	for (i = 0; i != num; i++)
		rte_pktmbuf_free(mb[i]);
}

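/*
 * Build the IPv4 header to be used for the fragments that follow the first
 * one: the fixed 20-byte header is copied as-is, only the options whose
 * "copied" flag is set are kept (RFC 791), the option area is padded with
 * EOL bytes up to a 4-byte boundary, and the IHL field is updated to match.
 * Returns the resulting option length in bytes.
 */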
static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
	uint16_t ipopt_len, uint8_t *ipopt_frag_hdr)
{
	uint16_t len = ipopt_len;
	struct rte_ipv4_hdr *iph_opt = (struct rte_ipv4_hdr *)ipopt_frag_hdr;

	ipopt_len = 0;
	/* Copy the fixed part of the IPv4 header. */
	memcpy(ipopt_frag_hdr, iph, sizeof(struct rte_ipv4_hdr));
	ipopt_frag_hdr += sizeof(struct rte_ipv4_hdr);

	uint8_t *p_opt = iph + sizeof(struct rte_ipv4_hdr);

	/* Walk the option list and keep only the options marked as copied. */
	while (len > 0) {
		if (unlikely(*p_opt == RTE_IPV4_HDR_OPT_NOP)) {
			len--;
			p_opt++;
			continue;
		} else if (unlikely(*p_opt == RTE_IPV4_HDR_OPT_EOL))
			break;

		/* Malformed option length: stop parsing. */
		if (unlikely(p_opt[1] < 2 || p_opt[1] > len))
			break;

		if (RTE_IPV4_HDR_OPT_COPIED(*p_opt)) {
			memcpy(ipopt_frag_hdr + ipopt_len,
				p_opt, p_opt[1]);
			ipopt_len += p_opt[1];
		}

		len -= p_opt[1];
		p_opt += p_opt[1];
	}

	/* Pad the option area to a 4-byte boundary and update IHL. */
	len = RTE_ALIGN_CEIL(ipopt_len, RTE_IPV4_IHL_MULTIPLIER);
	memset(ipopt_frag_hdr + ipopt_len,
		RTE_IPV4_HDR_OPT_EOL, len - ipopt_len);
	ipopt_len = len;
	iph_opt->ihl = (sizeof(struct rte_ipv4_hdr) + ipopt_len) /
		RTE_IPV4_IHL_MULTIPLIER;

	return ipopt_len;
}

/**
 * IPv4 fragmentation.
 *
 * This function implements the fragmentation of IPv4 packets.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Number of elements in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
 *   datagrams. This value includes the size of the IPv4 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @param pool_indirect
 *   MBUF pool used for allocating indirect buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * <errno>.
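 *
 * A minimal usage sketch (illustrative only: the input mbuf "m", the
 * mempools "direct_pool"/"indirect_pool", the fragment array size of 8 and
 * the MTU of 1500 bytes are assumptions of this example, not requirements
 * of the API). Note that pkt_in is never freed by this function; the caller
 * remains responsible for it, since the output fragments reference its data
 * through indirect mbufs.
 * @code
 *	struct rte_mbuf *frags[8];
 *	int32_t nb = rte_ipv4_fragment_packet(m, frags, 8, 1500,
 *			direct_pool, indirect_pool);
 *	if (nb < 0)
 *		rte_pktmbuf_free(m);	// drop the packet on error
 * @endcode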
 */
int32_t
rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct,
	struct rte_mempool *pool_indirect)
{
	struct rte_mbuf *in_seg = NULL;
	struct rte_ipv4_hdr *in_hdr;
	uint32_t out_pkt_pos, in_seg_data_pos;
	uint32_t more_in_segs;
	uint16_t fragment_offset, flag_offset, frag_size, header_len;
	uint16_t frag_bytes_remaining;
	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
	uint16_t ipopt_len;

	/*
	 * Formal parameter checking.
	 */
	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
	    unlikely(nb_pkts_out == 0) ||
	    unlikely(pool_direct == NULL) || unlikely(pool_indirect == NULL) ||
	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
		return -EINVAL;

	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
	    RTE_IPV4_IHL_MULTIPLIER;

	/* Check IP header length */
	if (unlikely(pkt_in->data_len < header_len) ||
	    unlikely(mtu_size < header_len))
		return -EINVAL;

	/*
	 * Ensure the IP payload length of all fragments is aligned to a
	 * multiple of 8 bytes as per RFC791 section 2.3.
	 */
	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
	    IPV4_HDR_FO_ALIGN);

	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);

	/* If Don't Fragment flag is set */
	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
		return -ENOTSUP;

	/* Check that pkts_out is big enough to hold all fragments */
	if (unlikely(frag_size * nb_pkts_out <
	    (uint16_t)(pkt_in->pkt_len - header_len)))
		return -EINVAL;

	in_seg = pkt_in;
	in_seg_data_pos = header_len;
	out_pkt_pos = 0;
	fragment_offset = 0;

	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
		return -EINVAL;

	more_in_segs = 1;
	while (likely(more_in_segs)) {
		struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
		uint32_t more_out_segs;
		struct rte_ipv4_hdr *out_hdr;

		/* Allocate direct buffer */
		out_pkt = rte_pktmbuf_alloc(pool_direct);
		if (unlikely(out_pkt == NULL)) {
			__free_fragments(pkts_out, out_pkt_pos);
			return -ENOMEM;
		}

		/* Reserve space for the IP header that will be built later */
		out_pkt->data_len = header_len;
		out_pkt->pkt_len = header_len;
		frag_bytes_remaining = frag_size;

		out_seg_prev = out_pkt;
		more_out_segs = 1;
		while (likely(more_out_segs && more_in_segs)) {
			struct rte_mbuf *out_seg = NULL;
			uint32_t len;

			/* Allocate indirect buffer */
			out_seg = rte_pktmbuf_alloc(pool_indirect);
			if (unlikely(out_seg == NULL)) {
				rte_pktmbuf_free(out_pkt);
				__free_fragments(pkts_out, out_pkt_pos);
				return -ENOMEM;
			}
			out_seg_prev->next = out_seg;
			out_seg_prev = out_seg;

			/* Prepare indirect buffer */
			rte_pktmbuf_attach(out_seg, in_seg);
			len = frag_bytes_remaining;
			if (len > (in_seg->data_len - in_seg_data_pos)) {
				len = in_seg->data_len - in_seg_data_pos;
			}
			out_seg->data_off = in_seg->data_off + in_seg_data_pos;
			out_seg->data_len = (uint16_t)len;
			out_pkt->pkt_len = (uint16_t)(len +
			    out_pkt->pkt_len);
			out_pkt->nb_segs += 1;
			in_seg_data_pos += len;
			frag_bytes_remaining -= len;

			/* Current output packet (i.e. fragment) done ? */
			if (unlikely(frag_bytes_remaining == 0))
				more_out_segs = 0;

			/* Current input segment done ? */
			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
				in_seg = in_seg->next;
				in_seg_data_pos = 0;

				if (unlikely(in_seg == NULL))
					more_in_segs = 0;
			}
		}

		/* Build the IP header */

		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);

		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
		    (uint16_t)out_pkt->pkt_len,
		    flag_offset, fragment_offset, more_in_segs);

		/*
		 * The first fragment keeps the full option list; the
		 * fragments after it use a rebuilt header that carries only
		 * the options flagged to be copied.
		 */
		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
		    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
				ipopt_len, ipopt_frag_hdr);
			fragment_offset = (uint16_t)(fragment_offset +
				out_pkt->pkt_len - header_len);
			out_pkt->l3_len = header_len;

			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
		} else {
			fragment_offset = (uint16_t)(fragment_offset +
				out_pkt->pkt_len - header_len);
			out_pkt->l3_len = header_len;
		}

		/* Write the fragment to the output list */
		pkts_out[out_pkt_pos] = out_pkt;
		out_pkt_pos++;
	}

	return out_pkt_pos;
}

/**
 * IPv4 fragmentation by copy.
 *
 * This function implements fragmentation of IPv4 packets: each output
 * fragment is a single direct, non-segmented mbuf whose data is copied
 * from the input packet.
 * It is mainly intended for use with the Tx MBUF_FAST_FREE offload
 * (device supports optimization for fast release of mbufs), which requires
 * that, per queue, all mbufs come from the same mempool, have refcnt = 1,
 * and are direct and non-segmented.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Number of elements in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
 *   datagrams. This value includes the size of the IPv4 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * errno.
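 *
 * A minimal usage sketch (illustrative only: the input mbuf "m", the
 * mempool "direct_pool", the fragment array size of 8 and the MTU of
 * 1500 bytes are assumptions of this example, not requirements of the API).
 * All output fragments are allocated from pool_direct and are direct and
 * non-segmented, matching the MBUF_FAST_FREE constraints described above.
 * @code
 *	struct rte_mbuf *frags[8];
 *	int32_t nb = rte_ipv4_fragment_copy_nonseg_packet(m, frags, 8, 1500,
 *			direct_pool);
 *	if (nb < 0)
 *		rte_pktmbuf_free(m);	// drop the packet on error
 * @endcode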
 */
int32_t
rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct)
{
	struct rte_mbuf *in_seg = NULL;
	struct rte_ipv4_hdr *in_hdr;
	uint32_t out_pkt_pos, in_seg_data_pos;
	uint32_t more_in_segs;
	uint16_t fragment_offset, flag_offset, frag_size, header_len;
	uint16_t frag_bytes_remaining;
	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
	uint16_t ipopt_len;

	/*
	 * Formal parameter checking.
	 */
	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
		return -EINVAL;

	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
	    RTE_IPV4_IHL_MULTIPLIER;

	/* Check IP header length */
	if (unlikely(pkt_in->data_len < header_len) ||
	    unlikely(mtu_size < header_len))
		return -EINVAL;

	/*
	 * Ensure the IP payload length of all fragments is aligned to a
	 * multiple of 8 bytes as per RFC791 section 2.3.
	 */
	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
	    IPV4_HDR_FO_ALIGN);

	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);

	/* If Don't Fragment flag is set */
	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
		return -ENOTSUP;

	/* Check that pkts_out is big enough to hold all fragments */
	if (unlikely(frag_size * nb_pkts_out <
	    (uint16_t)(pkt_in->pkt_len - header_len)))
		return -EINVAL;

	in_seg = pkt_in;
	in_seg_data_pos = header_len;
	out_pkt_pos = 0;
	fragment_offset = 0;

	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
		return -EINVAL;

	more_in_segs = 1;
	while (likely(more_in_segs)) {
		struct rte_mbuf *out_pkt = NULL;
		uint32_t more_out_segs;
		struct rte_ipv4_hdr *out_hdr;

		/* Allocate direct buffer */
		out_pkt = rte_pktmbuf_alloc(pool_direct);
		if (unlikely(out_pkt == NULL)) {
			__free_fragments(pkts_out, out_pkt_pos);
			return -ENOMEM;
		}
		if (unlikely(rte_pktmbuf_tailroom(out_pkt) < frag_size)) {
			rte_pktmbuf_free(out_pkt);
			__free_fragments(pkts_out, out_pkt_pos);
			return -EINVAL;
		}

		/* Reserve space for the IP header that will be built later */
		out_pkt->data_len = header_len;
		out_pkt->pkt_len = header_len;
		frag_bytes_remaining = frag_size;

		more_out_segs = 1;
		while (likely(more_out_segs && more_in_segs)) {
			uint32_t len;

			len = frag_bytes_remaining;
			if (len > (in_seg->data_len - in_seg_data_pos))
				len = in_seg->data_len - in_seg_data_pos;

			/* Copy the payload into the single direct buffer */
			memcpy(rte_pktmbuf_mtod_offset(out_pkt, char *,
				out_pkt->data_len),
				rte_pktmbuf_mtod_offset(in_seg, char *,
					in_seg_data_pos),
				len);

			in_seg_data_pos += len;
			frag_bytes_remaining -= len;
			out_pkt->data_len += len;

			/* Current output packet (i.e. fragment) done ? */
			if (unlikely(frag_bytes_remaining == 0))
				more_out_segs = 0;

			/* Current input segment done ? */
			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
				in_seg = in_seg->next;
				in_seg_data_pos = 0;

				if (unlikely(in_seg == NULL))
					more_in_segs = 0;
			}
		}

		/* Build the IP header */

		out_pkt->pkt_len = out_pkt->data_len;
		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);

		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
		    (uint16_t)out_pkt->pkt_len,
		    flag_offset, fragment_offset, more_in_segs);

		/*
		 * As above: after the first fragment, switch to the rebuilt
		 * header that carries only the options flagged to be copied.
		 */
		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
		    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
				ipopt_len, ipopt_frag_hdr);
			fragment_offset = (uint16_t)(fragment_offset +
				out_pkt->pkt_len - header_len);
			out_pkt->l3_len = header_len;

			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
		} else {
			fragment_offset = (uint16_t)(fragment_offset +
				out_pkt->pkt_len - header_len);
			out_pkt->l3_len = header_len;
		}

		/* Write the fragment to the output list */
		pkts_out[out_pkt_pos] = out_pkt;
		out_pkt_pos++;
	}

	return out_pkt_pos;
}