/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stddef.h>
#include <string.h>
#include <errno.h>

#include <rte_ether.h>

#include "ip_frag_common.h"

/* Fragment offset field: flag and offset bit positions */
#define RTE_IPV4_HDR_DF_SHIFT	14
#define RTE_IPV4_HDR_MF_SHIFT	13
#define RTE_IPV4_HDR_FO_SHIFT	3

#define IPV4_HDR_DF_MASK	(1 << RTE_IPV4_HDR_DF_SHIFT)
#define IPV4_HDR_MF_MASK	(1 << RTE_IPV4_HDR_MF_SHIFT)

#define IPV4_HDR_FO_ALIGN	(1 << RTE_IPV4_HDR_FO_SHIFT)

/* Maximum IPv4 header length: max IHL (15) * 4 bytes */
#define IPV4_HDR_MAX_LEN	60

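/*
 * Copy the IPv4 header from 'src' into 'dst' and patch the per-fragment
 * fields: the fragment offset (original flags plus this fragment's data
 * offset in 8-byte units, with MF set for all but the last fragment) and
 * the total length. The checksum is zeroed so it can be recomputed once
 * the fragment is complete.
 */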
static inline void __fill_ipv4hdr_frag(struct rte_ipv4_hdr *dst,
		const struct rte_ipv4_hdr *src, uint16_t header_len,
		uint16_t len, uint16_t fofs, uint16_t dofs, uint32_t mf)
{
	memcpy(dst, src, header_len);
	fofs = (uint16_t)(fofs + (dofs >> RTE_IPV4_HDR_FO_SHIFT));
	fofs = (uint16_t)(fofs | mf << RTE_IPV4_HDR_MF_SHIFT);
	dst->fragment_offset = rte_cpu_to_be_16(fofs);
	dst->total_length = rte_cpu_to_be_16(len);
	dst->hdr_checksum = 0;
}

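/* Free the first 'num' mbufs in 'mb[]'; used to unwind on allocation failure. */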
static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num)
{
	uint32_t i;

	for (i = 0; i != num; i++)
		rte_pktmbuf_free(mb[i]);
}

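/*
 * Build the IPv4 header used by non-first fragments of a packet carrying
 * options. Per RFC 791, only options with the "copied" flag set are
 * replicated into fragments: the base header is copied from 'iph', the
 * copied options are appended, the option area is padded to a 4-byte
 * boundary with EOL bytes, and IHL is updated to match. Returns the
 * resulting option length in bytes.
 */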
static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
		uint16_t ipopt_len, uint8_t *ipopt_frag_hdr)
{
	uint16_t len = ipopt_len;
	struct rte_ipv4_hdr *iph_opt = (struct rte_ipv4_hdr *)ipopt_frag_hdr;

	ipopt_len = 0;
	memcpy(ipopt_frag_hdr, iph, sizeof(struct rte_ipv4_hdr));
	ipopt_frag_hdr += sizeof(struct rte_ipv4_hdr);

	uint8_t *p_opt = iph + sizeof(struct rte_ipv4_hdr);

	while (len > 0) {
		if (unlikely(*p_opt == RTE_IPV4_HDR_OPT_NOP)) {
			len--;
			p_opt++;
			continue;
		} else if (unlikely(*p_opt == RTE_IPV4_HDR_OPT_EOL))
			break;

		if (unlikely(p_opt[1] < 2 || p_opt[1] > len))
			break;

		if (RTE_IPV4_HDR_OPT_COPIED(*p_opt)) {
			memcpy(ipopt_frag_hdr + ipopt_len,
				p_opt, p_opt[1]);
			ipopt_len += p_opt[1];
		}

		len -= p_opt[1];
		p_opt += p_opt[1];
	}

	len = RTE_ALIGN_CEIL(ipopt_len, RTE_IPV4_IHL_MULTIPLIER);
	memset(ipopt_frag_hdr + ipopt_len,
		RTE_IPV4_HDR_OPT_EOL, len - ipopt_len);
	ipopt_len = len;
	iph_opt->ihl = (sizeof(struct rte_ipv4_hdr) + ipopt_len) /
		RTE_IPV4_IHL_MULTIPLIER;

	return ipopt_len;
}

/**
 * IPv4 fragmentation.
 *
 * This function implements the fragmentation of IPv4 packets.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Number of entries in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
 *   datagrams. This value includes the size of the IPv4 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @param pool_indirect
 *   MBUF pool used for allocating indirect buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * errno.
 */
int32_t
rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct,
	struct rte_mempool *pool_indirect)
{
	struct rte_mbuf *in_seg = NULL;
	struct rte_ipv4_hdr *in_hdr;
	uint32_t out_pkt_pos, in_seg_data_pos;
	uint32_t more_in_segs;
	uint16_t fragment_offset, flag_offset, frag_size, header_len;
	uint16_t frag_bytes_remaining;
	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
	uint16_t ipopt_len;

	/*
	 * Formal parameter checking.
	 */
	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
	    unlikely(nb_pkts_out == 0) ||
	    unlikely(pool_direct == NULL) || unlikely(pool_indirect == NULL) ||
	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
		return -EINVAL;

	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
		RTE_IPV4_IHL_MULTIPLIER;

	/* Check IP header length */
	if (unlikely(pkt_in->data_len < header_len) ||
	    unlikely(mtu_size < header_len))
		return -EINVAL;

	/*
	 * Ensure the IP payload length of all fragments is aligned to a
	 * multiple of 8 bytes as per RFC791 section 2.3.
	 */
	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
				    IPV4_HDR_FO_ALIGN);

	flag_offset = rte_be_to_cpu_16(in_hdr->fragment_offset);

	/* If Don't Fragment flag is set */
	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
		return -ENOTSUP;

	/* Check that pkts_out is big enough to hold all fragments */
	if (unlikely(frag_size * nb_pkts_out <
	    (uint16_t)(pkt_in->pkt_len - header_len)))
		return -EINVAL;

	in_seg = pkt_in;
	in_seg_data_pos = header_len;
	out_pkt_pos = 0;
	fragment_offset = 0;

	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
		return -EINVAL;

	more_in_segs = 1;
	while (likely(more_in_segs)) {
		struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
		uint32_t more_out_segs;
		struct rte_ipv4_hdr *out_hdr;

		/* Allocate direct buffer */
		out_pkt = rte_pktmbuf_alloc(pool_direct);
		if (unlikely(out_pkt == NULL)) {
			__free_fragments(pkts_out, out_pkt_pos);
			return -ENOMEM;
		}

		/* Reserve space for the IP header that will be built later */
		out_pkt->data_len = header_len;
		out_pkt->pkt_len = header_len;
		frag_bytes_remaining = frag_size;

		out_seg_prev = out_pkt;
		more_out_segs = 1;
		while (likely(more_out_segs && more_in_segs)) {
			struct rte_mbuf *out_seg = NULL;
			uint32_t len;

			/* Allocate indirect buffer */
			out_seg = rte_pktmbuf_alloc(pool_indirect);
			if (unlikely(out_seg == NULL)) {
				rte_pktmbuf_free(out_pkt);
				__free_fragments(pkts_out, out_pkt_pos);
				return -ENOMEM;
			}
			out_seg_prev->next = out_seg;
			out_seg_prev = out_seg;

			/* Prepare indirect buffer */
			rte_pktmbuf_attach(out_seg, in_seg);
			len = frag_bytes_remaining;
			if (len > (in_seg->data_len - in_seg_data_pos)) {
				len = in_seg->data_len - in_seg_data_pos;
			}
			out_seg->data_off = in_seg->data_off + in_seg_data_pos;
			out_seg->data_len = (uint16_t)len;
			out_pkt->pkt_len = (uint16_t)(len +
				out_pkt->pkt_len);
			out_pkt->nb_segs += 1;
			in_seg_data_pos += len;
			frag_bytes_remaining -= len;

			/* Current output packet (i.e. fragment) done ? */
			if (unlikely(frag_bytes_remaining == 0))
				more_out_segs = 0;

			/* Current input segment done ? */
			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
				in_seg = in_seg->next;
				in_seg_data_pos = 0;

				if (unlikely(in_seg == NULL))
					more_in_segs = 0;
			}
		}

		/* Build the IP header */

		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);

		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
			(uint16_t)out_pkt->pkt_len,
			flag_offset, fragment_offset, more_in_segs);

		/*
		 * After the first fragment of an unfragmented packet with
		 * options, switch to the reduced header that carries only
		 * the options whose "copied" flag is set.
		 */
		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
		    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
				ipopt_len, ipopt_frag_hdr);
			fragment_offset = (uint16_t)(fragment_offset +
				out_pkt->pkt_len - header_len);
			out_pkt->l3_len = header_len;

			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
		} else {
			fragment_offset = (uint16_t)(fragment_offset +
				out_pkt->pkt_len - header_len);
			out_pkt->l3_len = header_len;
		}

		/* Write the fragment to the output list */
		pkts_out[out_pkt_pos] = out_pkt;
		out_pkt_pos++;
	}

	return out_pkt_pos;
}

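/*
 * Usage sketch (illustrative only; 'pkt', 'mtu', 'direct_pool' and
 * 'indirect_pool' are hypothetical application-owned names). The input
 * mbuf data must start at the IPv4 header:
 *
 *	struct rte_mbuf *frags[RTE_LIBRTE_IP_FRAG_MAX_FRAG];
 *	int32_t n = rte_ipv4_fragment_packet(pkt, frags, RTE_DIM(frags),
 *			mtu, direct_pool, indirect_pool);
 *
 * On success, frags[0..n-1] hold the fragments and the caller still owns
 * 'pkt'; the indirect fragments keep its data alive through attached
 * mbufs, so 'pkt' can simply be freed. On failure a negative errno is
 * returned and no fragments are produced.
 */
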
/**
 * IPv4 fragmentation by copy.
 *
 * This function implements fragmentation of IPv4 packets by copying
 * them into newly allocated direct, non-segmented mbufs.
 * It is mainly intended for use with the Tx MBUF_FAST_FREE offload
 * (device supports optimization for fast release of mbufs), which
 * requires that all mbufs sent on a queue come from the same mempool,
 * have refcnt = 1, and are direct and non-segmented.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Number of entries in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
 *   datagrams. This value includes the size of the IPv4 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * errno.
 */
int32_t
rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct)
{
	struct rte_mbuf *in_seg = NULL;
	struct rte_ipv4_hdr *in_hdr;
	uint32_t out_pkt_pos, in_seg_data_pos;
	uint32_t more_in_segs;
	uint16_t fragment_offset, flag_offset, frag_size, header_len;
	uint16_t frag_bytes_remaining;
	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
	uint16_t ipopt_len;

	/*
	 * Formal parameter checking.
	 */
	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
		return -EINVAL;

	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
		RTE_IPV4_IHL_MULTIPLIER;

	/* Check IP header length */
	if (unlikely(pkt_in->data_len < header_len) ||
	    unlikely(mtu_size < header_len))
		return -EINVAL;

	/*
	 * Ensure the IP payload length of all fragments is aligned to a
	 * multiple of 8 bytes as per RFC791 section 2.3.
	 */
	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
				    IPV4_HDR_FO_ALIGN);

	flag_offset = rte_be_to_cpu_16(in_hdr->fragment_offset);

	/* If Don't Fragment flag is set */
	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
		return -ENOTSUP;

	/* Check that pkts_out is big enough to hold all fragments */
	if (unlikely(frag_size * nb_pkts_out <
	    (uint16_t)(pkt_in->pkt_len - header_len)))
		return -EINVAL;

	in_seg = pkt_in;
	in_seg_data_pos = header_len;
	out_pkt_pos = 0;
	fragment_offset = 0;

	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
		return -EINVAL;

	more_in_segs = 1;
	while (likely(more_in_segs)) {
		struct rte_mbuf *out_pkt = NULL;
		uint32_t more_out_segs;
		struct rte_ipv4_hdr *out_hdr;

		/* Allocate direct buffer */
		out_pkt = rte_pktmbuf_alloc(pool_direct);
		if (unlikely(out_pkt == NULL)) {
			__free_fragments(pkts_out, out_pkt_pos);
			return -ENOMEM;
		}
		/* The whole fragment must fit into this single mbuf */
		if (unlikely(rte_pktmbuf_tailroom(out_pkt) < frag_size)) {
			rte_pktmbuf_free(out_pkt);
			__free_fragments(pkts_out, out_pkt_pos);
			return -EINVAL;
		}

		/* Reserve space for the IP header that will be built later */
		out_pkt->data_len = header_len;
		out_pkt->pkt_len = header_len;
		frag_bytes_remaining = frag_size;

		more_out_segs = 1;
		while (likely(more_out_segs && more_in_segs)) {
			uint32_t len;

			len = frag_bytes_remaining;
			if (len > (in_seg->data_len - in_seg_data_pos))
				len = in_seg->data_len - in_seg_data_pos;

			/* Copy the payload instead of attaching to it */
			memcpy(rte_pktmbuf_mtod_offset(out_pkt, char *,
					out_pkt->data_len),
				rte_pktmbuf_mtod_offset(in_seg, char *,
					in_seg_data_pos),
				len);

			in_seg_data_pos += len;
			frag_bytes_remaining -= len;
			out_pkt->data_len += len;

			/* Current output packet (i.e. fragment) done ? */
			if (unlikely(frag_bytes_remaining == 0))
				more_out_segs = 0;

			/* Current input segment done ? */
			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
				in_seg = in_seg->next;
				in_seg_data_pos = 0;

				if (unlikely(in_seg == NULL))
					more_in_segs = 0;
			}
		}

		/* Build the IP header */

		out_pkt->pkt_len = out_pkt->data_len;
		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);

		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
			(uint16_t)out_pkt->pkt_len,
			flag_offset, fragment_offset, more_in_segs);

		/*
		 * After the first fragment of an unfragmented packet with
		 * options, switch to the reduced header that carries only
		 * the options whose "copied" flag is set.
		 */
		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
		    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
				ipopt_len, ipopt_frag_hdr);
			fragment_offset = (uint16_t)(fragment_offset +
				out_pkt->pkt_len - header_len);
			out_pkt->l3_len = header_len;

			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
		} else {
			fragment_offset = (uint16_t)(fragment_offset +
				out_pkt->pkt_len - header_len);
			out_pkt->l3_len = header_len;
		}

		/* Write the fragment to the output list */
		pkts_out[out_pkt_pos] = out_pkt;
		out_pkt_pos++;
	}

	return out_pkt_pos;
}

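/*
 * Usage sketch (illustrative only; names as in the previous example).
 * The copying variant needs a single mempool and produces direct,
 * non-segmented fragments, which keeps them compatible with the Tx
 * MBUF_FAST_FREE offload:
 *
 *	struct rte_mbuf *frags[RTE_LIBRTE_IP_FRAG_MAX_FRAG];
 *	int32_t n = rte_ipv4_fragment_copy_nonseg_packet(pkt, frags,
 *			RTE_DIM(frags), mtu, direct_pool);
 *
 * Each fragment must fit into the tailroom of one mbuf from
 * 'direct_pool'; otherwise -EINVAL is returned. As above, the input
 * packet is never freed by the library.
 */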