/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_IP_FRAG_H_
#define _RTE_IP_FRAG_H_

/**
 * @file
 * RTE IP Fragmentation and Reassembly
 *
 * Implementation of IP packet fragmentation and reassembly.
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_config.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

#ifdef __cplusplus
extern "C" {
#endif

struct rte_mbuf;

/** death row size (in packets) */
#define RTE_IP_FRAG_DEATH_ROW_LEN 32

/** death row size in mbufs */
#define RTE_IP_FRAG_DEATH_ROW_MBUF_LEN \
	(RTE_IP_FRAG_DEATH_ROW_LEN * (RTE_LIBRTE_IP_FRAG_MAX_FRAG + 1))

/** mbuf death row (packets to be freed) */
struct rte_ip_frag_death_row {
	uint32_t cnt;          /**< number of mbufs currently on death row */
	struct rte_mbuf *row[RTE_IP_FRAG_DEATH_ROW_MBUF_LEN];
	/**< mbufs to be freed */
};

/**
 * Create a new IP fragmentation table.
 *
 * @param bucket_num
 *   Number of buckets in the hash table.
 * @param bucket_entries
 *   Number of entries per bucket (e.g. hash associativity).
 *   Should be a power of two.
 * @param max_entries
 *   Maximum number of entries that could be stored in the table.
 *   The value should be less than or equal to bucket_num * bucket_entries.
 * @param max_cycles
 *   Maximum TTL in cycles for each fragmented packet.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there are no NUMA constraints.
 * @return
 *   A pointer to the newly allocated fragmentation table on success, NULL on error.
 */
struct rte_ip_frag_tbl *rte_ip_frag_table_create(uint32_t bucket_num,
		uint32_t bucket_entries, uint32_t max_entries,
		uint64_t max_cycles, int socket_id);
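
/*
 * Example: creating a fragmentation table. A minimal sketch; the flow count,
 * bucket associativity and the roughly one-second timeout below are
 * illustrative assumptions, not requirements of the API.
 *
 *   #include <rte_cycles.h>                    // rte_get_tsc_hz()
 *
 *   uint32_t max_flows = 4096;
 *   uint64_t frag_cycles = rte_get_tsc_hz();   // ~1 second worth of TSC cycles
 *   struct rte_ip_frag_tbl *tbl = rte_ip_frag_table_create(
 *           max_flows,          // bucket_num
 *           16,                 // bucket_entries (power of two)
 *           max_flows,          // max_entries <= bucket_num * bucket_entries
 *           frag_cycles,        // max_cycles (per-packet TTL)
 *           SOCKET_ID_ANY);     // no NUMA constraint
 *   if (tbl == NULL) {
 *           // handle allocation failure
 *   }
 */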

/**
 * This function implements the fragmentation of IPv6 packets.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Maximum number of fragments that can be stored in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transmission Unit (MTU) for the outgoing IPv6
 *   datagrams. This value includes the size of the IPv6 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @param pool_indirect
 *   MBUF pool used for allocating indirect buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * errno.
 */
int32_t
rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
		struct rte_mbuf **pkts_out,
		uint16_t nb_pkts_out,
		uint16_t mtu_size,
		struct rte_mempool *pool_direct,
		struct rte_mempool *pool_indirect);
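
/*
 * Example: splitting an IPv6 packet into MTU-sized fragments. A minimal
 * sketch; pkt, direct_pool and indirect_pool are assumed to be provided by
 * the application, and 1280 bytes (the IPv6 minimum MTU) is only an example.
 *
 *   struct rte_mbuf *frags[RTE_LIBRTE_IP_FRAG_MAX_FRAG];
 *   int32_t nb = rte_ipv6_fragment_packet(pkt, frags, RTE_DIM(frags),
 *           1280, direct_pool, indirect_pool);
 *   if (nb < 0) {
 *           // fragmentation failed, -nb holds the errno value
 *           rte_pktmbuf_free(pkt);
 *   } else {
 *           // transmit frags[0..nb-1]; the input packet is not consumed
 *           // by the library and must be released by the caller
 *           rte_pktmbuf_free(pkt);
 *   }
 */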

/**
 * This function implements reassembly of fragmented IPv6 packets.
 * Incoming mbuf should have its l2_len/l3_len fields set up correctly.
 *
 * @param tbl
 *   Table in which to look up or add the fragmented packet.
 * @param dr
 *   Death row to free buffers to.
 * @param mb
 *   Incoming mbuf with IPv6 fragment.
 * @param tms
 *   Fragment arrival timestamp.
 * @param ip_hdr
 *   Pointer to the IPv6 header.
 * @param frag_hdr
 *   Pointer to the IPv6 fragment extension header.
 * @return
 *   Pointer to mbuf for reassembled packet, or NULL if:
 *   - an error occurred.
 *   - not all fragments of the packet are collected yet.
 */
struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
		struct rte_ip_frag_death_row *dr,
		struct rte_mbuf *mb, uint64_t tms, struct rte_ipv6_hdr *ip_hdr,
		struct rte_ipv6_fragment_ext *frag_hdr);

/**
 * Return a pointer to the packet's fragment extension header, if present.
 * Only the extension header immediately following the fixed IPv6 header is
 * examined; the full chain of extension headers is not traversed.
 *
 * @param hdr
 *   Pointer to the IPv6 header.
 * @return
 *   Pointer to the IPv6 fragment extension header, or NULL if it is not
 *   present.
 */
static inline struct rte_ipv6_fragment_ext *
rte_ipv6_frag_get_ipv6_fragment_header(struct rte_ipv6_hdr *hdr)
{
	if (hdr->proto == IPPROTO_FRAGMENT)
		return (struct rte_ipv6_fragment_ext *)(hdr + 1);
	else
		return NULL;
}
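
/*
 * Example: the typical IPv6 receive-side pattern - locate the fragment
 * extension header and, if present, hand the mbuf to the reassembly table.
 * A minimal sketch; tbl, dr and the mbuf m (with l2_len/l3_len already set)
 * are assumed to come from the surrounding application.
 *
 *   struct rte_ipv6_hdr *ip6 = rte_pktmbuf_mtod_offset(m,
 *           struct rte_ipv6_hdr *, m->l2_len);
 *   struct rte_ipv6_fragment_ext *fh =
 *           rte_ipv6_frag_get_ipv6_fragment_header(ip6);
 *   if (fh != NULL) {
 *           uint64_t tms = rte_rdtsc();
 *           m = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip6, fh);
 *           if (m == NULL)
 *                   return;         // error, or more fragments still needed
 *           // m now holds the fully reassembled packet
 *   }
 */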

/**
 * IPv4 fragmentation.
 *
 * This function implements the fragmentation of IPv4 packets.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Maximum number of fragments that can be stored in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transmission Unit (MTU) for the outgoing IPv4
 *   datagrams. This value includes the size of the IPv4 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @param pool_indirect
 *   MBUF pool used for allocating indirect buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * errno.
 */
int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
			struct rte_mbuf **pkts_out,
			uint16_t nb_pkts_out, uint16_t mtu_size,
			struct rte_mempool *pool_direct,
			struct rte_mempool *pool_indirect);
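
/*
 * Example: fragmenting an IPv4 packet for a 1500-byte MTU link. A minimal
 * sketch; pkt, direct_pool and indirect_pool are assumed to exist in the
 * application. Each output fragment carries its IPv4 header in a direct
 * mbuf and references the original payload through an indirect mbuf.
 *
 *   struct rte_mbuf *frags[RTE_LIBRTE_IP_FRAG_MAX_FRAG];
 *   int32_t nb = rte_ipv4_fragment_packet(pkt, frags, RTE_DIM(frags),
 *           1500, direct_pool, indirect_pool);
 *   rte_pktmbuf_free(pkt);          // input packet is not consumed by the call
 *   if (nb < 0) {
 *           // fragmentation failed, -nb holds the errno value
 *   } else {
 *           // transmit frags[0..nb-1]
 *   }
 */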

/**
 * IPv4 fragmentation by copy.
 *
 * This function implements fragmentation of IPv4 packets by copying them
 * into non-segmented mbufs.
 * It is mainly intended for use with the Tx MBUF_FAST_FREE offload.
 * MBUF_FAST_FREE: the device supports an optimization for fast release of
 * mbufs. When set, the application must guarantee that, per queue, all mbufs
 * come from the same mempool, have a reference count of 1, and are direct
 * and non-segmented.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Maximum number of fragments that can be stored in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transmission Unit (MTU) for the outgoing IPv4
 *   datagrams. This value includes the size of the IPv4 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * errno.
 */
int32_t
rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct);
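
/*
 * Example: the same fragmentation step when the Tx queue is configured with
 * RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, where indirect (attached) mbufs are not
 * allowed. A minimal sketch with the same assumed pkt and direct_pool as
 * above; every fragment is an independent, non-segmented copy taken from
 * pool_direct.
 *
 *   struct rte_mbuf *frags[RTE_LIBRTE_IP_FRAG_MAX_FRAG];
 *   int32_t nb = rte_ipv4_fragment_copy_nonseg_packet(pkt, frags,
 *           RTE_DIM(frags), 1500, direct_pool);
 *   rte_pktmbuf_free(pkt);
 *   // on success, transmit frags[0..nb-1]; on failure, -nb is the errno value
 */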

/**
 * This function implements reassembly of fragmented IPv4 packets.
 * Incoming mbuf should have its l2_len/l3_len fields set up correctly.
 *
 * @param tbl
 *   Table in which to look up or add the fragmented packet.
 * @param dr
 *   Death row to free buffers to.
 * @param mb
 *   Incoming mbuf with IPv4 fragment.
 * @param tms
 *   Fragment arrival timestamp.
 * @param ip_hdr
 *   Pointer to the IPv4 header inside the fragment.
 * @return
 *   Pointer to mbuf for reassembled packet, or NULL if:
 *   - an error occurred.
 *   - not all fragments of the packet are collected yet.
 */
struct rte_mbuf *rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
		struct rte_ip_frag_death_row *dr,
		struct rte_mbuf *mb, uint64_t tms, struct rte_ipv4_hdr *ip_hdr);

/**
 * Check if the IPv4 packet is fragmented.
 *
 * @param hdr
 *   IPv4 header of the packet.
 * @return
 *   1 if fragmented, 0 if not fragmented.
 */
static inline int
rte_ipv4_frag_pkt_is_fragmented(const struct rte_ipv4_hdr *hdr)
{
	uint16_t flag_offset, ip_flag, ip_ofs;

	flag_offset = rte_be_to_cpu_16(hdr->fragment_offset);
	ip_ofs = (uint16_t)(flag_offset & RTE_IPV4_HDR_OFFSET_MASK);
	ip_flag = (uint16_t)(flag_offset & RTE_IPV4_HDR_MF_FLAG);

	return ip_flag != 0 || ip_ofs != 0;
}
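
/*
 * Example: the typical IPv4 receive-side pattern - test for fragmentation
 * first and only pass fragments to the reassembly table. A minimal sketch;
 * tbl, dr and the mbuf m (with l2_len/l3_len set) are assumed to come from
 * the surrounding application.
 *
 *   struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
 *           struct rte_ipv4_hdr *, m->l2_len);
 *   if (rte_ipv4_frag_pkt_is_fragmented(ip)) {
 *           m = rte_ipv4_frag_reassemble_packet(tbl, dr, m, rte_rdtsc(), ip);
 *           if (m == NULL)
 *                   return;         // error, or more fragments still needed
 *           // m now holds the fully reassembled packet
 *   }
 */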

/**
 * Free mbufs on a given death row.
 *
 * @param dr
 *   Death row to free mbufs in.
 * @param prefetch
 *   How many buffers to prefetch before freeing.
 */
void rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
		uint32_t prefetch);
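
/*
 * Example: reassembly places mbufs that are no longer needed (overlapping or
 * stale fragments) on the death row instead of freeing them inline, so the
 * application flushes it once per burst. A minimal sketch; death_row is
 * assumed to be a per-lcore struct rte_ip_frag_death_row initialized to zero.
 *
 *   // ... run a burst of received packets through the reassembly calls ...
 *   rte_ip_frag_free_death_row(&death_row, 3);  // prefetch depth of 3 is an example
 */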


/**
 * Dump fragmentation table statistics to file.
 *
 * @param f
 *   File to dump statistics to.
 * @param tbl
 *   Fragmentation table to dump statistics from.
 */
void
rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl);

/**
 * Delete expired fragments.
 *
 * @param tbl
 *   Table to delete expired fragments from.
 * @param dr
 *   Death row to free buffers to.
 * @param tms
 *   Current timestamp.
 */
void
rte_ip_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms);
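
/*
 * Example: periodic table maintenance. Expired entries are not evicted on a
 * timer, so a slow-path loop can purge them explicitly. A minimal sketch with
 * the same assumed tbl and death_row as above.
 *
 *   uint64_t now = rte_rdtsc();
 *   rte_ip_frag_table_del_expired_entries(tbl, &death_row, now);
 *   rte_ip_frag_free_death_row(&death_row, 3);
 */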

/**@{@name Obsolete macros, kept here for compatibility reasons.
 * Will be deprecated/removed in future DPDK releases.
 */
/** Obsolete */
#define IP_FRAG_DEATH_ROW_LEN		RTE_IP_FRAG_DEATH_ROW_LEN
/** Obsolete */
#define IP_FRAG_DEATH_ROW_MBUF_LEN	RTE_IP_FRAG_DEATH_ROW_MBUF_LEN
/** Obsolete */
#define ipv6_extension_fragment		rte_ipv6_fragment_ext
/**@}*/

#ifdef __cplusplus
}
#endif

#endif /* _RTE_IP_FRAG_H_ */