/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stddef.h>

#include <rte_jhash.h>
#include <rte_hash_crc.h>

#include "ip_frag_common.h"

#define PRIME_VALUE 0xeaad8405

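/*
 * Map a hash signature to its bucket: the table's entries form one flat
 * array ("pkt"), and masking the signature with entry_mask turns it into
 * an offset to the first slot of the selected bucket.
 */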
#define IP_FRAG_TBL_POS(tbl, sig) \
	((tbl)->pkt + ((sig) & (tbl)->entry_mask))

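/*
 * Claim a free table entry for a new flow: copy the key, reset the
 * per-flow reassembly state, append the entry to the LRU tail and
 * bump the in-use counter.
 */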
static inline void
ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
	const struct ip_frag_key *key, uint64_t tms)
{
	fp->key = key[0];
	ip_frag_reset(fp, tms);
	TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
	tbl->use_entries++;
	IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, add_num, 1);
}

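/*
 * Recycle a stale entry in place: drop any fragments it still holds
 * onto the death row, reset its state for the new arrival and move it
 * to the tail of the LRU list.
 */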
static inline void
ip_frag_tbl_reuse(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
	struct ip_frag_pkt *fp, uint64_t tms)
{
	ip_frag_free(fp, dr);
	ip_frag_reset(fp, tms);
	TAILQ_REMOVE(&tbl->lru, fp, lru);
	TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
	IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, reuse_num, 1);
}

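/*
 * Compute two independent hash signatures for an IPv4 flow key, so each
 * flow maps to two candidate buckets (sig1 and sig2). On x86 and arm64
 * the CRC32 instruction is used; other architectures fall back to jhash.
 * The second signature is derived from the first by a cheap shift-and-add
 * mix rather than a second full hash.
 */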
static inline void
ipv4_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
{
	uint32_t v;
	const uint32_t *p;

	p = (const uint32_t *)&key->src_dst;

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
	v = rte_hash_crc_4byte(p[1], v);
	v = rte_hash_crc_4byte(key->id, v);
#else
	v = rte_jhash_3words(p[0], p[1], key->id, PRIME_VALUE);
#endif /* RTE_ARCH_X86 || RTE_ARCH_ARM64 */

	*v1 = v;
	*v2 = (v << 7) + (v >> 14);
}

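/*
 * Same two-signature scheme for IPv6: the 32 bytes of source/destination
 * address are folded in as eight 32-bit words, followed by the fragment id.
 */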
static inline void
ipv6_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
{
	uint32_t v;
	const uint32_t *p;

	p = (const uint32_t *)&key->src_dst;

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
	v = rte_hash_crc_4byte(p[1], v);
	v = rte_hash_crc_4byte(p[2], v);
	v = rte_hash_crc_4byte(p[3], v);
	v = rte_hash_crc_4byte(p[4], v);
	v = rte_hash_crc_4byte(p[5], v);
	v = rte_hash_crc_4byte(p[6], v);
	v = rte_hash_crc_4byte(p[7], v);
	v = rte_hash_crc_4byte(key->id, v);
#else
	v = rte_jhash_3words(p[0], p[1], p[2], PRIME_VALUE);
	v = rte_jhash_3words(p[3], p[4], p[5], v);
	v = rte_jhash_3words(p[6], p[7], key->id, v);
#endif /* RTE_ARCH_X86 || RTE_ARCH_ARM64 */

	*v1 = v;
	*v2 = (v << 7) + (v >> 14);
}

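/*
 * Record one fragment in its flow entry and attempt reassembly.
 * The first and last fragments go into reserved slots; intermediate
 * fragments take the next free slot. Once the accumulated frag_size
 * matches total_size (known after the last fragment arrives), the
 * fragments are stitched back into a single mbuf chain and returned
 * to the caller; until then, or on error, NULL is returned. On error
 * the entry's fragments are moved to the death row.
 */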
struct rte_mbuf *
ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
	struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
{
	uint32_t idx;

	fp->frag_size += len;

	/* this is the first fragment. */
	if (ofs == 0) {
		idx = (fp->frags[IP_FIRST_FRAG_IDX].mb == NULL) ?
			IP_FIRST_FRAG_IDX : UINT32_MAX;

	/* this is the last fragment. */
	} else if (more_frags == 0) {
		fp->total_size = ofs + len;
		idx = (fp->frags[IP_LAST_FRAG_IDX].mb == NULL) ?
			IP_LAST_FRAG_IDX : UINT32_MAX;

	/* this is an intermediate fragment. */
	} else if ((idx = fp->last_idx) < RTE_DIM(fp->frags)) {
		fp->last_idx++;
	}

	/*
	 * erroneous packet: either the max allowed number of fragments was
	 * exceeded, or a duplicate first/last fragment was encountered.
	 */
	if (idx >= RTE_DIM(fp->frags)) {

		/* report an error. */
		if (fp->key.key_len == IPV4_KEYLEN)
			IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
				"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
				"total_size: %u, frag_size: %u, last_idx: %u\n"
				"first fragment: ofs: %u, len: %u\n"
				"last fragment: ofs: %u, len: %u\n\n",
				__func__, __LINE__,
				fp, fp->key.src_dst[0], fp->key.id,
				fp->total_size, fp->frag_size, fp->last_idx,
				fp->frags[IP_FIRST_FRAG_IDX].ofs,
				fp->frags[IP_FIRST_FRAG_IDX].len,
				fp->frags[IP_LAST_FRAG_IDX].ofs,
				fp->frags[IP_LAST_FRAG_IDX].len);
		else
			IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
				"ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
				"total_size: %u, frag_size: %u, last_idx: %u\n"
				"first fragment: ofs: %u, len: %u\n"
				"last fragment: ofs: %u, len: %u\n\n",
				__func__, __LINE__,
				fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id,
				fp->total_size, fp->frag_size, fp->last_idx,
				fp->frags[IP_FIRST_FRAG_IDX].ofs,
				fp->frags[IP_FIRST_FRAG_IDX].len,
				fp->frags[IP_LAST_FRAG_IDX].ofs,
				fp->frags[IP_LAST_FRAG_IDX].len);

		/* free all fragments, invalidate the entry. */
		ip_frag_free(fp, dr);
		ip_frag_key_invalidate(&fp->key);
		IP_FRAG_MBUF2DR(dr, mb);

		return NULL;
	}

	fp->frags[idx].ofs = ofs;
	fp->frags[idx].len = len;
	fp->frags[idx].mb = mb;

	mb = NULL;

	/* not all fragments are collected yet. */
	if (likely(fp->frag_size < fp->total_size)) {
		return mb;

	/* if we collected all fragments, then try to reassemble. */
	} else if (fp->frag_size == fp->total_size &&
			fp->frags[IP_FIRST_FRAG_IDX].mb != NULL) {
		if (fp->key.key_len == IPV4_KEYLEN)
			mb = ipv4_frag_reassemble(fp);
		else
			mb = ipv6_frag_reassemble(fp);
	}

	/* erroneous set of fragments. */
	if (mb == NULL) {

		/* report an error. */
		if (fp->key.key_len == IPV4_KEYLEN)
			IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
				"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
				"total_size: %u, frag_size: %u, last_idx: %u\n"
				"first fragment: ofs: %u, len: %u\n"
				"last fragment: ofs: %u, len: %u\n\n",
				__func__, __LINE__,
				fp, fp->key.src_dst[0], fp->key.id,
				fp->total_size, fp->frag_size, fp->last_idx,
				fp->frags[IP_FIRST_FRAG_IDX].ofs,
				fp->frags[IP_FIRST_FRAG_IDX].len,
				fp->frags[IP_LAST_FRAG_IDX].ofs,
				fp->frags[IP_LAST_FRAG_IDX].len);
		else
			IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
				"ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
				"total_size: %u, frag_size: %u, last_idx: %u\n"
				"first fragment: ofs: %u, len: %u\n"
				"last fragment: ofs: %u, len: %u\n\n",
				__func__, __LINE__,
				fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id,
				fp->total_size, fp->frag_size, fp->last_idx,
				fp->frags[IP_FIRST_FRAG_IDX].ofs,
				fp->frags[IP_FIRST_FRAG_IDX].len,
				fp->frags[IP_LAST_FRAG_IDX].ofs,
				fp->frags[IP_LAST_FRAG_IDX].len);

		/* free associated resources. */
		ip_frag_free(fp, dr);
	}

	/* we are done with that entry, invalidate it. */
	ip_frag_key_invalidate(&fp->key);
	return mb;
}

/*
 * Find an entry in the table for the corresponding fragment.
 * If no such entry exists, allocate a new one.
 * If the entry is stale, free and reuse it.
 */
struct ip_frag_pkt *
ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
	const struct ip_frag_key *key, uint64_t tms)
{
	struct ip_frag_pkt *pkt, *free, *stale, *lru;
	uint64_t max_cycles;

	/*
	 * The two assignments below are redundant;
	 * they are here just to keep gcc 4.6 happy.
	 */
	free = NULL;
	stale = NULL;
	max_cycles = tbl->max_cycles;

	IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, find_num, 1);

	if ((pkt = ip_frag_lookup(tbl, key, tms, &free, &stale)) == NULL) {

		/* timed-out entry, free and invalidate it. */
		if (stale != NULL) {
			ip_frag_tbl_del(tbl, dr, stale);
			free = stale;

		/*
		 * we found a free bucket slot, but the table is already at
		 * its maximum number of entries: evict the LRU entry if it
		 * has timed out, otherwise give up.
		 */
		} else if (free != NULL &&
				tbl->max_entries <= tbl->use_entries) {
			lru = TAILQ_FIRST(&tbl->lru);
			if (max_cycles + lru->start < tms) {
				ip_frag_tbl_del(tbl, dr, lru);
			} else {
				free = NULL;
				IP_FRAG_TBL_STAT_UPDATE(&tbl->stat,
					fail_nospace, 1);
			}
		}

		/* found a free entry to reuse. */
		if (free != NULL) {
			ip_frag_tbl_add(tbl, free, key, tms);
			pkt = free;
		}

	/*
	 * we found the flow, but it has already timed out:
	 * free its resources, reposition it in the LRU list and reuse it.
	 */
	} else if (max_cycles + pkt->start < tms) {
		ip_frag_tbl_reuse(tbl, dr, pkt, tms);
	}

	IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, fail_total, (pkt == NULL));

	tbl->last = pkt;
	return pkt;
}

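/*
 * Probe the two candidate buckets for the given key. A single-entry cache
 * (tbl->last) is checked first to short-cut back-to-back fragments of the
 * same flow. On a hit the matching entry is returned; otherwise NULL is
 * returned, with *free pointing at the first empty slot found (if any) and
 * *stale at the first timed-out slot found (if any), so the caller can
 * decide how to obtain an entry.
 */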
struct ip_frag_pkt *
ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
	const struct ip_frag_key *key, uint64_t tms,
	struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
{
	struct ip_frag_pkt *p1, *p2;
	struct ip_frag_pkt *empty, *old;
	uint64_t max_cycles;
	uint32_t i, assoc, sig1, sig2;

	empty = NULL;
	old = NULL;

	max_cycles = tbl->max_cycles;
	assoc = tbl->bucket_entries;

	if (tbl->last != NULL && ip_frag_key_cmp(key, &tbl->last->key) == 0)
		return tbl->last;

	/* different hashing methods for IPv4 and IPv6 */
	if (key->key_len == IPV4_KEYLEN)
		ipv4_frag_hash(key, &sig1, &sig2);
	else
		ipv6_frag_hash(key, &sig1, &sig2);

	p1 = IP_FRAG_TBL_POS(tbl, sig1);
	p2 = IP_FRAG_TBL_POS(tbl, sig2);

	for (i = 0; i != assoc; i++) {
		if (p1->key.key_len == IPV4_KEYLEN)
			IP_FRAG_LOG(DEBUG, "%s:%d:\n"
				"tbl: %p, max_entries: %u, use_entries: %u\n"
				"ipv4_frag_pkt line0: %p, index: %u from %u\n"
				"key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
				__func__, __LINE__,
				tbl, tbl->max_entries, tbl->use_entries,
				p1, i, assoc,
				p1[i].key.src_dst[0], p1[i].key.id, p1[i].start);
		else
			IP_FRAG_LOG(DEBUG, "%s:%d:\n"
				"tbl: %p, max_entries: %u, use_entries: %u\n"
				"ipv6_frag_pkt line0: %p, index: %u from %u\n"
				"key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64 "\n",
				__func__, __LINE__,
				tbl, tbl->max_entries, tbl->use_entries,
				p1, i, assoc,
				IPv6_KEY_BYTES(p1[i].key.src_dst), p1[i].key.id, p1[i].start);

		if (ip_frag_key_cmp(key, &p1[i].key) == 0)
			return p1 + i;
		else if (ip_frag_key_is_empty(&p1[i].key))
			empty = (empty == NULL) ? (p1 + i) : empty;
		else if (max_cycles + p1[i].start < tms)
			old = (old == NULL) ? (p1 + i) : old;

		if (p2->key.key_len == IPV4_KEYLEN)
			IP_FRAG_LOG(DEBUG, "%s:%d:\n"
				"tbl: %p, max_entries: %u, use_entries: %u\n"
				"ipv4_frag_pkt line1: %p, index: %u from %u\n"
				"key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
				__func__, __LINE__,
				tbl, tbl->max_entries, tbl->use_entries,
				p2, i, assoc,
				p2[i].key.src_dst[0], p2[i].key.id, p2[i].start);
		else
			IP_FRAG_LOG(DEBUG, "%s:%d:\n"
				"tbl: %p, max_entries: %u, use_entries: %u\n"
				"ipv6_frag_pkt line1: %p, index: %u from %u\n"
				"key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64 "\n",
				__func__, __LINE__,
				tbl, tbl->max_entries, tbl->use_entries,
				p2, i, assoc,
				IPv6_KEY_BYTES(p2[i].key.src_dst), p2[i].key.id, p2[i].start);

		if (ip_frag_key_cmp(key, &p2[i].key) == 0)
			return p2 + i;
		else if (ip_frag_key_is_empty(&p2[i].key))
			empty = (empty == NULL) ? (p2 + i) : empty;
		else if (max_cycles + p2[i].start < tms)
			old = (old == NULL) ? (p2 + i) : old;
	}

	*free = empty;
	*stale = old;
	return NULL;
}
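
/*
 * Illustrative sketch (not part of this file) of how the helpers above are
 * typically driven by the library's reassembly entry points: the caller
 * builds an ip_frag_key from the packet headers, looks up or creates the
 * flow entry, and feeds the fragment to ip_frag_process(). The variable
 * names below are assumptions for illustration only.
 *
 *	struct ip_frag_key key;
 *	struct ip_frag_pkt *fp;
 *	struct rte_mbuf *reassembled;
 *
 *	// key.src_dst[] and key.id come from the IP header, key_len from
 *	// the address family (IPV4_KEYLEN or IPV6_KEYLEN).
 *	fp = ip_frag_find(tbl, dr, &key, tms);
 *	if (fp == NULL)
 *		return NULL;	// no room in the table for this flow
 *
 *	// ofs/len/more_frags come from the fragment header; the call returns
 *	// the reassembled packet once the last missing fragment arrives.
 *	reassembled = ip_frag_process(fp, dr, mb, ofs, len, more_frags);
 */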