xref: /dpdk/lib/ip_frag/rte_ip_frag_common.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stddef.h>
#include <stdio.h>

#include <rte_memory.h>
#include <rte_log.h>

#include "ip_frag_common.h"

/* each flow key hashes to two alternative buckets (see the sizing below) */
#define	IP_FRAG_HASH_FNUM	2

/* free mbufs from death row */
void
rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
		uint32_t prefetch)
{
	uint32_t i, k, n;

	k = RTE_MIN(prefetch, dr->cnt);
	n = dr->cnt;

	/* warm up the cache with the first k mbufs */
	for (i = 0; i != k; i++)
		rte_prefetch0(dr->row[i]);

	/* free each mbuf while prefetching k entries ahead */
	for (i = 0; i != n - k; i++) {
		rte_prefetch0(dr->row[i + k]);
		rte_pktmbuf_free(dr->row[i]);
	}

	/* free the remaining, already prefetched, mbufs */
	for (; i != n; i++)
		rte_pktmbuf_free(dr->row[i]);

	dr->cnt = 0;
}
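
/*
 * Illustrative usage sketch, not part of the original file: after a burst
 * of reassembly calls has placed mbufs on the death row, flush it. The
 * prefetch window of 3 is an arbitrary example value, not a library
 * default. Compiled out with #if 0 so the library code is unchanged.
 */
#if 0
static void
flush_death_row_sketch(struct rte_ip_frag_death_row *dr)
{
	if (dr->cnt != 0)
		rte_ip_frag_free_death_row(dr, 3);
}
#endif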

/* create fragmentation table */
struct rte_ip_frag_tbl *
rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
	uint32_t max_entries, uint64_t max_cycles, int socket_id)
{
	struct rte_ip_frag_tbl *tbl;
	size_t sz;
	uint64_t nb_entries;

	nb_entries = rte_align32pow2(bucket_num);
	nb_entries *= bucket_entries;
	nb_entries *= IP_FRAG_HASH_FNUM;

	/* check input parameters. */
	if (rte_is_power_of_2(bucket_entries) == 0 ||
			nb_entries > UINT32_MAX || nb_entries == 0 ||
			nb_entries < max_entries) {
		RTE_LOG(ERR, USER1, "%s: invalid input parameter\n", __func__);
		return NULL;
	}

	sz = sizeof (*tbl) + nb_entries * sizeof (tbl->pkt[0]);
	if ((tbl = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
			socket_id)) == NULL) {
		RTE_LOG(ERR, USER1,
			"%s: allocation of %zu bytes at socket %d failed\n",
			__func__, sz, socket_id);
		return NULL;
	}

	RTE_LOG(INFO, USER1, "%s: allocated %zu bytes at socket %d\n",
		__func__, sz, socket_id);

	tbl->max_cycles = max_cycles;
	tbl->max_entries = max_entries;
	tbl->nb_entries = (uint32_t)nb_entries;
	tbl->nb_buckets = bucket_num;
	tbl->bucket_entries = bucket_entries;
	/* mask that rounds a hash value down to the start of its bucket */
	tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);

	TAILQ_INIT(&(tbl->lru));
	return tbl;
}
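
/*
 * Illustrative sketch, not part of the original file: create a table sized
 * for 64K concurrent flows with a 2000 ms timeout. The bucket sizing and
 * the TTL are example values; rte_get_tsc_hz() and MS_PER_S come from
 * <rte_cycles.h>. Compiled out with #if 0 so the library code is unchanged.
 */
#if 0
#include <rte_cycles.h>

static struct rte_ip_frag_tbl *
create_frag_tbl_sketch(int socket_id)
{
	uint32_t bucket_num = 4096;
	uint32_t bucket_entries = 16;	/* must be a power of two */
	uint32_t max_entries = bucket_num * bucket_entries;
	/* convert a 2000 ms flow TTL into TSC cycles */
	uint64_t max_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
		MS_PER_S * 2000;

	return rte_ip_frag_table_create(bucket_num, bucket_entries,
		max_entries, max_cycles, socket_id);
}
#endif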

/* delete fragmentation table */
void
rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
{
	struct ip_frag_pkt *fp;

	/* free the mbufs still held by each entry; the entries themselves
	 * live inside the table allocation and are released by rte_free().
	 */
	TAILQ_FOREACH(fp, &tbl->lru, lru) {
		ip_frag_free_immediate(fp);
	}

	rte_free(tbl);
}

/* dump frag table statistics to file */
void
rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
{
	uint64_t fail_total, fail_nospace;

	fail_total = tbl->stat.fail_total;
	fail_nospace = tbl->stat.fail_nospace;

	fprintf(f, "max entries:\t%u;\n"
		"entries in use:\t%u;\n"
		"finds/inserts:\t%" PRIu64 ";\n"
		"entries added:\t%" PRIu64 ";\n"
		"entries deleted by timeout:\t%" PRIu64 ";\n"
		"entries reused by timeout:\t%" PRIu64 ";\n"
		"total add failures:\t%" PRIu64 ";\n"
		"add no-space failures:\t%" PRIu64 ";\n"
		"add hash-collisions failures:\t%" PRIu64 ";\n",
		tbl->max_entries,
		tbl->use_entries,
		tbl->stat.find_num,
		tbl->stat.add_num,
		tbl->stat.del_num,
		tbl->stat.reuse_num,
		fail_total,
		fail_nospace,
		/* collision failures are the add failures not caused by
		 * lack of space */
		fail_total - fail_nospace);
}
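
/*
 * Illustrative sketch, not part of the original file: dump the counters to
 * stdout, e.g. from a periodic control-plane task. Note that the stat
 * counters are only updated when the library is built with
 * RTE_LIBRTE_IP_FRAG_TBL_STAT. Compiled out with #if 0 so the library code
 * is unchanged.
 */
#if 0
static void
dump_frag_stats_sketch(const struct rte_ip_frag_tbl *tbl)
{
	rte_ip_frag_table_statistics_dump(stdout, tbl);
}
#endif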

/* Delete expired fragments */
void
rte_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms)
{
	uint64_t max_cycles;
	struct ip_frag_pkt *fp;

	max_cycles = tbl->max_cycles;

	/* the LRU list is ordered oldest first, so stop at the first entry
	 * that has not yet expired */
	TAILQ_FOREACH(fp, &tbl->lru, lru)
		if (max_cycles + fp->start < tms) {
			/* check that death row has enough space */
			if (IP_FRAG_DEATH_ROW_MBUF_LEN - dr->cnt >= fp->last_idx)
				ip_frag_tbl_del(tbl, dr, fp);
			else
				return;
		} else
			return;
}
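
/*
 * Illustrative sketch, not part of the original file: a periodic expiry
 * pass driven by the TSC, followed by a death-row flush to actually free
 * the collected mbufs. rte_rdtsc() comes from <rte_cycles.h>. Compiled out
 * with #if 0 so the library code is unchanged.
 */
#if 0
static void
expire_fragments_sketch(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr)
{
	rte_frag_table_del_expired_entries(tbl, dr, rte_rdtsc());
	if (dr->cnt != 0)
		rte_ip_frag_free_death_row(dr, 0);
}
#endif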