/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stddef.h>
#include <stdio.h>

#include <rte_log.h>

#include "ip_frag_common.h"

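/*
 * Each flow key is hashed into IP_FRAG_HASH_FNUM alternative buckets,
 * so the table below is sized to hold that many candidate slots per key.
 */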
#define	IP_FRAG_HASH_FNUM	2

/* free mbufs from death row */
void
rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
		uint32_t prefetch)
{
	uint32_t i, k, n;

	k = RTE_MIN(prefetch, dr->cnt);
	n = dr->cnt;

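	/* warm up the cache: prefetch the first k mbufs before freeing starts */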
	for (i = 0; i != k; i++)
		rte_prefetch0(dr->row[i]);

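	/* free each mbuf while prefetching the one k slots ahead of it */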
	for (i = 0; i != n - k; i++) {
		rte_prefetch0(dr->row[i + k]);
		rte_pktmbuf_free(dr->row[i]);
	}

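	/* free the remaining, already prefetched mbufs */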
	for (; i != n; i++)
		rte_pktmbuf_free(dr->row[i]);

	dr->cnt = 0;
}

/* create fragmentation table */
struct rte_ip_frag_tbl *
rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
	uint32_t max_entries, uint64_t max_cycles, int socket_id)
{
	struct rte_ip_frag_tbl *tbl;
	size_t sz;
	uint64_t nb_entries;

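	/*
	 * Total slot count: buckets rounded up to a power of two, times
	 * entries per bucket, times the number of alternative buckets
	 * a key can hash to.
	 */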
	nb_entries = rte_align32pow2(bucket_num);
	nb_entries *= bucket_entries;
	nb_entries *= IP_FRAG_HASH_FNUM;

	/* check input parameters. */
	if (rte_is_power_of_2(bucket_entries) == 0 ||
			nb_entries > UINT32_MAX || nb_entries == 0 ||
			nb_entries < max_entries) {
		RTE_LOG(ERR, USER1, "%s: invalid input parameter\n", __func__);
		return NULL;
	}

	sz = sizeof (*tbl) + nb_entries * sizeof (tbl->pkt[0]);
	if ((tbl = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
			socket_id)) == NULL) {
		RTE_LOG(ERR, USER1,
			"%s: allocation of %zu bytes at socket %d failed\n",
			__func__, sz, socket_id);
		return NULL;
	}

	RTE_LOG(INFO, USER1, "%s: allocated %zu bytes at socket %d\n",
		__func__, sz, socket_id);

	tbl->max_cycles = max_cycles;
	tbl->max_entries = max_entries;
	tbl->nb_entries = (uint32_t)nb_entries;
	tbl->nb_buckets = bucket_num;
	tbl->bucket_entries = bucket_entries;
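	/*
	 * Both nb_entries and bucket_entries are powers of two, so this mask
	 * turns a hash value into the index of the first entry of its bucket.
	 */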
	tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);

	TAILQ_INIT(&(tbl->lru));
	return tbl;
}
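
/*
 * Usage sketch (illustrative only, not part of this file): max_cycles is
 * given in TSC cycles and max_entries must not exceed the computed table
 * size, e.g.
 *
 *	uint64_t tmo = rte_get_tsc_hz() * max_flow_ttl_sec;
 *	struct rte_ip_frag_tbl *tbl = rte_ip_frag_table_create(bucket_num,
 *		16, bucket_num * 16, tmo, rte_socket_id());
 */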

/* delete fragmentation table */
void
rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
{
	struct ip_frag_pkt *fp;

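	/*
	 * Free the mbufs still held by unfinished reassembly contexts;
	 * the entries themselves live inside tbl and are released with it.
	 */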
	TAILQ_FOREACH(fp, &tbl->lru, lru) {
		ip_frag_free_immediate(fp);
	}

	rte_free(tbl);
}

/* dump frag table statistics to file */
void
rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
{
	uint64_t fail_total, fail_nospace;

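	/*
	 * Snapshot the failure counters so the derived hash-collision count
	 * (fail_total - fail_nospace) matches the totals that get printed.
	 */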
	fail_total = tbl->stat.fail_total;
	fail_nospace = tbl->stat.fail_nospace;

	fprintf(f, "max entries:\t%u;\n"
		"entries in use:\t%u;\n"
		"finds/inserts:\t%" PRIu64 ";\n"
		"entries added:\t%" PRIu64 ";\n"
		"entries deleted by timeout:\t%" PRIu64 ";\n"
		"entries reused by timeout:\t%" PRIu64 ";\n"
		"total add failures:\t%" PRIu64 ";\n"
		"add no-space failures:\t%" PRIu64 ";\n"
		"add hash-collisions failures:\t%" PRIu64 ";\n",
		tbl->max_entries,
		tbl->use_entries,
		tbl->stat.find_num,
		tbl->stat.add_num,
		tbl->stat.del_num,
		tbl->stat.reuse_num,
		fail_total,
		fail_nospace,
		fail_total - fail_nospace);
}

/* Delete expired fragments */
void
rte_ip_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms)
{
	uint64_t max_cycles;
	struct ip_frag_pkt *fp;

	max_cycles = tbl->max_cycles;

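	/*
	 * The LRU list is ordered oldest first, so the scan can stop at the
	 * first entry that has not yet expired, or once the death row cannot
	 * hold another packet's fragments.
	 */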
	TAILQ_FOREACH(fp, &tbl->lru, lru)
		if (max_cycles + fp->start < tms) {
			/* check that death row has enough space */
			if (RTE_IP_FRAG_DEATH_ROW_MBUF_LEN - dr->cnt >=
					fp->last_idx)
				ip_frag_tbl_del(tbl, dr, fp);
			else
				return;
		} else
			return;
}
145