/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stddef.h>
#include <stdio.h>

#include <rte_memory.h>
#include <rte_log.h>

#include "ip_frag_common.h"

/* number of hash functions: each key is probed in two candidate buckets */
#define	IP_FRAG_HASH_FNUM	2

/* free mbufs from death row */
void
rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
		uint32_t prefetch)
{
	uint32_t i, k, n;

	k = RTE_MIN(prefetch, dr->cnt);
	n = dr->cnt;

	/* prime the prefetch pipeline with the first k mbufs */
	for (i = 0; i != k; i++)
		rte_prefetch0(dr->row[i]);

	/* free mbufs, prefetching k entries ahead of the free position */
	for (i = 0; i != n - k; i++) {
		rte_prefetch0(dr->row[i + k]);
		rte_pktmbuf_free(dr->row[i]);
	}

	/* free the remaining tail, which is already prefetched */
	for (; i != n; i++)
		rte_pktmbuf_free(dr->row[i]);

	dr->cnt = 0;
}
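
/*
 * Usage sketch (illustrative only, not part of this file): the death row
 * collects mbufs of dropped or expired fragments and is typically drained
 * once per polling iteration after reassembly.  frag_tbl, death_row, mb
 * and ip_hdr below are assumed application variables.
 *
 *	struct rte_mbuf *mo;
 *
 *	mo = rte_ipv4_frag_reassemble_packet(frag_tbl, &death_row, mb,
 *			rte_rdtsc(), ip_hdr);
 *	if (mo != NULL) {
 *		... forward the reassembled packet ...
 *	}
 *	rte_ip_frag_free_death_row(&death_row, 3);
 */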

/* create fragmentation table */
struct rte_ip_frag_tbl *
rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
	uint32_t max_entries, uint64_t max_cycles, int socket_id)
{
	struct rte_ip_frag_tbl *tbl;
	size_t sz;
	uint64_t nb_entries;

	/* total slots: power-of-2 aligned bucket count times bucket depth,
	 * doubled to cover both hash probe positions of a key.
	 */
	nb_entries = rte_align32pow2(bucket_num);
	nb_entries *= bucket_entries;
	nb_entries *= IP_FRAG_HASH_FNUM;

	/* check input parameters. */
	if (rte_is_power_of_2(bucket_entries) == 0 ||
			nb_entries > UINT32_MAX || nb_entries == 0 ||
			nb_entries < max_entries) {
		RTE_LOG(ERR, USER1, "%s: invalid input parameter\n", __func__);
		return NULL;
	}

	sz = sizeof (*tbl) + nb_entries * sizeof (tbl->pkt[0]);
	if ((tbl = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
			socket_id)) == NULL) {
		RTE_LOG(ERR, USER1,
			"%s: allocation of %zu bytes at socket %d failed\n",
			__func__, sz, socket_id);
		return NULL;
	}

	RTE_LOG(INFO, USER1, "%s: allocated %zu bytes at socket %d\n",
		__func__, sz, socket_id);

	tbl->max_cycles = max_cycles;
	tbl->max_entries = max_entries;
	tbl->nb_entries = (uint32_t)nb_entries;
	tbl->nb_buckets = bucket_num;
	tbl->bucket_entries = bucket_entries;

	/* mask that maps a hash signature to the first entry of its bucket */
	tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);

	TAILQ_INIT(&(tbl->lru));
	return tbl;
}
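
/*
 * Usage sketch (illustrative only, not part of this file): size the table
 * for the expected number of concurrent flows and derive the timeout from
 * the TSC frequency.  MAX_FLOW_NUM, BUCKET_ENTRIES (must be a power of 2)
 * and FLOW_TTL_MS are assumed application constants.
 *
 *	uint64_t frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
 *			MS_PER_S * FLOW_TTL_MS;
 *
 *	struct rte_ip_frag_tbl *ft = rte_ip_frag_table_create(MAX_FLOW_NUM,
 *			BUCKET_ENTRIES, MAX_FLOW_NUM, frag_cycles,
 *			rte_socket_id());
 *	if (ft == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create fragmentation table\n");
 */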

/* delete fragmentation table */
void
rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
{
	struct ip_frag_pkt *fp;

	/* entries live inside the table allocation itself, so only their
	 * pending mbufs have to be released before freeing the table.
	 */
	TAILQ_FOREACH(fp, &tbl->lru, lru) {
		ip_frag_free_immediate(fp);
	}

	rte_free(tbl);
}

/* dump frag table statistics to file */
void
rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
{
	uint64_t fail_total, fail_nospace;

	/* snapshot the failure counters so that the derived
	 * hash-collision count below is consistent.
	 */
	fail_total = tbl->stat.fail_total;
	fail_nospace = tbl->stat.fail_nospace;

	fprintf(f, "max entries:\t%u;\n"
		"entries in use:\t%u;\n"
		"finds/inserts:\t%" PRIu64 ";\n"
		"entries added:\t%" PRIu64 ";\n"
		"entries deleted by timeout:\t%" PRIu64 ";\n"
		"entries reused by timeout:\t%" PRIu64 ";\n"
		"total add failures:\t%" PRIu64 ";\n"
		"add no-space failures:\t%" PRIu64 ";\n"
		"add hash-collision failures:\t%" PRIu64 ";\n",
		tbl->max_entries,
		tbl->use_entries,
		tbl->stat.find_num,
		tbl->stat.add_num,
		tbl->stat.del_num,
		tbl->stat.reuse_num,
		fail_total,
		fail_nospace,
		fail_total - fail_nospace);
}
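
/*
 * Usage sketch (illustrative only, not part of this file): the counters
 * in tbl->stat are only updated when the library is built with
 * IP_FRAG_TBL_STAT defined; without it the dump prints zeroes for the
 * per-operation statistics.  frag_tbl is an assumed application variable.
 *
 *	rte_ip_frag_table_statistics_dump(stdout, frag_tbl);
 */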

/* delete expired fragments */
void
rte_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms)
{
	uint64_t max_cycles;
	struct ip_frag_pkt *fp;

	max_cycles = tbl->max_cycles;

	/* the LRU list is ordered oldest first, so the scan can stop at
	 * the first entry that has not yet timed out.
	 */
	TAILQ_FOREACH(fp, &tbl->lru, lru)
		if (max_cycles + fp->start < tms) {
			/* check that death row has enough space */
			if (IP_FRAG_DEATH_ROW_MBUF_LEN - dr->cnt >= fp->last_idx)
				ip_frag_tbl_del(tbl, dr, fp);
			else
				return;
		} else
			return;
}
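
/*
 * Usage sketch (illustrative only, not part of this file): expire stale
 * flows periodically from the main loop and drain the death row
 * afterwards.  frag_tbl, death_row and PREFETCH_OFFSET are assumed
 * application names.
 *
 *	rte_frag_table_del_expired_entries(frag_tbl, &death_row,
 *			rte_rdtsc());
 *	rte_ip_frag_free_death_row(&death_row, PREFETCH_OFFSET);
 */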