/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stddef.h>
#include <stdio.h>

#include <rte_log.h>

RTE_LOG_REGISTER_DEFAULT(ipfrag_logtype, INFO);

#include "ip_frag_common.h"

/* number of alternative hash buckets a reassembly flow may be stored in */
#define	IP_FRAG_HASH_FNUM	2
/* free mbufs from death row */
void
rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
		uint32_t prefetch)
{
	uint32_t i, k, n;

	k = RTE_MIN(prefetch, dr->cnt);
	n = dr->cnt;

	/* warm up the cache for the first 'k' mbufs */
	for (i = 0; i != k; i++)
		rte_prefetch0(dr->row[i]);

	/* free mbuf 'i' while prefetching mbuf 'i + k' ahead of it */
	for (i = 0; i != n - k; i++) {
		rte_prefetch0(dr->row[i + k]);
		rte_pktmbuf_free(dr->row[i]);
	}

	/* free the last 'k' mbufs, which are already prefetched */
	for (; i != n; i++)
		rte_pktmbuf_free(dr->row[i]);

	dr->cnt = 0;
}

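/*
 * Illustrative sketch (not part of the library): the usage pattern the
 * death row is designed for.  The reassembly calls only queue mbufs on the
 * death row; the caller flushes it once per burst, outside the per-packet
 * loop.  The helper name, the IP_FRAG_USAGE_EXAMPLES guard and the
 * prefetch depth are hypothetical, and the sketch assumes the L2 header
 * has already been stripped so the mbuf data starts at the IPv4 header.
 */
#ifdef IP_FRAG_USAGE_EXAMPLES
static void
example_reassemble_burst(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint64_t tms)
{
	uint16_t i;

	for (i = 0; i != nb_pkts; i++) {
		struct rte_ipv4_hdr *ip_hdr;

		ip_hdr = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *);
		if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr))
			/*
			 * Returns NULL while the packet is still incomplete;
			 * a real caller would compact the array afterwards.
			 */
			pkts[i] = rte_ipv4_frag_reassemble_packet(tbl, dr,
					pkts[i], tms, ip_hdr);
	}

	/* release everything retired during this burst in one go */
	rte_ip_frag_free_death_row(dr, 3);
}
#endif /* IP_FRAG_USAGE_EXAMPLES */
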
/* create fragmentation table */
struct rte_ip_frag_tbl *
rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
	uint32_t max_entries, uint64_t max_cycles, int socket_id)
{
	struct rte_ip_frag_tbl *tbl;
	size_t sz;
	uint64_t nb_entries;

	/*
	 * Size the table: bucket_num buckets (rounded up to a power of two)
	 * of bucket_entries slots, for each of the IP_FRAG_HASH_FNUM
	 * candidate buckets a flow may hash to.
	 */
	nb_entries = rte_align32pow2(bucket_num);
	nb_entries *= bucket_entries;
	nb_entries *= IP_FRAG_HASH_FNUM;

	/* check input parameters. */
	if (rte_is_power_of_2(bucket_entries) == 0 ||
			nb_entries > UINT32_MAX || nb_entries == 0 ||
			nb_entries < max_entries) {
		IP_FRAG_LOG_LINE(ERR, "%s: invalid input parameter", __func__);
		return NULL;
	}

	sz = sizeof(*tbl) + nb_entries * sizeof(tbl->pkt[0]);
	if ((tbl = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
			socket_id)) == NULL) {
		IP_FRAG_LOG_LINE(ERR,
			"%s: allocation of %zu bytes at socket %d failed",
			__func__, sz, socket_id);
		return NULL;
	}

	IP_FRAG_LOG_LINE(INFO, "%s: allocated %zu bytes at socket %d",
		__func__, sz, socket_id);

	tbl->max_cycles = max_cycles;
	tbl->max_entries = max_entries;
	tbl->nb_entries = (uint32_t)nb_entries;
	tbl->nb_buckets = bucket_num;
	tbl->bucket_entries = bucket_entries;
	tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);

	TAILQ_INIT(&(tbl->lru));
	return tbl;
}

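/*
 * Illustrative sketch (not part of the library): typical table sizing.
 * max_entries may not exceed rte_align32pow2(bucket_num) * bucket_entries
 * * IP_FRAG_HASH_FNUM, which is exactly the check above.  The numbers,
 * the helper name, the tsc_hz parameter and the IP_FRAG_USAGE_EXAMPLES
 * guard are all placeholders for the example.
 */
#ifdef IP_FRAG_USAGE_EXAMPLES
static struct rte_ip_frag_tbl *
example_create_frag_table(int socket_id, uint64_t tsc_hz)
{
	uint32_t max_flows = 4096;	/* concurrent reassembly contexts */
	uint32_t bucket_entries = 16;	/* slots per bucket, power of two */
	uint64_t max_cycles = 2 * tsc_hz;	/* drop fragments after ~2s */

	return rte_ip_frag_table_create(max_flows, bucket_entries,
			max_flows, max_cycles, socket_id);
}
#endif /* IP_FRAG_USAGE_EXAMPLES */
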
/* delete fragmentation table */
void
rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
{
	struct ip_frag_pkt *fp;

	TAILQ_FOREACH(fp, &tbl->lru, lru) {
		ip_frag_free_immediate(fp);
	}

	rte_free(tbl);
}

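/*
 * Illustrative sketch (not part of the library): teardown order.  The
 * destructor above frees the packets the table still holds, but it does
 * not know about the caller-owned death row, so that is flushed
 * separately.  The helper name and the IP_FRAG_USAGE_EXAMPLES guard are
 * hypothetical.
 */
#ifdef IP_FRAG_USAGE_EXAMPLES
static void
example_teardown(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr)
{
	/* prefetch depth 0: nothing to pipeline, just free the mbufs */
	rte_ip_frag_free_death_row(dr, 0);
	rte_ip_frag_table_destroy(tbl);
}
#endif /* IP_FRAG_USAGE_EXAMPLES */
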
/* dump frag table statistics to file */
void
rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
{
	uint64_t fail_total, fail_nospace;

	fail_total = tbl->stat.fail_total;
	fail_nospace = tbl->stat.fail_nospace;

	fprintf(f, "max entries:\t%u;\n"
		"entries in use:\t%u;\n"
		"finds/inserts:\t%" PRIu64 ";\n"
		"entries added:\t%" PRIu64 ";\n"
		"entries deleted by timeout:\t%" PRIu64 ";\n"
		"entries reused by timeout:\t%" PRIu64 ";\n"
		"total add failures:\t%" PRIu64 ";\n"
		"add no-space failures:\t%" PRIu64 ";\n"
		"add hash-collisions failures:\t%" PRIu64 ";\n",
		tbl->max_entries,
		tbl->use_entries,
		tbl->stat.find_num,
		tbl->stat.add_num,
		tbl->stat.del_num,
		tbl->stat.reuse_num,
		fail_total,
		fail_nospace,
		fail_total - fail_nospace);
}

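/*
 * Illustrative sketch (not part of the library): dumping the counters on
 * demand, e.g. from a debug CLI or signal handler.  Note that the
 * "hash-collisions" line is derived as fail_total - fail_nospace rather
 * than counted separately.  The helper name and the
 * IP_FRAG_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef IP_FRAG_USAGE_EXAMPLES
static void
example_dump_frag_stats(const struct rte_ip_frag_tbl *tbl)
{
	rte_ip_frag_table_statistics_dump(stdout, tbl);
}
#endif /* IP_FRAG_USAGE_EXAMPLES */
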
/* Delete expired fragments */
void
rte_ip_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms)
{
	uint64_t max_cycles;
	struct ip_frag_pkt *fp;

	max_cycles = tbl->max_cycles;

	/*
	 * The LRU list is ordered oldest first, so the scan stops at the
	 * first entry that has not expired yet.
	 */
	TAILQ_FOREACH(fp, &tbl->lru, lru)
		if (max_cycles + fp->start < tms) {
			/* check that death row has enough space */
			if (RTE_IP_FRAG_DEATH_ROW_MBUF_LEN - dr->cnt >=
					fp->last_idx)
				ip_frag_tbl_del(tbl, dr, fp);
			else
				return;
		} else
			return;
}
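
/*
 * Illustrative sketch (not part of the library): running the expiry scan
 * from the main polling loop.  tms must be in the same clock units as the
 * max_cycles value the table was created with (typically TSC cycles), and
 * expired packets are only moved to the death row, so it still has to be
 * flushed.  The helper name and the IP_FRAG_USAGE_EXAMPLES guard are
 * hypothetical.
 */
#ifdef IP_FRAG_USAGE_EXAMPLES
static void
example_expire_stale_flows(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms)
{
	rte_ip_frag_table_del_expired_entries(tbl, dr, tms);
	/* the expired fragments now sit on the death row; free them */
	rte_ip_frag_free_death_row(dr, 0);
}
#endif /* IP_FRAG_USAGE_EXAMPLES */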