/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stddef.h>
#include <stdio.h>

#include <rte_log.h>

RTE_LOG_REGISTER_DEFAULT(ipfrag_logtype, INFO);

#include "ip_frag_common.h"

#define IP_FRAG_HASH_FNUM	2

/* free mbufs from death row */
void
rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
	uint32_t prefetch)
{
	uint32_t i, k, n;

	k = RTE_MIN(prefetch, dr->cnt);
	n = dr->cnt;

	/* warm up: prefetch the first k mbufs */
	for (i = 0; i != k; i++)
		rte_prefetch0(dr->row[i]);

	/* free mbufs while prefetching k entries ahead */
	for (i = 0; i != n - k; i++) {
		rte_prefetch0(dr->row[i + k]);
		rte_pktmbuf_free(dr->row[i]);
	}

	/* drain the tail: remaining mbufs were already prefetched */
	for (; i != n; i++)
		rte_pktmbuf_free(dr->row[i]);

	dr->cnt = 0;
}

/* create fragmentation table */
struct rte_ip_frag_tbl *
rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
	uint32_t max_entries, uint64_t max_cycles, int socket_id)
{
	struct rte_ip_frag_tbl *tbl;
	size_t sz;
	uint64_t nb_entries;

	/* total entries: buckets (rounded up to a power of two) times
	 * entries per bucket, times the number of hash functions. */
	nb_entries = rte_align32pow2(bucket_num);
	nb_entries *= bucket_entries;
	nb_entries *= IP_FRAG_HASH_FNUM;

	/* check input parameters. */
	if (rte_is_power_of_2(bucket_entries) == 0 ||
			nb_entries > UINT32_MAX || nb_entries == 0 ||
			nb_entries < max_entries) {
		IP_FRAG_LOG_LINE(ERR, "%s: invalid input parameter", __func__);
		return NULL;
	}

	sz = sizeof(*tbl) + nb_entries * sizeof(tbl->pkt[0]);
	if ((tbl = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
			socket_id)) == NULL) {
		IP_FRAG_LOG_LINE(ERR,
			"%s: allocation of %zu bytes at socket %d failed",
			__func__, sz, socket_id);
		return NULL;
	}

	IP_FRAG_LOG_LINE(INFO, "%s: allocated %zu bytes at socket %d",
		__func__, sz, socket_id);

	tbl->max_cycles = max_cycles;
	tbl->max_entries = max_entries;
	tbl->nb_entries = (uint32_t)nb_entries;
	tbl->nb_buckets = bucket_num;
	tbl->bucket_entries = bucket_entries;
	/* rounds a hash value down to the first entry of its bucket */
	tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);

	TAILQ_INIT(&(tbl->lru));
	return tbl;
}

/* delete fragmentation table */
void
rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
{
	struct ip_frag_pkt *fp;

	/* free the mbufs of every packet still held in the table */
	TAILQ_FOREACH(fp, &tbl->lru, lru) {
		ip_frag_free_immediate(fp);
	}

	rte_free(tbl);
}

/* dump frag table statistics to file */
void
rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
{
	uint64_t fail_total, fail_nospace;

	fail_total = tbl->stat.fail_total;
	fail_nospace = tbl->stat.fail_nospace;

	fprintf(f, "max entries:\t%u;\n"
		"entries in use:\t%u;\n"
		"finds/inserts:\t%" PRIu64 ";\n"
		"entries added:\t%" PRIu64 ";\n"
		"entries deleted by timeout:\t%" PRIu64 ";\n"
		"entries reused by timeout:\t%" PRIu64 ";\n"
		"total add failures:\t%" PRIu64 ";\n"
		"add no-space failures:\t%" PRIu64 ";\n"
		"add hash-collisions failures:\t%" PRIu64 ";\n",
		tbl->max_entries,
		tbl->use_entries,
		tbl->stat.find_num,
		tbl->stat.add_num,
		tbl->stat.del_num,
		tbl->stat.reuse_num,
		fail_total,
		fail_nospace,
		fail_total - fail_nospace);
}

/* Delete expired fragments */
void
rte_ip_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms)
{
	uint64_t max_cycles;
	struct ip_frag_pkt *fp;

	max_cycles = tbl->max_cycles;

	TAILQ_FOREACH(fp, &tbl->lru, lru)
		if (max_cycles + fp->start < tms) {
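			/*
			 * Entry has expired: move its mbufs to the
			 * death row for deferred freeing.
			 */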
			/* check that death row has enough space */
			if (RTE_IP_FRAG_DEATH_ROW_MBUF_LEN - dr->cnt >=
					fp->last_idx)
				ip_frag_tbl_del(tbl, dr, fp);
			else
				return;
		} else
			/* LRU list is ordered by age: the first live
			 * entry ends the scan */
			return;
}
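
/*
 * Usage sketch (illustrative, not part of this library): a caller sizes
 * the table from its expected number of concurrent reassembly flows and
 * a fragment lifetime, e.g. one tenth of the TSC frequency for roughly
 * a 100 ms timeout. The flow count and bucket width below are assumptions
 * chosen for the example.
 *
 *	uint32_t max_flows = 4096;
 *	uint32_t bucket_entries = 16;
 *	uint64_t ttl_cycles = rte_get_tsc_hz() / 10;
 *	struct rte_ip_frag_tbl *tbl = rte_ip_frag_table_create(max_flows,
 *		bucket_entries, max_flows, ttl_cycles, rte_socket_id());
 *
 *	if (tbl == NULL)
 *		rte_exit(EXIT_FAILURE, "frag table create failed\n");
 */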