/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2023 Marvell.
 */

#include <arpa/inet.h>
#include <stdlib.h>
#include <sys/socket.h>

#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "rte_node_ip4_api.h"

#include "ip4_reassembly_priv.h"
#include "node_private.h"

struct ip4_reassembly_elem {
	struct ip4_reassembly_elem *next;
	struct ip4_reassembly_ctx ctx;
	rte_node_t node_id;
};

/* IP4 reassembly global data struct */
struct ip4_reassembly_node_main {
	struct ip4_reassembly_elem *head;
};

typedef struct ip4_reassembly_ctx ip4_reassembly_ctx_t;
typedef struct ip4_reassembly_elem ip4_reassembly_elem_t;

static struct ip4_reassembly_node_main ip4_reassembly_main;

static uint16_t
ip4_reassembly_node_process(struct rte_graph *graph, struct rte_node *node, void **objs,
			    uint16_t nb_objs)
{
#define PREFETCH_OFFSET 4
	struct rte_mbuf *mbuf, *mbuf_out;
	struct rte_ip_frag_death_row *dr;
	struct ip4_reassembly_ctx *ctx;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ip_frag_tbl *tbl;
	void **to_next, **to_free;
	uint16_t idx = 0;
	int i;

	ctx = (struct ip4_reassembly_ctx *)node->ctx;

	/* Get core specific reassembly tbl */
	tbl = ctx->tbl;
	dr = ctx->dr;

	for (i = 0; i < PREFETCH_OFFSET && i < nb_objs; i++) {
		rte_prefetch0(rte_pktmbuf_mtod_offset((struct rte_mbuf *)objs[i], void *,
						      sizeof(struct rte_ether_hdr)));
	}

	to_next = node->objs;
	for (i = 0; i < nb_objs - PREFETCH_OFFSET; i++) {
#if RTE_GRAPH_BURST_SIZE > 64
		/* Prefetch next-next mbufs */
		if (likely(i + 8 < nb_objs))
			rte_prefetch0(objs[i + 8]);
#endif
		rte_prefetch0(rte_pktmbuf_mtod_offset((struct rte_mbuf *)objs[i + PREFETCH_OFFSET],
						      void *, sizeof(struct rte_ether_hdr)));
		mbuf = (struct rte_mbuf *)objs[i];

		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
						   sizeof(struct rte_ether_hdr));
		if (rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)) {
			/* prepare mbuf: setup l2_len/l3_len. */
			mbuf->l2_len = sizeof(struct rte_ether_hdr);
			mbuf->l3_len = sizeof(struct rte_ipv4_hdr);

			mbuf_out = rte_ipv4_frag_reassemble_packet(tbl, dr, mbuf, rte_rdtsc(),
								   ipv4_hdr);
		} else {
			mbuf_out = mbuf;
		}

		if (mbuf_out)
			to_next[idx++] = (void *)mbuf_out;
	}

	for (; i < nb_objs; i++) {
		mbuf = (struct rte_mbuf *)objs[i];

		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
						   sizeof(struct rte_ether_hdr));
		if (rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)) {
			/* prepare mbuf: setup l2_len/l3_len. */
			mbuf->l2_len = sizeof(struct rte_ether_hdr);
			mbuf->l3_len = sizeof(struct rte_ipv4_hdr);

			mbuf_out = rte_ipv4_frag_reassemble_packet(tbl, dr, mbuf, rte_rdtsc(),
								   ipv4_hdr);
		} else {
			mbuf_out = mbuf;
		}

		if (mbuf_out)
			to_next[idx++] = (void *)mbuf_out;
	}
	node->idx = idx;
	rte_node_next_stream_move(graph, node, 1);
	if (dr->cnt) {
		to_free = rte_node_next_stream_get(graph, node,
						   RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP, dr->cnt);
		rte_memcpy(to_free, dr->row, dr->cnt * sizeof(to_free[0]));
		rte_node_next_stream_put(graph, node, RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP,
					 dr->cnt);
		idx += dr->cnt;
		dr->cnt = 0;
	}

	return idx;
}

int
rte_node_ip4_reassembly_configure(struct rte_node_ip4_reassembly_cfg *cfg, uint16_t cnt)
{
	ip4_reassembly_elem_t *elem;
	int i;

	for (i = 0; i < cnt; i++) {
		elem = malloc(sizeof(ip4_reassembly_elem_t));
		if (elem == NULL)
			return -ENOMEM;
		elem->ctx.dr = cfg[i].dr;
		elem->ctx.tbl = cfg[i].tbl;
		elem->node_id = cfg[i].node_id;
		elem->next = ip4_reassembly_main.head;
		ip4_reassembly_main.head = elem;
	}

	return 0;
}

static int
ip4_reassembly_node_init(const struct rte_graph *graph, struct rte_node *node)
{
	ip4_reassembly_ctx_t *ctx = (ip4_reassembly_ctx_t *)node->ctx;
	ip4_reassembly_elem_t *elem = ip4_reassembly_main.head;

	RTE_SET_USED(graph);
	while (elem) {
		if (elem->node_id == node->id) {
			/* Update node specific context */
			memcpy(ctx, &elem->ctx, sizeof(ip4_reassembly_ctx_t));
			break;
		}
		elem = elem->next;
	}

	return 0;
}

static struct rte_node_register ip4_reassembly_node = {
	.process = ip4_reassembly_node_process,
	.name = "ip4_reassembly",

	.init = ip4_reassembly_node_init,

	.nb_edges = RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP + 1,
	.next_nodes = {
		[RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP] = "pkt_drop",
	},
};

struct rte_node_register *
ip4_reassembly_node_get(void)
{
	return &ip4_reassembly_node;
}

RTE_NODE_REGISTER(ip4_reassembly_node);
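
/*
 * Usage sketch (illustrative, not part of this file): before the graph is
 * created, the application hands the "ip4_reassembly" node a fragment table
 * and a death row via rte_node_ip4_reassembly_configure(), which the node's
 * init callback above copies into its per-node context. The helper name and
 * the sizing values (flow count, bucket entries, fragment lifetime) below are
 * arbitrary example choices, not recommendations.
 *
 * @code
 *	#include <rte_cycles.h>
 *	#include <rte_graph.h>
 *	#include <rte_ip_frag.h>
 *
 *	#include "rte_node_ip4_api.h"
 *
 *	#define EX_MAX_FLOWS 1024 // example: fragment flows tracked at once
 *	#define EX_TTL_MS    100  // example: fragment lifetime in ms
 *
 *	static struct rte_ip_frag_death_row ex_death_row;
 *
 *	static int
 *	ex_setup_ip4_reassembly(int socket_id)
 *	{
 *		struct rte_node_ip4_reassembly_cfg cfg;
 *		struct rte_ip_frag_tbl *tbl;
 *		uint64_t frag_cycles;
 *
 *		// Convert the example lifetime to TSC cycles; the process
 *		// callback above timestamps fragments with rte_rdtsc().
 *		frag_cycles = (rte_get_tsc_hz() * EX_TTL_MS) / MS_PER_S;
 *
 *		tbl = rte_ip_frag_table_create(EX_MAX_FLOWS, 16, EX_MAX_FLOWS,
 *					       frag_cycles, socket_id);
 *		if (tbl == NULL)
 *			return -1;
 *
 *		cfg.tbl = tbl;
 *		cfg.dr = &ex_death_row;
 *		cfg.node_id = rte_node_from_name("ip4_reassembly");
 *		return rte_node_ip4_reassembly_configure(&cfg, 1);
 *	}
 * @endcode
 */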