/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_mbuf.h>

#include "ethdev_rx_priv.h"
#include "node_private.h"

static struct ethdev_rx_node_main ethdev_rx_main;

static __rte_always_inline uint16_t
ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node,
			      ethdev_rx_node_ctx_t *ctx)
{
	uint16_t count, next_index;
	uint16_t port, queue;

	port = ctx->port_id;
	queue = ctx->queue_id;
	next_index = ctx->cls_next;

	/* Get pkts from port */
	count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs,
				 RTE_GRAPH_BURST_SIZE);

	if (!count)
		return 0;
	node->idx = count;
	/* Enqueue to next node */
	rte_node_next_stream_move(graph, node, next_index);

	return count;
}

static __rte_always_inline uint16_t
ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node,
		       void **objs, uint16_t cnt)
{
	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
	uint16_t n_pkts = 0;

	RTE_SET_USED(objs);
	RTE_SET_USED(cnt);

	n_pkts = ethdev_rx_node_process_inline(graph, node, ctx);
	return n_pkts;
}

static inline uint32_t
l3_ptype(uint16_t etype, uint32_t ptype)
{
	ptype = ptype & ~RTE_PTYPE_L3_MASK;
	if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
		ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	return ptype;
}

/* Callback for soft ptype parsing */
static uint16_t
eth_pkt_parse_cb(uint16_t port, uint16_t queue, struct rte_mbuf **mbufs,
		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	struct rte_ether_hdr *eth_hdr;
	uint16_t etype, n_left;
	struct rte_mbuf **pkts;

	RTE_SET_USED(port);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);

	pkts = mbufs;
	n_left = nb_pkts;
	while (n_left >= 12) {

		/* Prefetch next-next mbufs */
		rte_prefetch0(pkts[8]);
		rte_prefetch0(pkts[9]);
		rte_prefetch0(pkts[10]);
		rte_prefetch0(pkts[11]);

		/* Prefetch next mbuf data */
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[4], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[5], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[6], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[7], struct rte_ether_hdr *));

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];
		pkts += 4;
		n_left -= 4;

		/* Extract ptype of mbuf0 */
		eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf0->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf1 */
		eth_hdr = rte_pktmbuf_mtod(mbuf1, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf1->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf2 */
		eth_hdr = rte_pktmbuf_mtod(mbuf2, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf2->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf3 */
		eth_hdr = rte_pktmbuf_mtod(mbuf3, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf3->packet_type = l3_ptype(etype, 0);
	}

	while (n_left > 0) {
		mbuf0 = pkts[0];

		pkts += 1;
		n_left -= 1;

		/* Extract ptype of mbuf0 */
		eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf0->packet_type = l3_ptype(etype, 0);
	}

	return nb_pkts;
}

#define MAX_PTYPES 16
static int
ethdev_ptype_setup(uint16_t port, uint16_t queue)
{
	uint8_t l3_ipv4 = 0, l3_ipv6 = 0;
	uint32_t ptypes[MAX_PTYPES];
	int i, rc;

	/* Check IPv4 & IPv6 ptype support */
	rc = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_L3_MASK, ptypes,
					      MAX_PTYPES);
	for (i = 0; i < rc; i++) {
		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
			l3_ipv4 = 1;
		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
			l3_ipv6 = 1;
	}

	if (!l3_ipv4 || !l3_ipv6) {
		node_info("ethdev_rx",
			  "Enabling ptype callback for required ptypes on port %u\n",
			  port);

		if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb,
					     NULL)) {
			node_err("ethdev_rx",
				 "Failed to add rx ptype cb: port=%d, queue=%d\n",
				 port, queue);
			return -EINVAL;
		}
	}

	return 0;
}

static int
ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node)
{
	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
	ethdev_rx_node_elem_t *elem = ethdev_rx_main.head;

	RTE_SET_USED(graph);

	while (elem) {
		if (elem->nid == node->id) {
			/* Update node specific context */
			memcpy(ctx, &elem->ctx, sizeof(ethdev_rx_node_ctx_t));
			break;
		}
		elem = elem->next;
	}

	RTE_VERIFY(elem != NULL);

	ctx->cls_next = ETHDEV_RX_NEXT_PKT_CLS;

	/* Check and setup ptype */
	return ethdev_ptype_setup(ctx->port_id, ctx->queue_id);
}

struct ethdev_rx_node_main *
ethdev_rx_get_node_data_get(void)
{
	return &ethdev_rx_main;
}

static struct rte_node_register ethdev_rx_node_base = {
	.process = ethdev_rx_node_process,
	.flags = RTE_NODE_SOURCE_F,
	.name = "ethdev_rx",

	.init = ethdev_rx_node_init,

	.nb_edges = ETHDEV_RX_NEXT_MAX,
	.next_nodes = {
		/* Default pkt classification node */
		[ETHDEV_RX_NEXT_PKT_CLS] = "pkt_cls",
		[ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup",
	},
};

struct rte_node_register *
ethdev_rx_node_get(void)
{
	return &ethdev_rx_node_base;
}

RTE_NODE_REGISTER(ethdev_rx_node_base);
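
/*
 * Usage note (illustrative, not part of the upstream file): applications do
 * not drive this source node directly. In the DPDK graph examples the
 * (port, queue) pairs are handed to the node library through
 * rte_node_eth_config(), which clones "ethdev_rx" per RX queue and fills the
 * per-node context consumed by ethdev_rx_node_init() above. A minimal sketch,
 * assuming one port with one RX/TX queue and an existing mempool, might look
 * like the following; field names and the exact signature should be checked
 * against rte_node_eth_api.h for the DPDK release in use:
 *
 *	struct rte_node_ethdev_config cfg = {
 *		.port_id = 0,
 *		.num_rx_queues = 1,
 *		.num_tx_queues = 1,
 *		.mp = &mempool,
 *		.mp_count = 1,
 *	};
 *
 *	if (rte_node_eth_config(&cfg, 1, 1) < 0)
 *		rte_exit(EXIT_FAILURE, "ethdev node config failed\n");
 */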