/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell.
 */

#include <rte_graph.h>
#include <rte_graph_worker.h>

#include "pkt_cls_priv.h"
#include "node_private.h"

/* Next node for each ptype; the default '0' maps to "pkt_drop" */
static const uint8_t p_nxt[256] __rte_cache_aligned = {
	[RTE_PTYPE_L3_IPV4] = PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT] = PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT_UNKNOWN] = PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV6] = PKT_CLS_NEXT_IP6_LOOKUP,

	[RTE_PTYPE_L3_IPV6_EXT] = PKT_CLS_NEXT_IP6_LOOKUP,

	[RTE_PTYPE_L3_IPV6_EXT_UNKNOWN] = PKT_CLS_NEXT_IP6_LOOKUP,

	[RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER] = PKT_CLS_NEXT_IP6_LOOKUP,

	[RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L2_ETHER] = PKT_CLS_NEXT_IP6_LOOKUP,

	[RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP6_LOOKUP,
};

static uint16_t
pkt_cls_node_process(struct rte_graph *graph, struct rte_node *node,
		     void **objs, uint16_t nb_objs)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
	uint8_t l0, l1, l2, l3, last_type;
	uint16_t next_index, n_left_from;
	uint16_t held = 0, last_spec = 0;
	struct pkt_cls_node_ctx *ctx;
	void **to_next, **from;
	uint32_t i;

	pkts = (struct rte_mbuf **)objs;
	from = objs;
	n_left_from = nb_objs;

	/* Prefetch the remaining cache lines of the mbuf pointer array */
	for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE)
		rte_prefetch0(&objs[i]);

#if RTE_GRAPH_BURST_SIZE > 64
	/* Prefetch the first quad of mbufs ahead of the main loop */
	for (i = 0; i < 4 && i < n_left_from; i++)
		rte_prefetch0(pkts[i]);
#endif

	ctx = (struct pkt_cls_node_ctx *)node->ctx;
	last_type = ctx->l2l3_type;
	next_index = p_nxt[last_type];

	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node,
					   next_index, nb_objs);
	while (n_left_from >= 4) {
#if RTE_GRAPH_BURST_SIZE > 64
		if (likely(n_left_from > 7)) {
			rte_prefetch0(pkts[4]);
			rte_prefetch0(pkts[5]);
			rte_prefetch0(pkts[6]);
			rte_prefetch0(pkts[7]);
		}
#endif

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];
		pkts += 4;
		n_left_from -= 4;

		l0 = mbuf0->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		l1 = mbuf1->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		l2 = mbuf2->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		l3 = mbuf3->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

		/* Check if they are destined to the same
		 * next node based on l2l3 packet type.
		 */
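		/* Each XOR is zero only when that packet's type matches
		 * the speculated type, so OR-ing the four results yields a
		 * single branch-free "speculation missed" flag for the quad.
		 */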
		uint8_t fix_spec = (last_type ^ l0) | (last_type ^ l1) |
			(last_type ^ l2) | (last_type ^ l3);

		if (unlikely(fix_spec)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from,
				   last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			/* l0 */
			if (p_nxt[l0] == next_index) {
				to_next[0] = from[0];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l0], from[0]);
			}

			/* l1 */
			if (p_nxt[l1] == next_index) {
				to_next[0] = from[1];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l1], from[1]);
			}

			/* l2 */
			if (p_nxt[l2] == next_index) {
				to_next[0] = from[2];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l2], from[2]);
			}

			/* l3 */
			if (p_nxt[l3] == next_index) {
				to_next[0] = from[3];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l3], from[3]);
			}

			/* Update speculated ptype */
			if ((last_type != l3) && (l2 == l3) &&
			    (next_index != p_nxt[l3])) {
				/* Put the current stream for
				 * speculated ltype.
				 */
				rte_node_next_stream_put(graph, node,
							 next_index, held);

				held = 0;

				/* Get next stream for new ltype */
				next_index = p_nxt[l3];
				last_type = l3;
				to_next = rte_node_next_stream_get(graph, node,
								   next_index,
								   nb_objs);
			} else if (next_index == p_nxt[l3]) {
				last_type = l3;
			}

			from += 4;
		} else {
			last_spec += 4;
		}
	}

	while (n_left_from > 0) {
		mbuf0 = pkts[0];

		pkts += 1;
		n_left_from -= 1;

		l0 = mbuf0->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		if (unlikely((l0 != last_type) &&
			     (p_nxt[l0] != next_index))) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from,
				   last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			rte_node_enqueue_x1(graph, node,
					    p_nxt[l0], from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}

	/* !!! Home run !!! */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}

	held += last_spec;
	/* Copy things successfully speculated till now */
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);

	ctx->l2l3_type = last_type;
	return nb_objs;
}

/* Packet Classification Node */
struct rte_node_register pkt_cls_node = {
	.process = pkt_cls_node_process,
	.name = "pkt_cls",

	.nb_edges = PKT_CLS_NEXT_MAX,
	.next_nodes = {
		/* Pkt drop node starts at '0' */
		[PKT_CLS_NEXT_PKT_DROP] = "pkt_drop",
		[PKT_CLS_NEXT_IP4_LOOKUP] = "ip4_lookup",
		[PKT_CLS_NEXT_IP6_LOOKUP] = "ip6_lookup",
	},
};
RTE_NODE_REGISTER(pkt_cls_node);
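
/*
 * A minimal usage sketch, assuming an application wires "pkt_cls" between
 * an ethdev Rx node and the lookup/drop nodes listed above. The graph name
 * "app_graph" and the Rx node instance "ethdev_rx-0-0" (port 0, queue 0)
 * are illustrative assumptions, not defined in this file; rte_graph_create()
 * and struct rte_graph_param are the public rte_graph API.
 *
 *	static const char *patterns[] = {
 *		"ethdev_rx-0-0", "pkt_cls", "ip4_lookup",
 *		"ip6_lookup", "pkt_drop",
 *	};
 *	struct rte_graph_param gconf = {
 *		.socket_id = SOCKET_ID_ANY,
 *		.nb_node_patterns = RTE_DIM(patterns),
 *		.node_patterns = patterns,
 *	};
 *	rte_graph_t graph = rte_graph_create("app_graph", &gconf);
 *
 * Calling rte_graph_walk() on the created graph then invokes this node's
 * process callback whenever the Rx node enqueues a burst to it.
 */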