/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_flow.h>
#include <rte_ip.h>

#include "flow.h"
#include "ipsec-secgw.h"
#include "parser.h"

#define FLOW_RULES_MAX 128

/* Table of flow rules parsed from the configuration file. */
struct flow_rule_entry {
	uint8_t is_eth;
	uint8_t is_ipv4;
	uint8_t is_ipv6;
	union {
		struct {
			struct rte_flow_item_ipv4 spec;
			struct rte_flow_item_ipv4 mask;
		} ipv4;
		struct {
			struct rte_flow_item_ipv6 spec;
			struct rte_flow_item_ipv6 mask;
		} ipv6;
	};
	struct rte_flow_item_mark mark_val;
	uint16_t port;
	uint16_t queue;
	bool is_queue_set;
	bool enable_count;
	bool enable_mark;
	bool set_security_action;
	bool set_mark_action;
	uint32_t mark_action_val;
	struct rte_flow *flow;
} flow_rule_tbl[FLOW_RULES_MAX];

int nb_flow_rule;

static void
ipv4_hdr_print(struct rte_ipv4_hdr *hdr)
{
	char a, b, c, d;

	uint32_t_to_char(rte_bswap32(hdr->src_addr), &a, &b, &c, &d);
	printf("src: %3hhu.%3hhu.%3hhu.%3hhu \t", a, b, c, d);

	uint32_t_to_char(rte_bswap32(hdr->dst_addr), &a, &b, &c, &d);
	printf("dst: %3hhu.%3hhu.%3hhu.%3hhu", a, b, c, d);
}

static int
ipv4_addr_cpy(rte_be32_t *spec, rte_be32_t *mask, char *token,
	      struct parse_status *status)
{
	struct in_addr ip;
	uint32_t depth;

	APP_CHECK(parse_ipv4_addr(token, &ip, &depth) == 0, status,
		"unrecognized input \"%s\", expect valid ipv4 addr", token);
	if (status->status < 0)
		return -1;

	if (depth > 32)
		return -1;

	/* Start from the default all-ones mask, then shorten it to the
	 * requested prefix length.
	 */
	memcpy(mask, &rte_flow_item_ipv4_mask.hdr.src_addr, sizeof(ip));

	*spec = ip.s_addr;

	if (depth < 32)
		*mask = htonl(*mask << (32 - depth));

	return 0;
}

static void
ipv6_hdr_print(struct rte_ipv6_hdr *hdr)
{
	printf("src: " RTE_IPV6_ADDR_FMT " \t", RTE_IPV6_ADDR_SPLIT(&hdr->src_addr));
	printf("dst: " RTE_IPV6_ADDR_FMT, RTE_IPV6_ADDR_SPLIT(&hdr->dst_addr));
}

static int
ipv6_addr_cpy(struct rte_ipv6_addr *spec, struct rte_ipv6_addr *mask, char *token,
	      struct parse_status *status)
{
	struct rte_ipv6_addr ip;
	uint32_t depth, i;

	APP_CHECK(parse_ipv6_addr(token, &ip, &depth) == 0, status,
		"unrecognized input \"%s\", expect valid ipv6 address", token);
	if (status->status < 0)
		return -1;

	*mask = rte_flow_item_ipv6_mask.hdr.src_addr;
	*spec = ip;

	/* Start from the default all-ones mask and clear the bits beyond
	 * the requested prefix length.
	 */
	for (i = depth; i < 128; i++)
		mask->a[i / 8] &= ~(1 << (7 - i % 8));

	return 0;
}

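/*
 * Illustrative examples of the token sequences recognized by
 * parse_flow_tokens() below. The addresses and values are hypothetical and
 * only show the keywords handled here; see the ipsec-secgw documentation
 * for the authoritative "flow" configuration file syntax:
 *
 *   ipv4 src 192.168.0.0/16 dst 10.0.0.0/8 port 0 queue 0 count
 *   mark 123 eth ipv6 dst 2001:db8::/64 port 1 security set_mark 7
 */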
void
parse_flow_tokens(char **tokens, uint32_t n_tokens,
		  struct parse_status *status)
{
	struct flow_rule_entry *rule;
	uint32_t ti = 0;

	if (nb_flow_rule >= FLOW_RULES_MAX) {
		printf("Too many flow rules\n");
		return;
	}

	rule = &flow_rule_tbl[nb_flow_rule];
	memset(rule, 0, sizeof(*rule));

	for (ti = 0; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mark") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			APP_CHECK_TOKEN_IS_NUM(tokens, ti, status);
			if (status->status < 0)
				return;

			rule->mark_val.id = atoi(tokens[ti]);
			rule->enable_mark = true;
			continue;
		}

		if (strcmp(tokens[ti], "eth") == 0) {
			rule->is_eth = true;
			continue;
		}

		if (strcmp(tokens[ti], "ipv4") == 0) {
			rule->is_ipv4 = true;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			if (strcmp(tokens[ti], "src") == 0) {
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				if (ipv4_addr_cpy(&rule->ipv4.spec.hdr.src_addr,
						  &rule->ipv4.mask.hdr.src_addr,
						  tokens[ti], status))
					return;
			}
			if (strcmp(tokens[ti], "dst") == 0) {
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				if (ipv4_addr_cpy(&rule->ipv4.spec.hdr.dst_addr,
						  &rule->ipv4.mask.hdr.dst_addr,
						  tokens[ti], status))
					return;
			}
			continue;
		}

		if (strcmp(tokens[ti], "ipv6") == 0) {
			rule->is_ipv6 = true;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			if (strcmp(tokens[ti], "src") == 0) {
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				if (ipv6_addr_cpy(&rule->ipv6.spec.hdr.src_addr,
						  &rule->ipv6.mask.hdr.src_addr,
						  tokens[ti], status))
					return;
			}
			if (strcmp(tokens[ti], "dst") == 0) {
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				if (ipv6_addr_cpy(&rule->ipv6.spec.hdr.dst_addr,
						  &rule->ipv6.mask.hdr.dst_addr,
						  tokens[ti], status))
					return;
			}
			continue;
		}

		if (strcmp(tokens[ti], "port") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			APP_CHECK_TOKEN_IS_NUM(tokens, ti, status);
			if (status->status < 0)
				return;

			rule->port = atoi(tokens[ti]);
			continue;
		}

		if (strcmp(tokens[ti], "queue") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			APP_CHECK_TOKEN_IS_NUM(tokens, ti, status);
			if (status->status < 0)
				return;

			rule->queue = atoi(tokens[ti]);
			rule->is_queue_set = true;
			continue;
		}

		if (strcmp(tokens[ti], "count") == 0) {
			rule->enable_count = true;
			continue;
		}

		if (strcmp(tokens[ti], "security") == 0) {
			rule->set_security_action = true;
			continue;
		}

		if (strcmp(tokens[ti], "set_mark") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			APP_CHECK_TOKEN_IS_NUM(tokens, ti, status);
			if (status->status < 0)
				return;

			rule->set_mark_action = true;
			rule->mark_action_val = atoi(tokens[ti]);
			continue;
		}

		sprintf(status->parse_msg, "Unrecognized input: %s\n",
			tokens[ti]);
		status->status = -1;
		return;
	}
	printf("\n");

	nb_flow_rule++;
}

#define MAX_RTE_FLOW_PATTERN (5)
#define MAX_RTE_FLOW_ACTIONS (5)

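/*
 * Translate one parsed flow_rule_entry into an rte_flow rule and install it
 * on the rule's port. Pattern items are added in the order
 * MARK -> ETH -> IPV4/IPV6 -> ESP -> END and actions in the order
 * QUEUE -> COUNT -> SECURITY -> MARK -> END, each included only when the
 * corresponding option was given in the configuration. The rule is first
 * checked with rte_flow_validate() and left uninstalled (rule->flow == NULL)
 * if the port cannot support it.
 */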
static void
flow_init_single(struct flow_rule_entry *rule)
{
	struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS] = {};
	struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN] = {};
	struct rte_flow_action_queue queue_action;
	struct rte_flow_action_mark mark_action;
	int ret, pattern_idx = 0, act_idx = 0;
	struct rte_flow_item_mark mark_mask;
	struct rte_flow_attr attr = {};
	struct rte_flow_error err = {};

	attr.egress = 0;
	attr.ingress = 1;

	if (rule->is_queue_set) {
		queue_action.index = rule->queue;
		action[act_idx].type = RTE_FLOW_ACTION_TYPE_QUEUE;
		action[act_idx].conf = &queue_action;
		act_idx++;
	}

	if (rule->enable_count) {
		action[act_idx].type = RTE_FLOW_ACTION_TYPE_COUNT;
		act_idx++;
	}

	if (rule->set_security_action) {
		action[act_idx].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		action[act_idx].conf = NULL;
		act_idx++;
	}

	if (rule->set_mark_action) {
		mark_action.id = rule->mark_action_val;
		action[act_idx].type = RTE_FLOW_ACTION_TYPE_MARK;
		action[act_idx].conf = &mark_action;
		act_idx++;
	}

	action[act_idx].type = RTE_FLOW_ACTION_TYPE_END;
	action[act_idx].conf = NULL;

	if (rule->enable_mark) {
		mark_mask.id = UINT32_MAX;
		pattern[pattern_idx].type = RTE_FLOW_ITEM_TYPE_MARK;
		pattern[pattern_idx].spec = &rule->mark_val;
		pattern[pattern_idx].mask = &mark_mask;
		pattern_idx++;
	}

	if (rule->is_eth) {
		pattern[pattern_idx].type = RTE_FLOW_ITEM_TYPE_ETH;
		pattern_idx++;
	}

	if (rule->is_ipv4) {
		pattern[pattern_idx].type = RTE_FLOW_ITEM_TYPE_IPV4;
		pattern[pattern_idx].spec = &rule->ipv4.spec;
		pattern[pattern_idx].mask = &rule->ipv4.mask;
		pattern_idx++;
	} else if (rule->is_ipv6) {
		pattern[pattern_idx].type = RTE_FLOW_ITEM_TYPE_IPV6;
		pattern[pattern_idx].spec = &rule->ipv6.spec;
		pattern[pattern_idx].mask = &rule->ipv6.mask;
		pattern_idx++;
	}

	if (rule->set_security_action) {
		pattern[pattern_idx].type = RTE_FLOW_ITEM_TYPE_ESP;
		pattern[pattern_idx].spec = NULL;
		pattern[pattern_idx].mask = NULL;
		pattern[pattern_idx].last = NULL;
		pattern_idx++;
	}

	pattern[pattern_idx].type = RTE_FLOW_ITEM_TYPE_END;

	ret = rte_flow_validate(rule->port, &attr, pattern, action, &err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed: %s\n", err.message);
		rule->flow = NULL;
		return;
	}

	rule->flow = rte_flow_create(rule->port, &attr, pattern, action, &err);
	if (rule->flow == NULL)
		RTE_LOG(ERR, IPSEC, "Flow creation failed: %s\n", err.message);
}

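/*
 * Query the COUNT action of every rule that was created with the "count"
 * option and print the hit counter together with a summary of the rule.
 */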
void
flow_print_counters(void)
{
	struct rte_flow_query_count count_query;
	struct rte_flow_action action;
	struct flow_rule_entry *rule;
	struct rte_flow_error error;
	int i = 0, ret = 0;

	action.type = RTE_FLOW_ACTION_TYPE_COUNT;

	for (i = 0; i < nb_flow_rule; i++) {
		rule = &flow_rule_tbl[i];
		if (!rule->flow || !rule->enable_count)
			continue;

		/* Poisoning to make sure PMDs update it in case of error. */
		memset(&error, 0x55, sizeof(error));
		memset(&count_query, 0, sizeof(count_query));
		ret = rte_flow_query(rule->port, rule->flow, &action,
				     &count_query, &error);
		if (ret)
			RTE_LOG(ERR, IPSEC,
				"Failed to get flow counter for port %u, err msg: %s\n",
				rule->port, error.message);

		printf("Flow #%3d:", i);
		if (rule->is_ipv4) {
			printf(" spec ipv4 ");
			ipv4_hdr_print(&rule->ipv4.spec.hdr);
		}
		if (rule->is_ipv6) {
			printf(" spec ipv6 ");
			ipv6_hdr_print(&rule->ipv6.spec.hdr);
		}

		if (rule->set_security_action)
			printf(" Security action set,");

		if (rule->enable_mark)
			printf(" Mark Enabled");

		printf(" Port: %d,", rule->port);
		if (rule->is_queue_set)
			printf(" Queue: %d", rule->queue);
		printf(" Hits: %"PRIu64"\n", count_query.hits);
	}
}

void
flow_init(void)
{
	struct flow_rule_entry *rule;
	int i;

	for (i = 0; i < nb_flow_rule; i++) {
		rule = &flow_rule_tbl[i];
		flow_init_single(rule);
	}

	for (i = 0; i < nb_flow_rule; i++) {
		rule = &flow_rule_tbl[i];
		printf("Flow #%3d: ", i);
		if (rule->is_ipv4) {
			printf("spec ipv4 ");
			ipv4_hdr_print(&rule->ipv4.spec.hdr);
			printf("\n");
			printf(" mask ipv4 ");
			ipv4_hdr_print(&rule->ipv4.mask.hdr);
		}
		if (rule->is_ipv6) {
			printf("spec ipv6 ");
			ipv6_hdr_print(&rule->ipv6.spec.hdr);
			printf("\n");
			printf(" mask ipv6 ");
			ipv6_hdr_print(&rule->ipv6.mask.hdr);
		}

		if (rule->enable_mark)
			printf(", Mark enabled");

		printf("\tPort: %d,", rule->port);
		if (rule->is_queue_set)
			printf(" Queue: %d,", rule->queue);

		if (rule->set_security_action)
			printf(" Security action set,");

		if (rule->set_mark_action)
			printf(" Mark: %d,", rule->mark_action_val);

		if (rule->enable_count)
			printf(" Counter enabled,");

		if (rule->flow == NULL)
			printf(" [UNSUPPORTED]");
		printf("\n");
	}
}