1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 * Copyright 2018 Mellanox Technologies, Ltd 4 */ 5 6 #include <stdio.h> 7 8 #include <rte_bitops.h> 9 #include <rte_net.h> 10 #include <rte_mbuf.h> 11 #include <rte_ether.h> 12 #include <rte_vxlan.h> 13 #include <rte_ethdev.h> 14 #include <rte_flow.h> 15 16 #include "testpmd.h" 17 18 #define MAX_STRING_LEN 8192 19 20 #define MKDUMPSTR(buf, buf_size, cur_len, ...) \ 21 do { \ 22 if (cur_len >= buf_size) \ 23 break; \ 24 cur_len += snprintf(buf + cur_len, buf_size - cur_len, __VA_ARGS__); \ 25 } while (0) 26 27 static inline void 28 print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr, 29 char print_buf[], size_t buf_size, size_t *cur_len) 30 { 31 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 32 33 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 34 MKDUMPSTR(print_buf, buf_size, *cur_len, "%s%s", what, buf); 35 } 36 37 static inline bool 38 is_timestamp_enabled(const struct rte_mbuf *mbuf) 39 { 40 static uint64_t timestamp_rx_dynflag; 41 int timestamp_rx_dynflag_offset; 42 43 if (timestamp_rx_dynflag == 0) { 44 timestamp_rx_dynflag_offset = rte_mbuf_dynflag_lookup( 45 RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME, NULL); 46 if (timestamp_rx_dynflag_offset < 0) 47 return false; 48 timestamp_rx_dynflag = RTE_BIT64(timestamp_rx_dynflag_offset); 49 } 50 51 return (mbuf->ol_flags & timestamp_rx_dynflag) != 0; 52 } 53 54 static inline rte_mbuf_timestamp_t 55 get_timestamp(const struct rte_mbuf *mbuf) 56 { 57 static int timestamp_dynfield_offset = -1; 58 59 if (timestamp_dynfield_offset < 0) { 60 timestamp_dynfield_offset = rte_mbuf_dynfield_lookup( 61 RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL); 62 if (timestamp_dynfield_offset < 0) 63 return 0; 64 } 65 66 return *RTE_MBUF_DYNFIELD(mbuf, 67 timestamp_dynfield_offset, rte_mbuf_timestamp_t *); 68 } 69 70 static inline void 71 dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], 72 uint16_t nb_pkts, int is_rx) 73 { 74 struct rte_mbuf *mb; 75 const struct rte_ether_hdr *eth_hdr; 76 struct rte_ether_hdr _eth_hdr; 77 uint16_t eth_type; 78 uint64_t ol_flags; 79 uint16_t i, packet_type; 80 uint16_t is_encapsulation; 81 char buf[256]; 82 struct rte_net_hdr_lens hdr_lens; 83 uint32_t sw_packet_type; 84 uint32_t vx_vni; 85 const char *reason; 86 int dynf_index; 87 char print_buf[MAX_STRING_LEN]; 88 size_t buf_size = MAX_STRING_LEN; 89 size_t cur_len = 0; 90 uint64_t restore_info_dynflag; 91 92 if (!nb_pkts) 93 return; 94 restore_info_dynflag = rte_flow_restore_info_dynflag(); 95 MKDUMPSTR(print_buf, buf_size, cur_len, 96 "port %u/queue %u: %s %u packets\n", port_id, queue, 97 is_rx ? "received" : "sent", (unsigned int) nb_pkts); 98 for (i = 0; i < nb_pkts; i++) { 99 struct rte_flow_error error; 100 struct rte_flow_restore_info info = { 0, }; 101 102 mb = pkts[i]; 103 ol_flags = mb->ol_flags; 104 if (rxq_share > 0) 105 MKDUMPSTR(print_buf, buf_size, cur_len, "port %u, ", 106 mb->port); 107 eth_hdr = rte_pktmbuf_read(mb, 0, sizeof(_eth_hdr), &_eth_hdr); 108 eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type); 109 packet_type = mb->packet_type; 110 is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type); 111 if ((ol_flags & restore_info_dynflag) != 0 && 112 rte_flow_get_restore_info(port_id, mb, &info, &error) == 0) { 113 MKDUMPSTR(print_buf, buf_size, cur_len, 114 "restore info:"); 115 if (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL) { 116 struct port_flow_tunnel *port_tunnel; 117 118 port_tunnel = port_flow_locate_tunnel 119 (port_id, &info.tunnel); 120 MKDUMPSTR(print_buf, buf_size, cur_len, 121 " - tunnel"); 122 if (port_tunnel) 123 MKDUMPSTR(print_buf, buf_size, cur_len, 124 " #%u", port_tunnel->id); 125 else 126 MKDUMPSTR(print_buf, buf_size, cur_len, 127 " %s", "-none-"); 128 MKDUMPSTR(print_buf, buf_size, cur_len, 129 " type %s", port_flow_tunnel_type 130 (&info.tunnel)); 131 } else { 132 MKDUMPSTR(print_buf, buf_size, cur_len, 133 " - no tunnel info"); 134 } 135 if (info.flags & RTE_FLOW_RESTORE_INFO_ENCAPSULATED) 136 MKDUMPSTR(print_buf, buf_size, cur_len, 137 " - outer header present"); 138 else 139 MKDUMPSTR(print_buf, buf_size, cur_len, 140 " - no outer header"); 141 if (info.flags & RTE_FLOW_RESTORE_INFO_GROUP_ID) 142 MKDUMPSTR(print_buf, buf_size, cur_len, 143 " - miss group %u", info.group_id); 144 else 145 MKDUMPSTR(print_buf, buf_size, cur_len, 146 " - no miss group"); 147 MKDUMPSTR(print_buf, buf_size, cur_len, "\n"); 148 } 149 print_ether_addr(" src=", ð_hdr->src_addr, 150 print_buf, buf_size, &cur_len); 151 print_ether_addr(" - dst=", ð_hdr->dst_addr, 152 print_buf, buf_size, &cur_len); 153 MKDUMPSTR(print_buf, buf_size, cur_len, 154 " - pool=%s - type=0x%04x - length=%u - nb_segs=%d", 155 mb->pool->name, eth_type, (unsigned int) mb->pkt_len, 156 (int)mb->nb_segs); 157 if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) { 158 MKDUMPSTR(print_buf, buf_size, cur_len, 159 " - RSS hash=0x%x", 160 (unsigned int) mb->hash.rss); 161 MKDUMPSTR(print_buf, buf_size, cur_len, 162 " - RSS queue=0x%x", (unsigned int) queue); 163 } 164 if (ol_flags & RTE_MBUF_F_RX_FDIR) { 165 MKDUMPSTR(print_buf, buf_size, cur_len, 166 " - FDIR matched "); 167 if (ol_flags & RTE_MBUF_F_RX_FDIR_ID) 168 MKDUMPSTR(print_buf, buf_size, cur_len, 169 "ID=0x%x", mb->hash.fdir.hi); 170 else if (ol_flags & RTE_MBUF_F_RX_FDIR_FLX) 171 MKDUMPSTR(print_buf, buf_size, cur_len, 172 "flex bytes=0x%08x %08x", 173 mb->hash.fdir.hi, mb->hash.fdir.lo); 174 else 175 MKDUMPSTR(print_buf, buf_size, cur_len, 176 "hash=0x%x ID=0x%x ", 177 mb->hash.fdir.hash, mb->hash.fdir.id); 178 } 179 if (is_timestamp_enabled(mb)) 180 MKDUMPSTR(print_buf, buf_size, cur_len, 181 " - timestamp %"PRIu64" ", get_timestamp(mb)); 182 if ((is_rx && (ol_flags & RTE_MBUF_F_RX_QINQ) != 0) || 183 (!is_rx && (ol_flags & RTE_MBUF_F_TX_QINQ) != 0)) 184 MKDUMPSTR(print_buf, buf_size, cur_len, 185 " - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x", 186 mb->vlan_tci, mb->vlan_tci_outer); 187 else if ((is_rx && (ol_flags & RTE_MBUF_F_RX_VLAN) != 0) || 188 (!is_rx && (ol_flags & RTE_MBUF_F_TX_VLAN) != 0)) 189 MKDUMPSTR(print_buf, buf_size, cur_len, 190 " - VLAN tci=0x%x", mb->vlan_tci); 191 if (!is_rx && (ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA)) 192 MKDUMPSTR(print_buf, buf_size, cur_len, 193 " - Tx metadata: 0x%x", 194 *RTE_FLOW_DYNF_METADATA(mb)); 195 if (is_rx && (ol_flags & RTE_MBUF_DYNFLAG_RX_METADATA)) 196 MKDUMPSTR(print_buf, buf_size, cur_len, 197 " - Rx metadata: 0x%x", 198 *RTE_FLOW_DYNF_METADATA(mb)); 199 for (dynf_index = 0; dynf_index < 64; dynf_index++) { 200 if (dynf_names[dynf_index][0] != '\0') 201 MKDUMPSTR(print_buf, buf_size, cur_len, 202 " - dynf %s: %d", 203 dynf_names[dynf_index], 204 !!(ol_flags & (1UL << dynf_index))); 205 } 206 if (mb->packet_type) { 207 rte_get_ptype_name(mb->packet_type, buf, sizeof(buf)); 208 MKDUMPSTR(print_buf, buf_size, cur_len, 209 " - hw ptype: %s", buf); 210 } 211 sw_packet_type = rte_net_get_ptype(mb, &hdr_lens, 212 RTE_PTYPE_ALL_MASK); 213 rte_get_ptype_name(sw_packet_type, buf, sizeof(buf)); 214 MKDUMPSTR(print_buf, buf_size, cur_len, " - sw ptype: %s", buf); 215 if (sw_packet_type & RTE_PTYPE_L2_MASK) 216 MKDUMPSTR(print_buf, buf_size, cur_len, " - l2_len=%d", 217 hdr_lens.l2_len); 218 if (sw_packet_type & RTE_PTYPE_L3_MASK) 219 MKDUMPSTR(print_buf, buf_size, cur_len, " - l3_len=%d", 220 hdr_lens.l3_len); 221 if (sw_packet_type & RTE_PTYPE_L4_MASK) 222 MKDUMPSTR(print_buf, buf_size, cur_len, " - l4_len=%d", 223 hdr_lens.l4_len); 224 if (sw_packet_type & RTE_PTYPE_TUNNEL_MASK) 225 MKDUMPSTR(print_buf, buf_size, cur_len, 226 " - tunnel_len=%d", hdr_lens.tunnel_len); 227 if (sw_packet_type & RTE_PTYPE_INNER_L2_MASK) 228 MKDUMPSTR(print_buf, buf_size, cur_len, 229 " - inner_l2_len=%d", hdr_lens.inner_l2_len); 230 if (sw_packet_type & RTE_PTYPE_INNER_L3_MASK) 231 MKDUMPSTR(print_buf, buf_size, cur_len, 232 " - inner_l3_len=%d", hdr_lens.inner_l3_len); 233 if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK) 234 MKDUMPSTR(print_buf, buf_size, cur_len, 235 " - inner_l4_len=%d", hdr_lens.inner_l4_len); 236 237 struct rte_ipv4_hdr *ipv4_hdr; 238 struct rte_ipv6_hdr *ipv6_hdr; 239 struct rte_udp_hdr *udp_hdr; 240 struct rte_tcp_hdr *tcp_hdr; 241 uint8_t l2_len; 242 uint8_t l3_len; 243 uint8_t l4_len; 244 uint8_t l4_proto; 245 uint16_t l4_port; 246 struct rte_vxlan_hdr *vxlan_hdr; 247 248 l2_len = sizeof(struct rte_ether_hdr); 249 250 /* Do not support ipv4 option field */ 251 if (RTE_ETH_IS_IPV4_HDR(packet_type)) { 252 l3_len = sizeof(struct rte_ipv4_hdr); 253 ipv4_hdr = rte_pktmbuf_mtod_offset(mb, 254 struct rte_ipv4_hdr *, 255 l2_len); 256 l4_proto = ipv4_hdr->next_proto_id; 257 } else { 258 l3_len = sizeof(struct rte_ipv6_hdr); 259 ipv6_hdr = rte_pktmbuf_mtod_offset(mb, 260 struct rte_ipv6_hdr *, 261 l2_len); 262 l4_proto = ipv6_hdr->proto; 263 } 264 if (l4_proto == IPPROTO_UDP) { 265 udp_hdr = rte_pktmbuf_mtod_offset(mb, 266 struct rte_udp_hdr *, 267 l2_len + l3_len); 268 l4_port = RTE_BE_TO_CPU_16(udp_hdr->dst_port); 269 if (is_encapsulation) { 270 l4_len = sizeof(struct rte_udp_hdr); 271 vxlan_hdr = rte_pktmbuf_mtod_offset(mb, 272 struct rte_vxlan_hdr *, 273 l2_len + l3_len + l4_len); 274 vx_vni = rte_be_to_cpu_32(vxlan_hdr->vx_vni); 275 MKDUMPSTR(print_buf, buf_size, cur_len, 276 " - VXLAN packet: packet type =%d, " 277 "Destination UDP port =%d, VNI = %d, " 278 "last_rsvd = %d", packet_type, 279 l4_port, vx_vni >> 8, vx_vni & 0xff); 280 } else { 281 MKDUMPSTR(print_buf, buf_size, cur_len, 282 " - Destination UDP port=%d", l4_port); 283 } 284 } else if (l4_proto == IPPROTO_TCP) { 285 tcp_hdr = rte_pktmbuf_mtod_offset(mb, 286 struct rte_tcp_hdr *, 287 l2_len + l3_len); 288 l4_port = RTE_BE_TO_CPU_16(tcp_hdr->dst_port); 289 MKDUMPSTR(print_buf, buf_size, cur_len, 290 " - Destination TCP port=%d", l4_port); 291 } 292 293 MKDUMPSTR(print_buf, buf_size, cur_len, 294 " - %s queue=0x%x", is_rx ? "Receive" : "Send", 295 (unsigned int) queue); 296 MKDUMPSTR(print_buf, buf_size, cur_len, "\n"); 297 if (is_rx) 298 rte_get_rx_ol_flag_list(mb->ol_flags, buf, sizeof(buf)); 299 else 300 rte_get_tx_ol_flag_list(mb->ol_flags, buf, sizeof(buf)); 301 302 MKDUMPSTR(print_buf, buf_size, cur_len, 303 " ol_flags: %s\n", buf); 304 if (rte_mbuf_check(mb, 1, &reason) < 0) 305 MKDUMPSTR(print_buf, buf_size, cur_len, 306 "INVALID mbuf: %s\n", reason); 307 if (cur_len >= buf_size) 308 printf("%s ...\n", print_buf); 309 else 310 printf("%s", print_buf); 311 cur_len = 0; 312 } 313 } 314 315 uint16_t 316 dump_rx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], 317 uint16_t nb_pkts, __rte_unused uint16_t max_pkts, 318 __rte_unused void *user_param) 319 { 320 dump_pkt_burst(port_id, queue, pkts, nb_pkts, 1); 321 return nb_pkts; 322 } 323 324 uint16_t 325 dump_tx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], 326 uint16_t nb_pkts, __rte_unused void *user_param) 327 { 328 dump_pkt_burst(port_id, queue, pkts, nb_pkts, 0); 329 return nb_pkts; 330 } 331 332 uint16_t 333 tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue, 334 struct rte_mbuf *pkts[], uint16_t nb_pkts, 335 __rte_unused void *user_param) 336 { 337 uint16_t i = 0; 338 339 /* 340 * Add metadata value to every Tx packet, 341 * and set ol_flags accordingly. 342 */ 343 if (rte_flow_dynf_metadata_avail()) 344 for (i = 0; i < nb_pkts; i++) { 345 *RTE_FLOW_DYNF_METADATA(pkts[i]) = 346 ports[port_id].tx_metadata; 347 pkts[i]->ol_flags |= RTE_MBUF_DYNFLAG_TX_METADATA; 348 } 349 return nb_pkts; 350 } 351 352 void 353 add_tx_md_callback(portid_t portid) 354 { 355 struct rte_eth_dev_info dev_info; 356 uint16_t queue; 357 int ret; 358 359 if (port_id_is_invalid(portid, ENABLED_WARN)) 360 return; 361 362 ret = eth_dev_info_get_print_err(portid, &dev_info); 363 if (ret != 0) 364 return; 365 366 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 367 if (!ports[portid].tx_set_md_cb[queue]) 368 ports[portid].tx_set_md_cb[queue] = 369 rte_eth_add_tx_callback(portid, queue, 370 tx_pkt_set_md, NULL); 371 } 372 373 void 374 remove_tx_md_callback(portid_t portid) 375 { 376 struct rte_eth_dev_info dev_info; 377 uint16_t queue; 378 int ret; 379 380 if (port_id_is_invalid(portid, ENABLED_WARN)) 381 return; 382 383 ret = eth_dev_info_get_print_err(portid, &dev_info); 384 if (ret != 0) 385 return; 386 387 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 388 if (ports[portid].tx_set_md_cb[queue]) { 389 rte_eth_remove_tx_callback(portid, queue, 390 ports[portid].tx_set_md_cb[queue]); 391 ports[portid].tx_set_md_cb[queue] = NULL; 392 } 393 } 394 395 uint16_t 396 tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue, 397 struct rte_mbuf *pkts[], uint16_t nb_pkts, 398 __rte_unused void *user_param) 399 { 400 uint16_t i = 0; 401 402 if (ports[port_id].mbuf_dynf) 403 for (i = 0; i < nb_pkts; i++) 404 pkts[i]->ol_flags |= ports[port_id].mbuf_dynf; 405 return nb_pkts; 406 } 407 408 void 409 add_tx_dynf_callback(portid_t portid) 410 { 411 struct rte_eth_dev_info dev_info; 412 uint16_t queue; 413 int ret; 414 415 if (port_id_is_invalid(portid, ENABLED_WARN)) 416 return; 417 418 ret = eth_dev_info_get_print_err(portid, &dev_info); 419 if (ret != 0) 420 return; 421 422 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 423 if (!ports[portid].tx_set_dynf_cb[queue]) 424 ports[portid].tx_set_dynf_cb[queue] = 425 rte_eth_add_tx_callback(portid, queue, 426 tx_pkt_set_dynf, NULL); 427 } 428 429 void 430 remove_tx_dynf_callback(portid_t portid) 431 { 432 struct rte_eth_dev_info dev_info; 433 uint16_t queue; 434 int ret; 435 436 if (port_id_is_invalid(portid, ENABLED_WARN)) 437 return; 438 439 ret = eth_dev_info_get_print_err(portid, &dev_info); 440 if (ret != 0) 441 return; 442 443 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 444 if (ports[portid].tx_set_dynf_cb[queue]) { 445 rte_eth_remove_tx_callback(portid, queue, 446 ports[portid].tx_set_dynf_cb[queue]); 447 ports[portid].tx_set_dynf_cb[queue] = NULL; 448 } 449 } 450 451 int 452 eth_dev_info_get_print_err(uint16_t port_id, 453 struct rte_eth_dev_info *dev_info) 454 { 455 int ret; 456 457 ret = rte_eth_dev_info_get(port_id, dev_info); 458 if (ret != 0) 459 fprintf(stderr, 460 "Error during getting device (port %u) info: %s\n", 461 port_id, strerror(-ret)); 462 463 return ret; 464 } 465 466 int 467 eth_dev_conf_get_print_err(uint16_t port_id, struct rte_eth_conf *dev_conf) 468 { 469 int ret; 470 471 ret = rte_eth_dev_conf_get(port_id, dev_conf); 472 if (ret != 0) 473 fprintf(stderr, 474 "Error during getting device configuration (port %u): %s\n", 475 port_id, strerror(-ret)); 476 477 return ret; 478 } 479 480 void 481 eth_set_promisc_mode(uint16_t port, int enable) 482 { 483 int ret; 484 485 if (enable) 486 ret = rte_eth_promiscuous_enable(port); 487 else 488 ret = rte_eth_promiscuous_disable(port); 489 490 if (ret != 0) 491 fprintf(stderr, 492 "Error during %s promiscuous mode for port %u: %s\n", 493 enable ? "enabling" : "disabling", 494 port, rte_strerror(-ret)); 495 } 496 497 void 498 eth_set_allmulticast_mode(uint16_t port, int enable) 499 { 500 int ret; 501 502 if (enable) 503 ret = rte_eth_allmulticast_enable(port); 504 else 505 ret = rte_eth_allmulticast_disable(port); 506 507 if (ret != 0) 508 fprintf(stderr, 509 "Error during %s all-multicast mode for port %u: %s\n", 510 enable ? "enabling" : "disabling", 511 port, rte_strerror(-ret)); 512 } 513 514 int 515 eth_link_get_nowait_print_err(uint16_t port_id, struct rte_eth_link *link) 516 { 517 int ret; 518 519 ret = rte_eth_link_get_nowait(port_id, link); 520 if (ret < 0) 521 fprintf(stderr, 522 "Device (port %u) link get (without wait) failed: %s\n", 523 port_id, rte_strerror(-ret)); 524 525 return ret; 526 } 527 528 int 529 eth_macaddr_get_print_err(uint16_t port_id, struct rte_ether_addr *mac_addr) 530 { 531 int ret; 532 533 ret = rte_eth_macaddr_get(port_id, mac_addr); 534 if (ret != 0) 535 fprintf(stderr, 536 "Error getting device (port %u) mac address: %s\n", 537 port_id, rte_strerror(-ret)); 538 539 return ret; 540 } 541