/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include "main.h"

#ifndef MAX_QUEUES
#define MAX_QUEUES 128
#endif

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE	128
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

#define MAX_PKT_BURST 32	/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4	/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE	0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING	0
#define DEVICE_RX		1
#define DEVICE_SAFE_REMOVE	2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 10

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Do VLAN strip on host, enabled by default */
static uint32_t vlan_strip = 1;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";

/* Empty VMDQ configuration structure. Filled in programmatically. */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * Required for 1G NICs such as the I350; without it, IPv4
		 * forwarding in the guest cannot forward packets from one
		 * virtio device to another.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;

const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
#define VLAN_HLEN	4

/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}

/*
 * Validate the device number according to the max pool number obtained from
 * dev_info. If the device number is invalid, print an error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
		return -1;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	rte_eth_dev_info_get(port, &dev_info);

	if (dev_info.max_rx_queues > MAX_QUEUES) {
		rte_exit(EXIT_FAILURE,
			"please define MAX_QUEUES no less than %u in %s\n",
			dev_info.max_rx_queues, __FILE__);
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Enable vlan offload */
	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	tx_rings = (uint16_t)rte_lcore_count();

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;
	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);

	if (port >= rte_eth_dev_count())
		return -1;

	if (enable_tx_csum == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);

	if (enable_tso == 0) {
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
	}

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0)
			return retval;
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n");
		return retval;
	}

	if (promiscuous)
		rte_eth_promiscuous_enable(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}

/*
 * Set character device basename.
 */
static int
us_vhost_parse_basename(const char *q_arg)
{
	/* parse basename string */

	/* strnlen() never returns more than MAX_BASENAME_SZ, so reject names
	 * that would be truncated (i.e. that fill the whole buffer). */
	if (strnlen(q_arg, MAX_BASENAME_SZ) >= MAX_BASENAME_SZ)
		return -1;
	else
		snprintf((char *)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);

	return 0;
}

/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (pm == 0)
		return -1;

	return pm;

}

/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;

}

/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"	--vm2vm [0|1|2]\n"
	"	--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"	--dev-basename <name>\n"
	"	--nb-devices ND\n"
	"	-p PORTMASK: Set mask for ports to be used by application\n"
	"	--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"	--rx-retry [0|1]: disable/enable(default) retries on RX. Enable retry if destination queue is full\n"
	"	--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This only takes effect if RX retries are enabled\n"
	"	--rx-retry-num [0-N]: the number of retries on RX. This only takes effect if RX retries are enabled\n"
	"	--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"	--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
	"	--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"	--dev-basename: The basename to be used for the character device.\n"
	"	--tx-csum [0|1]: disable/enable TX checksum offload.\n"
	"	--tso [0|1]: disable/enable TCP segmentation offload.\n",
	       prgname);
}

/*
 * Parse the arguments given in the command line of the application.
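 * Short options (-p, -P) are handled directly; all long options share
 * getopt_long() case 0 below and are dispatched by comparing
 * long_option[option_index].name.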
467 */ 468 static int 469 us_vhost_parse_args(int argc, char **argv) 470 { 471 int opt, ret; 472 int option_index; 473 unsigned i; 474 const char *prgname = argv[0]; 475 static struct option long_option[] = { 476 {"vm2vm", required_argument, NULL, 0}, 477 {"rx-retry", required_argument, NULL, 0}, 478 {"rx-retry-delay", required_argument, NULL, 0}, 479 {"rx-retry-num", required_argument, NULL, 0}, 480 {"mergeable", required_argument, NULL, 0}, 481 {"vlan-strip", required_argument, NULL, 0}, 482 {"stats", required_argument, NULL, 0}, 483 {"dev-basename", required_argument, NULL, 0}, 484 {"tx-csum", required_argument, NULL, 0}, 485 {"tso", required_argument, NULL, 0}, 486 {NULL, 0, 0, 0}, 487 }; 488 489 /* Parse command line */ 490 while ((opt = getopt_long(argc, argv, "p:P", 491 long_option, &option_index)) != EOF) { 492 switch (opt) { 493 /* Portmask */ 494 case 'p': 495 enabled_port_mask = parse_portmask(optarg); 496 if (enabled_port_mask == 0) { 497 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n"); 498 us_vhost_usage(prgname); 499 return -1; 500 } 501 break; 502 503 case 'P': 504 promiscuous = 1; 505 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode = 506 ETH_VMDQ_ACCEPT_BROADCAST | 507 ETH_VMDQ_ACCEPT_MULTICAST; 508 rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX); 509 510 break; 511 512 case 0: 513 /* Enable/disable vm2vm comms. */ 514 if (!strncmp(long_option[option_index].name, "vm2vm", 515 MAX_LONG_OPT_SZ)) { 516 ret = parse_num_opt(optarg, (VM2VM_LAST - 1)); 517 if (ret == -1) { 518 RTE_LOG(INFO, VHOST_CONFIG, 519 "Invalid argument for " 520 "vm2vm [0|1|2]\n"); 521 us_vhost_usage(prgname); 522 return -1; 523 } else { 524 vm2vm_mode = (vm2vm_type)ret; 525 } 526 } 527 528 /* Enable/disable retries on RX. */ 529 if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) { 530 ret = parse_num_opt(optarg, 1); 531 if (ret == -1) { 532 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n"); 533 us_vhost_usage(prgname); 534 return -1; 535 } else { 536 enable_retry = ret; 537 } 538 } 539 540 /* Enable/disable TX checksum offload. */ 541 if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) { 542 ret = parse_num_opt(optarg, 1); 543 if (ret == -1) { 544 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n"); 545 us_vhost_usage(prgname); 546 return -1; 547 } else 548 enable_tx_csum = ret; 549 } 550 551 /* Enable/disable TSO offload. */ 552 if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) { 553 ret = parse_num_opt(optarg, 1); 554 if (ret == -1) { 555 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n"); 556 us_vhost_usage(prgname); 557 return -1; 558 } else 559 enable_tso = ret; 560 } 561 562 /* Specify the retries delay time (in useconds) on RX. */ 563 if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) { 564 ret = parse_num_opt(optarg, INT32_MAX); 565 if (ret == -1) { 566 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n"); 567 us_vhost_usage(prgname); 568 return -1; 569 } else { 570 burst_rx_delay_time = ret; 571 } 572 } 573 574 /* Specify the retries number on RX. */ 575 if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) { 576 ret = parse_num_opt(optarg, INT32_MAX); 577 if (ret == -1) { 578 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n"); 579 us_vhost_usage(prgname); 580 return -1; 581 } else { 582 burst_rx_retry_num = ret; 583 } 584 } 585 586 /* Enable/disable RX mergeable buffers. 
			 */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					mergeable = !!ret;
					if (ret) {
						vmdq_conf_default.rxmode.jumbo_frame = 1;
						vmdq_conf_default.rxmode.max_rx_pkt_len
							= JUMBO_FRAME_MAX_SIZE;
					}
				}
			}

			/* Enable/disable RX VLAN strip on host. */
			if (!strncmp(long_option[option_index].name,
				"vlan-strip", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for VLAN strip [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					vlan_strip = !!ret;
					vmdq_conf_default.rxmode.hw_vlan_strip =
						vlan_strip;
				}
			}

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_stats = ret;
				}
			}

			/* Set character device basename. */
			if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_basename(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
					us_vhost_usage(prgname);
					return -1;
				}
			}

			break;

		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}

/*
 * Update the global variable NUM_PORTS and the array PORTS according to the
 * number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

static inline struct vhost_dev *__attribute__((always_inline))
find_vhost_dev(struct ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}

	return NULL;
}

/*
 * This function learns the MAC address of the device and registers this
 * along with a VLAN tag to a VMDQ.
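 * It is called from drain_virtio_tx() on the first packet seen from a guest
 * while the device is still in the DEVICE_MAC_LEARNING state.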
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
		vdev->vid,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	/* Enable stripping of the vlan tag as we handle routing. */
	if (vlan_strip)
		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
			(uint16_t)vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}

/*
 * Removes MAC address and VLAN tag from VMDQ. Ensures that nothing is adding
 * buffers to the RX queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
			(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}

static inline void __attribute__((always_inline))
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	ret = rte_vhost_enqueue_burst(dst_vdev->dev, VIRTIO_RXQ, &m, 1);
	if (enable_stats) {
		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}

/*
 * Check if the packet destination MAC address is for a local device. If so
 * then put the packet on that device's RX queue. If not then return.
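 * Returns 0 when the destination is a local virtio device (the caller then
 * frees the mbuf, since virtio_xmit() copies it into the guest ring), and
 * -1 when the packet must be sent out on the physical port instead.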
 */
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	virtio_xmit(dst_vdev, vdev, m);
	return 0;
}

/*
 * Check if the destination MAC of a packet belongs to a local VM and, if so,
 * return its VLAN tag and the length offset.
 */
static inline int __attribute__((always_inline))
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW VLAN strip will reduce the packet length by the size of the VLAN
	 * tag, so the packet length needs to be restored by adding it back.
	 */
	*offset = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);

	return 0;
}

static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}

static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct ipv4_hdr *ipv4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}

	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}

static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}

static inline void __attribute__((always_inline))
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}

/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
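 * Broadcast frames are first replicated to every vhost device; in
 * VM2VM_SOFTWARE mode unicast frames for local VMs are delivered directly,
 * while in VM2VM_HARDWARE mode they are VLAN-tagged and sent back through
 * the NIC.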
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	unsigned offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct ether_hdr *nh;


	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* check if destination is local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
		    (vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;

		/*
		 * Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
				       (offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;

				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}

		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}


static inline void __attribute__((always_inline))
drain_mbuf_table(struct mbuf_table *tx_q)
{
	static uint64_t prev_tsc;
	uint64_t cur_tsc;

	if (tx_q->len == 0)
		return;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;

		RTE_LOG(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
		do_drain_mbuf_table(tx_q);
	}
}

static inline void __attribute__((always_inline))
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct virtio_net *dev = vdev->dev;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);
	if (!rx_count)
		return;

	/*
	 * When "enable_retry" is set, we wait and retry here when there are
	 * not enough free slots in the ring to hold @rx_count packets, which
	 * helps to diminish packet loss.
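	 * If the ring is still short of space after the retries, the enqueue
	 * below simply copies as many packets as fit; the ones that do not
	 * fit are counted as dropped in the stats and freed with the rest of
	 * the burst.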
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(dev->vid,
							VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(dev->vid,
								VIRTIO_RXQ))
				break;
		}
	}

	enqueue_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ,
						pkts, rx_count);
	if (enable_stats) {
		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
	}

	free_pkts(pkts, rx_count);
}

static inline void __attribute__((always_inline))
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;

	count = rte_vhost_dequeue_burst(vdev->dev, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);

	/* setup VMDq for the first packet */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
			free_pkts(pkts, count);
	}

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}

/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      Which drains the host eth RX queue linked to the vhost device,
 *      and delivers all of the packets to the guest virtio RX ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      Which drains the guest virtio TX queue and delivers all of the
 *      packets to the target, which could be another vhost device or the
 *      physical eth dev. The routing is done in the function
 *      "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
	unsigned i;
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		drain_mbuf_table(tx_q);

		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		/*
		 * Process vhost devices
		 */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
		}
	}

	return 0;
}

/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. The device is made volatile here to avoid
 * re-ordering of dev->remove=1, which could otherwise cause an infinite
 * loop in the rte_pause loop.
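 * The removal handshake works as follows: the data core that owns the
 * device sets vdev->ready to DEVICE_SAFE_REMOVE once it stops using it,
 * and every data core then acknowledges the dev_removal_flag with
 * ACK_DEV_REMOVAL before the vhost_dev structure is finally freed.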
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
	struct vhost_dev *vdev = NULL;
	int lcore;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == dev->vid)
			break;
	}
	if (!vdev)
		return;
	/* set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE) {
		rte_pause();
	}

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
		     lcore_vdev_entry);
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);


	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}

	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",
		vdev->vid);

	rte_free(vdev);
}

/*
 * A new device is added to a data core. First the device is added to the
 * main linked list and then allocated to a specific data core.
 */
static int
new_device(struct virtio_net *dev)
{
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;
	int vid = dev->vid;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n",
			vid);
		return -1;
	}
	vdev->dev = dev;
	vdev->vid = vid;

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
			core_add = lcore;
		}
	}
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
			  lcore_vdev_entry);
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
		vid, vdev->coreid);

	return 0;
}

/*
 * These callbacks allow devices to be added to the data core when
 * configuration has fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
	.new_device =  new_device,
	.destroy_device = destroy_device,
};

/*
 * This is a thread that wakes up after a period to print stats if the user
 * has enabled them.
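 * It sleeps for enable_stats seconds between refreshes and redraws the
 * per-device counters from the top of the terminal using ANSI escape
 * sequences.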
 */
static void
print_stats(void)
{
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");

		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total   = vdev->stats.tx_total;
			tx         = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total:       %" PRIu64 "\n"
				"TX dropped:     %" PRIu64 "\n"
				"TX successful:  %" PRIu64 "\n"
				"RX total:       %" PRIu64 "\n"
				"RX dropped:     %" PRIu64 "\n"
				"RX successful:  %" PRIu64 "\n",
				vdev->dev->vid,
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);
		}

		printf("===================================================\n");
	}
}

/* When we receive an INT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
	int ret = rte_vhost_driver_unregister((char *)&dev_basename);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
	exit(0);
}

/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each RX queue would reserve @nr_rx_desc mbufs at queue setup stage
 *
 * - For each switch core (a CPU core that does the packet switching), we
 *   also need to reserve some mbufs for receiving the packets from the
 *   virtio TX queue. How many is enough depends on the usage. It's
 *   normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should reserve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure that, for each switch core, we have
 *   allocated enough mbufs to fill up the mbuf cache.
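 *
 * As a rough illustration (assuming the DPDK default mbuf buffer size of
 * 2176 bytes, i.e. a 2048-byte data room plus a 128-byte headroom), a
 * non-mergeable, non-TSO setup needs roughly
 * (1500 + 2176) * 32 / (2176 - 128) ~= 57 mbufs per switch core before
 * @nr_rx_desc is added on top.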
 */
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
	uint32_t nr_mbufs;
	uint32_t nr_mbufs_per_core;
	uint32_t mtu = 1500;

	if (mergeable)
		mtu = 9000;
	if (enable_tso)
		mtu = 64 * 1024;

	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
			     (mbuf_size - RTE_PKTMBUF_HEADROOM);
	nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

	nr_mbufs  = nr_queues * nr_rx_desc;
	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
	nr_mbufs *= nr_port;

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}

/*
 * Main function, does initialisation and calls the per-lcore functions. The
 * CUSE device is also registered here to handle the IOCTLs.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret;
	uint8_t portid;
	static pthread_t tid;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];

	signal(SIGINT, sigint_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	}

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	/*
	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	 */
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loop back to let the L2 switch do it.
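		 * With loopback enabled, a VM-to-VM frame that is tagged and
		 * queued back to the NIC is switched in hardware to the
		 * destination VMDQ pool instead of leaving the port.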
		 */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");
	}

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}

	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");

		/* Set thread_name for aid in debugging. */
		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
		ret = rte_thread_setname(tid, thread_name);
		if (ret != 0)
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"Cannot set print-stats name\n");
	}

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

	if (mergeable == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

	/* Register vhost (CUSE or user) driver to handle vhost messages. */
	ret = rte_vhost_driver_register((char *)&dev_basename);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");

	rte_vhost_driver_callback_register(&virtio_net_device_ops);

	/* Start CUSE session. */
	rte_vhost_driver_session_start();
	return 0;

}