1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2019 Intel Corporation 3 */ 4 #include <stdint.h> 5 #include <stdio.h> 6 #include <stdlib.h> 7 #include <inttypes.h> 8 #include <unistd.h> 9 #include <signal.h> 10 #include <string.h> 11 #include <getopt.h> 12 13 #include <cmdline_parse_string.h> 14 #include <cmdline_socket.h> 15 #include <cmdline.h> 16 #include <rte_common.h> 17 #include <rte_rawdev.h> 18 #include <rte_ethdev.h> 19 #include <rte_malloc.h> 20 #include <rte_lcore.h> 21 #include <rte_cycles.h> 22 #include <rte_pmd_ntb.h> 23 #include <rte_mbuf_pool_ops.h> 24 #include "commands.h" 25 26 /* Per-port statistics struct */ 27 struct __rte_cache_aligned ntb_port_statistics { 28 uint64_t tx; 29 uint64_t rx; 30 }; 31 /* Port 0: NTB dev, Port 1: ethdev when iofwd. */ 32 struct ntb_port_statistics ntb_port_stats[2]; 33 34 struct ntb_fwd_stream { 35 uint16_t tx_port; 36 uint16_t rx_port; 37 uint16_t qp_id; 38 uint8_t tx_ntb; /* If ntb device is tx port. */ 39 }; 40 41 struct ntb_fwd_lcore_conf { 42 uint16_t stream_id; 43 uint16_t nb_stream; 44 uint8_t stopped; 45 }; 46 47 enum ntb_fwd_mode { 48 FILE_TRANS = 0, 49 RXONLY, 50 TXONLY, 51 IOFWD, 52 MAX_FWD_MODE, 53 }; 54 static const char *const fwd_mode_s[] = { 55 "file-trans", 56 "rxonly", 57 "txonly", 58 "iofwd", 59 NULL, 60 }; 61 static enum ntb_fwd_mode fwd_mode = MAX_FWD_MODE; 62 63 static struct ntb_fwd_lcore_conf fwd_lcore_conf[RTE_MAX_LCORE]; 64 static struct ntb_fwd_stream *fwd_streams; 65 66 static struct rte_mempool *mbuf_pool; 67 68 #define NTB_DRV_NAME_LEN 7 69 #define MEMPOOL_CACHE_SIZE 256 70 71 static uint8_t in_test; 72 static uint8_t interactive = 1; 73 static uint16_t eth_port_id = RTE_MAX_ETHPORTS; 74 static uint16_t dev_id; 75 76 /* Number of queues, default set as 1 */ 77 static uint16_t num_queues = 1; 78 static uint16_t ntb_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE; 79 80 /* Configurable number of descriptors */ 81 #define NTB_DEFAULT_NUM_DESCS 1024 82 static uint16_t nb_desc = NTB_DEFAULT_NUM_DESCS; 83 84 static uint16_t tx_free_thresh; 85 86 #define NTB_MAX_PKT_BURST 32 87 #define NTB_DFLT_PKT_BURST 32 88 static uint16_t pkt_burst = NTB_DFLT_PKT_BURST; 89 90 #define BURST_TX_RETRIES 64 91 92 static struct rte_eth_conf eth_port_conf = { 93 .rxmode = { 94 .mq_mode = RTE_ETH_MQ_RX_RSS, 95 }, 96 .rx_adv_conf = { 97 .rss_conf = { 98 .rss_key = NULL, 99 .rss_hf = RTE_ETH_RSS_IP, 100 }, 101 }, 102 .txmode = { 103 .mq_mode = RTE_ETH_MQ_TX_NONE, 104 }, 105 }; 106 107 void 108 cmd_help_parsed(__rte_unused void *parsed_result, 109 struct cmdline *cl, 110 __rte_unused void *data) 111 { 112 cmdline_printf( 113 cl, 114 "\n" 115 "The following commands are currently available:\n\n" 116 "Control:\n" 117 " quit :" 118 " Quit the application.\n" 119 "\nTransmission:\n" 120 " send [path] :" 121 " Send [path] file. Only take effect in file-trans mode\n" 122 " start :" 123 " Start transmissions.\n" 124 " stop :" 125 " Stop transmissions.\n" 126 " clear/show port stats :" 127 " Clear/show port stats.\n" 128 " set fwd file-trans/rxonly/txonly/iofwd :" 129 " Set packet forwarding mode.\n" 130 ); 131 132 } 133 void 134 cmd_quit_parsed(__rte_unused void *parsed_result, 135 struct cmdline *cl, 136 __rte_unused void *data) 137 { 138 struct ntb_fwd_lcore_conf *conf; 139 uint32_t lcore_id; 140 141 /* Stop transmission first. */ 142 RTE_LCORE_FOREACH_WORKER(lcore_id) { 143 conf = &fwd_lcore_conf[lcore_id]; 144 145 if (!conf->nb_stream) 146 continue; 147 148 if (conf->stopped) 149 continue; 150 151 conf->stopped = 1; 152 } 153 printf("\nWaiting for lcores to finish...\n"); 154 rte_eal_mp_wait_lcore(); 155 in_test = 0; 156 157 /* Stop traffic and Close port. */ 158 rte_rawdev_stop(dev_id); 159 rte_rawdev_close(dev_id); 160 if (eth_port_id < RTE_MAX_ETHPORTS && fwd_mode == IOFWD) { 161 rte_eth_dev_stop(eth_port_id); 162 rte_eth_dev_close(eth_port_id); 163 } 164 165 cmdline_quit(cl); 166 } 167 168 void 169 cmd_send_parsed(void *parsed_result, 170 __rte_unused struct cmdline *cl, 171 __rte_unused void *data) 172 { 173 struct cmd_send_result *res = parsed_result; 174 struct rte_rawdev_buf *pkts_send[NTB_MAX_PKT_BURST]; 175 struct rte_mbuf *mbuf_send[NTB_MAX_PKT_BURST]; 176 uint64_t size, count, i, j, nb_burst; 177 uint16_t nb_tx, buf_size; 178 unsigned int nb_pkt; 179 size_t queue_id = 0; 180 uint16_t retry = 0; 181 uint32_t val; 182 FILE *file; 183 int ret; 184 185 if (num_queues != 1) { 186 printf("File transmission only supports 1 queue.\n"); 187 num_queues = 1; 188 } 189 190 file = fopen(res->filepath, "r"); 191 if (file == NULL) { 192 printf("Fail to open the file.\n"); 193 return; 194 } 195 196 if (fseek(file, 0, SEEK_END) < 0) { 197 printf("Fail to get file size.\n"); 198 fclose(file); 199 return; 200 } 201 size = ftell(file); 202 if (fseek(file, 0, SEEK_SET) < 0) { 203 printf("Fail to get file size.\n"); 204 fclose(file); 205 return; 206 } 207 208 /* Tell remote about the file size. */ 209 val = size >> 32; 210 rte_rawdev_set_attr(dev_id, "spad_user_0", val); 211 val = size; 212 rte_rawdev_set_attr(dev_id, "spad_user_1", val); 213 printf("Sending file, size is %"PRIu64"\n", size); 214 215 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 216 pkts_send[i] = (struct rte_rawdev_buf *) 217 malloc(sizeof(struct rte_rawdev_buf)); 218 219 buf_size = ntb_buf_size - RTE_PKTMBUF_HEADROOM; 220 count = (size + buf_size - 1) / buf_size; 221 nb_burst = (count + pkt_burst - 1) / pkt_burst; 222 223 for (i = 0; i < nb_burst; i++) { 224 val = RTE_MIN(count, pkt_burst); 225 if (rte_mempool_get_bulk(mbuf_pool, (void **)mbuf_send, 226 val) == 0) { 227 for (nb_pkt = 0; nb_pkt < val; nb_pkt++) { 228 mbuf_send[nb_pkt]->port = dev_id; 229 mbuf_send[nb_pkt]->data_len = 230 fread(rte_pktmbuf_mtod(mbuf_send[nb_pkt], 231 void *), 1, buf_size, file); 232 mbuf_send[nb_pkt]->pkt_len = 233 mbuf_send[nb_pkt]->data_len; 234 pkts_send[nb_pkt]->buf_addr = mbuf_send[nb_pkt]; 235 } 236 } else { 237 for (nb_pkt = 0; nb_pkt < val; nb_pkt++) { 238 mbuf_send[nb_pkt] = 239 rte_mbuf_raw_alloc(mbuf_pool); 240 if (mbuf_send[nb_pkt] == NULL) 241 break; 242 mbuf_send[nb_pkt]->port = dev_id; 243 mbuf_send[nb_pkt]->data_len = 244 fread(rte_pktmbuf_mtod(mbuf_send[nb_pkt], 245 void *), 1, buf_size, file); 246 mbuf_send[nb_pkt]->pkt_len = 247 mbuf_send[nb_pkt]->data_len; 248 pkts_send[nb_pkt]->buf_addr = mbuf_send[nb_pkt]; 249 } 250 } 251 252 ret = rte_rawdev_enqueue_buffers(dev_id, pkts_send, nb_pkt, 253 (void *)queue_id); 254 if (ret < 0) { 255 printf("Enqueue failed with err %d\n", ret); 256 for (j = 0; j < nb_pkt; j++) 257 rte_pktmbuf_free(mbuf_send[j]); 258 goto clean; 259 } 260 nb_tx = ret; 261 while (nb_tx != nb_pkt && retry < BURST_TX_RETRIES) { 262 rte_delay_us(1); 263 ret = rte_rawdev_enqueue_buffers(dev_id, 264 &pkts_send[nb_tx], nb_pkt - nb_tx, 265 (void *)queue_id); 266 if (ret < 0) { 267 printf("Enqueue failed with err %d\n", ret); 268 for (j = nb_tx; j < nb_pkt; j++) 269 rte_pktmbuf_free(mbuf_send[j]); 270 goto clean; 271 } 272 nb_tx += ret; 273 } 274 count -= nb_pkt; 275 } 276 277 /* Clear register after file sending done. */ 278 rte_rawdev_set_attr(dev_id, "spad_user_0", 0); 279 rte_rawdev_set_attr(dev_id, "spad_user_1", 0); 280 printf("Done sending file.\n"); 281 282 clean: 283 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 284 free(pkts_send[i]); 285 fclose(file); 286 } 287 288 #define RECV_FILE_LEN 30 289 static int 290 start_polling_recv_file(void *param) 291 { 292 struct rte_rawdev_buf *pkts_recv[NTB_MAX_PKT_BURST]; 293 struct ntb_fwd_lcore_conf *conf = param; 294 struct rte_mbuf *mbuf; 295 char filepath[RECV_FILE_LEN]; 296 uint64_t val, size, file_len; 297 uint16_t nb_rx, i, file_no; 298 size_t queue_id = 0; 299 FILE *file; 300 int ret; 301 302 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 303 pkts_recv[i] = (struct rte_rawdev_buf *) 304 malloc(sizeof(struct rte_rawdev_buf)); 305 306 file_no = 0; 307 while (!conf->stopped) { 308 snprintf(filepath, RECV_FILE_LEN, "ntb_recv_file%d", file_no); 309 file = fopen(filepath, "w"); 310 if (file == NULL) { 311 printf("Fail to open the file.\n"); 312 return -EINVAL; 313 } 314 315 rte_rawdev_get_attr(dev_id, "spad_user_0", &val); 316 size = val << 32; 317 rte_rawdev_get_attr(dev_id, "spad_user_1", &val); 318 size |= val; 319 320 if (!size) { 321 fclose(file); 322 continue; 323 } 324 325 file_len = 0; 326 nb_rx = NTB_MAX_PKT_BURST; 327 while (file_len < size && !conf->stopped) { 328 ret = rte_rawdev_dequeue_buffers(dev_id, pkts_recv, 329 pkt_burst, (void *)queue_id); 330 if (ret < 0) { 331 printf("Dequeue failed with err %d\n", ret); 332 fclose(file); 333 goto clean; 334 } 335 nb_rx = ret; 336 ntb_port_stats[0].rx += nb_rx; 337 for (i = 0; i < nb_rx; i++) { 338 mbuf = pkts_recv[i]->buf_addr; 339 fwrite(rte_pktmbuf_mtod(mbuf, void *), 1, 340 mbuf->data_len, file); 341 file_len += mbuf->data_len; 342 rte_pktmbuf_free(mbuf); 343 pkts_recv[i]->buf_addr = NULL; 344 } 345 } 346 347 printf("Received file (size: %" PRIu64 ") from peer to %s.\n", 348 size, filepath); 349 fclose(file); 350 file_no++; 351 } 352 353 clean: 354 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 355 free(pkts_recv[i]); 356 return 0; 357 } 358 359 static int 360 start_iofwd_per_lcore(void *param) 361 { 362 struct rte_rawdev_buf *ntb_buf[NTB_MAX_PKT_BURST]; 363 struct rte_mbuf *pkts_burst[NTB_MAX_PKT_BURST]; 364 struct ntb_fwd_lcore_conf *conf = param; 365 struct ntb_fwd_stream fs; 366 uint16_t nb_rx, nb_tx; 367 int i, j, ret; 368 369 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 370 ntb_buf[i] = (struct rte_rawdev_buf *) 371 malloc(sizeof(struct rte_rawdev_buf)); 372 373 while (!conf->stopped) { 374 for (i = 0; i < conf->nb_stream; i++) { 375 fs = fwd_streams[conf->stream_id + i]; 376 if (fs.tx_ntb) { 377 nb_rx = rte_eth_rx_burst(fs.rx_port, 378 fs.qp_id, pkts_burst, 379 pkt_burst); 380 if (unlikely(nb_rx == 0)) 381 continue; 382 for (j = 0; j < nb_rx; j++) 383 ntb_buf[j]->buf_addr = pkts_burst[j]; 384 ret = rte_rawdev_enqueue_buffers(fs.tx_port, 385 ntb_buf, nb_rx, 386 (void *)(size_t)fs.qp_id); 387 if (ret < 0) { 388 printf("Enqueue failed with err %d\n", 389 ret); 390 for (j = 0; j < nb_rx; j++) 391 rte_pktmbuf_free(pkts_burst[j]); 392 goto clean; 393 } 394 nb_tx = ret; 395 ntb_port_stats[0].tx += nb_tx; 396 ntb_port_stats[1].rx += nb_rx; 397 } else { 398 ret = rte_rawdev_dequeue_buffers(fs.rx_port, 399 ntb_buf, pkt_burst, 400 (void *)(size_t)fs.qp_id); 401 if (ret < 0) { 402 printf("Dequeue failed with err %d\n", 403 ret); 404 goto clean; 405 } 406 nb_rx = ret; 407 if (unlikely(nb_rx == 0)) 408 continue; 409 for (j = 0; j < nb_rx; j++) 410 pkts_burst[j] = ntb_buf[j]->buf_addr; 411 nb_tx = rte_eth_tx_burst(fs.tx_port, 412 fs.qp_id, pkts_burst, nb_rx); 413 ntb_port_stats[1].tx += nb_tx; 414 ntb_port_stats[0].rx += nb_rx; 415 } 416 if (unlikely(nb_tx < nb_rx)) { 417 do { 418 rte_pktmbuf_free(pkts_burst[nb_tx]); 419 } while (++nb_tx < nb_rx); 420 } 421 } 422 } 423 424 clean: 425 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 426 free(ntb_buf[i]); 427 428 return 0; 429 } 430 431 static int 432 start_rxonly_per_lcore(void *param) 433 { 434 struct rte_rawdev_buf *ntb_buf[NTB_MAX_PKT_BURST]; 435 struct ntb_fwd_lcore_conf *conf = param; 436 struct ntb_fwd_stream fs; 437 uint16_t nb_rx; 438 int i, j, ret; 439 440 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 441 ntb_buf[i] = (struct rte_rawdev_buf *) 442 malloc(sizeof(struct rte_rawdev_buf)); 443 444 while (!conf->stopped) { 445 for (i = 0; i < conf->nb_stream; i++) { 446 fs = fwd_streams[conf->stream_id + i]; 447 ret = rte_rawdev_dequeue_buffers(fs.rx_port, 448 ntb_buf, pkt_burst, (void *)(size_t)fs.qp_id); 449 if (ret < 0) { 450 printf("Dequeue failed with err %d\n", ret); 451 goto clean; 452 } 453 nb_rx = ret; 454 if (unlikely(nb_rx == 0)) 455 continue; 456 ntb_port_stats[0].rx += nb_rx; 457 458 for (j = 0; j < nb_rx; j++) 459 rte_pktmbuf_free(ntb_buf[j]->buf_addr); 460 } 461 } 462 463 clean: 464 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 465 free(ntb_buf[i]); 466 467 return 0; 468 } 469 470 471 static int 472 start_txonly_per_lcore(void *param) 473 { 474 struct rte_rawdev_buf *ntb_buf[NTB_MAX_PKT_BURST]; 475 struct rte_mbuf *pkts_burst[NTB_MAX_PKT_BURST]; 476 struct ntb_fwd_lcore_conf *conf = param; 477 struct ntb_fwd_stream fs; 478 uint16_t nb_pkt, nb_tx; 479 int i, j, ret; 480 481 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 482 ntb_buf[i] = (struct rte_rawdev_buf *) 483 malloc(sizeof(struct rte_rawdev_buf)); 484 485 while (!conf->stopped) { 486 for (i = 0; i < conf->nb_stream; i++) { 487 fs = fwd_streams[conf->stream_id + i]; 488 if (rte_mempool_get_bulk(mbuf_pool, (void **)pkts_burst, 489 pkt_burst) == 0) { 490 for (nb_pkt = 0; nb_pkt < pkt_burst; nb_pkt++) { 491 pkts_burst[nb_pkt]->port = dev_id; 492 pkts_burst[nb_pkt]->data_len = 493 pkts_burst[nb_pkt]->buf_len - 494 RTE_PKTMBUF_HEADROOM; 495 pkts_burst[nb_pkt]->pkt_len = 496 pkts_burst[nb_pkt]->data_len; 497 ntb_buf[nb_pkt]->buf_addr = 498 pkts_burst[nb_pkt]; 499 } 500 } else { 501 for (nb_pkt = 0; nb_pkt < pkt_burst; nb_pkt++) { 502 pkts_burst[nb_pkt] = 503 rte_pktmbuf_alloc(mbuf_pool); 504 if (pkts_burst[nb_pkt] == NULL) 505 break; 506 pkts_burst[nb_pkt]->port = dev_id; 507 pkts_burst[nb_pkt]->data_len = 508 pkts_burst[nb_pkt]->buf_len - 509 RTE_PKTMBUF_HEADROOM; 510 pkts_burst[nb_pkt]->pkt_len = 511 pkts_burst[nb_pkt]->data_len; 512 ntb_buf[nb_pkt]->buf_addr = 513 pkts_burst[nb_pkt]; 514 } 515 } 516 ret = rte_rawdev_enqueue_buffers(fs.tx_port, ntb_buf, 517 nb_pkt, (void *)(size_t)fs.qp_id); 518 if (ret < 0) { 519 printf("Enqueue failed with err %d\n", ret); 520 for (j = 0; j < nb_pkt; j++) 521 rte_pktmbuf_free(pkts_burst[j]); 522 goto clean; 523 } 524 nb_tx = ret; 525 ntb_port_stats[0].tx += nb_tx; 526 if (unlikely(nb_tx < nb_pkt)) { 527 do { 528 rte_pktmbuf_free(pkts_burst[nb_tx]); 529 } while (++nb_tx < nb_pkt); 530 } 531 } 532 } 533 534 clean: 535 for (i = 0; i < NTB_MAX_PKT_BURST; i++) 536 free(ntb_buf[i]); 537 538 return 0; 539 } 540 541 static int 542 ntb_fwd_config_setup(void) 543 { 544 uint16_t i; 545 546 /* Make sure iofwd has valid ethdev. */ 547 if (fwd_mode == IOFWD && eth_port_id >= RTE_MAX_ETHPORTS) { 548 printf("No ethdev, cannot be in iofwd mode."); 549 return -EINVAL; 550 } 551 552 if (fwd_mode == IOFWD) { 553 fwd_streams = rte_zmalloc("ntb_fwd: fwd_streams", 554 sizeof(struct ntb_fwd_stream) * num_queues * 2, 555 RTE_CACHE_LINE_SIZE); 556 for (i = 0; i < num_queues; i++) { 557 fwd_streams[i * 2].qp_id = i; 558 fwd_streams[i * 2].tx_port = dev_id; 559 fwd_streams[i * 2].rx_port = eth_port_id; 560 fwd_streams[i * 2].tx_ntb = 1; 561 562 fwd_streams[i * 2 + 1].qp_id = i; 563 fwd_streams[i * 2 + 1].tx_port = eth_port_id; 564 fwd_streams[i * 2 + 1].rx_port = dev_id; 565 fwd_streams[i * 2 + 1].tx_ntb = 0; 566 } 567 return 0; 568 } 569 570 if (fwd_mode == RXONLY || fwd_mode == FILE_TRANS) { 571 /* Only support 1 queue in file-trans for in order. */ 572 if (fwd_mode == FILE_TRANS) 573 num_queues = 1; 574 575 fwd_streams = rte_zmalloc("ntb_fwd: fwd_streams", 576 sizeof(struct ntb_fwd_stream) * num_queues, 577 RTE_CACHE_LINE_SIZE); 578 for (i = 0; i < num_queues; i++) { 579 fwd_streams[i].qp_id = i; 580 fwd_streams[i].tx_port = RTE_MAX_ETHPORTS; 581 fwd_streams[i].rx_port = dev_id; 582 fwd_streams[i].tx_ntb = 0; 583 } 584 return 0; 585 } 586 587 if (fwd_mode == TXONLY) { 588 fwd_streams = rte_zmalloc("ntb_fwd: fwd_streams", 589 sizeof(struct ntb_fwd_stream) * num_queues, 590 RTE_CACHE_LINE_SIZE); 591 for (i = 0; i < num_queues; i++) { 592 fwd_streams[i].qp_id = i; 593 fwd_streams[i].tx_port = dev_id; 594 fwd_streams[i].rx_port = RTE_MAX_ETHPORTS; 595 fwd_streams[i].tx_ntb = 1; 596 } 597 } 598 return 0; 599 } 600 601 static void 602 assign_stream_to_lcores(void) 603 { 604 struct ntb_fwd_lcore_conf *conf; 605 struct ntb_fwd_stream *fs; 606 uint16_t nb_streams, sm_per_lcore, sm_id, i; 607 uint32_t lcore_id; 608 uint8_t lcore_num, nb_extra; 609 610 lcore_num = rte_lcore_count(); 611 /* Exclude main core */ 612 lcore_num--; 613 614 nb_streams = (fwd_mode == IOFWD) ? num_queues * 2 : num_queues; 615 616 sm_per_lcore = nb_streams / lcore_num; 617 nb_extra = nb_streams % lcore_num; 618 sm_id = 0; 619 i = 0; 620 621 RTE_LCORE_FOREACH_WORKER(lcore_id) { 622 conf = &fwd_lcore_conf[lcore_id]; 623 624 if (i < nb_extra) { 625 conf->nb_stream = sm_per_lcore + 1; 626 conf->stream_id = sm_id; 627 sm_id = sm_id + sm_per_lcore + 1; 628 } else { 629 conf->nb_stream = sm_per_lcore; 630 conf->stream_id = sm_id; 631 sm_id = sm_id + sm_per_lcore; 632 } 633 634 i++; 635 if (sm_id >= nb_streams) 636 break; 637 } 638 639 /* Print packet forwarding config. */ 640 RTE_LCORE_FOREACH_WORKER(lcore_id) { 641 conf = &fwd_lcore_conf[lcore_id]; 642 643 if (!conf->nb_stream) 644 continue; 645 646 printf("Streams on Lcore %u :\n", lcore_id); 647 for (i = 0; i < conf->nb_stream; i++) { 648 fs = &fwd_streams[conf->stream_id + i]; 649 if (fwd_mode == IOFWD) 650 printf(" + Stream %u : %s%u RX -> %s%u TX," 651 " Q=%u\n", conf->stream_id + i, 652 fs->tx_ntb ? "Eth" : "NTB", fs->rx_port, 653 fs->tx_ntb ? "NTB" : "Eth", fs->tx_port, 654 fs->qp_id); 655 if (fwd_mode == FILE_TRANS || fwd_mode == RXONLY) 656 printf(" + Stream %u : %s%u RX only\n", 657 conf->stream_id, "NTB", fs->rx_port); 658 if (fwd_mode == TXONLY) 659 printf(" + Stream %u : %s%u TX only\n", 660 conf->stream_id, "NTB", fs->tx_port); 661 } 662 } 663 } 664 665 static void 666 start_pkt_fwd(void) 667 { 668 struct ntb_fwd_lcore_conf *conf; 669 struct rte_eth_link eth_link; 670 uint32_t lcore_id; 671 int ret, i; 672 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN]; 673 674 ret = ntb_fwd_config_setup(); 675 if (ret < 0) { 676 printf("Cannot start traffic. Please reset fwd mode.\n"); 677 return; 678 } 679 680 /* If using iofwd, checking ethdev link status first. */ 681 if (fwd_mode == IOFWD) { 682 printf("Checking eth link status...\n"); 683 /* Wait for eth link up at most 100 times. */ 684 for (i = 0; i < 100; i++) { 685 ret = rte_eth_link_get(eth_port_id, ð_link); 686 if (ret < 0) { 687 printf("Link get failed with err %d\n", ret); 688 return; 689 } 690 if (eth_link.link_status) { 691 rte_eth_link_to_str(link_status_text, 692 sizeof(link_status_text), 693 ð_link); 694 printf("Eth%u %s\n", eth_port_id, 695 link_status_text); 696 break; 697 } 698 } 699 if (!eth_link.link_status) { 700 printf("Eth%u link down. Cannot start traffic.\n", 701 eth_port_id); 702 return; 703 } 704 } 705 706 assign_stream_to_lcores(); 707 in_test = 1; 708 709 RTE_LCORE_FOREACH_WORKER(lcore_id) { 710 conf = &fwd_lcore_conf[lcore_id]; 711 712 if (!conf->nb_stream) 713 continue; 714 715 conf->stopped = 0; 716 if (fwd_mode == FILE_TRANS) 717 rte_eal_remote_launch(start_polling_recv_file, 718 conf, lcore_id); 719 else if (fwd_mode == IOFWD) 720 rte_eal_remote_launch(start_iofwd_per_lcore, 721 conf, lcore_id); 722 else if (fwd_mode == RXONLY) 723 rte_eal_remote_launch(start_rxonly_per_lcore, 724 conf, lcore_id); 725 else if (fwd_mode == TXONLY) 726 rte_eal_remote_launch(start_txonly_per_lcore, 727 conf, lcore_id); 728 } 729 } 730 731 void 732 cmd_start_parsed(__rte_unused void *parsed_result, 733 __rte_unused struct cmdline *cl, 734 __rte_unused void *data) 735 { 736 start_pkt_fwd(); 737 } 738 739 void 740 cmd_stop_parsed(__rte_unused void *parsed_result, 741 __rte_unused struct cmdline *cl, 742 __rte_unused void *data) 743 { 744 struct ntb_fwd_lcore_conf *conf; 745 uint32_t lcore_id; 746 747 RTE_LCORE_FOREACH_WORKER(lcore_id) { 748 conf = &fwd_lcore_conf[lcore_id]; 749 750 if (!conf->nb_stream) 751 continue; 752 753 if (conf->stopped) 754 continue; 755 756 conf->stopped = 1; 757 } 758 printf("\nWaiting for lcores to finish...\n"); 759 rte_eal_mp_wait_lcore(); 760 in_test = 0; 761 printf("\nDone.\n"); 762 } 763 764 static void 765 ntb_stats_clear(void) 766 { 767 int nb_ids, i; 768 uint32_t *ids; 769 770 /* Clear NTB dev stats */ 771 nb_ids = rte_rawdev_xstats_names_get(dev_id, NULL, 0); 772 if (nb_ids <= 0) { 773 printf("Error: Cannot get count of xstats\n"); 774 return; 775 } 776 ids = malloc(sizeof(uint32_t) * nb_ids); 777 for (i = 0; i < nb_ids; i++) 778 ids[i] = i; 779 rte_rawdev_xstats_reset(dev_id, ids, nb_ids); 780 printf("\n statistics for NTB port %d cleared\n", dev_id); 781 782 /* Clear Ethdev stats if have any */ 783 if (fwd_mode == IOFWD && eth_port_id != RTE_MAX_ETHPORTS) { 784 rte_eth_stats_reset(eth_port_id); 785 printf("\n statistics for ETH port %d cleared\n", eth_port_id); 786 } 787 } 788 789 static inline void 790 ntb_calculate_throughput(uint16_t port) { 791 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 792 uint64_t mpps_rx, mpps_tx; 793 static uint64_t prev_pkts_rx[2]; 794 static uint64_t prev_pkts_tx[2]; 795 static uint64_t prev_cycles[2]; 796 797 diff_cycles = prev_cycles[port]; 798 prev_cycles[port] = rte_rdtsc(); 799 if (diff_cycles > 0) 800 diff_cycles = prev_cycles[port] - diff_cycles; 801 diff_pkts_rx = (ntb_port_stats[port].rx > prev_pkts_rx[port]) ? 802 (ntb_port_stats[port].rx - prev_pkts_rx[port]) : 0; 803 diff_pkts_tx = (ntb_port_stats[port].tx > prev_pkts_tx[port]) ? 804 (ntb_port_stats[port].tx - prev_pkts_tx[port]) : 0; 805 prev_pkts_rx[port] = ntb_port_stats[port].rx; 806 prev_pkts_tx[port] = ntb_port_stats[port].tx; 807 mpps_rx = diff_cycles > 0 ? 808 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 809 mpps_tx = diff_cycles > 0 ? 810 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 811 printf(" Throughput (since last show)\n"); 812 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 813 mpps_rx, mpps_tx); 814 815 } 816 817 static void 818 ntb_stats_display(void) 819 { 820 struct rte_rawdev_xstats_name *xstats_names; 821 struct rte_eth_stats stats; 822 uint64_t *values; 823 uint32_t *ids; 824 int nb_ids, i; 825 826 printf("###### statistics for NTB port %d #######\n", dev_id); 827 828 /* Get NTB dev stats and stats names */ 829 nb_ids = rte_rawdev_xstats_names_get(dev_id, NULL, 0); 830 if (nb_ids <= 0) { 831 printf("Error: Cannot get count of xstats\n"); 832 return; 833 } 834 xstats_names = malloc(sizeof(struct rte_rawdev_xstats_name) * nb_ids); 835 if (xstats_names == NULL) { 836 printf("Cannot allocate memory for xstats lookup\n"); 837 return; 838 } 839 if (nb_ids != rte_rawdev_xstats_names_get( 840 dev_id, xstats_names, nb_ids)) { 841 printf("Error: Cannot get xstats lookup\n"); 842 free(xstats_names); 843 return; 844 } 845 ids = malloc(sizeof(uint32_t) * nb_ids); 846 for (i = 0; i < nb_ids; i++) 847 ids[i] = i; 848 values = malloc(sizeof(uint64_t) * nb_ids); 849 if (nb_ids != rte_rawdev_xstats_get(dev_id, ids, values, nb_ids)) { 850 printf("Error: Unable to get xstats\n"); 851 free(xstats_names); 852 free(values); 853 free(ids); 854 return; 855 } 856 857 /* Display NTB dev stats */ 858 for (i = 0; i < nb_ids; i++) 859 printf(" %s: %"PRIu64"\n", xstats_names[i].name, values[i]); 860 ntb_calculate_throughput(0); 861 862 /* Get Ethdev stats if have any */ 863 if (fwd_mode == IOFWD && eth_port_id != RTE_MAX_ETHPORTS) { 864 printf("###### statistics for ETH port %d ######\n", 865 eth_port_id); 866 rte_eth_stats_get(eth_port_id, &stats); 867 printf(" RX-packets: %"PRIu64"\n", stats.ipackets); 868 printf(" RX-bytes: %"PRIu64"\n", stats.ibytes); 869 printf(" RX-errors: %"PRIu64"\n", stats.ierrors); 870 printf(" RX-missed: %"PRIu64"\n", stats.imissed); 871 printf(" TX-packets: %"PRIu64"\n", stats.opackets); 872 printf(" TX-bytes: %"PRIu64"\n", stats.obytes); 873 printf(" TX-errors: %"PRIu64"\n", stats.oerrors); 874 ntb_calculate_throughput(1); 875 } 876 877 free(xstats_names); 878 free(values); 879 free(ids); 880 } 881 882 void 883 cmd_show_port_stats_parsed(__rte_unused void *parsed_result, 884 __rte_unused struct cmdline *cl, 885 __rte_unused void *data) 886 { 887 ntb_stats_display(); 888 } 889 890 void 891 cmd_clear_port_stats_parsed(__rte_unused void *parsed_result, 892 __rte_unused struct cmdline *cl, 893 __rte_unused void *data) 894 { 895 ntb_stats_clear(); 896 } 897 898 void 899 cmd_set_fwd_parsed(void *parsed_result, 900 __rte_unused struct cmdline *cl, 901 __rte_unused void *data) 902 { 903 struct cmd_set_fwd_result *res = parsed_result; 904 int i; 905 906 if (in_test) { 907 printf("Please stop traffic first.\n"); 908 return; 909 } 910 911 for (i = 0; i < MAX_FWD_MODE; i++) { 912 if (!strcmp(res->mode, fwd_mode_s[i])) { 913 fwd_mode = i; 914 return; 915 } 916 } 917 printf("Invalid %s packet forwarding mode.\n", res->mode); 918 } 919 920 /* prompt function, called from main on MAIN lcore */ 921 static void 922 prompt(void) 923 { 924 struct cmdline *cl; 925 926 cl = cmdline_stdin_new(main_ctx, "ntb> "); 927 if (cl == NULL) 928 return; 929 930 cmdline_interact(cl); 931 cmdline_stdin_exit(cl); 932 } 933 934 static void 935 signal_handler(int signum) 936 { 937 if (signum == SIGINT || signum == SIGTERM) { 938 printf("\nSignal %d received, preparing to exit...\n", signum); 939 signal(signum, SIG_DFL); 940 kill(getpid(), signum); 941 } 942 } 943 944 #define OPT_BUF_SIZE "buf-size" 945 #define OPT_FWD_MODE "fwd-mode" 946 #define OPT_NB_DESC "nb-desc" 947 #define OPT_TXFREET "txfreet" 948 #define OPT_BURST "burst" 949 #define OPT_QP "qp" 950 951 enum { 952 /* long options mapped to a short option */ 953 OPT_NO_ZERO_COPY_NUM = 1, 954 OPT_BUF_SIZE_NUM, 955 OPT_FWD_MODE_NUM, 956 OPT_NB_DESC_NUM, 957 OPT_TXFREET_NUM, 958 OPT_BURST_NUM, 959 OPT_QP_NUM, 960 }; 961 962 static const char short_options[] = 963 "i" /* interactive mode */ 964 ; 965 966 static const struct option lgopts[] = { 967 {OPT_BUF_SIZE, 1, NULL, OPT_BUF_SIZE_NUM }, 968 {OPT_FWD_MODE, 1, NULL, OPT_FWD_MODE_NUM }, 969 {OPT_NB_DESC, 1, NULL, OPT_NB_DESC_NUM }, 970 {OPT_TXFREET, 1, NULL, OPT_TXFREET_NUM }, 971 {OPT_BURST, 1, NULL, OPT_BURST_NUM }, 972 {OPT_QP, 1, NULL, OPT_QP_NUM }, 973 {0, 0, NULL, 0 } 974 }; 975 976 static void 977 ntb_usage(const char *prgname) 978 { 979 printf("%s [EAL options] -- [options]\n" 980 "-i: run in interactive mode.\n" 981 "-qp=N: set number of queues as N (N > 0, default: 1).\n" 982 "--fwd-mode=N: set fwd mode (N: file-trans | rxonly | " 983 "txonly | iofwd, default: file-trans)\n" 984 "--buf-size=N: set mbuf dataroom size as N (0 < N < 65535," 985 " default: 2048).\n" 986 "--nb-desc=N: set number of descriptors as N (%u <= N <= %u," 987 " default: 1024).\n" 988 "--txfreet=N: set tx free thresh for NTB driver as N. (N >= 0)\n" 989 "--burst=N: set pkt burst as N (0 < N <= %u default: 32).\n", 990 prgname, NTB_MIN_DESC_SIZE, NTB_MAX_DESC_SIZE, 991 NTB_MAX_PKT_BURST); 992 } 993 994 static void 995 ntb_parse_args(int argc, char **argv) 996 { 997 char *prgname = argv[0], **argvopt = argv; 998 int opt, opt_idx, n, i; 999 1000 while ((opt = getopt_long(argc, argvopt, short_options, 1001 lgopts, &opt_idx)) != EOF) { 1002 switch (opt) { 1003 case 'i': 1004 printf("Interactive-mode selected.\n"); 1005 interactive = 1; 1006 break; 1007 case OPT_QP_NUM: 1008 n = atoi(optarg); 1009 if (n > 0) 1010 num_queues = n; 1011 else 1012 rte_exit(EXIT_FAILURE, "q must be > 0.\n"); 1013 break; 1014 case OPT_BUF_SIZE_NUM: 1015 n = atoi(optarg); 1016 if (n > RTE_PKTMBUF_HEADROOM && n <= 0xFFFF) 1017 ntb_buf_size = n; 1018 else 1019 rte_exit(EXIT_FAILURE, "buf-size must be > " 1020 "%u and < 65536.\n", 1021 RTE_PKTMBUF_HEADROOM); 1022 break; 1023 case OPT_FWD_MODE_NUM: 1024 for (i = 0; i < MAX_FWD_MODE; i++) { 1025 if (!strcmp(optarg, fwd_mode_s[i])) { 1026 fwd_mode = i; 1027 break; 1028 } 1029 } 1030 if (i == MAX_FWD_MODE) 1031 rte_exit(EXIT_FAILURE, "Unsupported mode. " 1032 "(Should be: file-trans | rxonly | txonly " 1033 "| iofwd)\n"); 1034 break; 1035 case OPT_NB_DESC_NUM: 1036 n = atoi(optarg); 1037 if (n >= NTB_MIN_DESC_SIZE && n <= NTB_MAX_DESC_SIZE) 1038 nb_desc = n; 1039 else 1040 rte_exit(EXIT_FAILURE, "nb-desc must be within" 1041 " [%u, %u].\n", NTB_MIN_DESC_SIZE, 1042 NTB_MAX_DESC_SIZE); 1043 break; 1044 case OPT_TXFREET_NUM: 1045 n = atoi(optarg); 1046 if (n >= 0) 1047 tx_free_thresh = n; 1048 else 1049 rte_exit(EXIT_FAILURE, "txfreet must be" 1050 " >= 0\n"); 1051 break; 1052 case OPT_BURST_NUM: 1053 n = atoi(optarg); 1054 if (n > 0 && n <= NTB_MAX_PKT_BURST) 1055 pkt_burst = n; 1056 else 1057 rte_exit(EXIT_FAILURE, "burst must be within " 1058 "(0, %u].\n", NTB_MAX_PKT_BURST); 1059 break; 1060 1061 default: 1062 ntb_usage(prgname); 1063 rte_exit(EXIT_FAILURE, 1064 "Command line is incomplete or incorrect.\n"); 1065 break; 1066 } 1067 } 1068 } 1069 1070 static void 1071 ntb_mempool_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr, 1072 void *opaque) 1073 { 1074 const struct rte_memzone *mz = opaque; 1075 rte_memzone_free(mz); 1076 } 1077 1078 static struct rte_mempool * 1079 ntb_mbuf_pool_create(uint16_t mbuf_seg_size, uint32_t nb_mbuf, 1080 struct ntb_dev_info ntb_info, 1081 struct ntb_dev_config *ntb_conf, 1082 unsigned int socket_id) 1083 { 1084 size_t mz_len, total_elt_sz, max_mz_len, left_sz; 1085 struct rte_pktmbuf_pool_private mbp_priv; 1086 char pool_name[RTE_MEMPOOL_NAMESIZE]; 1087 char mz_name[RTE_MEMZONE_NAMESIZE]; 1088 const struct rte_memzone *mz; 1089 struct rte_mempool *mp; 1090 uint64_t align; 1091 uint32_t mz_id; 1092 int ret; 1093 1094 snprintf(pool_name, sizeof(pool_name), "ntb_mbuf_pool_%u", socket_id); 1095 mp = rte_mempool_create_empty(pool_name, nb_mbuf, 1096 (mbuf_seg_size + sizeof(struct rte_mbuf)), 1097 MEMPOOL_CACHE_SIZE, 1098 sizeof(struct rte_pktmbuf_pool_private), 1099 socket_id, 0); 1100 if (mp == NULL) 1101 return NULL; 1102 1103 if (rte_mempool_set_ops_byname(mp, rte_mbuf_best_mempool_ops(), NULL)) { 1104 printf("error setting mempool handler\n"); 1105 goto fail; 1106 } 1107 1108 memset(&mbp_priv, 0, sizeof(mbp_priv)); 1109 mbp_priv.mbuf_data_room_size = mbuf_seg_size; 1110 mbp_priv.mbuf_priv_size = 0; 1111 rte_pktmbuf_pool_init(mp, &mbp_priv); 1112 1113 ntb_conf->mz_list = rte_zmalloc("ntb_memzone_list", 1114 sizeof(struct rte_memzone *) * 1115 ntb_info.mw_cnt, 0); 1116 if (ntb_conf->mz_list == NULL) 1117 goto fail; 1118 1119 /* Put ntb header on mw0. */ 1120 if (ntb_info.mw_size[0] < ntb_info.ntb_hdr_size) { 1121 printf("mw0 (size: %" PRIu64 ") is not enough for ntb hdr" 1122 " (size: %u)\n", ntb_info.mw_size[0], 1123 ntb_info.ntb_hdr_size); 1124 goto fail; 1125 } 1126 1127 total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size; 1128 left_sz = total_elt_sz * nb_mbuf; 1129 for (mz_id = 0; mz_id < ntb_info.mw_cnt; mz_id++) { 1130 /* If populated mbuf is enough, no need to reserve extra mz. */ 1131 if (!left_sz) 1132 break; 1133 snprintf(mz_name, sizeof(mz_name), "ntb_mw_%d", mz_id); 1134 align = ntb_info.mw_size_align ? ntb_info.mw_size[mz_id] : 1135 RTE_CACHE_LINE_SIZE; 1136 /* Reserve ntb header space on memzone 0. */ 1137 max_mz_len = mz_id ? ntb_info.mw_size[mz_id] : 1138 ntb_info.mw_size[mz_id] - ntb_info.ntb_hdr_size; 1139 mz_len = left_sz <= max_mz_len ? left_sz : 1140 (max_mz_len / total_elt_sz * total_elt_sz); 1141 if (!mz_len) 1142 continue; 1143 mz = rte_memzone_reserve_aligned(mz_name, mz_len, socket_id, 1144 RTE_MEMZONE_IOVA_CONTIG, align); 1145 if (mz == NULL) { 1146 printf("Cannot allocate %" PRIu64 " aligned memzone" 1147 " %u\n", align, mz_id); 1148 goto fail; 1149 } 1150 left_sz -= mz_len; 1151 1152 /* Reserve ntb header space on memzone 0. */ 1153 if (mz_id) 1154 ret = rte_mempool_populate_iova(mp, mz->addr, mz->iova, 1155 mz->len, ntb_mempool_mz_free, 1156 (void *)(uintptr_t)mz); 1157 else 1158 ret = rte_mempool_populate_iova(mp, 1159 (void *)((size_t)mz->addr + 1160 ntb_info.ntb_hdr_size), 1161 mz->iova + ntb_info.ntb_hdr_size, 1162 mz->len - ntb_info.ntb_hdr_size, 1163 ntb_mempool_mz_free, 1164 (void *)(uintptr_t)mz); 1165 if (ret <= 0) { 1166 rte_memzone_free(mz); 1167 rte_mempool_free(mp); 1168 return NULL; 1169 } 1170 1171 ntb_conf->mz_list[mz_id] = mz; 1172 } 1173 if (left_sz) { 1174 printf("mw space is not enough for mempool.\n"); 1175 goto fail; 1176 } 1177 1178 ntb_conf->mz_num = mz_id; 1179 rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL); 1180 1181 return mp; 1182 fail: 1183 rte_mempool_free(mp); 1184 return NULL; 1185 } 1186 1187 int 1188 main(int argc, char **argv) 1189 { 1190 struct rte_eth_conf eth_pconf = eth_port_conf; 1191 struct rte_rawdev_info ntb_rawdev_conf; 1192 struct rte_rawdev_info ntb_rawdev_info; 1193 struct rte_eth_dev_info ethdev_info; 1194 struct rte_eth_rxconf eth_rx_conf; 1195 struct rte_eth_txconf eth_tx_conf; 1196 struct ntb_queue_conf ntb_q_conf; 1197 struct ntb_dev_config ntb_conf; 1198 struct ntb_dev_info ntb_info; 1199 uint64_t ntb_link_status; 1200 uint32_t nb_mbuf; 1201 int ret, i; 1202 1203 signal(SIGINT, signal_handler); 1204 signal(SIGTERM, signal_handler); 1205 1206 ret = rte_eal_init(argc, argv); 1207 if (ret < 0) 1208 rte_exit(EXIT_FAILURE, "Error with EAL initialization.\n"); 1209 1210 if (rte_lcore_count() < 2) 1211 rte_exit(EXIT_FAILURE, "Need at least 2 cores\n"); 1212 1213 /* Find 1st ntb rawdev. */ 1214 for (i = 0; i < RTE_RAWDEV_MAX_DEVS; i++) 1215 if (rte_rawdevs[i].driver_name && 1216 (strncmp(rte_rawdevs[i].driver_name, "raw_ntb", 1217 NTB_DRV_NAME_LEN) == 0) && (rte_rawdevs[i].attached == 1)) 1218 break; 1219 1220 if (i == RTE_RAWDEV_MAX_DEVS) 1221 rte_exit(EXIT_FAILURE, "Cannot find any ntb device.\n"); 1222 1223 dev_id = i; 1224 1225 argc -= ret; 1226 argv += ret; 1227 1228 ntb_parse_args(argc, argv); 1229 1230 rte_rawdev_set_attr(dev_id, NTB_QUEUE_SZ_NAME, nb_desc); 1231 printf("Set queue size as %u.\n", nb_desc); 1232 rte_rawdev_set_attr(dev_id, NTB_QUEUE_NUM_NAME, num_queues); 1233 printf("Set queue number as %u.\n", num_queues); 1234 ntb_rawdev_info.dev_private = (rte_rawdev_obj_t)(&ntb_info); 1235 rte_rawdev_info_get(dev_id, &ntb_rawdev_info, sizeof(ntb_info)); 1236 1237 nb_mbuf = nb_desc * num_queues * 2 * 2 + rte_lcore_count() * 1238 MEMPOOL_CACHE_SIZE; 1239 mbuf_pool = ntb_mbuf_pool_create(ntb_buf_size, nb_mbuf, ntb_info, 1240 &ntb_conf, rte_socket_id()); 1241 if (mbuf_pool == NULL) 1242 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool.\n"); 1243 1244 ntb_conf.num_queues = num_queues; 1245 ntb_conf.queue_size = nb_desc; 1246 ntb_rawdev_conf.dev_private = (rte_rawdev_obj_t)(&ntb_conf); 1247 ret = rte_rawdev_configure(dev_id, &ntb_rawdev_conf, sizeof(ntb_conf)); 1248 if (ret) 1249 rte_exit(EXIT_FAILURE, "Can't config ntb dev: err=%d, " 1250 "port=%u\n", ret, dev_id); 1251 1252 ntb_q_conf.tx_free_thresh = tx_free_thresh; 1253 ntb_q_conf.nb_desc = nb_desc; 1254 ntb_q_conf.rx_mp = mbuf_pool; 1255 for (i = 0; i < num_queues; i++) { 1256 /* Setup rawdev queue */ 1257 ret = rte_rawdev_queue_setup(dev_id, i, &ntb_q_conf, 1258 sizeof(ntb_q_conf)); 1259 if (ret < 0) 1260 rte_exit(EXIT_FAILURE, 1261 "Failed to setup ntb queue %u.\n", i); 1262 } 1263 1264 /* Waiting for peer dev up at most 100s.*/ 1265 printf("Checking ntb link status...\n"); 1266 for (i = 0; i < 1000; i++) { 1267 rte_rawdev_get_attr(dev_id, NTB_LINK_STATUS_NAME, 1268 &ntb_link_status); 1269 if (ntb_link_status) { 1270 printf("Peer dev ready, ntb link up.\n"); 1271 break; 1272 } 1273 rte_delay_ms(100); 1274 } 1275 rte_rawdev_get_attr(dev_id, NTB_LINK_STATUS_NAME, &ntb_link_status); 1276 if (ntb_link_status == 0) 1277 printf("Expire 100s. Link is not up. Please restart app.\n"); 1278 1279 ret = rte_rawdev_start(dev_id); 1280 if (ret < 0) 1281 rte_exit(EXIT_FAILURE, "rte_rawdev_start: err=%d, port=%u\n", 1282 ret, dev_id); 1283 1284 /* Find 1st ethdev */ 1285 eth_port_id = rte_eth_find_next(0); 1286 1287 if (eth_port_id < RTE_MAX_ETHPORTS) { 1288 ret = rte_eth_dev_info_get(eth_port_id, ðdev_info); 1289 if (ret) 1290 rte_exit(EXIT_FAILURE, "Can't get info for port %u\n", eth_port_id); 1291 1292 eth_pconf.rx_adv_conf.rss_conf.rss_hf &= 1293 ethdev_info.flow_type_rss_offloads; 1294 ret = rte_eth_dev_configure(eth_port_id, num_queues, 1295 num_queues, ð_pconf); 1296 if (ret) 1297 rte_exit(EXIT_FAILURE, "Can't config ethdev: err=%d, " 1298 "port=%u\n", ret, eth_port_id); 1299 eth_rx_conf = ethdev_info.default_rxconf; 1300 eth_rx_conf.offloads = eth_pconf.rxmode.offloads; 1301 eth_tx_conf = ethdev_info.default_txconf; 1302 eth_tx_conf.offloads = eth_pconf.txmode.offloads; 1303 1304 /* Setup ethdev queue if ethdev exists */ 1305 for (i = 0; i < num_queues; i++) { 1306 ret = rte_eth_rx_queue_setup(eth_port_id, i, nb_desc, 1307 rte_eth_dev_socket_id(eth_port_id), 1308 ð_rx_conf, mbuf_pool); 1309 if (ret < 0) 1310 rte_exit(EXIT_FAILURE, 1311 "Failed to setup eth rxq %u.\n", i); 1312 ret = rte_eth_tx_queue_setup(eth_port_id, i, nb_desc, 1313 rte_eth_dev_socket_id(eth_port_id), 1314 ð_tx_conf); 1315 if (ret < 0) 1316 rte_exit(EXIT_FAILURE, 1317 "Failed to setup eth txq %u.\n", i); 1318 } 1319 1320 ret = rte_eth_dev_start(eth_port_id); 1321 if (ret < 0) 1322 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, " 1323 "port=%u\n", ret, eth_port_id); 1324 } 1325 1326 /* initialize port stats */ 1327 memset(&ntb_port_stats, 0, sizeof(ntb_port_stats)); 1328 1329 /* Set default fwd mode if user doesn't set it. */ 1330 if (fwd_mode == MAX_FWD_MODE && eth_port_id < RTE_MAX_ETHPORTS) { 1331 printf("Set default fwd mode as iofwd.\n"); 1332 fwd_mode = IOFWD; 1333 } 1334 if (fwd_mode == MAX_FWD_MODE) { 1335 printf("Set default fwd mode as file-trans.\n"); 1336 fwd_mode = FILE_TRANS; 1337 } 1338 1339 if (interactive) { 1340 sleep(1); 1341 prompt(); 1342 } else { 1343 start_pkt_fwd(); 1344 } 1345 1346 /* clean up the EAL */ 1347 rte_eal_cleanup(); 1348 1349 return 0; 1350 } 1351