/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */

#include <rte_malloc.h>
#include <ethdev_driver.h>

#include "ionic.h"
#include "ionic_logs.h"
#include "ionic_lif.h"
#include "ionic_ethdev.h"
#include "ionic_rx_filter.h"
#include "ionic_rxtx.h"

/* queuetype support level */
static const uint8_t ionic_qtype_vers[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,	/* 0 = Base version with CQ+SG support
					 * 1 =   ... with EQ
					 * 2 =   ... with CMB
					 */
	[IONIC_QTYPE_TXQ]     = 3,	/* 0 = Base version with CQ+SG support
					 * 1 =   ... with Tx SG version 1
					 * 2 =   ... with EQ
					 * 3 =   ... with CMB
					 */
};

static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);

int
ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.type = q->type,
			.index = rte_cpu_to_le_32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

int
ionic_qcq_disable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.type = q->type,
			.index = rte_cpu_to_le_32(q->index),
			.oper = IONIC_Q_DISABLE,
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

void
ionic_lif_stop(struct ionic_lif *lif)
{
	uint32_t i;

	IONIC_PRINT_CALL();

	lif->state &= ~IONIC_LIF_F_UP;

	for (i = 0; i < lif->nrxqcqs; i++) {
		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
		if (rxq->flags & IONIC_QCQ_F_INITED)
			(void)ionic_dev_rx_queue_stop(lif->eth_dev, i);
	}

	for (i = 0; i < lif->ntxqcqs; i++) {
		struct ionic_tx_qcq *txq = lif->txqcqs[i];
		if (txq->flags & IONIC_QCQ_F_INITED)
			(void)ionic_dev_tx_queue_stop(lif->eth_dev, i);
	}
}

void
ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	int err;

	IONIC_PRINT_CALL();

	ionic_dev_cmd_lif_reset(idev);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err)
		IONIC_PRINT(WARNING, "Failed to reset %s", lif->name);
}

static void
ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats)
{
	struct ionic_lif_stats *ls = &lif->info->stats;
	uint32_t i;
	uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t)
			RTE_ETHDEV_QUEUE_STAT_CNTRS);
	uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t)
			RTE_ETHDEV_QUEUE_STAT_CNTRS);

	memset(stats, 0, sizeof(*stats));

	if (ls == NULL) {
		IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized",
			lif->port_id);
		return;
	}

	/* RX */

	stats->ipackets = ls->rx_ucast_packets +
		ls->rx_mcast_packets +
		ls->rx_bcast_packets;

	stats->ibytes = ls->rx_ucast_bytes +
		ls->rx_mcast_bytes +
		ls->rx_bcast_bytes;

	for (i = 0; i < lif->nrxqcqs; i++) {
		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
		stats->imissed +=
			rx_stats->no_cb_arg +
			rx_stats->bad_cq_status +
			rx_stats->no_room +
			rx_stats->bad_len;
	}

	stats->imissed +=
		ls->rx_ucast_drop_packets +
		ls->rx_mcast_drop_packets +
		ls->rx_bcast_drop_packets;

	stats->imissed +=
		ls->rx_queue_empty +
		ls->rx_dma_error +
		ls->rx_queue_disabled +
		ls->rx_desc_fetch_error +
		ls->rx_desc_data_error;

	for (i = 0; i < num_rx_q_counters; i++) {
		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
		stats->q_ipackets[i] = rx_stats->packets;
		stats->q_ibytes[i] = rx_stats->bytes;
		stats->q_errors[i] =
			rx_stats->no_cb_arg +
			rx_stats->bad_cq_status +
			rx_stats->no_room +
			rx_stats->bad_len;
	}

	/* TX */

	stats->opackets = ls->tx_ucast_packets +
		ls->tx_mcast_packets +
		ls->tx_bcast_packets;

	stats->obytes = ls->tx_ucast_bytes +
		ls->tx_mcast_bytes +
		ls->tx_bcast_bytes;

	for (i = 0; i < lif->ntxqcqs; i++) {
		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
		stats->oerrors += tx_stats->drop;
	}

	stats->oerrors +=
		ls->tx_ucast_drop_packets +
		ls->tx_mcast_drop_packets +
		ls->tx_bcast_drop_packets;

	stats->oerrors +=
		ls->tx_dma_error +
		ls->tx_queue_disabled +
		ls->tx_desc_fetch_error +
		ls->tx_desc_data_error;

	for (i = 0; i < num_tx_q_counters; i++) {
		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
		stats->q_opackets[i] = tx_stats->packets;
		stats->q_obytes[i] = tx_stats->bytes;
	}
}

void
ionic_lif_get_stats(const struct ionic_lif *lif,
		struct rte_eth_stats *stats)
{
	ionic_lif_get_abs_stats(lif, stats);

	stats->ipackets -= lif->stats_base.ipackets;
	stats->opackets -= lif->stats_base.opackets;
	stats->ibytes -= lif->stats_base.ibytes;
	stats->obytes -= lif->stats_base.obytes;
	stats->imissed -= lif->stats_base.imissed;
	stats->ierrors -= lif->stats_base.ierrors;
	stats->oerrors -= lif->stats_base.oerrors;
	stats->rx_nombuf -= lif->stats_base.rx_nombuf;
}

void
ionic_lif_reset_stats(struct ionic_lif *lif)
{
	uint32_t i;

	for (i = 0; i < lif->nrxqcqs; i++) {
		memset(&lif->rxqcqs[i]->stats, 0,
			sizeof(struct ionic_rx_stats));
		memset(&lif->txqcqs[i]->stats, 0,
			sizeof(struct ionic_tx_stats));
	}

	ionic_lif_get_abs_stats(lif, &lif->stats_base);
}

void
ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats)
{
	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
	uint64_t *stats64 = (uint64_t *)stats;
	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;

	for (i = 0; i < count; i++)
		stats64[i] = lif_stats64[i] - lif_stats64_base[i];
}

void
ionic_lif_reset_hw_stats(struct ionic_lif *lif)
{
	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;

	for (i = 0; i < count; i++)
		lif_stats64_base[i] = lif_stats64[i];
}

static int
ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	int err;

	memcpy(ctx.cmd.rx_filter_add.mac.addr,
			addr, RTE_ETHER_ADDR_LEN);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	IONIC_PRINT(INFO, "rx_filter add (id %d)",
		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
}

static int
ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
		},
	};
	struct ionic_rx_filter *f;
	int err;

	IONIC_PRINT_CALL();

	rte_spinlock_lock(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		rte_spinlock_unlock(&lif->rx_filters.lock);
		return -ENOENT;
	}

	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
	ionic_rx_filter_free(f);

	rte_spinlock_unlock(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	IONIC_PRINT(INFO, "rx_filter del (id %d)",
		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));

	return 0;
}

int
ionic_dev_add_mac(struct rte_eth_dev *eth_dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);

	IONIC_PRINT_CALL();

	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
}

void
ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_adapter *adapter = lif->adapter;
	struct rte_ether_addr *mac_addr;

	IONIC_PRINT_CALL();

	if (index >= adapter->max_mac_addrs) {
		IONIC_PRINT(WARNING,
			"Index %u is above MAC filter limit %u",
			index, adapter->max_mac_addrs);
		return;
	}

	mac_addr = &eth_dev->data->mac_addrs[index];

	if (!rte_is_valid_assigned_ether_addr(mac_addr))
		return;

	ionic_lif_addr_del(lif, (const uint8_t *)mac_addr);
}

int
ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);

	IONIC_PRINT_CALL();

	if (mac_addr == NULL) {
		IONIC_PRINT(NOTICE, "New mac is null");
		return -1;
	}

	if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) {
		IONIC_PRINT(INFO, "Deleting mac addr %pM",
			lif->mac_addr);
		ionic_lif_addr_del(lif, lif->mac_addr);
		memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN);
	}

	IONIC_PRINT(INFO, "Updating mac addr");

	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr);

	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
}

static int
ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = rte_cpu_to_le_16(vid),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid,
		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
}

static int
ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
		},
	};
	struct ionic_rx_filter *f;
	int err;

	IONIC_PRINT_CALL();

	rte_spinlock_lock(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		rte_spinlock_unlock(&lif->rx_filters.lock);
		return -ENOENT;
	}

	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
	ionic_rx_filter_free(f);
	rte_spinlock_unlock(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid,
		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));

	return 0;
}

int
ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
		int on)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	int err;

	if (on)
		err = ionic_vlan_rx_add_vid(lif, vlan_id);
	else
		err = ionic_vlan_rx_kill_vid(lif, vlan_id);

	return err;
}

static void
ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.rx_mode_set = {
			.opcode = IONIC_CMD_RX_MODE_SET,
			.rx_mode = rte_cpu_to_le_16(rx_mode),
		},
	};
	int err;

	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI");

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		IONIC_PRINT(ERR, "Failure setting RX mode");
}

static void
ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
{
	if (lif->rx_mode != rx_mode) {
		lif->rx_mode = rx_mode;
		ionic_lif_rx_mode(lif, rx_mode);
	}
}

int
ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	uint32_t rx_mode = lif->rx_mode;

	IONIC_PRINT_CALL();

	rx_mode |= IONIC_RX_MODE_F_PROMISC;

	ionic_set_rx_mode(lif, rx_mode);

	return 0;
}

int
ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	uint32_t rx_mode = lif->rx_mode;

	rx_mode &= ~IONIC_RX_MODE_F_PROMISC;

	ionic_set_rx_mode(lif, rx_mode);

	return 0;
}

int
ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	uint32_t rx_mode = lif->rx_mode;

	rx_mode |= IONIC_RX_MODE_F_ALLMULTI;

	ionic_set_rx_mode(lif, rx_mode);

	return 0;
}

int
ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	uint32_t rx_mode = lif->rx_mode;

	rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;

	ionic_set_rx_mode(lif, rx_mode);

	return 0;
}

int
ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = rte_cpu_to_le_32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return 0;
}

int
ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic_adapter *adapter = lif->adapter;
	struct ionic_dev *idev = &adapter->idev;
	unsigned long index;

	/*
	 * Note: interrupt handler is called for index = 0 only
	 * (we use interrupts for the notifyq only anyway,
	 * which has index = 0)
	 */

	for (index = 0; index < adapter->nintrs; index++)
		if (!adapter->intrs[index])
			break;

	if (index == adapter->nintrs)
		return -ENOSPC;

	adapter->intrs[index] = true;

	ionic_intr_init(idev, intr, index);

	return 0;
}

static int
ionic_qcq_alloc(struct ionic_lif *lif,
		uint8_t type,
		size_t struct_size,
		uint32_t socket_id,
		uint32_t index,
		const char *type_name,
		uint16_t flags,
		uint16_t num_descs,
		uint16_t desc_size,
		uint16_t cq_desc_size,
		uint16_t sg_desc_size,
		struct ionic_qcq **qcq)
{
	struct ionic_qcq *new;
	uint32_t q_size, cq_size, sg_size, total_size;
	void *q_base, *cq_base, *sg_base;
	rte_iova_t q_base_pa = 0;
	rte_iova_t cq_base_pa = 0;
	rte_iova_t sg_base_pa = 0;
	int err;

	*qcq = NULL;

	q_size = num_descs * desc_size;
	cq_size = num_descs * cq_desc_size;
	sg_size = num_descs * sg_desc_size;

	total_size = RTE_ALIGN(q_size, rte_mem_page_size()) +
		RTE_ALIGN(cq_size, rte_mem_page_size());
	/*
	 * Note: aligning q_size/cq_size is not enough, because cq_base must
	 * also be page aligned while q_base may not land on a page boundary.
	 * Add rte_mem_page_size() of padding to cover the alignment.
	 */
	total_size += rte_mem_page_size();

	if (flags & IONIC_QCQ_F_SG) {
		total_size += RTE_ALIGN(sg_size, rte_mem_page_size());
		total_size += rte_mem_page_size();
	}

	new = rte_zmalloc("ionic", struct_size, 0);
	if (!new) {
		IONIC_PRINT(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	new->lif = lif;

	new->q.info = rte_calloc_socket("ionic",
				num_descs, sizeof(void *),
				rte_mem_page_size(), socket_id);
	if (!new->q.info) {
		IONIC_PRINT(ERR, "Cannot allocate queue info");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;

	err = ionic_q_init(&new->q, index, num_descs);
	if (err) {
		IONIC_PRINT(ERR, "Queue initialization failed");
		goto err_out_free_info;
	}

	err = ionic_cq_init(&new->cq, num_descs);
	if (err) {
		IONIC_PRINT(ERR, "Completion queue initialization failed");
		goto err_out_free_info;
	}

	new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev,
		type_name, index /* queue_idx */,
		total_size, IONIC_ALIGN, socket_id);

	if (!new->base_z) {
		IONIC_PRINT(ERR, "Cannot reserve queue DMA memory");
		err = -ENOMEM;
		goto err_out_free_info;
	}

	new->base = new->base_z->addr;
	new->base_pa = new->base_z->iova;

	q_base = new->base;
	q_base_pa = new->base_pa;

	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size,
			rte_mem_page_size());
	cq_base_pa = RTE_ALIGN(q_base_pa + q_size,
			rte_mem_page_size());

	if (flags & IONIC_QCQ_F_SG) {
		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
				rte_mem_page_size());
		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size,
				rte_mem_page_size());
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx "
		"SG-base-PA = %#jx",
		q_base_pa, cq_base_pa, sg_base_pa);

	ionic_q_map(&new->q, q_base, q_base_pa);
	ionic_cq_map(&new->cq, cq_base, cq_base_pa);

	*qcq = new;

	return 0;

err_out_free_info:
	rte_free(new->q.info);
err_out_free_qcq:
	rte_free(new);

	return err;
}

void
ionic_qcq_free(struct ionic_qcq *qcq)
{
	if (qcq->base_z) {
		qcq->base = NULL;
		qcq->base_pa = 0;
		rte_memzone_free(qcq->base_z);
		qcq->base_z = NULL;
	}

	if (qcq->q.info) {
		rte_free(qcq->q.info);
		qcq->q.info = NULL;
	}

	rte_free(qcq);
}

int
ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
		uint16_t nrxq_descs, struct ionic_rx_qcq **rxq_out)
{
	struct ionic_rx_qcq *rxq;
	uint16_t flags;
	int err;

	flags = IONIC_QCQ_F_SG;
	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_RXQ,
		sizeof(struct ionic_rx_qcq),
		socket_id,
		index,
		"rx",
		flags,
		nrxq_descs,
		sizeof(struct ionic_rxq_desc),
		sizeof(struct ionic_rxq_comp),
		sizeof(struct ionic_rxq_sg_desc),
		(struct ionic_qcq **)&rxq);
	if (err)
		return err;

	rxq->flags = flags;

	lif->rxqcqs[index] = rxq;
	*rxq_out = rxq;

	return 0;
}

int
ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
		uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
{
	struct ionic_tx_qcq *txq;
	uint16_t flags, num_segs_fw;
	int err;

	flags = IONIC_QCQ_F_SG;

	num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;

	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_TXQ,
		sizeof(struct ionic_tx_qcq),
		socket_id,
		index,
		"tx",
		flags,
		ntxq_descs,
		sizeof(struct ionic_txq_desc),
		sizeof(struct ionic_txq_comp),
		sizeof(struct ionic_txq_sg_desc_v1),
		(struct ionic_qcq **)&txq);
	if (err)
		return err;

	txq->flags = flags;
	txq->num_segs_fw = num_segs_fw;

	lif->txqcqs[index] = txq;
	*txq_out = txq;

	return 0;
}

static int
ionic_admin_qcq_alloc(struct ionic_lif *lif)
{
	uint16_t flags = 0;
	int err;

	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_ADMINQ,
		sizeof(struct ionic_admin_qcq),
		rte_socket_id(),
		0,
		"admin",
		flags,
		IONIC_ADMINQ_LENGTH,
		sizeof(struct ionic_admin_cmd),
		sizeof(struct ionic_admin_comp),
		0,
		(struct ionic_qcq **)&lif->adminqcq);
	if (err)
		return err;

	return 0;
}

static int
ionic_notify_qcq_alloc(struct ionic_lif *lif)
{
	struct ionic_notify_qcq *nqcq;
	struct ionic_dev *idev = &lif->adapter->idev;
	uint16_t flags = 0;
	int err;

	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_NOTIFYQ,
		sizeof(struct ionic_notify_qcq),
		rte_socket_id(),
		0,
		"notify",
		flags,
		IONIC_NOTIFYQ_LENGTH,
		sizeof(struct ionic_notifyq_cmd),
		sizeof(union ionic_notifyq_comp),
		0,
		(struct ionic_qcq **)&nqcq);
	if (err)
		return err;

	err = ionic_intr_alloc(lif, &nqcq->intr);
	if (err) {
		ionic_qcq_free(&nqcq->qcq);
		return err;
	}

	ionic_intr_mask_assert(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	lif->notifyqcq = nqcq;

	return 0;
}

static void *
ionic_bus_map_dbpage(struct ionic_adapter *adapter, int page_num)
{
	char *vaddr = adapter->bars[IONIC_PCI_BAR_DBELL].vaddr;

	if (adapter->num_bars <= IONIC_PCI_BAR_DBELL)
		return NULL;

	return (void *)&vaddr[page_num << PAGE_SHIFT];
}

static void
ionic_lif_queue_identify(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	struct ionic_dev *idev = &adapter->idev;
	union ionic_q_identity *q_ident = &adapter->ident.txq;
	uint32_t q_words = RTE_DIM(q_ident->words);
	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
	uint32_t i, nwords, qtype;
	int err;

	for (qtype = 0; qtype < RTE_DIM(ionic_qtype_vers); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* Filter out the types this driver knows about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC,
			qtype, ionic_qtype_vers[qtype]);
		err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
		if (err == -EINVAL) {
			IONIC_PRINT(ERR, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			IONIC_PRINT(ERR, "q_ident failed, older FW\n");
			return;
		} else if (err) {
			IONIC_PRINT(ERR, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		nwords = RTE_MIN(q_words, cmd_words);
		for (i = 0; i < nwords; i++)
			q_ident->words[i] = ioread32(&idev->dev_cmd->data[i]);

		qti->version = q_ident->version;
		qti->supported = q_ident->supported;
		qti->features = rte_le_to_cpu_64(q_ident->features);
		qti->desc_sz = rte_le_to_cpu_16(q_ident->desc_sz);
		qti->comp_sz =
			rte_le_to_cpu_16(q_ident->comp_sz);
		qti->sg_desc_sz = rte_le_to_cpu_16(q_ident->sg_desc_sz);
		qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
		qti->sg_desc_stride =
			rte_le_to_cpu_16(q_ident->sg_desc_stride);

		IONIC_PRINT(DEBUG, " qtype[%d].version = %d",
			qtype, qti->version);
		IONIC_PRINT(DEBUG, " qtype[%d].supported = %#x",
			qtype, qti->supported);
		IONIC_PRINT(DEBUG, " qtype[%d].features = %#jx",
			qtype, qti->features);
		IONIC_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
			qtype, qti->desc_sz);
		IONIC_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
			qtype, qti->comp_sz);
		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
			qtype, qti->sg_desc_sz);
		IONIC_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
			qtype, qti->max_sg_elems);
		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
			qtype, qti->sg_desc_stride);
	}
}

int
ionic_lif_alloc(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	uint32_t socket_id = rte_socket_id();
	int err;

	/*
	 * lif->name was zeroed on allocation.
	 * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated.
	 */
	memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1);

	IONIC_PRINT(DEBUG, "LIF: %s", lif->name);

	ionic_lif_queue_identify(lif);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version < 1) {
		IONIC_PRINT(ERR, "FW too old, please upgrade");
		return -ENXIO;
	}

	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	rte_spinlock_init(&lif->adminq_lock);
	rte_spinlock_init(&lif->adminq_service_lock);

	lif->kern_dbpage = ionic_bus_map_dbpage(adapter, 0);
	if (!lif->kern_dbpage) {
		IONIC_PRINT(ERR, "Cannot map dbpage, aborting");
		return -ENOMEM;
	}

	lif->txqcqs = rte_zmalloc("ionic", sizeof(*lif->txqcqs) *
		adapter->max_ntxqs_per_lif, 0);

	if (!lif->txqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate tx queues array");
		return -ENOMEM;
	}

	lif->rxqcqs = rte_zmalloc("ionic", sizeof(*lif->rxqcqs) *
		adapter->max_nrxqs_per_lif, 0);

	if (!lif->rxqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate rx queues array");
		return -ENOMEM;
	}

	IONIC_PRINT(DEBUG, "Allocating Notify Queue");

	err = ionic_notify_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate notify queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Admin Queue");

	err = ionic_admin_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate admin queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	lif->info_sz = RTE_ALIGN(sizeof(*lif->info), rte_mem_page_size());

	lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
		"lif_info", 0 /* queue_idx */,
		lif->info_sz, IONIC_ALIGN, socket_id);
	if (!lif->info_z) {
		IONIC_PRINT(ERR, "Cannot allocate lif info memory");
		return -ENOMEM;
	}

	lif->info = lif->info_z->addr;
	lif->info_pa = lif->info_z->iova;

	return 0;
}

void
ionic_lif_free(struct ionic_lif *lif)
{
	if (lif->notifyqcq) {
		ionic_qcq_free(&lif->notifyqcq->qcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		ionic_qcq_free(&lif->adminqcq->qcq);
		lif->adminqcq = NULL;
	}

	if (lif->txqcqs) {
		rte_free(lif->txqcqs);
		lif->txqcqs = NULL;
	}

	if (lif->rxqcqs) {
		rte_free(lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->info) {
		rte_memzone_free(lif->info_z);
		lif->info = NULL;
	}
}

void
ionic_lif_free_queues(struct ionic_lif *lif)
{
	uint32_t i;

	for (i = 0; i < lif->ntxqcqs; i++) {
		ionic_dev_tx_queue_release(lif->eth_dev, i);
		lif->eth_dev->data->tx_queues[i] = NULL;
	}
	for (i = 0; i < lif->nrxqcqs; i++) {
		ionic_dev_rx_queue_release(lif->eth_dev, i);
		lif->eth_dev->data->rx_queues[i] = NULL;
	}
}

int
ionic_lif_rss_config(struct ionic_lif *lif,
		const uint16_t types, const uint8_t *key, const uint32_t *indir)
{
	struct ionic_adapter *adapter = lif->adapter;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.types = rte_cpu_to_le_16(types),
			.rss.addr = rte_cpu_to_le_64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i;
	uint16_t tbl_sz =
		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);

	IONIC_PRINT_CALL();

	lif->rss_types = types;

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir)
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
		IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int
ionic_lif_rss_setup(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	static const uint8_t toeplitz_symmetric_key[] = {
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
	};
	uint32_t i;
	uint16_t tbl_sz =
		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);

	IONIC_PRINT_CALL();

	if (!lif->rss_ind_tbl_z) {
		lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev,
			"rss_ind_tbl", 0 /* queue_idx */,
			sizeof(*lif->rss_ind_tbl) * tbl_sz,
			IONIC_ALIGN, rte_socket_id());
		if (!lif->rss_ind_tbl_z) {
			IONIC_PRINT(ERR, "OOM");
			return -ENOMEM;
		}

		lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr;
		lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova;
	}

	if (lif->rss_ind_tbl_nrxqcqs != lif->nrxqcqs) {
		lif->rss_ind_tbl_nrxqcqs = lif->nrxqcqs;

		/* Fill indirection table with 'default' values */
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = i % lif->nrxqcqs;
	}

	return ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL,
		toeplitz_symmetric_key, NULL);
}

static void
ionic_lif_rss_teardown(struct ionic_lif *lif)
{
	if (!lif->rss_ind_tbl)
		return;

	if (lif->rss_ind_tbl_z) {
		/* Disable RSS on the NIC */
		ionic_lif_rss_config(lif, 0x0, NULL, NULL);

		lif->rss_ind_tbl = NULL;
		lif->rss_ind_tbl_pa = 0;
		rte_memzone_free(lif->rss_ind_tbl_z);
		lif->rss_ind_tbl_z = NULL;
	}
}

void
ionic_lif_txq_deinit(struct ionic_tx_qcq *txq)
{
	txq->flags &= ~IONIC_QCQ_F_INITED;
}

void
ionic_lif_rxq_deinit(struct ionic_rx_qcq *rxq)
{
	rxq->flags &= ~IONIC_QCQ_F_INITED;
}

static void
ionic_lif_adminq_deinit(struct ionic_lif *lif)
{
	lif->adminqcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void
ionic_lif_notifyq_deinit(struct ionic_lif *lif)
{
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_dev *idev = &lif->adapter->idev;

	if (!(nqcq->flags & IONIC_QCQ_F_INITED))
		return;

	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	nqcq->flags &= ~IONIC_QCQ_F_INITED;
}

/* This acts like ionic_napi */
int
ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb,
		void *cb_arg)
{
	struct ionic_cq *cq = &qcq->cq;
	uint32_t work_done;

	work_done = ionic_cq_service(cq, budget, cb, cb_arg);

	return work_done;
}

static void
ionic_link_status_check(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	bool link_up;

	lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED;

	if (!lif->info)
		return;

	link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP);

	if ((link_up && adapter->link_up) ||
	    (!link_up && !adapter->link_up))
		return;

	if (link_up) {
		adapter->link_speed =
			rte_le_to_cpu_32(lif->info->status.link_speed);
		IONIC_PRINT(DEBUG, "Link up - %d Gbps",
			adapter->link_speed);
	} else {
		IONIC_PRINT(DEBUG, "Link down");
	}

	adapter->link_up = link_up;
	ionic_dev_link_update(lif->eth_dev, 0);
}

static void
ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	if (lif->state & IONIC_LIF_F_FW_RESET)
		return;

	lif->state |= IONIC_LIF_F_FW_RESET;

	if (lif->state & IONIC_LIF_F_UP) {
		IONIC_PRINT(NOTICE,
			"Surprise FW stop, stopping %s\n", lif->name);
		ionic_lif_stop(lif);
	}

	IONIC_PRINT(NOTICE, "FW down, %s stopped", lif->name);
}

static bool
ionic_notifyq_cb(struct ionic_cq *cq, uint16_t cq_desc_index, void *cb_arg)
{
	union ionic_notifyq_comp *cq_desc_base = cq->base;
	union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct ionic_lif *lif = cb_arg;

	IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d",
		cq_desc->event.eid, cq_desc->event.ecode);

	/* Have we run out of new completions to process?
	 */
	if (!(cq_desc->event.eid > lif->last_eid))
		return false;

	lif->last_eid = cq_desc->event.eid;

	switch (cq_desc->event.ecode) {
	case IONIC_EVENT_LINK_CHANGE:
		IONIC_PRINT(DEBUG,
			"Notifyq IONIC_EVENT_LINK_CHANGE %s "
			"eid=%jd link_status=%d link_speed=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->link_change.link_status,
			cq_desc->link_change.link_speed);

		lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED;
		break;

	case IONIC_EVENT_RESET:
		IONIC_PRINT(NOTICE,
			"Notifyq IONIC_EVENT_RESET %s "
			"eid=%jd, reset_code=%d state=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->reset.reset_code,
			cq_desc->reset.state);
		ionic_lif_handle_fw_down(lif);
		break;

	default:
		IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd",
			cq_desc->event.ecode, cq_desc->event.eid);
		break;
	}

	return true;
}

int
ionic_notifyq_handler(struct ionic_lif *lif, int budget)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	uint32_t work_done;

	if (!(nqcq->flags & IONIC_QCQ_F_INITED)) {
		IONIC_PRINT(DEBUG, "Notifyq not yet initialized");
		return -1;
	}

	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	work_done = ionic_qcq_service(&nqcq->qcq, budget,
		ionic_notifyq_cb, lif);

	if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED)
		ionic_link_status_check(lif);

	ionic_intr_credits(idev->intr_ctrl, nqcq->intr.index,
		work_done, IONIC_INTR_CRED_RESET_COALESCE);

	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	return 0;
}

static int
ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_admin_qcq *aqcq = lif->adminqcq;
	struct ionic_queue *q = &aqcq->qcq.q;
	struct ionic_q_init_comp comp;
	int err;

	ionic_dev_cmd_adminq_init(idev, &aqcq->qcq);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err)
		return err;

	ionic_dev_cmd_comp(idev, &comp);

	q->hw_type = comp.hw_type;
	q->hw_index = rte_le_to_cpu_32(comp.hw_index);
	q->db = ionic_db_map(lif, q);

	IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "adminq->db %p", q->db);

	aqcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int
ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_queue *q = &nqcq->qcq.q;
	int err;

	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.intr_index = rte_cpu_to_le_16(nqcq->intr.index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_IRQ |
				IONIC_QINIT_F_ENA),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
		}
	};

	IONIC_PRINT(DEBUG, "notifyq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = NULL;

	IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "notifyq->db %p", q->db);

	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	nqcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

int
ionic_lif_set_features(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_FEATURES,
			.features = rte_cpu_to_le_64(lif->features),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	lif->hw_features = rte_le_to_cpu_64(ctx.cmd.lif_setattr.features &
		ctx.comp.lif_setattr.features);

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG");
	if (lif->hw_features & IONIC_ETH_HW_RX_SG)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM");

	return 0;
}

int
ionic_lif_txq_init(struct ionic_tx_qcq *txq)
{
	struct ionic_qcq *qcq = &txq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG |
				IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};
	int err;

	IONIC_PRINT(DEBUG, "txq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "txq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx.cmd.q_init.ver);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = ionic_db_map(lif, q);

	IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "txq->db %p", q->db);

	txq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

int
ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)
{
	struct ionic_qcq *qcq = &rxq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG |
				IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};
	int err;

	IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "rxq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx.cmd.q_init.ver);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = ionic_db_map(lif, q);

	rxq->flags |= IONIC_QCQ_F_INITED;

	IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "rxq->db %p", q->db);

	return 0;
}

static int
ionic_station_set(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	int err;

	IONIC_PRINT_CALL();

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN);

	return 0;
}

static void
ionic_lif_set_name(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	memcpy(ctx.cmd.lif_setattr.name, lif->name,
		sizeof(ctx.cmd.lif_setattr.name) - 1);

	ionic_adminq_post_wait(lif, &ctx);
}

int
ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_lif_init_comp comp;
	int err;

	memset(&lif->stats_base, 0, sizeof(lif->stats_base));

	ionic_dev_cmd_lif_init(idev, lif->info_pa);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err)
		return err;

	ionic_dev_cmd_comp(idev, &comp);

	lif->hw_index = rte_cpu_to_le_16(comp.hw_index);

	err = ionic_lif_adminq_init(lif);
	if (err)
		return err;

	err = ionic_lif_notifyq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	/*
	 * Configure initial feature set
	 * This will be updated later by the dev_configure() step
	 */
	lif->features = IONIC_ETH_HW_RX_HASH | IONIC_ETH_HW_VLAN_RX_FILTER;

	err = ionic_lif_set_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_rx_filters_init(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_station_set(lif);
	if (err)
		goto err_out_rx_filter_deinit;

	ionic_lif_set_name(lif);

	lif->state |= IONIC_LIF_F_INITED;

	return 0;

err_out_rx_filter_deinit:
	ionic_rx_filters_deinit(lif);

err_out_notifyq_deinit:
	ionic_lif_notifyq_deinit(lif);

err_out_adminq_deinit:
	ionic_lif_adminq_deinit(lif);

	return err;
}

void
ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!(lif->state & IONIC_LIF_F_INITED))
		return;

	ionic_rx_filters_deinit(lif);
	ionic_lif_rss_teardown(lif);
	ionic_lif_notifyq_deinit(lif);
	ionic_lif_adminq_deinit(lif);

	lif->state &= ~IONIC_LIF_F_INITED;
}

void
ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
{
	struct rte_eth_dev *eth_dev = lif->eth_dev;
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	/*
	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
	 * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
	 */
	rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
		else
			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
	}
}

void
ionic_lif_configure(struct ionic_lif *lif)
{
	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &lif->eth_dev->data->dev_conf.txmode;
	struct ionic_identity *ident = &lif->adapter->ident;
	union ionic_lif_config *cfg = &ident->lif.eth.config;
	uint32_t ntxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
	uint32_t nrxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
	uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues;
	uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues;

	lif->port_id = lif->eth_dev->data->port_id;

	IONIC_PRINT(DEBUG, "Configuring LIF on port %u",
		lif->port_id);

	if (nrxqs > 0)
		nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs);

	if (ntxqs > 0)
		ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs);

	lif->nrxqcqs = nrxqs_per_lif;
	lif->ntxqcqs = ntxqs_per_lif;

	/* Update the LIF configuration based on the eth_dev */

	/*
	 * NB: While it is true that RSS_HASH is always enabled on ionic,
	 * setting this flag unconditionally causes problems in DTS.
	 * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
	 */

	/* RX per-port */

	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
	    rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
	    rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
		lif->features |= IONIC_ETH_HW_RX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_RX_CSUM;

	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
		lif->features |= IONIC_ETH_HW_RX_SG;
		lif->eth_dev->data->scattered_rx = 1;
	} else {
		lif->features &= ~IONIC_ETH_HW_RX_SG;
		lif->eth_dev->data->scattered_rx = 0;
	}

	/* Covers VLAN_STRIP */
	ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);

	/* TX per-port */

	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
		lif->features |= IONIC_ETH_HW_TX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_TX_CSUM;

	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
	else
		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;

	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		lif->features |= IONIC_ETH_HW_TX_SG;
	else
		lif->features &= ~IONIC_ETH_HW_TX_SG;

	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
		lif->features |= IONIC_ETH_HW_TSO;
		lif->features |= IONIC_ETH_HW_TSO_IPV6;
		lif->features |= IONIC_ETH_HW_TSO_ECN;
	} else {
		lif->features &= ~IONIC_ETH_HW_TSO;
		lif->features &= ~IONIC_ETH_HW_TSO_IPV6;
		lif->features &= ~IONIC_ETH_HW_TSO_ECN;
	}
}

int
ionic_lif_start(struct ionic_lif *lif)
{
	uint32_t rx_mode;
	uint32_t i;
	int err;

	err = ionic_lif_rss_setup(lif);
	if (err)
		return err;

	if (!lif->rx_mode) {
		IONIC_PRINT(DEBUG, "Setting RX mode on %s",
			lif->name);

		rx_mode = IONIC_RX_MODE_F_UNICAST;
		rx_mode |= IONIC_RX_MODE_F_MULTICAST;
		rx_mode |= IONIC_RX_MODE_F_BROADCAST;

		ionic_set_rx_mode(lif, rx_mode);
	}

	IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues "
		"on port %u",
		lif->nrxqcqs, lif->ntxqcqs, lif->port_id);

	for (i = 0; i < lif->nrxqcqs; i++) {
		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
		if (!(rxq->flags & IONIC_QCQ_F_DEFERRED)) {
			err = ionic_dev_rx_queue_start(lif->eth_dev, i);

			if (err)
				return err;
		}
	}

	for (i = 0; i < lif->ntxqcqs; i++) {
		struct ionic_tx_qcq *txq = lif->txqcqs[i];
		if (!(txq->flags & IONIC_QCQ_F_DEFERRED)) {
			err = ionic_dev_tx_queue_start(lif->eth_dev, i);

			if (err)
				return err;
		}
	}

	/* Carrier ON here */
	lif->state |= IONIC_LIF_F_UP;

	ionic_link_status_check(lif);

	return 0;
}

int
ionic_lif_identify(struct ionic_adapter *adapter)
{
	struct ionic_dev *idev = &adapter->idev;
	struct ionic_identity *ident = &adapter->ident;
	union ionic_lif_config *cfg = &ident->lif.eth.config;
	uint32_t lif_words = RTE_DIM(ident->lif.words);
	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
	uint32_t i, nwords;
	int err;

	ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC,
		IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err)
		return (err);

	nwords = RTE_MIN(lif_words, cmd_words);
	for (i = 0; i < nwords; i++)
		ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]);

	IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ",
		rte_le_to_cpu_64(ident->lif.capabilities));

	IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ",
		rte_le_to_cpu_32(ident->lif.eth.max_ucast_filters));
	IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ",
		rte_le_to_cpu_32(ident->lif.eth.max_mcast_filters));

	IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ",
		rte_le_to_cpu_64(cfg->features));
	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ",
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_ADMINQ]));
	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ",
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_NOTIFYQ]));
	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ",
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]));
	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ",
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]));

	return 0;
}

int
ionic_lifs_size(struct ionic_adapter *adapter)
{
	struct ionic_identity *ident = &adapter->ident;
	union ionic_lif_config *cfg = &ident->lif.eth.config;
	uint32_t nintrs, dev_nintrs = rte_le_to_cpu_32(ident->dev.nintrs);

	adapter->max_ntxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
	adapter->max_nrxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);

	nintrs = 1 /* notifyq */;

	if (nintrs > dev_nintrs) {
		IONIC_PRINT(ERR,
			"At most %d intr supported, minimum req'd is %u",
			dev_nintrs, nintrs);
		return -ENOSPC;
	}

	adapter->nintrs = nintrs;

	return 0;
}
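
/*
 * Illustrative sketch only (not compiled code in this file): the ethdev ops
 * layer is assumed to drive the LIF lifecycle implemented above. Under that
 * assumption, a simplified bring-up/teardown ordering looks roughly like:
 *
 *	ionic_lif_identify(adapter);	// read LIF capabilities/config
 *	ionic_lifs_size(adapter);	// size queue counts and interrupts
 *	ionic_lif_alloc(lif);		// notifyq/adminq qcqs, lif info DMA
 *	ionic_lif_init(lif);		// adminq/notifyq init, base features
 *	ionic_lif_configure(lif);	// map eth_dev offloads to features
 *	ionic_lif_start(lif);		// RSS, rx_mode, start Rx/Tx queues
 *	...
 *	ionic_lif_stop(lif);		// stop Rx/Tx queues
 *	ionic_lif_deinit(lif);		// filters, RSS, notifyq, adminq
 *	ionic_lif_free(lif);		// release qcqs and info memzone
 *
 * Error handling and the exact call sites are omitted; see the ethdev ops
 * in ionic_ethdev.c for the authoritative sequence.
 */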