1 /* 2 * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 4 * 5 * Copyright (c) 2014, Cisco Systems, Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34 35 #include <stdio.h> 36 37 #include <sys/stat.h> 38 #include <sys/mman.h> 39 #include <fcntl.h> 40 #include <libgen.h> 41 42 #include <rte_pci.h> 43 #include <rte_memzone.h> 44 #include <rte_malloc.h> 45 #include <rte_mbuf.h> 46 #include <rte_string_fns.h> 47 #include <rte_ethdev.h> 48 49 #include "enic_compat.h" 50 #include "enic.h" 51 #include "wq_enet_desc.h" 52 #include "rq_enet_desc.h" 53 #include "cq_enet_desc.h" 54 #include "vnic_enet.h" 55 #include "vnic_dev.h" 56 #include "vnic_wq.h" 57 #include "vnic_rq.h" 58 #include "vnic_cq.h" 59 #include "vnic_intr.h" 60 #include "vnic_nic.h" 61 62 static inline int enic_is_sriov_vf(struct enic *enic) 63 { 64 return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; 65 } 66 67 static int is_zero_addr(uint8_t *addr) 68 { 69 return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); 70 } 71 72 static int is_mcast_addr(uint8_t *addr) 73 { 74 return addr[0] & 1; 75 } 76 77 static int is_eth_addr_valid(uint8_t *addr) 78 { 79 return !is_mcast_addr(addr) && !is_zero_addr(addr); 80 } 81 82 static void 83 enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq) 84 { 85 uint16_t i; 86 87 if (!rq || !rq->mbuf_ring) { 88 dev_debug(enic, "Pointer to rq or mbuf_ring is NULL"); 89 return; 90 } 91 92 for (i = 0; i < rq->ring.desc_count; i++) { 93 if (rq->mbuf_ring[i]) { 94 rte_pktmbuf_free_seg(rq->mbuf_ring[i]); 95 rq->mbuf_ring[i] = NULL; 96 } 97 } 98 } 99 100 void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size) 101 { 102 vnic_set_hdr_split_size(enic->vdev, split_hdr_size); 103 } 104 105 static void enic_free_wq_buf(struct vnic_wq_buf *buf) 106 { 107 struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb; 108 109 rte_pktmbuf_free_seg(mbuf); 110 buf->mb = NULL; 111 } 112 113 static void enic_log_q_error(struct enic *enic) 114 { 115 unsigned int i; 116 u32 error_status; 117 118 for (i = 0; i < enic->wq_count; i++) { 119 error_status = vnic_wq_error_status(&enic->wq[i]); 120 if (error_status) 121 dev_err(enic, "WQ[%d] error_status %d\n", i, 122 error_status); 123 } 124 125 for (i = 0; i < enic_vnic_rq_count(enic); i++) { 126 if (!enic->rq[i].in_use) 127 continue; 128 error_status = vnic_rq_error_status(&enic->rq[i]); 129 if (error_status) 130 dev_err(enic, "RQ[%d] error_status %d\n", i, 131 error_status); 132 } 133 } 134 135 static void enic_clear_soft_stats(struct enic *enic) 136 { 137 struct enic_soft_stats *soft_stats = &enic->soft_stats; 138 rte_atomic64_clear(&soft_stats->rx_nombuf); 139 rte_atomic64_clear(&soft_stats->rx_packet_errors); 140 } 141 142 static void enic_init_soft_stats(struct enic *enic) 143 { 144 struct enic_soft_stats *soft_stats = &enic->soft_stats; 145 rte_atomic64_init(&soft_stats->rx_nombuf); 146 rte_atomic64_init(&soft_stats->rx_packet_errors); 147 enic_clear_soft_stats(enic); 148 } 149 150 void enic_dev_stats_clear(struct enic *enic) 151 { 152 if (vnic_dev_stats_clear(enic->vdev)) 153 dev_err(enic, "Error in clearing stats\n"); 154 enic_clear_soft_stats(enic); 155 } 156 157 void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) 158 { 159 struct vnic_stats *stats; 160 struct enic_soft_stats *soft_stats = &enic->soft_stats; 161 int64_t rx_truncated; 162 uint64_t rx_packet_errors; 163 164 if (vnic_dev_stats_dump(enic->vdev, &stats)) { 165 dev_err(enic, "Error in getting stats\n"); 166 return; 167 } 168 169 /* The number of truncated packets can only be calculated by 170 * subtracting a hardware counter from error packets received by 171 * the driver. Note: this causes transient inaccuracies in the 172 * ipackets count. Also, the length of truncated packets are 173 * counted in ibytes even though truncated packets are dropped 174 * which can make ibytes be slightly higher than it should be. 175 */ 176 rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors); 177 rx_truncated = rx_packet_errors - stats->rx.rx_errors - 178 stats->rx.rx_no_bufs; 179 180 r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated; 181 r_stats->opackets = stats->tx.tx_frames_ok; 182 183 r_stats->ibytes = stats->rx.rx_bytes_ok; 184 r_stats->obytes = stats->tx.tx_bytes_ok; 185 186 r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop; 187 r_stats->oerrors = stats->tx.tx_errors; 188 189 r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated; 190 191 r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf); 192 } 193 194 void enic_del_mac_address(struct enic *enic) 195 { 196 if (vnic_dev_del_addr(enic->vdev, enic->mac_addr)) 197 dev_err(enic, "del mac addr failed\n"); 198 } 199 200 void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) 201 { 202 int err; 203 204 if (!is_eth_addr_valid(mac_addr)) { 205 dev_err(enic, "invalid mac address\n"); 206 return; 207 } 208 209 err = vnic_dev_del_addr(enic->vdev, enic->mac_addr); 210 if (err) { 211 dev_err(enic, "del mac addr failed\n"); 212 return; 213 } 214 215 ether_addr_copy((struct ether_addr *)mac_addr, 216 (struct ether_addr *)enic->mac_addr); 217 218 err = vnic_dev_add_addr(enic->vdev, mac_addr); 219 if (err) { 220 dev_err(enic, "add mac addr failed\n"); 221 return; 222 } 223 } 224 225 static void 226 enic_free_rq_buf(struct rte_mbuf **mbuf) 227 { 228 if (*mbuf == NULL) 229 return; 230 231 rte_pktmbuf_free(*mbuf); 232 mbuf = NULL; 233 } 234 235 void enic_init_vnic_resources(struct enic *enic) 236 { 237 unsigned int error_interrupt_enable = 1; 238 unsigned int error_interrupt_offset = 0; 239 unsigned int index = 0; 240 unsigned int cq_idx; 241 struct vnic_rq *data_rq; 242 243 for (index = 0; index < enic->rq_count; index++) { 244 cq_idx = enic_cq_rq(enic, enic_sop_rq(index)); 245 246 vnic_rq_init(&enic->rq[enic_sop_rq(index)], 247 cq_idx, 248 error_interrupt_enable, 249 error_interrupt_offset); 250 251 data_rq = &enic->rq[enic_data_rq(index)]; 252 if (data_rq->in_use) 253 vnic_rq_init(data_rq, 254 cq_idx, 255 error_interrupt_enable, 256 error_interrupt_offset); 257 258 vnic_cq_init(&enic->cq[cq_idx], 259 0 /* flow_control_enable */, 260 1 /* color_enable */, 261 0 /* cq_head */, 262 0 /* cq_tail */, 263 1 /* cq_tail_color */, 264 0 /* interrupt_enable */, 265 1 /* cq_entry_enable */, 266 0 /* cq_message_enable */, 267 0 /* interrupt offset */, 268 0 /* cq_message_addr */); 269 } 270 271 for (index = 0; index < enic->wq_count; index++) { 272 vnic_wq_init(&enic->wq[index], 273 enic_cq_wq(enic, index), 274 error_interrupt_enable, 275 error_interrupt_offset); 276 277 cq_idx = enic_cq_wq(enic, index); 278 vnic_cq_init(&enic->cq[cq_idx], 279 0 /* flow_control_enable */, 280 1 /* color_enable */, 281 0 /* cq_head */, 282 0 /* cq_tail */, 283 1 /* cq_tail_color */, 284 0 /* interrupt_enable */, 285 0 /* cq_entry_enable */, 286 1 /* cq_message_enable */, 287 0 /* interrupt offset */, 288 (u64)enic->wq[index].cqmsg_rz->phys_addr); 289 } 290 291 vnic_intr_init(&enic->intr, 292 enic->config.intr_timer_usec, 293 enic->config.intr_timer_type, 294 /*mask_on_assertion*/1); 295 } 296 297 298 static int 299 enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) 300 { 301 struct rte_mbuf *mb; 302 struct rq_enet_desc *rqd = rq->ring.descs; 303 unsigned i; 304 dma_addr_t dma_addr; 305 306 if (!rq->in_use) 307 return 0; 308 309 dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index, 310 rq->ring.desc_count); 311 312 for (i = 0; i < rq->ring.desc_count; i++, rqd++) { 313 mb = rte_mbuf_raw_alloc(rq->mp); 314 if (mb == NULL) { 315 dev_err(enic, "RX mbuf alloc failed queue_id=%u\n", 316 (unsigned)rq->index); 317 return -ENOMEM; 318 } 319 320 mb->data_off = RTE_PKTMBUF_HEADROOM; 321 dma_addr = (dma_addr_t)(mb->buf_physaddr 322 + RTE_PKTMBUF_HEADROOM); 323 rq_enet_desc_enc(rqd, dma_addr, 324 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP 325 : RQ_ENET_TYPE_NOT_SOP), 326 mb->buf_len - RTE_PKTMBUF_HEADROOM); 327 rq->mbuf_ring[i] = mb; 328 } 329 330 /* make sure all prior writes are complete before doing the PIO write */ 331 rte_rmb(); 332 333 /* Post all but the last buffer to VIC. */ 334 rq->posted_index = rq->ring.desc_count - 1; 335 336 rq->rx_nb_hold = 0; 337 338 dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n", 339 enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold); 340 iowrite32(rq->posted_index, &rq->ctrl->posted_index); 341 iowrite32(0, &rq->ctrl->fetch_index); 342 rte_rmb(); 343 344 return 0; 345 346 } 347 348 static void * 349 enic_alloc_consistent(void *priv, size_t size, 350 dma_addr_t *dma_handle, u8 *name) 351 { 352 void *vaddr; 353 const struct rte_memzone *rz; 354 *dma_handle = 0; 355 struct enic *enic = (struct enic *)priv; 356 struct enic_memzone_entry *mze; 357 358 rz = rte_memzone_reserve_aligned((const char *)name, 359 size, SOCKET_ID_ANY, 0, ENIC_ALIGN); 360 if (!rz) { 361 pr_err("%s : Failed to allocate memory requested for %s\n", 362 __func__, name); 363 return NULL; 364 } 365 366 vaddr = rz->addr; 367 *dma_handle = (dma_addr_t)rz->phys_addr; 368 369 mze = rte_malloc("enic memzone entry", 370 sizeof(struct enic_memzone_entry), 0); 371 372 if (!mze) { 373 pr_err("%s : Failed to allocate memory for memzone list\n", 374 __func__); 375 rte_memzone_free(rz); 376 } 377 378 mze->rz = rz; 379 380 rte_spinlock_lock(&enic->memzone_list_lock); 381 LIST_INSERT_HEAD(&enic->memzone_list, mze, entries); 382 rte_spinlock_unlock(&enic->memzone_list_lock); 383 384 return vaddr; 385 } 386 387 static void 388 enic_free_consistent(void *priv, 389 __rte_unused size_t size, 390 void *vaddr, 391 dma_addr_t dma_handle) 392 { 393 struct enic_memzone_entry *mze; 394 struct enic *enic = (struct enic *)priv; 395 396 rte_spinlock_lock(&enic->memzone_list_lock); 397 LIST_FOREACH(mze, &enic->memzone_list, entries) { 398 if (mze->rz->addr == vaddr && 399 mze->rz->phys_addr == dma_handle) 400 break; 401 } 402 if (mze == NULL) { 403 rte_spinlock_unlock(&enic->memzone_list_lock); 404 dev_warning(enic, 405 "Tried to free memory, but couldn't find it in the memzone list\n"); 406 return; 407 } 408 LIST_REMOVE(mze, entries); 409 rte_spinlock_unlock(&enic->memzone_list_lock); 410 rte_memzone_free(mze->rz); 411 rte_free(mze); 412 } 413 414 static void 415 enic_intr_handler(__rte_unused struct rte_intr_handle *handle, 416 void *arg) 417 { 418 struct enic *enic = pmd_priv((struct rte_eth_dev *)arg); 419 420 vnic_intr_return_all_credits(&enic->intr); 421 422 enic_log_q_error(enic); 423 } 424 425 int enic_enable(struct enic *enic) 426 { 427 unsigned int index; 428 int err; 429 struct rte_eth_dev *eth_dev = enic->rte_dev; 430 431 eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev); 432 eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; 433 vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */ 434 435 if (enic_clsf_init(enic)) 436 dev_warning(enic, "Init of hash table for clsf failed."\ 437 "Flow director feature will not work\n"); 438 439 for (index = 0; index < enic->rq_count; index++) { 440 err = enic_alloc_rx_queue_mbufs(enic, 441 &enic->rq[enic_sop_rq(index)]); 442 if (err) { 443 dev_err(enic, "Failed to alloc sop RX queue mbufs\n"); 444 return err; 445 } 446 err = enic_alloc_rx_queue_mbufs(enic, 447 &enic->rq[enic_data_rq(index)]); 448 if (err) { 449 /* release the allocated mbufs for the sop rq*/ 450 enic_rxmbuf_queue_release(enic, 451 &enic->rq[enic_sop_rq(index)]); 452 453 dev_err(enic, "Failed to alloc data RX queue mbufs\n"); 454 return err; 455 } 456 } 457 458 for (index = 0; index < enic->wq_count; index++) 459 enic_start_wq(enic, index); 460 for (index = 0; index < enic->rq_count; index++) 461 enic_start_rq(enic, index); 462 463 vnic_dev_add_addr(enic->vdev, enic->mac_addr); 464 465 vnic_dev_enable_wait(enic->vdev); 466 467 /* Register and enable error interrupt */ 468 rte_intr_callback_register(&(enic->pdev->intr_handle), 469 enic_intr_handler, (void *)enic->rte_dev); 470 471 rte_intr_enable(&(enic->pdev->intr_handle)); 472 vnic_intr_unmask(&enic->intr); 473 474 return 0; 475 } 476 477 int enic_alloc_intr_resources(struct enic *enic) 478 { 479 int err; 480 481 dev_info(enic, "vNIC resources used: "\ 482 "wq %d rq %d cq %d intr %d\n", 483 enic->wq_count, enic_vnic_rq_count(enic), 484 enic->cq_count, enic->intr_count); 485 486 err = vnic_intr_alloc(enic->vdev, &enic->intr, 0); 487 if (err) 488 enic_free_vnic_resources(enic); 489 490 return err; 491 } 492 493 void enic_free_rq(void *rxq) 494 { 495 struct vnic_rq *rq_sop, *rq_data; 496 struct enic *enic; 497 498 if (rxq == NULL) 499 return; 500 501 rq_sop = (struct vnic_rq *)rxq; 502 enic = vnic_dev_priv(rq_sop->vdev); 503 rq_data = &enic->rq[rq_sop->data_queue_idx]; 504 505 enic_rxmbuf_queue_release(enic, rq_sop); 506 if (rq_data->in_use) 507 enic_rxmbuf_queue_release(enic, rq_data); 508 509 rte_free(rq_sop->mbuf_ring); 510 if (rq_data->in_use) 511 rte_free(rq_data->mbuf_ring); 512 513 rq_sop->mbuf_ring = NULL; 514 rq_data->mbuf_ring = NULL; 515 516 vnic_rq_free(rq_sop); 517 if (rq_data->in_use) 518 vnic_rq_free(rq_data); 519 520 vnic_cq_free(&enic->cq[rq_sop->index]); 521 } 522 523 void enic_start_wq(struct enic *enic, uint16_t queue_idx) 524 { 525 struct rte_eth_dev *eth_dev = enic->rte_dev; 526 vnic_wq_enable(&enic->wq[queue_idx]); 527 eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; 528 } 529 530 int enic_stop_wq(struct enic *enic, uint16_t queue_idx) 531 { 532 struct rte_eth_dev *eth_dev = enic->rte_dev; 533 int ret; 534 535 ret = vnic_wq_disable(&enic->wq[queue_idx]); 536 if (ret) 537 return ret; 538 539 eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED; 540 return 0; 541 } 542 543 void enic_start_rq(struct enic *enic, uint16_t queue_idx) 544 { 545 struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)]; 546 struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx]; 547 struct rte_eth_dev *eth_dev = enic->rte_dev; 548 549 if (rq_data->in_use) 550 vnic_rq_enable(rq_data); 551 rte_mb(); 552 vnic_rq_enable(rq_sop); 553 eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; 554 } 555 556 int enic_stop_rq(struct enic *enic, uint16_t queue_idx) 557 { 558 int ret1 = 0, ret2 = 0; 559 struct rte_eth_dev *eth_dev = enic->rte_dev; 560 struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)]; 561 struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx]; 562 563 ret2 = vnic_rq_disable(rq_sop); 564 rte_mb(); 565 if (rq_data->in_use) 566 ret1 = vnic_rq_disable(rq_data); 567 568 if (ret2) 569 return ret2; 570 else if (ret1) 571 return ret1; 572 573 eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED; 574 return 0; 575 } 576 577 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, 578 unsigned int socket_id, struct rte_mempool *mp, 579 uint16_t nb_desc) 580 { 581 int rc; 582 uint16_t sop_queue_idx = enic_sop_rq(queue_idx); 583 uint16_t data_queue_idx = enic_data_rq(queue_idx); 584 struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx]; 585 struct vnic_rq *rq_data = &enic->rq[data_queue_idx]; 586 unsigned int mbuf_size, mbufs_per_pkt; 587 unsigned int nb_sop_desc, nb_data_desc; 588 uint16_t min_sop, max_sop, min_data, max_data; 589 590 rq_sop->is_sop = 1; 591 rq_sop->data_queue_idx = data_queue_idx; 592 rq_data->is_sop = 0; 593 rq_data->data_queue_idx = 0; 594 rq_sop->socket_id = socket_id; 595 rq_sop->mp = mp; 596 rq_data->socket_id = socket_id; 597 rq_data->mp = mp; 598 rq_sop->in_use = 1; 599 600 mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - 601 RTE_PKTMBUF_HEADROOM); 602 603 if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) { 604 dev_info(enic, "Scatter rx mode enabled\n"); 605 /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */ 606 mbufs_per_pkt = ((enic->config.mtu + ETHER_HDR_LEN + 4) + 607 (mbuf_size - 1)) / mbuf_size; 608 } else { 609 dev_info(enic, "Scatter rx mode disabled\n"); 610 mbufs_per_pkt = 1; 611 } 612 613 if (mbufs_per_pkt > 1) { 614 dev_info(enic, "Scatter rx mode in use\n"); 615 rq_data->in_use = 1; 616 } else { 617 dev_info(enic, "Scatter rx mode not being used\n"); 618 rq_data->in_use = 0; 619 } 620 621 /* number of descriptors have to be a multiple of 32 */ 622 nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F; 623 nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F; 624 625 rq_sop->max_mbufs_per_pkt = mbufs_per_pkt; 626 rq_data->max_mbufs_per_pkt = mbufs_per_pkt; 627 628 if (mbufs_per_pkt > 1) { 629 min_sop = 64; 630 max_sop = ((enic->config.rq_desc_count / 631 (mbufs_per_pkt - 1)) & ~0x1F); 632 min_data = min_sop * (mbufs_per_pkt - 1); 633 max_data = enic->config.rq_desc_count; 634 } else { 635 min_sop = 64; 636 max_sop = enic->config.rq_desc_count; 637 min_data = 0; 638 max_data = 0; 639 } 640 641 if (nb_desc < (min_sop + min_data)) { 642 dev_warning(enic, 643 "Number of rx descs too low, adjusting to minimum\n"); 644 nb_sop_desc = min_sop; 645 nb_data_desc = min_data; 646 } else if (nb_desc > (max_sop + max_data)) { 647 dev_warning(enic, 648 "Number of rx_descs too high, adjusting to maximum\n"); 649 nb_sop_desc = max_sop; 650 nb_data_desc = max_data; 651 } 652 if (mbufs_per_pkt > 1) { 653 dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n", 654 enic->config.mtu, mbuf_size, min_sop + min_data, 655 max_sop + max_data); 656 } 657 dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n", 658 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc); 659 660 /* Allocate sop queue resources */ 661 rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx, 662 nb_sop_desc, sizeof(struct rq_enet_desc)); 663 if (rc) { 664 dev_err(enic, "error in allocation of sop rq\n"); 665 goto err_exit; 666 } 667 nb_sop_desc = rq_sop->ring.desc_count; 668 669 if (rq_data->in_use) { 670 /* Allocate data queue resources */ 671 rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx, 672 nb_data_desc, 673 sizeof(struct rq_enet_desc)); 674 if (rc) { 675 dev_err(enic, "error in allocation of data rq\n"); 676 goto err_free_rq_sop; 677 } 678 nb_data_desc = rq_data->ring.desc_count; 679 } 680 rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx, 681 socket_id, nb_sop_desc + nb_data_desc, 682 sizeof(struct cq_enet_rq_desc)); 683 if (rc) { 684 dev_err(enic, "error in allocation of cq for rq\n"); 685 goto err_free_rq_data; 686 } 687 688 /* Allocate the mbuf rings */ 689 rq_sop->mbuf_ring = (struct rte_mbuf **) 690 rte_zmalloc_socket("rq->mbuf_ring", 691 sizeof(struct rte_mbuf *) * nb_sop_desc, 692 RTE_CACHE_LINE_SIZE, rq_sop->socket_id); 693 if (rq_sop->mbuf_ring == NULL) 694 goto err_free_cq; 695 696 if (rq_data->in_use) { 697 rq_data->mbuf_ring = (struct rte_mbuf **) 698 rte_zmalloc_socket("rq->mbuf_ring", 699 sizeof(struct rte_mbuf *) * nb_data_desc, 700 RTE_CACHE_LINE_SIZE, rq_sop->socket_id); 701 if (rq_data->mbuf_ring == NULL) 702 goto err_free_sop_mbuf; 703 } 704 705 return 0; 706 707 err_free_sop_mbuf: 708 rte_free(rq_sop->mbuf_ring); 709 err_free_cq: 710 /* cleanup on error */ 711 vnic_cq_free(&enic->cq[queue_idx]); 712 err_free_rq_data: 713 if (rq_data->in_use) 714 vnic_rq_free(rq_data); 715 err_free_rq_sop: 716 vnic_rq_free(rq_sop); 717 err_exit: 718 return -ENOMEM; 719 } 720 721 void enic_free_wq(void *txq) 722 { 723 struct vnic_wq *wq; 724 struct enic *enic; 725 726 if (txq == NULL) 727 return; 728 729 wq = (struct vnic_wq *)txq; 730 enic = vnic_dev_priv(wq->vdev); 731 rte_memzone_free(wq->cqmsg_rz); 732 vnic_wq_free(wq); 733 vnic_cq_free(&enic->cq[enic->rq_count + wq->index]); 734 } 735 736 int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, 737 unsigned int socket_id, uint16_t nb_desc) 738 { 739 int err; 740 struct vnic_wq *wq = &enic->wq[queue_idx]; 741 unsigned int cq_index = enic_cq_wq(enic, queue_idx); 742 char name[NAME_MAX]; 743 static int instance; 744 745 wq->socket_id = socket_id; 746 if (nb_desc) { 747 if (nb_desc > enic->config.wq_desc_count) { 748 dev_warning(enic, 749 "WQ %d - number of tx desc in cmd line (%d)"\ 750 "is greater than that in the UCSM/CIMC adapter"\ 751 "policy. Applying the value in the adapter "\ 752 "policy (%d)\n", 753 queue_idx, nb_desc, enic->config.wq_desc_count); 754 } else if (nb_desc != enic->config.wq_desc_count) { 755 enic->config.wq_desc_count = nb_desc; 756 dev_info(enic, 757 "TX Queues - effective number of descs:%d\n", 758 nb_desc); 759 } 760 } 761 762 /* Allocate queue resources */ 763 err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx, 764 enic->config.wq_desc_count, 765 sizeof(struct wq_enet_desc)); 766 if (err) { 767 dev_err(enic, "error in allocation of wq\n"); 768 return err; 769 } 770 771 err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index, 772 socket_id, enic->config.wq_desc_count, 773 sizeof(struct cq_enet_wq_desc)); 774 if (err) { 775 vnic_wq_free(wq); 776 dev_err(enic, "error in allocation of cq for wq\n"); 777 } 778 779 /* setup up CQ message */ 780 snprintf((char *)name, sizeof(name), 781 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx, 782 instance++); 783 784 wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name, 785 sizeof(uint32_t), 786 SOCKET_ID_ANY, 0, 787 ENIC_ALIGN); 788 if (!wq->cqmsg_rz) 789 return -ENOMEM; 790 791 return err; 792 } 793 794 int enic_disable(struct enic *enic) 795 { 796 unsigned int i; 797 int err; 798 799 vnic_intr_mask(&enic->intr); 800 (void)vnic_intr_masked(&enic->intr); /* flush write */ 801 802 vnic_dev_disable(enic->vdev); 803 804 enic_clsf_destroy(enic); 805 806 if (!enic_is_sriov_vf(enic)) 807 vnic_dev_del_addr(enic->vdev, enic->mac_addr); 808 809 for (i = 0; i < enic->wq_count; i++) { 810 err = vnic_wq_disable(&enic->wq[i]); 811 if (err) 812 return err; 813 } 814 for (i = 0; i < enic_vnic_rq_count(enic); i++) { 815 if (enic->rq[i].in_use) { 816 err = vnic_rq_disable(&enic->rq[i]); 817 if (err) 818 return err; 819 } 820 } 821 822 vnic_dev_set_reset_flag(enic->vdev, 1); 823 vnic_dev_notify_unset(enic->vdev); 824 825 for (i = 0; i < enic->wq_count; i++) 826 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); 827 828 for (i = 0; i < enic_vnic_rq_count(enic); i++) 829 if (enic->rq[i].in_use) 830 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); 831 for (i = 0; i < enic->cq_count; i++) 832 vnic_cq_clean(&enic->cq[i]); 833 vnic_intr_clean(&enic->intr); 834 835 return 0; 836 } 837 838 static int enic_dev_wait(struct vnic_dev *vdev, 839 int (*start)(struct vnic_dev *, int), 840 int (*finished)(struct vnic_dev *, int *), 841 int arg) 842 { 843 int done; 844 int err; 845 int i; 846 847 err = start(vdev, arg); 848 if (err) 849 return err; 850 851 /* Wait for func to complete...2 seconds max */ 852 for (i = 0; i < 2000; i++) { 853 err = finished(vdev, &done); 854 if (err) 855 return err; 856 if (done) 857 return 0; 858 usleep(1000); 859 } 860 return -ETIMEDOUT; 861 } 862 863 static int enic_dev_open(struct enic *enic) 864 { 865 int err; 866 867 err = enic_dev_wait(enic->vdev, vnic_dev_open, 868 vnic_dev_open_done, 0); 869 if (err) 870 dev_err(enic_get_dev(enic), 871 "vNIC device open failed, err %d\n", err); 872 873 return err; 874 } 875 876 static int enic_set_rsskey(struct enic *enic) 877 { 878 dma_addr_t rss_key_buf_pa; 879 union vnic_rss_key *rss_key_buf_va = NULL; 880 static union vnic_rss_key rss_key = { 881 .key = { 882 [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}}, 883 [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}}, 884 [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}}, 885 [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}}, 886 } 887 }; 888 int err; 889 u8 name[NAME_MAX]; 890 891 snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name); 892 rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key), 893 &rss_key_buf_pa, name); 894 if (!rss_key_buf_va) 895 return -ENOMEM; 896 897 rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); 898 899 err = enic_set_rss_key(enic, 900 rss_key_buf_pa, 901 sizeof(union vnic_rss_key)); 902 903 enic_free_consistent(enic, sizeof(union vnic_rss_key), 904 rss_key_buf_va, rss_key_buf_pa); 905 906 return err; 907 } 908 909 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) 910 { 911 dma_addr_t rss_cpu_buf_pa; 912 union vnic_rss_cpu *rss_cpu_buf_va = NULL; 913 int i; 914 int err; 915 u8 name[NAME_MAX]; 916 917 snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name); 918 rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu), 919 &rss_cpu_buf_pa, name); 920 if (!rss_cpu_buf_va) 921 return -ENOMEM; 922 923 for (i = 0; i < (1 << rss_hash_bits); i++) 924 (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] = 925 enic_sop_rq(i % enic->rq_count); 926 927 err = enic_set_rss_cpu(enic, 928 rss_cpu_buf_pa, 929 sizeof(union vnic_rss_cpu)); 930 931 enic_free_consistent(enic, sizeof(union vnic_rss_cpu), 932 rss_cpu_buf_va, rss_cpu_buf_pa); 933 934 return err; 935 } 936 937 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, 938 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable) 939 { 940 const u8 tso_ipid_split_en = 0; 941 int err; 942 943 /* Enable VLAN tag stripping */ 944 945 err = enic_set_nic_cfg(enic, 946 rss_default_cpu, rss_hash_type, 947 rss_hash_bits, rss_base_cpu, 948 rss_enable, tso_ipid_split_en, 949 enic->ig_vlan_strip_en); 950 951 return err; 952 } 953 954 int enic_set_rss_nic_cfg(struct enic *enic) 955 { 956 const u8 rss_default_cpu = 0; 957 const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | 958 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | 959 NIC_CFG_RSS_HASH_TYPE_IPV6 | 960 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; 961 const u8 rss_hash_bits = 7; 962 const u8 rss_base_cpu = 0; 963 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); 964 965 if (rss_enable) { 966 if (!enic_set_rsskey(enic)) { 967 if (enic_set_rsscpu(enic, rss_hash_bits)) { 968 rss_enable = 0; 969 dev_warning(enic, "RSS disabled, "\ 970 "Failed to set RSS cpu indirection table."); 971 } 972 } else { 973 rss_enable = 0; 974 dev_warning(enic, 975 "RSS disabled, Failed to set RSS key.\n"); 976 } 977 } 978 979 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, 980 rss_hash_bits, rss_base_cpu, rss_enable); 981 } 982 983 int enic_setup_finish(struct enic *enic) 984 { 985 int ret; 986 987 enic_init_soft_stats(enic); 988 989 ret = enic_set_rss_nic_cfg(enic); 990 if (ret) { 991 dev_err(enic, "Failed to config nic, aborting.\n"); 992 return -1; 993 } 994 995 /* Default conf */ 996 vnic_dev_packet_filter(enic->vdev, 997 1 /* directed */, 998 1 /* multicast */, 999 1 /* broadcast */, 1000 0 /* promisc */, 1001 1 /* allmulti */); 1002 1003 enic->promisc = 0; 1004 enic->allmulti = 1; 1005 1006 return 0; 1007 } 1008 1009 void enic_add_packet_filter(struct enic *enic) 1010 { 1011 /* Args -> directed, multicast, broadcast, promisc, allmulti */ 1012 vnic_dev_packet_filter(enic->vdev, 1, 1, 1, 1013 enic->promisc, enic->allmulti); 1014 } 1015 1016 int enic_get_link_status(struct enic *enic) 1017 { 1018 return vnic_dev_link_status(enic->vdev); 1019 } 1020 1021 static void enic_dev_deinit(struct enic *enic) 1022 { 1023 struct rte_eth_dev *eth_dev = enic->rte_dev; 1024 1025 rte_free(eth_dev->data->mac_addrs); 1026 } 1027 1028 1029 int enic_set_vnic_res(struct enic *enic) 1030 { 1031 struct rte_eth_dev *eth_dev = enic->rte_dev; 1032 int rc = 0; 1033 1034 /* With Rx scatter support, two RQs are now used per RQ used by 1035 * the application. 1036 */ 1037 if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) { 1038 dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n", 1039 eth_dev->data->nb_rx_queues, 1040 eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count); 1041 rc = -EINVAL; 1042 } 1043 if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) { 1044 dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n", 1045 eth_dev->data->nb_tx_queues, enic->conf_wq_count); 1046 rc = -EINVAL; 1047 } 1048 1049 if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues + 1050 eth_dev->data->nb_tx_queues)) { 1051 dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n", 1052 (eth_dev->data->nb_rx_queues + 1053 eth_dev->data->nb_tx_queues), enic->conf_cq_count); 1054 rc = -EINVAL; 1055 } 1056 1057 if (rc == 0) { 1058 enic->rq_count = eth_dev->data->nb_rx_queues; 1059 enic->wq_count = eth_dev->data->nb_tx_queues; 1060 enic->cq_count = enic->rq_count + enic->wq_count; 1061 } 1062 1063 return rc; 1064 } 1065 1066 /* The Cisco NIC can send and receive packets up to a max packet size 1067 * determined by the NIC type and firmware. There is also an MTU 1068 * configured into the NIC via the CIMC/UCSM management interface 1069 * which can be overridden by this function (up to the max packet size). 1070 * Depending on the network setup, doing so may cause packet drops 1071 * and unexpected behavior. 1072 */ 1073 int enic_set_mtu(struct enic *enic, uint16_t new_mtu) 1074 { 1075 uint16_t old_mtu; /* previous setting */ 1076 uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */ 1077 struct rte_eth_dev *eth_dev = enic->rte_dev; 1078 1079 old_mtu = eth_dev->data->mtu; 1080 config_mtu = enic->config.mtu; 1081 1082 /* only works with Rx scatter disabled */ 1083 if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) 1084 return -ENOTSUP; 1085 1086 if (new_mtu > enic->max_mtu) { 1087 dev_err(enic, 1088 "MTU not updated: requested (%u) greater than max (%u)\n", 1089 new_mtu, enic->max_mtu); 1090 return -EINVAL; 1091 } 1092 if (new_mtu < ENIC_MIN_MTU) { 1093 dev_info(enic, 1094 "MTU not updated: requested (%u) less than min (%u)\n", 1095 new_mtu, ENIC_MIN_MTU); 1096 return -EINVAL; 1097 } 1098 if (new_mtu > config_mtu) 1099 dev_warning(enic, 1100 "MTU (%u) is greater than value configured in NIC (%u)\n", 1101 new_mtu, config_mtu); 1102 1103 /* update the mtu */ 1104 eth_dev->data->mtu = new_mtu; 1105 1106 dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu); 1107 return 0; 1108 } 1109 1110 static int enic_dev_init(struct enic *enic) 1111 { 1112 int err; 1113 struct rte_eth_dev *eth_dev = enic->rte_dev; 1114 1115 vnic_dev_intr_coal_timer_info_default(enic->vdev); 1116 1117 /* Get vNIC configuration 1118 */ 1119 err = enic_get_vnic_config(enic); 1120 if (err) { 1121 dev_err(dev, "Get vNIC configuration failed, aborting\n"); 1122 return err; 1123 } 1124 1125 eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0); 1126 if (!eth_dev->data->mac_addrs) { 1127 dev_err(enic, "mac addr storage alloc failed, aborting.\n"); 1128 return -1; 1129 } 1130 ether_addr_copy((struct ether_addr *) enic->mac_addr, 1131 ð_dev->data->mac_addrs[0]); 1132 1133 1134 /* Get available resource counts 1135 */ 1136 enic_get_res_counts(enic); 1137 1138 vnic_dev_set_reset_flag(enic->vdev, 0); 1139 1140 return 0; 1141 1142 } 1143 1144 int enic_probe(struct enic *enic) 1145 { 1146 struct rte_pci_device *pdev = enic->pdev; 1147 int err = -1; 1148 1149 dev_debug(enic, " Initializing ENIC PMD\n"); 1150 1151 enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr; 1152 enic->bar0.len = pdev->mem_resource[0].len; 1153 1154 /* Register vNIC device */ 1155 enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1); 1156 if (!enic->vdev) { 1157 dev_err(enic, "vNIC registration failed, aborting\n"); 1158 goto err_out; 1159 } 1160 1161 LIST_INIT(&enic->memzone_list); 1162 rte_spinlock_init(&enic->memzone_list_lock); 1163 1164 vnic_register_cbacks(enic->vdev, 1165 enic_alloc_consistent, 1166 enic_free_consistent); 1167 1168 /* Issue device open to get device in known state */ 1169 err = enic_dev_open(enic); 1170 if (err) { 1171 dev_err(enic, "vNIC dev open failed, aborting\n"); 1172 goto err_out_unregister; 1173 } 1174 1175 /* Set ingress vlan rewrite mode before vnic initialization */ 1176 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, 1177 IG_VLAN_REWRITE_MODE_PASS_THRU); 1178 if (err) { 1179 dev_err(enic, 1180 "Failed to set ingress vlan rewrite mode, aborting.\n"); 1181 goto err_out_dev_close; 1182 } 1183 1184 /* Issue device init to initialize the vnic-to-switch link. 1185 * We'll start with carrier off and wait for link UP 1186 * notification later to turn on carrier. We don't need 1187 * to wait here for the vnic-to-switch link initialization 1188 * to complete; link UP notification is the indication that 1189 * the process is complete. 1190 */ 1191 1192 err = vnic_dev_init(enic->vdev, 0); 1193 if (err) { 1194 dev_err(enic, "vNIC dev init failed, aborting\n"); 1195 goto err_out_dev_close; 1196 } 1197 1198 err = enic_dev_init(enic); 1199 if (err) { 1200 dev_err(enic, "Device initialization failed, aborting\n"); 1201 goto err_out_dev_close; 1202 } 1203 1204 return 0; 1205 1206 err_out_dev_close: 1207 vnic_dev_close(enic->vdev); 1208 err_out_unregister: 1209 vnic_dev_unregister(enic->vdev); 1210 err_out: 1211 return err; 1212 } 1213 1214 void enic_remove(struct enic *enic) 1215 { 1216 enic_dev_deinit(enic); 1217 vnic_dev_close(enic->vdev); 1218 vnic_dev_unregister(enic->vdev); 1219 } 1220