/* $NetBSD: ixv.c,v 1.183 2022/07/06 06:31:47 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.183 2022/07/06 06:31:47 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include "ixgbe.h"

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";
/* XXX NetBSD: + 1.5.17 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
    /* required last entry */
    {0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixv_strings[] = {
    "Intel(R) PRO/10GbE Virtual Function Network Driver"
};
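/*
 * Note: a subvendor/subdevice ID of 0 in the table above acts as a
 * wildcard; ixv_lookup() below treats 0 as "match any subsystem ID".
 */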
/*********************************************************************
 *                    Function prototypes
 *********************************************************************/
static int  ixv_probe(device_t, cfdata_t, void *);
static void ixv_attach(device_t, device_t, void *);
static int  ixv_detach(device_t, int);
#if 0
static int  ixv_shutdown(device_t);
#endif
static int  ixv_ifflags_cb(struct ethercom *);
static int  ixv_ioctl(struct ifnet *, u_long, void *);
static int  ixv_init(struct ifnet *);
static void ixv_init_locked(struct adapter *);
static void ixv_ifstop(struct ifnet *, int);
static void ixv_stop_locked(void *);
static void ixv_init_device_features(struct adapter *);
static void ixv_media_status(struct ifnet *, struct ifmediareq *);
static int  ixv_media_change(struct ifnet *);
static int  ixv_allocate_pci_resources(struct adapter *,
    const struct pci_attach_args *);
static void ixv_free_deferred_handlers(struct adapter *);
static int  ixv_allocate_msix(struct adapter *,
    const struct pci_attach_args *);
static int  ixv_configure_interrupts(struct adapter *);
static void ixv_free_pci_resources(struct adapter *);
static void ixv_local_timer(void *);
static void ixv_handle_timer(struct work *, void *);
static int  ixv_setup_interface(device_t, struct adapter *);
static void ixv_schedule_admin_tasklet(struct adapter *);
static int  ixv_negotiate_api(struct adapter *);

static void ixv_initialize_transmit_units(struct adapter *);
static void ixv_initialize_receive_units(struct adapter *);
static void ixv_initialize_rss_mapping(struct adapter *);
static s32  ixv_check_link(struct adapter *);

static void ixv_enable_intr(struct adapter *);
static void ixv_disable_intr(struct adapter *);
static int  ixv_set_rxfilter(struct adapter *);
static void ixv_update_link_status(struct adapter *);
static int  ixv_sysctl_debug(SYSCTLFN_PROTO);
static void ixv_set_ivar(struct adapter *, u8, u8, s8);
static void ixv_configure_ivars(struct adapter *);
static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t);

static void ixv_setup_vlan_tagging(struct adapter *);
static int  ixv_setup_vlan_support(struct adapter *);
static int  ixv_vlan_cb(struct ethercom *, uint16_t, bool);
static int  ixv_register_vlan(struct adapter *, u16);
static int  ixv_unregister_vlan(struct adapter *, u16);

static void ixv_add_device_sysctls(struct adapter *);
static void ixv_init_stats(struct adapter *);
static void ixv_update_stats(struct adapter *);
static void ixv_add_stats_sysctls(struct adapter *);
static void ixv_clear_evcnt(struct adapter *);

/* Sysctl handlers */
static int  ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int  ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int  ixv_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
static int  ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int  ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int  ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int  ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int  ixv_sysctl_tx_process_limit(SYSCTLFN_PROTO);
static int  ixv_sysctl_rx_process_limit(SYSCTLFN_PROTO);
static int  ixv_sysctl_rx_copy_len(SYSCTLFN_PROTO);

/* The MSI-X Interrupt handlers */
static int  ixv_msix_que(void *);
static int  ixv_msix_mbx(void *);
/* Event handlers running on workqueue */
static void ixv_handle_que(void *);

/* Deferred workqueue handlers */
static void ixv_handle_admin(struct work *, void *);
static void ixv_handle_que_work(struct work *, void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
static driver_t ixv_driver = {
    "ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate is varied over time based on the
 * traffic for that interrupt vector.
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Whether packet processing runs in a workqueue or a softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
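/*
 * Note: TUNABLE_INT is defined away above, so on NetBSD the values in
 * this block are compile-time defaults; the runtime knobs are the
 * sysctls this driver registers (e.g. ixv_sysctl_interrupt_rate_handler()
 * for the per-queue interrupt rate).  For the default rate cap, with
 * IXGBE_LOW_LATENCY defined as 128 in ixgbe.h, 4000000 / 128 works out
 * to 31250 interrupts/sec.
 */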
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif

/************************************************************************
 * ixv_probe - Device identification routine
 *
 *   Determines if the driver should be loaded on
 *   adapter based on its PCI vendor/device ID.
 *
 *   return 1 on match, 0 otherwise (NetBSD autoconf match semantics)
 ************************************************************************/
static int
ixv_probe(device_t dev, cfdata_t cf, void *aux)
{
#ifdef __HAVE_PCI_MSI_MSIX
    const struct pci_attach_args *pa = aux;

    return (ixv_lookup(pa) != NULL) ? 1 : 0;
#else
    return 0;
#endif
} /* ixv_probe */

static const ixgbe_vendor_info_t *
ixv_lookup(const struct pci_attach_args *pa)
{
    const ixgbe_vendor_info_t *ent;
    pcireg_t subid;

    INIT_DEBUGOUT("ixv_lookup: begin");

    if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
        return NULL;

    subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

    for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
        if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
            (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
            ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
             (ent->subvendor_id == 0)) &&
            ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
             (ent->subdevice_id == 0))) {
            return ent;
        }
    }

    return NULL;
}

/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
    struct adapter *adapter;
    struct ixgbe_hw *hw;
    int error = 0;
    pcireg_t id, subid;
    const ixgbe_vendor_info_t *ent;
    const struct pci_attach_args *pa = aux;
    const char *apivstr;
    const char *str;
    char wqname[MAXCOMLEN];
    char buf[256];

    INIT_DEBUGOUT("ixv_attach: begin");
    /*
     * Make sure BUSMASTER is set, on a VM under
     * KVM it may not be and will break things.
     */
    ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

    /* Allocate, clear, and link in our adapter structure */
    adapter = device_private(dev);
    adapter->hw.back = adapter;
    adapter->dev = dev;
    hw = &adapter->hw;

    adapter->init_locked = ixv_init_locked;
    adapter->stop_locked = ixv_stop_locked;

    adapter->osdep.pc = pa->pa_pc;
    adapter->osdep.tag = pa->pa_tag;
    if (pci_dma64_available(pa))
        adapter->osdep.dmat = pa->pa_dmat64;
    else
        adapter->osdep.dmat = pa->pa_dmat;
    adapter->osdep.attached = false;

    ent = ixv_lookup(pa);

    KASSERT(ent != NULL);

    aprint_normal(": %s, Version - %s\n",
        ixv_strings[ent->index], ixv_driver_version);

    /* Core Lock Init */
    IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

    /* Do base PCI setup - map BAR0 */
    if (ixv_allocate_pci_resources(adapter, pa)) {
        aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
        error = ENXIO;
        goto err_out;
    }

    /* SYSCTL APIs */
    ixv_add_device_sysctls(adapter);

    /* Set up the timer callout and workqueue */
    callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
    error = workqueue_create(&adapter->timer_wq, wqname,
        ixv_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
        IXGBE_TASKLET_WQ_FLAGS);
    if (error) {
        aprint_error_dev(dev,
            "could not create timer workqueue (%d)\n", error);
        goto err_out;
    }

    /* Save off the information about this board */
    id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    hw->vendor_id = PCI_VENDOR(id);
    hw->device_id = PCI_PRODUCT(id);
    hw->revision_id =
        PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

    /* A subset of set_mac_type */
    switch (hw->device_id) {
    case IXGBE_DEV_ID_82599_VF:
        hw->mac.type = ixgbe_mac_82599_vf;
        str = "82599 VF";
        break;
    case IXGBE_DEV_ID_X540_VF:
        hw->mac.type = ixgbe_mac_X540_vf;
        str = "X540 VF";
        break;
    case IXGBE_DEV_ID_X550_VF:
        hw->mac.type = ixgbe_mac_X550_vf;
        str = "X550 VF";
        break;
    case IXGBE_DEV_ID_X550EM_X_VF:
        hw->mac.type = ixgbe_mac_X550EM_x_vf;
        str = "X550EM X VF";
        break;
    case IXGBE_DEV_ID_X550EM_A_VF:
        hw->mac.type = ixgbe_mac_X550EM_a_vf;
        str = "X550EM A VF";
        break;
    default:
        /* Shouldn't get here since probe succeeded */
        aprint_error_dev(dev, "Unknown device ID!\n");
        error = ENXIO;
        goto err_out;
        break;
    }
    aprint_normal_dev(dev, "device %s\n", str);

    ixv_init_device_features(adapter);

    /* Initialize the shared code */
    error = ixgbe_init_ops_vf(hw);
    if (error) {
        aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
        error = EIO;
        goto err_out;
    }

    /* Setup the mailbox */
    ixgbe_init_mbx_params_vf(hw);

    /* Set the right number of segments */
    KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
    adapter->num_segs = IXGBE_SCATTER_DEFAULT;

    /* Reset mbox api to 1.0 */
    error = hw->mac.ops.reset_hw(hw);
    if (error == IXGBE_ERR_RESET_FAILED)
        aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    else if (error)
        aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
            error);
    if (error) {
        error = EIO;
        goto err_out;
    }

    error = hw->mac.ops.init_hw(hw);
    if (error) {
        aprint_error_dev(dev, "...init_hw() failed!\n");
        error = EIO;
        goto err_out;
    }

    /* Negotiate mailbox API version */
    error = ixv_negotiate_api(adapter);
    if (error)
        aprint_normal_dev(dev,
            "MBX API negotiation failed during attach!\n");
    switch (hw->api_version) {
    case ixgbe_mbox_api_10:
        apivstr = "1.0";
        break;
    case ixgbe_mbox_api_20:
        apivstr = "2.0";
        break;
    case ixgbe_mbox_api_11:
        apivstr = "1.1";
        break;
    case ixgbe_mbox_api_12:
        apivstr = "1.2";
        break;
    case ixgbe_mbox_api_13:
        apivstr = "1.3";
        break;
    case ixgbe_mbox_api_14:
        apivstr = "1.4";
        break;
    case ixgbe_mbox_api_15:
        apivstr = "1.5";
        break;
    default:
        apivstr = "unknown";
        break;
    }
    aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

    /* If no mac address was assigned, make a random one */
    if (!ixv_check_ether_addr(hw->mac.addr)) {
        u8 addr[ETHER_ADDR_LEN];
        uint64_t rndval = cprng_strong64();

        memcpy(addr, &rndval, sizeof(addr));
        addr[0] &= 0xFE;
        addr[0] |= 0x02;
        bcopy(addr, hw->mac.addr, sizeof(addr));
    }
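    /*
     * The bit operations above yield a valid random unicast address:
     * "addr[0] &= 0xFE" clears the multicast (I/G) bit and
     * "addr[0] |= 0x02" sets the locally-administered (U/L) bit, so
     * the result cannot collide with any vendor-assigned (OUI) MAC.
     */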
    /* Register for VLAN events */
    ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);

    /* Do descriptor calc and sanity checks */
    if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
        ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
        aprint_error_dev(dev, "TXD config issue, using default!\n");
        adapter->num_tx_desc = DEFAULT_TXD;
    } else
        adapter->num_tx_desc = ixv_txd;

    if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
        ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
        aprint_error_dev(dev, "RXD config issue, using default!\n");
        adapter->num_rx_desc = DEFAULT_RXD;
    } else
        adapter->num_rx_desc = ixv_rxd;

    /* Sysctls for limiting the amount of work done in the taskqueues */
    adapter->rx_process_limit
        = (ixv_rx_process_limit <= adapter->num_rx_desc)
        ? ixv_rx_process_limit : adapter->num_rx_desc;
    adapter->tx_process_limit
        = (ixv_tx_process_limit <= adapter->num_tx_desc)
        ? ixv_tx_process_limit : adapter->num_tx_desc;

    /* Set default high limit of copying mbuf in rxeof */
    adapter->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;

    /* Setup MSI-X */
    error = ixv_configure_interrupts(adapter);
    if (error)
        goto err_out;

    /* Allocate our TX/RX Queues */
    if (ixgbe_allocate_queues(adapter)) {
        aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
        error = ENOMEM;
        goto err_out;
    }

    /* hw.ix defaults init */
    adapter->enable_aim = ixv_enable_aim;

    adapter->txrx_use_workqueue = ixv_txrx_workqueue;

    error = ixv_allocate_msix(adapter, pa);
    if (error) {
        aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
        goto err_late;
    }

    /* Setup OS specific network interface */
    error = ixv_setup_interface(dev, adapter);
    if (error != 0) {
        aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
        goto err_late;
    }

    /* Allocate multicast array memory */
    adapter->mta = malloc(sizeof(*adapter->mta) *
        IXGBE_MAX_VF_MC, M_DEVBUF, M_WAITOK);

    /* Do the stats setup */
    ixv_init_stats(adapter);
    ixv_add_stats_sysctls(adapter);

    if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
        ixgbe_netmap_attach(adapter);

    snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    aprint_verbose_dev(dev, "feature cap %s\n", buf);
    snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    aprint_verbose_dev(dev, "feature ena %s\n", buf);

    INIT_DEBUGOUT("ixv_attach: end");
    adapter->osdep.attached = true;

    return;

err_late:
    ixgbe_free_queues(adapter);
err_out:
    ixv_free_pci_resources(adapter);
    IXGBE_CORE_LOCK_DESTROY(adapter);

    return;
} /* ixv_attach */

/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
    struct adapter *adapter = device_private(dev);
    struct ixgbe_hw *hw = &adapter->hw;
    struct tx_ring *txr = adapter->tx_rings;
    struct rx_ring *rxr = adapter->rx_rings;
    struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

    INIT_DEBUGOUT("ixv_detach: begin");
    if (adapter->osdep.attached == false)
        return 0;
    /* Stop the interface. Callouts are stopped in it. */
    ixv_ifstop(adapter->ifp, 1);

    if (VLAN_ATTACHED(&adapter->osdep.ec) &&
        (flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == 0) {
        aprint_error_dev(dev, "VLANs in use, detach first\n");
        return EBUSY;
    }

    ether_ifdetach(adapter->ifp);
    callout_halt(&adapter->timer, NULL);
    ixv_free_deferred_handlers(adapter);

    if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
        netmap_detach(adapter->ifp);

    ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
    bus_generic_detach(dev);
#endif
    if_detach(adapter->ifp);
    ifmedia_fini(&adapter->media);
    if_percpuq_destroy(adapter->ipq);

    sysctl_teardown(&adapter->sysctllog);
    evcnt_detach(&adapter->efbig_tx_dma_setup);
    evcnt_detach(&adapter->mbuf_defrag_failed);
    evcnt_detach(&adapter->efbig2_tx_dma_setup);
    evcnt_detach(&adapter->einval_tx_dma_setup);
    evcnt_detach(&adapter->other_tx_dma_setup);
    evcnt_detach(&adapter->eagain_tx_dma_setup);
    evcnt_detach(&adapter->enomem_tx_dma_setup);
    evcnt_detach(&adapter->watchdog_events);
    evcnt_detach(&adapter->tso_err);
    evcnt_detach(&adapter->admin_irqev);
    evcnt_detach(&adapter->link_workev);

    txr = adapter->tx_rings;
    for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
        evcnt_detach(&adapter->queues[i].irqs);
        evcnt_detach(&adapter->queues[i].handleq);
        evcnt_detach(&adapter->queues[i].req);
        evcnt_detach(&txr->no_desc_avail);
        evcnt_detach(&txr->total_packets);
        evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
        evcnt_detach(&txr->pcq_drops);
#endif

        evcnt_detach(&rxr->rx_packets);
        evcnt_detach(&rxr->rx_bytes);
        evcnt_detach(&rxr->rx_copies);
        evcnt_detach(&rxr->no_mbuf);
        evcnt_detach(&rxr->rx_discarded);
    }
    evcnt_detach(&stats->ipcs);
    evcnt_detach(&stats->l4cs);
    evcnt_detach(&stats->ipcs_bad);
    evcnt_detach(&stats->l4cs_bad);

    /* Packet Reception Stats */
    evcnt_detach(&stats->vfgorc);
    evcnt_detach(&stats->vfgprc);
    evcnt_detach(&stats->vfmprc);

    /* Packet Transmission Stats */
    evcnt_detach(&stats->vfgotc);
    evcnt_detach(&stats->vfgptc);

    /* Mailbox Stats */
    evcnt_detach(&hw->mbx.stats.msgs_tx);
    evcnt_detach(&hw->mbx.stats.msgs_rx);
    evcnt_detach(&hw->mbx.stats.acks);
    evcnt_detach(&hw->mbx.stats.reqs);
    evcnt_detach(&hw->mbx.stats.rsts);

    ixgbe_free_queues(adapter);

    IXGBE_CORE_LOCK_DESTROY(adapter);

    return (0);
} /* ixv_detach */
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    device_t dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ix_queue *que;
    int error = 0;
    uint32_t mask;
    int i;

    INIT_DEBUGOUT("ixv_init_locked: begin");
    KASSERT(mutex_owned(&adapter->core_mtx));
    hw->adapter_stopped = FALSE;
    hw->mac.ops.stop_adapter(hw);
    callout_stop(&adapter->timer);
    for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
        que->disabled_count = 0;

    adapter->max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /* reprogram the RAR[0] in case user changed it. */
    hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* Get the latest MAC address; the user may have set an LAA */
    memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
        IXGBE_ETH_LENGTH_OF_ADDRESS);
    hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

    /* Prepare transmit descriptors and buffers */
    if (ixgbe_setup_transmit_structures(adapter)) {
        aprint_error_dev(dev, "Could not setup transmit structures\n");
        ixv_stop_locked(adapter);
        return;
    }

    /* Reset VF and renegotiate mailbox API version */
    hw->mac.ops.reset_hw(hw);
    hw->mac.ops.start_hw(hw);
    error = ixv_negotiate_api(adapter);
    if (error)
        device_printf(dev,
            "Mailbox API negotiation failed in init_locked!\n");

    ixv_initialize_transmit_units(adapter);

    /* Setup Multicast table */
    ixv_set_rxfilter(adapter);

    /* Use fixed buffer size, even for jumbo frames */
    adapter->rx_mbuf_sz = MCLBYTES;

    /* Prepare receive descriptors and buffers */
    error = ixgbe_setup_receive_structures(adapter);
    if (error) {
        device_printf(dev,
            "Could not setup receive structures (err = %d)\n", error);
        ixv_stop_locked(adapter);
        return;
    }

    /* Configure RX settings */
    ixv_initialize_receive_units(adapter);

    /* Initialize variable holding task enqueue requests interrupts */
    adapter->task_requests = 0;

    /* Set up VLAN offload and filter */
    ixv_setup_vlan_support(adapter);

    /* Set up MSI-X routing */
    ixv_configure_ivars(adapter);

    /* Set up auto-mask */
    mask = (1 << adapter->vector);
    for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
        mask |= (1 << que->msix);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

    /* Set moderation on the Link interrupt */
    ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

    /* Stats init */
    ixv_init_stats(adapter);

    /* Config/Enable Link */
    hw->mac.get_link_status = TRUE;
    hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
        FALSE);

    /* Start watchdog */
    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    atomic_store_relaxed(&adapter->timer_pending, 0);

    /* OK to schedule workqueues. */
    adapter->schedule_wqs_ok = true;

    /* And now turn on interrupts */
    ixv_enable_intr(adapter);

    /* Update saved flags. See ixgbe_ifflags_cb() */
    adapter->if_flags = ifp->if_flags;
    adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

    /* Now inform the stack we're ready */
    ifp->if_flags |= IFF_RUNNING;
    ifp->if_flags &= ~IFF_OACTIVE;

    return;
} /* ixv_init_locked */
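/*
 * The two helpers below keep a per-queue nesting count so that
 * enable/disable pairs may nest: ixv_disable_queue() only masks the
 * vector in VTEIMC on the 0 -> 1 transition of disabled_count, and
 * ixv_enable_queue() only unmasks it in VTEIMS once the count drops
 * back to zero.
 */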
/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct ix_queue *que = &adapter->queues[vector];
    u32 queue = 1UL << vector;
    u32 mask;

    mutex_enter(&que->dc_mtx);
    if (que->disabled_count > 0 && --que->disabled_count > 0)
        goto out;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
out:
    mutex_exit(&que->dc_mtx);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct ix_queue *que = &adapter->queues[vector];
    u32 queue = 1UL << vector;
    u32 mask;

    mutex_enter(&que->dc_mtx);
    if (que->disabled_count++ > 0)
        goto out;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
out:
    mutex_exit(&que->dc_mtx);
} /* ixv_disable_queue */

#if 0
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
    u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif


/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
    struct ix_queue *que = arg;
    struct adapter *adapter = que->adapter;
    struct tx_ring *txr = que->txr;
    struct rx_ring *rxr = que->rxr;
    bool more;
    u32 newitr = 0;

    ixv_disable_queue(adapter, que->msix);
    IXGBE_EVC_ADD(&que->irqs, 1);

#ifdef __NetBSD__
    /* Don't run ixgbe_rxeof in interrupt context */
    more = true;
#else
    more = ixgbe_rxeof(que);
#endif

    IXGBE_TX_LOCK(txr);
    ixgbe_txeof(txr);
    IXGBE_TX_UNLOCK(txr);

    /* Do AIM now? */

    if (adapter->enable_aim == false)
        goto no_calc;
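    /*
     * Illustrative arithmetic for the calculation below: with an
     * average frame of 1500 bytes, newitr = 1500 + 24 = 1524; the
     * 3000 upper bound leaves it unchanged, and since 1524 falls
     * outside the 300..1200 mid range it is halved to 762 before
     * being saved as the setting for the next interrupt.
     */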
    /*
     * Do Adaptive Interrupt Moderation:
     *  - Write out last calculated setting
     *  - Calculate based on average size over
     *    the last interval.
     */
    if (que->eitr_setting)
        ixv_eitr_write(adapter, que->msix, que->eitr_setting);

    que->eitr_setting = 0;

    /* Idle, do nothing */
    if ((txr->bytes == 0) && (rxr->bytes == 0))
        goto no_calc;

    if ((txr->bytes) && (txr->packets))
        newitr = txr->bytes/txr->packets;
    if ((rxr->bytes) && (rxr->packets))
        newitr = uimax(newitr, (rxr->bytes / rxr->packets));
    newitr += 24; /* account for hardware frame, crc */

    /* set an upper boundary */
    newitr = uimin(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200))
        newitr = (newitr / 3);
    else
        newitr = (newitr / 2);

    /*
     * When RSC is used, ITR interval must be larger than RSC_DELAY.
     * Currently, we use 2us for RSC_DELAY. The minimum value is always
     * greater than 2us on 100M (and 10M?(not documented)), but it's not
     * on 1G and higher.
     */
    if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
        && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
        if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
            newitr = IXGBE_MIN_RSC_EITR_10G1G;
    }

    /* save for next interrupt */
    que->eitr_setting = newitr;

    /* Reset state */
    txr->bytes = 0;
    txr->packets = 0;
    rxr->bytes = 0;
    rxr->packets = 0;

no_calc:
    if (more)
        softint_schedule(que->que_si);
    else /* Re-enable this interrupt */
        ixv_enable_queue(adapter, que->msix);

    return 1;
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
    struct adapter *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;

    IXGBE_EVC_ADD(&adapter->admin_irqev, 1);
    /* NetBSD: We use auto-clear, so it's not required to write VTEICR */

    /* Link status change */
    hw->mac.get_link_status = TRUE;
    atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MBX);
    ixv_schedule_admin_tasklet(adapter);

    return 1;
} /* ixv_msix_mbx */
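/*
 * Note on the helper below: EITR writes are done with
 * IXGBE_EITR_CNT_WDIS ("counter write disable") set, so programming a
 * new interval does not also overwrite the ITR counter field, i.e.
 * the hardware's current throttle countdown is left undisturbed.
 */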
static void
ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
{

    /*
     * Only devices newer than the 82598 have a VF function, so unlike
     * the PF driver no per-MAC-type handling is needed here.
     */
    itr |= IXGBE_EITR_CNT_WDIS;

    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
}


/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct adapter *adapter = ifp->if_softc;

    INIT_DEBUGOUT("ixv_media_status: begin");
    ixv_update_link_status(adapter);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (adapter->link_active != LINK_STATE_UP) {
        ifmr->ifm_active |= IFM_NONE;
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (adapter->link_speed) {
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_5GB_FULL:
        ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_2_5GB_FULL:
        ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_100_FULL:
        ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10_FULL:
        ifmr->ifm_active |= IFM_10_T | IFM_FDX;
        break;
    }

    ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
} /* ixv_media_status */

/************************************************************************
 * ixv_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifmedia *ifm = &adapter->media;

    INIT_DEBUGOUT("ixv_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        device_printf(adapter->dev, "Only auto media type\n");
        return (EINVAL);
    }

    return (0);
} /* ixv_media_change */

static void
ixv_schedule_admin_tasklet(struct adapter *adapter)
{
    if (adapter->schedule_wqs_ok) {
        if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
            workqueue_enqueue(adapter->admin_wq,
                &adapter->admin_wc, NULL);
    }
}

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int mbx_api[] = { ixgbe_mbox_api_15,
                      ixgbe_mbox_api_13,
                      ixgbe_mbox_api_12,
                      ixgbe_mbox_api_11,
                      ixgbe_mbox_api_10,
                      ixgbe_mbox_api_unknown };
    int i = 0;

    while (mbx_api[i] != ixgbe_mbox_api_unknown) {
        if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) {
            if (hw->api_version >= ixgbe_mbox_api_15)
                ixgbe_upgrade_mbx_params_vf(hw);
            return (0);
        }
        i++;
    }

    return (EINVAL);
} /* ixv_negotiate_api */
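/*
 * Overview of the filter update below: the VF cannot program receive
 * filters directly, so everything goes through mailbox requests to
 * the PF.  The function tries the most permissive mode the interface
 * flags ask for first (PROMISC), falls back to ALLMULTI when the
 * multicast list overflows IXGBE_MAX_VF_MC or contains ranges, and
 * otherwise programs the exact multicast list in MULTI mode.  Errors
 * are mapped to errno values, but the table update continues as far
 * as the PF allows.
 */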
/************************************************************************
 * ixv_set_rxfilter - Multicast Update
 *
 *   Called whenever the multicast address list is updated.
 ************************************************************************/
static int
ixv_set_rxfilter(struct adapter *adapter)
{
    struct ixgbe_mc_addr *mta;
    struct ifnet *ifp = adapter->ifp;
    struct ixgbe_hw *hw = &adapter->hw;
    u8 *update_ptr;
    int mcnt = 0;
    struct ethercom *ec = &adapter->osdep.ec;
    struct ether_multi *enm;
    struct ether_multistep step;
    bool overflow = false;
    int error, rc = 0;

    KASSERT(mutex_owned(&adapter->core_mtx));
    IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");

    mta = adapter->mta;
    bzero(mta, sizeof(*mta) * IXGBE_MAX_VF_MC);

    /* 1: For PROMISC */
    if (ifp->if_flags & IFF_PROMISC) {
        error = hw->mac.ops.update_xcast_mode(hw,
            IXGBEVF_XCAST_MODE_PROMISC);
        if (error == IXGBE_ERR_NOT_TRUSTED) {
            device_printf(adapter->dev,
                "this interface is not trusted\n");
            error = EPERM;
        } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
            device_printf(adapter->dev,
                "the PF doesn't support promisc mode\n");
            error = EOPNOTSUPP;
        } else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
            device_printf(adapter->dev,
                "the PF may not be in promisc mode\n");
            error = EINVAL;
        } else if (error) {
            device_printf(adapter->dev,
                "failed to set promisc mode. error = %d\n",
                error);
            error = EIO;
        } else
            return 0;
        rc = error;
    }

    /* 2: For ALLMULTI or normal */
    ETHER_LOCK(ec);
    ETHER_FIRST_MULTI(step, ec, enm);
    while (enm != NULL) {
        if ((mcnt >= IXGBE_MAX_VF_MC) ||
            (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                ETHER_ADDR_LEN) != 0)) {
            overflow = true;
            break;
        }
        bcopy(enm->enm_addrlo,
            mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
        mcnt++;
        ETHER_NEXT_MULTI(step, enm);
    }
    ETHER_UNLOCK(ec);

    /* 3: For ALLMULTI */
    if (overflow) {
        error = hw->mac.ops.update_xcast_mode(hw,
            IXGBEVF_XCAST_MODE_ALLMULTI);
        if (error == IXGBE_ERR_NOT_TRUSTED) {
            device_printf(adapter->dev,
                "this interface is not trusted\n");
            error = EPERM;
        } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
            device_printf(adapter->dev,
                "the PF doesn't support allmulti mode\n");
            error = EOPNOTSUPP;
        } else if (error) {
            device_printf(adapter->dev,
                "number of Ethernet multicast addresses "
                "exceeds the limit (%d). error = %d\n",
                IXGBE_MAX_VF_MC, error);
            error = ENOSPC;
        } else {
            ETHER_LOCK(ec);
            ec->ec_flags |= ETHER_F_ALLMULTI;
            ETHER_UNLOCK(ec);
            return rc; /* Promisc might have failed */
        }

        if (rc == 0)
            rc = error;

        /* Continue to update the multicast table as many as we can */
    }

    /* 4: For normal operation */
    error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
    if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
        /* Normal operation */
        ETHER_LOCK(ec);
        ec->ec_flags &= ~ETHER_F_ALLMULTI;
        ETHER_UNLOCK(ec);
        error = 0;
    } else if (error) {
        device_printf(adapter->dev,
            "failed to set Ethernet multicast address "
            "operation to normal. error = %d\n", error);
    }
    update_ptr = (u8 *)mta;
    error = adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw,
        update_ptr, mcnt, ixv_mc_array_itr, TRUE);
    if (rc == 0)
        rc = error;

    return rc;
} /* ixv_set_rxfilter */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixv_set_rxfilter() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    struct ixgbe_mc_addr *mta;

    mta = (struct ixgbe_mc_addr *)*update_ptr;

    *vmdq = 0;
    *update_ptr = (u8 *)(mta + 1);

    return (mta->addr);
} /* ixv_mc_array_itr */
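/*
 * Deferral pattern used below (and by ixv_schedule_admin_tasklet()
 * above): the callout itself only enqueues work, and the
 * atomic_cas_uint() on the *_pending flag guarantees at most one
 * outstanding enqueue at a time; the workqueue handler clears the
 * flag again before rearming the callout.
 */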
/************************************************************************
 * ixv_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
    struct adapter *adapter = arg;

    if (adapter->schedule_wqs_ok) {
        if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
            workqueue_enqueue(adapter->timer_wq,
                &adapter->timer_wc, NULL);
    }
}

static void
ixv_handle_timer(struct work *wk, void *context)
{
    struct adapter *adapter = context;
    device_t dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    u64 queues = 0;
    u64 v0, v1, v2, v3, v4, v5, v6, v7;
    int hung = 0;
    int i;

    IXGBE_CORE_LOCK(adapter);

    if (ixv_check_link(adapter)) {
        ixv_init_locked(adapter);
        IXGBE_CORE_UNLOCK(adapter);
        return;
    }

    /* Stats Update */
    ixv_update_stats(adapter);

    /* Update some event counters */
    v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
    que = adapter->queues;
    for (i = 0; i < adapter->num_queues; i++, que++) {
        struct tx_ring *txr = que->txr;

        v0 += txr->q_efbig_tx_dma_setup;
        v1 += txr->q_mbuf_defrag_failed;
        v2 += txr->q_efbig2_tx_dma_setup;
        v3 += txr->q_einval_tx_dma_setup;
        v4 += txr->q_other_tx_dma_setup;
        v5 += txr->q_eagain_tx_dma_setup;
        v6 += txr->q_enomem_tx_dma_setup;
        v7 += txr->q_tso_err;
    }
    IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, v0);
    IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, v1);
    IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, v2);
    IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, v3);
    IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, v4);
    IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, v5);
    IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, v6);
    IXGBE_EVC_STORE(&adapter->tso_err, v7);

    /*
     * Check the TX queues status
     *   - mark hung queues so we don't schedule on them
     *   - watchdog only if all queues show hung
     */
    que = adapter->queues;
    for (i = 0; i < adapter->num_queues; i++, que++) {
        /* Keep track of queues with work for soft irq */
        if (que->txr->busy)
            queues |= ((u64)1 << que->me);
        /*
         * Each time txeof runs without cleaning while uncleaned
         * descriptors remain, it increments "busy". Once busy
         * reaches the MAX we declare the queue hung.
         */
        if (que->busy == IXGBE_QUEUE_HUNG) {
            ++hung;
            /* Mark the queue as inactive */
            adapter->active_queues &= ~((u64)1 << que->me);
            continue;
        } else {
            /* Check if we've come back from hung */
            if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                adapter->active_queues |= ((u64)1 << que->me);
        }
        if (que->busy >= IXGBE_MAX_TX_BUSY) {
            device_printf(dev,
                "Warning queue %d appears to be hung!\n", i);
            que->txr->busy = IXGBE_QUEUE_HUNG;
            ++hung;
        }
    }

    /* Only truly watchdog if all queues show hung */
    if (hung == adapter->num_queues)
        goto watchdog;
#if 0
    else if (queues != 0) { /* Force an IRQ on queues with work */
        ixv_rearm_queues(adapter, queues);
    }
#endif

    atomic_store_relaxed(&adapter->timer_pending, 0);
    IXGBE_CORE_UNLOCK(adapter);
    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

    return;

watchdog:
    device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
    adapter->ifp->if_flags &= ~IFF_RUNNING;
    IXGBE_EVC_ADD(&adapter->watchdog_events, 1);
    ixv_init_locked(adapter);
    IXGBE_CORE_UNLOCK(adapter);
} /* ixv_handle_timer */

/************************************************************************
 * ixv_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    device_t dev = adapter->dev;

    KASSERT(mutex_owned(&adapter->core_mtx));

    if (adapter->link_up) {
        if (adapter->link_active != LINK_STATE_UP) {
            if (bootverbose) {
                const char *bpsmsg;

                switch (adapter->link_speed) {
                case IXGBE_LINK_SPEED_10GB_FULL:
                    bpsmsg = "10 Gbps";
                    break;
                case IXGBE_LINK_SPEED_5GB_FULL:
                    bpsmsg = "5 Gbps";
                    break;
                case IXGBE_LINK_SPEED_2_5GB_FULL:
                    bpsmsg = "2.5 Gbps";
                    break;
                case IXGBE_LINK_SPEED_1GB_FULL:
                    bpsmsg = "1 Gbps";
                    break;
                case IXGBE_LINK_SPEED_100_FULL:
                    bpsmsg = "100 Mbps";
                    break;
                case IXGBE_LINK_SPEED_10_FULL:
                    bpsmsg = "10 Mbps";
                    break;
                default:
                    bpsmsg = "unknown speed";
                    break;
                }
                device_printf(dev, "Link is up %s %s\n",
                    bpsmsg, "Full Duplex");
            }
            adapter->link_active = LINK_STATE_UP;
            if_link_state_change(ifp, LINK_STATE_UP);
        }
    } else {
        /*
         * Do it when link active changes to DOWN. i.e.
         * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
         * b) LINK_STATE_UP      -> LINK_STATE_DOWN
         */
        if (adapter->link_active != LINK_STATE_DOWN) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            if_link_state_change(ifp, LINK_STATE_DOWN);
            adapter->link_active = LINK_STATE_DOWN;
        }
    }
} /* ixv_update_link_status */
/************************************************************************
 * ixv_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
    struct adapter *adapter = ifp->if_softc;

    IXGBE_CORE_LOCK(adapter);
    ixv_stop_locked(adapter);
    IXGBE_CORE_UNLOCK(adapter);

    workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
    atomic_store_relaxed(&adapter->admin_pending, 0);
    workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
    atomic_store_relaxed(&adapter->timer_pending, 0);
}

static void
ixv_stop_locked(void *arg)
{
    struct ifnet *ifp;
    struct adapter *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;

    ifp = adapter->ifp;

    KASSERT(mutex_owned(&adapter->core_mtx));

    INIT_DEBUGOUT("ixv_stop_locked: begin\n");
    ixv_disable_intr(adapter);

    /* Tell the stack that the interface is no longer active */
    ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

    hw->mac.ops.reset_hw(hw);
    adapter->hw.adapter_stopped = FALSE;
    hw->mac.ops.stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* Don't schedule workqueues. */
    adapter->schedule_wqs_ok = false;

    /* reprogram the RAR[0] in case user changed it. */
    hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    return;
} /* ixv_stop_locked */


/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
    pcireg_t memtype, csr;
    device_t dev = adapter->dev;
    bus_addr_t addr;
    int flags;

    memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
    switch (memtype) {
    case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
    case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
        adapter->osdep.mem_bus_space_tag = pa->pa_memt;
        if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
            memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
            goto map_err;
        if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
            aprint_normal_dev(dev, "clearing prefetchable bit\n");
            flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
        }
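        /*
         * The prefetchable bit is cleared above because device
         * registers have read/write side effects; mapping the BAR
         * non-prefetchable keeps the bus logic from merging or
         * caching accesses, even if the hardware advertises the
         * region as prefetchable.
         */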
        if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
            adapter->osdep.mem_size, flags,
            &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
            adapter->osdep.mem_size = 0;
            aprint_error_dev(dev, "unable to map BAR0\n");
            return ENXIO;
        }
        /*
         * Enable address decoding for memory range in case it's not
         * set.
         */
        csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
            PCI_COMMAND_STATUS_REG);
        csr |= PCI_COMMAND_MEM_ENABLE;
        pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
            csr);
        break;
    default:
        aprint_error_dev(dev, "unexpected type on BAR0\n");
        return ENXIO;
    }

    /* Pick up the tuneable queues */
    adapter->num_queues = ixv_num_queues;

    return (0);
} /* ixv_allocate_pci_resources */

static void
ixv_free_deferred_handlers(struct adapter *adapter)
{
    struct ix_queue *que = adapter->queues;
    struct tx_ring *txr = adapter->tx_rings;
    int i;

    for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
        if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
            if (txr->txr_si != NULL)
                softint_disestablish(txr->txr_si);
        }
        if (que->que_si != NULL)
            softint_disestablish(que->que_si);
    }
    if (adapter->txr_wq != NULL)
        workqueue_destroy(adapter->txr_wq);
    if (adapter->txr_wq_enqueued != NULL)
        percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
    if (adapter->que_wq != NULL)
        workqueue_destroy(adapter->que_wq);

    /* Drain the Mailbox(link) queue */
    if (adapter->admin_wq != NULL) {
        workqueue_destroy(adapter->admin_wq);
        adapter->admin_wq = NULL;
    }
    if (adapter->timer_wq != NULL) {
        workqueue_destroy(adapter->timer_wq);
        adapter->timer_wq = NULL;
    }
} /* ixv_free_deferred_handlers */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(struct adapter *adapter)
{
    struct ix_queue *que = adapter->queues;
    int rid;

    /*
     * Release all MSI-X queue resources:
     */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        if (que->res != NULL)
            pci_intr_disestablish(adapter->osdep.pc,
                adapter->osdep.ihs[i]);
    }

    /* Clean the Mailbox interrupt last */
    rid = adapter->vector;

    if (adapter->osdep.ihs[rid] != NULL) {
        pci_intr_disestablish(adapter->osdep.pc,
            adapter->osdep.ihs[rid]);
        adapter->osdep.ihs[rid] = NULL;
    }

    pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
        adapter->osdep.nintrs);

    if (adapter->osdep.mem_size != 0) {
        bus_space_unmap(adapter->osdep.mem_bus_space_tag,
            adapter->osdep.mem_bus_space_handle,
            adapter->osdep.mem_size);
    }

    return;
} /* ixv_free_pci_resources */
/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
    struct ethercom *ec = &adapter->osdep.ec;
    struct ifnet *ifp;

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    ifp = adapter->ifp = &ec->ec_if;
    strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
    ifp->if_baudrate = IF_Gbps(10);
    ifp->if_init = ixv_init;
    ifp->if_stop = ixv_ifstop;
    ifp->if_softc = adapter;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
    ifp->if_extflags = IFEF_MPSAFE;
#endif
    ifp->if_ioctl = ixv_ioctl;
    if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
        ixv_start_locked = ixgbe_legacy_start_locked;
#endif
    } else {
        ifp->if_transmit = ixgbe_mq_start;
#if 0
        ixv_start_locked = ixgbe_mq_start_locked;
#endif
    }
    ifp->if_start = ixgbe_legacy_start;
    IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
    IFQ_SET_READY(&ifp->if_snd);

    if_initialize(ifp);
    adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
    ether_ifattach(ifp, adapter->hw.mac.addr);
    aprint_normal_dev(dev, "Ethernet address %s\n",
        ether_sprintf(adapter->hw.mac.addr));
    /*
     * We use per TX queue softint, so if_deferred_start_init() isn't
     * used.
     */
    ether_set_ifflags_cb(ec, ixv_ifflags_cb);

    adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_hdrlen = sizeof(struct ether_vlan_header);

    /* Set capability flags */
    ifp->if_capabilities |= IFCAP_HWCSUM
                         |  IFCAP_TSOv4
                         |  IFCAP_TSOv6;
    ifp->if_capenable = 0;

    ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
                        |  ETHERCAP_VLAN_HWTAGGING
                        |  ETHERCAP_VLAN_HWCSUM
                        |  ETHERCAP_JUMBO_MTU
                        |  ETHERCAP_VLAN_MTU;

    /* Enable the above capabilities by default */
    ec->ec_capenable = ec->ec_capabilities;

    /* Don't enable LRO by default */
#if 0
    /* NetBSD doesn't support LRO yet */
    ifp->if_capabilities |= IFCAP_LRO;
#endif

    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ec->ec_ifmedia = &adapter->media;
    ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixv_media_change,
        ixv_media_status, &adapter->core_mtx);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

    if_register(ifp);

    return 0;
} /* ixv_setup_interface */
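/*
 * A note on the register programming below: the 64-bit descriptor
 * ring base address is split across two 32-bit registers
 * (VFTDBAL/VFTDBAH for TX, VFRDBAL/VFRDBAH for RX), which is why the
 * DMA address is written as "tdba & 0xffffffff" and "tdba >> 32".
 * WTHRESH=8 batches descriptor write-backs (the "burst writeback" in
 * the comment), trading slightly delayed completion reporting for
 * fewer PCIe writes.
 */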
/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;
    struct ixgbe_hw *hw = &adapter->hw;
    int i;

    for (i = 0; i < adapter->num_queues; i++, txr++) {
        u64 tdba = txr->txdma.dma_paddr;
        u32 txctrl, txdctl;
        int j = txr->me;

        /* Set WTHRESH to 8, burst writeback */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

        /* Set the HW Tx Head and Tail indices */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

        /* Set Tx Tail register */
        txr->tail = IXGBE_VFTDT(j);

        txr->txr_no_space = false;

        /* Set Ring parameters */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
            adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
        txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

        /* Now enable */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
    }

    return;
} /* ixv_initialize_transmit_units */


/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 reta = 0, mrqc, rss_key[10];
    int queue_id;
    int i, j;
    u32 rss_hash_config;

    /* Force use of the default RSS key. */
#ifdef __NetBSD__
    rss_getkey((uint8_t *)&rss_key);
#else
    if (adapter->feat_en & IXGBE_FEATURE_RSS) {
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *)&rss_key);
    } else {
        /* set up random bits */
        cprng_fast(&rss_key, sizeof(rss_key));
    }
#endif

    /* Now fill out hash function seeds */
    for (i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

    /* Set up the redirection table */
    for (i = 0, j = 0; i < 64; i++, j++) {
        if (j == adapter->num_queues)
            j = 0;

        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
            /*
             * Fetch the RSS bucket id for the given indirection
             * entry. Cap it at the number of configured buckets
             * (which is num_queues.)
             */
            queue_id = rss_get_indirection_to_bucket(i);
            queue_id = queue_id % adapter->num_queues;
        } else
            queue_id = j;

        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
        reta >>= 8;
        reta |= ((uint32_t)queue_id) << 24;
        if ((i & 3) == 3) {
            IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
            reta = 0;
        }
    }
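    /*
     * To recap the loop above: each 32-bit VFRETA register packs four
     * 8-bit redirection entries, so entries are shifted in from the
     * top byte and the accumulated word is flushed to VFRETA(i >> 2)
     * on every fourth iteration, covering all 64 table entries with
     * 16 register writes.
     */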
1821 */ 1822 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 1823 | RSS_HASHTYPE_RSS_TCP_IPV4 1824 | RSS_HASHTYPE_RSS_IPV6 1825 | RSS_HASHTYPE_RSS_TCP_IPV6; 1826 } 1827 1828 mrqc = IXGBE_MRQC_RSSEN; 1829 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 1830 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 1831 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 1832 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 1833 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 1834 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 1835 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 1836 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 1837 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 1838 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX " 1839 "defined, but not supported\n", __func__); 1840 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 1841 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX " 1842 "defined, but not supported\n", __func__); 1843 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 1844 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 1845 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 1846 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 1847 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 1848 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX " 1849 "defined, but not supported\n", __func__); 1850 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc); 1851 } /* ixv_initialize_rss_mapping */ 1852 1853 1854 /************************************************************************ 1855 * ixv_initialize_receive_units - Setup receive registers and features. 1856 ************************************************************************/ 1857 static void 1858 ixv_initialize_receive_units(struct adapter *adapter) 1859 { 1860 struct rx_ring *rxr = adapter->rx_rings; 1861 struct ixgbe_hw *hw = &adapter->hw; 1862 struct ifnet *ifp = adapter->ifp; 1863 u32 bufsz, psrtype; 1864 1865 if (ifp->if_mtu > ETHERMTU) 1866 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1867 else 1868 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1869 1870 psrtype = IXGBE_PSRTYPE_TCPHDR 1871 | IXGBE_PSRTYPE_UDPHDR 1872 | IXGBE_PSRTYPE_IPV4HDR 1873 | IXGBE_PSRTYPE_IPV6HDR 1874 | IXGBE_PSRTYPE_L2HDR; 1875 1876 if (adapter->num_queues > 1) 1877 psrtype |= 1 << 29; 1878 1879 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1880 1881 /* Tell PF our max_frame size */ 1882 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) { 1883 device_printf(adapter->dev, "There is a problem with the PF " 1884 "setup. 
It is likely the receive unit for this VF will " 1885 "not function correctly.\n"); 1886 } 1887 1888 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 1889 u64 rdba = rxr->rxdma.dma_paddr; 1890 u32 reg, rxdctl; 1891 int j = rxr->me; 1892 1893 /* Disable the queue */ 1894 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1895 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1896 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1897 for (int k = 0; k < 10; k++) { 1898 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1899 IXGBE_RXDCTL_ENABLE) 1900 msec_delay(1); 1901 else 1902 break; 1903 } 1904 IXGBE_WRITE_BARRIER(hw); 1905 /* Setup the Base and Length of the Rx Descriptor Ring */ 1906 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1907 (rdba & 0x00000000ffffffffULL)); 1908 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1909 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), 1910 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 1911 1912 /* Reset the ring indices */ 1913 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); 1914 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0); 1915 1916 /* Set up the SRRCTL register */ 1917 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j)); 1918 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 1919 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 1920 reg |= bufsz; 1921 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1922 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg); 1923 1924 /* Capture Rx Tail index */ 1925 rxr->tail = IXGBE_VFRDT(rxr->me); 1926 1927 /* Do the queue enabling last */ 1928 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1929 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1930 for (int k = 0; k < 10; k++) { 1931 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1932 IXGBE_RXDCTL_ENABLE) 1933 break; 1934 msec_delay(1); 1935 } 1936 IXGBE_WRITE_BARRIER(hw); 1937 1938 /* Set the Tail Pointer */ 1939 #ifdef DEV_NETMAP 1940 /* 1941 * In netmap mode, we must preserve the buffers made 1942 * available to userspace before the if_init() 1943 * (this is true by default on the TX side, because 1944 * init makes all buffers available to userspace). 1945 * 1946 * netmap_reset() and the device specific routines 1947 * (e.g. ixgbe_setup_receive_rings()) map these 1948 * buffers at the end of the NIC ring, so here we 1949 * must set the RDT (tail) register to make sure 1950 * they are not overwritten. 1951 * 1952 * In this driver the NIC ring starts at RDH = 0, 1953 * RDT points to the last slot available for reception (?), 1954 * so RDT = num_rx_desc - 1 means the whole ring is available. 
1955 */ 1956 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 1957 (ifp->if_capenable & IFCAP_NETMAP)) { 1958 struct netmap_adapter *na = NA(adapter->ifp); 1959 struct netmap_kring *kring = na->rx_rings[i]; 1960 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 1961 1962 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t); 1963 } else 1964 #endif /* DEV_NETMAP */ 1965 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 1966 adapter->num_rx_desc - 1); 1967 } 1968 1969 if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) 1970 ixv_initialize_rss_mapping(adapter); 1971 } /* ixv_initialize_receive_units */ 1972 1973 /************************************************************************ 1974 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function 1975 * 1976 * Retrieves the TDH value from the hardware 1977 ************************************************************************/ 1978 static int 1979 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS) 1980 { 1981 struct sysctlnode node = *rnode; 1982 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 1983 uint32_t val; 1984 1985 if (!txr) 1986 return (0); 1987 1988 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me)); 1989 node.sysctl_data = &val; 1990 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1991 } /* ixv_sysctl_tdh_handler */ 1992 1993 /************************************************************************ 1994 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1995 * 1996 * Retrieves the TDT value from the hardware 1997 ************************************************************************/ 1998 static int 1999 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS) 2000 { 2001 struct sysctlnode node = *rnode; 2002 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 2003 uint32_t val; 2004 2005 if (!txr) 2006 return (0); 2007 2008 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me)); 2009 node.sysctl_data = &val; 2010 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2011 } /* ixv_sysctl_tdt_handler */ 2012 2013 /************************************************************************ 2014 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check 2015 * handler function 2016 * 2017 * Retrieves the next_to_check value 2018 ************************************************************************/ 2019 static int 2020 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS) 2021 { 2022 struct sysctlnode node = *rnode; 2023 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2024 uint32_t val; 2025 2026 if (!rxr) 2027 return (0); 2028 2029 val = rxr->next_to_check; 2030 node.sysctl_data = &val; 2031 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2032 } /* ixv_sysctl_next_to_check_handler */ 2033 2034 /************************************************************************ 2035 * ixv_sysctl_next_to_refresh_handler - Receive Descriptor next to refresh 2036 * handler function 2037 * 2038 * Retrieves the next_to_refresh value 2039 ************************************************************************/ 2040 static int 2041 ixv_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS) 2042 { 2043 struct sysctlnode node = *rnode; 2044 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2045 struct adapter *adapter; 2046 uint32_t val; 2047 2048 if (!rxr) 2049 return (0); 2050 2051 adapter = rxr->adapter; 2052 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2053 return (EPERM); 2054 2055 val = rxr->next_to_refresh; 2056 node.sysctl_data = &val; 2057 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2058 } /* ixv_sysctl_next_to_refresh_handler */ 
2059 2060 /************************************************************************ 2061 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function 2062 * 2063 * Retrieves the RDH value from the hardware 2064 ************************************************************************/ 2065 static int 2066 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS) 2067 { 2068 struct sysctlnode node = *rnode; 2069 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2070 uint32_t val; 2071 2072 if (!rxr) 2073 return (0); 2074 2075 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me)); 2076 node.sysctl_data = &val; 2077 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2078 } /* ixv_sysctl_rdh_handler */ 2079 2080 /************************************************************************ 2081 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function 2082 * 2083 * Retrieves the RDT value from the hardware 2084 ************************************************************************/ 2085 static int 2086 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS) 2087 { 2088 struct sysctlnode node = *rnode; 2089 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2090 uint32_t val; 2091 2092 if (!rxr) 2093 return (0); 2094 2095 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me)); 2096 node.sysctl_data = &val; 2097 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2098 } /* ixv_sysctl_rdt_handler */ 2099 2100 static void 2101 ixv_setup_vlan_tagging(struct adapter *adapter) 2102 { 2103 struct ethercom *ec = &adapter->osdep.ec; 2104 struct ixgbe_hw *hw = &adapter->hw; 2105 struct rx_ring *rxr; 2106 u32 ctrl; 2107 int i; 2108 bool hwtagging; 2109 2110 /* Enable HW tagging only if any vlan is attached */ 2111 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) 2112 && VLAN_ATTACHED(ec); 2113 2114 /* Enable the queues */ 2115 for (i = 0; i < adapter->num_queues; i++) { 2116 rxr = &adapter->rx_rings[i]; 2117 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me)); 2118 if (hwtagging) 2119 ctrl |= IXGBE_RXDCTL_VME; 2120 else 2121 ctrl &= ~IXGBE_RXDCTL_VME; 2122 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl); 2123 /* 2124 * Let Rx path know that it needs to store VLAN tag 2125 * as part of extra mbuf info. 2126 */ 2127 rxr->vtag_strip = hwtagging ? TRUE : FALSE; 2128 } 2129 } /* ixv_setup_vlan_tagging */ 2130 2131 /************************************************************************ 2132 * ixv_setup_vlan_support 2133 ************************************************************************/ 2134 static int 2135 ixv_setup_vlan_support(struct adapter *adapter) 2136 { 2137 struct ethercom *ec = &adapter->osdep.ec; 2138 struct ixgbe_hw *hw = &adapter->hw; 2139 u32 vid, vfta, retry; 2140 struct vlanid_list *vlanidp; 2141 int rv, error = 0; 2142 2143 /* 2144 * This function is called from both if_init and ifflags_cb() 2145 * on NetBSD. 
2146 */ 2147 2148 /* 2149 * Part 1: 2150 * Setup VLAN HW tagging 2151 */ 2152 ixv_setup_vlan_tagging(adapter); 2153 2154 if (!VLAN_ATTACHED(ec)) 2155 return 0; 2156 2157 /* 2158 * Part 2: 2159 * Setup VLAN HW filter 2160 */ 2161 /* Cleanup shadow_vfta */ 2162 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) 2163 adapter->shadow_vfta[i] = 0; 2164 /* Generate shadow_vfta from ec_vids */ 2165 ETHER_LOCK(ec); 2166 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 2167 uint32_t idx; 2168 2169 idx = vlanidp->vid / 32; 2170 KASSERT(idx < IXGBE_VFTA_SIZE); 2171 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32); 2172 } 2173 ETHER_UNLOCK(ec); 2174 2175 /* 2176 * A soft reset zero's out the VFTA, so 2177 * we need to repopulate it now. 2178 */ 2179 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) { 2180 if (adapter->shadow_vfta[i] == 0) 2181 continue; 2182 vfta = adapter->shadow_vfta[i]; 2183 /* 2184 * Reconstruct the vlan id's 2185 * based on the bits set in each 2186 * of the array ints. 2187 */ 2188 for (int j = 0; j < 32; j++) { 2189 retry = 0; 2190 if ((vfta & ((u32)1 << j)) == 0) 2191 continue; 2192 vid = (i * 32) + j; 2193 2194 /* Call the shared code mailbox routine */ 2195 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE, 2196 FALSE)) != 0) { 2197 if (++retry > 5) { 2198 device_printf(adapter->dev, 2199 "%s: max retry exceeded\n", 2200 __func__); 2201 break; 2202 } 2203 } 2204 if (rv != 0) { 2205 device_printf(adapter->dev, 2206 "failed to set vlan %d\n", vid); 2207 error = EACCES; 2208 } 2209 } 2210 } 2211 return error; 2212 } /* ixv_setup_vlan_support */ 2213 2214 static int 2215 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 2216 { 2217 struct ifnet *ifp = &ec->ec_if; 2218 struct adapter *adapter = ifp->if_softc; 2219 int rv; 2220 2221 if (set) 2222 rv = ixv_register_vlan(adapter, vid); 2223 else 2224 rv = ixv_unregister_vlan(adapter, vid); 2225 2226 if (rv != 0) 2227 return rv; 2228 2229 /* 2230 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0 2231 * or 0 to 1. 2232 */ 2233 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0))) 2234 ixv_setup_vlan_tagging(adapter); 2235 2236 return rv; 2237 } 2238 2239 /************************************************************************ 2240 * ixv_register_vlan 2241 * 2242 * Run via a vlan config EVENT, it enables us to use the 2243 * HW Filter table since we can get the vlan id. This just 2244 * creates the entry in the soft version of the VFTA, init 2245 * will repopulate the real table. 2246 ************************************************************************/ 2247 static int 2248 ixv_register_vlan(struct adapter *adapter, u16 vtag) 2249 { 2250 struct ixgbe_hw *hw = &adapter->hw; 2251 u16 index, bit; 2252 int error; 2253 2254 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2255 return EINVAL; 2256 IXGBE_CORE_LOCK(adapter); 2257 index = (vtag >> 5) & 0x7F; 2258 bit = vtag & 0x1F; 2259 adapter->shadow_vfta[index] |= ((u32)1 << bit); 2260 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false); 2261 IXGBE_CORE_UNLOCK(adapter); 2262 2263 if (error != 0) { 2264 device_printf(adapter->dev, "failed to register vlan %hu\n", 2265 vtag); 2266 error = EACCES; 2267 } 2268 return error; 2269 } /* ixv_register_vlan */ 2270 2271 /************************************************************************ 2272 * ixv_unregister_vlan 2273 * 2274 * Run via a vlan unconfig EVENT, remove our entry 2275 * in the soft vfta. 
2276 ************************************************************************/ 2277 static int 2278 ixv_unregister_vlan(struct adapter *adapter, u16 vtag) 2279 { 2280 struct ixgbe_hw *hw = &adapter->hw; 2281 u16 index, bit; 2282 int error; 2283 2284 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2285 return EINVAL; 2286 2287 IXGBE_CORE_LOCK(adapter); 2288 index = (vtag >> 5) & 0x7F; 2289 bit = vtag & 0x1F; 2290 adapter->shadow_vfta[index] &= ~((u32)1 << bit); 2291 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false); 2292 IXGBE_CORE_UNLOCK(adapter); 2293 2294 if (error != 0) { 2295 device_printf(adapter->dev, "failed to unregister vlan %hu\n", 2296 vtag); 2297 error = EIO; 2298 } 2299 return error; 2300 } /* ixv_unregister_vlan */ 2301 2302 /************************************************************************ 2303 * ixv_enable_intr 2304 ************************************************************************/ 2305 static void 2306 ixv_enable_intr(struct adapter *adapter) 2307 { 2308 struct ixgbe_hw *hw = &adapter->hw; 2309 struct ix_queue *que = adapter->queues; 2310 u32 mask; 2311 int i; 2312 2313 /* For VTEIAC */ 2314 mask = (1 << adapter->vector); 2315 for (i = 0; i < adapter->num_queues; i++, que++) 2316 mask |= (1 << que->msix); 2317 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); 2318 2319 /* For VTEIMS */ 2320 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); 2321 que = adapter->queues; 2322 for (i = 0; i < adapter->num_queues; i++, que++) 2323 ixv_enable_queue(adapter, que->msix); 2324 2325 IXGBE_WRITE_FLUSH(hw); 2326 } /* ixv_enable_intr */ 2327 2328 /************************************************************************ 2329 * ixv_disable_intr 2330 ************************************************************************/ 2331 static void 2332 ixv_disable_intr(struct adapter *adapter) 2333 { 2334 struct ix_queue *que = adapter->queues; 2335 2336 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0); 2337 2338 /* disable interrupts other than queues */ 2339 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector); 2340 2341 for (int i = 0; i < adapter->num_queues; i++, que++) 2342 ixv_disable_queue(adapter, que->msix); 2343 2344 IXGBE_WRITE_FLUSH(&adapter->hw); 2345 } /* ixv_disable_intr */ 2346 2347 /************************************************************************ 2348 * ixv_set_ivar 2349 * 2350 * Setup the correct IVAR register for a particular MSI-X interrupt 2351 * - entry is the register array entry 2352 * - vector is the MSI-X vector for this queue 2353 * - type is RX/TX/MISC 2354 ************************************************************************/ 2355 static void 2356 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 2357 { 2358 struct ixgbe_hw *hw = &adapter->hw; 2359 u32 ivar, index; 2360 2361 vector |= IXGBE_IVAR_ALLOC_VAL; 2362 2363 if (type == -1) { /* MISC IVAR */ 2364 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 2365 ivar &= ~0xFF; 2366 ivar |= vector; 2367 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 2368 } else { /* RX/TX IVARS */ 2369 index = (16 * (entry & 1)) + (8 * type); 2370 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); 2371 ivar &= ~(0xffUL << index); 2372 ivar |= ((u32)vector << index); 2373 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); 2374 } 2375 } /* ixv_set_ivar */ 2376 2377 /************************************************************************ 2378 * ixv_configure_ivars 2379 ************************************************************************/ 2380 static void 2381 
ixv_configure_ivars(struct adapter *adapter) 2382 { 2383 struct ix_queue *que = adapter->queues; 2384 2385 /* XXX We should sync EITR value calculation with ixgbe.c? */ 2386 2387 for (int i = 0; i < adapter->num_queues; i++, que++) { 2388 /* First the RX queue entry */ 2389 ixv_set_ivar(adapter, i, que->msix, 0); 2390 /* ... and the TX */ 2391 ixv_set_ivar(adapter, i, que->msix, 1); 2392 /* Set an initial value in EITR */ 2393 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT); 2394 } 2395 2396 /* For the mailbox interrupt */ 2397 ixv_set_ivar(adapter, 1, adapter->vector, -1); 2398 } /* ixv_configure_ivars */ 2399 2400 2401 /************************************************************************ 2402 * ixv_init_stats 2403 * 2404 * The VF stats registers never have a truly virgin 2405 * starting point, so this routine save initial vaules to 2406 * last_<REGNAME>. 2407 ************************************************************************/ 2408 static void 2409 ixv_init_stats(struct adapter *adapter) 2410 { 2411 struct ixgbe_hw *hw = &adapter->hw; 2412 2413 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 2414 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 2415 adapter->stats.vf.last_vfgorc |= 2416 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 2417 2418 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 2419 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 2420 adapter->stats.vf.last_vfgotc |= 2421 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 2422 2423 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 2424 } /* ixv_init_stats */ 2425 2426 #define UPDATE_STAT_32(reg, last, count) \ 2427 { \ 2428 u32 current = IXGBE_READ_REG(hw, (reg)); \ 2429 IXGBE_EVC_ADD(&count, current - (last)); \ 2430 (last) = current; \ 2431 } 2432 2433 #define UPDATE_STAT_36(lsb, msb, last, count) \ 2434 { \ 2435 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \ 2436 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \ 2437 u64 current = ((cur_msb << 32) | cur_lsb); \ 2438 if (current < (last)) \ 2439 IXGBE_EVC_ADD(&count, current + __BIT(36) - (last)); \ 2440 else \ 2441 IXGBE_EVC_ADD(&count, current - (last)); \ 2442 (last) = current; \ 2443 } 2444 2445 /************************************************************************ 2446 * ixv_update_stats - Update the board statistics counters. 
2447 ************************************************************************/ 2448 void 2449 ixv_update_stats(struct adapter *adapter) 2450 { 2451 struct ixgbe_hw *hw = &adapter->hw; 2452 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2453 2454 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc); 2455 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc); 2456 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc, 2457 stats->vfgorc); 2458 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc, 2459 stats->vfgotc); 2460 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc); 2461 2462 /* VF doesn't count errors by hardware */ 2463 2464 } /* ixv_update_stats */ 2465 2466 /************************************************************************ 2467 * ixv_sysctl_interrupt_rate_handler 2468 ************************************************************************/ 2469 static int 2470 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) 2471 { 2472 struct sysctlnode node = *rnode; 2473 struct ix_queue *que = (struct ix_queue *)node.sysctl_data; 2474 struct adapter *adapter = que->adapter; 2475 uint32_t reg, usec, rate; 2476 int error; 2477 2478 if (que == NULL) 2479 return 0; 2480 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix)); 2481 usec = ((reg & 0x0FF8) >> 3); 2482 if (usec > 0) 2483 rate = 500000 / usec; 2484 else 2485 rate = 0; 2486 node.sysctl_data = &rate; 2487 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2488 if (error || newp == NULL) 2489 return error; 2490 reg &= ~0xfff; /* default, no limitation */ 2491 if (rate > 0 && rate < 500000) { 2492 if (rate < 1000) 2493 rate = 1000; 2494 reg |= ((4000000 / rate) & 0xff8); 2495 /* 2496 * When RSC is used, ITR interval must be larger than 2497 * RSC_DELAY. Currently, we use 2us for RSC_DELAY. 2498 * The minimum value is always greater than 2us on 100M 2499 * (and 10M?(not documented)), but it's not on 1G and higher. 
2500 */ 2501 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 2502 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 2503 if ((adapter->num_queues > 1) 2504 && (reg < IXGBE_MIN_RSC_EITR_10G1G)) 2505 return EINVAL; 2506 } 2507 ixv_max_interrupt_rate = rate; 2508 } else 2509 ixv_max_interrupt_rate = 0; 2510 ixv_eitr_write(adapter, que->msix, reg); 2511 2512 return (0); 2513 } /* ixv_sysctl_interrupt_rate_handler */ 2514 2515 const struct sysctlnode * 2516 ixv_sysctl_instance(struct adapter *adapter) 2517 { 2518 const char *dvname; 2519 struct sysctllog **log; 2520 int rc; 2521 const struct sysctlnode *rnode; 2522 2523 log = &adapter->sysctllog; 2524 dvname = device_xname(adapter->dev); 2525 2526 if ((rc = sysctl_createv(log, 0, NULL, &rnode, 2527 0, CTLTYPE_NODE, dvname, 2528 SYSCTL_DESCR("ixv information and settings"), 2529 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 2530 goto err; 2531 2532 return rnode; 2533 err: 2534 device_printf(adapter->dev, 2535 "%s: sysctl_createv failed, rc = %d\n", __func__, rc); 2536 return NULL; 2537 } 2538 2539 static void 2540 ixv_add_device_sysctls(struct adapter *adapter) 2541 { 2542 struct sysctllog **log; 2543 const struct sysctlnode *rnode, *cnode; 2544 device_t dev; 2545 2546 dev = adapter->dev; 2547 log = &adapter->sysctllog; 2548 2549 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2550 aprint_error_dev(dev, "could not create sysctl root\n"); 2551 return; 2552 } 2553 2554 if (sysctl_createv(log, 0, &rnode, &cnode, 2555 CTLFLAG_READWRITE, CTLTYPE_INT, "debug", 2556 SYSCTL_DESCR("Debug Info"), 2557 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) 2558 aprint_error_dev(dev, "could not create sysctl\n"); 2559 2560 if (sysctl_createv(log, 0, &rnode, &cnode, 2561 CTLFLAG_READWRITE, CTLTYPE_INT, 2562 "rx_copy_len", SYSCTL_DESCR("RX Copy Length"), 2563 ixv_sysctl_rx_copy_len, 0, 2564 (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) 2565 aprint_error_dev(dev, "could not create sysctl\n"); 2566 2567 if (sysctl_createv(log, 0, &rnode, &cnode, 2568 CTLFLAG_READONLY, CTLTYPE_INT, 2569 "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"), 2570 NULL, 0, &adapter->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0) 2571 aprint_error_dev(dev, "could not create sysctl\n"); 2572 2573 if (sysctl_createv(log, 0, &rnode, &cnode, 2574 CTLFLAG_READONLY, CTLTYPE_INT, 2575 "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"), 2576 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0) 2577 aprint_error_dev(dev, "could not create sysctl\n"); 2578 2579 if (sysctl_createv(log, 0, &rnode, &cnode, 2580 CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit", 2581 SYSCTL_DESCR("max number of RX packets to process"), 2582 ixv_sysctl_rx_process_limit, 0, (void *)adapter, 0, CTL_CREATE, 2583 CTL_EOL) != 0) 2584 aprint_error_dev(dev, "could not create sysctl\n"); 2585 2586 if (sysctl_createv(log, 0, &rnode, &cnode, 2587 CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit", 2588 SYSCTL_DESCR("max number of TX packets to process"), 2589 ixv_sysctl_tx_process_limit, 0, (void *)adapter, 0, CTL_CREATE, 2590 CTL_EOL) != 0) 2591 aprint_error_dev(dev, "could not create sysctl\n"); 2592 2593 if (sysctl_createv(log, 0, &rnode, &cnode, 2594 CTLFLAG_READWRITE, CTLTYPE_BOOL, "enable_aim", 2595 SYSCTL_DESCR("Interrupt Moderation"), 2596 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) 2597 aprint_error_dev(dev, "could not create sysctl\n"); 2598 2599 if (sysctl_createv(log, 0, &rnode, &cnode, 2600 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue", 
2601 SYSCTL_DESCR("Use workqueue for packet processing"), 2602 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) 2603 != 0) 2604 aprint_error_dev(dev, "could not create sysctl\n"); 2605 } 2606 2607 /************************************************************************ 2608 * ixv_add_stats_sysctls - Add statistic sysctls for the VF. 2609 ************************************************************************/ 2610 static void 2611 ixv_add_stats_sysctls(struct adapter *adapter) 2612 { 2613 device_t dev = adapter->dev; 2614 struct tx_ring *txr = adapter->tx_rings; 2615 struct rx_ring *rxr = adapter->rx_rings; 2616 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2617 struct ixgbe_hw *hw = &adapter->hw; 2618 const struct sysctlnode *rnode, *cnode; 2619 struct sysctllog **log = &adapter->sysctllog; 2620 const char *xname = device_xname(dev); 2621 2622 /* Driver Statistics */ 2623 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC, 2624 NULL, xname, "Driver tx dma soft fail EFBIG"); 2625 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC, 2626 NULL, xname, "m_defrag() failed"); 2627 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, 2628 NULL, xname, "Driver tx dma hard fail EFBIG"); 2629 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC, 2630 NULL, xname, "Driver tx dma hard fail EINVAL"); 2631 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC, 2632 NULL, xname, "Driver tx dma hard fail other"); 2633 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC, 2634 NULL, xname, "Driver tx dma soft fail EAGAIN"); 2635 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC, 2636 NULL, xname, "Driver tx dma soft fail ENOMEM"); 2637 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC, 2638 NULL, xname, "Watchdog timeouts"); 2639 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC, 2640 NULL, xname, "TSO errors"); 2641 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR, 2642 NULL, xname, "Admin MSI-X IRQ Handled"); 2643 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR, 2644 NULL, xname, "Admin event"); 2645 2646 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 2647 snprintf(adapter->queues[i].evnamebuf, 2648 sizeof(adapter->queues[i].evnamebuf), "%s q%d", xname, i); 2649 snprintf(adapter->queues[i].namebuf, 2650 sizeof(adapter->queues[i].namebuf), "q%d", i); 2651 2652 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2653 aprint_error_dev(dev, 2654 "could not create sysctl root\n"); 2655 break; 2656 } 2657 2658 if (sysctl_createv(log, 0, &rnode, &rnode, 2659 0, CTLTYPE_NODE, 2660 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), 2661 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 2662 break; 2663 2664 if (sysctl_createv(log, 0, &rnode, &cnode, 2665 CTLFLAG_READWRITE, CTLTYPE_INT, 2666 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), 2667 ixv_sysctl_interrupt_rate_handler, 0, 2668 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) 2669 break; 2670 2671 if (sysctl_createv(log, 0, &rnode, &cnode, 2672 CTLFLAG_READONLY, CTLTYPE_INT, 2673 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), 2674 ixv_sysctl_tdh_handler, 0, (void *)txr, 2675 0, CTL_CREATE, CTL_EOL) != 0) 2676 break; 2677 2678 if (sysctl_createv(log, 0, &rnode, &cnode, 2679 CTLFLAG_READONLY, CTLTYPE_INT, 2680 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), 2681 ixv_sysctl_tdt_handler, 0, (void *)txr, 2682 0, CTL_CREATE, CTL_EOL) != 0) 2683 
break; 2684 2685 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR, 2686 NULL, adapter->queues[i].evnamebuf, "IRQs on queue"); 2687 evcnt_attach_dynamic(&adapter->queues[i].handleq, 2688 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 2689 "Handled queue in softint"); 2690 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC, 2691 NULL, adapter->queues[i].evnamebuf, "Requeued in softint"); 2692 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, 2693 NULL, adapter->queues[i].evnamebuf, "TSO"); 2694 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, 2695 NULL, adapter->queues[i].evnamebuf, 2696 "TX Queue No Descriptor Available"); 2697 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, 2698 NULL, adapter->queues[i].evnamebuf, 2699 "Queue Packets Transmitted"); 2700 #ifndef IXGBE_LEGACY_TX 2701 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, 2702 NULL, adapter->queues[i].evnamebuf, 2703 "Packets dropped in pcq"); 2704 #endif 2705 2706 #ifdef LRO 2707 struct lro_ctrl *lro = &rxr->lro; 2708 #endif /* LRO */ 2709 2710 if (sysctl_createv(log, 0, &rnode, &cnode, 2711 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck", 2712 SYSCTL_DESCR("Receive Descriptor next to check"), 2713 ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0, 2714 CTL_CREATE, CTL_EOL) != 0) 2715 break; 2716 2717 if (sysctl_createv(log, 0, &rnode, &cnode, 2718 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf", 2719 SYSCTL_DESCR("Receive Descriptor next to refresh"), 2720 ixv_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0, 2721 CTL_CREATE, CTL_EOL) != 0) 2722 break; 2723 2724 if (sysctl_createv(log, 0, &rnode, &cnode, 2725 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head", 2726 SYSCTL_DESCR("Receive Descriptor Head"), 2727 ixv_sysctl_rdh_handler, 0, (void *)rxr, 0, 2728 CTL_CREATE, CTL_EOL) != 0) 2729 break; 2730 2731 if (sysctl_createv(log, 0, &rnode, &cnode, 2732 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail", 2733 SYSCTL_DESCR("Receive Descriptor Tail"), 2734 ixv_sysctl_rdt_handler, 0, (void *)rxr, 0, 2735 CTL_CREATE, CTL_EOL) != 0) 2736 break; 2737 2738 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, 2739 NULL, adapter->queues[i].evnamebuf, 2740 "Queue Packets Received"); 2741 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, 2742 NULL, adapter->queues[i].evnamebuf, 2743 "Queue Bytes Received"); 2744 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, 2745 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames"); 2746 evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC, 2747 NULL, adapter->queues[i].evnamebuf, "Rx no mbuf"); 2748 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, 2749 NULL, adapter->queues[i].evnamebuf, "Rx discarded"); 2750 #ifdef LRO 2751 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", 2752 CTLFLAG_RD, &lro->lro_queued, 0, 2753 "LRO Queued"); 2754 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", 2755 CTLFLAG_RD, &lro->lro_flushed, 0, 2756 "LRO Flushed"); 2757 #endif /* LRO */ 2758 } 2759 2760 /* MAC stats get their own sub node */ 2761 2762 snprintf(stats->namebuf, 2763 sizeof(stats->namebuf), "%s MAC Statistics", xname); 2764 2765 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, 2766 stats->namebuf, "rx csum offload - IP"); 2767 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, 2768 stats->namebuf, "rx csum offload - L4"); 2769 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, 2770 stats->namebuf, "rx csum offload - IP bad"); 2771 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, 2772 stats->namebuf, "rx 
csum offload - L4 bad"); 2773 2774 /* Packet Reception Stats */ 2775 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL, 2776 xname, "Good Packets Received"); 2777 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL, 2778 xname, "Good Octets Received"); 2779 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL, 2780 xname, "Multicast Packets Received"); 2781 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL, 2782 xname, "Good Packets Transmitted"); 2783 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL, 2784 xname, "Good Octets Transmitted"); 2785 2786 /* Mailbox Stats */ 2787 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL, 2788 xname, "message TXs"); 2789 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL, 2790 xname, "message RXs"); 2791 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL, 2792 xname, "ACKs"); 2793 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL, 2794 xname, "REQs"); 2795 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL, 2796 xname, "RSTs"); 2797 2798 } /* ixv_add_stats_sysctls */ 2799 2800 static void 2801 ixv_clear_evcnt(struct adapter *adapter) 2802 { 2803 struct tx_ring *txr = adapter->tx_rings; 2804 struct rx_ring *rxr = adapter->rx_rings; 2805 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2806 struct ixgbe_hw *hw = &adapter->hw; 2807 int i; 2808 2809 /* Driver Statistics */ 2810 IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, 0); 2811 IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, 0); 2812 IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, 0); 2813 IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, 0); 2814 IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, 0); 2815 IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, 0); 2816 IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, 0); 2817 IXGBE_EVC_STORE(&adapter->watchdog_events, 0); 2818 IXGBE_EVC_STORE(&adapter->tso_err, 0); 2819 IXGBE_EVC_STORE(&adapter->admin_irqev, 0); 2820 IXGBE_EVC_STORE(&adapter->link_workev, 0); 2821 2822 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 2823 IXGBE_EVC_STORE(&adapter->queues[i].irqs, 0); 2824 IXGBE_EVC_STORE(&adapter->queues[i].handleq, 0); 2825 IXGBE_EVC_STORE(&adapter->queues[i].req, 0); 2826 IXGBE_EVC_STORE(&txr->tso_tx, 0); 2827 IXGBE_EVC_STORE(&txr->no_desc_avail, 0); 2828 IXGBE_EVC_STORE(&txr->total_packets, 0); 2829 #ifndef IXGBE_LEGACY_TX 2830 IXGBE_EVC_STORE(&txr->pcq_drops, 0); 2831 #endif 2832 txr->q_efbig_tx_dma_setup = 0; 2833 txr->q_mbuf_defrag_failed = 0; 2834 txr->q_efbig2_tx_dma_setup = 0; 2835 txr->q_einval_tx_dma_setup = 0; 2836 txr->q_other_tx_dma_setup = 0; 2837 txr->q_eagain_tx_dma_setup = 0; 2838 txr->q_enomem_tx_dma_setup = 0; 2839 txr->q_tso_err = 0; 2840 2841 IXGBE_EVC_STORE(&rxr->rx_packets, 0); 2842 IXGBE_EVC_STORE(&rxr->rx_bytes, 0); 2843 IXGBE_EVC_STORE(&rxr->rx_copies, 0); 2844 IXGBE_EVC_STORE(&rxr->no_mbuf, 0); 2845 IXGBE_EVC_STORE(&rxr->rx_discarded, 0); 2846 } 2847 2848 /* MAC stats get their own sub node */ 2849 2850 IXGBE_EVC_STORE(&stats->ipcs, 0); 2851 IXGBE_EVC_STORE(&stats->l4cs, 0); 2852 IXGBE_EVC_STORE(&stats->ipcs_bad, 0); 2853 IXGBE_EVC_STORE(&stats->l4cs_bad, 0); 2854 2855 /* 2856 * Packet Reception Stats. 2857 * Call ixv_init_stats() to save last VF counters' values. 
2858 */ 2859 ixv_init_stats(adapter); 2860 IXGBE_EVC_STORE(&stats->vfgprc, 0); 2861 IXGBE_EVC_STORE(&stats->vfgorc, 0); 2862 IXGBE_EVC_STORE(&stats->vfmprc, 0); 2863 IXGBE_EVC_STORE(&stats->vfgptc, 0); 2864 IXGBE_EVC_STORE(&stats->vfgotc, 0); 2865 2866 /* Mailbox Stats */ 2867 IXGBE_EVC_STORE(&hw->mbx.stats.msgs_tx, 0); 2868 IXGBE_EVC_STORE(&hw->mbx.stats.msgs_rx, 0); 2869 IXGBE_EVC_STORE(&hw->mbx.stats.acks, 0); 2870 IXGBE_EVC_STORE(&hw->mbx.stats.reqs, 0); 2871 IXGBE_EVC_STORE(&hw->mbx.stats.rsts, 0); 2872 2873 } /* ixv_clear_evcnt */ 2874 2875 #define PRINTQS(adapter, regname) \ 2876 do { \ 2877 struct ixgbe_hw *_hw = &(adapter)->hw; \ 2878 int _i; \ 2879 \ 2880 printf("%s: %s", device_xname((adapter)->dev), #regname); \ 2881 for (_i = 0; _i < (adapter)->num_queues; _i++) { \ 2882 printf((_i == 0) ? "\t" : " "); \ 2883 printf("%08x", IXGBE_READ_REG(_hw, \ 2884 IXGBE_##regname(_i))); \ 2885 } \ 2886 printf("\n"); \ 2887 } while (0) 2888 2889 /************************************************************************ 2890 * ixv_print_debug_info 2891 * 2892 * Provides a way to take a look at important statistics 2893 * maintained by the driver and hardware. 2894 ************************************************************************/ 2895 static void 2896 ixv_print_debug_info(struct adapter *adapter) 2897 { 2898 device_t dev = adapter->dev; 2899 struct ixgbe_hw *hw = &adapter->hw; 2900 int i; 2901 2902 device_printf(dev, "queue:"); 2903 for (i = 0; i < adapter->num_queues; i++) { 2904 printf((i == 0) ? "\t" : " "); 2905 printf("%8d", i); 2906 } 2907 printf("\n"); 2908 PRINTQS(adapter, VFRDBAL); 2909 PRINTQS(adapter, VFRDBAH); 2910 PRINTQS(adapter, VFRDLEN); 2911 PRINTQS(adapter, VFSRRCTL); 2912 PRINTQS(adapter, VFRDH); 2913 PRINTQS(adapter, VFRDT); 2914 PRINTQS(adapter, VFRXDCTL); 2915 2916 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIMS)); 2917 device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIAM)); 2918 device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIAC)); 2919 } /* ixv_print_debug_info */ 2920 2921 /************************************************************************ 2922 * ixv_sysctl_debug 2923 ************************************************************************/ 2924 static int 2925 ixv_sysctl_debug(SYSCTLFN_ARGS) 2926 { 2927 struct sysctlnode node = *rnode; 2928 struct adapter *adapter = (struct adapter *)node.sysctl_data; 2929 int error, result = 0; 2930 2931 node.sysctl_data = &result; 2932 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2933 2934 if (error || newp == NULL) 2935 return error; 2936 2937 if (result == 1) 2938 ixv_print_debug_info(adapter); 2939 2940 return 0; 2941 } /* ixv_sysctl_debug */ 2942 2943 /************************************************************************ 2944 * ixv_sysctl_rx_copy_len 2945 ************************************************************************/ 2946 static int 2947 ixv_sysctl_rx_copy_len(SYSCTLFN_ARGS) 2948 { 2949 struct sysctlnode node = *rnode; 2950 struct adapter *adapter = (struct adapter *)node.sysctl_data; 2951 int error; 2952 int result = adapter->rx_copy_len; 2953 2954 node.sysctl_data = &result; 2955 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2956 2957 if (error || newp == NULL) 2958 return error; 2959 2960 if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX)) 2961 return EINVAL; 2962 2963 adapter->rx_copy_len = result; 2964 2965 return 0; 2966 } /* ixv_sysctl_rx_copy_len */ 2967 2968 /************************************************************************ 2969 * 
ixv_sysctl_tx_process_limit 2970 ************************************************************************/ 2971 static int 2972 ixv_sysctl_tx_process_limit(SYSCTLFN_ARGS) 2973 { 2974 struct sysctlnode node = *rnode; 2975 struct adapter *adapter = (struct adapter *)node.sysctl_data; 2976 int error; 2977 int result = adapter->tx_process_limit; 2978 2979 node.sysctl_data = &result; 2980 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2981 2982 if (error || newp == NULL) 2983 return error; 2984 2985 if ((result <= 0) || (result > adapter->num_tx_desc)) 2986 return EINVAL; 2987 2988 adapter->tx_process_limit = result; 2989 2990 return 0; 2991 } /* ixv_sysctl_tx_process_limit */ 2992 2993 /************************************************************************ 2994 * ixv_sysctl_rx_process_limit 2995 ************************************************************************/ 2996 static int 2997 ixv_sysctl_rx_process_limit(SYSCTLFN_ARGS) 2998 { 2999 struct sysctlnode node = *rnode; 3000 struct adapter *adapter = (struct adapter *)node.sysctl_data; 3001 int error; 3002 int result = adapter->rx_process_limit; 3003 3004 node.sysctl_data = &result; 3005 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 3006 3007 if (error || newp == NULL) 3008 return error; 3009 3010 if ((result <= 0) || (result > adapter->num_rx_desc)) 3011 return EINVAL; 3012 3013 adapter->rx_process_limit = result; 3014 3015 return 0; 3016 } /* ixv_sysctl_rx_process_limit */ 3017 3018 /************************************************************************ 3019 * ixv_init_device_features 3020 ************************************************************************/ 3021 static void 3022 ixv_init_device_features(struct adapter *adapter) 3023 { 3024 adapter->feat_cap = IXGBE_FEATURE_NETMAP 3025 | IXGBE_FEATURE_VF 3026 | IXGBE_FEATURE_RSS 3027 | IXGBE_FEATURE_LEGACY_TX; 3028 3029 /* A tad short on feature flags for VFs, atm. */ 3030 switch (adapter->hw.mac.type) { 3031 case ixgbe_mac_82599_vf: 3032 break; 3033 case ixgbe_mac_X540_vf: 3034 break; 3035 case ixgbe_mac_X550_vf: 3036 case ixgbe_mac_X550EM_x_vf: 3037 case ixgbe_mac_X550EM_a_vf: 3038 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; 3039 break; 3040 default: 3041 break; 3042 } 3043 3044 /* Enabled by default... */ 3045 /* Is a virtual function (VF) */ 3046 if (adapter->feat_cap & IXGBE_FEATURE_VF) 3047 adapter->feat_en |= IXGBE_FEATURE_VF; 3048 /* Netmap */ 3049 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 3050 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 3051 /* Receive-Side Scaling (RSS) */ 3052 if (adapter->feat_cap & IXGBE_FEATURE_RSS) 3053 adapter->feat_en |= IXGBE_FEATURE_RSS; 3054 /* Needs advanced context descriptor regardless of offloads req'd */ 3055 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) 3056 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; 3057 3058 /* Enabled via sysctl... 
*/ 3059 /* Legacy (single queue) transmit */ 3060 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) && 3061 ixv_enable_legacy_tx) 3062 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX; 3063 } /* ixv_init_device_features */ 3064 3065 /************************************************************************ 3066 * ixv_shutdown - Shutdown entry point 3067 ************************************************************************/ 3068 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ 3069 static int 3070 ixv_shutdown(device_t dev) 3071 { 3072 struct adapter *adapter = device_private(dev); 3073 IXGBE_CORE_LOCK(adapter); 3074 ixv_stop_locked(adapter); 3075 IXGBE_CORE_UNLOCK(adapter); 3076 3077 return (0); 3078 } /* ixv_shutdown */ 3079 #endif 3080 3081 static int 3082 ixv_ifflags_cb(struct ethercom *ec) 3083 { 3084 struct ifnet *ifp = &ec->ec_if; 3085 struct adapter *adapter = ifp->if_softc; 3086 u_short saved_flags; 3087 u_short change; 3088 int rv = 0; 3089 3090 IXGBE_CORE_LOCK(adapter); 3091 3092 saved_flags = adapter->if_flags; 3093 change = ifp->if_flags ^ adapter->if_flags; 3094 if (change != 0) 3095 adapter->if_flags = ifp->if_flags; 3096 3097 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 3098 rv = ENETRESET; 3099 goto out; 3100 } else if ((change & IFF_PROMISC) != 0) { 3101 rv = ixv_set_rxfilter(adapter); 3102 if (rv != 0) { 3103 /* Restore previous */ 3104 adapter->if_flags = saved_flags; 3105 goto out; 3106 } 3107 } 3108 3109 /* Check for ec_capenable. */ 3110 change = ec->ec_capenable ^ adapter->ec_capenable; 3111 adapter->ec_capenable = ec->ec_capenable; 3112 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 3113 | ETHERCAP_VLAN_HWFILTER)) != 0) { 3114 rv = ENETRESET; 3115 goto out; 3116 } 3117 3118 /* 3119 * Special handling is not required for ETHERCAP_VLAN_MTU. 3120 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header. 3121 */ 3122 3123 /* Set up VLAN support and filter */ 3124 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0) 3125 rv = ixv_setup_vlan_support(adapter); 3126 3127 out: 3128 IXGBE_CORE_UNLOCK(adapter); 3129 3130 return rv; 3131 } 3132 3133 3134 /************************************************************************ 3135 * ixv_ioctl - Ioctl entry point 3136 * 3137 * Called when the user wants to configure the interface. 3138 * 3139 * return 0 on success, positive on failure 3140 ************************************************************************/ 3141 static int 3142 ixv_ioctl(struct ifnet *ifp, u_long command, void *data) 3143 { 3144 struct adapter *adapter = ifp->if_softc; 3145 struct ixgbe_hw *hw = &adapter->hw; 3146 struct ifcapreq *ifcr = data; 3147 int error; 3148 int l4csum_en; 3149 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 3150 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 3151 3152 switch (command) { 3153 case SIOCSIFFLAGS: 3154 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 3155 break; 3156 case SIOCADDMULTI: { 3157 struct ether_multi *enm; 3158 struct ether_multistep step; 3159 struct ethercom *ec = &adapter->osdep.ec; 3160 bool overflow = false; 3161 int mcnt = 0; 3162 3163 /* 3164 * Check the number of multicast address. If it exceeds, 3165 * return ENOSPC. 3166 * Update this code when we support API 1.3. 3167 */ 3168 ETHER_LOCK(ec); 3169 ETHER_FIRST_MULTI(step, ec, enm); 3170 while (enm != NULL) { 3171 mcnt++; 3172 3173 /* 3174 * This code is before adding, so one room is required 3175 * at least. 
3176 */ 3177 if (mcnt > (IXGBE_MAX_VF_MC - 1)) { 3178 overflow = true; 3179 break; 3180 } 3181 ETHER_NEXT_MULTI(step, enm); 3182 } 3183 ETHER_UNLOCK(ec); 3184 error = 0; 3185 if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) { 3186 error = hw->mac.ops.update_xcast_mode(hw, 3187 IXGBEVF_XCAST_MODE_ALLMULTI); 3188 if (error == IXGBE_ERR_NOT_TRUSTED) { 3189 device_printf(adapter->dev, 3190 "this interface is not trusted\n"); 3191 error = EPERM; 3192 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) { 3193 device_printf(adapter->dev, 3194 "the PF doesn't support allmulti mode\n"); 3195 error = EOPNOTSUPP; 3196 } else if (error) { 3197 device_printf(adapter->dev, 3198 "number of Ethernet multicast addresses " 3199 "exceeds the limit (%d). error = %d\n", 3200 IXGBE_MAX_VF_MC, error); 3201 error = ENOSPC; 3202 } else 3203 ec->ec_flags |= ETHER_F_ALLMULTI; 3204 } 3205 if (error) 3206 return error; 3207 } 3208 /*FALLTHROUGH*/ 3209 case SIOCDELMULTI: 3210 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); 3211 break; 3212 case SIOCSIFMEDIA: 3213 case SIOCGIFMEDIA: 3214 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 3215 break; 3216 case SIOCSIFCAP: 3217 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 3218 break; 3219 case SIOCSIFMTU: 3220 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 3221 break; 3222 case SIOCZIFDATA: 3223 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)"); 3224 ixv_update_stats(adapter); 3225 ixv_clear_evcnt(adapter); 3226 break; 3227 default: 3228 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command); 3229 break; 3230 } 3231 3232 switch (command) { 3233 case SIOCSIFCAP: 3234 /* Layer-4 Rx checksum offload has to be turned on and 3235 * off as a unit. 3236 */ 3237 l4csum_en = ifcr->ifcr_capenable & l4csum; 3238 if (l4csum_en != l4csum && l4csum_en != 0) 3239 return EINVAL; 3240 /*FALLTHROUGH*/ 3241 case SIOCADDMULTI: 3242 case SIOCDELMULTI: 3243 case SIOCSIFFLAGS: 3244 case SIOCSIFMTU: 3245 default: 3246 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 3247 return error; 3248 if ((ifp->if_flags & IFF_RUNNING) == 0) 3249 ; 3250 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { 3251 IXGBE_CORE_LOCK(adapter); 3252 ixv_init_locked(adapter); 3253 IXGBE_CORE_UNLOCK(adapter); 3254 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { 3255 /* 3256 * Multicast list has changed; set the hardware filter 3257 * accordingly. 
3258 */ 3259 IXGBE_CORE_LOCK(adapter); 3260 ixv_disable_intr(adapter); 3261 ixv_set_rxfilter(adapter); 3262 ixv_enable_intr(adapter); 3263 IXGBE_CORE_UNLOCK(adapter); 3264 } 3265 return 0; 3266 } 3267 } /* ixv_ioctl */ 3268 3269 /************************************************************************ 3270 * ixv_init 3271 ************************************************************************/ 3272 static int 3273 ixv_init(struct ifnet *ifp) 3274 { 3275 struct adapter *adapter = ifp->if_softc; 3276 3277 IXGBE_CORE_LOCK(adapter); 3278 ixv_init_locked(adapter); 3279 IXGBE_CORE_UNLOCK(adapter); 3280 3281 return 0; 3282 } /* ixv_init */ 3283 3284 /************************************************************************ 3285 * ixv_handle_que 3286 ************************************************************************/ 3287 static void 3288 ixv_handle_que(void *context) 3289 { 3290 struct ix_queue *que = context; 3291 struct adapter *adapter = que->adapter; 3292 struct tx_ring *txr = que->txr; 3293 struct ifnet *ifp = adapter->ifp; 3294 bool more; 3295 3296 IXGBE_EVC_ADD(&que->handleq, 1); 3297 3298 if (ifp->if_flags & IFF_RUNNING) { 3299 IXGBE_TX_LOCK(txr); 3300 more = ixgbe_txeof(txr); 3301 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) 3302 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq)) 3303 ixgbe_mq_start_locked(ifp, txr); 3304 /* Only for queue 0 */ 3305 /* NetBSD still needs this for CBQ */ 3306 if ((&adapter->queues[0] == que) 3307 && (!ixgbe_legacy_ring_empty(ifp, NULL))) 3308 ixgbe_legacy_start_locked(ifp, txr); 3309 IXGBE_TX_UNLOCK(txr); 3310 more |= ixgbe_rxeof(que); 3311 if (more) { 3312 IXGBE_EVC_ADD(&que->req, 1); 3313 if (adapter->txrx_use_workqueue) { 3314 /* 3315 * "enqueued flag" is not required here 3316 * the same as ixg(4). See ixgbe_msix_que(). 3317 */ 3318 workqueue_enqueue(adapter->que_wq, 3319 &que->wq_cookie, curcpu()); 3320 } else 3321 softint_schedule(que->que_si); 3322 return; 3323 } 3324 } 3325 3326 /* Re-enable this interrupt */ 3327 ixv_enable_queue(adapter, que->msix); 3328 3329 return; 3330 } /* ixv_handle_que */ 3331 3332 /************************************************************************ 3333 * ixv_handle_que_work 3334 ************************************************************************/ 3335 static void 3336 ixv_handle_que_work(struct work *wk, void *context) 3337 { 3338 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie); 3339 3340 /* 3341 * "enqueued flag" is not required here the same as ixg(4). 3342 * See ixgbe_msix_que(). 
3343 */ 3344 ixv_handle_que(que); 3345 } 3346 3347 /************************************************************************ 3348 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers 3349 ************************************************************************/ 3350 static int 3351 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa) 3352 { 3353 device_t dev = adapter->dev; 3354 struct ix_queue *que = adapter->queues; 3355 struct tx_ring *txr = adapter->tx_rings; 3356 int error, msix_ctrl, rid, vector = 0; 3357 pci_chipset_tag_t pc; 3358 pcitag_t tag; 3359 char intrbuf[PCI_INTRSTR_LEN]; 3360 char wqname[MAXCOMLEN]; 3361 char intr_xname[32]; 3362 const char *intrstr = NULL; 3363 kcpuset_t *affinity; 3364 int cpu_id = 0; 3365 3366 pc = adapter->osdep.pc; 3367 tag = adapter->osdep.tag; 3368 3369 adapter->osdep.nintrs = adapter->num_queues + 1; 3370 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs, 3371 adapter->osdep.nintrs) != 0) { 3372 aprint_error_dev(dev, 3373 "failed to allocate MSI-X interrupt\n"); 3374 return (ENXIO); 3375 } 3376 3377 kcpuset_create(&affinity, false); 3378 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { 3379 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d", 3380 device_xname(dev), i); 3381 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf, 3382 sizeof(intrbuf)); 3383 #ifdef IXGBE_MPSAFE 3384 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE, 3385 true); 3386 #endif 3387 /* Set the handler function */ 3388 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc, 3389 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que, 3390 intr_xname); 3391 if (que->res == NULL) { 3392 pci_intr_release(pc, adapter->osdep.intrs, 3393 adapter->osdep.nintrs); 3394 aprint_error_dev(dev, 3395 "Failed to register QUE handler\n"); 3396 kcpuset_destroy(affinity); 3397 return (ENXIO); 3398 } 3399 que->msix = vector; 3400 adapter->active_queues |= (u64)(1 << que->msix); 3401 3402 cpu_id = i; 3403 /* Round-robin affinity */ 3404 kcpuset_zero(affinity); 3405 kcpuset_set(affinity, cpu_id % ncpu); 3406 error = interrupt_distribute(adapter->osdep.ihs[i], affinity, 3407 NULL); 3408 aprint_normal_dev(dev, "for TX/RX, interrupting at %s", 3409 intrstr); 3410 if (error == 0) 3411 aprint_normal(", bound queue %d to cpu %d\n", 3412 i, cpu_id % ncpu); 3413 else 3414 aprint_normal("\n"); 3415 3416 #ifndef IXGBE_LEGACY_TX 3417 txr->txr_si 3418 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 3419 ixgbe_deferred_mq_start, txr); 3420 #endif 3421 que->que_si 3422 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 3423 ixv_handle_que, que); 3424 if (que->que_si == NULL) { 3425 aprint_error_dev(dev, 3426 "could not establish software interrupt\n"); 3427 } 3428 } 3429 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev)); 3430 error = workqueue_create(&adapter->txr_wq, wqname, 3431 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 3432 IXGBE_WORKQUEUE_FLAGS); 3433 if (error) { 3434 aprint_error_dev(dev, 3435 "couldn't create workqueue for deferred Tx\n"); 3436 } 3437 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int)); 3438 3439 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev)); 3440 error = workqueue_create(&adapter->que_wq, wqname, 3441 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 3442 IXGBE_WORKQUEUE_FLAGS); 3443 if (error) { 3444 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n"); 3445 } 3446 3447 /* and Mailbox */ 3448 cpu_id++; 
3449 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev)); 3450 adapter->vector = vector; 3451 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf, 3452 sizeof(intrbuf)); 3453 #ifdef IXGBE_MPSAFE 3454 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, 3455 true); 3456 #endif 3457 /* Set the mbx handler function */ 3458 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc, 3459 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter, 3460 intr_xname); 3461 if (adapter->osdep.ihs[vector] == NULL) { 3462 aprint_error_dev(dev, "Failed to register LINK handler\n"); 3463 kcpuset_destroy(affinity); 3464 return (ENXIO); 3465 } 3466 /* Round-robin affinity */ 3467 kcpuset_zero(affinity); 3468 kcpuset_set(affinity, cpu_id % ncpu); 3469 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, 3470 NULL); 3471 3472 aprint_normal_dev(dev, 3473 "for link, interrupting at %s", intrstr); 3474 if (error == 0) 3475 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu); 3476 else 3477 aprint_normal("\n"); 3478 3479 /* Tasklets for Mailbox */ 3480 snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev)); 3481 error = workqueue_create(&adapter->admin_wq, wqname, 3482 ixv_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 3483 IXGBE_TASKLET_WQ_FLAGS); 3484 if (error) { 3485 aprint_error_dev(dev, 3486 "could not create admin workqueue (%d)\n", error); 3487 goto err_out; 3488 } 3489 3490 /* 3491 * Due to a broken design QEMU will fail to properly 3492 * enable the guest for MSI-X unless the vectors in 3493 * the table are all set up, so we must rewrite the 3494 * ENABLE in the MSI-X control register again at this 3495 * point to cause it to successfully initialize us. 3496 */ 3497 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { 3498 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL); 3499 rid += PCI_MSIX_CTL; 3500 msix_ctrl = pci_conf_read(pc, tag, rid); 3501 msix_ctrl |= PCI_MSIX_CTL_ENABLE; 3502 pci_conf_write(pc, tag, rid, msix_ctrl); 3503 } 3504 3505 kcpuset_destroy(affinity); 3506 return (0); 3507 err_out: 3508 kcpuset_destroy(affinity); 3509 ixv_free_deferred_handlers(adapter); 3510 ixv_free_pci_resources(adapter); 3511 return (error); 3512 } /* ixv_allocate_msix */ 3513 3514 /************************************************************************ 3515 * ixv_configure_interrupts - Setup MSI-X resources 3516 * 3517 * Note: The VF device MUST use MSI-X, there is no fallback. 3518 ************************************************************************/ 3519 static int 3520 ixv_configure_interrupts(struct adapter *adapter) 3521 { 3522 device_t dev = adapter->dev; 3523 int want, queues, msgs; 3524 3525 /* Must have at least 2 MSI-X vectors */ 3526 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag); 3527 if (msgs < 2) { 3528 aprint_error_dev(dev, "MSIX config error\n"); 3529 return (ENXIO); 3530 } 3531 msgs = MIN(msgs, IXG_MAX_NINTR); 3532 3533 /* Figure out a reasonable auto config value */ 3534 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu; 3535 3536 if (ixv_num_queues != 0) 3537 queues = ixv_num_queues; 3538 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES)) 3539 queues = IXGBE_VF_MAX_TX_QUEUES; 3540 3541 /* 3542 * Want vectors for the queues, 3543 * plus an additional for mailbox. 
3544 */ 3545 want = queues + 1; 3546 if (msgs >= want) 3547 msgs = want; 3548 else { 3549 aprint_error_dev(dev, 3550 "MSI-X Configuration Problem, " 3551 "%d vectors but %d queues wanted!\n", msgs, want); 3552 return -1; 3553 } 3554 3555 aprint_normal_dev(dev, 3556 "Using MSI-X interrupts with %d vectors\n", msgs); 3557 adapter->num_queues = queues; 3558 3559 return (0); 3560 } /* ixv_configure_interrupts */ 3561 3562 3563 /************************************************************************ 3564 * ixv_handle_admin - Tasklet handler for MSI-X MBX interrupts 3565 * 3566 * Done outside of interrupt context since the driver might sleep 3567 ************************************************************************/ 3568 static void 3569 ixv_handle_admin(struct work *wk, void *context) 3570 { 3571 struct adapter *adapter = context; 3572 struct ixgbe_hw *hw = &adapter->hw; 3573 3574 IXGBE_CORE_LOCK(adapter); 3575 3576 IXGBE_EVC_ADD(&adapter->link_workev, 1); 3577 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed, 3578 &adapter->link_up, FALSE); 3579 ixv_update_link_status(adapter); 3580 3581 adapter->task_requests = 0; 3582 atomic_store_relaxed(&adapter->admin_pending, 0); 3583 3584 /* Re-enable interrupts */ 3585 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); 3586 3587 IXGBE_CORE_UNLOCK(adapter); 3588 } /* ixv_handle_admin */ 3589 3590 /************************************************************************ 3591 * ixv_check_link - Used in the local timer to poll for link changes 3592 ************************************************************************/ 3593 static s32 3594 ixv_check_link(struct adapter *adapter) 3595 { 3596 s32 error; 3597 3598 KASSERT(mutex_owned(&adapter->core_mtx)); 3599 3600 adapter->hw.mac.get_link_status = TRUE; 3601 3602 error = adapter->hw.mac.ops.check_link(&adapter->hw, 3603 &adapter->link_speed, &adapter->link_up, FALSE); 3604 ixv_update_link_status(adapter); 3605 3606 return error; 3607 } /* ixv_check_link */ 3608