1 /* $NetBSD: if_ixl.c,v 1.76 2021/02/09 15:05:49 jakllsch Exp $ */ 2 3 /* 4 * Copyright (c) 2013-2015, Intel Corporation 5 * All rights reserved. 6 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 /* 51 * Copyright (c) 2019 Internet Initiative Japan, Inc. 52 * All rights reserved. 53 * 54 * Redistribution and use in source and binary forms, with or without 55 * modification, are permitted provided that the following conditions 56 * are met: 57 * 1. Redistributions of source code must retain the above copyright 58 * notice, this list of conditions and the following disclaimer. 59 * 2. Redistributions in binary form must reproduce the above copyright 60 * notice, this list of conditions and the following disclaimer in the 61 * documentation and/or other materials provided with the distribution. 62 * 63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 66 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 73 * POSSIBILITY OF SUCH DAMAGE. 74 */ 75 76 #include <sys/cdefs.h> 77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.76 2021/02/09 15:05:49 jakllsch Exp $"); 78 79 #ifdef _KERNEL_OPT 80 #include "opt_net_mpsafe.h" 81 #include "opt_if_ixl.h" 82 #endif 83 84 #include <sys/param.h> 85 #include <sys/types.h> 86 87 #include <sys/bitops.h> 88 #include <sys/cpu.h> 89 #include <sys/device.h> 90 #include <sys/evcnt.h> 91 #include <sys/interrupt.h> 92 #include <sys/kmem.h> 93 #include <sys/module.h> 94 #include <sys/mutex.h> 95 #include <sys/pcq.h> 96 #include <sys/syslog.h> 97 #include <sys/workqueue.h> 98 99 #include <sys/bus.h> 100 101 #include <net/bpf.h> 102 #include <net/if.h> 103 #include <net/if_dl.h> 104 #include <net/if_media.h> 105 #include <net/if_ether.h> 106 #include <net/rss_config.h> 107 108 #include <netinet/tcp.h> /* for struct tcphdr */ 109 #include <netinet/udp.h> /* for struct udphdr */ 110 111 #include <dev/pci/pcivar.h> 112 #include <dev/pci/pcidevs.h> 113 114 #include <dev/pci/if_ixlreg.h> 115 #include <dev/pci/if_ixlvar.h> 116 117 #include <prop/proplib.h> 118 119 struct ixl_softc; /* defined */ 120 121 #define I40E_PF_RESET_WAIT_COUNT 200 122 #define I40E_AQ_LARGE_BUF 512 123 124 /* bitfields for Tx queue mapping in QTX_CTL */ 125 #define I40E_QTX_CTL_VF_QUEUE 0x0 126 #define I40E_QTX_CTL_VM_QUEUE 0x1 127 #define I40E_QTX_CTL_PF_QUEUE 0x2 128 129 #define I40E_QUEUE_TYPE_EOL 0x7ff 130 #define I40E_INTR_NOTX_QUEUE 0 131 132 #define I40E_QUEUE_TYPE_RX 0x0 133 #define I40E_QUEUE_TYPE_TX 0x1 134 #define I40E_QUEUE_TYPE_PE_CEQ 0x2 135 #define I40E_QUEUE_TYPE_UNKNOWN 0x3 136 137 #define I40E_ITR_INDEX_RX 0x0 138 #define I40E_ITR_INDEX_TX 0x1 139 #define I40E_ITR_INDEX_OTHER 0x2 140 #define I40E_ITR_INDEX_NONE 0x3 141 #define IXL_ITR_RX 0x7a /* 4K intrs/sec */ 142 #define IXL_ITR_TX 0x7a /* 4K intrs/sec */ 143 144 #define I40E_INTR_NOTX_QUEUE 0 145 #define I40E_INTR_NOTX_INTR 0 146 #define I40E_INTR_NOTX_RX_QUEUE 0 147 #define I40E_INTR_NOTX_TX_QUEUE 1 148 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK 149 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK 150 151 #define I40E_HASH_LUT_SIZE_128 0 152 153 #define IXL_ICR0_CRIT_ERR_MASK \ 154 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \ 155 I40E_PFINT_ICR0_ECC_ERR_MASK | \ 156 I40E_PFINT_ICR0_PE_CRITERR_MASK) 157 158 #define IXL_QUEUE_MAX_XL710 64 159 #define IXL_QUEUE_MAX_X722 128 160 161 #define IXL_TX_PKT_DESCS 8 162 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS) 163 #define IXL_TX_QUEUE_ALIGN 128 164 #define IXL_RX_QUEUE_ALIGN 128 165 166 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN) 167 #define IXL_MTU_ETHERLEN ETHER_HDR_LEN \ 168 + ETHER_CRC_LEN 169 #if 0 170 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN) 171 #else 172 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */ 173 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN) 174 #endif 175 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN) 176 177 #define IXL_PCIREG PCI_MAPREG_START 178 179 #define IXL_ITR0 0x0 180 #define IXL_ITR1 0x1 181 #define IXL_ITR2 0x2 
#define IXL_NOITR		0x3

#define IXL_AQ_NUM		256
#define IXL_AQ_MASK		(IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN		64 /* lol */
#define IXL_AQ_BUFLEN		4096

#define IXL_HMC_ROUNDUP		512
#define IXL_HMC_PGSIZE		4096
#define IXL_HMC_DVASZ		sizeof(uint64_t)
#define IXL_HMC_PGS		(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ		(IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID		1ULL

#define IXL_ATQ_EXEC_TIMEOUT	(10 * hz)

#define IXL_SRRD_SRCTL_ATTEMPTS	100000

struct ixl_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;
	bus_size_t		atq_bah;

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;
	bus_size_t		arq_bah;

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};

struct ixl_phy_type {
	uint64_t	phy_type;
	uint64_t	ifm_type;
};

struct ixl_speed_type {
	uint8_t		dev_speed;
	uint64_t	net_speed;
};

struct ixl_hmc_entry {
	uint64_t	hmc_base;
	uint32_t	hmc_count;
	uint64_t	hmc_size;
};

enum ixl_hmc_types {
	IXL_HMC_LAN_TX = 0,
	IXL_HMC_LAN_RX,
	IXL_HMC_FCOE_CTX,
	IXL_HMC_FCOE_FILTER,
	IXL_HMC_COUNT
};

struct ixl_hmc_pack {
	uint16_t	offset;
	uint16_t	width;
	uint16_t	lsb;
};

/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for c to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure.
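 *
 * as a rough illustration of what that packing amounts to (a hypothetical
 * helper for a single field, not the driver's actual ixl_hmc_pack()),
 * copying "width" bits of a field value into the object at bit position
 * "lsb" can be sketched as:
 *
 *	static void
 *	pack_one(uint8_t *obj, uint64_t val, uint16_t width, uint16_t lsb)
 *	{
 *		uint16_t bit;
 *
 *		for (bit = 0; bit < width; bit++, lsb++) {
 *			if (ISSET(val, __BIT(bit)))
 *				obj[lsb / NBBY] |= __BIT(lsb % NBBY);
 *		}
 *	}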
259 */ 260 261 struct ixl_hmc_rxq { 262 uint16_t head; 263 uint8_t cpuid; 264 uint64_t base; 265 #define IXL_HMC_RXQ_BASE_UNIT 128 266 uint16_t qlen; 267 uint16_t dbuff; 268 #define IXL_HMC_RXQ_DBUFF_UNIT 128 269 uint8_t hbuff; 270 #define IXL_HMC_RXQ_HBUFF_UNIT 64 271 uint8_t dtype; 272 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0 273 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1 274 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2 275 uint8_t dsize; 276 #define IXL_HMC_RXQ_DSIZE_16 0 277 #define IXL_HMC_RXQ_DSIZE_32 1 278 uint8_t crcstrip; 279 uint8_t fc_ena; 280 uint8_t l2sel; 281 uint8_t hsplit_0; 282 uint8_t hsplit_1; 283 uint8_t showiv; 284 uint16_t rxmax; 285 uint8_t tphrdesc_ena; 286 uint8_t tphwdesc_ena; 287 uint8_t tphdata_ena; 288 uint8_t tphhead_ena; 289 uint8_t lrxqthresh; 290 uint8_t prefena; 291 }; 292 293 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = { 294 { offsetof(struct ixl_hmc_rxq, head), 13, 0 }, 295 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 }, 296 { offsetof(struct ixl_hmc_rxq, base), 57, 32 }, 297 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 }, 298 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 }, 299 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 }, 300 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 }, 301 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 }, 302 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 }, 303 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 }, 304 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 }, 305 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 }, 306 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 }, 307 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 }, 308 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 }, 309 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 }, 310 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 }, 311 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 }, 312 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 }, 313 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 }, 314 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 }, 315 }; 316 317 #define IXL_HMC_RXQ_MINSIZE (201 + 1) 318 319 struct ixl_hmc_txq { 320 uint16_t head; 321 uint8_t new_context; 322 uint64_t base; 323 #define IXL_HMC_TXQ_BASE_UNIT 128 324 uint8_t fc_ena; 325 uint8_t timesync_ena; 326 uint8_t fd_ena; 327 uint8_t alt_vlan_ena; 328 uint8_t cpuid; 329 uint16_t thead_wb; 330 uint8_t head_wb_ena; 331 #define IXL_HMC_TXQ_DESC_WB 0 332 #define IXL_HMC_TXQ_HEAD_WB 1 333 uint16_t qlen; 334 uint8_t tphrdesc_ena; 335 uint8_t tphrpacket_ena; 336 uint8_t tphwdesc_ena; 337 uint64_t head_wb_addr; 338 uint32_t crc; 339 uint16_t rdylist; 340 uint8_t rdylist_act; 341 }; 342 343 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = { 344 { offsetof(struct ixl_hmc_txq, head), 13, 0 }, 345 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 }, 346 { offsetof(struct ixl_hmc_txq, base), 57, 32 }, 347 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 }, 348 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 }, 349 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 }, 350 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 }, 351 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 }, 352 /* line 1 */ 353 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 }, 354 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 }, 355 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 }, 356 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 }, 357 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 }, 358 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 }, 359 { offsetof(struct 
ixl_hmc_txq, head_wb_addr), 64, 64 + 128 }, 360 /* line 7 */ 361 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) }, 362 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) }, 363 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) }, 364 }; 365 366 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1) 367 368 struct ixl_work { 369 struct work ixw_cookie; 370 void (*ixw_func)(void *); 371 void *ixw_arg; 372 unsigned int ixw_added; 373 }; 374 #define IXL_WORKQUEUE_PRI PRI_SOFTNET 375 376 struct ixl_tx_map { 377 struct mbuf *txm_m; 378 bus_dmamap_t txm_map; 379 unsigned int txm_eop; 380 }; 381 382 struct ixl_tx_ring { 383 kmutex_t txr_lock; 384 struct ixl_softc *txr_sc; 385 386 unsigned int txr_prod; 387 unsigned int txr_cons; 388 389 struct ixl_tx_map *txr_maps; 390 struct ixl_dmamem txr_mem; 391 392 bus_size_t txr_tail; 393 unsigned int txr_qid; 394 pcq_t *txr_intrq; 395 void *txr_si; 396 397 struct evcnt txr_defragged; 398 struct evcnt txr_defrag_failed; 399 struct evcnt txr_pcqdrop; 400 struct evcnt txr_transmitdef; 401 struct evcnt txr_intr; 402 struct evcnt txr_defer; 403 }; 404 405 struct ixl_rx_map { 406 struct mbuf *rxm_m; 407 bus_dmamap_t rxm_map; 408 }; 409 410 struct ixl_rx_ring { 411 kmutex_t rxr_lock; 412 413 unsigned int rxr_prod; 414 unsigned int rxr_cons; 415 416 struct ixl_rx_map *rxr_maps; 417 struct ixl_dmamem rxr_mem; 418 419 struct mbuf *rxr_m_head; 420 struct mbuf **rxr_m_tail; 421 422 bus_size_t rxr_tail; 423 unsigned int rxr_qid; 424 425 struct evcnt rxr_mgethdr_failed; 426 struct evcnt rxr_mgetcl_failed; 427 struct evcnt rxr_mbuf_load_failed; 428 struct evcnt rxr_intr; 429 struct evcnt rxr_defer; 430 }; 431 432 struct ixl_queue_pair { 433 struct ixl_softc *qp_sc; 434 struct ixl_tx_ring *qp_txr; 435 struct ixl_rx_ring *qp_rxr; 436 437 char qp_name[16]; 438 439 void *qp_si; 440 struct work qp_work; 441 bool qp_workqueue; 442 }; 443 444 struct ixl_atq { 445 struct ixl_aq_desc iatq_desc; 446 void (*iatq_fn)(struct ixl_softc *, 447 const struct ixl_aq_desc *); 448 }; 449 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq); 450 451 struct ixl_product { 452 unsigned int vendor_id; 453 unsigned int product_id; 454 }; 455 456 struct ixl_stats_counters { 457 bool isc_has_offset; 458 struct evcnt isc_crc_errors; 459 uint64_t isc_crc_errors_offset; 460 struct evcnt isc_illegal_bytes; 461 uint64_t isc_illegal_bytes_offset; 462 struct evcnt isc_rx_bytes; 463 uint64_t isc_rx_bytes_offset; 464 struct evcnt isc_rx_discards; 465 uint64_t isc_rx_discards_offset; 466 struct evcnt isc_rx_unicast; 467 uint64_t isc_rx_unicast_offset; 468 struct evcnt isc_rx_multicast; 469 uint64_t isc_rx_multicast_offset; 470 struct evcnt isc_rx_broadcast; 471 uint64_t isc_rx_broadcast_offset; 472 struct evcnt isc_rx_size_64; 473 uint64_t isc_rx_size_64_offset; 474 struct evcnt isc_rx_size_127; 475 uint64_t isc_rx_size_127_offset; 476 struct evcnt isc_rx_size_255; 477 uint64_t isc_rx_size_255_offset; 478 struct evcnt isc_rx_size_511; 479 uint64_t isc_rx_size_511_offset; 480 struct evcnt isc_rx_size_1023; 481 uint64_t isc_rx_size_1023_offset; 482 struct evcnt isc_rx_size_1522; 483 uint64_t isc_rx_size_1522_offset; 484 struct evcnt isc_rx_size_big; 485 uint64_t isc_rx_size_big_offset; 486 struct evcnt isc_rx_undersize; 487 uint64_t isc_rx_undersize_offset; 488 struct evcnt isc_rx_oversize; 489 uint64_t isc_rx_oversize_offset; 490 struct evcnt isc_rx_fragments; 491 uint64_t isc_rx_fragments_offset; 492 struct evcnt isc_rx_jabber; 493 uint64_t isc_rx_jabber_offset; 494 struct evcnt isc_tx_bytes; 495 uint64_t 
isc_tx_bytes_offset;
	struct evcnt		isc_tx_dropped_link_down;
	uint64_t		isc_tx_dropped_link_down_offset;
	struct evcnt		isc_tx_unicast;
	uint64_t		isc_tx_unicast_offset;
	struct evcnt		isc_tx_multicast;
	uint64_t		isc_tx_multicast_offset;
	struct evcnt		isc_tx_broadcast;
	uint64_t		isc_tx_broadcast_offset;
	struct evcnt		isc_tx_size_64;
	uint64_t		isc_tx_size_64_offset;
	struct evcnt		isc_tx_size_127;
	uint64_t		isc_tx_size_127_offset;
	struct evcnt		isc_tx_size_255;
	uint64_t		isc_tx_size_255_offset;
	struct evcnt		isc_tx_size_511;
	uint64_t		isc_tx_size_511_offset;
	struct evcnt		isc_tx_size_1023;
	uint64_t		isc_tx_size_1023_offset;
	struct evcnt		isc_tx_size_1522;
	uint64_t		isc_tx_size_1522_offset;
	struct evcnt		isc_tx_size_big;
	uint64_t		isc_tx_size_big_offset;
	struct evcnt		isc_mac_local_faults;
	uint64_t		isc_mac_local_faults_offset;
	struct evcnt		isc_mac_remote_faults;
	uint64_t		isc_mac_remote_faults_offset;
	struct evcnt		isc_link_xon_rx;
	uint64_t		isc_link_xon_rx_offset;
	struct evcnt		isc_link_xon_tx;
	uint64_t		isc_link_xon_tx_offset;
	struct evcnt		isc_link_xoff_rx;
	uint64_t		isc_link_xoff_rx_offset;
	struct evcnt		isc_link_xoff_tx;
	uint64_t		isc_link_xoff_tx_offset;
	struct evcnt		isc_vsi_rx_discards;
	uint64_t		isc_vsi_rx_discards_offset;
	struct evcnt		isc_vsi_rx_bytes;
	uint64_t		isc_vsi_rx_bytes_offset;
	struct evcnt		isc_vsi_rx_unicast;
	uint64_t		isc_vsi_rx_unicast_offset;
	struct evcnt		isc_vsi_rx_multicast;
	uint64_t		isc_vsi_rx_multicast_offset;
	struct evcnt		isc_vsi_rx_broadcast;
	uint64_t		isc_vsi_rx_broadcast_offset;
	struct evcnt		isc_vsi_tx_errors;
	uint64_t		isc_vsi_tx_errors_offset;
	struct evcnt		isc_vsi_tx_bytes;
	uint64_t		isc_vsi_tx_bytes_offset;
	struct evcnt		isc_vsi_tx_unicast;
	uint64_t		isc_vsi_tx_unicast_offset;
	struct evcnt		isc_vsi_tx_multicast;
	uint64_t		isc_vsi_tx_multicast_offset;
	struct evcnt		isc_vsi_tx_broadcast;
	uint64_t		isc_vsi_tx_broadcast_offset;
};

/*
 * Locking notes:
 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
 *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
 *    - no more than one of these locks may be held at once.
 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
 *   (a spin mutex).
 *    - this lock cannot be held together with txr_lock or rxr_lock.
 * + fields named sc_arq_* are not protected by any lock.
 *    - all operations on sc_arq_* are done in the single context
 *      associated with sc_arq_task.
 * + the other fields in ixl_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *    - it must be acquired before any other lock is acquired, and it
 *      can only be released after that other lock has been released.
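 *
 * as an illustrative sketch of that ordering (not a verbatim code path
 * in this driver), a thread that needs both sc_cfg_lock and a ring lock
 * is expected to nest them like this:
 *
 *	mutex_enter(&sc->sc_cfg_lock);		adaptive lock, taken first
 *	mutex_enter(&txr->txr_lock);		spin lock, taken second
 *	... touch configuration state and the tx ring ...
 *	mutex_exit(&txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);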
567 * */ 568 569 struct ixl_softc { 570 device_t sc_dev; 571 struct ethercom sc_ec; 572 bool sc_attached; 573 bool sc_dead; 574 uint32_t sc_port; 575 struct sysctllog *sc_sysctllog; 576 struct workqueue *sc_workq; 577 struct workqueue *sc_workq_txrx; 578 int sc_stats_intval; 579 callout_t sc_stats_callout; 580 struct ixl_work sc_stats_task; 581 struct ixl_stats_counters 582 sc_stats_counters; 583 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 584 struct ifmedia sc_media; 585 uint64_t sc_media_status; 586 uint64_t sc_media_active; 587 uint64_t sc_phy_types; 588 uint8_t sc_phy_abilities; 589 uint8_t sc_phy_linkspeed; 590 uint8_t sc_phy_fec_cfg; 591 uint16_t sc_eee_cap; 592 uint32_t sc_eeer_val; 593 uint8_t sc_d3_lpan; 594 kmutex_t sc_cfg_lock; 595 enum i40e_mac_type sc_mac_type; 596 uint32_t sc_rss_table_size; 597 uint32_t sc_rss_table_entry_width; 598 bool sc_txrx_workqueue; 599 u_int sc_tx_process_limit; 600 u_int sc_rx_process_limit; 601 u_int sc_tx_intr_process_limit; 602 u_int sc_rx_intr_process_limit; 603 604 int sc_cur_ec_capenable; 605 606 struct pci_attach_args sc_pa; 607 pci_intr_handle_t *sc_ihp; 608 void **sc_ihs; 609 unsigned int sc_nintrs; 610 611 bus_dma_tag_t sc_dmat; 612 bus_space_tag_t sc_memt; 613 bus_space_handle_t sc_memh; 614 bus_size_t sc_mems; 615 616 uint8_t sc_pf_id; 617 uint16_t sc_uplink_seid; /* le */ 618 uint16_t sc_downlink_seid; /* le */ 619 uint16_t sc_vsi_number; 620 uint16_t sc_vsi_stat_counter_idx; 621 uint16_t sc_seid; 622 unsigned int sc_base_queue; 623 624 pci_intr_type_t sc_intrtype; 625 unsigned int sc_msix_vector_queue; 626 627 struct ixl_dmamem sc_scratch; 628 struct ixl_dmamem sc_aqbuf; 629 630 const struct ixl_aq_regs * 631 sc_aq_regs; 632 uint32_t sc_aq_flags; 633 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0) 634 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1) 635 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2) 636 #define IXL_SC_AQ_FLAG_RSS __BIT(3) 637 638 kmutex_t sc_atq_lock; 639 kcondvar_t sc_atq_cv; 640 struct ixl_dmamem sc_atq; 641 unsigned int sc_atq_prod; 642 unsigned int sc_atq_cons; 643 644 struct ixl_dmamem sc_arq; 645 struct ixl_work sc_arq_task; 646 struct ixl_aq_bufs sc_arq_idle; 647 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM]; 648 unsigned int sc_arq_prod; 649 unsigned int sc_arq_cons; 650 651 struct ixl_work sc_link_state_task; 652 struct ixl_atq sc_link_state_atq; 653 654 struct ixl_dmamem sc_hmc_sd; 655 struct ixl_dmamem sc_hmc_pd; 656 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT]; 657 658 struct if_percpuq *sc_ipq; 659 unsigned int sc_tx_ring_ndescs; 660 unsigned int sc_rx_ring_ndescs; 661 unsigned int sc_nqueue_pairs; 662 unsigned int sc_nqueue_pairs_max; 663 unsigned int sc_nqueue_pairs_device; 664 struct ixl_queue_pair *sc_qps; 665 uint32_t sc_itr_rx; 666 uint32_t sc_itr_tx; 667 668 struct evcnt sc_event_atq; 669 struct evcnt sc_event_link; 670 struct evcnt sc_event_ecc_err; 671 struct evcnt sc_event_pci_exception; 672 struct evcnt sc_event_crit_err; 673 }; 674 675 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX 676 #define IXL_TX_PROCESS_LIMIT 256 677 #define IXL_RX_PROCESS_LIMIT 256 678 #define IXL_TX_INTR_PROCESS_LIMIT 256 679 #define IXL_RX_INTR_PROCESS_LIMIT 0U 680 681 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \ 682 IFCAP_CSUM_TCPv4_Rx | \ 683 IFCAP_CSUM_UDPv4_Rx | \ 684 IFCAP_CSUM_TCPv6_Rx | \ 685 IFCAP_CSUM_UDPv6_Rx) 686 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \ 687 IFCAP_CSUM_TCPv4_Tx | \ 688 IFCAP_CSUM_UDPv4_Tx | \ 689 IFCAP_CSUM_TCPv6_Tx | \ 690 IFCAP_CSUM_UDPv6_Tx) 691 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \ 692 M_CSUM_TCPv4 | 
M_CSUM_TCPv6 | \ 693 M_CSUM_UDPv4 | M_CSUM_UDPv6) 694 695 #define delaymsec(_x) DELAY(1000 * (_x)) 696 #ifdef IXL_DEBUG 697 #define DDPRINTF(sc, fmt, args...) \ 698 do { \ 699 if ((sc) != NULL) { \ 700 device_printf( \ 701 ((struct ixl_softc *)(sc))->sc_dev, \ 702 ""); \ 703 } \ 704 printf("%s:\t" fmt, __func__, ##args); \ 705 } while (0) 706 #else 707 #define DDPRINTF(sc, fmt, args...) __nothing 708 #endif 709 #ifndef IXL_STATS_INTERVAL_MSEC 710 #define IXL_STATS_INTERVAL_MSEC 10000 711 #endif 712 #ifndef IXL_QUEUE_NUM 713 #define IXL_QUEUE_NUM 0 714 #endif 715 716 static bool ixl_param_nomsix = false; 717 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC; 718 static int ixl_param_nqps_limit = IXL_QUEUE_NUM; 719 static unsigned int ixl_param_tx_ndescs = 512; 720 static unsigned int ixl_param_rx_ndescs = 256; 721 722 static enum i40e_mac_type 723 ixl_mactype(pci_product_id_t); 724 static void ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t); 725 static void ixl_clear_hw(struct ixl_softc *); 726 static int ixl_pf_reset(struct ixl_softc *); 727 728 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *, 729 bus_size_t, bus_size_t); 730 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *); 731 732 static int ixl_arq_fill(struct ixl_softc *); 733 static void ixl_arq_unfill(struct ixl_softc *); 734 735 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *, 736 unsigned int); 737 static void ixl_atq_set(struct ixl_atq *, 738 void (*)(struct ixl_softc *, const struct ixl_aq_desc *)); 739 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *); 740 static void ixl_atq_done(struct ixl_softc *); 741 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *); 742 static int ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *); 743 static int ixl_get_version(struct ixl_softc *); 744 static int ixl_get_nvm_version(struct ixl_softc *); 745 static int ixl_get_hw_capabilities(struct ixl_softc *); 746 static int ixl_pxe_clear(struct ixl_softc *); 747 static int ixl_lldp_shut(struct ixl_softc *); 748 static int ixl_get_mac(struct ixl_softc *); 749 static int ixl_get_switch_config(struct ixl_softc *); 750 static int ixl_phy_mask_ints(struct ixl_softc *); 751 static int ixl_get_phy_info(struct ixl_softc *); 752 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool); 753 static int ixl_set_phy_autoselect(struct ixl_softc *); 754 static int ixl_restart_an(struct ixl_softc *); 755 static int ixl_hmc(struct ixl_softc *); 756 static void ixl_hmc_free(struct ixl_softc *); 757 static int ixl_get_vsi(struct ixl_softc *); 758 static int ixl_set_vsi(struct ixl_softc *); 759 static void ixl_set_filter_control(struct ixl_softc *); 760 static void ixl_get_link_status(void *); 761 static int ixl_get_link_status_poll(struct ixl_softc *, int *); 762 static void ixl_get_link_status_done(struct ixl_softc *, 763 const struct ixl_aq_desc *); 764 static int ixl_set_link_status_locked(struct ixl_softc *, 765 const struct ixl_aq_desc *); 766 static uint64_t ixl_search_link_speed(uint8_t); 767 static uint8_t ixl_search_baudrate(uint64_t); 768 static void ixl_config_rss(struct ixl_softc *); 769 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *, 770 uint16_t, uint16_t); 771 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *, 772 uint16_t, uint16_t); 773 static void ixl_arq(void *); 774 static void ixl_hmc_pack(void *, const void *, 775 const struct ixl_hmc_pack *, unsigned int); 776 static uint32_t ixl_rd_rx_csr(struct 
ixl_softc *, uint32_t); 777 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t); 778 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *); 779 780 static int ixl_match(device_t, cfdata_t, void *); 781 static void ixl_attach(device_t, device_t, void *); 782 static int ixl_detach(device_t, int); 783 784 static void ixl_media_add(struct ixl_softc *); 785 static int ixl_media_change(struct ifnet *); 786 static void ixl_media_status(struct ifnet *, struct ifmediareq *); 787 static void ixl_watchdog(struct ifnet *); 788 static int ixl_ioctl(struct ifnet *, u_long, void *); 789 static void ixl_start(struct ifnet *); 790 static int ixl_transmit(struct ifnet *, struct mbuf *); 791 static void ixl_deferred_transmit(void *); 792 static int ixl_intr(void *); 793 static int ixl_queue_intr(void *); 794 static int ixl_other_intr(void *); 795 static void ixl_handle_queue(void *); 796 static void ixl_handle_queue_wk(struct work *, void *); 797 static void ixl_sched_handle_queue(struct ixl_softc *, 798 struct ixl_queue_pair *); 799 static int ixl_init(struct ifnet *); 800 static int ixl_init_locked(struct ixl_softc *); 801 static void ixl_stop(struct ifnet *, int); 802 static void ixl_stop_locked(struct ixl_softc *); 803 static int ixl_iff(struct ixl_softc *); 804 static int ixl_ifflags_cb(struct ethercom *); 805 static int ixl_setup_interrupts(struct ixl_softc *); 806 static int ixl_establish_intx(struct ixl_softc *); 807 static int ixl_establish_msix(struct ixl_softc *); 808 static void ixl_enable_queue_intr(struct ixl_softc *, 809 struct ixl_queue_pair *); 810 static void ixl_disable_queue_intr(struct ixl_softc *, 811 struct ixl_queue_pair *); 812 static void ixl_enable_other_intr(struct ixl_softc *); 813 static void ixl_disable_other_intr(struct ixl_softc *); 814 static void ixl_config_queue_intr(struct ixl_softc *); 815 static void ixl_config_other_intr(struct ixl_softc *); 816 817 static struct ixl_tx_ring * 818 ixl_txr_alloc(struct ixl_softc *, unsigned int); 819 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int); 820 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *); 821 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *); 822 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *); 823 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *); 824 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *); 825 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *); 826 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int); 827 828 static struct ixl_rx_ring * 829 ixl_rxr_alloc(struct ixl_softc *, unsigned int); 830 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *); 831 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *); 832 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *); 833 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *); 834 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *); 835 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *); 836 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int); 837 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *); 838 839 static struct workqueue * 840 ixl_workq_create(const char *, pri_t, int, int); 841 static void ixl_workq_destroy(struct workqueue *); 842 static int ixl_workqs_teardown(device_t); 843 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *); 844 
static void ixl_work_add(struct workqueue *, struct ixl_work *); 845 static void ixl_work_wait(struct workqueue *, struct ixl_work *); 846 static void ixl_workq_work(struct work *, void *); 847 static const struct ixl_product * 848 ixl_lookup(const struct pci_attach_args *pa); 849 static void ixl_link_state_update(struct ixl_softc *, 850 const struct ixl_aq_desc *); 851 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool); 852 static int ixl_setup_vlan_hwfilter(struct ixl_softc *); 853 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *); 854 static int ixl_update_macvlan(struct ixl_softc *); 855 static int ixl_setup_interrupts(struct ixl_softc *); 856 static void ixl_teardown_interrupts(struct ixl_softc *); 857 static int ixl_setup_stats(struct ixl_softc *); 858 static void ixl_teardown_stats(struct ixl_softc *); 859 static void ixl_stats_callout(void *); 860 static void ixl_stats_update(void *); 861 static int ixl_setup_sysctls(struct ixl_softc *); 862 static void ixl_teardown_sysctls(struct ixl_softc *); 863 static int ixl_sysctl_itr_handler(SYSCTLFN_PROTO); 864 static int ixl_queue_pairs_alloc(struct ixl_softc *); 865 static void ixl_queue_pairs_free(struct ixl_softc *); 866 867 static const struct ixl_phy_type ixl_phy_type_map[] = { 868 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII }, 869 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX }, 870 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 }, 871 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR }, 872 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 }, 873 { 1ULL << IXL_PHY_TYPE_XAUI | 874 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 }, 875 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI }, 876 { 1ULL << IXL_PHY_TYPE_XLAUI | 877 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI }, 878 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU | 879 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 }, 880 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU | 881 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 }, 882 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC }, 883 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC }, 884 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX }, 885 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL | 886 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T }, 887 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T }, 888 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR }, 889 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR }, 890 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX }, 891 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 }, 892 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 }, 893 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX }, 894 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX }, 895 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 }, 896 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR }, 897 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR }, 898 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR }, 899 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR }, 900 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC }, 901 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_ACC }, 902 }; 903 904 static const struct ixl_speed_type ixl_speed_type_map[] = { 905 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) }, 906 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) }, 907 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) }, 908 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) }, 909 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)}, 910 }; 911 912 static const struct ixl_aq_regs ixl_pf_aq_regs = { 913 .atq_tail = I40E_PF_ATQT, 914 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK, 915 .atq_head = I40E_PF_ATQH, 916 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK, 917 .atq_len 
= I40E_PF_ATQLEN, 918 .atq_bal = I40E_PF_ATQBAL, 919 .atq_bah = I40E_PF_ATQBAH, 920 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK, 921 922 .arq_tail = I40E_PF_ARQT, 923 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK, 924 .arq_head = I40E_PF_ARQH, 925 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK, 926 .arq_len = I40E_PF_ARQLEN, 927 .arq_bal = I40E_PF_ARQBAL, 928 .arq_bah = I40E_PF_ARQBAH, 929 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK, 930 }; 931 932 #define ixl_rd(_s, _r) \ 933 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r)) 934 #define ixl_wr(_s, _r, _v) \ 935 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v)) 936 #define ixl_barrier(_s, _r, _l, _o) \ 937 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o)) 938 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT) 939 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1)) 940 941 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc), 942 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL, 943 DVF_DETACH_SHUTDOWN); 944 945 static const struct ixl_product ixl_products[] = { 946 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP }, 947 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B }, 948 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C }, 949 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A }, 950 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B }, 951 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C }, 952 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T }, 953 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 }, 954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 }, 955 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G }, 956 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP }, 957 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 }, 958 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX }, 959 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP }, 960 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP }, 961 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET }, 962 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET }, 963 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP }, 964 /* required last entry */ 965 {0, 0} 966 }; 967 968 static const struct ixl_product * 969 ixl_lookup(const struct pci_attach_args *pa) 970 { 971 const struct ixl_product *ixlp; 972 973 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) { 974 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id && 975 PCI_PRODUCT(pa->pa_id) == ixlp->product_id) 976 return ixlp; 977 } 978 979 return NULL; 980 } 981 982 static int 983 ixl_match(device_t parent, cfdata_t match, void *aux) 984 { 985 const struct pci_attach_args *pa = aux; 986 987 return (ixl_lookup(pa) != NULL) ? 1 : 0; 988 } 989 990 static void 991 ixl_attach(device_t parent, device_t self, void *aux) 992 { 993 struct ixl_softc *sc; 994 struct pci_attach_args *pa = aux; 995 struct ifnet *ifp; 996 pcireg_t memtype; 997 uint32_t firstq, port, ari, func; 998 char xnamebuf[32]; 999 int tries, rv, link; 1000 1001 sc = device_private(self); 1002 sc->sc_dev = self; 1003 ifp = &sc->sc_ec.ec_if; 1004 1005 sc->sc_pa = *pa; 1006 sc->sc_dmat = (pci_dma64_available(pa)) ? 
1007 pa->pa_dmat64 : pa->pa_dmat; 1008 sc->sc_aq_regs = &ixl_pf_aq_regs; 1009 1010 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id)); 1011 1012 ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag); 1013 1014 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG); 1015 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0, 1016 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) { 1017 aprint_error(": unable to map registers\n"); 1018 return; 1019 } 1020 1021 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET); 1022 1023 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC); 1024 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK; 1025 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 1026 sc->sc_base_queue = firstq; 1027 1028 ixl_clear_hw(sc); 1029 if (ixl_pf_reset(sc) == -1) { 1030 /* error printed by ixl pf_reset */ 1031 goto unmap; 1032 } 1033 1034 port = ixl_rd(sc, I40E_PFGEN_PORTNUM); 1035 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK; 1036 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; 1037 sc->sc_port = port; 1038 aprint_normal(": port %u", sc->sc_port); 1039 1040 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP); 1041 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK; 1042 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; 1043 1044 func = ixl_rd(sc, I40E_PF_FUNC_RID); 1045 sc->sc_pf_id = func & (ari ? 0xff : 0x7); 1046 1047 /* initialise the adminq */ 1048 1049 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET); 1050 1051 if (ixl_dmamem_alloc(sc, &sc->sc_atq, 1052 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1053 aprint_error("\n" "%s: unable to allocate atq\n", 1054 device_xname(self)); 1055 goto unmap; 1056 } 1057 1058 SIMPLEQ_INIT(&sc->sc_arq_idle); 1059 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc); 1060 sc->sc_arq_cons = 0; 1061 sc->sc_arq_prod = 0; 1062 1063 if (ixl_dmamem_alloc(sc, &sc->sc_arq, 1064 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1065 aprint_error("\n" "%s: unable to allocate arq\n", 1066 device_xname(self)); 1067 goto free_atq; 1068 } 1069 1070 if (!ixl_arq_fill(sc)) { 1071 aprint_error("\n" "%s: unable to fill arq descriptors\n", 1072 device_xname(self)); 1073 goto free_arq; 1074 } 1075 1076 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1077 0, IXL_DMA_LEN(&sc->sc_atq), 1078 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1079 1080 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1081 0, IXL_DMA_LEN(&sc->sc_arq), 1082 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1083 1084 for (tries = 0; tries < 10; tries++) { 1085 sc->sc_atq_cons = 0; 1086 sc->sc_atq_prod = 0; 1087 1088 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1089 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1090 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1091 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1092 1093 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 1094 1095 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 1096 ixl_dmamem_lo(&sc->sc_atq)); 1097 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 1098 ixl_dmamem_hi(&sc->sc_atq)); 1099 ixl_wr(sc, sc->sc_aq_regs->atq_len, 1100 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM); 1101 1102 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 1103 ixl_dmamem_lo(&sc->sc_arq)); 1104 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 1105 ixl_dmamem_hi(&sc->sc_arq)); 1106 ixl_wr(sc, sc->sc_aq_regs->arq_len, 1107 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM); 1108 1109 rv = ixl_get_version(sc); 1110 if (rv == 0) 1111 break; 1112 if (rv != ETIMEDOUT) { 1113 aprint_error(", unable to get firmware version\n"); 1114 goto shutdown; 1115 } 1116 1117 delaymsec(100); 1118 } 1119 1120 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 1121 1122 if 
(ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) { 1123 aprint_error_dev(self, ", unable to allocate nvm buffer\n"); 1124 goto shutdown; 1125 } 1126 1127 ixl_get_nvm_version(sc); 1128 1129 if (sc->sc_mac_type == I40E_MAC_X722) 1130 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722; 1131 else 1132 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710; 1133 1134 rv = ixl_get_hw_capabilities(sc); 1135 if (rv != 0) { 1136 aprint_error(", GET HW CAPABILITIES %s\n", 1137 rv == ETIMEDOUT ? "timeout" : "error"); 1138 goto free_aqbuf; 1139 } 1140 1141 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu); 1142 if (ixl_param_nqps_limit > 0) { 1143 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max, 1144 ixl_param_nqps_limit); 1145 } 1146 1147 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 1148 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs; 1149 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs; 1150 1151 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs); 1152 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs); 1153 KASSERT(sc->sc_rx_ring_ndescs == 1154 (1U << (fls32(sc->sc_rx_ring_ndescs) - 1))); 1155 KASSERT(sc->sc_tx_ring_ndescs == 1156 (1U << (fls32(sc->sc_tx_ring_ndescs) - 1))); 1157 1158 if (ixl_get_mac(sc) != 0) { 1159 /* error printed by ixl_get_mac */ 1160 goto free_aqbuf; 1161 } 1162 1163 aprint_normal("\n"); 1164 aprint_naive("\n"); 1165 1166 aprint_normal_dev(self, "Ethernet address %s\n", 1167 ether_sprintf(sc->sc_enaddr)); 1168 1169 rv = ixl_pxe_clear(sc); 1170 if (rv != 0) { 1171 aprint_debug_dev(self, "CLEAR PXE MODE %s\n", 1172 rv == ETIMEDOUT ? "timeout" : "error"); 1173 } 1174 1175 ixl_set_filter_control(sc); 1176 1177 if (ixl_hmc(sc) != 0) { 1178 /* error printed by ixl_hmc */ 1179 goto free_aqbuf; 1180 } 1181 1182 if (ixl_lldp_shut(sc) != 0) { 1183 /* error printed by ixl_lldp_shut */ 1184 goto free_hmc; 1185 } 1186 1187 if (ixl_phy_mask_ints(sc) != 0) { 1188 /* error printed by ixl_phy_mask_ints */ 1189 goto free_hmc; 1190 } 1191 1192 if (ixl_restart_an(sc) != 0) { 1193 /* error printed by ixl_restart_an */ 1194 goto free_hmc; 1195 } 1196 1197 if (ixl_get_switch_config(sc) != 0) { 1198 /* error printed by ixl_get_switch_config */ 1199 goto free_hmc; 1200 } 1201 1202 rv = ixl_get_link_status_poll(sc, NULL); 1203 if (rv != 0) { 1204 aprint_error_dev(self, "GET LINK STATUS %s\n", 1205 rv == ETIMEDOUT ? "timeout" : "error"); 1206 goto free_hmc; 1207 } 1208 1209 /* 1210 * The FW often returns EIO in "Get PHY Abilities" command 1211 * if there is no delay 1212 */ 1213 DELAY(500); 1214 if (ixl_get_phy_info(sc) != 0) { 1215 /* error printed by ixl_get_phy_info */ 1216 goto free_hmc; 1217 } 1218 1219 if (ixl_dmamem_alloc(sc, &sc->sc_scratch, 1220 sizeof(struct ixl_aq_vsi_data), 8) != 0) { 1221 aprint_error_dev(self, "unable to allocate scratch buffer\n"); 1222 goto free_hmc; 1223 } 1224 1225 rv = ixl_get_vsi(sc); 1226 if (rv != 0) { 1227 aprint_error_dev(self, "GET VSI %s %d\n", 1228 rv == ETIMEDOUT ? "timeout" : "error", rv); 1229 goto free_scratch; 1230 } 1231 1232 rv = ixl_set_vsi(sc); 1233 if (rv != 0) { 1234 aprint_error_dev(self, "UPDATE VSI error %s %d\n", 1235 rv == ETIMEDOUT ? 
"timeout" : "error", rv); 1236 goto free_scratch; 1237 } 1238 1239 if (ixl_queue_pairs_alloc(sc) != 0) { 1240 /* error printed by ixl_queue_pairs_alloc */ 1241 goto free_scratch; 1242 } 1243 1244 if (ixl_setup_interrupts(sc) != 0) { 1245 /* error printed by ixl_setup_interrupts */ 1246 goto free_queue_pairs; 1247 } 1248 1249 if (ixl_setup_stats(sc) != 0) { 1250 aprint_error_dev(self, "failed to setup event counters\n"); 1251 goto teardown_intrs; 1252 } 1253 1254 if (ixl_setup_sysctls(sc) != 0) { 1255 /* error printed by ixl_setup_sysctls */ 1256 goto teardown_stats; 1257 } 1258 1259 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self)); 1260 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI, 1261 IPL_NET, WQ_MPSAFE); 1262 if (sc->sc_workq == NULL) 1263 goto teardown_sysctls; 1264 1265 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self)); 1266 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk, 1267 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE); 1268 if (rv != 0) { 1269 sc->sc_workq_txrx = NULL; 1270 goto teardown_wqs; 1271 } 1272 1273 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self)); 1274 cv_init(&sc->sc_atq_cv, xnamebuf); 1275 1276 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 1277 1278 ifp->if_softc = sc; 1279 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1280 ifp->if_extflags = IFEF_MPSAFE; 1281 ifp->if_ioctl = ixl_ioctl; 1282 ifp->if_start = ixl_start; 1283 ifp->if_transmit = ixl_transmit; 1284 ifp->if_watchdog = ixl_watchdog; 1285 ifp->if_init = ixl_init; 1286 ifp->if_stop = ixl_stop; 1287 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs); 1288 IFQ_SET_READY(&ifp->if_snd); 1289 ifp->if_capabilities |= IXL_IFCAP_RXCSUM; 1290 ifp->if_capabilities |= IXL_IFCAP_TXCSUM; 1291 #if 0 1292 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6; 1293 #endif 1294 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb); 1295 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1296 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 1297 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1298 1299 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities; 1300 /* Disable VLAN_HWFILTER by default */ 1301 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1302 1303 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable; 1304 1305 sc->sc_ec.ec_ifmedia = &sc->sc_media; 1306 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change, 1307 ixl_media_status, &sc->sc_cfg_lock); 1308 1309 ixl_media_add(sc); 1310 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 1311 if (ISSET(sc->sc_phy_abilities, 1312 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1313 ifmedia_add(&sc->sc_media, 1314 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL); 1315 } 1316 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL); 1317 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 1318 1319 rv = if_initialize(ifp); 1320 if (rv != 0) { 1321 aprint_error_dev(self, "if_initialize failed=%d\n", rv); 1322 goto teardown_wqs; 1323 } 1324 1325 sc->sc_ipq = if_percpuq_create(ifp); 1326 if_deferred_start_init(ifp, NULL); 1327 ether_ifattach(ifp, sc->sc_enaddr); 1328 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb); 1329 1330 rv = ixl_get_link_status_poll(sc, &link); 1331 if (rv != 0) 1332 link = LINK_STATE_UNKNOWN; 1333 if_link_state_change(ifp, link); 1334 1335 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 1336 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc); 1337 1338 ixl_config_other_intr(sc); 
1339 ixl_enable_other_intr(sc); 1340 1341 ixl_set_phy_autoselect(sc); 1342 1343 /* remove default mac filter and replace it so we can see vlans */ 1344 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0); 1345 if (rv != ENOENT) { 1346 aprint_debug_dev(self, 1347 "unable to remove macvlan %u\n", rv); 1348 } 1349 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 1350 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1351 if (rv != ENOENT) { 1352 aprint_debug_dev(self, 1353 "unable to remove macvlan, ignore vlan %u\n", rv); 1354 } 1355 1356 if (ixl_update_macvlan(sc) != 0) { 1357 aprint_debug_dev(self, 1358 "couldn't enable vlan hardware filter\n"); 1359 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1360 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 1361 } 1362 1363 sc->sc_txrx_workqueue = true; 1364 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT; 1365 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT; 1366 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT; 1367 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT; 1368 1369 ixl_stats_update(sc); 1370 sc->sc_stats_counters.isc_has_offset = true; 1371 1372 if (pmf_device_register(self, NULL, NULL) != true) 1373 aprint_debug_dev(self, "couldn't establish power handler\n"); 1374 sc->sc_itr_rx = IXL_ITR_RX; 1375 sc->sc_itr_tx = IXL_ITR_TX; 1376 sc->sc_attached = true; 1377 if_register(ifp); 1378 1379 return; 1380 1381 teardown_wqs: 1382 config_finalize_register(self, ixl_workqs_teardown); 1383 teardown_sysctls: 1384 ixl_teardown_sysctls(sc); 1385 teardown_stats: 1386 ixl_teardown_stats(sc); 1387 teardown_intrs: 1388 ixl_teardown_interrupts(sc); 1389 free_queue_pairs: 1390 ixl_queue_pairs_free(sc); 1391 free_scratch: 1392 ixl_dmamem_free(sc, &sc->sc_scratch); 1393 free_hmc: 1394 ixl_hmc_free(sc); 1395 free_aqbuf: 1396 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1397 shutdown: 1398 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1399 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1400 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1401 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1402 1403 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1404 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1405 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1406 1407 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1408 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1409 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1410 1411 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1412 0, IXL_DMA_LEN(&sc->sc_arq), 1413 BUS_DMASYNC_POSTREAD); 1414 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1415 0, IXL_DMA_LEN(&sc->sc_atq), 1416 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1417 1418 ixl_arq_unfill(sc); 1419 free_arq: 1420 ixl_dmamem_free(sc, &sc->sc_arq); 1421 free_atq: 1422 ixl_dmamem_free(sc, &sc->sc_atq); 1423 unmap: 1424 mutex_destroy(&sc->sc_atq_lock); 1425 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1426 mutex_destroy(&sc->sc_cfg_lock); 1427 sc->sc_mems = 0; 1428 1429 sc->sc_attached = false; 1430 } 1431 1432 static int 1433 ixl_detach(device_t self, int flags) 1434 { 1435 struct ixl_softc *sc = device_private(self); 1436 struct ifnet *ifp = &sc->sc_ec.ec_if; 1437 1438 if (!sc->sc_attached) 1439 return 0; 1440 1441 ixl_stop(ifp, 1); 1442 1443 ixl_disable_other_intr(sc); 1444 1445 callout_halt(&sc->sc_stats_callout, NULL); 1446 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task); 1447 1448 /* wait for ATQ handler */ 1449 mutex_enter(&sc->sc_atq_lock); 1450 mutex_exit(&sc->sc_atq_lock); 1451 1452 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task); 1453 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task); 1454 1455 if 
(sc->sc_workq != NULL) { 1456 ixl_workq_destroy(sc->sc_workq); 1457 sc->sc_workq = NULL; 1458 } 1459 1460 if (sc->sc_workq_txrx != NULL) { 1461 workqueue_destroy(sc->sc_workq_txrx); 1462 sc->sc_workq_txrx = NULL; 1463 } 1464 1465 if_percpuq_destroy(sc->sc_ipq); 1466 ether_ifdetach(ifp); 1467 if_detach(ifp); 1468 ifmedia_fini(&sc->sc_media); 1469 1470 ixl_teardown_interrupts(sc); 1471 ixl_teardown_stats(sc); 1472 ixl_teardown_sysctls(sc); 1473 1474 ixl_queue_pairs_free(sc); 1475 1476 ixl_dmamem_free(sc, &sc->sc_scratch); 1477 ixl_hmc_free(sc); 1478 1479 /* shutdown */ 1480 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1481 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1482 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1483 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1484 1485 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1486 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1487 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1488 1489 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1490 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1491 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1492 1493 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1494 0, IXL_DMA_LEN(&sc->sc_arq), 1495 BUS_DMASYNC_POSTREAD); 1496 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1497 0, IXL_DMA_LEN(&sc->sc_atq), 1498 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1499 1500 ixl_arq_unfill(sc); 1501 1502 ixl_dmamem_free(sc, &sc->sc_arq); 1503 ixl_dmamem_free(sc, &sc->sc_atq); 1504 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1505 1506 cv_destroy(&sc->sc_atq_cv); 1507 mutex_destroy(&sc->sc_atq_lock); 1508 1509 if (sc->sc_mems != 0) { 1510 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1511 sc->sc_mems = 0; 1512 } 1513 1514 mutex_destroy(&sc->sc_cfg_lock); 1515 1516 return 0; 1517 } 1518 1519 static int 1520 ixl_workqs_teardown(device_t self) 1521 { 1522 struct ixl_softc *sc = device_private(self); 1523 1524 if (sc->sc_workq != NULL) { 1525 ixl_workq_destroy(sc->sc_workq); 1526 sc->sc_workq = NULL; 1527 } 1528 1529 if (sc->sc_workq_txrx != NULL) { 1530 workqueue_destroy(sc->sc_workq_txrx); 1531 sc->sc_workq_txrx = NULL; 1532 } 1533 1534 return 0; 1535 } 1536 1537 static int 1538 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 1539 { 1540 struct ifnet *ifp = &ec->ec_if; 1541 struct ixl_softc *sc = ifp->if_softc; 1542 int rv; 1543 1544 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 1545 return 0; 1546 } 1547 1548 if (set) { 1549 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid, 1550 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1551 if (rv == 0) { 1552 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 1553 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1554 } 1555 } else { 1556 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid, 1557 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1558 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid, 1559 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1560 } 1561 1562 return rv; 1563 } 1564 1565 static void 1566 ixl_media_add(struct ixl_softc *sc) 1567 { 1568 struct ifmedia *ifm = &sc->sc_media; 1569 const struct ixl_phy_type *itype; 1570 unsigned int i; 1571 bool flow; 1572 1573 if (ISSET(sc->sc_phy_abilities, 1574 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1575 flow = true; 1576 } else { 1577 flow = false; 1578 } 1579 1580 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 1581 itype = &ixl_phy_type_map[i]; 1582 1583 if (ISSET(sc->sc_phy_types, itype->phy_type)) { 1584 ifmedia_add(ifm, 1585 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL); 1586 1587 if (flow) { 1588 ifmedia_add(ifm, 1589 IFM_ETHER | IFM_FDX | IFM_FLOW | 1590 
itype->ifm_type, 0, NULL); 1591 } 1592 1593 if (itype->ifm_type != IFM_100_TX) 1594 continue; 1595 1596 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 1597 0, NULL); 1598 if (flow) { 1599 ifmedia_add(ifm, 1600 IFM_ETHER | IFM_FLOW | itype->ifm_type, 1601 0, NULL); 1602 } 1603 } 1604 } 1605 } 1606 1607 static void 1608 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1609 { 1610 struct ixl_softc *sc = ifp->if_softc; 1611 1612 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 1613 1614 ifmr->ifm_status = sc->sc_media_status; 1615 ifmr->ifm_active = sc->sc_media_active; 1616 } 1617 1618 static int 1619 ixl_media_change(struct ifnet *ifp) 1620 { 1621 struct ixl_softc *sc = ifp->if_softc; 1622 struct ifmedia *ifm = &sc->sc_media; 1623 uint64_t ifm_active = sc->sc_media_active; 1624 uint8_t link_speed, abilities; 1625 1626 switch (IFM_SUBTYPE(ifm_active)) { 1627 case IFM_1000_SGMII: 1628 case IFM_1000_KX: 1629 case IFM_10G_KX4: 1630 case IFM_10G_KR: 1631 case IFM_40G_KR4: 1632 case IFM_20G_KR2: 1633 case IFM_25G_KR: 1634 /* backplanes */ 1635 return EINVAL; 1636 } 1637 1638 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP; 1639 1640 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1641 case IFM_AUTO: 1642 link_speed = sc->sc_phy_linkspeed; 1643 break; 1644 case IFM_NONE: 1645 link_speed = 0; 1646 CLR(abilities, IXL_PHY_ABILITY_LINKUP); 1647 break; 1648 default: 1649 link_speed = ixl_search_baudrate( 1650 ifmedia_baudrate(ifm->ifm_media)); 1651 } 1652 1653 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) { 1654 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0) 1655 return EINVAL; 1656 } 1657 1658 if (ifm->ifm_media & IFM_FLOW) { 1659 abilities |= sc->sc_phy_abilities & 1660 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX); 1661 } 1662 1663 return ixl_set_phy_config(sc, link_speed, abilities, false); 1664 } 1665 1666 static void 1667 ixl_watchdog(struct ifnet *ifp) 1668 { 1669 1670 } 1671 1672 static void 1673 ixl_del_all_multiaddr(struct ixl_softc *sc) 1674 { 1675 struct ethercom *ec = &sc->sc_ec; 1676 struct ether_multi *enm; 1677 struct ether_multistep step; 1678 1679 ETHER_LOCK(ec); 1680 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1681 ETHER_NEXT_MULTI(step, enm)) { 1682 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1683 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1684 } 1685 ETHER_UNLOCK(ec); 1686 } 1687 1688 static int 1689 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1690 { 1691 struct ifnet *ifp = &sc->sc_ec.ec_if; 1692 int rv; 1693 1694 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) 1695 return 0; 1696 1697 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) { 1698 ixl_del_all_multiaddr(sc); 1699 SET(ifp->if_flags, IFF_ALLMULTI); 1700 return ENETRESET; 1701 } 1702 1703 /* multicast address can not use VLAN HWFILTER */ 1704 rv = ixl_add_macvlan(sc, addrlo, 0, 1705 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1706 1707 if (rv == ENOSPC) { 1708 ixl_del_all_multiaddr(sc); 1709 SET(ifp->if_flags, IFF_ALLMULTI); 1710 return ENETRESET; 1711 } 1712 1713 return rv; 1714 } 1715 1716 static int 1717 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1718 { 1719 struct ifnet *ifp = &sc->sc_ec.ec_if; 1720 struct ethercom *ec = &sc->sc_ec; 1721 struct ether_multi *enm, *enm_last; 1722 struct ether_multistep step; 1723 int error, rv = 0; 1724 1725 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) { 1726 ixl_remove_macvlan(sc, addrlo, 0, 1727 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1728 return 0; 1729 } 1730 1731 ETHER_LOCK(ec); 1732 for (ETHER_FIRST_MULTI(step, ec, enm); enm != 
NULL; 1733 ETHER_NEXT_MULTI(step, enm)) { 1734 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1735 ETHER_ADDR_LEN) != 0) { 1736 goto out; 1737 } 1738 } 1739 1740 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1741 ETHER_NEXT_MULTI(step, enm)) { 1742 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0, 1743 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1744 if (error != 0) 1745 break; 1746 } 1747 1748 if (enm != NULL) { 1749 enm_last = enm; 1750 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1751 ETHER_NEXT_MULTI(step, enm)) { 1752 if (enm == enm_last) 1753 break; 1754 1755 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1756 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1757 } 1758 } else { 1759 CLR(ifp->if_flags, IFF_ALLMULTI); 1760 rv = ENETRESET; 1761 } 1762 1763 out: 1764 ETHER_UNLOCK(ec); 1765 return rv; 1766 } 1767 1768 static int 1769 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1770 { 1771 struct ifreq *ifr = (struct ifreq *)data; 1772 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc; 1773 const struct sockaddr *sa; 1774 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN]; 1775 int s, error = 0; 1776 unsigned int nmtu; 1777 1778 switch (cmd) { 1779 case SIOCSIFMTU: 1780 nmtu = ifr->ifr_mtu; 1781 1782 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) { 1783 error = EINVAL; 1784 break; 1785 } 1786 if (ifp->if_mtu != nmtu) { 1787 s = splnet(); 1788 error = ether_ioctl(ifp, cmd, data); 1789 splx(s); 1790 if (error == ENETRESET) 1791 error = ixl_init(ifp); 1792 } 1793 break; 1794 case SIOCADDMULTI: 1795 sa = ifreq_getaddr(SIOCADDMULTI, ifr); 1796 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) { 1797 error = ether_multiaddr(sa, addrlo, addrhi); 1798 if (error != 0) 1799 return error; 1800 1801 error = ixl_add_multi(sc, addrlo, addrhi); 1802 if (error != 0 && error != ENETRESET) { 1803 ether_delmulti(sa, &sc->sc_ec); 1804 error = EIO; 1805 } 1806 } 1807 break; 1808 1809 case SIOCDELMULTI: 1810 sa = ifreq_getaddr(SIOCDELMULTI, ifr); 1811 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) { 1812 error = ether_multiaddr(sa, addrlo, addrhi); 1813 if (error != 0) 1814 return error; 1815 1816 error = ixl_del_multi(sc, addrlo, addrhi); 1817 } 1818 break; 1819 1820 default: 1821 s = splnet(); 1822 error = ether_ioctl(ifp, cmd, data); 1823 splx(s); 1824 } 1825 1826 if (error == ENETRESET) 1827 error = ixl_iff(sc); 1828 1829 return error; 1830 } 1831 1832 static enum i40e_mac_type 1833 ixl_mactype(pci_product_id_t id) 1834 { 1835 1836 switch (id) { 1837 case PCI_PRODUCT_INTEL_XL710_SFP: 1838 case PCI_PRODUCT_INTEL_XL710_KX_B: 1839 case PCI_PRODUCT_INTEL_XL710_KX_C: 1840 case PCI_PRODUCT_INTEL_XL710_QSFP_A: 1841 case PCI_PRODUCT_INTEL_XL710_QSFP_B: 1842 case PCI_PRODUCT_INTEL_XL710_QSFP_C: 1843 case PCI_PRODUCT_INTEL_X710_10G_T: 1844 case PCI_PRODUCT_INTEL_XL710_20G_BP_1: 1845 case PCI_PRODUCT_INTEL_XL710_20G_BP_2: 1846 case PCI_PRODUCT_INTEL_X710_T4_10G: 1847 case PCI_PRODUCT_INTEL_XXV710_25G_BP: 1848 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28: 1849 return I40E_MAC_XL710; 1850 1851 case PCI_PRODUCT_INTEL_X722_KX: 1852 case PCI_PRODUCT_INTEL_X722_QSFP: 1853 case PCI_PRODUCT_INTEL_X722_SFP: 1854 case PCI_PRODUCT_INTEL_X722_1G_BASET: 1855 case PCI_PRODUCT_INTEL_X722_10G_BASET: 1856 case PCI_PRODUCT_INTEL_X722_I_SFP: 1857 return I40E_MAC_X722; 1858 } 1859 1860 return I40E_MAC_GENERIC; 1861 } 1862 1863 static void 1864 ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag) 1865 { 1866 pcireg_t csr; 1867 1868 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 1869 csr |= (PCI_COMMAND_MASTER_ENABLE | 1870 
PCI_COMMAND_MEM_ENABLE); 1871 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 1872 } 1873 1874 static inline void * 1875 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i) 1876 { 1877 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 1878 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1879 1880 if (i >= e->hmc_count) 1881 return NULL; 1882 1883 kva += e->hmc_base; 1884 kva += i * e->hmc_size; 1885 1886 return kva; 1887 } 1888 1889 static inline size_t 1890 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type) 1891 { 1892 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1893 1894 return e->hmc_size; 1895 } 1896 1897 static void 1898 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1899 { 1900 struct ixl_rx_ring *rxr = qp->qp_rxr; 1901 1902 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 1903 I40E_PFINT_DYN_CTLN_INTENA_MASK | 1904 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 1905 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 1906 ixl_flush(sc); 1907 } 1908 1909 static void 1910 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1911 { 1912 struct ixl_rx_ring *rxr = qp->qp_rxr; 1913 1914 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 1915 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 1916 ixl_flush(sc); 1917 } 1918 1919 static void 1920 ixl_enable_other_intr(struct ixl_softc *sc) 1921 { 1922 1923 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 1924 I40E_PFINT_DYN_CTL0_INTENA_MASK | 1925 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 1926 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 1927 ixl_flush(sc); 1928 } 1929 1930 static void 1931 ixl_disable_other_intr(struct ixl_softc *sc) 1932 { 1933 1934 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 1935 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 1936 ixl_flush(sc); 1937 } 1938 1939 static int 1940 ixl_reinit(struct ixl_softc *sc) 1941 { 1942 struct ixl_rx_ring *rxr; 1943 struct ixl_tx_ring *txr; 1944 unsigned int i; 1945 uint32_t reg; 1946 1947 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 1948 1949 if (ixl_get_vsi(sc) != 0) 1950 return EIO; 1951 1952 if (ixl_set_vsi(sc) != 0) 1953 return EIO; 1954 1955 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 1956 txr = sc->sc_qps[i].qp_txr; 1957 rxr = sc->sc_qps[i].qp_rxr; 1958 1959 ixl_txr_config(sc, txr); 1960 ixl_rxr_config(sc, rxr); 1961 } 1962 1963 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 1964 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE); 1965 1966 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 1967 txr = sc->sc_qps[i].qp_txr; 1968 rxr = sc->sc_qps[i].qp_rxr; 1969 1970 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE | 1971 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)); 1972 ixl_flush(sc); 1973 1974 ixl_wr(sc, txr->txr_tail, txr->txr_prod); 1975 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod); 1976 1977 /* ixl_rxfill() needs lock held */ 1978 mutex_enter(&rxr->rxr_lock); 1979 ixl_rxfill(sc, rxr); 1980 mutex_exit(&rxr->rxr_lock); 1981 1982 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 1983 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK); 1984 ixl_wr(sc, I40E_QRX_ENA(i), reg); 1985 if (ixl_rxr_enabled(sc, rxr) != 0) 1986 goto stop; 1987 1988 ixl_txr_qdis(sc, txr, 1); 1989 1990 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 1991 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK); 1992 ixl_wr(sc, I40E_QTX_ENA(i), reg); 1993 1994 if (ixl_txr_enabled(sc, txr) != 0) 1995 goto stop; 1996 } 1997 1998 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 1999 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2000 2001 return 0; 2002 2003 stop: 2004 bus_dmamap_sync(sc->sc_dmat, 
IXL_DMA_MAP(&sc->sc_hmc_pd), 2005 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2006 2007 return ETIMEDOUT; 2008 } 2009 2010 static int 2011 ixl_init_locked(struct ixl_softc *sc) 2012 { 2013 struct ifnet *ifp = &sc->sc_ec.ec_if; 2014 unsigned int i; 2015 int error, eccap_change; 2016 2017 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2018 2019 if (ISSET(ifp->if_flags, IFF_RUNNING)) 2020 ixl_stop_locked(sc); 2021 2022 if (sc->sc_dead) { 2023 return ENXIO; 2024 } 2025 2026 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable; 2027 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING)) 2028 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 2029 2030 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) { 2031 if (ixl_update_macvlan(sc) == 0) { 2032 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 2033 } else { 2034 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 2035 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 2036 } 2037 } 2038 2039 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX) 2040 sc->sc_nqueue_pairs = 1; 2041 else 2042 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 2043 2044 error = ixl_reinit(sc); 2045 if (error) { 2046 ixl_stop_locked(sc); 2047 return error; 2048 } 2049 2050 SET(ifp->if_flags, IFF_RUNNING); 2051 CLR(ifp->if_flags, IFF_OACTIVE); 2052 2053 ixl_config_rss(sc); 2054 ixl_config_queue_intr(sc); 2055 2056 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2057 ixl_enable_queue_intr(sc, &sc->sc_qps[i]); 2058 } 2059 2060 error = ixl_iff(sc); 2061 if (error) { 2062 ixl_stop_locked(sc); 2063 return error; 2064 } 2065 2066 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 2067 2068 return 0; 2069 } 2070 2071 static int 2072 ixl_init(struct ifnet *ifp) 2073 { 2074 struct ixl_softc *sc = ifp->if_softc; 2075 int error; 2076 2077 mutex_enter(&sc->sc_cfg_lock); 2078 error = ixl_init_locked(sc); 2079 mutex_exit(&sc->sc_cfg_lock); 2080 2081 if (error == 0) 2082 (void)ixl_get_link_status(sc); 2083 2084 return error; 2085 } 2086 2087 static int 2088 ixl_iff(struct ixl_softc *sc) 2089 { 2090 struct ifnet *ifp = &sc->sc_ec.ec_if; 2091 struct ixl_atq iatq; 2092 struct ixl_aq_desc *iaq; 2093 struct ixl_aq_vsi_promisc_param *param; 2094 uint16_t flag_add, flag_del; 2095 int error; 2096 2097 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 2098 return 0; 2099 2100 memset(&iatq, 0, sizeof(iatq)); 2101 2102 iaq = &iatq.iatq_desc; 2103 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC); 2104 2105 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param; 2106 param->flags = htole16(0); 2107 2108 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER) 2109 || ISSET(ifp->if_flags, IFF_PROMISC)) { 2110 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2111 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2112 } 2113 2114 if (ISSET(ifp->if_flags, IFF_PROMISC)) { 2115 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2116 IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2117 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) { 2118 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2119 } 2120 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2121 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2122 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2123 param->seid = sc->sc_seid; 2124 2125 error = ixl_atq_exec(sc, &iatq); 2126 if (error) 2127 return error; 2128 2129 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) 2130 return EIO; 2131 2132 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) { 2133 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 2134 
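			/*
			 * VLAN hardware filtering is in use, so the unicast
			 * MAC filters are programmed as perfect (MAC+VLAN)
			 * matches; without it a single VLAN-agnostic filter
			 * is enough.
			 */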
flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH; 2135 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH; 2136 } else { 2137 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN; 2138 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN; 2139 } 2140 2141 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del); 2142 2143 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 2144 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add); 2145 } 2146 return 0; 2147 } 2148 2149 static void 2150 ixl_stop_rendezvous(struct ixl_softc *sc) 2151 { 2152 struct ixl_tx_ring *txr; 2153 struct ixl_rx_ring *rxr; 2154 unsigned int i; 2155 2156 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2157 txr = sc->sc_qps[i].qp_txr; 2158 rxr = sc->sc_qps[i].qp_rxr; 2159 2160 mutex_enter(&txr->txr_lock); 2161 mutex_exit(&txr->txr_lock); 2162 2163 mutex_enter(&rxr->rxr_lock); 2164 mutex_exit(&rxr->rxr_lock); 2165 2166 sc->sc_qps[i].qp_workqueue = false; 2167 workqueue_wait(sc->sc_workq_txrx, 2168 &sc->sc_qps[i].qp_work); 2169 } 2170 } 2171 2172 static void 2173 ixl_stop_locked(struct ixl_softc *sc) 2174 { 2175 struct ifnet *ifp = &sc->sc_ec.ec_if; 2176 struct ixl_rx_ring *rxr; 2177 struct ixl_tx_ring *txr; 2178 unsigned int i; 2179 uint32_t reg; 2180 2181 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2182 2183 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE); 2184 callout_stop(&sc->sc_stats_callout); 2185 2186 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2187 txr = sc->sc_qps[i].qp_txr; 2188 rxr = sc->sc_qps[i].qp_rxr; 2189 2190 ixl_disable_queue_intr(sc, &sc->sc_qps[i]); 2191 2192 mutex_enter(&txr->txr_lock); 2193 ixl_txr_qdis(sc, txr, 0); 2194 mutex_exit(&txr->txr_lock); 2195 } 2196 2197 /* XXX wait at least 400 usec for all tx queues in one go */ 2198 ixl_flush(sc); 2199 DELAY(500); 2200 2201 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2202 txr = sc->sc_qps[i].qp_txr; 2203 rxr = sc->sc_qps[i].qp_rxr; 2204 2205 mutex_enter(&txr->txr_lock); 2206 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2207 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2208 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2209 mutex_exit(&txr->txr_lock); 2210 2211 mutex_enter(&rxr->rxr_lock); 2212 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2213 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2214 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2215 mutex_exit(&rxr->rxr_lock); 2216 } 2217 2218 /* XXX short wait for all queue disables to settle */ 2219 ixl_flush(sc); 2220 DELAY(50); 2221 2222 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2223 txr = sc->sc_qps[i].qp_txr; 2224 rxr = sc->sc_qps[i].qp_rxr; 2225 2226 mutex_enter(&txr->txr_lock); 2227 if (ixl_txr_disabled(sc, txr) != 0) { 2228 mutex_exit(&txr->txr_lock); 2229 goto die; 2230 } 2231 mutex_exit(&txr->txr_lock); 2232 2233 mutex_enter(&rxr->rxr_lock); 2234 if (ixl_rxr_disabled(sc, rxr) != 0) { 2235 mutex_exit(&rxr->rxr_lock); 2236 goto die; 2237 } 2238 mutex_exit(&rxr->rxr_lock); 2239 } 2240 2241 ixl_stop_rendezvous(sc); 2242 2243 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2244 txr = sc->sc_qps[i].qp_txr; 2245 rxr = sc->sc_qps[i].qp_rxr; 2246 2247 mutex_enter(&txr->txr_lock); 2248 ixl_txr_unconfig(sc, txr); 2249 mutex_exit(&txr->txr_lock); 2250 2251 mutex_enter(&rxr->rxr_lock); 2252 ixl_rxr_unconfig(sc, rxr); 2253 mutex_exit(&rxr->rxr_lock); 2254 2255 ixl_txr_clean(sc, txr); 2256 ixl_rxr_clean(sc, rxr); 2257 } 2258 2259 return; 2260 die: 2261 sc->sc_dead = true; 2262 log(LOG_CRIT, "%s: failed to shut down rings", 2263 device_xname(sc->sc_dev)); 2264 return; 2265 } 2266 2267 static void 2268 ixl_stop(struct ifnet *ifp, int disable) 2269 { 2270 struct ixl_softc *sc = ifp->if_softc; 2271 2272 
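	/*
	 * Serialize against ixl_init() and the other users of the
	 * configuration lock before tearing the rings down.
	 */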
mutex_enter(&sc->sc_cfg_lock); 2273 ixl_stop_locked(sc); 2274 mutex_exit(&sc->sc_cfg_lock); 2275 } 2276 2277 static int 2278 ixl_queue_pairs_alloc(struct ixl_softc *sc) 2279 { 2280 struct ixl_queue_pair *qp; 2281 unsigned int i; 2282 size_t sz; 2283 2284 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2285 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP); 2286 2287 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2288 qp = &sc->sc_qps[i]; 2289 2290 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2291 ixl_handle_queue, qp); 2292 if (qp->qp_si == NULL) 2293 goto free; 2294 2295 qp->qp_txr = ixl_txr_alloc(sc, i); 2296 if (qp->qp_txr == NULL) 2297 goto free; 2298 2299 qp->qp_rxr = ixl_rxr_alloc(sc, i); 2300 if (qp->qp_rxr == NULL) 2301 goto free; 2302 2303 qp->qp_sc = sc; 2304 snprintf(qp->qp_name, sizeof(qp->qp_name), 2305 "%s-TXRX%d", device_xname(sc->sc_dev), i); 2306 } 2307 2308 return 0; 2309 free: 2310 if (sc->sc_qps != NULL) { 2311 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2312 qp = &sc->sc_qps[i]; 2313 2314 if (qp->qp_txr != NULL) 2315 ixl_txr_free(sc, qp->qp_txr); 2316 if (qp->qp_rxr != NULL) 2317 ixl_rxr_free(sc, qp->qp_rxr); 2318 if (qp->qp_si != NULL) 2319 softint_disestablish(qp->qp_si); 2320 } 2321 2322 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2323 kmem_free(sc->sc_qps, sz); 2324 sc->sc_qps = NULL; 2325 } 2326 2327 return -1; 2328 } 2329 2330 static void 2331 ixl_queue_pairs_free(struct ixl_softc *sc) 2332 { 2333 struct ixl_queue_pair *qp; 2334 unsigned int i; 2335 size_t sz; 2336 2337 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2338 qp = &sc->sc_qps[i]; 2339 ixl_txr_free(sc, qp->qp_txr); 2340 ixl_rxr_free(sc, qp->qp_rxr); 2341 softint_disestablish(qp->qp_si); 2342 } 2343 2344 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2345 kmem_free(sc->sc_qps, sz); 2346 sc->sc_qps = NULL; 2347 } 2348 2349 static struct ixl_tx_ring * 2350 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid) 2351 { 2352 struct ixl_tx_ring *txr = NULL; 2353 struct ixl_tx_map *maps = NULL, *txm; 2354 unsigned int i; 2355 2356 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP); 2357 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs, 2358 KM_SLEEP); 2359 2360 if (ixl_dmamem_alloc(sc, &txr->txr_mem, 2361 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs, 2362 IXL_TX_QUEUE_ALIGN) != 0) 2363 goto free; 2364 2365 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2366 txm = &maps[i]; 2367 2368 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE, 2369 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0, 2370 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0) 2371 goto uncreate; 2372 2373 txm->txm_eop = -1; 2374 txm->txm_m = NULL; 2375 } 2376 2377 txr->txr_cons = txr->txr_prod = 0; 2378 txr->txr_maps = maps; 2379 2380 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP); 2381 if (txr->txr_intrq == NULL) 2382 goto uncreate; 2383 2384 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2385 ixl_deferred_transmit, txr); 2386 if (txr->txr_si == NULL) 2387 goto destroy_pcq; 2388 2389 txr->txr_tail = I40E_QTX_TAIL(qid); 2390 txr->txr_qid = qid; 2391 txr->txr_sc = sc; 2392 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET); 2393 2394 return txr; 2395 2396 destroy_pcq: 2397 pcq_destroy(txr->txr_intrq); 2398 uncreate: 2399 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2400 txm = &maps[i]; 2401 2402 if (txm->txm_map == NULL) 2403 continue; 2404 2405 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2406 } 2407 2408 ixl_dmamem_free(sc, &txr->txr_mem); 2409 free: 2410 kmem_free(maps, 
sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2411 kmem_free(txr, sizeof(*txr)); 2412 2413 return NULL; 2414 } 2415 2416 static void 2417 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable) 2418 { 2419 unsigned int qid; 2420 bus_size_t reg; 2421 uint32_t r; 2422 2423 qid = txr->txr_qid + sc->sc_base_queue; 2424 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128); 2425 qid %= 128; 2426 2427 r = ixl_rd(sc, reg); 2428 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK); 2429 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 2430 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK : 2431 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK); 2432 ixl_wr(sc, reg, r); 2433 } 2434 2435 static void 2436 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2437 { 2438 struct ixl_hmc_txq txq; 2439 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch); 2440 void *hmc; 2441 2442 memset(&txq, 0, sizeof(txq)); 2443 txq.head = htole16(txr->txr_cons); 2444 txq.new_context = 1; 2445 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT); 2446 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB; 2447 txq.qlen = htole16(sc->sc_tx_ring_ndescs); 2448 txq.tphrdesc_ena = 0; 2449 txq.tphrpacket_ena = 0; 2450 txq.tphwdesc_ena = 0; 2451 txq.rdylist = data->qs_handle[0]; 2452 2453 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2454 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2455 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, 2456 __arraycount(ixl_hmc_pack_txq)); 2457 } 2458 2459 static void 2460 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2461 { 2462 void *hmc; 2463 2464 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2465 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2466 txr->txr_cons = txr->txr_prod = 0; 2467 } 2468 2469 static void 2470 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2471 { 2472 struct ixl_tx_map *maps, *txm; 2473 bus_dmamap_t map; 2474 unsigned int i; 2475 2476 maps = txr->txr_maps; 2477 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2478 txm = &maps[i]; 2479 2480 if (txm->txm_m == NULL) 2481 continue; 2482 2483 map = txm->txm_map; 2484 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2485 BUS_DMASYNC_POSTWRITE); 2486 bus_dmamap_unload(sc->sc_dmat, map); 2487 2488 m_freem(txm->txm_m); 2489 txm->txm_m = NULL; 2490 } 2491 } 2492 2493 static int 2494 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2495 { 2496 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2497 uint32_t reg; 2498 int i; 2499 2500 for (i = 0; i < 10; i++) { 2501 reg = ixl_rd(sc, ena); 2502 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK)) 2503 return 0; 2504 2505 delaymsec(10); 2506 } 2507 2508 return ETIMEDOUT; 2509 } 2510 2511 static int 2512 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2513 { 2514 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2515 uint32_t reg; 2516 int i; 2517 2518 KASSERT(mutex_owned(&txr->txr_lock)); 2519 2520 for (i = 0; i < 10; i++) { 2521 reg = ixl_rd(sc, ena); 2522 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0) 2523 return 0; 2524 2525 delaymsec(10); 2526 } 2527 2528 return ETIMEDOUT; 2529 } 2530 2531 static void 2532 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2533 { 2534 struct ixl_tx_map *maps, *txm; 2535 struct mbuf *m; 2536 unsigned int i; 2537 2538 softint_disestablish(txr->txr_si); 2539 while ((m = pcq_get(txr->txr_intrq)) != NULL) 2540 m_freem(m); 2541 pcq_destroy(txr->txr_intrq); 2542 2543 maps = txr->txr_maps; 2544 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2545 txm = &maps[i]; 2546 2547 
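		/*
		 * Any mbuf loaded on this map is expected to have been
		 * released by ixl_txr_clean() already; all that is left
		 * is to destroy the per-packet DMA map created in
		 * ixl_txr_alloc().
		 */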
bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2548 } 2549 2550 ixl_dmamem_free(sc, &txr->txr_mem); 2551 mutex_destroy(&txr->txr_lock); 2552 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2553 kmem_free(txr, sizeof(*txr)); 2554 } 2555 2556 static inline int 2557 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0, 2558 struct ixl_tx_ring *txr) 2559 { 2560 struct mbuf *m; 2561 int error; 2562 2563 KASSERT(mutex_owned(&txr->txr_lock)); 2564 2565 m = *m0; 2566 2567 error = bus_dmamap_load_mbuf(dmat, map, m, 2568 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2569 if (error != EFBIG) 2570 return error; 2571 2572 m = m_defrag(m, M_DONTWAIT); 2573 if (m != NULL) { 2574 *m0 = m; 2575 txr->txr_defragged.ev_count++; 2576 2577 error = bus_dmamap_load_mbuf(dmat, map, m, 2578 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2579 } else { 2580 txr->txr_defrag_failed.ev_count++; 2581 error = ENOBUFS; 2582 } 2583 2584 return error; 2585 } 2586 2587 static inline int 2588 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd) 2589 { 2590 struct ether_header *eh; 2591 size_t len; 2592 uint64_t cmd; 2593 2594 cmd = 0; 2595 2596 eh = mtod(m, struct ether_header *); 2597 switch (htons(eh->ether_type)) { 2598 case ETHERTYPE_IP: 2599 case ETHERTYPE_IPV6: 2600 len = ETHER_HDR_LEN; 2601 break; 2602 case ETHERTYPE_VLAN: 2603 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2604 break; 2605 default: 2606 len = 0; 2607 } 2608 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT); 2609 2610 if (m->m_pkthdr.csum_flags & 2611 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 2612 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4; 2613 } 2614 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) { 2615 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM; 2616 } 2617 2618 if (m->m_pkthdr.csum_flags & 2619 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 2620 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6; 2621 } 2622 2623 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) { 2624 case IXL_TX_DESC_CMD_IIPT_IPV4: 2625 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM: 2626 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2627 break; 2628 case IXL_TX_DESC_CMD_IIPT_IPV6: 2629 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2630 break; 2631 default: 2632 len = 0; 2633 } 2634 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT); 2635 2636 if (m->m_pkthdr.csum_flags & 2637 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) { 2638 len = sizeof(struct tcphdr); 2639 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP; 2640 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) { 2641 len = sizeof(struct udphdr); 2642 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP; 2643 } else { 2644 len = 0; 2645 } 2646 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT); 2647 2648 *cmd_txd |= cmd; 2649 return 0; 2650 } 2651 2652 static void 2653 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr, 2654 bool is_transmit) 2655 { 2656 struct ixl_softc *sc = ifp->if_softc; 2657 struct ixl_tx_desc *ring, *txd; 2658 struct ixl_tx_map *txm; 2659 bus_dmamap_t map; 2660 struct mbuf *m; 2661 uint64_t cmd, cmd_txd; 2662 unsigned int prod, free, last, i; 2663 unsigned int mask; 2664 int post = 0; 2665 2666 KASSERT(mutex_owned(&txr->txr_lock)); 2667 2668 if (!ISSET(ifp->if_flags, IFF_RUNNING) 2669 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) { 2670 if (!is_transmit) 2671 IFQ_PURGE(&ifp->if_snd); 2672 return; 2673 } 2674 2675 prod = txr->txr_prod; 2676 free = txr->txr_cons; 2677 if (free <= prod) 2678 free += sc->sc_tx_ring_ndescs; 2679 free -= prod; 2680 2681 bus_dmamap_sync(sc->sc_dmat, 
IXL_DMA_MAP(&txr->txr_mem), 2682 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE); 2683 2684 ring = IXL_DMA_KVA(&txr->txr_mem); 2685 mask = sc->sc_tx_ring_ndescs - 1; 2686 last = prod; 2687 cmd = 0; 2688 txd = NULL; 2689 2690 for (;;) { 2691 if (free <= IXL_TX_PKT_DESCS) { 2692 if (!is_transmit) 2693 SET(ifp->if_flags, IFF_OACTIVE); 2694 break; 2695 } 2696 2697 if (is_transmit) 2698 m = pcq_get(txr->txr_intrq); 2699 else 2700 IFQ_DEQUEUE(&ifp->if_snd, m); 2701 2702 if (m == NULL) 2703 break; 2704 2705 txm = &txr->txr_maps[prod]; 2706 map = txm->txm_map; 2707 2708 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) { 2709 if_statinc(ifp, if_oerrors); 2710 m_freem(m); 2711 continue; 2712 } 2713 2714 cmd_txd = 0; 2715 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) { 2716 ixl_tx_setup_offloads(m, &cmd_txd); 2717 } 2718 2719 if (vlan_has_tag(m)) { 2720 cmd_txd |= (uint64_t)vlan_get_tag(m) << 2721 IXL_TX_DESC_L2TAG1_SHIFT; 2722 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1; 2723 } 2724 2725 bus_dmamap_sync(sc->sc_dmat, map, 0, 2726 map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2727 2728 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) { 2729 txd = &ring[prod]; 2730 2731 cmd = (uint64_t)map->dm_segs[i].ds_len << 2732 IXL_TX_DESC_BSIZE_SHIFT; 2733 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC; 2734 cmd |= cmd_txd; 2735 2736 txd->addr = htole64(map->dm_segs[i].ds_addr); 2737 txd->cmd = htole64(cmd); 2738 2739 last = prod; 2740 2741 prod++; 2742 prod &= mask; 2743 } 2744 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS; 2745 txd->cmd = htole64(cmd); 2746 2747 txm->txm_m = m; 2748 txm->txm_eop = last; 2749 2750 bpf_mtap(ifp, m, BPF_D_OUT); 2751 2752 free -= i; 2753 post = 1; 2754 } 2755 2756 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2757 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE); 2758 2759 if (post) { 2760 txr->txr_prod = prod; 2761 ixl_wr(sc, txr->txr_tail, prod); 2762 } 2763 } 2764 2765 static int 2766 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit) 2767 { 2768 struct ifnet *ifp = &sc->sc_ec.ec_if; 2769 struct ixl_tx_desc *ring, *txd; 2770 struct ixl_tx_map *txm; 2771 struct mbuf *m; 2772 bus_dmamap_t map; 2773 unsigned int cons, prod, last; 2774 unsigned int mask; 2775 uint64_t dtype; 2776 int done = 0, more = 0; 2777 2778 KASSERT(mutex_owned(&txr->txr_lock)); 2779 2780 prod = txr->txr_prod; 2781 cons = txr->txr_cons; 2782 2783 if (cons == prod) 2784 return 0; 2785 2786 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2787 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD); 2788 2789 ring = IXL_DMA_KVA(&txr->txr_mem); 2790 mask = sc->sc_tx_ring_ndescs - 1; 2791 2792 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2793 2794 do { 2795 if (txlimit-- <= 0) { 2796 more = 1; 2797 break; 2798 } 2799 2800 txm = &txr->txr_maps[cons]; 2801 last = txm->txm_eop; 2802 txd = &ring[last]; 2803 2804 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK); 2805 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE)) 2806 break; 2807 2808 map = txm->txm_map; 2809 2810 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2811 BUS_DMASYNC_POSTWRITE); 2812 bus_dmamap_unload(sc->sc_dmat, map); 2813 2814 m = txm->txm_m; 2815 if (m != NULL) { 2816 if_statinc_ref(nsr, if_opackets); 2817 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len); 2818 if (ISSET(m->m_flags, M_MCAST)) 2819 if_statinc_ref(nsr, if_omcasts); 2820 m_freem(m); 2821 } 2822 2823 txm->txm_m = NULL; 2824 txm->txm_eop = -1; 2825 2826 cons = last + 1; 2827 cons &= mask; 2828 done = 1; 2829 } while (cons != prod); 2830 2831 
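	/*
	 * The loop above reclaims completed packets: txm_eop remembers
	 * the index of a packet's last descriptor, and the packet is
	 * finished once the hardware has rewritten that descriptor's
	 * DTYPE field to IXL_TX_DESC_DTYPE_DONE.
	 */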
IF_STAT_PUTREF(ifp); 2832 2833 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2834 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD); 2835 2836 txr->txr_cons = cons; 2837 2838 if (done) { 2839 softint_schedule(txr->txr_si); 2840 if (txr->txr_qid == 0) { 2841 CLR(ifp->if_flags, IFF_OACTIVE); 2842 if_schedule_deferred_start(ifp); 2843 } 2844 } 2845 2846 return more; 2847 } 2848 2849 static void 2850 ixl_start(struct ifnet *ifp) 2851 { 2852 struct ixl_softc *sc; 2853 struct ixl_tx_ring *txr; 2854 2855 sc = ifp->if_softc; 2856 txr = sc->sc_qps[0].qp_txr; 2857 2858 mutex_enter(&txr->txr_lock); 2859 ixl_tx_common_locked(ifp, txr, false); 2860 mutex_exit(&txr->txr_lock); 2861 } 2862 2863 static inline unsigned int 2864 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m) 2865 { 2866 u_int cpuid; 2867 2868 cpuid = cpu_index(curcpu()); 2869 2870 return (unsigned int)(cpuid % sc->sc_nqueue_pairs); 2871 } 2872 2873 static int 2874 ixl_transmit(struct ifnet *ifp, struct mbuf *m) 2875 { 2876 struct ixl_softc *sc; 2877 struct ixl_tx_ring *txr; 2878 unsigned int qid; 2879 2880 sc = ifp->if_softc; 2881 qid = ixl_select_txqueue(sc, m); 2882 2883 txr = sc->sc_qps[qid].qp_txr; 2884 2885 if (__predict_false(!pcq_put(txr->txr_intrq, m))) { 2886 mutex_enter(&txr->txr_lock); 2887 txr->txr_pcqdrop.ev_count++; 2888 mutex_exit(&txr->txr_lock); 2889 2890 m_freem(m); 2891 return ENOBUFS; 2892 } 2893 2894 if (mutex_tryenter(&txr->txr_lock)) { 2895 ixl_tx_common_locked(ifp, txr, true); 2896 mutex_exit(&txr->txr_lock); 2897 } else { 2898 kpreempt_disable(); 2899 softint_schedule(txr->txr_si); 2900 kpreempt_enable(); 2901 } 2902 2903 return 0; 2904 } 2905 2906 static void 2907 ixl_deferred_transmit(void *xtxr) 2908 { 2909 struct ixl_tx_ring *txr = xtxr; 2910 struct ixl_softc *sc = txr->txr_sc; 2911 struct ifnet *ifp = &sc->sc_ec.ec_if; 2912 2913 mutex_enter(&txr->txr_lock); 2914 txr->txr_transmitdef.ev_count++; 2915 if (pcq_peek(txr->txr_intrq) != NULL) 2916 ixl_tx_common_locked(ifp, txr, true); 2917 mutex_exit(&txr->txr_lock); 2918 } 2919 2920 static struct ixl_rx_ring * 2921 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid) 2922 { 2923 struct ixl_rx_ring *rxr = NULL; 2924 struct ixl_rx_map *maps = NULL, *rxm; 2925 unsigned int i; 2926 2927 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP); 2928 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs, 2929 KM_SLEEP); 2930 2931 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem, 2932 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs, 2933 IXL_RX_QUEUE_ALIGN) != 0) 2934 goto free; 2935 2936 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 2937 rxm = &maps[i]; 2938 2939 if (bus_dmamap_create(sc->sc_dmat, 2940 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0, 2941 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0) 2942 goto uncreate; 2943 2944 rxm->rxm_m = NULL; 2945 } 2946 2947 rxr->rxr_cons = rxr->rxr_prod = 0; 2948 rxr->rxr_m_head = NULL; 2949 rxr->rxr_m_tail = &rxr->rxr_m_head; 2950 rxr->rxr_maps = maps; 2951 2952 rxr->rxr_tail = I40E_QRX_TAIL(qid); 2953 rxr->rxr_qid = qid; 2954 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET); 2955 2956 return rxr; 2957 2958 uncreate: 2959 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 2960 rxm = &maps[i]; 2961 2962 if (rxm->rxm_map == NULL) 2963 continue; 2964 2965 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 2966 } 2967 2968 ixl_dmamem_free(sc, &rxr->rxr_mem); 2969 free: 2970 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 2971 kmem_free(rxr, sizeof(*rxr)); 2972 2973 return NULL; 2974 } 2975 2976 static void 2977 
ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 2978 { 2979 struct ixl_rx_map *maps, *rxm; 2980 bus_dmamap_t map; 2981 unsigned int i; 2982 2983 maps = rxr->rxr_maps; 2984 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 2985 rxm = &maps[i]; 2986 2987 if (rxm->rxm_m == NULL) 2988 continue; 2989 2990 map = rxm->rxm_map; 2991 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2992 BUS_DMASYNC_POSTWRITE); 2993 bus_dmamap_unload(sc->sc_dmat, map); 2994 2995 m_freem(rxm->rxm_m); 2996 rxm->rxm_m = NULL; 2997 } 2998 2999 m_freem(rxr->rxr_m_head); 3000 rxr->rxr_m_head = NULL; 3001 rxr->rxr_m_tail = &rxr->rxr_m_head; 3002 3003 rxr->rxr_prod = rxr->rxr_cons = 0; 3004 } 3005 3006 static int 3007 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3008 { 3009 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3010 uint32_t reg; 3011 int i; 3012 3013 for (i = 0; i < 10; i++) { 3014 reg = ixl_rd(sc, ena); 3015 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK)) 3016 return 0; 3017 3018 delaymsec(10); 3019 } 3020 3021 return ETIMEDOUT; 3022 } 3023 3024 static int 3025 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3026 { 3027 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3028 uint32_t reg; 3029 int i; 3030 3031 KASSERT(mutex_owned(&rxr->rxr_lock)); 3032 3033 for (i = 0; i < 10; i++) { 3034 reg = ixl_rd(sc, ena); 3035 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0) 3036 return 0; 3037 3038 delaymsec(10); 3039 } 3040 3041 return ETIMEDOUT; 3042 } 3043 3044 static void 3045 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3046 { 3047 struct ixl_hmc_rxq rxq; 3048 struct ifnet *ifp = &sc->sc_ec.ec_if; 3049 uint16_t rxmax; 3050 void *hmc; 3051 3052 memset(&rxq, 0, sizeof(rxq)); 3053 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN; 3054 3055 rxq.head = htole16(rxr->rxr_cons); 3056 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT); 3057 rxq.qlen = htole16(sc->sc_rx_ring_ndescs); 3058 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT); 3059 rxq.hbuff = 0; 3060 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT; 3061 rxq.dsize = IXL_HMC_RXQ_DSIZE_32; 3062 rxq.crcstrip = 1; 3063 rxq.l2sel = 1; 3064 rxq.showiv = 1; 3065 rxq.rxmax = htole16(rxmax); 3066 rxq.tphrdesc_ena = 0; 3067 rxq.tphwdesc_ena = 0; 3068 rxq.tphdata_ena = 0; 3069 rxq.tphhead_ena = 0; 3070 rxq.lrxqthresh = 0; 3071 rxq.prefena = 1; 3072 3073 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3074 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3075 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, 3076 __arraycount(ixl_hmc_pack_rxq)); 3077 } 3078 3079 static void 3080 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3081 { 3082 void *hmc; 3083 3084 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3085 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3086 rxr->rxr_cons = rxr->rxr_prod = 0; 3087 } 3088 3089 static void 3090 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3091 { 3092 struct ixl_rx_map *maps, *rxm; 3093 unsigned int i; 3094 3095 maps = rxr->rxr_maps; 3096 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3097 rxm = &maps[i]; 3098 3099 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3100 } 3101 3102 ixl_dmamem_free(sc, &rxr->rxr_mem); 3103 mutex_destroy(&rxr->rxr_lock); 3104 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3105 kmem_free(rxr, sizeof(*rxr)); 3106 } 3107 3108 static inline void 3109 ixl_rx_csum(struct mbuf *m, uint64_t qword) 3110 { 3111 int flags_mask; 3112 3113 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) { 3114 /* No L3 or L4 checksum was calculated 
*/ 3115 return; 3116 } 3117 3118 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) { 3119 case IXL_RX_DESC_PTYPE_IPV4FRAG: 3120 case IXL_RX_DESC_PTYPE_IPV4: 3121 case IXL_RX_DESC_PTYPE_SCTPV4: 3122 case IXL_RX_DESC_PTYPE_ICMPV4: 3123 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3124 break; 3125 case IXL_RX_DESC_PTYPE_TCPV4: 3126 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3127 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD; 3128 break; 3129 case IXL_RX_DESC_PTYPE_UDPV4: 3130 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3131 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD; 3132 break; 3133 case IXL_RX_DESC_PTYPE_TCPV6: 3134 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD; 3135 break; 3136 case IXL_RX_DESC_PTYPE_UDPV6: 3137 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD; 3138 break; 3139 default: 3140 flags_mask = 0; 3141 } 3142 3143 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 | 3144 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)); 3145 3146 if (ISSET(qword, IXL_RX_DESC_IPE)) { 3147 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD); 3148 } 3149 3150 if (ISSET(qword, IXL_RX_DESC_L4E)) { 3151 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD); 3152 } 3153 } 3154 3155 static int 3156 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit) 3157 { 3158 struct ifnet *ifp = &sc->sc_ec.ec_if; 3159 struct ixl_rx_wb_desc_32 *ring, *rxd; 3160 struct ixl_rx_map *rxm; 3161 bus_dmamap_t map; 3162 unsigned int cons, prod; 3163 struct mbuf *m; 3164 uint64_t word, word0; 3165 unsigned int len; 3166 unsigned int mask; 3167 int done = 0, more = 0; 3168 3169 KASSERT(mutex_owned(&rxr->rxr_lock)); 3170 3171 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 3172 return 0; 3173 3174 prod = rxr->rxr_prod; 3175 cons = rxr->rxr_cons; 3176 3177 if (cons == prod) 3178 return 0; 3179 3180 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3181 0, IXL_DMA_LEN(&rxr->rxr_mem), 3182 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3183 3184 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3185 mask = sc->sc_rx_ring_ndescs - 1; 3186 3187 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 3188 3189 do { 3190 if (rxlimit-- <= 0) { 3191 more = 1; 3192 break; 3193 } 3194 3195 rxd = &ring[cons]; 3196 3197 word = le64toh(rxd->qword1); 3198 3199 if (!ISSET(word, IXL_RX_DESC_DD)) 3200 break; 3201 3202 rxm = &rxr->rxr_maps[cons]; 3203 3204 map = rxm->rxm_map; 3205 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3206 BUS_DMASYNC_POSTREAD); 3207 bus_dmamap_unload(sc->sc_dmat, map); 3208 3209 m = rxm->rxm_m; 3210 rxm->rxm_m = NULL; 3211 3212 KASSERT(m != NULL); 3213 3214 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT; 3215 m->m_len = len; 3216 m->m_pkthdr.len = 0; 3217 3218 m->m_next = NULL; 3219 *rxr->rxr_m_tail = m; 3220 rxr->rxr_m_tail = &m->m_next; 3221 3222 m = rxr->rxr_m_head; 3223 m->m_pkthdr.len += len; 3224 3225 if (ISSET(word, IXL_RX_DESC_EOP)) { 3226 word0 = le64toh(rxd->qword0); 3227 3228 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) { 3229 vlan_set_tag(m, 3230 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK)); 3231 } 3232 3233 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0) 3234 ixl_rx_csum(m, word); 3235 3236 if (!ISSET(word, 3237 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) { 3238 m_set_rcvif(m, ifp); 3239 if_statinc_ref(nsr, if_ipackets); 3240 if_statadd_ref(nsr, if_ibytes, 3241 m->m_pkthdr.len); 3242 if_percpuq_enqueue(sc->sc_ipq, m); 3243 } else { 3244 if_statinc_ref(nsr, if_ierrors); 3245 m_freem(m); 3246 } 3247 3248 rxr->rxr_m_head = NULL; 3249 rxr->rxr_m_tail = &rxr->rxr_m_head; 3250 } 3251 
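		/*
		 * Descriptors without EOP carry the middle of a
		 * multi-buffer frame; their mbufs stay chained on
		 * rxr_m_head until the descriptor with EOP completes
		 * the packet above.
		 */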
3252 cons++; 3253 cons &= mask; 3254 3255 done = 1; 3256 } while (cons != prod); 3257 3258 if (done) { 3259 rxr->rxr_cons = cons; 3260 if (ixl_rxfill(sc, rxr) == -1) 3261 if_statinc_ref(nsr, if_iqdrops); 3262 } 3263 3264 IF_STAT_PUTREF(ifp); 3265 3266 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3267 0, IXL_DMA_LEN(&rxr->rxr_mem), 3268 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3269 3270 return more; 3271 } 3272 3273 static int 3274 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3275 { 3276 struct ixl_rx_rd_desc_32 *ring, *rxd; 3277 struct ixl_rx_map *rxm; 3278 bus_dmamap_t map; 3279 struct mbuf *m; 3280 unsigned int prod; 3281 unsigned int slots; 3282 unsigned int mask; 3283 int post = 0, error = 0; 3284 3285 KASSERT(mutex_owned(&rxr->rxr_lock)); 3286 3287 prod = rxr->rxr_prod; 3288 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons, 3289 sc->sc_rx_ring_ndescs); 3290 3291 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3292 mask = sc->sc_rx_ring_ndescs - 1; 3293 3294 if (__predict_false(slots <= 0)) 3295 return -1; 3296 3297 do { 3298 rxm = &rxr->rxr_maps[prod]; 3299 3300 MGETHDR(m, M_DONTWAIT, MT_DATA); 3301 if (m == NULL) { 3302 rxr->rxr_mgethdr_failed.ev_count++; 3303 error = -1; 3304 break; 3305 } 3306 3307 MCLGET(m, M_DONTWAIT); 3308 if (!ISSET(m->m_flags, M_EXT)) { 3309 rxr->rxr_mgetcl_failed.ev_count++; 3310 error = -1; 3311 m_freem(m); 3312 break; 3313 } 3314 3315 m->m_len = m->m_pkthdr.len = MCLBYTES; 3316 m_adj(m, ETHER_ALIGN); 3317 3318 map = rxm->rxm_map; 3319 3320 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 3321 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) { 3322 rxr->rxr_mbuf_load_failed.ev_count++; 3323 error = -1; 3324 m_freem(m); 3325 break; 3326 } 3327 3328 rxm->rxm_m = m; 3329 3330 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3331 BUS_DMASYNC_PREREAD); 3332 3333 rxd = &ring[prod]; 3334 3335 rxd->paddr = htole64(map->dm_segs[0].ds_addr); 3336 rxd->haddr = htole64(0); 3337 3338 prod++; 3339 prod &= mask; 3340 3341 post = 1; 3342 3343 } while (--slots); 3344 3345 if (post) { 3346 rxr->rxr_prod = prod; 3347 ixl_wr(sc, rxr->rxr_tail, prod); 3348 } 3349 3350 return error; 3351 } 3352 3353 static inline int 3354 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp, 3355 u_int txlimit, struct evcnt *txevcnt, 3356 u_int rxlimit, struct evcnt *rxevcnt) 3357 { 3358 struct ixl_tx_ring *txr = qp->qp_txr; 3359 struct ixl_rx_ring *rxr = qp->qp_rxr; 3360 int txmore, rxmore; 3361 int rv; 3362 3363 mutex_enter(&txr->txr_lock); 3364 txevcnt->ev_count++; 3365 txmore = ixl_txeof(sc, txr, txlimit); 3366 mutex_exit(&txr->txr_lock); 3367 3368 mutex_enter(&rxr->rxr_lock); 3369 rxevcnt->ev_count++; 3370 rxmore = ixl_rxeof(sc, rxr, rxlimit); 3371 mutex_exit(&rxr->rxr_lock); 3372 3373 rv = txmore | (rxmore << 1); 3374 3375 return rv; 3376 } 3377 3378 static void 3379 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp) 3380 { 3381 3382 if (qp->qp_workqueue) 3383 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL); 3384 else 3385 softint_schedule(qp->qp_si); 3386 } 3387 3388 static int 3389 ixl_intr(void *xsc) 3390 { 3391 struct ixl_softc *sc = xsc; 3392 struct ixl_tx_ring *txr; 3393 struct ixl_rx_ring *rxr; 3394 uint32_t icr, rxintr, txintr; 3395 int rv = 0; 3396 unsigned int i; 3397 3398 KASSERT(sc != NULL); 3399 3400 ixl_enable_other_intr(sc); 3401 icr = ixl_rd(sc, I40E_PFINT_ICR0); 3402 3403 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) { 3404 atomic_inc_64(&sc->sc_event_atq.ev_count); 3405 ixl_atq_done(sc); 3406 
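		/*
		 * ATQ completions are reaped directly; ARQ processing
		 * is deferred to the driver workqueue (sc_workq).
		 */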
		ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
		rv = 1;
	}

	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
		atomic_inc_64(&sc->sc_event_link.ev_count);
		ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
		rv = 1;
	}

	rxintr = icr & I40E_INTR_NOTX_RX_MASK;
	txintr = icr & I40E_INTR_NOTX_TX_MASK;

	if (txintr || rxintr) {
		for (i = 0; i < sc->sc_nqueue_pairs; i++) {
			txr = sc->sc_qps[i].qp_txr;
			rxr = sc->sc_qps[i].qp_rxr;

			ixl_handle_queue_common(sc, &sc->sc_qps[i],
			    IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
			    IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
		}
		rv = 1;
	}

	return rv;
}

static int
ixl_queue_intr(void *xqp)
{
	struct ixl_queue_pair *qp = xqp;
	struct ixl_tx_ring *txr = qp->qp_txr;
	struct ixl_rx_ring *rxr = qp->qp_rxr;
	struct ixl_softc *sc = qp->qp_sc;
	u_int txlimit, rxlimit;
	int more;

	txlimit = sc->sc_tx_intr_process_limit;
	rxlimit = sc->sc_rx_intr_process_limit;
	qp->qp_workqueue = sc->sc_txrx_workqueue;

	more = ixl_handle_queue_common(sc, qp,
	    txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);

	if (more != 0) {
		ixl_sched_handle_queue(sc, qp);
	} else {
		/* for ALTQ */
		if (txr->txr_qid == 0)
			if_schedule_deferred_start(&sc->sc_ec.ec_if);
		softint_schedule(txr->txr_si);

		ixl_enable_queue_intr(sc, qp);
	}

	return 1;
}

static void
ixl_handle_queue_wk(struct work *wk, void *xsc)
{
	struct ixl_queue_pair *qp;

	qp = container_of(wk, struct ixl_queue_pair, qp_work);
	ixl_handle_queue(qp);
}

static void
ixl_handle_queue(void *xqp)
{
	struct ixl_queue_pair *qp = xqp;
	struct ixl_softc *sc = qp->qp_sc;
	struct ixl_tx_ring *txr = qp->qp_txr;
	struct ixl_rx_ring *rxr = qp->qp_rxr;
	u_int txlimit, rxlimit;
	int more;

	txlimit = sc->sc_tx_process_limit;
	rxlimit = sc->sc_rx_process_limit;

	more = ixl_handle_queue_common(sc, qp,
	    txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);

	if (more != 0)
		ixl_sched_handle_queue(sc, qp);
	else
		ixl_enable_queue_intr(sc, qp);
}

static inline void
ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
{
	uint32_t hmc_idx, hmc_isvf;
	uint32_t hmc_errtype, hmc_objtype, hmc_data;

	hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
	hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
	hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
	hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
	hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
	hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
	hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
	hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
	hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);

	device_printf(sc->sc_dev,
	    "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
	    hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
}

static int
ixl_other_intr(void *xsc)
{
	struct ixl_softc *sc = xsc;
	uint32_t icr, mask, reg;
	int rv = 0;

	icr = ixl_rd(sc, I40E_PFINT_ICR0);
	mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);

	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
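		/*
		 * Same admin queue handling as in ixl_intr(): reap ATQ
		 * completions here and defer ARQ processing to the
		 * workqueue.
		 */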
atomic_inc_64(&sc->sc_event_atq.ev_count); 3529 ixl_atq_done(sc); 3530 ixl_work_add(sc->sc_workq, &sc->sc_arq_task); 3531 rv = 1; 3532 } 3533 3534 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) { 3535 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3536 device_printf(sc->sc_dev, "link stat changed\n"); 3537 3538 atomic_inc_64(&sc->sc_event_link.ev_count); 3539 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task); 3540 rv = 1; 3541 } 3542 3543 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) { 3544 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK); 3545 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 3546 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK; 3547 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3548 3549 device_printf(sc->sc_dev, "GRST: %s\n", 3550 reg == I40E_RESET_CORER ? "CORER" : 3551 reg == I40E_RESET_GLOBR ? "GLOBR" : 3552 reg == I40E_RESET_EMPR ? "EMPR" : 3553 "POR"); 3554 } 3555 3556 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK)) 3557 atomic_inc_64(&sc->sc_event_ecc_err.ev_count); 3558 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)) 3559 atomic_inc_64(&sc->sc_event_pci_exception.ev_count); 3560 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK)) 3561 atomic_inc_64(&sc->sc_event_crit_err.ev_count); 3562 3563 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) { 3564 CLR(mask, IXL_ICR0_CRIT_ERR_MASK); 3565 device_printf(sc->sc_dev, "critical error\n"); 3566 } 3567 3568 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) { 3569 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO); 3570 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK)) 3571 ixl_print_hmc_error(sc, reg); 3572 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0); 3573 } 3574 3575 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask); 3576 ixl_flush(sc); 3577 ixl_enable_other_intr(sc); 3578 return rv; 3579 } 3580 3581 static void 3582 ixl_get_link_status_done(struct ixl_softc *sc, 3583 const struct ixl_aq_desc *iaq) 3584 { 3585 struct ixl_aq_desc iaq_buf; 3586 3587 memcpy(&iaq_buf, iaq, sizeof(iaq_buf)); 3588 3589 /* 3590 * The lock can be released here 3591 * because there is no post processing about ATQ 3592 */ 3593 mutex_exit(&sc->sc_atq_lock); 3594 ixl_link_state_update(sc, &iaq_buf); 3595 mutex_enter(&sc->sc_atq_lock); 3596 } 3597 3598 static void 3599 ixl_get_link_status(void *xsc) 3600 { 3601 struct ixl_softc *sc = xsc; 3602 struct ixl_aq_desc *iaq; 3603 struct ixl_aq_link_param *param; 3604 int error; 3605 3606 mutex_enter(&sc->sc_atq_lock); 3607 3608 iaq = &sc->sc_link_state_atq.iatq_desc; 3609 memset(iaq, 0, sizeof(*iaq)); 3610 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 3611 param = (struct ixl_aq_link_param *)iaq->iaq_param; 3612 param->notify = IXL_AQ_LINK_NOTIFY; 3613 3614 error = ixl_atq_exec_locked(sc, &sc->sc_link_state_atq); 3615 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 3616 3617 if (error == 0) { 3618 ixl_get_link_status_done(sc, iaq); 3619 } 3620 3621 mutex_exit(&sc->sc_atq_lock); 3622 } 3623 3624 static void 3625 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3626 { 3627 struct ifnet *ifp = &sc->sc_ec.ec_if; 3628 int link_state; 3629 3630 mutex_enter(&sc->sc_cfg_lock); 3631 link_state = ixl_set_link_status_locked(sc, iaq); 3632 mutex_exit(&sc->sc_cfg_lock); 3633 3634 if (ifp->if_link_state != link_state) 3635 if_link_state_change(ifp, link_state); 3636 3637 if (link_state != LINK_STATE_DOWN) { 3638 kpreempt_disable(); 3639 if_schedule_deferred_start(ifp); 3640 kpreempt_enable(); 3641 } 3642 } 3643 3644 static void 3645 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq, 3646 const char *msg) 
3647 { 3648 char buf[512]; 3649 size_t len; 3650 3651 len = sizeof(buf); 3652 buf[--len] = '\0'; 3653 3654 device_printf(sc->sc_dev, "%s\n", msg); 3655 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags)); 3656 device_printf(sc->sc_dev, "flags %s opcode %04x\n", 3657 buf, le16toh(iaq->iaq_opcode)); 3658 device_printf(sc->sc_dev, "datalen %u retval %u\n", 3659 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval)); 3660 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie); 3661 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n", 3662 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]), 3663 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3])); 3664 } 3665 3666 static void 3667 ixl_arq(void *xsc) 3668 { 3669 struct ixl_softc *sc = xsc; 3670 struct ixl_aq_desc *arq, *iaq; 3671 struct ixl_aq_buf *aqb; 3672 unsigned int cons = sc->sc_arq_cons; 3673 unsigned int prod; 3674 int done = 0; 3675 3676 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) & 3677 sc->sc_aq_regs->arq_head_mask; 3678 3679 if (cons == prod) 3680 goto done; 3681 3682 arq = IXL_DMA_KVA(&sc->sc_arq); 3683 3684 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3685 0, IXL_DMA_LEN(&sc->sc_arq), 3686 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3687 3688 do { 3689 iaq = &arq[cons]; 3690 aqb = sc->sc_arq_live[cons]; 3691 3692 KASSERT(aqb != NULL); 3693 3694 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN, 3695 BUS_DMASYNC_POSTREAD); 3696 3697 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3698 ixl_aq_dump(sc, iaq, "arq event"); 3699 3700 switch (iaq->iaq_opcode) { 3701 case htole16(IXL_AQ_OP_PHY_LINK_STATUS): 3702 ixl_link_state_update(sc, iaq); 3703 break; 3704 } 3705 3706 memset(iaq, 0, sizeof(*iaq)); 3707 sc->sc_arq_live[cons] = NULL; 3708 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry); 3709 3710 cons++; 3711 cons &= IXL_AQ_MASK; 3712 3713 done = 1; 3714 } while (cons != prod); 3715 3716 if (done) { 3717 sc->sc_arq_cons = cons; 3718 ixl_arq_fill(sc); 3719 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3720 0, IXL_DMA_LEN(&sc->sc_arq), 3721 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3722 } 3723 3724 done: 3725 ixl_enable_other_intr(sc); 3726 } 3727 3728 static void 3729 ixl_atq_set(struct ixl_atq *iatq, 3730 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *)) 3731 { 3732 3733 iatq->iatq_fn = fn; 3734 } 3735 3736 static int 3737 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3738 { 3739 struct ixl_aq_desc *atq, *slot; 3740 unsigned int prod, cons, prod_next; 3741 3742 /* assert locked */ 3743 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3744 3745 atq = IXL_DMA_KVA(&sc->sc_atq); 3746 prod = sc->sc_atq_prod; 3747 cons = sc->sc_atq_cons; 3748 prod_next = (prod +1) & IXL_AQ_MASK; 3749 3750 if (cons == prod_next) 3751 return ENOMEM; 3752 3753 slot = &atq[prod]; 3754 3755 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3756 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3757 3758 KASSERT(iatq->iatq_fn != NULL); 3759 *slot = iatq->iatq_desc; 3760 slot->iaq_cookie = (uint64_t)((intptr_t)iatq); 3761 3762 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3763 ixl_aq_dump(sc, slot, "atq command"); 3764 3765 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3766 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3767 3768 sc->sc_atq_prod = prod_next; 3769 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod); 3770 3771 return 0; 3772 } 3773 3774 static void 3775 ixl_atq_done_locked(struct ixl_softc *sc) 3776 { 3777 struct ixl_aq_desc *atq, *slot; 3778 
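	/*
	 * Completed commands are found by walking the ring from cons to
	 * prod: a finished slot has IXL_AQ_DD set and its iaq_cookie
	 * points back at the submitting ixl_atq, whose callback is run.
	 * Indices wrap with "cons &= IXL_AQ_MASK"; for example, on a
	 * hypothetical 8-entry ring (mask 7) a cons of 7 advances to 0.
	 */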
struct ixl_atq *iatq; 3779 unsigned int cons; 3780 unsigned int prod; 3781 3782 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3783 3784 prod = sc->sc_atq_prod; 3785 cons = sc->sc_atq_cons; 3786 3787 if (prod == cons) 3788 return; 3789 3790 atq = IXL_DMA_KVA(&sc->sc_atq); 3791 3792 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3793 0, IXL_DMA_LEN(&sc->sc_atq), 3794 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3795 3796 do { 3797 slot = &atq[cons]; 3798 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD))) 3799 break; 3800 3801 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie); 3802 iatq->iatq_desc = *slot; 3803 3804 memset(slot, 0, sizeof(*slot)); 3805 3806 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3807 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response"); 3808 3809 (*iatq->iatq_fn)(sc, &iatq->iatq_desc); 3810 3811 cons++; 3812 cons &= IXL_AQ_MASK; 3813 } while (cons != prod); 3814 3815 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3816 0, IXL_DMA_LEN(&sc->sc_atq), 3817 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3818 3819 sc->sc_atq_cons = cons; 3820 } 3821 3822 static void 3823 ixl_atq_done(struct ixl_softc *sc) 3824 { 3825 3826 mutex_enter(&sc->sc_atq_lock); 3827 ixl_atq_done_locked(sc); 3828 mutex_exit(&sc->sc_atq_lock); 3829 } 3830 3831 static void 3832 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3833 { 3834 3835 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3836 3837 cv_signal(&sc->sc_atq_cv); 3838 } 3839 3840 static int 3841 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq) 3842 { 3843 int error; 3844 3845 mutex_enter(&sc->sc_atq_lock); 3846 error = ixl_atq_exec_locked(sc, iatq); 3847 mutex_exit(&sc->sc_atq_lock); 3848 3849 return error; 3850 } 3851 3852 static int 3853 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3854 { 3855 int error; 3856 3857 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3858 KASSERT(iatq->iatq_desc.iaq_cookie == 0); 3859 3860 ixl_atq_set(iatq, ixl_wakeup); 3861 3862 error = ixl_atq_post_locked(sc, iatq); 3863 if (error) 3864 return error; 3865 3866 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock, 3867 IXL_ATQ_EXEC_TIMEOUT); 3868 3869 return error; 3870 } 3871 3872 static int 3873 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm) 3874 { 3875 struct ixl_aq_desc *atq, *slot; 3876 unsigned int prod; 3877 unsigned int t = 0; 3878 3879 mutex_enter(&sc->sc_atq_lock); 3880 3881 atq = IXL_DMA_KVA(&sc->sc_atq); 3882 prod = sc->sc_atq_prod; 3883 slot = atq + prod; 3884 3885 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3886 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3887 3888 *slot = *iaq; 3889 slot->iaq_flags |= htole16(IXL_AQ_SI); 3890 3891 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3892 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3893 3894 prod++; 3895 prod &= IXL_AQ_MASK; 3896 sc->sc_atq_prod = prod; 3897 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod); 3898 3899 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) { 3900 delaymsec(1); 3901 3902 if (t++ > tm) { 3903 mutex_exit(&sc->sc_atq_lock); 3904 return ETIMEDOUT; 3905 } 3906 } 3907 3908 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3909 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD); 3910 *iaq = *slot; 3911 memset(slot, 0, sizeof(*slot)); 3912 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3913 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD); 3914 3915 sc->sc_atq_cons = prod; 3916 3917 mutex_exit(&sc->sc_atq_lock); 3918 3919 return 0; 3920 } 3921 3922 static int 3923 
ixl_get_version(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;
	uint32_t fwbuild, fwver, apiver;
	uint16_t api_maj_ver, api_min_ver;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);

	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
		return ETIMEDOUT;
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
		return EIO;

	fwbuild = le32toh(iaq.iaq_param[1]);
	fwver = le32toh(iaq.iaq_param[2]);
	apiver = le32toh(iaq.iaq_param[3]);

	api_maj_ver = (uint16_t)apiver;
	api_min_ver = (uint16_t)(apiver >> 16);

	aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
	    (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);

	if (sc->sc_mac_type == I40E_MAC_X722) {
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
		    IXL_SC_AQ_FLAG_NVMREAD);
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
	}

#define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
	if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
	}
#undef IXL_API_VER

	return 0;
}

static int
ixl_get_nvm_version(struct ixl_softc *sc)
{
	uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
	uint32_t eetrack, oem;
	uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
	uint8_t oem_ver, oem_patch;

	nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
	ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);

	nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
	nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
	eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
	oem = ((uint32_t)oem_hi << 16) | oem_lo;
	oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
	oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
	oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);

	aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
	    nvm_maj_ver, nvm_min_ver, eetrack,
	    oem_ver, oem_build, oem_patch);

	return 0;
}

static int
ixl_pxe_clear(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;
	int rv;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
	iaq.iaq_param[0] = htole32(0x2);

	rv = ixl_atq_poll(sc, &iaq, 250);

	ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);

	if (rv != 0)
		return ETIMEDOUT;

	switch (iaq.iaq_retval) {
	case htole16(IXL_AQ_RC_OK):
	case htole16(IXL_AQ_RC_EEXIST):
		break;
	default:
		return EIO;
	}

	return 0;
}

static int
ixl_lldp_shut(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
return -1; 4037 } 4038 4039 switch (iaq.iaq_retval) { 4040 case htole16(IXL_AQ_RC_EMODE): 4041 case htole16(IXL_AQ_RC_EPERM): 4042 /* ignore silently */ 4043 default: 4044 break; 4045 } 4046 4047 return 0; 4048 } 4049 4050 static void 4051 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap) 4052 { 4053 uint16_t id; 4054 uint32_t number, logical_id; 4055 4056 id = le16toh(cap->cap_id); 4057 number = le32toh(cap->number); 4058 logical_id = le32toh(cap->logical_id); 4059 4060 switch (id) { 4061 case IXL_AQ_CAP_RSS: 4062 sc->sc_rss_table_size = number; 4063 sc->sc_rss_table_entry_width = logical_id; 4064 break; 4065 case IXL_AQ_CAP_RXQ: 4066 case IXL_AQ_CAP_TXQ: 4067 sc->sc_nqueue_pairs_device = MIN(number, 4068 sc->sc_nqueue_pairs_device); 4069 break; 4070 } 4071 } 4072 4073 static int 4074 ixl_get_hw_capabilities(struct ixl_softc *sc) 4075 { 4076 struct ixl_dmamem idm; 4077 struct ixl_aq_desc iaq; 4078 struct ixl_aq_capability *caps; 4079 size_t i, ncaps; 4080 bus_size_t caps_size; 4081 uint16_t status; 4082 int rv; 4083 4084 caps_size = sizeof(caps[0]) * 40; 4085 memset(&iaq, 0, sizeof(iaq)); 4086 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP); 4087 4088 do { 4089 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) { 4090 return -1; 4091 } 4092 4093 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4094 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4095 iaq.iaq_datalen = htole16(caps_size); 4096 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4097 4098 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4099 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD); 4100 4101 rv = ixl_atq_poll(sc, &iaq, 250); 4102 4103 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4104 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD); 4105 4106 if (rv != 0) { 4107 aprint_error(", HW capabilities timeout\n"); 4108 goto done; 4109 } 4110 4111 status = le16toh(iaq.iaq_retval); 4112 4113 if (status == IXL_AQ_RC_ENOMEM) { 4114 caps_size = le16toh(iaq.iaq_datalen); 4115 ixl_dmamem_free(sc, &idm); 4116 } 4117 } while (status == IXL_AQ_RC_ENOMEM); 4118 4119 if (status != IXL_AQ_RC_OK) { 4120 aprint_error(", HW capabilities error\n"); 4121 goto done; 4122 } 4123 4124 caps = IXL_DMA_KVA(&idm); 4125 ncaps = le16toh(iaq.iaq_param[1]); 4126 4127 for (i = 0; i < ncaps; i++) { 4128 ixl_parse_hw_capability(sc, &caps[i]); 4129 } 4130 4131 done: 4132 ixl_dmamem_free(sc, &idm); 4133 return rv; 4134 } 4135 4136 static int 4137 ixl_get_mac(struct ixl_softc *sc) 4138 { 4139 struct ixl_dmamem idm; 4140 struct ixl_aq_desc iaq; 4141 struct ixl_aq_mac_addresses *addrs; 4142 int rv; 4143 4144 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) { 4145 aprint_error(", unable to allocate mac addresses\n"); 4146 return -1; 4147 } 4148 4149 memset(&iaq, 0, sizeof(iaq)); 4150 iaq.iaq_flags = htole16(IXL_AQ_BUF); 4151 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ); 4152 iaq.iaq_datalen = htole16(sizeof(*addrs)); 4153 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4154 4155 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4156 BUS_DMASYNC_PREREAD); 4157 4158 rv = ixl_atq_poll(sc, &iaq, 250); 4159 4160 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4161 BUS_DMASYNC_POSTREAD); 4162 4163 if (rv != 0) { 4164 aprint_error(", MAC ADDRESS READ timeout\n"); 4165 rv = -1; 4166 goto done; 4167 } 4168 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4169 aprint_error(", MAC ADDRESS READ error\n"); 4170 rv = -1; 4171 goto done; 4172 } 4173 4174 addrs = IXL_DMA_KVA(&idm); 4175 if (!ISSET(iaq.iaq_param[0], 
htole32(IXL_AQ_MAC_PORT_VALID))) { 4176 printf(", port address is not valid\n"); 4177 goto done; 4178 } 4179 4180 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN); 4181 rv = 0; 4182 4183 done: 4184 ixl_dmamem_free(sc, &idm); 4185 return rv; 4186 } 4187 4188 static int 4189 ixl_get_switch_config(struct ixl_softc *sc) 4190 { 4191 struct ixl_dmamem idm; 4192 struct ixl_aq_desc iaq; 4193 struct ixl_aq_switch_config *hdr; 4194 struct ixl_aq_switch_config_element *elms, *elm; 4195 unsigned int nelm, i; 4196 int rv; 4197 4198 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4199 aprint_error_dev(sc->sc_dev, 4200 "unable to allocate switch config buffer\n"); 4201 return -1; 4202 } 4203 4204 memset(&iaq, 0, sizeof(iaq)); 4205 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4206 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4207 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG); 4208 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN); 4209 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4210 4211 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4212 BUS_DMASYNC_PREREAD); 4213 4214 rv = ixl_atq_poll(sc, &iaq, 250); 4215 4216 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4217 BUS_DMASYNC_POSTREAD); 4218 4219 if (rv != 0) { 4220 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n"); 4221 rv = -1; 4222 goto done; 4223 } 4224 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4225 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n"); 4226 rv = -1; 4227 goto done; 4228 } 4229 4230 hdr = IXL_DMA_KVA(&idm); 4231 elms = (struct ixl_aq_switch_config_element *)(hdr + 1); 4232 4233 nelm = le16toh(hdr->num_reported); 4234 if (nelm < 1) { 4235 aprint_error_dev(sc->sc_dev, "no switch config available\n"); 4236 rv = -1; 4237 goto done; 4238 } 4239 4240 for (i = 0; i < nelm; i++) { 4241 elm = &elms[i]; 4242 4243 aprint_debug_dev(sc->sc_dev, 4244 "type %x revision %u seid %04x\n", 4245 elm->type, elm->revision, le16toh(elm->seid)); 4246 aprint_debug_dev(sc->sc_dev, 4247 "uplink %04x downlink %04x\n", 4248 le16toh(elm->uplink_seid), 4249 le16toh(elm->downlink_seid)); 4250 aprint_debug_dev(sc->sc_dev, 4251 "conntype %x scheduler %04x extra %04x\n", 4252 elm->connection_type, 4253 le16toh(elm->scheduler_id), 4254 le16toh(elm->element_info)); 4255 } 4256 4257 elm = &elms[0]; 4258 4259 sc->sc_uplink_seid = elm->uplink_seid; 4260 sc->sc_downlink_seid = elm->downlink_seid; 4261 sc->sc_seid = elm->seid; 4262 4263 if ((sc->sc_uplink_seid == htole16(0)) != 4264 (sc->sc_downlink_seid == htole16(0))) { 4265 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n"); 4266 rv = -1; 4267 goto done; 4268 } 4269 4270 done: 4271 ixl_dmamem_free(sc, &idm); 4272 return rv; 4273 } 4274 4275 static int 4276 ixl_phy_mask_ints(struct ixl_softc *sc) 4277 { 4278 struct ixl_aq_desc iaq; 4279 4280 memset(&iaq, 0, sizeof(iaq)); 4281 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK); 4282 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK & 4283 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL | 4284 IXL_AQ_PHY_EV_MEDIA_NA)); 4285 4286 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4287 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n"); 4288 return -1; 4289 } 4290 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4291 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n"); 4292 return -1; 4293 } 4294 4295 return 0; 4296 } 4297 4298 static int 4299 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm) 4300 { 4301 struct ixl_aq_desc iaq; 4302 int rv; 4303 4304 memset(&iaq, 0, 
sizeof(iaq)); 4305 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4306 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4307 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES); 4308 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm)); 4309 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT); 4310 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 4311 4312 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4313 BUS_DMASYNC_PREREAD); 4314 4315 rv = ixl_atq_poll(sc, &iaq, 250); 4316 4317 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4318 BUS_DMASYNC_POSTREAD); 4319 4320 if (rv != 0) 4321 return -1; 4322 4323 return le16toh(iaq.iaq_retval); 4324 } 4325 4326 static int 4327 ixl_get_phy_info(struct ixl_softc *sc) 4328 { 4329 struct ixl_dmamem idm; 4330 struct ixl_aq_phy_abilities *phy; 4331 int rv; 4332 4333 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4334 aprint_error_dev(sc->sc_dev, 4335 "unable to allocate phy abilities buffer\n"); 4336 return -1; 4337 } 4338 4339 rv = ixl_get_phy_abilities(sc, &idm); 4340 switch (rv) { 4341 case -1: 4342 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n"); 4343 goto done; 4344 case IXL_AQ_RC_OK: 4345 break; 4346 case IXL_AQ_RC_EIO: 4347 aprint_error_dev(sc->sc_dev,"unable to query phy types\n"); 4348 goto done; 4349 default: 4350 aprint_error_dev(sc->sc_dev, 4351 "GET PHY ABILITIIES error %u\n", rv); 4352 goto done; 4353 } 4354 4355 phy = IXL_DMA_KVA(&idm); 4356 4357 sc->sc_phy_types = le32toh(phy->phy_type); 4358 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32; 4359 4360 sc->sc_phy_abilities = phy->abilities; 4361 sc->sc_phy_linkspeed = phy->link_speed; 4362 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info & 4363 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS | 4364 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS); 4365 sc->sc_eee_cap = phy->eee_capability; 4366 sc->sc_eeer_val = phy->eeer_val; 4367 sc->sc_d3_lpan = phy->d3_lpan; 4368 4369 rv = 0; 4370 4371 done: 4372 ixl_dmamem_free(sc, &idm); 4373 return rv; 4374 } 4375 4376 static int 4377 ixl_set_phy_config(struct ixl_softc *sc, 4378 uint8_t link_speed, uint8_t abilities, bool polling) 4379 { 4380 struct ixl_aq_phy_param *param; 4381 struct ixl_atq iatq; 4382 struct ixl_aq_desc *iaq; 4383 int error; 4384 4385 memset(&iatq, 0, sizeof(iatq)); 4386 4387 iaq = &iatq.iatq_desc; 4388 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG); 4389 param = (struct ixl_aq_phy_param *)&iaq->iaq_param; 4390 param->phy_types = htole32((uint32_t)sc->sc_phy_types); 4391 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32); 4392 param->link_speed = link_speed; 4393 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK; 4394 param->fec_cfg = sc->sc_phy_fec_cfg; 4395 param->eee_capability = sc->sc_eee_cap; 4396 param->eeer_val = sc->sc_eeer_val; 4397 param->d3_lpan = sc->sc_d3_lpan; 4398 4399 if (polling) 4400 error = ixl_atq_poll(sc, iaq, 250); 4401 else 4402 error = ixl_atq_exec(sc, &iatq); 4403 4404 if (error != 0) 4405 return error; 4406 4407 switch (le16toh(iaq->iaq_retval)) { 4408 case IXL_AQ_RC_OK: 4409 break; 4410 case IXL_AQ_RC_EPERM: 4411 return EPERM; 4412 default: 4413 return EIO; 4414 } 4415 4416 return 0; 4417 } 4418 4419 static int 4420 ixl_set_phy_autoselect(struct ixl_softc *sc) 4421 { 4422 uint8_t link_speed, abilities; 4423 4424 link_speed = sc->sc_phy_linkspeed; 4425 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO; 4426 4427 return ixl_set_phy_config(sc, link_speed, abilities, true); 4428 } 4429 4430 static int 4431 
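/*
 * Fetch the current link status with a polled admin queue command and
 * refresh the cached media/link state; if l is not NULL it receives the
 * resulting LINK_STATE_* value.
 */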
ixl_get_link_status_poll(struct ixl_softc *sc, int *l) 4432 { 4433 struct ixl_aq_desc iaq; 4434 struct ixl_aq_link_param *param; 4435 int link; 4436 4437 memset(&iaq, 0, sizeof(iaq)); 4438 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 4439 param = (struct ixl_aq_link_param *)iaq.iaq_param; 4440 param->notify = IXL_AQ_LINK_NOTIFY; 4441 4442 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4443 return ETIMEDOUT; 4444 } 4445 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4446 return EIO; 4447 } 4448 4449 /* It is unneccessary to hold lock */ 4450 link = ixl_set_link_status_locked(sc, &iaq); 4451 4452 if (l != NULL) 4453 *l = link; 4454 4455 return 0; 4456 } 4457 4458 static int 4459 ixl_get_vsi(struct ixl_softc *sc) 4460 { 4461 struct ixl_dmamem *vsi = &sc->sc_scratch; 4462 struct ixl_aq_desc iaq; 4463 struct ixl_aq_vsi_param *param; 4464 struct ixl_aq_vsi_reply *reply; 4465 struct ixl_aq_vsi_data *data; 4466 int rv; 4467 4468 /* grumble, vsi info isn't "known" at compile time */ 4469 4470 memset(&iaq, 0, sizeof(iaq)); 4471 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4472 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4473 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS); 4474 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4475 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4476 4477 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4478 param->uplink_seid = sc->sc_seid; 4479 4480 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4481 BUS_DMASYNC_PREREAD); 4482 4483 rv = ixl_atq_poll(sc, &iaq, 250); 4484 4485 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4486 BUS_DMASYNC_POSTREAD); 4487 4488 if (rv != 0) { 4489 return ETIMEDOUT; 4490 } 4491 4492 switch (le16toh(iaq.iaq_retval)) { 4493 case IXL_AQ_RC_OK: 4494 break; 4495 case IXL_AQ_RC_ENOENT: 4496 return ENOENT; 4497 case IXL_AQ_RC_EACCES: 4498 return EACCES; 4499 default: 4500 return EIO; 4501 } 4502 4503 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param; 4504 sc->sc_vsi_number = le16toh(reply->vsi_number); 4505 data = IXL_DMA_KVA(vsi); 4506 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx); 4507 4508 return 0; 4509 } 4510 4511 static int 4512 ixl_set_vsi(struct ixl_softc *sc) 4513 { 4514 struct ixl_dmamem *vsi = &sc->sc_scratch; 4515 struct ixl_aq_desc iaq; 4516 struct ixl_aq_vsi_param *param; 4517 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi); 4518 unsigned int qnum; 4519 uint16_t val; 4520 int rv; 4521 4522 qnum = sc->sc_nqueue_pairs - 1; 4523 4524 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP | 4525 IXL_AQ_VSI_VALID_VLAN); 4526 4527 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK)); 4528 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG)); 4529 data->queue_mapping[0] = htole16(0); 4530 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) | 4531 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)); 4532 4533 val = le16toh(data->port_vlan_flags); 4534 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK); 4535 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL); 4536 4537 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) { 4538 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH); 4539 } else { 4540 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING); 4541 } 4542 4543 data->port_vlan_flags = htole16(val); 4544 4545 /* grumble, vsi info isn't "known" at compile time */ 4546 4547 memset(&iaq, 0, sizeof(iaq)); 4548 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4549 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? 
IXL_AQ_LB : 0)); 4550 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS); 4551 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4552 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4553 4554 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4555 param->uplink_seid = sc->sc_seid; 4556 4557 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4558 BUS_DMASYNC_PREWRITE); 4559 4560 rv = ixl_atq_poll(sc, &iaq, 250); 4561 4562 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4563 BUS_DMASYNC_POSTWRITE); 4564 4565 if (rv != 0) { 4566 return ETIMEDOUT; 4567 } 4568 4569 switch (le16toh(iaq.iaq_retval)) { 4570 case IXL_AQ_RC_OK: 4571 break; 4572 case IXL_AQ_RC_ENOENT: 4573 return ENOENT; 4574 case IXL_AQ_RC_EACCES: 4575 return EACCES; 4576 default: 4577 return EIO; 4578 } 4579 4580 return 0; 4581 } 4582 4583 static void 4584 ixl_set_filter_control(struct ixl_softc *sc) 4585 { 4586 uint32_t reg; 4587 4588 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0); 4589 4590 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK); 4591 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT); 4592 4593 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK); 4594 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK); 4595 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK); 4596 4597 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg); 4598 } 4599 4600 static inline void 4601 ixl_get_default_rss_key(uint32_t *buf, size_t len) 4602 { 4603 size_t cplen; 4604 uint8_t rss_seed[RSS_KEYSIZE]; 4605 4606 rss_getkey(rss_seed); 4607 memset(buf, 0, len); 4608 4609 cplen = MIN(len, sizeof(rss_seed)); 4610 memcpy(buf, rss_seed, cplen); 4611 } 4612 4613 static int 4614 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen) 4615 { 4616 struct ixl_dmamem *idm; 4617 struct ixl_atq iatq; 4618 struct ixl_aq_desc *iaq; 4619 struct ixl_aq_rss_key_param *param; 4620 struct ixl_aq_rss_key_data *data; 4621 size_t len, datalen, stdlen, extlen; 4622 uint16_t vsi_id; 4623 int rv; 4624 4625 memset(&iatq, 0, sizeof(iatq)); 4626 iaq = &iatq.iatq_desc; 4627 idm = &sc->sc_aqbuf; 4628 4629 datalen = sizeof(*data); 4630 4631 /*XXX The buf size has to be less than the size of the register */ 4632 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen); 4633 4634 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4635 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4636 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY); 4637 iaq->iaq_datalen = htole16(datalen); 4638 4639 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param; 4640 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) | 4641 IXL_AQ_RSSKEY_VSI_VALID; 4642 param->vsi_id = htole16(vsi_id); 4643 4644 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 4645 data = IXL_DMA_KVA(idm); 4646 4647 len = MIN(keylen, datalen); 4648 stdlen = MIN(sizeof(data->standard_rss_key), len); 4649 memcpy(data->standard_rss_key, key, stdlen); 4650 len = (len > stdlen) ? (len - stdlen) : 0; 4651 4652 extlen = MIN(sizeof(data->extended_hash_key), len); 4653 extlen = (stdlen < keylen) ? 
	    keylen - stdlen : 0;
	memcpy(data->extended_hash_key, key + stdlen, extlen);

	ixl_aq_dva(iaq, IXL_DMA_DVA(idm));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);

	rv = ixl_atq_exec(sc, &iatq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);

	if (rv != 0) {
		return ETIMEDOUT;
	}

	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
		return EIO;
	}

	return 0;
}

static int
ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
{
	struct ixl_dmamem *idm;
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	struct ixl_aq_rss_lut_param *param;
	uint16_t vsi_id;
	uint8_t *data;
	size_t dmalen;
	int rv;

	memset(&iatq, 0, sizeof(iatq));
	iaq = &iatq.iatq_desc;
	idm = &sc->sc_aqbuf;

	dmalen = MIN(lutlen, IXL_DMA_LEN(idm));

	iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
	    (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
	iaq->iaq_datalen = htole16(dmalen);

	memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
	data = IXL_DMA_KVA(idm);
	memcpy(data, lut, dmalen);
	ixl_aq_dva(iaq, IXL_DMA_DVA(idm));

	param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
	vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
	    IXL_AQ_RSSLUT_VSI_VALID;
	param->vsi_id = htole16(vsi_id);
	param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
	    IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);

	rv = ixl_atq_exec(sc, &iatq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);

	if (rv != 0) {
		return ETIMEDOUT;
	}

	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
		return EIO;
	}

	return 0;
}

static int
ixl_register_rss_key(struct ixl_softc *sc)
{
	uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
	int rv;
	size_t i;

	ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));

	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
		rv = ixl_set_rss_key(sc, (uint8_t *)rss_seed,
		    sizeof(rss_seed));
	} else {
		rv = 0;
		for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
			ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
		}
	}

	return rv;
}

static void
ixl_register_rss_pctype(struct ixl_softc *sc)
{
	uint64_t set_hena = 0;
	uint32_t hena0, hena1;

	/*
	 * We use TCP/UDP with IPv4/IPv6 by default.
	 * Note: the device cannot use just the IP header of each
	 * TCP/UDP packet for the RSS hash calculation.
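	 * The PFQF_HENA(0)/(1) registers written below together hold a
	 * 64-bit enable mask indexed by packet classifier type (PCTYPE);
	 * the per-MAC defaults enable at least the TCP and UDP over
	 * IPv4/IPv6 classifier types.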
4763 */ 4764 if (sc->sc_mac_type == I40E_MAC_X722) 4765 set_hena = IXL_RSS_HENA_DEFAULT_X722; 4766 else 4767 set_hena = IXL_RSS_HENA_DEFAULT_XL710; 4768 4769 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0)); 4770 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1)); 4771 4772 SET(hena0, set_hena); 4773 SET(hena1, set_hena >> 32); 4774 4775 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0); 4776 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1); 4777 } 4778 4779 static int 4780 ixl_register_rss_hlut(struct ixl_softc *sc) 4781 { 4782 unsigned int qid; 4783 uint8_t hlut_buf[512], lut_mask; 4784 uint32_t *hluts; 4785 size_t i, hluts_num; 4786 int rv; 4787 4788 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1; 4789 4790 for (i = 0; i < sc->sc_rss_table_size; i++) { 4791 qid = i % sc->sc_nqueue_pairs; 4792 hlut_buf[i] = qid & lut_mask; 4793 } 4794 4795 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) { 4796 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf)); 4797 } else { 4798 rv = 0; 4799 hluts = (uint32_t *)hlut_buf; 4800 hluts_num = sc->sc_rss_table_size >> 2; 4801 for (i = 0; i < hluts_num; i++) { 4802 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]); 4803 } 4804 ixl_flush(sc); 4805 } 4806 4807 return rv; 4808 } 4809 4810 static void 4811 ixl_config_rss(struct ixl_softc *sc) 4812 { 4813 4814 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 4815 4816 ixl_register_rss_key(sc); 4817 ixl_register_rss_pctype(sc); 4818 ixl_register_rss_hlut(sc); 4819 } 4820 4821 static const struct ixl_phy_type * 4822 ixl_search_phy_type(uint8_t phy_type) 4823 { 4824 const struct ixl_phy_type *itype; 4825 uint64_t mask; 4826 unsigned int i; 4827 4828 if (phy_type >= 64) 4829 return NULL; 4830 4831 mask = 1ULL << phy_type; 4832 4833 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 4834 itype = &ixl_phy_type_map[i]; 4835 4836 if (ISSET(itype->phy_type, mask)) 4837 return itype; 4838 } 4839 4840 return NULL; 4841 } 4842 4843 static uint64_t 4844 ixl_search_link_speed(uint8_t link_speed) 4845 { 4846 const struct ixl_speed_type *type; 4847 unsigned int i; 4848 4849 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4850 type = &ixl_speed_type_map[i]; 4851 4852 if (ISSET(type->dev_speed, link_speed)) 4853 return type->net_speed; 4854 } 4855 4856 return 0; 4857 } 4858 4859 static uint8_t 4860 ixl_search_baudrate(uint64_t baudrate) 4861 { 4862 const struct ixl_speed_type *type; 4863 unsigned int i; 4864 4865 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4866 type = &ixl_speed_type_map[i]; 4867 4868 if (type->net_speed == baudrate) { 4869 return type->dev_speed; 4870 } 4871 } 4872 4873 return 0; 4874 } 4875 4876 static int 4877 ixl_restart_an(struct ixl_softc *sc) 4878 { 4879 struct ixl_aq_desc iaq; 4880 4881 memset(&iaq, 0, sizeof(iaq)); 4882 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN); 4883 iaq.iaq_param[0] = 4884 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE); 4885 4886 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4887 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n"); 4888 return -1; 4889 } 4890 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4891 aprint_error_dev(sc->sc_dev, "RESTART AN error\n"); 4892 return -1; 4893 } 4894 4895 return 0; 4896 } 4897 4898 static int 4899 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 4900 uint16_t vlan, uint16_t flags) 4901 { 4902 struct ixl_aq_desc iaq; 4903 struct ixl_aq_add_macvlan *param; 4904 struct ixl_aq_add_macvlan_elem *elem; 4905 4906 memset(&iaq, 0, sizeof(iaq)); 4907 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 4908 iaq.iaq_opcode = 
htole16(IXL_AQ_OP_ADD_MACVLAN); 4909 iaq.iaq_datalen = htole16(sizeof(*elem)); 4910 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 4911 4912 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param; 4913 param->num_addrs = htole16(1); 4914 param->seid0 = htole16(0x8000) | sc->sc_seid; 4915 param->seid1 = 0; 4916 param->seid2 = 0; 4917 4918 elem = IXL_DMA_KVA(&sc->sc_scratch); 4919 memset(elem, 0, sizeof(*elem)); 4920 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 4921 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags); 4922 elem->vlan = htole16(vlan); 4923 4924 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4925 return IXL_AQ_RC_EINVAL; 4926 } 4927 4928 switch (le16toh(iaq.iaq_retval)) { 4929 case IXL_AQ_RC_OK: 4930 break; 4931 case IXL_AQ_RC_ENOSPC: 4932 return ENOSPC; 4933 case IXL_AQ_RC_ENOENT: 4934 return ENOENT; 4935 case IXL_AQ_RC_EACCES: 4936 return EACCES; 4937 case IXL_AQ_RC_EEXIST: 4938 return EEXIST; 4939 case IXL_AQ_RC_EINVAL: 4940 return EINVAL; 4941 default: 4942 return EIO; 4943 } 4944 4945 return 0; 4946 } 4947 4948 static int 4949 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 4950 uint16_t vlan, uint16_t flags) 4951 { 4952 struct ixl_aq_desc iaq; 4953 struct ixl_aq_remove_macvlan *param; 4954 struct ixl_aq_remove_macvlan_elem *elem; 4955 4956 memset(&iaq, 0, sizeof(iaq)); 4957 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 4958 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN); 4959 iaq.iaq_datalen = htole16(sizeof(*elem)); 4960 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 4961 4962 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param; 4963 param->num_addrs = htole16(1); 4964 param->seid0 = htole16(0x8000) | sc->sc_seid; 4965 param->seid1 = 0; 4966 param->seid2 = 0; 4967 4968 elem = IXL_DMA_KVA(&sc->sc_scratch); 4969 memset(elem, 0, sizeof(*elem)); 4970 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 4971 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags); 4972 elem->vlan = htole16(vlan); 4973 4974 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4975 return EINVAL; 4976 } 4977 4978 switch (le16toh(iaq.iaq_retval)) { 4979 case IXL_AQ_RC_OK: 4980 break; 4981 case IXL_AQ_RC_ENOENT: 4982 return ENOENT; 4983 case IXL_AQ_RC_EACCES: 4984 return EACCES; 4985 case IXL_AQ_RC_EINVAL: 4986 return EINVAL; 4987 default: 4988 return EIO; 4989 } 4990 4991 return 0; 4992 } 4993 4994 static int 4995 ixl_hmc(struct ixl_softc *sc) 4996 { 4997 struct { 4998 uint32_t count; 4999 uint32_t minsize; 5000 bus_size_t objsiz; 5001 bus_size_t setoff; 5002 bus_size_t setcnt; 5003 } regs[] = { 5004 { 5005 0, 5006 IXL_HMC_TXQ_MINSIZE, 5007 I40E_GLHMC_LANTXOBJSZ, 5008 I40E_GLHMC_LANTXBASE(sc->sc_pf_id), 5009 I40E_GLHMC_LANTXCNT(sc->sc_pf_id), 5010 }, 5011 { 5012 0, 5013 IXL_HMC_RXQ_MINSIZE, 5014 I40E_GLHMC_LANRXOBJSZ, 5015 I40E_GLHMC_LANRXBASE(sc->sc_pf_id), 5016 I40E_GLHMC_LANRXCNT(sc->sc_pf_id), 5017 }, 5018 { 5019 0, 5020 0, 5021 I40E_GLHMC_FCOEDDPOBJSZ, 5022 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id), 5023 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id), 5024 }, 5025 { 5026 0, 5027 0, 5028 I40E_GLHMC_FCOEFOBJSZ, 5029 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id), 5030 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id), 5031 }, 5032 }; 5033 struct ixl_hmc_entry *e; 5034 uint64_t size, dva; 5035 uint8_t *kva; 5036 uint64_t *sdpage; 5037 unsigned int i; 5038 int npages, tables; 5039 uint32_t reg; 5040 5041 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries)); 5042 5043 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count = 5044 ixl_rd(sc, I40E_GLHMC_LANQMAX); 5045 5046 size = 0; 5047 for (i 
= 0; i < __arraycount(regs); i++) { 5048 e = &sc->sc_hmc_entries[i]; 5049 5050 e->hmc_count = regs[i].count; 5051 reg = ixl_rd(sc, regs[i].objsiz); 5052 e->hmc_size = IXL_BIT_ULL(0x3F & reg); 5053 e->hmc_base = size; 5054 5055 if ((e->hmc_size * 8) < regs[i].minsize) { 5056 aprint_error_dev(sc->sc_dev, 5057 "kernel hmc entry is too big\n"); 5058 return -1; 5059 } 5060 5061 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP); 5062 } 5063 size = roundup(size, IXL_HMC_PGSIZE); 5064 npages = size / IXL_HMC_PGSIZE; 5065 5066 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ; 5067 5068 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) { 5069 aprint_error_dev(sc->sc_dev, 5070 "unable to allocate hmc pd memory\n"); 5071 return -1; 5072 } 5073 5074 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE, 5075 IXL_HMC_PGSIZE) != 0) { 5076 aprint_error_dev(sc->sc_dev, 5077 "unable to allocate hmc sd memory\n"); 5078 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5079 return -1; 5080 } 5081 5082 kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 5083 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd)); 5084 5085 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 5086 0, IXL_DMA_LEN(&sc->sc_hmc_pd), 5087 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5088 5089 dva = IXL_DMA_DVA(&sc->sc_hmc_pd); 5090 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd); 5091 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd)); 5092 5093 for (i = 0; (int)i < npages; i++) { 5094 *sdpage = htole64(dva | IXL_HMC_PDVALID); 5095 sdpage++; 5096 5097 dva += IXL_HMC_PGSIZE; 5098 } 5099 5100 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd), 5101 0, IXL_DMA_LEN(&sc->sc_hmc_sd), 5102 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5103 5104 dva = IXL_DMA_DVA(&sc->sc_hmc_sd); 5105 for (i = 0; (int)i < tables; i++) { 5106 uint32_t count; 5107 5108 KASSERT(npages >= 0); 5109 5110 count = ((unsigned int)npages > IXL_HMC_PGS) ? 
5111 IXL_HMC_PGS : (unsigned int)npages; 5112 5113 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32); 5114 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva | 5115 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | 5116 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)); 5117 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 5118 ixl_wr(sc, I40E_PFHMC_SDCMD, 5119 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i); 5120 5121 npages -= IXL_HMC_PGS; 5122 dva += IXL_HMC_PGSIZE; 5123 } 5124 5125 for (i = 0; i < __arraycount(regs); i++) { 5126 e = &sc->sc_hmc_entries[i]; 5127 5128 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP); 5129 ixl_wr(sc, regs[i].setcnt, e->hmc_count); 5130 } 5131 5132 return 0; 5133 } 5134 5135 static void 5136 ixl_hmc_free(struct ixl_softc *sc) 5137 { 5138 ixl_dmamem_free(sc, &sc->sc_hmc_sd); 5139 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5140 } 5141 5142 static void 5143 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing, 5144 unsigned int npacking) 5145 { 5146 uint8_t *dst = d; 5147 const uint8_t *src = s; 5148 unsigned int i; 5149 5150 for (i = 0; i < npacking; i++) { 5151 const struct ixl_hmc_pack *pack = &packing[i]; 5152 unsigned int offset = pack->lsb / 8; 5153 unsigned int align = pack->lsb % 8; 5154 const uint8_t *in = src + pack->offset; 5155 uint8_t *out = dst + offset; 5156 int width = pack->width; 5157 unsigned int inbits = 0; 5158 5159 if (align) { 5160 inbits = (*in++) << align; 5161 *out++ |= (inbits & 0xff); 5162 inbits >>= 8; 5163 5164 width -= 8 - align; 5165 } 5166 5167 while (width >= 8) { 5168 inbits |= (*in++) << align; 5169 *out++ = (inbits & 0xff); 5170 inbits >>= 8; 5171 5172 width -= 8; 5173 } 5174 5175 if (width > 0) { 5176 inbits |= (*in) << align; 5177 *out |= (inbits & ((1 << width) - 1)); 5178 } 5179 } 5180 } 5181 5182 static struct ixl_aq_buf * 5183 ixl_aqb_alloc(struct ixl_softc *sc) 5184 { 5185 struct ixl_aq_buf *aqb; 5186 5187 aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP); 5188 5189 aqb->aqb_size = IXL_AQ_BUFLEN; 5190 5191 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1, 5192 aqb->aqb_size, 0, 5193 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0) 5194 goto free; 5195 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size, 5196 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs, 5197 BUS_DMA_WAITOK) != 0) 5198 goto destroy; 5199 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs, 5200 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0) 5201 goto dma_free; 5202 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data, 5203 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0) 5204 goto unmap; 5205 5206 return aqb; 5207 unmap: 5208 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5209 dma_free: 5210 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5211 destroy: 5212 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5213 free: 5214 kmem_free(aqb, sizeof(*aqb)); 5215 5216 return NULL; 5217 } 5218 5219 static void 5220 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb) 5221 { 5222 5223 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map); 5224 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5225 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5226 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5227 kmem_free(aqb, sizeof(*aqb)); 5228 } 5229 5230 static int 5231 ixl_arq_fill(struct ixl_softc *sc) 5232 { 5233 struct ixl_aq_buf *aqb; 5234 struct ixl_aq_desc *arq, *iaq; 5235 unsigned int prod = sc->sc_arq_prod; 5236 unsigned int n; 5237 int post = 0; 5238 5239 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons, 
5240 IXL_AQ_NUM); 5241 arq = IXL_DMA_KVA(&sc->sc_arq); 5242 5243 if (__predict_false(n <= 0)) 5244 return 0; 5245 5246 do { 5247 aqb = sc->sc_arq_live[prod]; 5248 iaq = &arq[prod]; 5249 5250 if (aqb == NULL) { 5251 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle); 5252 if (aqb != NULL) { 5253 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5254 ixl_aq_buf, aqb_entry); 5255 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) { 5256 break; 5257 } 5258 5259 sc->sc_arq_live[prod] = aqb; 5260 memset(aqb->aqb_data, 0, aqb->aqb_size); 5261 5262 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, 5263 aqb->aqb_size, BUS_DMASYNC_PREREAD); 5264 5265 iaq->iaq_flags = htole16(IXL_AQ_BUF | 5266 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? 5267 IXL_AQ_LB : 0)); 5268 iaq->iaq_opcode = 0; 5269 iaq->iaq_datalen = htole16(aqb->aqb_size); 5270 iaq->iaq_retval = 0; 5271 iaq->iaq_cookie = 0; 5272 iaq->iaq_param[0] = 0; 5273 iaq->iaq_param[1] = 0; 5274 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr); 5275 } 5276 5277 prod++; 5278 prod &= IXL_AQ_MASK; 5279 5280 post = 1; 5281 5282 } while (--n); 5283 5284 if (post) { 5285 sc->sc_arq_prod = prod; 5286 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 5287 } 5288 5289 return post; 5290 } 5291 5292 static void 5293 ixl_arq_unfill(struct ixl_softc *sc) 5294 { 5295 struct ixl_aq_buf *aqb; 5296 unsigned int i; 5297 5298 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) { 5299 aqb = sc->sc_arq_live[i]; 5300 if (aqb == NULL) 5301 continue; 5302 5303 sc->sc_arq_live[i] = NULL; 5304 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size, 5305 BUS_DMASYNC_POSTREAD); 5306 ixl_aqb_free(sc, aqb); 5307 } 5308 5309 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) { 5310 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5311 ixl_aq_buf, aqb_entry); 5312 ixl_aqb_free(sc, aqb); 5313 } 5314 } 5315 5316 static void 5317 ixl_clear_hw(struct ixl_softc *sc) 5318 { 5319 uint32_t num_queues, base_queue; 5320 uint32_t num_pf_int; 5321 uint32_t num_vf_int; 5322 uint32_t num_vfs; 5323 uint32_t i, j; 5324 uint32_t val; 5325 uint32_t eol = 0x7ff; 5326 5327 /* get number of interrupts, queues, and vfs */ 5328 val = ixl_rd(sc, I40E_GLPCI_CNF2); 5329 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 5330 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 5331 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> 5332 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; 5333 5334 val = ixl_rd(sc, I40E_PFLAN_QALLOC); 5335 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> 5336 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 5337 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> 5338 I40E_PFLAN_QALLOC_LASTQ_SHIFT; 5339 if (val & I40E_PFLAN_QALLOC_VALID_MASK) 5340 num_queues = (j - base_queue) + 1; 5341 else 5342 num_queues = 0; 5343 5344 val = ixl_rd(sc, I40E_PF_VT_PFALLOC); 5345 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> 5346 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; 5347 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> 5348 I40E_PF_VT_PFALLOC_LASTVF_SHIFT; 5349 if (val & I40E_PF_VT_PFALLOC_VALID_MASK) 5350 num_vfs = (j - i) + 1; 5351 else 5352 num_vfs = 0; 5353 5354 /* stop all the interrupts */ 5355 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5356 ixl_flush(sc); 5357 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 5358 for (i = 0; i < num_pf_int - 2; i++) 5359 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val); 5360 ixl_flush(sc); 5361 5362 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ 5363 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5364 ixl_wr(sc, I40E_PFINT_LNKLST0, val); 5365 for (i = 0; i < num_pf_int - 2; i++) 5366 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val); 5367 val = eol << 
I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5368 for (i = 0; i < num_vfs; i++) 5369 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val); 5370 for (i = 0; i < num_vf_int - 2; i++) 5371 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val); 5372 5373 /* warn the HW of the coming Tx disables */ 5374 for (i = 0; i < num_queues; i++) { 5375 uint32_t abs_queue_idx = base_queue + i; 5376 uint32_t reg_block = 0; 5377 5378 if (abs_queue_idx >= 128) { 5379 reg_block = abs_queue_idx / 128; 5380 abs_queue_idx %= 128; 5381 } 5382 5383 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block)); 5384 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 5385 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 5386 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 5387 5388 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val); 5389 } 5390 delaymsec(400); 5391 5392 /* stop all the queues */ 5393 for (i = 0; i < num_queues; i++) { 5394 ixl_wr(sc, I40E_QINT_TQCTL(i), 0); 5395 ixl_wr(sc, I40E_QTX_ENA(i), 0); 5396 ixl_wr(sc, I40E_QINT_RQCTL(i), 0); 5397 ixl_wr(sc, I40E_QRX_ENA(i), 0); 5398 } 5399 5400 /* short wait for all queue disables to settle */ 5401 delaymsec(50); 5402 } 5403 5404 static int 5405 ixl_pf_reset(struct ixl_softc *sc) 5406 { 5407 uint32_t cnt = 0; 5408 uint32_t cnt1 = 0; 5409 uint32_t reg = 0, reg0 = 0; 5410 uint32_t grst_del; 5411 5412 /* 5413 * Poll for Global Reset steady state in case of recent GRST. 5414 * The grst delay value is in 100ms units, and we'll wait a 5415 * couple counts longer to be sure we don't just miss the end. 5416 */ 5417 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL); 5418 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK; 5419 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 5420 5421 grst_del = grst_del * 20; 5422 5423 for (cnt = 0; cnt < grst_del; cnt++) { 5424 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 5425 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 5426 break; 5427 delaymsec(100); 5428 } 5429 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5430 aprint_error(", Global reset polling failed to complete\n"); 5431 return -1; 5432 } 5433 5434 /* Now Wait for the FW to be ready */ 5435 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { 5436 reg = ixl_rd(sc, I40E_GLNVM_ULD); 5437 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5438 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); 5439 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5440 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) 5441 break; 5442 5443 delaymsec(10); 5444 } 5445 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5446 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { 5447 aprint_error(", wait for FW Reset complete timed out " 5448 "(I40E_GLNVM_ULD = 0x%x)\n", reg); 5449 return -1; 5450 } 5451 5452 /* 5453 * If there was a Global Reset in progress when we got here, 5454 * we don't need to do the PF Reset 5455 */ 5456 if (cnt == 0) { 5457 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5458 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK); 5459 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { 5460 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5461 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) 5462 break; 5463 delaymsec(1); 5464 5465 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT); 5466 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5467 aprint_error(", Core reset upcoming." 
5468 " Skipping PF reset reset request\n"); 5469 return -1; 5470 } 5471 } 5472 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { 5473 aprint_error(", PF reset polling failed to complete" 5474 "(I40E_PFGEN_CTRL= 0x%x)\n", reg); 5475 return -1; 5476 } 5477 } 5478 5479 return 0; 5480 } 5481 5482 static int 5483 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm, 5484 bus_size_t size, bus_size_t align) 5485 { 5486 ixm->ixm_size = size; 5487 5488 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1, 5489 ixm->ixm_size, 0, 5490 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 5491 &ixm->ixm_map) != 0) 5492 return 1; 5493 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size, 5494 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs, 5495 BUS_DMA_WAITOK) != 0) 5496 goto destroy; 5497 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs, 5498 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0) 5499 goto free; 5500 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva, 5501 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0) 5502 goto unmap; 5503 5504 memset(ixm->ixm_kva, 0, ixm->ixm_size); 5505 5506 return 0; 5507 unmap: 5508 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5509 free: 5510 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5511 destroy: 5512 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5513 return 1; 5514 } 5515 5516 static void 5517 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm) 5518 { 5519 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map); 5520 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5521 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5522 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5523 } 5524 5525 static int 5526 ixl_setup_vlan_hwfilter(struct ixl_softc *sc) 5527 { 5528 struct ethercom *ec = &sc->sc_ec; 5529 struct vlanid_list *vlanidp; 5530 int rv; 5531 5532 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5533 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5534 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5535 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5536 5537 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5538 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5539 if (rv != 0) 5540 return rv; 5541 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5542 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5543 if (rv != 0) 5544 return rv; 5545 5546 ETHER_LOCK(ec); 5547 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5548 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 5549 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5550 if (rv != 0) 5551 break; 5552 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 5553 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5554 if (rv != 0) 5555 break; 5556 } 5557 ETHER_UNLOCK(ec); 5558 5559 return rv; 5560 } 5561 5562 static void 5563 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc) 5564 { 5565 struct vlanid_list *vlanidp; 5566 struct ethercom *ec = &sc->sc_ec; 5567 5568 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5569 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5570 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5571 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5572 5573 ETHER_LOCK(ec); 5574 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5575 ixl_remove_macvlan(sc, sc->sc_enaddr, 5576 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5577 ixl_remove_macvlan(sc, etherbroadcastaddr, 5578 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5579 } 5580 ETHER_UNLOCK(ec); 5581 5582 ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5583 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5584 ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5585 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5586 } 5587 5588 static int 
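/*
 * Bring the admin queue MAC/VLAN filters in line with the current
 * ETHERCAP_VLAN_HWFILTER setting; if installing the per-VLAN filters
 * fails, fall back to the VLAN-ignoring catch-all filters.
 */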
5589 ixl_update_macvlan(struct ixl_softc *sc) 5590 { 5591 int rv = 0; 5592 int next_ec_capenable = sc->sc_ec.ec_capenable; 5593 5594 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 5595 rv = ixl_setup_vlan_hwfilter(sc); 5596 if (rv != 0) 5597 ixl_teardown_vlan_hwfilter(sc); 5598 } else { 5599 ixl_teardown_vlan_hwfilter(sc); 5600 } 5601 5602 return rv; 5603 } 5604 5605 static int 5606 ixl_ifflags_cb(struct ethercom *ec) 5607 { 5608 struct ifnet *ifp = &ec->ec_if; 5609 struct ixl_softc *sc = ifp->if_softc; 5610 int rv, change; 5611 5612 mutex_enter(&sc->sc_cfg_lock); 5613 5614 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable; 5615 5616 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) { 5617 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 5618 rv = ENETRESET; 5619 goto out; 5620 } 5621 5622 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) { 5623 rv = ixl_update_macvlan(sc); 5624 if (rv == 0) { 5625 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 5626 } else { 5627 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER); 5628 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 5629 } 5630 } 5631 5632 rv = ixl_iff(sc); 5633 out: 5634 mutex_exit(&sc->sc_cfg_lock); 5635 5636 return rv; 5637 } 5638 5639 static int 5640 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 5641 { 5642 const struct ixl_aq_link_status *status; 5643 const struct ixl_phy_type *itype; 5644 5645 uint64_t ifm_active = IFM_ETHER; 5646 uint64_t ifm_status = IFM_AVALID; 5647 int link_state = LINK_STATE_DOWN; 5648 uint64_t baudrate = 0; 5649 5650 status = (const struct ixl_aq_link_status *)iaq->iaq_param; 5651 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) { 5652 ifm_active |= IFM_NONE; 5653 goto done; 5654 } 5655 5656 ifm_active |= IFM_FDX; 5657 ifm_status |= IFM_ACTIVE; 5658 link_state = LINK_STATE_UP; 5659 5660 itype = ixl_search_phy_type(status->phy_type); 5661 if (itype != NULL) 5662 ifm_active |= itype->ifm_type; 5663 5664 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX)) 5665 ifm_active |= IFM_ETH_TXPAUSE; 5666 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX)) 5667 ifm_active |= IFM_ETH_RXPAUSE; 5668 5669 baudrate = ixl_search_link_speed(status->link_speed); 5670 5671 done: 5672 /* sc->sc_cfg_lock held expect during attach */ 5673 sc->sc_media_active = ifm_active; 5674 sc->sc_media_status = ifm_status; 5675 5676 sc->sc_ec.ec_if.if_baudrate = baudrate; 5677 5678 return link_state; 5679 } 5680 5681 static int 5682 ixl_establish_intx(struct ixl_softc *sc) 5683 { 5684 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5685 pci_intr_handle_t *intr; 5686 char xnamebuf[32]; 5687 char intrbuf[PCI_INTRSTR_LEN]; 5688 char const *intrstr; 5689 5690 KASSERT(sc->sc_nintrs == 1); 5691 5692 intr = &sc->sc_ihp[0]; 5693 5694 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 5695 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy", 5696 device_xname(sc->sc_dev)); 5697 5698 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr, 5699 sc, xnamebuf); 5700 5701 if (sc->sc_ihs[0] == NULL) { 5702 aprint_error_dev(sc->sc_dev, 5703 "unable to establish interrupt at %s\n", intrstr); 5704 return -1; 5705 } 5706 5707 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 5708 return 0; 5709 } 5710 5711 static int 5712 ixl_establish_msix(struct ixl_softc *sc) 5713 { 5714 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5715 kcpuset_t *affinity; 5716 unsigned int vector = 0; 5717 unsigned int i; 5718 int affinity_to, r; 5719 char xnamebuf[32]; 5720 char intrbuf[PCI_INTRSTR_LEN]; 5721 char const 
*intrstr; 5722 5723 kcpuset_create(&affinity, false); 5724 5725 /* the "other" intr is mapped to vector 0 */ 5726 vector = 0; 5727 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5728 intrbuf, sizeof(intrbuf)); 5729 snprintf(xnamebuf, sizeof(xnamebuf), "%s others", 5730 device_xname(sc->sc_dev)); 5731 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5732 sc->sc_ihp[vector], IPL_NET, ixl_other_intr, 5733 sc, xnamebuf); 5734 if (sc->sc_ihs[vector] == NULL) { 5735 aprint_error_dev(sc->sc_dev, 5736 "unable to establish interrupt at %s\n", intrstr); 5737 goto fail; 5738 } 5739 5740 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr); 5741 5742 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5743 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu; 5744 5745 kcpuset_zero(affinity); 5746 kcpuset_set(affinity, affinity_to); 5747 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5748 if (r == 0) { 5749 aprint_normal(", affinity to %u", affinity_to); 5750 } 5751 aprint_normal("\n"); 5752 vector++; 5753 5754 sc->sc_msix_vector_queue = vector; 5755 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5756 5757 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 5758 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5759 intrbuf, sizeof(intrbuf)); 5760 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d", 5761 device_xname(sc->sc_dev), i); 5762 5763 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5764 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr, 5765 (void *)&sc->sc_qps[i], xnamebuf); 5766 5767 if (sc->sc_ihs[vector] == NULL) { 5768 aprint_error_dev(sc->sc_dev, 5769 "unable to establish interrupt at %s\n", intrstr); 5770 goto fail; 5771 } 5772 5773 aprint_normal_dev(sc->sc_dev, 5774 "for TXRX%d interrupt at %s", i, intrstr); 5775 5776 kcpuset_zero(affinity); 5777 kcpuset_set(affinity, affinity_to); 5778 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5779 if (r == 0) { 5780 aprint_normal(", affinity to %u", affinity_to); 5781 affinity_to = (affinity_to + 1) % ncpu; 5782 } 5783 aprint_normal("\n"); 5784 vector++; 5785 } 5786 5787 kcpuset_destroy(affinity); 5788 5789 return 0; 5790 fail: 5791 for (i = 0; i < vector; i++) { 5792 pci_intr_disestablish(pc, sc->sc_ihs[i]); 5793 } 5794 5795 sc->sc_msix_vector_queue = 0; 5796 sc->sc_msix_vector_queue = 0; 5797 kcpuset_destroy(affinity); 5798 5799 return -1; 5800 } 5801 5802 static void 5803 ixl_config_queue_intr(struct ixl_softc *sc) 5804 { 5805 unsigned int i, vector; 5806 5807 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5808 vector = sc->sc_msix_vector_queue; 5809 } else { 5810 vector = I40E_INTR_NOTX_INTR; 5811 5812 ixl_wr(sc, I40E_PFINT_LNKLST0, 5813 (I40E_INTR_NOTX_QUEUE << 5814 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) | 5815 (I40E_QUEUE_TYPE_RX << 5816 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5817 } 5818 5819 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 5820 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0); 5821 ixl_flush(sc); 5822 5823 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), 5824 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | 5825 (I40E_QUEUE_TYPE_RX << 5826 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5827 5828 ixl_wr(sc, I40E_QINT_RQCTL(i), 5829 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 5830 (I40E_ITR_INDEX_RX << 5831 I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 5832 (I40E_INTR_NOTX_RX_QUEUE << 5833 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) | 5834 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 5835 (I40E_QUEUE_TYPE_TX << 5836 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 5837 I40E_QINT_RQCTL_CAUSE_ENA_MASK); 5838 5839 ixl_wr(sc, 
I40E_QINT_TQCTL(i), 5840 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 5841 (I40E_ITR_INDEX_TX << 5842 I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 5843 (I40E_INTR_NOTX_TX_QUEUE << 5844 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) | 5845 (I40E_QUEUE_TYPE_EOL << 5846 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | 5847 (I40E_QUEUE_TYPE_RX << 5848 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) | 5849 I40E_QINT_TQCTL_CAUSE_ENA_MASK); 5850 5851 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5852 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i), 5853 sc->sc_itr_rx); 5854 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i), 5855 sc->sc_itr_tx); 5856 vector++; 5857 } 5858 } 5859 ixl_flush(sc); 5860 5861 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx); 5862 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx); 5863 ixl_flush(sc); 5864 } 5865 5866 static void 5867 ixl_config_other_intr(struct ixl_softc *sc) 5868 { 5869 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5870 (void)ixl_rd(sc, I40E_PFINT_ICR0); 5871 5872 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 5873 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 5874 I40E_PFINT_ICR0_ENA_GRST_MASK | 5875 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 5876 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 5877 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 5878 I40E_PFINT_ICR0_ENA_VFLR_MASK | 5879 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 5880 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 5881 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK); 5882 5883 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF); 5884 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0); 5885 ixl_wr(sc, I40E_PFINT_STAT_CTL0, 5886 (I40E_ITR_INDEX_OTHER << 5887 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)); 5888 ixl_flush(sc); 5889 } 5890 5891 static int 5892 ixl_setup_interrupts(struct ixl_softc *sc) 5893 { 5894 struct pci_attach_args *pa = &sc->sc_pa; 5895 pci_intr_type_t max_type, intr_type; 5896 int counts[PCI_INTR_TYPE_SIZE]; 5897 int error; 5898 unsigned int i; 5899 bool retry; 5900 5901 memset(counts, 0, sizeof(counts)); 5902 max_type = PCI_INTR_TYPE_MSIX; 5903 /* QPs + other interrupt */ 5904 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1; 5905 counts[PCI_INTR_TYPE_INTX] = 1; 5906 5907 if (ixl_param_nomsix) 5908 counts[PCI_INTR_TYPE_MSIX] = 0; 5909 5910 do { 5911 retry = false; 5912 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type); 5913 if (error != 0) { 5914 aprint_error_dev(sc->sc_dev, 5915 "couldn't map interrupt\n"); 5916 break; 5917 } 5918 5919 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]); 5920 sc->sc_nintrs = counts[intr_type]; 5921 KASSERT(sc->sc_nintrs > 0); 5922 5923 for (i = 0; i < sc->sc_nintrs; i++) { 5924 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i], 5925 PCI_INTR_MPSAFE, true); 5926 } 5927 5928 sc->sc_ihs = kmem_zalloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs, 5929 KM_SLEEP); 5930 5931 if (intr_type == PCI_INTR_TYPE_MSIX) { 5932 error = ixl_establish_msix(sc); 5933 if (error) { 5934 counts[PCI_INTR_TYPE_MSIX] = 0; 5935 retry = true; 5936 } 5937 } else if (intr_type == PCI_INTR_TYPE_INTX) { 5938 error = ixl_establish_intx(sc); 5939 } else { 5940 error = -1; 5941 } 5942 5943 if (error) { 5944 kmem_free(sc->sc_ihs, 5945 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 5946 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 5947 } else { 5948 sc->sc_intrtype = intr_type; 5949 } 5950 } while (retry); 5951 5952 return error; 5953 } 5954 5955 static void 5956 ixl_teardown_interrupts(struct ixl_softc *sc) 5957 { 5958 struct pci_attach_args *pa = &sc->sc_pa; 5959 unsigned int i; 5960 5961 for (i = 0; i < sc->sc_nintrs; i++) { 5962 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]); 
5963 } 5964 5965 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 5966 5967 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 5968 sc->sc_ihs = NULL; 5969 sc->sc_nintrs = 0; 5970 } 5971 5972 static int 5973 ixl_setup_stats(struct ixl_softc *sc) 5974 { 5975 struct ixl_queue_pair *qp; 5976 struct ixl_tx_ring *txr; 5977 struct ixl_rx_ring *rxr; 5978 struct ixl_stats_counters *isc; 5979 unsigned int i; 5980 5981 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 5982 qp = &sc->sc_qps[i]; 5983 txr = qp->qp_txr; 5984 rxr = qp->qp_rxr; 5985 5986 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC, 5987 NULL, qp->qp_name, "m_defrag successed"); 5988 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC, 5989 NULL, qp->qp_name, "m_defrag_failed"); 5990 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC, 5991 NULL, qp->qp_name, "Dropped in pcq"); 5992 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC, 5993 NULL, qp->qp_name, "Deferred transmit"); 5994 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, 5995 NULL, qp->qp_name, "Interrupt on queue"); 5996 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC, 5997 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 5998 5999 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC, 6000 NULL, qp->qp_name, "MGETHDR failed"); 6001 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC, 6002 NULL, qp->qp_name, "MCLGET failed"); 6003 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed, 6004 EVCNT_TYPE_MISC, NULL, qp->qp_name, 6005 "bus_dmamap_load_mbuf failed"); 6006 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, 6007 NULL, qp->qp_name, "Interrupt on queue"); 6008 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC, 6009 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 6010 } 6011 6012 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR, 6013 NULL, device_xname(sc->sc_dev), "Interrupt for other events"); 6014 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC, 6015 NULL, device_xname(sc->sc_dev), "Link status event"); 6016 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC, 6017 NULL, device_xname(sc->sc_dev), "ECC error"); 6018 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC, 6019 NULL, device_xname(sc->sc_dev), "PCI exception"); 6020 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC, 6021 NULL, device_xname(sc->sc_dev), "Critical error"); 6022 6023 isc = &sc->sc_stats_counters; 6024 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC, 6025 NULL, device_xname(sc->sc_dev), "CRC errors"); 6026 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC, 6027 NULL, device_xname(sc->sc_dev), "Illegal bytes"); 6028 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC, 6029 NULL, device_xname(sc->sc_dev), "Mac local faults"); 6030 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC, 6031 NULL, device_xname(sc->sc_dev), "Mac remote faults"); 6032 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC, 6033 NULL, device_xname(sc->sc_dev), "Rx xon"); 6034 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC, 6035 NULL, device_xname(sc->sc_dev), "Tx xon"); 6036 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC, 6037 NULL, device_xname(sc->sc_dev), "Rx xoff"); 6038 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC, 6039 NULL, device_xname(sc->sc_dev), "Tx xoff"); 6040 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC, 6041 NULL, device_xname(sc->sc_dev), "Rx 
fragments"); 6042 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC, 6043 NULL, device_xname(sc->sc_dev), "Rx jabber"); 6044 6045 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC, 6046 NULL, device_xname(sc->sc_dev), "Rx size 64"); 6047 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC, 6048 NULL, device_xname(sc->sc_dev), "Rx size 127"); 6049 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC, 6050 NULL, device_xname(sc->sc_dev), "Rx size 255"); 6051 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC, 6052 NULL, device_xname(sc->sc_dev), "Rx size 511"); 6053 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC, 6054 NULL, device_xname(sc->sc_dev), "Rx size 1023"); 6055 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC, 6056 NULL, device_xname(sc->sc_dev), "Rx size 1522"); 6057 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC, 6058 NULL, device_xname(sc->sc_dev), "Rx jumbo packets"); 6059 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC, 6060 NULL, device_xname(sc->sc_dev), "Rx under size"); 6061 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC, 6062 NULL, device_xname(sc->sc_dev), "Rx over size"); 6063 6064 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC, 6065 NULL, device_xname(sc->sc_dev), "Rx bytes / port"); 6066 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC, 6067 NULL, device_xname(sc->sc_dev), "Rx discards / port"); 6068 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC, 6069 NULL, device_xname(sc->sc_dev), "Rx unicast / port"); 6070 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC, 6071 NULL, device_xname(sc->sc_dev), "Rx multicast / port"); 6072 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC, 6073 NULL, device_xname(sc->sc_dev), "Rx broadcast / port"); 6074 6075 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC, 6076 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi"); 6077 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC, 6078 NULL, device_xname(sc->sc_dev), "Rx discard / vsi"); 6079 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC, 6080 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi"); 6081 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC, 6082 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi"); 6083 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC, 6084 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi"); 6085 6086 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC, 6087 NULL, device_xname(sc->sc_dev), "Tx size 64"); 6088 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC, 6089 NULL, device_xname(sc->sc_dev), "Tx size 127"); 6090 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC, 6091 NULL, device_xname(sc->sc_dev), "Tx size 255"); 6092 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC, 6093 NULL, device_xname(sc->sc_dev), "Tx size 511"); 6094 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC, 6095 NULL, device_xname(sc->sc_dev), "Tx size 1023"); 6096 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC, 6097 NULL, device_xname(sc->sc_dev), "Tx size 1522"); 6098 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC, 6099 NULL, device_xname(sc->sc_dev), "Tx jumbo packets"); 6100 6101 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC, 6102 NULL, device_xname(sc->sc_dev), "Tx bytes / port"); 6103 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC, 6104 NULL, 
device_xname(sc->sc_dev), 6105 "Tx dropped due to link down / port"); 6106 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC, 6107 NULL, device_xname(sc->sc_dev), "Tx unicast / port"); 6108 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC, 6109 NULL, device_xname(sc->sc_dev), "Tx multicast / port"); 6110 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC, 6111 NULL, device_xname(sc->sc_dev), "Tx broadcast / port"); 6112 6113 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC, 6114 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi"); 6115 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC, 6116 NULL, device_xname(sc->sc_dev), "Tx errors / vsi"); 6117 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC, 6118 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi"); 6119 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC, 6120 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi"); 6121 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC, 6122 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi"); 6123 6124 sc->sc_stats_intval = ixl_param_stats_interval; 6125 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE); 6126 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc); 6127 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc); 6128 6129 return 0; 6130 } 6131 6132 static void 6133 ixl_teardown_stats(struct ixl_softc *sc) 6134 { 6135 struct ixl_tx_ring *txr; 6136 struct ixl_rx_ring *rxr; 6137 struct ixl_stats_counters *isc; 6138 unsigned int i; 6139 6140 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 6141 txr = sc->sc_qps[i].qp_txr; 6142 rxr = sc->sc_qps[i].qp_rxr; 6143 6144 evcnt_detach(&txr->txr_defragged); 6145 evcnt_detach(&txr->txr_defrag_failed); 6146 evcnt_detach(&txr->txr_pcqdrop); 6147 evcnt_detach(&txr->txr_transmitdef); 6148 evcnt_detach(&txr->txr_intr); 6149 evcnt_detach(&txr->txr_defer); 6150 6151 evcnt_detach(&rxr->rxr_mgethdr_failed); 6152 evcnt_detach(&rxr->rxr_mgetcl_failed); 6153 evcnt_detach(&rxr->rxr_mbuf_load_failed); 6154 evcnt_detach(&rxr->rxr_intr); 6155 evcnt_detach(&rxr->rxr_defer); 6156 } 6157 6158 isc = &sc->sc_stats_counters; 6159 evcnt_detach(&isc->isc_crc_errors); 6160 evcnt_detach(&isc->isc_illegal_bytes); 6161 evcnt_detach(&isc->isc_mac_local_faults); 6162 evcnt_detach(&isc->isc_mac_remote_faults); 6163 evcnt_detach(&isc->isc_link_xon_rx); 6164 evcnt_detach(&isc->isc_link_xon_tx); 6165 evcnt_detach(&isc->isc_link_xoff_rx); 6166 evcnt_detach(&isc->isc_link_xoff_tx); 6167 evcnt_detach(&isc->isc_rx_fragments); 6168 evcnt_detach(&isc->isc_rx_jabber); 6169 evcnt_detach(&isc->isc_rx_bytes); 6170 evcnt_detach(&isc->isc_rx_discards); 6171 evcnt_detach(&isc->isc_rx_unicast); 6172 evcnt_detach(&isc->isc_rx_multicast); 6173 evcnt_detach(&isc->isc_rx_broadcast); 6174 evcnt_detach(&isc->isc_rx_size_64); 6175 evcnt_detach(&isc->isc_rx_size_127); 6176 evcnt_detach(&isc->isc_rx_size_255); 6177 evcnt_detach(&isc->isc_rx_size_511); 6178 evcnt_detach(&isc->isc_rx_size_1023); 6179 evcnt_detach(&isc->isc_rx_size_1522); 6180 evcnt_detach(&isc->isc_rx_size_big); 6181 evcnt_detach(&isc->isc_rx_undersize); 6182 evcnt_detach(&isc->isc_rx_oversize); 6183 evcnt_detach(&isc->isc_tx_bytes); 6184 evcnt_detach(&isc->isc_tx_dropped_link_down); 6185 evcnt_detach(&isc->isc_tx_unicast); 6186 evcnt_detach(&isc->isc_tx_multicast); 6187 evcnt_detach(&isc->isc_tx_broadcast); 6188 evcnt_detach(&isc->isc_tx_size_64); 6189 evcnt_detach(&isc->isc_tx_size_127); 6190 evcnt_detach(&isc->isc_tx_size_255); 6191 
evcnt_detach(&isc->isc_tx_size_511); 6192 evcnt_detach(&isc->isc_tx_size_1023); 6193 evcnt_detach(&isc->isc_tx_size_1522); 6194 evcnt_detach(&isc->isc_tx_size_big); 6195 evcnt_detach(&isc->isc_vsi_rx_discards); 6196 evcnt_detach(&isc->isc_vsi_rx_bytes); 6197 evcnt_detach(&isc->isc_vsi_rx_unicast); 6198 evcnt_detach(&isc->isc_vsi_rx_multicast); 6199 evcnt_detach(&isc->isc_vsi_rx_broadcast); 6200 evcnt_detach(&isc->isc_vsi_tx_errors); 6201 evcnt_detach(&isc->isc_vsi_tx_bytes); 6202 evcnt_detach(&isc->isc_vsi_tx_unicast); 6203 evcnt_detach(&isc->isc_vsi_tx_multicast); 6204 evcnt_detach(&isc->isc_vsi_tx_broadcast); 6205 6206 evcnt_detach(&sc->sc_event_atq); 6207 evcnt_detach(&sc->sc_event_link); 6208 evcnt_detach(&sc->sc_event_ecc_err); 6209 evcnt_detach(&sc->sc_event_pci_exception); 6210 evcnt_detach(&sc->sc_event_crit_err); 6211 6212 callout_destroy(&sc->sc_stats_callout); 6213 } 6214 6215 static void 6216 ixl_stats_callout(void *xsc) 6217 { 6218 struct ixl_softc *sc = xsc; 6219 6220 ixl_work_add(sc->sc_workq, &sc->sc_stats_task); 6221 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 6222 } 6223 6224 static uint64_t 6225 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo, 6226 uint64_t *offset, bool has_offset) 6227 { 6228 uint64_t value, delta; 6229 int bitwidth; 6230 6231 bitwidth = reg_hi == 0 ? 32 : 48; 6232 6233 value = ixl_rd(sc, reg_lo); 6234 6235 if (bitwidth > 32) { 6236 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32); 6237 } 6238 6239 if (__predict_true(has_offset)) { 6240 delta = value; 6241 if (value < *offset) 6242 delta += ((uint64_t)1 << bitwidth); 6243 delta -= *offset; 6244 } else { 6245 delta = 0; 6246 } 6247 atomic_swap_64(offset, value); 6248 6249 return delta; 6250 } 6251 6252 static void 6253 ixl_stats_update(void *xsc) 6254 { 6255 struct ixl_softc *sc = xsc; 6256 struct ixl_stats_counters *isc; 6257 uint64_t delta; 6258 6259 isc = &sc->sc_stats_counters; 6260 6261 /* errors */ 6262 delta = ixl_stat_delta(sc, 6263 0, I40E_GLPRT_CRCERRS(sc->sc_port), 6264 &isc->isc_crc_errors_offset, isc->isc_has_offset); 6265 atomic_add_64(&isc->isc_crc_errors.ev_count, delta); 6266 6267 delta = ixl_stat_delta(sc, 6268 0, I40E_GLPRT_ILLERRC(sc->sc_port), 6269 &isc->isc_illegal_bytes_offset, isc->isc_has_offset); 6270 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta); 6271 6272 /* rx */ 6273 delta = ixl_stat_delta(sc, 6274 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port), 6275 &isc->isc_rx_bytes_offset, isc->isc_has_offset); 6276 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta); 6277 6278 delta = ixl_stat_delta(sc, 6279 0, I40E_GLPRT_RDPC(sc->sc_port), 6280 &isc->isc_rx_discards_offset, isc->isc_has_offset); 6281 atomic_add_64(&isc->isc_rx_discards.ev_count, delta); 6282 6283 delta = ixl_stat_delta(sc, 6284 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port), 6285 &isc->isc_rx_unicast_offset, isc->isc_has_offset); 6286 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta); 6287 6288 delta = ixl_stat_delta(sc, 6289 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port), 6290 &isc->isc_rx_multicast_offset, isc->isc_has_offset); 6291 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta); 6292 6293 delta = ixl_stat_delta(sc, 6294 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port), 6295 &isc->isc_rx_broadcast_offset, isc->isc_has_offset); 6296 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta); 6297 6298 /* Packet size stats rx */ 6299 delta = ixl_stat_delta(sc, 6300 I40E_GLPRT_PRC64H(sc->sc_port), 
	    I40E_GLPRT_PRC64L(sc->sc_port),
6301 	    &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6302 	atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6303 
6304 	delta = ixl_stat_delta(sc,
6305 	    I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6306 	    &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6307 	atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6308 
6309 	delta = ixl_stat_delta(sc,
6310 	    I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6311 	    &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6312 	atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6313 
6314 	delta = ixl_stat_delta(sc,
6315 	    I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6316 	    &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6317 	atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6318 
6319 	delta = ixl_stat_delta(sc,
6320 	    I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6321 	    &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6322 	atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6323 
6324 	delta = ixl_stat_delta(sc,
6325 	    I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6326 	    &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6327 	atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6328 
6329 	delta = ixl_stat_delta(sc,
6330 	    I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6331 	    &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6332 	atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6333 
6334 	delta = ixl_stat_delta(sc,
6335 	    0, I40E_GLPRT_RUC(sc->sc_port),
6336 	    &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6337 	atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6338 
6339 	delta = ixl_stat_delta(sc,
6340 	    0, I40E_GLPRT_ROC(sc->sc_port),
6341 	    &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6342 	atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6343 
6344 	/* tx */
6345 	delta = ixl_stat_delta(sc,
6346 	    I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6347 	    &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6348 	atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6349 
6350 	delta = ixl_stat_delta(sc,
6351 	    0, I40E_GLPRT_TDOLD(sc->sc_port),
6352 	    &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6353 	atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6354 
6355 	delta = ixl_stat_delta(sc,
6356 	    I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6357 	    &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6358 	atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6359 
6360 	delta = ixl_stat_delta(sc,
6361 	    I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6362 	    &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6363 	atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6364 
6365 	delta = ixl_stat_delta(sc,
6366 	    I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6367 	    &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6368 	atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6369 
6370 	/* Packet size stats tx */
6371 	delta = ixl_stat_delta(sc,
6372 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6373 	    &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6374 	atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6375 
6376 	delta = ixl_stat_delta(sc,
6377 	    I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6378 	    &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6379 	atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6380 
6381 	delta = ixl_stat_delta(sc,
6382 	    I40E_GLPRT_PTC255H(sc->sc_port),
I40E_GLPRT_PTC255L(sc->sc_port), 6383 &isc->isc_tx_size_255_offset, isc->isc_has_offset); 6384 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta); 6385 6386 delta = ixl_stat_delta(sc, 6387 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port), 6388 &isc->isc_tx_size_511_offset, isc->isc_has_offset); 6389 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta); 6390 6391 delta = ixl_stat_delta(sc, 6392 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port), 6393 &isc->isc_tx_size_1023_offset, isc->isc_has_offset); 6394 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta); 6395 6396 delta = ixl_stat_delta(sc, 6397 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port), 6398 &isc->isc_tx_size_1522_offset, isc->isc_has_offset); 6399 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta); 6400 6401 delta = ixl_stat_delta(sc, 6402 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port), 6403 &isc->isc_tx_size_big_offset, isc->isc_has_offset); 6404 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta); 6405 6406 /* mac faults */ 6407 delta = ixl_stat_delta(sc, 6408 0, I40E_GLPRT_MLFC(sc->sc_port), 6409 &isc->isc_mac_local_faults_offset, isc->isc_has_offset); 6410 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta); 6411 6412 delta = ixl_stat_delta(sc, 6413 0, I40E_GLPRT_MRFC(sc->sc_port), 6414 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset); 6415 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta); 6416 6417 /* Flow control (LFC) stats */ 6418 delta = ixl_stat_delta(sc, 6419 0, I40E_GLPRT_LXONRXC(sc->sc_port), 6420 &isc->isc_link_xon_rx_offset, isc->isc_has_offset); 6421 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta); 6422 6423 delta = ixl_stat_delta(sc, 6424 0, I40E_GLPRT_LXONTXC(sc->sc_port), 6425 &isc->isc_link_xon_tx_offset, isc->isc_has_offset); 6426 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta); 6427 6428 delta = ixl_stat_delta(sc, 6429 0, I40E_GLPRT_LXOFFRXC(sc->sc_port), 6430 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset); 6431 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta); 6432 6433 delta = ixl_stat_delta(sc, 6434 0, I40E_GLPRT_LXOFFTXC(sc->sc_port), 6435 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset); 6436 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta); 6437 6438 /* fragments */ 6439 delta = ixl_stat_delta(sc, 6440 0, I40E_GLPRT_RFC(sc->sc_port), 6441 &isc->isc_rx_fragments_offset, isc->isc_has_offset); 6442 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta); 6443 6444 delta = ixl_stat_delta(sc, 6445 0, I40E_GLPRT_RJC(sc->sc_port), 6446 &isc->isc_rx_jabber_offset, isc->isc_has_offset); 6447 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta); 6448 6449 /* VSI rx counters */ 6450 delta = ixl_stat_delta(sc, 6451 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx), 6452 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset); 6453 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta); 6454 6455 delta = ixl_stat_delta(sc, 6456 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx), 6457 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx), 6458 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset); 6459 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta); 6460 6461 delta = ixl_stat_delta(sc, 6462 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx), 6463 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx), 6464 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset); 6465 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta); 6466 6467 delta = ixl_stat_delta(sc, 6468 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx), 6469 
I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx), 6470 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset); 6471 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta); 6472 6473 delta = ixl_stat_delta(sc, 6474 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx), 6475 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx), 6476 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset); 6477 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta); 6478 6479 /* VSI tx counters */ 6480 delta = ixl_stat_delta(sc, 6481 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx), 6482 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset); 6483 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta); 6484 6485 delta = ixl_stat_delta(sc, 6486 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx), 6487 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx), 6488 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset); 6489 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta); 6490 6491 delta = ixl_stat_delta(sc, 6492 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx), 6493 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx), 6494 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset); 6495 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta); 6496 6497 delta = ixl_stat_delta(sc, 6498 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx), 6499 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx), 6500 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset); 6501 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta); 6502 6503 delta = ixl_stat_delta(sc, 6504 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx), 6505 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx), 6506 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset); 6507 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta); 6508 } 6509 6510 static int 6511 ixl_setup_sysctls(struct ixl_softc *sc) 6512 { 6513 const char *devname; 6514 struct sysctllog **log; 6515 const struct sysctlnode *rnode, *rxnode, *txnode; 6516 int error; 6517 6518 log = &sc->sc_sysctllog; 6519 devname = device_xname(sc->sc_dev); 6520 6521 error = sysctl_createv(log, 0, NULL, &rnode, 6522 0, CTLTYPE_NODE, devname, 6523 SYSCTL_DESCR("ixl information and settings"), 6524 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 6525 if (error) 6526 goto out; 6527 6528 error = sysctl_createv(log, 0, &rnode, NULL, 6529 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue", 6530 SYSCTL_DESCR("Use workqueue for packet processing"), 6531 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL); 6532 if (error) 6533 goto out; 6534 6535 error = sysctl_createv(log, 0, &rnode, NULL, 6536 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval", 6537 SYSCTL_DESCR("Statistics collection interval in milliseconds"), 6538 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL); 6539 6540 error = sysctl_createv(log, 0, &rnode, &rxnode, 6541 0, CTLTYPE_NODE, "rx", 6542 SYSCTL_DESCR("ixl information and settings for Rx"), 6543 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 6544 if (error) 6545 goto out; 6546 6547 error = sysctl_createv(log, 0, &rxnode, NULL, 6548 CTLFLAG_READWRITE, CTLTYPE_INT, "itr", 6549 SYSCTL_DESCR("Interrupt Throttling"), 6550 ixl_sysctl_itr_handler, 0, 6551 (void *)sc, 0, CTL_CREATE, CTL_EOL); 6552 if (error) 6553 goto out; 6554 6555 error = sysctl_createv(log, 0, &rxnode, NULL, 6556 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num", 6557 SYSCTL_DESCR("the number of rx descriptors"), 6558 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL); 6559 if (error) 6560 goto out; 6561 6562 error = sysctl_createv(log, 0, &rxnode, NULL, 6563 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 
6564 	    SYSCTL_DESCR("max number of Rx packets"
6565 	    " to process for interrupt processing"),
6566 	    NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6567 	if (error)
6568 		goto out;
6569 
6570 	error = sysctl_createv(log, 0, &rxnode, NULL,
6571 	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6572 	    SYSCTL_DESCR("max number of Rx packets"
6573 	    " to process for deferred processing"),
6574 	    NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6575 	if (error)
6576 		goto out;
6577 
6578 	error = sysctl_createv(log, 0, &rnode, &txnode,
6579 	    0, CTLTYPE_NODE, "tx",
6580 	    SYSCTL_DESCR("ixl information and settings for Tx"),
6581 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6582 	if (error)
6583 		goto out;
6584 
6585 	error = sysctl_createv(log, 0, &txnode, NULL,
6586 	    CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6587 	    SYSCTL_DESCR("Interrupt Throttling"),
6588 	    ixl_sysctl_itr_handler, 0,
6589 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
6590 	if (error)
6591 		goto out;
6592 
6593 	error = sysctl_createv(log, 0, &txnode, NULL,
6594 	    CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
6595 	    SYSCTL_DESCR("the number of tx descriptors"),
6596 	    NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
6597 	if (error)
6598 		goto out;
6599 
6600 	error = sysctl_createv(log, 0, &txnode, NULL,
6601 	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6602 	    SYSCTL_DESCR("max number of Tx packets"
6603 	    " to process for interrupt processing"),
6604 	    NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6605 	if (error)
6606 		goto out;
6607 
6608 	error = sysctl_createv(log, 0, &txnode, NULL,
6609 	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6610 	    SYSCTL_DESCR("max number of Tx packets"
6611 	    " to process for deferred processing"),
6612 	    NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6613 	if (error)
6614 		goto out;
6615 
6616 out:
6617 	if (error) {
6618 		aprint_error_dev(sc->sc_dev,
6619 		    "unable to create sysctl node\n");
6620 		sysctl_teardown(log);
6621 	}
6622 
6623 	return error;
6624 }
6625 
6626 static void
6627 ixl_teardown_sysctls(struct ixl_softc *sc)
6628 {
6629 
6630 	sysctl_teardown(&sc->sc_sysctllog);
6631 }
6632 
6633 static bool
6634 ixl_sysctlnode_is_rx(struct sysctlnode *node)
6635 {
6636 
6637 	if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
6638 		return true;
6639 
6640 	return false;
6641 }
6642 
6643 static int
6644 ixl_sysctl_itr_handler(SYSCTLFN_ARGS)
6645 {
6646 	struct sysctlnode node = *rnode;
6647 	struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data;
6648 	struct ifnet *ifp = &sc->sc_ec.ec_if;
6649 	uint32_t newitr, *itrptr;
6650 	int error;
6651 
6652 	if (ixl_sysctlnode_is_rx(&node)) {
6653 		itrptr = &sc->sc_itr_rx;
6654 	} else {
6655 		itrptr = &sc->sc_itr_tx;
6656 	}
6657 
6658 	newitr = *itrptr;
6659 	node.sysctl_data = &newitr;
6660 	node.sysctl_size = sizeof(newitr);
6661 
6662 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
6663 
6664 	if (error || newp == NULL)
6665 		return error;
6666 
6667 	/* ITRs are applied in ixl_init() to keep the implementation simple */
6668 	if (ISSET(ifp->if_flags, IFF_RUNNING))
6669 		return EBUSY;
6670 
6671 	if (newitr > 0x07ff)
6672 		return EINVAL;
6673 
6674 	*itrptr = newitr;
6675 
6676 	return 0;
6677 }
6678 
6679 static struct workqueue *
6680 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6681 {
6682 	struct workqueue *wq;
6683 	int error;
6684 
6685 	error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6686 	    prio, ipl, flags);
6687 
6688 	if (error)
6689 		return NULL;
6690 
6691 	return wq;
6692 }
6693 
6694 static void
6695 ixl_workq_destroy(struct workqueue *wq)
6696 {
6697 
6698 workqueue_destroy(wq); 6699 } 6700 6701 static void 6702 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg) 6703 { 6704 6705 memset(work, 0, sizeof(*work)); 6706 work->ixw_func = func; 6707 work->ixw_arg = arg; 6708 } 6709 6710 static void 6711 ixl_work_add(struct workqueue *wq, struct ixl_work *work) 6712 { 6713 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0) 6714 return; 6715 6716 kpreempt_disable(); 6717 workqueue_enqueue(wq, &work->ixw_cookie, NULL); 6718 kpreempt_enable(); 6719 } 6720 6721 static void 6722 ixl_work_wait(struct workqueue *wq, struct ixl_work *work) 6723 { 6724 6725 workqueue_wait(wq, &work->ixw_cookie); 6726 } 6727 6728 static void 6729 ixl_workq_work(struct work *wk, void *context) 6730 { 6731 struct ixl_work *work; 6732 6733 work = container_of(wk, struct ixl_work, ixw_cookie); 6734 6735 atomic_swap_uint(&work->ixw_added, 0); 6736 work->ixw_func(work->ixw_arg); 6737 } 6738 6739 static int 6740 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv) 6741 { 6742 struct ixl_aq_desc iaq; 6743 6744 memset(&iaq, 0, sizeof(iaq)); 6745 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ); 6746 iaq.iaq_param[1] = htole32(reg); 6747 6748 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6749 return ETIMEDOUT; 6750 6751 switch (htole16(iaq.iaq_retval)) { 6752 case IXL_AQ_RC_OK: 6753 /* success */ 6754 break; 6755 case IXL_AQ_RC_EACCES: 6756 return EPERM; 6757 case IXL_AQ_RC_EAGAIN: 6758 return EAGAIN; 6759 default: 6760 return EIO; 6761 } 6762 6763 *rv = htole32(iaq.iaq_param[3]); 6764 return 0; 6765 } 6766 6767 static uint32_t 6768 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg) 6769 { 6770 uint32_t val; 6771 int rv, retry, retry_limit; 6772 6773 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6774 retry_limit = 5; 6775 } else { 6776 retry_limit = 0; 6777 } 6778 6779 for (retry = 0; retry < retry_limit; retry++) { 6780 rv = ixl_rx_ctl_read(sc, reg, &val); 6781 if (rv == 0) 6782 return val; 6783 else if (rv == EAGAIN) 6784 delaymsec(1); 6785 else 6786 break; 6787 } 6788 6789 val = ixl_rd(sc, reg); 6790 6791 return val; 6792 } 6793 6794 static int 6795 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6796 { 6797 struct ixl_aq_desc iaq; 6798 6799 memset(&iaq, 0, sizeof(iaq)); 6800 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE); 6801 iaq.iaq_param[1] = htole32(reg); 6802 iaq.iaq_param[3] = htole32(value); 6803 6804 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6805 return ETIMEDOUT; 6806 6807 switch (htole16(iaq.iaq_retval)) { 6808 case IXL_AQ_RC_OK: 6809 /* success */ 6810 break; 6811 case IXL_AQ_RC_EACCES: 6812 return EPERM; 6813 case IXL_AQ_RC_EAGAIN: 6814 return EAGAIN; 6815 default: 6816 return EIO; 6817 } 6818 6819 return 0; 6820 } 6821 6822 static void 6823 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6824 { 6825 int rv, retry, retry_limit; 6826 6827 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6828 retry_limit = 5; 6829 } else { 6830 retry_limit = 0; 6831 } 6832 6833 for (retry = 0; retry < retry_limit; retry++) { 6834 rv = ixl_rx_ctl_write(sc, reg, value); 6835 if (rv == 0) 6836 return; 6837 else if (rv == EAGAIN) 6838 delaymsec(1); 6839 else 6840 break; 6841 } 6842 6843 ixl_wr(sc, reg, value); 6844 } 6845 6846 static int 6847 ixl_nvm_lock(struct ixl_softc *sc, char rw) 6848 { 6849 struct ixl_aq_desc iaq; 6850 struct ixl_aq_req_resource_param *param; 6851 int rv; 6852 6853 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6854 return 0; 6855 6856 memset(&iaq, 0, sizeof(iaq)); 6857 iaq.iaq_opcode 
= htole16(IXL_AQ_OP_REQUEST_RESOURCE); 6858 6859 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param; 6860 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 6861 if (rw == 'R') { 6862 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ); 6863 } else { 6864 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE); 6865 } 6866 6867 rv = ixl_atq_poll(sc, &iaq, 250); 6868 6869 if (rv != 0) 6870 return ETIMEDOUT; 6871 6872 switch (le16toh(iaq.iaq_retval)) { 6873 case IXL_AQ_RC_OK: 6874 break; 6875 case IXL_AQ_RC_EACCES: 6876 return EACCES; 6877 case IXL_AQ_RC_EBUSY: 6878 return EBUSY; 6879 case IXL_AQ_RC_EPERM: 6880 return EPERM; 6881 } 6882 6883 return 0; 6884 } 6885 6886 static int 6887 ixl_nvm_unlock(struct ixl_softc *sc) 6888 { 6889 struct ixl_aq_desc iaq; 6890 struct ixl_aq_rel_resource_param *param; 6891 int rv; 6892 6893 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6894 return 0; 6895 6896 memset(&iaq, 0, sizeof(iaq)); 6897 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE); 6898 6899 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param; 6900 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 6901 6902 rv = ixl_atq_poll(sc, &iaq, 250); 6903 6904 if (rv != 0) 6905 return ETIMEDOUT; 6906 6907 switch (le16toh(iaq.iaq_retval)) { 6908 case IXL_AQ_RC_OK: 6909 break; 6910 default: 6911 return EIO; 6912 } 6913 return 0; 6914 } 6915 6916 static int 6917 ixl_srdone_poll(struct ixl_softc *sc) 6918 { 6919 int wait_count; 6920 uint32_t reg; 6921 6922 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS; 6923 wait_count++) { 6924 reg = ixl_rd(sc, I40E_GLNVM_SRCTL); 6925 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK)) 6926 break; 6927 6928 delaymsec(5); 6929 } 6930 6931 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS) 6932 return -1; 6933 6934 return 0; 6935 } 6936 6937 static int 6938 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data) 6939 { 6940 uint32_t reg; 6941 6942 if (ixl_srdone_poll(sc) != 0) 6943 return ETIMEDOUT; 6944 6945 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | 6946 __BIT(I40E_GLNVM_SRCTL_START_SHIFT); 6947 ixl_wr(sc, I40E_GLNVM_SRCTL, reg); 6948 6949 if (ixl_srdone_poll(sc) != 0) { 6950 aprint_debug("NVM read error: couldn't access " 6951 "Shadow RAM address: 0x%x\n", offset); 6952 return ETIMEDOUT; 6953 } 6954 6955 reg = ixl_rd(sc, I40E_GLNVM_SRDATA); 6956 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK); 6957 6958 return 0; 6959 } 6960 6961 static int 6962 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word, 6963 void *data, size_t len) 6964 { 6965 struct ixl_dmamem *idm; 6966 struct ixl_aq_desc iaq; 6967 struct ixl_aq_nvm_param *param; 6968 uint32_t offset_bytes; 6969 int rv; 6970 6971 idm = &sc->sc_aqbuf; 6972 if (len > IXL_DMA_LEN(idm)) 6973 return ENOMEM; 6974 6975 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 6976 memset(&iaq, 0, sizeof(iaq)); 6977 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ); 6978 iaq.iaq_flags = htole16(IXL_AQ_BUF | 6979 ((len > I40E_AQ_LARGE_BUF) ? 
IXL_AQ_LB : 0)); 6980 iaq.iaq_datalen = htole16(len); 6981 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 6982 6983 param = (struct ixl_aq_nvm_param *)iaq.iaq_param; 6984 param->command_flags = IXL_AQ_NVM_LAST_CMD; 6985 param->module_pointer = 0; 6986 param->length = htole16(len); 6987 offset_bytes = (uint32_t)offset_word * 2; 6988 offset_bytes &= 0x00FFFFFF; 6989 param->offset = htole32(offset_bytes); 6990 6991 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 6992 BUS_DMASYNC_PREREAD); 6993 6994 rv = ixl_atq_poll(sc, &iaq, 250); 6995 6996 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 6997 BUS_DMASYNC_POSTREAD); 6998 6999 if (rv != 0) { 7000 return ETIMEDOUT; 7001 } 7002 7003 switch (le16toh(iaq.iaq_retval)) { 7004 case IXL_AQ_RC_OK: 7005 break; 7006 case IXL_AQ_RC_EPERM: 7007 return EPERM; 7008 case IXL_AQ_RC_EINVAL: 7009 return EINVAL; 7010 case IXL_AQ_RC_EBUSY: 7011 return EBUSY; 7012 case IXL_AQ_RC_EIO: 7013 default: 7014 return EIO; 7015 } 7016 7017 memcpy(data, IXL_DMA_KVA(idm), len); 7018 7019 return 0; 7020 } 7021 7022 static int 7023 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data) 7024 { 7025 int error; 7026 uint16_t buf; 7027 7028 error = ixl_nvm_lock(sc, 'R'); 7029 if (error) 7030 return error; 7031 7032 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) { 7033 error = ixl_nvm_read_aq(sc, offset, 7034 &buf, sizeof(buf)); 7035 if (error == 0) 7036 *data = le16toh(buf); 7037 } else { 7038 error = ixl_nvm_read_srctl(sc, offset, &buf); 7039 if (error == 0) 7040 *data = buf; 7041 } 7042 7043 ixl_nvm_unlock(sc); 7044 7045 return error; 7046 } 7047 7048 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci"); 7049 7050 #ifdef _MODULE 7051 #include "ioconf.c" 7052 #endif 7053 7054 #ifdef _MODULE 7055 static void 7056 ixl_parse_modprop(prop_dictionary_t dict) 7057 { 7058 prop_object_t obj; 7059 int64_t val; 7060 uint64_t uval; 7061 7062 if (dict == NULL) 7063 return; 7064 7065 obj = prop_dictionary_get(dict, "nomsix"); 7066 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) { 7067 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj); 7068 } 7069 7070 obj = prop_dictionary_get(dict, "stats_interval"); 7071 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7072 val = prop_number_signed_value((prop_number_t)obj); 7073 7074 /* the range has no reason */ 7075 if (100 < val && val < 180000) { 7076 ixl_param_stats_interval = val; 7077 } 7078 } 7079 7080 obj = prop_dictionary_get(dict, "nqps_limit"); 7081 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7082 val = prop_number_signed_value((prop_number_t)obj); 7083 7084 if (val <= INT32_MAX) 7085 ixl_param_nqps_limit = val; 7086 } 7087 7088 obj = prop_dictionary_get(dict, "rx_ndescs"); 7089 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7090 uval = prop_number_unsigned_integer_value((prop_number_t)obj); 7091 7092 if (uval > 8) 7093 ixl_param_rx_ndescs = uval; 7094 } 7095 7096 obj = prop_dictionary_get(dict, "tx_ndescs"); 7097 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7098 uval = prop_number_unsigned_integer_value((prop_number_t)obj); 7099 7100 if (uval > IXL_TX_PKT_DESCS) 7101 ixl_param_tx_ndescs = uval; 7102 } 7103 7104 } 7105 #endif 7106 7107 static int 7108 if_ixl_modcmd(modcmd_t cmd, void *opaque) 7109 { 7110 int error = 0; 7111 7112 #ifdef _MODULE 7113 switch (cmd) { 7114 case MODULE_CMD_INIT: 7115 ixl_parse_modprop((prop_dictionary_t)opaque); 7116 error = config_init_component(cfdriver_ioconf_if_ixl, 7117 cfattach_ioconf_if_ixl, 
cfdata_ioconf_if_ixl); 7118 break; 7119 case MODULE_CMD_FINI: 7120 error = config_fini_component(cfdriver_ioconf_if_ixl, 7121 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl); 7122 break; 7123 default: 7124 error = ENOTTY; 7125 break; 7126 } 7127 #endif 7128 7129 return error; 7130 } 7131
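/*
 * Illustrative note (not part of the original source): the properties
 * parsed by ixl_parse_modprop() above can be supplied when the module
 * is loaded with modload(8), for example:
 *
 *	modload -b nomsix=true -i stats_interval=10000 if_ixl
 *
 * The property names ("nomsix", "stats_interval", "nqps_limit",
 * "rx_ndescs", "tx_ndescs") are the prop_dictionary_get() keys used
 * above; the exact modload invocation shown here is only a sketch of
 * how such properties are typically passed and has not been verified
 * against this driver.
 */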