/*	$NetBSD: if_ixl.c,v 1.82 2022/03/31 06:23:18 yamaguchi Exp $	*/

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.82 2022/03/31 06:23:18 yamaguchi Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_ixl.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/evcnt.h>
#include <sys/interrupt.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcq.h>
#include <sys/syslog.h>
#include <sys/workqueue.h>
#include <sys/xcall.h>

#include <sys/bus.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/rss_config.h>

#include <netinet/tcp.h>	/* for struct tcphdr */
#include <netinet/udp.h>	/* for struct udphdr */

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_ixlreg.h>
#include <dev/pci/if_ixlvar.h>

#include <prop/proplib.h>

struct ixl_softc;	/* defined */

#define I40E_PF_RESET_WAIT_COUNT	200
#define I40E_AQ_LARGE_BUF		512

/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE		0x0
#define I40E_QTX_CTL_VM_QUEUE		0x1
#define I40E_QTX_CTL_PF_QUEUE		0x2

#define I40E_QUEUE_TYPE_EOL		0x7ff
#define I40E_INTR_NOTX_QUEUE		0

#define I40E_QUEUE_TYPE_RX		0x0
#define I40E_QUEUE_TYPE_TX		0x1
#define I40E_QUEUE_TYPE_PE_CEQ		0x2
#define I40E_QUEUE_TYPE_UNKNOWN		0x3

#define I40E_ITR_INDEX_RX		0x0
#define I40E_ITR_INDEX_TX		0x1
#define I40E_ITR_INDEX_OTHER		0x2
#define I40E_ITR_INDEX_NONE		0x3
#define IXL_ITR_RX			0x7a	/* 4K intrs/sec */
#define IXL_ITR_TX			0x7a	/* 4K intrs/sec */

#define I40E_INTR_NOTX_QUEUE		0
#define I40E_INTR_NOTX_INTR		0
#define I40E_INTR_NOTX_RX_QUEUE		0
#define I40E_INTR_NOTX_TX_QUEUE		1
#define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
#define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK

#define I40E_HASH_LUT_SIZE_128		0

#define IXL_ICR0_CRIT_ERR_MASK				\
	(I40E_PFINT_ICR0_PCI_EXCEPTION_MASK |		\
	I40E_PFINT_ICR0_ECC_ERR_MASK |			\
	I40E_PFINT_ICR0_PE_CRITERR_MASK)

#define IXL_QUEUE_MAX_XL710		64
#define IXL_QUEUE_MAX_X722		128

#define IXL_TX_PKT_DESCS		8
#define IXL_TX_PKT_MAXSIZE		(MCLBYTES * IXL_TX_PKT_DESCS)
#define IXL_TX_QUEUE_ALIGN		128
#define IXL_RX_QUEUE_ALIGN		128

#define IXL_MCLBYTES			(MCLBYTES - ETHER_ALIGN)
#define IXL_MTU_ETHERLEN		(ETHER_HDR_LEN \
					+ ETHER_CRC_LEN)
#if 0
#define IXL_MAX_MTU			(9728 - IXL_MTU_ETHERLEN)
#else
/* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
#define IXL_MAX_MTU			(9600 - IXL_MTU_ETHERLEN)
#endif
#define IXL_MIN_MTU			(ETHER_MIN_LEN - ETHER_CRC_LEN)
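
/*
 * With NetBSD's standard ETHER_HDR_LEN (14), ETHER_CRC_LEN (4) and
 * ETHER_MIN_LEN (64), the parenthesized definitions above work out to
 * IXL_MAX_MTU = 9600 - 18 = 9582 and IXL_MIN_MTU = 60.
 */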

#define IXL_PCIREG			PCI_MAPREG_START

#define IXL_ITR0			0x0
#define IXL_ITR1			0x1
#define IXL_ITR2			0x2
#define IXL_NOITR			0x3

#define IXL_AQ_NUM			256
#define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN			64 /* lol */
#define IXL_AQ_BUFLEN			4096

#define IXL_HMC_ROUNDUP			512
#define IXL_HMC_PGSIZE			4096
#define IXL_HMC_DVASZ			sizeof(uint64_t)
#define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID			1ULL

#define IXL_ATQ_EXEC_TIMEOUT		(10 * hz)

#define IXL_SRRD_SRCTL_ATTEMPTS		100000

struct ixl_aq_regs {
	bus_size_t	atq_tail;
	bus_size_t	atq_head;
	bus_size_t	atq_len;
	bus_size_t	atq_bal;
	bus_size_t	atq_bah;

	bus_size_t	arq_tail;
	bus_size_t	arq_head;
	bus_size_t	arq_len;
	bus_size_t	arq_bal;
	bus_size_t	arq_bah;

	uint32_t	atq_len_enable;
	uint32_t	atq_tail_mask;
	uint32_t	atq_head_mask;

	uint32_t	arq_len_enable;
	uint32_t	arq_tail_mask;
	uint32_t	arq_head_mask;
};

struct ixl_phy_type {
	uint64_t	phy_type;
	uint64_t	ifm_type;
};

struct ixl_speed_type {
	uint8_t		dev_speed;
	uint64_t	net_speed;
};

struct ixl_hmc_entry {
	uint64_t	hmc_base;
	uint32_t	hmc_count;
	uint64_t	hmc_size;
};

enum ixl_hmc_types {
	IXL_HMC_LAN_TX = 0,
	IXL_HMC_LAN_RX,
	IXL_HMC_FCOE_CTX,
	IXL_HMC_FCOE_FILTER,
	IXL_HMC_COUNT
};

struct ixl_hmc_pack {
	uint16_t	offset;
	uint16_t	width;
	uint16_t	lsb;
};

/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for c to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure.
 */
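
/*
 * Illustrative sketch only (not the driver's ixl_hmc_pack()): one plausible
 * way a table-driven packer can copy the low "width" bits of a field value
 * into the context image at bit position "lsb".  The helper name
 * example_hmc_pack_field is made up for illustration.
 */
#if 0
static void
example_hmc_pack_field(uint8_t *ctx, uint64_t value, uint16_t lsb,
    uint16_t width)
{
	unsigned int i, bit;

	for (i = 0; i < width; i++) {
		bit = lsb + i;
		if (value & (1ULL << i))
			ctx[bit / 8] |= 1U << (bit % 8);
		else
			ctx[bit / 8] &= ~(1U << (bit % 8));
	}
}
#endif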

struct ixl_hmc_rxq {
	uint16_t	 head;
	uint8_t		 cpuid;
	uint64_t	 base;
#define IXL_HMC_RXQ_BASE_UNIT		128
	uint16_t	 qlen;
	uint16_t	 dbuff;
#define IXL_HMC_RXQ_DBUFF_UNIT		128
	uint8_t		 hbuff;
#define IXL_HMC_RXQ_HBUFF_UNIT		64
	uint8_t		 dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
	uint8_t		 dsize;
#define IXL_HMC_RXQ_DSIZE_16		0
#define IXL_HMC_RXQ_DSIZE_32		1
	uint8_t		 crcstrip;
	uint8_t		 fc_ena;
	uint8_t		 l2sel;
	uint8_t		 hsplit_0;
	uint8_t		 hsplit_1;
	uint8_t		 showiv;
	uint16_t	 rxmax;
	uint8_t		 tphrdesc_ena;
	uint8_t		 tphwdesc_ena;
	uint8_t		 tphdata_ena;
	uint8_t		 tphhead_ena;
	uint8_t		 lrxqthresh;
	uint8_t		 prefena;
};

static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
	{ offsetof(struct ixl_hmc_rxq, l2sel),		1,	119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
};

#define IXL_HMC_RXQ_MINSIZE	(201 + 1)
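
/*
 * The MINSIZE values are "highest packed bit plus one": for the rx queue
 * context that is the prefena field at lsb 201, hence (201 + 1); the tx
 * queue context below follows the same convention.
 */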

struct ixl_hmc_txq {
	uint16_t	head;
	uint8_t		new_context;
	uint64_t	base;
#define IXL_HMC_TXQ_BASE_UNIT		128
	uint8_t		fc_ena;
	uint8_t		timesync_ena;
	uint8_t		fd_ena;
	uint8_t		alt_vlan_ena;
	uint8_t		cpuid;
	uint16_t	thead_wb;
	uint8_t		head_wb_ena;
#define IXL_HMC_TXQ_DESC_WB		0
#define IXL_HMC_TXQ_HEAD_WB		1
	uint16_t	qlen;
	uint8_t		tphrdesc_ena;
	uint8_t		tphrpacket_ena;
	uint8_t		tphwdesc_ena;
	uint64_t	head_wb_addr;
	uint32_t	crc;
	uint16_t	rdylist;
	uint8_t		rdylist_act;
};

static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
	/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
	/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
};

#define IXL_HMC_TXQ_MINSIZE	(94 + (7*128) + 1)

struct ixl_work {
	struct work	 ixw_cookie;
	void		(*ixw_func)(void *);
	void		*ixw_arg;
	unsigned int	 ixw_added;
};
#define IXL_WORKQUEUE_PRI	PRI_SOFTNET

struct ixl_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};

struct ixl_tx_ring {
	kmutex_t		 txr_lock;
	struct ixl_softc	*txr_sc;

	unsigned int		 txr_prod;
	unsigned int		 txr_cons;

	struct ixl_tx_map	*txr_maps;
	struct ixl_dmamem	 txr_mem;

	bus_size_t		 txr_tail;
	unsigned int		 txr_qid;
	pcq_t			*txr_intrq;
	void			*txr_si;

	struct evcnt		 txr_defragged;
	struct evcnt		 txr_defrag_failed;
	struct evcnt		 txr_pcqdrop;
	struct evcnt		 txr_transmitdef;
	struct evcnt		 txr_intr;
	struct evcnt		 txr_defer;
};

struct ixl_rx_map {
	struct mbuf		*rxm_m;
	bus_dmamap_t		 rxm_map;
};

struct ixl_rx_ring {
	kmutex_t		 rxr_lock;

	unsigned int		 rxr_prod;
	unsigned int		 rxr_cons;

	struct ixl_rx_map	*rxr_maps;
	struct ixl_dmamem	 rxr_mem;

	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;
	unsigned int		 rxr_qid;

	struct evcnt		 rxr_mgethdr_failed;
	struct evcnt		 rxr_mgetcl_failed;
	struct evcnt		 rxr_mbuf_load_failed;
	struct evcnt		 rxr_intr;
	struct evcnt		 rxr_defer;
};

struct ixl_queue_pair {
	struct ixl_softc	*qp_sc;
	struct ixl_tx_ring	*qp_txr;
	struct ixl_rx_ring	*qp_rxr;

	char			 qp_name[16];

	void			*qp_si;
	struct work		 qp_work;
	bool			 qp_workqueue;
};

struct ixl_atq {
	struct ixl_aq_desc	 iatq_desc;
	void			(*iatq_fn)(struct ixl_softc *,
				    const struct ixl_aq_desc *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);

struct ixl_product {
	unsigned int	vendor_id;
	unsigned int	product_id;
};

struct ixl_stats_counters {
	bool		 isc_has_offset;
	struct evcnt	 isc_crc_errors;
	uint64_t	 isc_crc_errors_offset;
	struct evcnt	 isc_illegal_bytes;
	uint64_t	 isc_illegal_bytes_offset;
	struct evcnt	 isc_rx_bytes;
	uint64_t	 isc_rx_bytes_offset;
	struct evcnt	 isc_rx_discards;
	uint64_t	 isc_rx_discards_offset;
	struct evcnt	 isc_rx_unicast;
	uint64_t	 isc_rx_unicast_offset;
	struct evcnt	 isc_rx_multicast;
	uint64_t	 isc_rx_multicast_offset;
	struct evcnt	 isc_rx_broadcast;
	uint64_t	 isc_rx_broadcast_offset;
	struct evcnt	 isc_rx_size_64;
	uint64_t	 isc_rx_size_64_offset;
	struct evcnt	 isc_rx_size_127;
	uint64_t	 isc_rx_size_127_offset;
	struct evcnt	 isc_rx_size_255;
	uint64_t	 isc_rx_size_255_offset;
	struct evcnt	 isc_rx_size_511;
	uint64_t	 isc_rx_size_511_offset;
	struct evcnt	 isc_rx_size_1023;
	uint64_t	 isc_rx_size_1023_offset;
	struct evcnt	 isc_rx_size_1522;
	uint64_t	 isc_rx_size_1522_offset;
	struct evcnt	 isc_rx_size_big;
	uint64_t	 isc_rx_size_big_offset;
	struct evcnt	 isc_rx_undersize;
	uint64_t	 isc_rx_undersize_offset;
	struct evcnt	 isc_rx_oversize;
	uint64_t	 isc_rx_oversize_offset;
	struct evcnt	 isc_rx_fragments;
	uint64_t	 isc_rx_fragments_offset;
	struct evcnt	 isc_rx_jabber;
	uint64_t	 isc_rx_jabber_offset;
	struct evcnt	 isc_tx_bytes;
	uint64_t	 isc_tx_bytes_offset;
	struct evcnt	 isc_tx_dropped_link_down;
	uint64_t	 isc_tx_dropped_link_down_offset;
	struct evcnt	 isc_tx_unicast;
	uint64_t	 isc_tx_unicast_offset;
	struct evcnt	 isc_tx_multicast;
	uint64_t	 isc_tx_multicast_offset;
	struct evcnt	 isc_tx_broadcast;
	uint64_t	 isc_tx_broadcast_offset;
	struct evcnt	 isc_tx_size_64;
	uint64_t	 isc_tx_size_64_offset;
	struct evcnt	 isc_tx_size_127;
	uint64_t	 isc_tx_size_127_offset;
	struct evcnt	 isc_tx_size_255;
	uint64_t	 isc_tx_size_255_offset;
	struct evcnt	 isc_tx_size_511;
	uint64_t	 isc_tx_size_511_offset;
	struct evcnt	 isc_tx_size_1023;
	uint64_t	 isc_tx_size_1023_offset;
	struct evcnt	 isc_tx_size_1522;
	uint64_t	 isc_tx_size_1522_offset;
	struct evcnt	 isc_tx_size_big;
	uint64_t	 isc_tx_size_big_offset;
	struct evcnt	 isc_mac_local_faults;
	uint64_t	 isc_mac_local_faults_offset;
	struct evcnt	 isc_mac_remote_faults;
	uint64_t	 isc_mac_remote_faults_offset;
	struct evcnt	 isc_link_xon_rx;
	uint64_t	 isc_link_xon_rx_offset;
	struct evcnt	 isc_link_xon_tx;
	uint64_t	 isc_link_xon_tx_offset;
	struct evcnt	 isc_link_xoff_rx;
	uint64_t	 isc_link_xoff_rx_offset;
	struct evcnt	 isc_link_xoff_tx;
	uint64_t	 isc_link_xoff_tx_offset;
	struct evcnt	 isc_vsi_rx_discards;
	uint64_t	 isc_vsi_rx_discards_offset;
	struct evcnt	 isc_vsi_rx_bytes;
	uint64_t	 isc_vsi_rx_bytes_offset;
	struct evcnt	 isc_vsi_rx_unicast;
	uint64_t	 isc_vsi_rx_unicast_offset;
	struct evcnt	 isc_vsi_rx_multicast;
	uint64_t	 isc_vsi_rx_multicast_offset;
	struct evcnt	 isc_vsi_rx_broadcast;
	uint64_t	 isc_vsi_rx_broadcast_offset;
	struct evcnt	 isc_vsi_tx_errors;
	uint64_t	 isc_vsi_tx_errors_offset;
	struct evcnt	 isc_vsi_tx_bytes;
	uint64_t	 isc_vsi_tx_bytes_offset;
	struct evcnt	 isc_vsi_tx_unicast;
	uint64_t	 isc_vsi_tx_unicast_offset;
	struct evcnt	 isc_vsi_tx_multicast;
	uint64_t	 isc_vsi_tx_multicast_offset;
	struct evcnt	 isc_vsi_tx_broadcast;
	uint64_t	 isc_vsi_tx_broadcast_offset;
};

/*
 * Locking notes:
 * + a field in ixl_tx_ring is protected by txr_lock (a spin mutex), and
 *   a field in ixl_rx_ring is protected by rxr_lock (a spin mutex).
 *    - only one of these ring locks may be held at a time.
 * + a field named sc_atq_* in ixl_softc is protected by sc_atq_lock
 *   (a spin mutex).
 *    - the lock cannot be held together with txr_lock or rxr_lock.
 * + a field named sc_arq_* is not protected by any lock.
 *    - operations on sc_arq_* are done in a single context related to
 *      sc_arq_task.
 * + other fields in ixl_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *    - it must be acquired before any other lock is taken, and it may
 *      only be released after the other lock has been released.
 */
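
/*
 * Illustrative sketch only (not driver code): the acquisition order the
 * notes above require, i.e. sc_cfg_lock first, then a single ring lock,
 * released in the reverse order.  The helper name example_lock_order is
 * made up for illustration.
 */
#if 0
static void
example_lock_order(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
	mutex_enter(&sc->sc_cfg_lock);	/* adaptive mutex, taken first */
	mutex_enter(&txr->txr_lock);	/* spin mutex, taken second */
	/* ... access txr_* fields here ... */
	mutex_exit(&txr->txr_lock);
	mutex_exit(&sc->sc_cfg_lock);
}
#endif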

struct ixl_softc {
	device_t		 sc_dev;
	struct ethercom		 sc_ec;
	bool			 sc_attached;
	bool			 sc_dead;
	uint32_t		 sc_port;
	struct sysctllog	*sc_sysctllog;
	struct workqueue	*sc_workq;
	struct workqueue	*sc_workq_txrx;
	int			 sc_stats_intval;
	callout_t		 sc_stats_callout;
	struct ixl_work		 sc_stats_task;
	struct ixl_stats_counters
				 sc_stats_counters;
	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;
	uint64_t		 sc_phy_types;
	uint8_t			 sc_phy_abilities;
	uint8_t			 sc_phy_linkspeed;
	uint8_t			 sc_phy_fec_cfg;
	uint16_t		 sc_eee_cap;
	uint32_t		 sc_eeer_val;
	uint8_t			 sc_d3_lpan;
	kmutex_t		 sc_cfg_lock;
	enum i40e_mac_type	 sc_mac_type;
	uint32_t		 sc_rss_table_size;
	uint32_t		 sc_rss_table_entry_width;
	bool			 sc_txrx_workqueue;
	u_int			 sc_tx_process_limit;
	u_int			 sc_rx_process_limit;
	u_int			 sc_tx_intr_process_limit;
	u_int			 sc_rx_intr_process_limit;

	int			 sc_cur_ec_capenable;

	struct pci_attach_args	 sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	unsigned int		 sc_nintrs;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	uint8_t			 sc_pf_id;
	uint16_t		 sc_uplink_seid;	/* le */
	uint16_t		 sc_downlink_seid;	/* le */
	uint16_t		 sc_vsi_number;
	uint16_t		 sc_vsi_stat_counter_idx;
	uint16_t		 sc_seid;
	unsigned int		 sc_base_queue;

	pci_intr_type_t		 sc_intrtype;
	unsigned int		 sc_msix_vector_queue;

	struct ixl_dmamem	 sc_scratch;
	struct ixl_dmamem	 sc_aqbuf;

	const struct ixl_aq_regs *
				 sc_aq_regs;
	uint32_t		 sc_aq_flags;
#define IXL_SC_AQ_FLAG_RXCTL	__BIT(0)
#define IXL_SC_AQ_FLAG_NVMLOCK	__BIT(1)
#define IXL_SC_AQ_FLAG_NVMREAD	__BIT(2)
#define IXL_SC_AQ_FLAG_RSS	__BIT(3)

	kmutex_t		 sc_atq_lock;
	kcondvar_t		 sc_atq_cv;
	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	struct ixl_dmamem	 sc_arq;
	struct ixl_work		 sc_arq_task;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_buf	*sc_arq_live[IXL_AQ_NUM];
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	struct ixl_work		 sc_link_state_task;
	struct ixl_atq		 sc_link_state_atq;

	struct ixl_dmamem	 sc_hmc_sd;
	struct ixl_dmamem	 sc_hmc_pd;
	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];

	struct if_percpuq	*sc_ipq;
	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueue_pairs;
	unsigned int		 sc_nqueue_pairs_max;
	unsigned int		 sc_nqueue_pairs_device;
	struct ixl_queue_pair	*sc_qps;
	uint32_t		 sc_itr_rx;
	uint32_t		 sc_itr_tx;

	struct evcnt		 sc_event_atq;
	struct evcnt		 sc_event_link;
	struct evcnt		 sc_event_ecc_err;
	struct evcnt		 sc_event_pci_exception;
	struct evcnt		 sc_event_crit_err;
};

#define IXL_TXRX_PROCESS_UNLIMIT	UINT_MAX
#define IXL_TX_PROCESS_LIMIT		256
#define IXL_RX_PROCESS_LIMIT		256
#define IXL_TX_INTR_PROCESS_LIMIT	256
#define IXL_RX_INTR_PROCESS_LIMIT	0U

#define IXL_IFCAP_RXCSUM	(IFCAP_CSUM_IPv4_Rx |	\
				 IFCAP_CSUM_TCPv4_Rx |	\
				 IFCAP_CSUM_UDPv4_Rx |	\
				 IFCAP_CSUM_TCPv6_Rx |	\
				 IFCAP_CSUM_UDPv6_Rx)
#define IXL_IFCAP_TXCSUM	(IFCAP_CSUM_IPv4_Tx |	\
				 IFCAP_CSUM_TCPv4_Tx |	\
				 IFCAP_CSUM_UDPv4_Tx |	\
				 IFCAP_CSUM_TCPv6_Tx |	\
				 IFCAP_CSUM_UDPv6_Tx)
#define IXL_CSUM_ALL_OFFLOAD	(M_CSUM_IPv4 |			\
				 M_CSUM_TCPv4 | M_CSUM_TCPv6 |	\
				 M_CSUM_UDPv4 | M_CSUM_UDPv6)

#define delaymsec(_x)	DELAY(1000 * (_x))
#ifdef IXL_DEBUG
#define DDPRINTF(sc, fmt, args...)				\
do {								\
	if ((sc) != NULL) {					\
		device_printf(					\
		    ((struct ixl_softc *)(sc))->sc_dev,		\
		    "");					\
	}							\
	printf("%s:\t" fmt, __func__, ##args);			\
} while (0)
#else
#define DDPRINTF(sc, fmt, args...)	__nothing
#endif
#ifndef IXL_STATS_INTERVAL_MSEC
#define IXL_STATS_INTERVAL_MSEC	10000
#endif
#ifndef IXL_QUEUE_NUM
#define IXL_QUEUE_NUM		0
#endif

static bool		 ixl_param_nomsix = false;
static int		 ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
static int		 ixl_param_nqps_limit = IXL_QUEUE_NUM;
static unsigned int	 ixl_param_tx_ndescs = 512;
static unsigned int	 ixl_param_rx_ndescs = 256;

static enum i40e_mac_type
		ixl_mactype(pci_product_id_t);
static void	ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
static void	ixl_clear_hw(struct ixl_softc *);
static int	ixl_pf_reset(struct ixl_softc *);

static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
		    bus_size_t, bus_size_t);
static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);

static int	ixl_arq_fill(struct ixl_softc *);
static void	ixl_arq_unfill(struct ixl_softc *);

static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
		    unsigned int);
static void	ixl_atq_set(struct ixl_atq *,
		    void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
static int	ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
static void	ixl_atq_done(struct ixl_softc *);
static int	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
static int	ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *);
static int	ixl_get_version(struct ixl_softc *);
static int	ixl_get_nvm_version(struct ixl_softc *);
static int	ixl_get_hw_capabilities(struct ixl_softc *);
static int	ixl_pxe_clear(struct ixl_softc *);
static int	ixl_lldp_shut(struct ixl_softc *);
static int	ixl_get_mac(struct ixl_softc *);
static int	ixl_get_switch_config(struct ixl_softc *);
static int	ixl_phy_mask_ints(struct ixl_softc *);
static int	ixl_get_phy_info(struct ixl_softc *);
static int	ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
static int	ixl_set_phy_autoselect(struct ixl_softc *);
static int	ixl_restart_an(struct ixl_softc *);
static int	ixl_hmc(struct ixl_softc *);
static void	ixl_hmc_free(struct ixl_softc *);
static int	ixl_get_vsi(struct ixl_softc *);
static int	ixl_set_vsi(struct ixl_softc *);
static void	ixl_set_filter_control(struct ixl_softc *);
static void	ixl_get_link_status(void *);
static int	ixl_get_link_status_poll(struct ixl_softc *, int *);
static void	ixl_get_link_status_done(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static int	ixl_set_link_status_locked(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static uint64_t	ixl_search_link_speed(uint8_t);
static uint8_t	ixl_search_baudrate(uint64_t);
static void	ixl_config_rss(struct ixl_softc *);
static int	ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
		    uint16_t, uint16_t);
static int	ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
		    uint16_t, uint16_t);
static void	ixl_arq(void *);
static void	ixl_hmc_pack(void *, const void *,
		    const struct ixl_hmc_pack *, unsigned int);
static uint32_t	ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
static void	ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
static int	ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);

static int	ixl_match(device_t, cfdata_t, void *);
static void	ixl_attach(device_t, device_t, void *);
static int	ixl_detach(device_t, int);

static void	ixl_media_add(struct ixl_softc *);
static int	ixl_media_change(struct ifnet *);
static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
static void	ixl_watchdog(struct ifnet *);
static int	ixl_ioctl(struct ifnet *, u_long, void *);
static void	ixl_start(struct ifnet *);
static int	ixl_transmit(struct ifnet *, struct mbuf *);
static void	ixl_deferred_transmit(void *);
static int	ixl_intr(void *);
static int	ixl_queue_intr(void *);
static int	ixl_other_intr(void *);
static void	ixl_handle_queue(void *);
static void	ixl_handle_queue_wk(struct work *, void *);
static void	ixl_sched_handle_queue(struct ixl_softc *,
		    struct ixl_queue_pair *);
static int	ixl_init(struct ifnet *);
static int	ixl_init_locked(struct ixl_softc *);
static void	ixl_stop(struct ifnet *, int);
static void	ixl_stop_locked(struct ixl_softc *);
static int	ixl_iff(struct ixl_softc *);
static int	ixl_ifflags_cb(struct ethercom *);
static int	ixl_setup_interrupts(struct ixl_softc *);
static int	ixl_establish_intx(struct ixl_softc *);
static int	ixl_establish_msix(struct ixl_softc *);
static void	ixl_enable_queue_intr(struct ixl_softc *,
		    struct ixl_queue_pair *);
static void	ixl_disable_queue_intr(struct ixl_softc *,
		    struct ixl_queue_pair *);
static void	ixl_enable_other_intr(struct ixl_softc *);
static void	ixl_disable_other_intr(struct ixl_softc *);
static void	ixl_config_queue_intr(struct ixl_softc *);
static void	ixl_config_other_intr(struct ixl_softc *);

static struct ixl_tx_ring *
		ixl_txr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);

static struct ixl_rx_ring *
		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
static int	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);

static struct workqueue *
		ixl_workq_create(const char *, pri_t, int, int);
static void	ixl_workq_destroy(struct workqueue *);
static int	ixl_workqs_teardown(device_t);
static void	ixl_work_set(struct ixl_work *, void (*)(void *), void *);
static void	ixl_work_add(struct workqueue *, struct ixl_work *);
static void	ixl_work_wait(struct workqueue *, struct ixl_work *);
static void	ixl_workq_work(struct work *, void *);
static const struct ixl_product *
		ixl_lookup(const struct pci_attach_args *pa);
static void	ixl_link_state_update(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static int	ixl_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixl_setup_vlan_hwfilter(struct ixl_softc *);
static void	ixl_teardown_vlan_hwfilter(struct ixl_softc *);
static int	ixl_update_macvlan(struct ixl_softc *);
static int	ixl_setup_interrupts(struct ixl_softc *);
static void	ixl_teardown_interrupts(struct ixl_softc *);
static int	ixl_setup_stats(struct ixl_softc *);
static void	ixl_teardown_stats(struct ixl_softc *);
static void	ixl_stats_callout(void *);
static void	ixl_stats_update(void *);
static int	ixl_setup_sysctls(struct ixl_softc *);
static void	ixl_teardown_sysctls(struct ixl_softc *);
static int	ixl_sysctl_itr_handler(SYSCTLFN_PROTO);
static int	ixl_queue_pairs_alloc(struct ixl_softc *);
static void	ixl_queue_pairs_free(struct ixl_softc *);

static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_TWINAX },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_ACC },
	{ 1ULL << IXL_PHY_TYPE_2500BASE_T_1,	IFM_2500_T },
	{ 1ULL << IXL_PHY_TYPE_5000BASE_T_1,	IFM_5000_T },
	{ 1ULL << IXL_PHY_TYPE_2500BASE_T_2,	IFM_2500_T },
	{ 1ULL << IXL_PHY_TYPE_5000BASE_T_2,	IFM_5000_T },
};

static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_LINK_SPEED_40GB,	IF_Gbps(40) },
	{ IXL_AQ_LINK_SPEED_25GB,	IF_Gbps(25) },
	{ IXL_AQ_LINK_SPEED_10GB,	IF_Gbps(10) },
	{ IXL_AQ_LINK_SPEED_5000MB,	IF_Mbps(5000) },
	{ IXL_AQ_LINK_SPEED_2500MB,	IF_Mbps(2500) },
	{ IXL_AQ_LINK_SPEED_1000MB,	IF_Mbps(1000) },
	{ IXL_AQ_LINK_SPEED_100MB,	IF_Mbps(100) },
};

static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail	= I40E_PF_ATQT,
	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#define ixl_rd(_s, _r)			\
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v)		\
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o)	\
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_flush(_s)	(void)ixl_rd((_s), I40E_GLGEN_STAT)
#define ixl_nqueues(_sc)	(1 << ((_sc)->sc_nqueue_pairs - 1))

CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
    ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static const struct ixl_product ixl_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_KX_B },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_KX_C },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_A },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_B },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_C },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_T_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_T_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_T4_10G },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_KX },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_QSFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_1G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_I_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_V710_5G_T },
	/* required last entry */
	{0, 0}
};

static const struct ixl_product *
ixl_lookup(const struct pci_attach_args *pa)
{
	const struct ixl_product *ixlp;

	for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
		if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
		    PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
			return ixlp;
	}

	return NULL;
}

static void
ixl_intr_barrier(void)
{

	/* wait for all handlers to finish */
	xc_barrier(0);
}

static int
ixl_match(device_t parent, cfdata_t match, void *aux)
{
	const struct pci_attach_args *pa = aux;

	return (ixl_lookup(pa) != NULL) ? 1 : 0;
}

static void
ixl_attach(device_t parent, device_t self, void *aux)
{
	struct ixl_softc *sc;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp;
	pcireg_t memtype;
	uint32_t firstq, port, ari, func;
	char xnamebuf[32];
	int tries, rv, link;

	sc = device_private(self);
	sc->sc_dev = self;
	ifp = &sc->sc_ec.ec_if;

	sc->sc_pa = *pa;
	sc->sc_dmat = (pci_dma64_available(pa)) ?
	    pa->pa_dmat64 : pa->pa_dmat;
	sc->sc_aq_regs = &ixl_pf_aq_regs;

	sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));

	ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
		aprint_error(": unable to map registers\n");
		return;
	}

	mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);

	firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
	firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
	firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	sc->sc_base_queue = firstq;

	ixl_clear_hw(sc);
	if (ixl_pf_reset(sc) == -1) {
		/* error printed by ixl_pf_reset */
		goto unmap;
	}

	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	sc->sc_port = port;
	aprint_normal(": port %u", sc->sc_port);

	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;

	func = ixl_rd(sc, I40E_PF_FUNC_RID);
	sc->sc_pf_id = func & (ari ? 0xff : 0x7);

	/* initialise the adminq */

	mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);

	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
		aprint_error("\n" "%s: unable to allocate atq\n",
		    device_xname(self));
		goto unmap;
	}

	SIMPLEQ_INIT(&sc->sc_arq_idle);
	ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
	sc->sc_arq_cons = 0;
	sc->sc_arq_prod = 0;

	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
		aprint_error("\n" "%s: unable to allocate arq\n",
		    device_xname(self));
		goto free_atq;
	}

	if (!ixl_arq_fill(sc)) {
		aprint_error("\n" "%s: unable to fill arq descriptors\n",
		    device_xname(self));
		goto free_arq;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (tries = 0; tries < 10; tries++) {
		sc->sc_atq_cons = 0;
		sc->sc_atq_prod = 0;

		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);

		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);

		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
		    ixl_dmamem_lo(&sc->sc_atq));
		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
		    ixl_dmamem_hi(&sc->sc_atq));
		ixl_wr(sc, sc->sc_aq_regs->atq_len,
		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);

		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
		    ixl_dmamem_lo(&sc->sc_arq));
		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
		    ixl_dmamem_hi(&sc->sc_arq));
		ixl_wr(sc, sc->sc_aq_regs->arq_len,
		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);

		rv = ixl_get_version(sc);
		if (rv == 0)
			break;
		if (rv != ETIMEDOUT) {
			aprint_error(", unable to get firmware version\n");
			goto shutdown;
		}

		delaymsec(100);
	}

	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);

	if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
		aprint_error_dev(self, ", unable to allocate nvm buffer\n");
		goto shutdown;
	}

	ixl_get_nvm_version(sc);

	if (sc->sc_mac_type == I40E_MAC_X722)
		sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
	else
		sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;

	rv = ixl_get_hw_capabilities(sc);
	if (rv != 0) {
		aprint_error(", GET HW CAPABILITIES %s\n",
		    rv == ETIMEDOUT ? "timeout" : "error");
		goto free_aqbuf;
	}

	sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
	if (ixl_param_nqps_limit > 0) {
		sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
		    ixl_param_nqps_limit);
	}

	sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
	sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
	sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;

	KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
	KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
	KASSERT(sc->sc_rx_ring_ndescs ==
	    (1U << (fls32(sc->sc_rx_ring_ndescs) - 1)));
	KASSERT(sc->sc_tx_ring_ndescs ==
	    (1U << (fls32(sc->sc_tx_ring_ndescs) - 1)));

	if (ixl_get_mac(sc) != 0) {
		/* error printed by ixl_get_mac */
		goto free_aqbuf;
	}

	aprint_normal("\n");
	aprint_naive("\n");

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	rv = ixl_pxe_clear(sc);
	if (rv != 0) {
		aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
		    rv == ETIMEDOUT ? "timeout" : "error");
	}

	ixl_set_filter_control(sc);

	if (ixl_hmc(sc) != 0) {
		/* error printed by ixl_hmc */
		goto free_aqbuf;
	}

	if (ixl_lldp_shut(sc) != 0) {
		/* error printed by ixl_lldp_shut */
		goto free_hmc;
	}

	if (ixl_phy_mask_ints(sc) != 0) {
		/* error printed by ixl_phy_mask_ints */
		goto free_hmc;
	}

	if (ixl_restart_an(sc) != 0) {
		/* error printed by ixl_restart_an */
		goto free_hmc;
	}

	if (ixl_get_switch_config(sc) != 0) {
		/* error printed by ixl_get_switch_config */
		goto free_hmc;
	}

	rv = ixl_get_link_status_poll(sc, NULL);
	if (rv != 0) {
		aprint_error_dev(self, "GET LINK STATUS %s\n",
		    rv == ETIMEDOUT ? "timeout" : "error");
		goto free_hmc;
	}

	/*
	 * The FW often returns EIO for the "Get PHY Abilities" command
	 * if there is no delay.
	 */
	DELAY(500);
	if (ixl_get_phy_info(sc) != 0) {
		/* error printed by ixl_get_phy_info */
		goto free_hmc;
	}

	if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
		aprint_error_dev(self, "unable to allocate scratch buffer\n");
		goto free_hmc;
	}

	rv = ixl_get_vsi(sc);
	if (rv != 0) {
		aprint_error_dev(self, "GET VSI %s %d\n",
		    rv == ETIMEDOUT ? "timeout" : "error", rv);
		goto free_scratch;
	}

	rv = ixl_set_vsi(sc);
	if (rv != 0) {
		aprint_error_dev(self, "UPDATE VSI error %s %d\n",
		    rv == ETIMEDOUT ? "timeout" : "error", rv);
		goto free_scratch;
	}

	if (ixl_queue_pairs_alloc(sc) != 0) {
		/* error printed by ixl_queue_pairs_alloc */
		goto free_scratch;
	}

	if (ixl_setup_interrupts(sc) != 0) {
		/* error printed by ixl_setup_interrupts */
		goto free_queue_pairs;
	}

	if (ixl_setup_stats(sc) != 0) {
		aprint_error_dev(self, "failed to setup event counters\n");
		goto teardown_intrs;
	}

	if (ixl_setup_sysctls(sc) != 0) {
		/* error printed by ixl_setup_sysctls */
		goto teardown_stats;
	}

	snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
	sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
	    IPL_NET, WQ_MPSAFE);
	if (sc->sc_workq == NULL)
		goto teardown_sysctls;

	snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
	rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
	    sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
	if (rv != 0) {
		sc->sc_workq_txrx = NULL;
		goto teardown_wqs;
	}

	snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
	cv_init(&sc->sc_atq_cv, xnamebuf);

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_ioctl = ixl_ioctl;
	ifp->if_start = ixl_start;
	ifp->if_transmit = ixl_transmit;
	ifp->if_watchdog = ixl_watchdog;
	ifp->if_init = ixl_init;
	ifp->if_stop = ixl_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
	ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
#if 0
	ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
#endif
	ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
	sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
	/* Disable VLAN_HWFILTER by default */
	CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);

	sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;

	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change,
	    ixl_media_status, &sc->sc_cfg_lock);

	ixl_media_add(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	if (ISSET(sc->sc_phy_abilities,
	    (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
		ifmedia_add(&sc->sc_media,
		    IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
	}
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_initialize(ifp);

	sc->sc_ipq = if_percpuq_create(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);

	rv = ixl_get_link_status_poll(sc, &link);
	if (rv != 0)
		link = LINK_STATE_UNKNOWN;
	if_link_state_change(ifp, link);

	ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
	ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);

	ixl_config_other_intr(sc);
	ixl_enable_other_intr(sc);

	ixl_set_phy_autoselect(sc);

	/* remove default mac filter and replace it so we can see vlans */
	rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
	if (rv != ENOENT) {
		aprint_debug_dev(self,
		    "unable to remove macvlan %u\n", rv);
	}
	rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
	if (rv != ENOENT) {
		aprint_debug_dev(self,
		    "unable to remove macvlan, ignore vlan %u\n", rv);
	}

	if (ixl_update_macvlan(sc) != 0) {
		aprint_debug_dev(self,
		    "couldn't enable vlan hardware filter\n");
		CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
		CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
	}

	sc->sc_txrx_workqueue = true;
	sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
	sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
	sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
	sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;

	ixl_stats_update(sc);
	sc->sc_stats_counters.isc_has_offset = true;

	if (pmf_device_register(self, NULL, NULL) != true)
		aprint_debug_dev(self, "couldn't establish power handler\n");
	sc->sc_itr_rx = IXL_ITR_RX;
	sc->sc_itr_tx = IXL_ITR_TX;
	sc->sc_attached = true;
	if_register(ifp);

	return;

teardown_wqs:
	config_finalize_register(self, ixl_workqs_teardown);
teardown_sysctls:
	ixl_teardown_sysctls(sc);
teardown_stats:
	ixl_teardown_stats(sc);
teardown_intrs:
	ixl_teardown_interrupts(sc);
free_queue_pairs:
	ixl_queue_pairs_free(sc);
free_scratch:
	ixl_dmamem_free(sc, &sc->sc_scratch);
free_hmc:
	ixl_hmc_free(sc);
free_aqbuf:
	ixl_dmamem_free(sc, &sc->sc_aqbuf);
shutdown:
	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);

	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);

	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	ixl_arq_unfill(sc);
free_arq:
	ixl_dmamem_free(sc, &sc->sc_arq);
free_atq:
	ixl_dmamem_free(sc, &sc->sc_atq);
unmap:
	mutex_destroy(&sc->sc_atq_lock);
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	mutex_destroy(&sc->sc_cfg_lock);
	sc->sc_mems = 0;

	sc->sc_attached = false;
}

static int
ixl_detach(device_t self, int flags)
{
	struct ixl_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	if (!sc->sc_attached)
		return 0;

	ixl_stop(ifp, 1);

	callout_halt(&sc->sc_stats_callout, NULL);
	ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);

	/* detach the I/F before stopping the adminq because of callbacks */
	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_fini(&sc->sc_media);
	if_percpuq_destroy(sc->sc_ipq);

	ixl_disable_other_intr(sc);
	ixl_intr_barrier();
	ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
	ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);

	if (sc->sc_workq != NULL) {
		ixl_workq_destroy(sc->sc_workq);
		sc->sc_workq = NULL;
	}

	if (sc->sc_workq_txrx != NULL) {
		workqueue_destroy(sc->sc_workq_txrx);
		sc->sc_workq_txrx = NULL;
	}

	ixl_teardown_interrupts(sc);
	ixl_teardown_stats(sc);
	ixl_teardown_sysctls(sc);

	ixl_queue_pairs_free(sc);

	ixl_dmamem_free(sc, &sc->sc_scratch);
	ixl_hmc_free(sc);

	/* shutdown */
	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);

	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);

	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	ixl_arq_unfill(sc);

	ixl_dmamem_free(sc, &sc->sc_arq);
	ixl_dmamem_free(sc, &sc->sc_atq);
	ixl_dmamem_free(sc, &sc->sc_aqbuf);

	cv_destroy(&sc->sc_atq_cv);
	mutex_destroy(&sc->sc_atq_lock);

	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}

	mutex_destroy(&sc->sc_cfg_lock);

	return 0;
}

static int
ixl_workqs_teardown(device_t self)
{
	struct ixl_softc *sc = device_private(self);

	if (sc->sc_workq != NULL) {
		ixl_workq_destroy(sc->sc_workq);
		sc->sc_workq = NULL;
	}

	if (sc->sc_workq_txrx != NULL) {
		workqueue_destroy(sc->sc_workq_txrx);
		sc->sc_workq_txrx = NULL;
	}

	return 0;
}

static int
ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ixl_softc *sc = ifp->if_softc;
	int rv;

	if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
		return 0;
	}

	if (set) {
		rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
		    IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
		if (rv == 0) {
			rv = ixl_add_macvlan(sc, etherbroadcastaddr,
			    vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
		}
	} else {
		rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
		    IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
		(void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
		    IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
	}

	return rv;
}

static void
ixl_media_add(struct ixl_softc *sc)
{
	struct ifmedia *ifm = &sc->sc_media;
	const struct ixl_phy_type *itype;
	unsigned int i;
	bool flow;

	if (ISSET(sc->sc_phy_abilities,
	    (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
		flow = true;
	} else {
		flow = false;
	}

	for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
		itype = &ixl_phy_type_map[i];

		if (ISSET(sc->sc_phy_types, itype->phy_type)) {
			ifmedia_add(ifm,
			    IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);

			if (flow) {
				ifmedia_add(ifm,
				    IFM_ETHER | IFM_FDX | IFM_FLOW |
				    itype->ifm_type, 0, NULL);
			}

			if (itype->ifm_type != IFM_100_TX)
				continue;

			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
			    0, NULL);
			if (flow) {
				ifmedia_add(ifm,
				    IFM_ETHER | IFM_FLOW | itype->ifm_type,
				    0, NULL);
			}
		}
	}
}

static void
ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ixl_softc *sc = ifp->if_softc;

	KASSERT(mutex_owned(&sc->sc_cfg_lock));

	ifmr->ifm_status = sc->sc_media_status;
	ifmr->ifm_active = sc->sc_media_active;
}

static int
ixl_media_change(struct ifnet *ifp)
{
	struct ixl_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	uint64_t ifm_active = sc->sc_media_active;
	uint8_t link_speed, abilities;

	switch (IFM_SUBTYPE(ifm_active)) {
	case IFM_1000_SGMII:
	case IFM_1000_KX:
	case IFM_10G_KX4:
	case IFM_10G_KR:
	case IFM_40G_KR4:
	case IFM_20G_KR2:
	case IFM_25G_KR:
		/* backplanes */
		return EINVAL;
	}

	abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		link_speed = sc->sc_phy_linkspeed;
		break;
	case IFM_NONE:
		link_speed = 0;
		CLR(abilities, IXL_PHY_ABILITY_LINKUP);
		break;
	default:
		link_speed = ixl_search_baudrate(
		    ifmedia_baudrate(ifm->ifm_media));
	}

	if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
		if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
			return EINVAL;
	}

	if (ifm->ifm_media & IFM_FLOW) {
		abilities |= sc->sc_phy_abilities &
		    (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
	}

	return ixl_set_phy_config(sc, link_speed, abilities, false);
}

static void
ixl_watchdog(struct ifnet *ifp)
{

}

static void
ixl_del_all_multiaddr(struct ixl_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	ETHER_LOCK(ec);
	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
	    ETHER_NEXT_MULTI(step, enm)) {
		ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
		    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
	}
	ETHER_UNLOCK(ec);
}

static int
ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int rv;

	if (ISSET(ifp->if_flags, IFF_ALLMULTI))
		return 0;

	if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
		ixl_del_all_multiaddr(sc);
		SET(ifp->if_flags, IFF_ALLMULTI);
		return ENETRESET;
	}

	/* multicast addresses cannot be used with the VLAN HWFILTER */
	rv = ixl_add_macvlan(sc, addrlo, 0,
	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);

	if (rv == ENOSPC) {
		ixl_del_all_multiaddr(sc);
		SET(ifp->if_flags, IFF_ALLMULTI);
		return ENETRESET;
	}

	return rv;
}

static int
ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm, *enm_last;
	struct ether_multistep step;
	int error, rv = 0;

	if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		ixl_remove_macvlan(sc, addrlo, 0,
		    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
return 0; 1741 } 1742 1743 ETHER_LOCK(ec); 1744 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1745 ETHER_NEXT_MULTI(step, enm)) { 1746 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1747 ETHER_ADDR_LEN) != 0) { 1748 goto out; 1749 } 1750 } 1751 1752 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1753 ETHER_NEXT_MULTI(step, enm)) { 1754 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0, 1755 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1756 if (error != 0) 1757 break; 1758 } 1759 1760 if (enm != NULL) { 1761 enm_last = enm; 1762 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1763 ETHER_NEXT_MULTI(step, enm)) { 1764 if (enm == enm_last) 1765 break; 1766 1767 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1768 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1769 } 1770 } else { 1771 CLR(ifp->if_flags, IFF_ALLMULTI); 1772 rv = ENETRESET; 1773 } 1774 1775 out: 1776 ETHER_UNLOCK(ec); 1777 return rv; 1778 } 1779 1780 static int 1781 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1782 { 1783 struct ifreq *ifr = (struct ifreq *)data; 1784 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc; 1785 const struct sockaddr *sa; 1786 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN]; 1787 int s, error = 0; 1788 unsigned int nmtu; 1789 1790 switch (cmd) { 1791 case SIOCSIFMTU: 1792 nmtu = ifr->ifr_mtu; 1793 1794 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) { 1795 error = EINVAL; 1796 break; 1797 } 1798 if (ifp->if_mtu != nmtu) { 1799 s = splnet(); 1800 error = ether_ioctl(ifp, cmd, data); 1801 splx(s); 1802 if (error == ENETRESET) 1803 error = ixl_init(ifp); 1804 } 1805 break; 1806 case SIOCADDMULTI: 1807 sa = ifreq_getaddr(SIOCADDMULTI, ifr); 1808 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) { 1809 error = ether_multiaddr(sa, addrlo, addrhi); 1810 if (error != 0) 1811 return error; 1812 1813 error = ixl_add_multi(sc, addrlo, addrhi); 1814 if (error != 0 && error != ENETRESET) { 1815 ether_delmulti(sa, &sc->sc_ec); 1816 error = EIO; 1817 } 1818 } 1819 break; 1820 1821 case SIOCDELMULTI: 1822 sa = ifreq_getaddr(SIOCDELMULTI, ifr); 1823 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) { 1824 error = ether_multiaddr(sa, addrlo, addrhi); 1825 if (error != 0) 1826 return error; 1827 1828 error = ixl_del_multi(sc, addrlo, addrhi); 1829 } 1830 break; 1831 1832 default: 1833 s = splnet(); 1834 error = ether_ioctl(ifp, cmd, data); 1835 splx(s); 1836 } 1837 1838 if (error == ENETRESET) 1839 error = ixl_iff(sc); 1840 1841 return error; 1842 } 1843 1844 static enum i40e_mac_type 1845 ixl_mactype(pci_product_id_t id) 1846 { 1847 1848 switch (id) { 1849 case PCI_PRODUCT_INTEL_XL710_SFP: 1850 case PCI_PRODUCT_INTEL_XL710_KX_B: 1851 case PCI_PRODUCT_INTEL_XL710_KX_C: 1852 case PCI_PRODUCT_INTEL_XL710_QSFP_A: 1853 case PCI_PRODUCT_INTEL_XL710_QSFP_B: 1854 case PCI_PRODUCT_INTEL_XL710_QSFP_C: 1855 case PCI_PRODUCT_INTEL_X710_10G_T_1: 1856 case PCI_PRODUCT_INTEL_X710_10G_T_2: 1857 case PCI_PRODUCT_INTEL_XL710_20G_BP_1: 1858 case PCI_PRODUCT_INTEL_XL710_20G_BP_2: 1859 case PCI_PRODUCT_INTEL_X710_T4_10G: 1860 case PCI_PRODUCT_INTEL_XXV710_25G_BP: 1861 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28: 1862 case PCI_PRODUCT_INTEL_X710_10G_SFP: 1863 case PCI_PRODUCT_INTEL_X710_10G_BP: 1864 return I40E_MAC_XL710; 1865 1866 case PCI_PRODUCT_INTEL_X722_KX: 1867 case PCI_PRODUCT_INTEL_X722_QSFP: 1868 case PCI_PRODUCT_INTEL_X722_SFP: 1869 case PCI_PRODUCT_INTEL_X722_1G_BASET: 1870 case PCI_PRODUCT_INTEL_X722_10G_BASET: 1871 case PCI_PRODUCT_INTEL_X722_I_SFP: 1872 return I40E_MAC_X722; 1873 } 1874 1875 return I40E_MAC_GENERIC; 1876 } 
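/*
 * Enable bus mastering and memory-space decoding in the PCI command
 * register before any memory-mapped register accesses are made.
 */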
1877 1878 static void 1879 ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag) 1880 { 1881 pcireg_t csr; 1882 1883 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 1884 csr |= (PCI_COMMAND_MASTER_ENABLE | 1885 PCI_COMMAND_MEM_ENABLE); 1886 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 1887 } 1888 1889 static inline void * 1890 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i) 1891 { 1892 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 1893 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1894 1895 if (i >= e->hmc_count) 1896 return NULL; 1897 1898 kva += e->hmc_base; 1899 kva += i * e->hmc_size; 1900 1901 return kva; 1902 } 1903 1904 static inline size_t 1905 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type) 1906 { 1907 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1908 1909 return e->hmc_size; 1910 } 1911 1912 static void 1913 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1914 { 1915 struct ixl_rx_ring *rxr = qp->qp_rxr; 1916 1917 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 1918 I40E_PFINT_DYN_CTLN_INTENA_MASK | 1919 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 1920 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 1921 ixl_flush(sc); 1922 } 1923 1924 static void 1925 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1926 { 1927 struct ixl_rx_ring *rxr = qp->qp_rxr; 1928 1929 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 1930 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 1931 ixl_flush(sc); 1932 } 1933 1934 static void 1935 ixl_enable_other_intr(struct ixl_softc *sc) 1936 { 1937 1938 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 1939 I40E_PFINT_DYN_CTL0_INTENA_MASK | 1940 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 1941 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 1942 ixl_flush(sc); 1943 } 1944 1945 static void 1946 ixl_disable_other_intr(struct ixl_softc *sc) 1947 { 1948 1949 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 1950 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 1951 ixl_flush(sc); 1952 } 1953 1954 static int 1955 ixl_reinit(struct ixl_softc *sc) 1956 { 1957 struct ixl_rx_ring *rxr; 1958 struct ixl_tx_ring *txr; 1959 unsigned int i; 1960 uint32_t reg; 1961 1962 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 1963 1964 if (ixl_get_vsi(sc) != 0) 1965 return EIO; 1966 1967 if (ixl_set_vsi(sc) != 0) 1968 return EIO; 1969 1970 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 1971 txr = sc->sc_qps[i].qp_txr; 1972 rxr = sc->sc_qps[i].qp_rxr; 1973 1974 ixl_txr_config(sc, txr); 1975 ixl_rxr_config(sc, rxr); 1976 } 1977 1978 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 1979 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE); 1980 1981 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 1982 txr = sc->sc_qps[i].qp_txr; 1983 rxr = sc->sc_qps[i].qp_rxr; 1984 1985 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE | 1986 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)); 1987 ixl_flush(sc); 1988 1989 ixl_wr(sc, txr->txr_tail, txr->txr_prod); 1990 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod); 1991 1992 /* ixl_rxfill() needs lock held */ 1993 mutex_enter(&rxr->rxr_lock); 1994 ixl_rxfill(sc, rxr); 1995 mutex_exit(&rxr->rxr_lock); 1996 1997 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 1998 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK); 1999 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2000 if (ixl_rxr_enabled(sc, rxr) != 0) 2001 goto stop; 2002 2003 ixl_txr_qdis(sc, txr, 1); 2004 2005 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2006 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2007 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2008 2009 if (ixl_txr_enabled(sc, txr) != 0) 2010 goto stop; 
2011 } 2012 2013 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2014 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2015 2016 return 0; 2017 2018 stop: 2019 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2020 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2021 2022 return ETIMEDOUT; 2023 } 2024 2025 static int 2026 ixl_init_locked(struct ixl_softc *sc) 2027 { 2028 struct ifnet *ifp = &sc->sc_ec.ec_if; 2029 unsigned int i; 2030 int error, eccap_change; 2031 2032 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2033 2034 if (ISSET(ifp->if_flags, IFF_RUNNING)) 2035 ixl_stop_locked(sc); 2036 2037 if (sc->sc_dead) { 2038 return ENXIO; 2039 } 2040 2041 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable; 2042 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING)) 2043 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 2044 2045 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) { 2046 if (ixl_update_macvlan(sc) == 0) { 2047 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 2048 } else { 2049 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 2050 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 2051 } 2052 } 2053 2054 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX) 2055 sc->sc_nqueue_pairs = 1; 2056 else 2057 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 2058 2059 error = ixl_reinit(sc); 2060 if (error) { 2061 ixl_stop_locked(sc); 2062 return error; 2063 } 2064 2065 SET(ifp->if_flags, IFF_RUNNING); 2066 CLR(ifp->if_flags, IFF_OACTIVE); 2067 2068 ixl_config_rss(sc); 2069 ixl_config_queue_intr(sc); 2070 2071 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2072 ixl_enable_queue_intr(sc, &sc->sc_qps[i]); 2073 } 2074 2075 error = ixl_iff(sc); 2076 if (error) { 2077 ixl_stop_locked(sc); 2078 return error; 2079 } 2080 2081 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 2082 2083 return 0; 2084 } 2085 2086 static int 2087 ixl_init(struct ifnet *ifp) 2088 { 2089 struct ixl_softc *sc = ifp->if_softc; 2090 int error; 2091 2092 mutex_enter(&sc->sc_cfg_lock); 2093 error = ixl_init_locked(sc); 2094 mutex_exit(&sc->sc_cfg_lock); 2095 2096 if (error == 0) 2097 (void)ixl_get_link_status(sc); 2098 2099 return error; 2100 } 2101 2102 static int 2103 ixl_iff(struct ixl_softc *sc) 2104 { 2105 struct ifnet *ifp = &sc->sc_ec.ec_if; 2106 struct ixl_atq iatq; 2107 struct ixl_aq_desc *iaq; 2108 struct ixl_aq_vsi_promisc_param *param; 2109 uint16_t flag_add, flag_del; 2110 int error; 2111 2112 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 2113 return 0; 2114 2115 memset(&iatq, 0, sizeof(iatq)); 2116 2117 iaq = &iatq.iatq_desc; 2118 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC); 2119 2120 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param; 2121 param->flags = htole16(0); 2122 2123 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER) 2124 || ISSET(ifp->if_flags, IFF_PROMISC)) { 2125 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2126 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2127 } 2128 2129 if (ISSET(ifp->if_flags, IFF_PROMISC)) { 2130 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2131 IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2132 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) { 2133 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2134 } 2135 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2136 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2137 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2138 param->seid = sc->sc_seid; 2139 2140 error = ixl_atq_exec(sc, &iatq); 2141 if (error) 2142 return error; 2143 2144 if 
(iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) 2145 return EIO; 2146 2147 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) { 2148 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 2149 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH; 2150 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH; 2151 } else { 2152 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN; 2153 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN; 2154 } 2155 2156 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del); 2157 2158 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 2159 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add); 2160 } 2161 return 0; 2162 } 2163 2164 static void 2165 ixl_stop_locked(struct ixl_softc *sc) 2166 { 2167 struct ifnet *ifp = &sc->sc_ec.ec_if; 2168 struct ixl_rx_ring *rxr; 2169 struct ixl_tx_ring *txr; 2170 unsigned int i; 2171 uint32_t reg; 2172 2173 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2174 2175 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE); 2176 callout_stop(&sc->sc_stats_callout); 2177 2178 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2179 txr = sc->sc_qps[i].qp_txr; 2180 rxr = sc->sc_qps[i].qp_rxr; 2181 2182 ixl_disable_queue_intr(sc, &sc->sc_qps[i]); 2183 2184 mutex_enter(&txr->txr_lock); 2185 ixl_txr_qdis(sc, txr, 0); 2186 mutex_exit(&txr->txr_lock); 2187 } 2188 2189 /* XXX wait at least 400 usec for all tx queues in one go */ 2190 ixl_flush(sc); 2191 DELAY(500); 2192 2193 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2194 txr = sc->sc_qps[i].qp_txr; 2195 rxr = sc->sc_qps[i].qp_rxr; 2196 2197 mutex_enter(&txr->txr_lock); 2198 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2199 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2200 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2201 mutex_exit(&txr->txr_lock); 2202 2203 mutex_enter(&rxr->rxr_lock); 2204 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2205 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2206 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2207 mutex_exit(&rxr->rxr_lock); 2208 } 2209 2210 /* XXX short wait for all queue disables to settle */ 2211 ixl_flush(sc); 2212 DELAY(50); 2213 2214 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2215 txr = sc->sc_qps[i].qp_txr; 2216 rxr = sc->sc_qps[i].qp_rxr; 2217 2218 mutex_enter(&txr->txr_lock); 2219 if (ixl_txr_disabled(sc, txr) != 0) { 2220 mutex_exit(&txr->txr_lock); 2221 goto die; 2222 } 2223 mutex_exit(&txr->txr_lock); 2224 2225 mutex_enter(&rxr->rxr_lock); 2226 if (ixl_rxr_disabled(sc, rxr) != 0) { 2227 mutex_exit(&rxr->rxr_lock); 2228 goto die; 2229 } 2230 mutex_exit(&rxr->rxr_lock); 2231 } 2232 2233 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2234 sc->sc_qps[i].qp_workqueue = false; 2235 workqueue_wait(sc->sc_workq_txrx, 2236 &sc->sc_qps[i].qp_work); 2237 } 2238 2239 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2240 txr = sc->sc_qps[i].qp_txr; 2241 rxr = sc->sc_qps[i].qp_rxr; 2242 2243 mutex_enter(&txr->txr_lock); 2244 ixl_txr_unconfig(sc, txr); 2245 mutex_exit(&txr->txr_lock); 2246 2247 mutex_enter(&rxr->rxr_lock); 2248 ixl_rxr_unconfig(sc, rxr); 2249 mutex_exit(&rxr->rxr_lock); 2250 2251 ixl_txr_clean(sc, txr); 2252 ixl_rxr_clean(sc, rxr); 2253 } 2254 2255 return; 2256 die: 2257 sc->sc_dead = true; 2258 log(LOG_CRIT, "%s: failed to shut down rings", 2259 device_xname(sc->sc_dev)); 2260 return; 2261 } 2262 2263 static void 2264 ixl_stop(struct ifnet *ifp, int disable) 2265 { 2266 struct ixl_softc *sc = ifp->if_softc; 2267 2268 mutex_enter(&sc->sc_cfg_lock); 2269 ixl_stop_locked(sc); 2270 mutex_exit(&sc->sc_cfg_lock); 2271 } 2272 2273 static int 2274 ixl_queue_pairs_alloc(struct ixl_softc *sc) 2275 { 2276 struct 
ixl_queue_pair *qp; 2277 unsigned int i; 2278 size_t sz; 2279 2280 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2281 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP); 2282 2283 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2284 qp = &sc->sc_qps[i]; 2285 2286 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2287 ixl_handle_queue, qp); 2288 if (qp->qp_si == NULL) 2289 goto free; 2290 2291 qp->qp_txr = ixl_txr_alloc(sc, i); 2292 if (qp->qp_txr == NULL) 2293 goto free; 2294 2295 qp->qp_rxr = ixl_rxr_alloc(sc, i); 2296 if (qp->qp_rxr == NULL) 2297 goto free; 2298 2299 qp->qp_sc = sc; 2300 snprintf(qp->qp_name, sizeof(qp->qp_name), 2301 "%s-TXRX%d", device_xname(sc->sc_dev), i); 2302 } 2303 2304 return 0; 2305 free: 2306 if (sc->sc_qps != NULL) { 2307 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2308 qp = &sc->sc_qps[i]; 2309 2310 if (qp->qp_txr != NULL) 2311 ixl_txr_free(sc, qp->qp_txr); 2312 if (qp->qp_rxr != NULL) 2313 ixl_rxr_free(sc, qp->qp_rxr); 2314 if (qp->qp_si != NULL) 2315 softint_disestablish(qp->qp_si); 2316 } 2317 2318 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2319 kmem_free(sc->sc_qps, sz); 2320 sc->sc_qps = NULL; 2321 } 2322 2323 return -1; 2324 } 2325 2326 static void 2327 ixl_queue_pairs_free(struct ixl_softc *sc) 2328 { 2329 struct ixl_queue_pair *qp; 2330 unsigned int i; 2331 size_t sz; 2332 2333 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2334 qp = &sc->sc_qps[i]; 2335 ixl_txr_free(sc, qp->qp_txr); 2336 ixl_rxr_free(sc, qp->qp_rxr); 2337 softint_disestablish(qp->qp_si); 2338 } 2339 2340 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2341 kmem_free(sc->sc_qps, sz); 2342 sc->sc_qps = NULL; 2343 } 2344 2345 static struct ixl_tx_ring * 2346 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid) 2347 { 2348 struct ixl_tx_ring *txr = NULL; 2349 struct ixl_tx_map *maps = NULL, *txm; 2350 unsigned int i; 2351 2352 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP); 2353 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs, 2354 KM_SLEEP); 2355 2356 if (ixl_dmamem_alloc(sc, &txr->txr_mem, 2357 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs, 2358 IXL_TX_QUEUE_ALIGN) != 0) 2359 goto free; 2360 2361 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2362 txm = &maps[i]; 2363 2364 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE, 2365 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0, 2366 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0) 2367 goto uncreate; 2368 2369 txm->txm_eop = -1; 2370 txm->txm_m = NULL; 2371 } 2372 2373 txr->txr_cons = txr->txr_prod = 0; 2374 txr->txr_maps = maps; 2375 2376 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP); 2377 if (txr->txr_intrq == NULL) 2378 goto uncreate; 2379 2380 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2381 ixl_deferred_transmit, txr); 2382 if (txr->txr_si == NULL) 2383 goto destroy_pcq; 2384 2385 txr->txr_tail = I40E_QTX_TAIL(qid); 2386 txr->txr_qid = qid; 2387 txr->txr_sc = sc; 2388 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET); 2389 2390 return txr; 2391 2392 destroy_pcq: 2393 pcq_destroy(txr->txr_intrq); 2394 uncreate: 2395 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2396 txm = &maps[i]; 2397 2398 if (txm->txm_map == NULL) 2399 continue; 2400 2401 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2402 } 2403 2404 ixl_dmamem_free(sc, &txr->txr_mem); 2405 free: 2406 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2407 kmem_free(txr, sizeof(*txr)); 2408 2409 return NULL; 2410 } 2411 2412 static void 2413 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring 
*txr, int enable) 2414 { 2415 unsigned int qid; 2416 bus_size_t reg; 2417 uint32_t r; 2418 2419 qid = txr->txr_qid + sc->sc_base_queue; 2420 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128); 2421 qid %= 128; 2422 2423 r = ixl_rd(sc, reg); 2424 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK); 2425 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 2426 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK : 2427 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK); 2428 ixl_wr(sc, reg, r); 2429 } 2430 2431 static void 2432 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2433 { 2434 struct ixl_hmc_txq txq; 2435 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch); 2436 void *hmc; 2437 2438 memset(&txq, 0, sizeof(txq)); 2439 txq.head = htole16(txr->txr_cons); 2440 txq.new_context = 1; 2441 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT); 2442 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB; 2443 txq.qlen = htole16(sc->sc_tx_ring_ndescs); 2444 txq.tphrdesc_ena = 0; 2445 txq.tphrpacket_ena = 0; 2446 txq.tphwdesc_ena = 0; 2447 txq.rdylist = data->qs_handle[0]; 2448 2449 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2450 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2451 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, 2452 __arraycount(ixl_hmc_pack_txq)); 2453 } 2454 2455 static void 2456 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2457 { 2458 void *hmc; 2459 2460 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2461 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2462 txr->txr_cons = txr->txr_prod = 0; 2463 } 2464 2465 static void 2466 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2467 { 2468 struct ixl_tx_map *maps, *txm; 2469 bus_dmamap_t map; 2470 unsigned int i; 2471 2472 maps = txr->txr_maps; 2473 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2474 txm = &maps[i]; 2475 2476 if (txm->txm_m == NULL) 2477 continue; 2478 2479 map = txm->txm_map; 2480 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2481 BUS_DMASYNC_POSTWRITE); 2482 bus_dmamap_unload(sc->sc_dmat, map); 2483 2484 m_freem(txm->txm_m); 2485 txm->txm_m = NULL; 2486 } 2487 } 2488 2489 static int 2490 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2491 { 2492 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2493 uint32_t reg; 2494 int i; 2495 2496 for (i = 0; i < 10; i++) { 2497 reg = ixl_rd(sc, ena); 2498 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK)) 2499 return 0; 2500 2501 delaymsec(10); 2502 } 2503 2504 return ETIMEDOUT; 2505 } 2506 2507 static int 2508 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2509 { 2510 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2511 uint32_t reg; 2512 int i; 2513 2514 KASSERT(mutex_owned(&txr->txr_lock)); 2515 2516 for (i = 0; i < 10; i++) { 2517 reg = ixl_rd(sc, ena); 2518 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0) 2519 return 0; 2520 2521 delaymsec(10); 2522 } 2523 2524 return ETIMEDOUT; 2525 } 2526 2527 static void 2528 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2529 { 2530 struct ixl_tx_map *maps, *txm; 2531 struct mbuf *m; 2532 unsigned int i; 2533 2534 softint_disestablish(txr->txr_si); 2535 while ((m = pcq_get(txr->txr_intrq)) != NULL) 2536 m_freem(m); 2537 pcq_destroy(txr->txr_intrq); 2538 2539 maps = txr->txr_maps; 2540 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2541 txm = &maps[i]; 2542 2543 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2544 } 2545 2546 ixl_dmamem_free(sc, &txr->txr_mem); 2547 mutex_destroy(&txr->txr_lock); 2548 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2549 
kmem_free(txr, sizeof(*txr)); 2550 } 2551 2552 static inline int 2553 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0, 2554 struct ixl_tx_ring *txr) 2555 { 2556 struct mbuf *m; 2557 int error; 2558 2559 KASSERT(mutex_owned(&txr->txr_lock)); 2560 2561 m = *m0; 2562 2563 error = bus_dmamap_load_mbuf(dmat, map, m, 2564 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2565 if (error != EFBIG) 2566 return error; 2567 2568 m = m_defrag(m, M_DONTWAIT); 2569 if (m != NULL) { 2570 *m0 = m; 2571 txr->txr_defragged.ev_count++; 2572 2573 error = bus_dmamap_load_mbuf(dmat, map, m, 2574 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2575 } else { 2576 txr->txr_defrag_failed.ev_count++; 2577 error = ENOBUFS; 2578 } 2579 2580 return error; 2581 } 2582 2583 static inline int 2584 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd) 2585 { 2586 struct ether_header *eh; 2587 size_t len; 2588 uint64_t cmd; 2589 2590 cmd = 0; 2591 2592 eh = mtod(m, struct ether_header *); 2593 switch (htons(eh->ether_type)) { 2594 case ETHERTYPE_IP: 2595 case ETHERTYPE_IPV6: 2596 len = ETHER_HDR_LEN; 2597 break; 2598 case ETHERTYPE_VLAN: 2599 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2600 break; 2601 default: 2602 len = 0; 2603 } 2604 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT); 2605 2606 if (m->m_pkthdr.csum_flags & 2607 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 2608 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4; 2609 } 2610 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) { 2611 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM; 2612 } 2613 2614 if (m->m_pkthdr.csum_flags & 2615 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 2616 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6; 2617 } 2618 2619 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) { 2620 case IXL_TX_DESC_CMD_IIPT_IPV4: 2621 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM: 2622 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2623 break; 2624 case IXL_TX_DESC_CMD_IIPT_IPV6: 2625 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2626 break; 2627 default: 2628 len = 0; 2629 } 2630 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT); 2631 2632 if (m->m_pkthdr.csum_flags & 2633 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) { 2634 len = sizeof(struct tcphdr); 2635 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP; 2636 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) { 2637 len = sizeof(struct udphdr); 2638 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP; 2639 } else { 2640 len = 0; 2641 } 2642 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT); 2643 2644 *cmd_txd |= cmd; 2645 return 0; 2646 } 2647 2648 static void 2649 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr, 2650 bool is_transmit) 2651 { 2652 struct ixl_softc *sc = ifp->if_softc; 2653 struct ixl_tx_desc *ring, *txd; 2654 struct ixl_tx_map *txm; 2655 bus_dmamap_t map; 2656 struct mbuf *m; 2657 uint64_t cmd, cmd_txd; 2658 unsigned int prod, free, last, i; 2659 unsigned int mask; 2660 int post = 0; 2661 2662 KASSERT(mutex_owned(&txr->txr_lock)); 2663 2664 if (!ISSET(ifp->if_flags, IFF_RUNNING) 2665 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) { 2666 if (!is_transmit) 2667 IFQ_PURGE(&ifp->if_snd); 2668 return; 2669 } 2670 2671 prod = txr->txr_prod; 2672 free = txr->txr_cons; 2673 if (free <= prod) 2674 free += sc->sc_tx_ring_ndescs; 2675 free -= prod; 2676 2677 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2678 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE); 2679 2680 ring = IXL_DMA_KVA(&txr->txr_mem); 2681 mask = sc->sc_tx_ring_ndescs - 1; 2682 last = prod; 2683 cmd = 0; 2684 txd = 
NULL; 2685 2686 for (;;) { 2687 if (free <= IXL_TX_PKT_DESCS) { 2688 if (!is_transmit) 2689 SET(ifp->if_flags, IFF_OACTIVE); 2690 break; 2691 } 2692 2693 if (is_transmit) 2694 m = pcq_get(txr->txr_intrq); 2695 else 2696 IFQ_DEQUEUE(&ifp->if_snd, m); 2697 2698 if (m == NULL) 2699 break; 2700 2701 txm = &txr->txr_maps[prod]; 2702 map = txm->txm_map; 2703 2704 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) { 2705 if_statinc(ifp, if_oerrors); 2706 m_freem(m); 2707 continue; 2708 } 2709 2710 cmd_txd = 0; 2711 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) { 2712 ixl_tx_setup_offloads(m, &cmd_txd); 2713 } 2714 2715 if (vlan_has_tag(m)) { 2716 cmd_txd |= (uint64_t)vlan_get_tag(m) << 2717 IXL_TX_DESC_L2TAG1_SHIFT; 2718 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1; 2719 } 2720 2721 bus_dmamap_sync(sc->sc_dmat, map, 0, 2722 map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2723 2724 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) { 2725 txd = &ring[prod]; 2726 2727 cmd = (uint64_t)map->dm_segs[i].ds_len << 2728 IXL_TX_DESC_BSIZE_SHIFT; 2729 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC; 2730 cmd |= cmd_txd; 2731 2732 txd->addr = htole64(map->dm_segs[i].ds_addr); 2733 txd->cmd = htole64(cmd); 2734 2735 last = prod; 2736 2737 prod++; 2738 prod &= mask; 2739 } 2740 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS; 2741 txd->cmd = htole64(cmd); 2742 2743 txm->txm_m = m; 2744 txm->txm_eop = last; 2745 2746 bpf_mtap(ifp, m, BPF_D_OUT); 2747 2748 free -= i; 2749 post = 1; 2750 } 2751 2752 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2753 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE); 2754 2755 if (post) { 2756 txr->txr_prod = prod; 2757 ixl_wr(sc, txr->txr_tail, prod); 2758 } 2759 } 2760 2761 static int 2762 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit) 2763 { 2764 struct ifnet *ifp = &sc->sc_ec.ec_if; 2765 struct ixl_tx_desc *ring, *txd; 2766 struct ixl_tx_map *txm; 2767 struct mbuf *m; 2768 bus_dmamap_t map; 2769 unsigned int cons, prod, last; 2770 unsigned int mask; 2771 uint64_t dtype; 2772 int done = 0, more = 0; 2773 2774 KASSERT(mutex_owned(&txr->txr_lock)); 2775 2776 prod = txr->txr_prod; 2777 cons = txr->txr_cons; 2778 2779 if (cons == prod) 2780 return 0; 2781 2782 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2783 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD); 2784 2785 ring = IXL_DMA_KVA(&txr->txr_mem); 2786 mask = sc->sc_tx_ring_ndescs - 1; 2787 2788 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2789 2790 do { 2791 if (txlimit-- <= 0) { 2792 more = 1; 2793 break; 2794 } 2795 2796 txm = &txr->txr_maps[cons]; 2797 last = txm->txm_eop; 2798 txd = &ring[last]; 2799 2800 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK); 2801 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE)) 2802 break; 2803 2804 map = txm->txm_map; 2805 2806 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2807 BUS_DMASYNC_POSTWRITE); 2808 bus_dmamap_unload(sc->sc_dmat, map); 2809 2810 m = txm->txm_m; 2811 if (m != NULL) { 2812 if_statinc_ref(nsr, if_opackets); 2813 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len); 2814 if (ISSET(m->m_flags, M_MCAST)) 2815 if_statinc_ref(nsr, if_omcasts); 2816 m_freem(m); 2817 } 2818 2819 txm->txm_m = NULL; 2820 txm->txm_eop = -1; 2821 2822 cons = last + 1; 2823 cons &= mask; 2824 done = 1; 2825 } while (cons != prod); 2826 2827 IF_STAT_PUTREF(ifp); 2828 2829 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2830 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD); 2831 2832 txr->txr_cons = cons; 2833 2834 if (done) { 2835 
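		/*
		 * Some descriptors were reclaimed: kick the deferred
		 * transmit softint, and let queue 0 restart the
		 * interface send queue.
		 */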
softint_schedule(txr->txr_si); 2836 if (txr->txr_qid == 0) { 2837 CLR(ifp->if_flags, IFF_OACTIVE); 2838 if_schedule_deferred_start(ifp); 2839 } 2840 } 2841 2842 return more; 2843 } 2844 2845 static void 2846 ixl_start(struct ifnet *ifp) 2847 { 2848 struct ixl_softc *sc; 2849 struct ixl_tx_ring *txr; 2850 2851 sc = ifp->if_softc; 2852 txr = sc->sc_qps[0].qp_txr; 2853 2854 mutex_enter(&txr->txr_lock); 2855 ixl_tx_common_locked(ifp, txr, false); 2856 mutex_exit(&txr->txr_lock); 2857 } 2858 2859 static inline unsigned int 2860 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m) 2861 { 2862 u_int cpuid; 2863 2864 cpuid = cpu_index(curcpu()); 2865 2866 return (unsigned int)(cpuid % sc->sc_nqueue_pairs); 2867 } 2868 2869 static int 2870 ixl_transmit(struct ifnet *ifp, struct mbuf *m) 2871 { 2872 struct ixl_softc *sc; 2873 struct ixl_tx_ring *txr; 2874 unsigned int qid; 2875 2876 sc = ifp->if_softc; 2877 qid = ixl_select_txqueue(sc, m); 2878 2879 txr = sc->sc_qps[qid].qp_txr; 2880 2881 if (__predict_false(!pcq_put(txr->txr_intrq, m))) { 2882 mutex_enter(&txr->txr_lock); 2883 txr->txr_pcqdrop.ev_count++; 2884 mutex_exit(&txr->txr_lock); 2885 2886 m_freem(m); 2887 return ENOBUFS; 2888 } 2889 2890 if (mutex_tryenter(&txr->txr_lock)) { 2891 ixl_tx_common_locked(ifp, txr, true); 2892 mutex_exit(&txr->txr_lock); 2893 } else { 2894 kpreempt_disable(); 2895 softint_schedule(txr->txr_si); 2896 kpreempt_enable(); 2897 } 2898 2899 return 0; 2900 } 2901 2902 static void 2903 ixl_deferred_transmit(void *xtxr) 2904 { 2905 struct ixl_tx_ring *txr = xtxr; 2906 struct ixl_softc *sc = txr->txr_sc; 2907 struct ifnet *ifp = &sc->sc_ec.ec_if; 2908 2909 mutex_enter(&txr->txr_lock); 2910 txr->txr_transmitdef.ev_count++; 2911 if (pcq_peek(txr->txr_intrq) != NULL) 2912 ixl_tx_common_locked(ifp, txr, true); 2913 mutex_exit(&txr->txr_lock); 2914 } 2915 2916 static struct ixl_rx_ring * 2917 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid) 2918 { 2919 struct ixl_rx_ring *rxr = NULL; 2920 struct ixl_rx_map *maps = NULL, *rxm; 2921 unsigned int i; 2922 2923 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP); 2924 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs, 2925 KM_SLEEP); 2926 2927 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem, 2928 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs, 2929 IXL_RX_QUEUE_ALIGN) != 0) 2930 goto free; 2931 2932 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 2933 rxm = &maps[i]; 2934 2935 if (bus_dmamap_create(sc->sc_dmat, 2936 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0, 2937 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0) 2938 goto uncreate; 2939 2940 rxm->rxm_m = NULL; 2941 } 2942 2943 rxr->rxr_cons = rxr->rxr_prod = 0; 2944 rxr->rxr_m_head = NULL; 2945 rxr->rxr_m_tail = &rxr->rxr_m_head; 2946 rxr->rxr_maps = maps; 2947 2948 rxr->rxr_tail = I40E_QRX_TAIL(qid); 2949 rxr->rxr_qid = qid; 2950 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET); 2951 2952 return rxr; 2953 2954 uncreate: 2955 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 2956 rxm = &maps[i]; 2957 2958 if (rxm->rxm_map == NULL) 2959 continue; 2960 2961 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 2962 } 2963 2964 ixl_dmamem_free(sc, &rxr->rxr_mem); 2965 free: 2966 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 2967 kmem_free(rxr, sizeof(*rxr)); 2968 2969 return NULL; 2970 } 2971 2972 static void 2973 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 2974 { 2975 struct ixl_rx_map *maps, *rxm; 2976 bus_dmamap_t map; 2977 unsigned int i; 2978 2979 maps = rxr->rxr_maps; 2980 for (i = 0; i < 
sc->sc_rx_ring_ndescs; i++) { 2981 rxm = &maps[i]; 2982 2983 if (rxm->rxm_m == NULL) 2984 continue; 2985 2986 map = rxm->rxm_map; 2987 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2988 BUS_DMASYNC_POSTWRITE); 2989 bus_dmamap_unload(sc->sc_dmat, map); 2990 2991 m_freem(rxm->rxm_m); 2992 rxm->rxm_m = NULL; 2993 } 2994 2995 m_freem(rxr->rxr_m_head); 2996 rxr->rxr_m_head = NULL; 2997 rxr->rxr_m_tail = &rxr->rxr_m_head; 2998 2999 rxr->rxr_prod = rxr->rxr_cons = 0; 3000 } 3001 3002 static int 3003 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3004 { 3005 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3006 uint32_t reg; 3007 int i; 3008 3009 for (i = 0; i < 10; i++) { 3010 reg = ixl_rd(sc, ena); 3011 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK)) 3012 return 0; 3013 3014 delaymsec(10); 3015 } 3016 3017 return ETIMEDOUT; 3018 } 3019 3020 static int 3021 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3022 { 3023 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3024 uint32_t reg; 3025 int i; 3026 3027 KASSERT(mutex_owned(&rxr->rxr_lock)); 3028 3029 for (i = 0; i < 10; i++) { 3030 reg = ixl_rd(sc, ena); 3031 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0) 3032 return 0; 3033 3034 delaymsec(10); 3035 } 3036 3037 return ETIMEDOUT; 3038 } 3039 3040 static void 3041 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3042 { 3043 struct ixl_hmc_rxq rxq; 3044 struct ifnet *ifp = &sc->sc_ec.ec_if; 3045 uint16_t rxmax; 3046 void *hmc; 3047 3048 memset(&rxq, 0, sizeof(rxq)); 3049 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN; 3050 3051 rxq.head = htole16(rxr->rxr_cons); 3052 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT); 3053 rxq.qlen = htole16(sc->sc_rx_ring_ndescs); 3054 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT); 3055 rxq.hbuff = 0; 3056 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT; 3057 rxq.dsize = IXL_HMC_RXQ_DSIZE_32; 3058 rxq.crcstrip = 1; 3059 rxq.l2sel = 1; 3060 rxq.showiv = 1; 3061 rxq.rxmax = htole16(rxmax); 3062 rxq.tphrdesc_ena = 0; 3063 rxq.tphwdesc_ena = 0; 3064 rxq.tphdata_ena = 0; 3065 rxq.tphhead_ena = 0; 3066 rxq.lrxqthresh = 0; 3067 rxq.prefena = 1; 3068 3069 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3070 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3071 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, 3072 __arraycount(ixl_hmc_pack_rxq)); 3073 } 3074 3075 static void 3076 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3077 { 3078 void *hmc; 3079 3080 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3081 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3082 rxr->rxr_cons = rxr->rxr_prod = 0; 3083 } 3084 3085 static void 3086 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3087 { 3088 struct ixl_rx_map *maps, *rxm; 3089 unsigned int i; 3090 3091 maps = rxr->rxr_maps; 3092 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3093 rxm = &maps[i]; 3094 3095 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3096 } 3097 3098 ixl_dmamem_free(sc, &rxr->rxr_mem); 3099 mutex_destroy(&rxr->rxr_lock); 3100 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3101 kmem_free(rxr, sizeof(*rxr)); 3102 } 3103 3104 static inline void 3105 ixl_rx_csum(struct mbuf *m, uint64_t qword) 3106 { 3107 int flags_mask; 3108 3109 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) { 3110 /* No L3 or L4 checksum was calculated */ 3111 return; 3112 } 3113 3114 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) { 3115 case IXL_RX_DESC_PTYPE_IPV4FRAG: 3116 case IXL_RX_DESC_PTYPE_IPV4: 3117 case IXL_RX_DESC_PTYPE_SCTPV4: 3118 
case IXL_RX_DESC_PTYPE_ICMPV4: 3119 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3120 break; 3121 case IXL_RX_DESC_PTYPE_TCPV4: 3122 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3123 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD; 3124 break; 3125 case IXL_RX_DESC_PTYPE_UDPV4: 3126 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3127 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD; 3128 break; 3129 case IXL_RX_DESC_PTYPE_TCPV6: 3130 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD; 3131 break; 3132 case IXL_RX_DESC_PTYPE_UDPV6: 3133 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD; 3134 break; 3135 default: 3136 flags_mask = 0; 3137 } 3138 3139 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 | 3140 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)); 3141 3142 if (ISSET(qword, IXL_RX_DESC_IPE)) { 3143 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD); 3144 } 3145 3146 if (ISSET(qword, IXL_RX_DESC_L4E)) { 3147 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD); 3148 } 3149 } 3150 3151 static int 3152 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit) 3153 { 3154 struct ifnet *ifp = &sc->sc_ec.ec_if; 3155 struct ixl_rx_wb_desc_32 *ring, *rxd; 3156 struct ixl_rx_map *rxm; 3157 bus_dmamap_t map; 3158 unsigned int cons, prod; 3159 struct mbuf *m; 3160 uint64_t word, word0; 3161 unsigned int len; 3162 unsigned int mask; 3163 int done = 0, more = 0; 3164 3165 KASSERT(mutex_owned(&rxr->rxr_lock)); 3166 3167 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 3168 return 0; 3169 3170 prod = rxr->rxr_prod; 3171 cons = rxr->rxr_cons; 3172 3173 if (cons == prod) 3174 return 0; 3175 3176 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3177 0, IXL_DMA_LEN(&rxr->rxr_mem), 3178 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3179 3180 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3181 mask = sc->sc_rx_ring_ndescs - 1; 3182 3183 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 3184 3185 do { 3186 if (rxlimit-- <= 0) { 3187 more = 1; 3188 break; 3189 } 3190 3191 rxd = &ring[cons]; 3192 3193 word = le64toh(rxd->qword1); 3194 3195 if (!ISSET(word, IXL_RX_DESC_DD)) 3196 break; 3197 3198 rxm = &rxr->rxr_maps[cons]; 3199 3200 map = rxm->rxm_map; 3201 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3202 BUS_DMASYNC_POSTREAD); 3203 bus_dmamap_unload(sc->sc_dmat, map); 3204 3205 m = rxm->rxm_m; 3206 rxm->rxm_m = NULL; 3207 3208 KASSERT(m != NULL); 3209 3210 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT; 3211 m->m_len = len; 3212 m->m_pkthdr.len = 0; 3213 3214 m->m_next = NULL; 3215 *rxr->rxr_m_tail = m; 3216 rxr->rxr_m_tail = &m->m_next; 3217 3218 m = rxr->rxr_m_head; 3219 m->m_pkthdr.len += len; 3220 3221 if (ISSET(word, IXL_RX_DESC_EOP)) { 3222 word0 = le64toh(rxd->qword0); 3223 3224 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) { 3225 vlan_set_tag(m, 3226 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK)); 3227 } 3228 3229 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0) 3230 ixl_rx_csum(m, word); 3231 3232 if (!ISSET(word, 3233 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) { 3234 m_set_rcvif(m, ifp); 3235 if_statinc_ref(nsr, if_ipackets); 3236 if_statadd_ref(nsr, if_ibytes, 3237 m->m_pkthdr.len); 3238 if_percpuq_enqueue(sc->sc_ipq, m); 3239 } else { 3240 if_statinc_ref(nsr, if_ierrors); 3241 m_freem(m); 3242 } 3243 3244 rxr->rxr_m_head = NULL; 3245 rxr->rxr_m_tail = &rxr->rxr_m_head; 3246 } 3247 3248 cons++; 3249 cons &= mask; 3250 3251 done = 1; 3252 } while (cons != prod); 3253 3254 if (done) { 3255 rxr->rxr_cons = cons; 3256 if (ixl_rxfill(sc, rxr) == -1) 3257 if_statinc_ref(nsr, 
if_iqdrops); 3258 } 3259 3260 IF_STAT_PUTREF(ifp); 3261 3262 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3263 0, IXL_DMA_LEN(&rxr->rxr_mem), 3264 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3265 3266 return more; 3267 } 3268 3269 static int 3270 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3271 { 3272 struct ixl_rx_rd_desc_32 *ring, *rxd; 3273 struct ixl_rx_map *rxm; 3274 bus_dmamap_t map; 3275 struct mbuf *m; 3276 unsigned int prod; 3277 unsigned int slots; 3278 unsigned int mask; 3279 int post = 0, error = 0; 3280 3281 KASSERT(mutex_owned(&rxr->rxr_lock)); 3282 3283 prod = rxr->rxr_prod; 3284 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons, 3285 sc->sc_rx_ring_ndescs); 3286 3287 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3288 mask = sc->sc_rx_ring_ndescs - 1; 3289 3290 if (__predict_false(slots <= 0)) 3291 return -1; 3292 3293 do { 3294 rxm = &rxr->rxr_maps[prod]; 3295 3296 MGETHDR(m, M_DONTWAIT, MT_DATA); 3297 if (m == NULL) { 3298 rxr->rxr_mgethdr_failed.ev_count++; 3299 error = -1; 3300 break; 3301 } 3302 3303 MCLGET(m, M_DONTWAIT); 3304 if (!ISSET(m->m_flags, M_EXT)) { 3305 rxr->rxr_mgetcl_failed.ev_count++; 3306 error = -1; 3307 m_freem(m); 3308 break; 3309 } 3310 3311 m->m_len = m->m_pkthdr.len = MCLBYTES; 3312 m_adj(m, ETHER_ALIGN); 3313 3314 map = rxm->rxm_map; 3315 3316 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 3317 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) { 3318 rxr->rxr_mbuf_load_failed.ev_count++; 3319 error = -1; 3320 m_freem(m); 3321 break; 3322 } 3323 3324 rxm->rxm_m = m; 3325 3326 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3327 BUS_DMASYNC_PREREAD); 3328 3329 rxd = &ring[prod]; 3330 3331 rxd->paddr = htole64(map->dm_segs[0].ds_addr); 3332 rxd->haddr = htole64(0); 3333 3334 prod++; 3335 prod &= mask; 3336 3337 post = 1; 3338 3339 } while (--slots); 3340 3341 if (post) { 3342 rxr->rxr_prod = prod; 3343 ixl_wr(sc, rxr->rxr_tail, prod); 3344 } 3345 3346 return error; 3347 } 3348 3349 static inline int 3350 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp, 3351 u_int txlimit, struct evcnt *txevcnt, 3352 u_int rxlimit, struct evcnt *rxevcnt) 3353 { 3354 struct ixl_tx_ring *txr = qp->qp_txr; 3355 struct ixl_rx_ring *rxr = qp->qp_rxr; 3356 int txmore, rxmore; 3357 int rv; 3358 3359 mutex_enter(&txr->txr_lock); 3360 txevcnt->ev_count++; 3361 txmore = ixl_txeof(sc, txr, txlimit); 3362 mutex_exit(&txr->txr_lock); 3363 3364 mutex_enter(&rxr->rxr_lock); 3365 rxevcnt->ev_count++; 3366 rxmore = ixl_rxeof(sc, rxr, rxlimit); 3367 mutex_exit(&rxr->rxr_lock); 3368 3369 rv = txmore | (rxmore << 1); 3370 3371 return rv; 3372 } 3373 3374 static void 3375 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp) 3376 { 3377 3378 if (qp->qp_workqueue) 3379 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL); 3380 else 3381 softint_schedule(qp->qp_si); 3382 } 3383 3384 static int 3385 ixl_intr(void *xsc) 3386 { 3387 struct ixl_softc *sc = xsc; 3388 struct ixl_tx_ring *txr; 3389 struct ixl_rx_ring *rxr; 3390 uint32_t icr, rxintr, txintr; 3391 int rv = 0; 3392 unsigned int i; 3393 3394 KASSERT(sc != NULL); 3395 3396 ixl_enable_other_intr(sc); 3397 icr = ixl_rd(sc, I40E_PFINT_ICR0); 3398 3399 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) { 3400 atomic_inc_64(&sc->sc_event_atq.ev_count); 3401 ixl_atq_done(sc); 3402 ixl_work_add(sc->sc_workq, &sc->sc_arq_task); 3403 rv = 1; 3404 } 3405 3406 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) { 3407 atomic_inc_64(&sc->sc_event_link.ev_count); 3408 
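		/* defer the link state query to the driver workqueue */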
		ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3409 		rv = 1;
3410 	}
3411 
3412 	rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3413 	txintr = icr & I40E_INTR_NOTX_TX_MASK;
3414 
3415 	if (txintr || rxintr) {
3416 		for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3417 			txr = sc->sc_qps[i].qp_txr;
3418 			rxr = sc->sc_qps[i].qp_rxr;
3419 
3420 			ixl_handle_queue_common(sc, &sc->sc_qps[i],
3421 			    IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3422 			    IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3423 		}
3424 		rv = 1;
3425 	}
3426 
3427 	return rv;
3428 }
3429 
3430 static int
3431 ixl_queue_intr(void *xqp)
3432 {
3433 	struct ixl_queue_pair *qp = xqp;
3434 	struct ixl_tx_ring *txr = qp->qp_txr;
3435 	struct ixl_rx_ring *rxr = qp->qp_rxr;
3436 	struct ixl_softc *sc = qp->qp_sc;
3437 	u_int txlimit, rxlimit;
3438 	int more;
3439 
3440 	txlimit = sc->sc_tx_intr_process_limit;
3441 	rxlimit = sc->sc_rx_intr_process_limit;
3442 	qp->qp_workqueue = sc->sc_txrx_workqueue;
3443 
3444 	more = ixl_handle_queue_common(sc, qp,
3445 	    txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3446 
3447 	if (more != 0) {
3448 		ixl_sched_handle_queue(sc, qp);
3449 	} else {
3450 		/* for ALTQ */
3451 		if (txr->txr_qid == 0)
3452 			if_schedule_deferred_start(&sc->sc_ec.ec_if);
3453 		softint_schedule(txr->txr_si);
3454 
3455 		ixl_enable_queue_intr(sc, qp);
3456 	}
3457 
3458 	return 1;
3459 }
3460 
3461 static void
3462 ixl_handle_queue_wk(struct work *wk, void *xsc)
3463 {
3464 	struct ixl_queue_pair *qp;
3465 
3466 	qp = container_of(wk, struct ixl_queue_pair, qp_work);
3467 	ixl_handle_queue(qp);
3468 }
3469 
3470 static void
3471 ixl_handle_queue(void *xqp)
3472 {
3473 	struct ixl_queue_pair *qp = xqp;
3474 	struct ixl_softc *sc = qp->qp_sc;
3475 	struct ixl_tx_ring *txr = qp->qp_txr;
3476 	struct ixl_rx_ring *rxr = qp->qp_rxr;
3477 	u_int txlimit, rxlimit;
3478 	int more;
3479 
3480 	txlimit = sc->sc_tx_process_limit;
3481 	rxlimit = sc->sc_rx_process_limit;
3482 
3483 	more = ixl_handle_queue_common(sc, qp,
3484 	    txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3485 
3486 	if (more != 0)
3487 		ixl_sched_handle_queue(sc, qp);
3488 	else
3489 		ixl_enable_queue_intr(sc, qp);
3490 }
3491 
3492 static inline void
3493 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3494 {
3495 	uint32_t hmc_idx, hmc_isvf;
3496 	uint32_t hmc_errtype, hmc_objtype, hmc_data;
3497 
3498 	hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3499 	hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3500 	hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3501 	hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3502 	hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3503 	hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3504 	hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3505 	hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3506 	hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3507 
3508 	device_printf(sc->sc_dev,
3509 	    "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3510 	    hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3511 }
3512 
3513 static int
3514 ixl_other_intr(void *xsc)
3515 {
3516 	struct ixl_softc *sc = xsc;
3517 	uint32_t icr, mask, reg;
3518 	int rv = 0;
3519 
3520 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
3521 	mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3522 
3523 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3524 		atomic_inc_64(&sc->sc_event_atq.ev_count);
3525 		ixl_atq_done(sc);
3526 		ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3527 		rv = 1;
3528 	}
3529 
3530 	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3531 		if
(ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3532 device_printf(sc->sc_dev, "link stat changed\n"); 3533 3534 atomic_inc_64(&sc->sc_event_link.ev_count); 3535 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task); 3536 rv = 1; 3537 } 3538 3539 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) { 3540 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK); 3541 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 3542 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK; 3543 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3544 3545 device_printf(sc->sc_dev, "GRST: %s\n", 3546 reg == I40E_RESET_CORER ? "CORER" : 3547 reg == I40E_RESET_GLOBR ? "GLOBR" : 3548 reg == I40E_RESET_EMPR ? "EMPR" : 3549 "POR"); 3550 } 3551 3552 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK)) 3553 atomic_inc_64(&sc->sc_event_ecc_err.ev_count); 3554 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)) 3555 atomic_inc_64(&sc->sc_event_pci_exception.ev_count); 3556 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK)) 3557 atomic_inc_64(&sc->sc_event_crit_err.ev_count); 3558 3559 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) { 3560 CLR(mask, IXL_ICR0_CRIT_ERR_MASK); 3561 device_printf(sc->sc_dev, "critical error\n"); 3562 } 3563 3564 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) { 3565 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO); 3566 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK)) 3567 ixl_print_hmc_error(sc, reg); 3568 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0); 3569 } 3570 3571 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask); 3572 ixl_flush(sc); 3573 ixl_enable_other_intr(sc); 3574 return rv; 3575 } 3576 3577 static void 3578 ixl_get_link_status_done(struct ixl_softc *sc, 3579 const struct ixl_aq_desc *iaq) 3580 { 3581 struct ixl_aq_desc iaq_buf; 3582 3583 memcpy(&iaq_buf, iaq, sizeof(iaq_buf)); 3584 3585 /* 3586 * The lock can be released here 3587 * because there is no post processing about ATQ 3588 */ 3589 mutex_exit(&sc->sc_atq_lock); 3590 ixl_link_state_update(sc, &iaq_buf); 3591 mutex_enter(&sc->sc_atq_lock); 3592 } 3593 3594 static void 3595 ixl_get_link_status(void *xsc) 3596 { 3597 struct ixl_softc *sc = xsc; 3598 struct ixl_aq_desc *iaq; 3599 struct ixl_aq_link_param *param; 3600 int error; 3601 3602 mutex_enter(&sc->sc_atq_lock); 3603 3604 iaq = &sc->sc_link_state_atq.iatq_desc; 3605 memset(iaq, 0, sizeof(*iaq)); 3606 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 3607 param = (struct ixl_aq_link_param *)iaq->iaq_param; 3608 param->notify = IXL_AQ_LINK_NOTIFY; 3609 3610 error = ixl_atq_exec_locked(sc, &sc->sc_link_state_atq); 3611 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 3612 3613 if (error == 0) { 3614 ixl_get_link_status_done(sc, iaq); 3615 } 3616 3617 mutex_exit(&sc->sc_atq_lock); 3618 } 3619 3620 static void 3621 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3622 { 3623 struct ifnet *ifp = &sc->sc_ec.ec_if; 3624 int link_state; 3625 3626 mutex_enter(&sc->sc_cfg_lock); 3627 link_state = ixl_set_link_status_locked(sc, iaq); 3628 mutex_exit(&sc->sc_cfg_lock); 3629 3630 if (ifp->if_link_state != link_state) 3631 if_link_state_change(ifp, link_state); 3632 3633 if (link_state != LINK_STATE_DOWN) { 3634 kpreempt_disable(); 3635 if_schedule_deferred_start(ifp); 3636 kpreempt_enable(); 3637 } 3638 } 3639 3640 static void 3641 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq, 3642 const char *msg) 3643 { 3644 char buf[512]; 3645 size_t len; 3646 3647 len = sizeof(buf); 3648 buf[--len] = '\0'; 3649 3650 device_printf(sc->sc_dev, "%s\n", msg); 3651 snprintb(buf, len, IXL_AQ_FLAGS_FMT, 
le16toh(iaq->iaq_flags)); 3652 device_printf(sc->sc_dev, "flags %s opcode %04x\n", 3653 buf, le16toh(iaq->iaq_opcode)); 3654 device_printf(sc->sc_dev, "datalen %u retval %u\n", 3655 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval)); 3656 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie); 3657 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n", 3658 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]), 3659 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3])); 3660 } 3661 3662 static void 3663 ixl_arq(void *xsc) 3664 { 3665 struct ixl_softc *sc = xsc; 3666 struct ixl_aq_desc *arq, *iaq; 3667 struct ixl_aq_buf *aqb; 3668 unsigned int cons = sc->sc_arq_cons; 3669 unsigned int prod; 3670 int done = 0; 3671 3672 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) & 3673 sc->sc_aq_regs->arq_head_mask; 3674 3675 if (cons == prod) 3676 goto done; 3677 3678 arq = IXL_DMA_KVA(&sc->sc_arq); 3679 3680 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3681 0, IXL_DMA_LEN(&sc->sc_arq), 3682 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3683 3684 do { 3685 iaq = &arq[cons]; 3686 aqb = sc->sc_arq_live[cons]; 3687 3688 KASSERT(aqb != NULL); 3689 3690 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN, 3691 BUS_DMASYNC_POSTREAD); 3692 3693 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3694 ixl_aq_dump(sc, iaq, "arq event"); 3695 3696 switch (iaq->iaq_opcode) { 3697 case htole16(IXL_AQ_OP_PHY_LINK_STATUS): 3698 ixl_link_state_update(sc, iaq); 3699 break; 3700 } 3701 3702 memset(iaq, 0, sizeof(*iaq)); 3703 sc->sc_arq_live[cons] = NULL; 3704 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry); 3705 3706 cons++; 3707 cons &= IXL_AQ_MASK; 3708 3709 done = 1; 3710 } while (cons != prod); 3711 3712 if (done) { 3713 sc->sc_arq_cons = cons; 3714 ixl_arq_fill(sc); 3715 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3716 0, IXL_DMA_LEN(&sc->sc_arq), 3717 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3718 } 3719 3720 done: 3721 ixl_enable_other_intr(sc); 3722 } 3723 3724 static void 3725 ixl_atq_set(struct ixl_atq *iatq, 3726 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *)) 3727 { 3728 3729 iatq->iatq_fn = fn; 3730 } 3731 3732 static int 3733 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3734 { 3735 struct ixl_aq_desc *atq, *slot; 3736 unsigned int prod, cons, prod_next; 3737 3738 /* assert locked */ 3739 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3740 3741 atq = IXL_DMA_KVA(&sc->sc_atq); 3742 prod = sc->sc_atq_prod; 3743 cons = sc->sc_atq_cons; 3744 prod_next = (prod +1) & IXL_AQ_MASK; 3745 3746 if (cons == prod_next) 3747 return ENOMEM; 3748 3749 slot = &atq[prod]; 3750 3751 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3752 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3753 3754 KASSERT(iatq->iatq_fn != NULL); 3755 *slot = iatq->iatq_desc; 3756 slot->iaq_cookie = (uint64_t)((intptr_t)iatq); 3757 3758 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3759 ixl_aq_dump(sc, slot, "atq command"); 3760 3761 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3762 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3763 3764 sc->sc_atq_prod = prod_next; 3765 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod); 3766 3767 return 0; 3768 } 3769 3770 static void 3771 ixl_atq_done_locked(struct ixl_softc *sc) 3772 { 3773 struct ixl_aq_desc *atq, *slot; 3774 struct ixl_atq *iatq; 3775 unsigned int cons; 3776 unsigned int prod; 3777 3778 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3779 3780 prod = sc->sc_atq_prod; 3781 cons = sc->sc_atq_cons; 3782 
3783 if (prod == cons) 3784 return; 3785 3786 atq = IXL_DMA_KVA(&sc->sc_atq); 3787 3788 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3789 0, IXL_DMA_LEN(&sc->sc_atq), 3790 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3791 3792 do { 3793 slot = &atq[cons]; 3794 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD))) 3795 break; 3796 3797 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie); 3798 iatq->iatq_desc = *slot; 3799 3800 memset(slot, 0, sizeof(*slot)); 3801 3802 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3803 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response"); 3804 3805 (*iatq->iatq_fn)(sc, &iatq->iatq_desc); 3806 3807 cons++; 3808 cons &= IXL_AQ_MASK; 3809 } while (cons != prod); 3810 3811 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3812 0, IXL_DMA_LEN(&sc->sc_atq), 3813 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3814 3815 sc->sc_atq_cons = cons; 3816 } 3817 3818 static void 3819 ixl_atq_done(struct ixl_softc *sc) 3820 { 3821 3822 mutex_enter(&sc->sc_atq_lock); 3823 ixl_atq_done_locked(sc); 3824 mutex_exit(&sc->sc_atq_lock); 3825 } 3826 3827 static void 3828 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3829 { 3830 3831 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3832 3833 cv_signal(&sc->sc_atq_cv); 3834 } 3835 3836 static int 3837 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq) 3838 { 3839 int error; 3840 3841 mutex_enter(&sc->sc_atq_lock); 3842 error = ixl_atq_exec_locked(sc, iatq); 3843 mutex_exit(&sc->sc_atq_lock); 3844 3845 return error; 3846 } 3847 3848 static int 3849 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3850 { 3851 int error; 3852 3853 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3854 KASSERT(iatq->iatq_desc.iaq_cookie == 0); 3855 3856 ixl_atq_set(iatq, ixl_wakeup); 3857 3858 error = ixl_atq_post_locked(sc, iatq); 3859 if (error) 3860 return error; 3861 3862 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock, 3863 IXL_ATQ_EXEC_TIMEOUT); 3864 3865 return error; 3866 } 3867 3868 static int 3869 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm) 3870 { 3871 struct ixl_aq_desc *atq, *slot; 3872 unsigned int prod; 3873 unsigned int t = 0; 3874 3875 mutex_enter(&sc->sc_atq_lock); 3876 3877 atq = IXL_DMA_KVA(&sc->sc_atq); 3878 prod = sc->sc_atq_prod; 3879 slot = atq + prod; 3880 3881 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3882 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3883 3884 *slot = *iaq; 3885 slot->iaq_flags |= htole16(IXL_AQ_SI); 3886 3887 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3888 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3889 3890 prod++; 3891 prod &= IXL_AQ_MASK; 3892 sc->sc_atq_prod = prod; 3893 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod); 3894 3895 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) { 3896 delaymsec(1); 3897 3898 if (t++ > tm) { 3899 mutex_exit(&sc->sc_atq_lock); 3900 return ETIMEDOUT; 3901 } 3902 } 3903 3904 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3905 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD); 3906 *iaq = *slot; 3907 memset(slot, 0, sizeof(*slot)); 3908 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3909 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD); 3910 3911 sc->sc_atq_cons = prod; 3912 3913 mutex_exit(&sc->sc_atq_lock); 3914 3915 return 0; 3916 } 3917 3918 static int 3919 ixl_get_version(struct ixl_softc *sc) 3920 { 3921 struct ixl_aq_desc iaq; 3922 uint32_t fwbuild, fwver, apiver; 3923 uint16_t api_maj_ver, api_min_ver; 3924 3925 memset(&iaq, 0, sizeof(iaq)); 3926 
	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3927 
3930 	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3931 		return ETIMEDOUT;
3932 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3933 		return EIO;
3934 
3935 	fwbuild = le32toh(iaq.iaq_param[1]);
3936 	fwver = le32toh(iaq.iaq_param[2]);
3937 	apiver = le32toh(iaq.iaq_param[3]);
3938 
3939 	api_maj_ver = (uint16_t)apiver;
3940 	api_min_ver = (uint16_t)(apiver >> 16);
3941 
3942 	aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3943 	    (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3944 
3945 	if (sc->sc_mac_type == I40E_MAC_X722) {
3946 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
3947 		    IXL_SC_AQ_FLAG_NVMREAD);
3948 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3949 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
3950 	}
3951 
3952 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
3953 	if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
3954 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3955 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
3956 	}
3957 #undef IXL_API_VER
3958 
3959 	return 0;
3960 }
3961 
3962 static int
3963 ixl_get_nvm_version(struct ixl_softc *sc)
3964 {
3965 	uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
3966 	uint32_t eetrack, oem;
3967 	uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
3968 	uint8_t oem_ver, oem_patch;
3969 
3970 	nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
3971 	ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
3972 	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
3973 	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
3974 	ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
3975 	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
3976 	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
3977 
3978 	nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
3979 	nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
3980 	eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
3981 	oem = ((uint32_t)oem_hi << 16) | oem_lo;
3982 	oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
3983 	oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
3984 	oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
3985 
3986 	aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
3987 	    nvm_maj_ver, nvm_min_ver, eetrack,
3988 	    oem_ver, oem_build, oem_patch);
3989 
3990 	return 0;
3991 }
3992 
3993 static int
3994 ixl_pxe_clear(struct ixl_softc *sc)
3995 {
3996 	struct ixl_aq_desc iaq;
3997 	int rv;
3998 
3999 	memset(&iaq, 0, sizeof(iaq));
4000 	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4001 	iaq.iaq_param[0] = htole32(0x2);
4002 
4003 	rv = ixl_atq_poll(sc, &iaq, 250);
4004 
4005 	ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4006 
4007 	if (rv != 0)
4008 		return ETIMEDOUT;
4009 
4010 	switch (iaq.iaq_retval) {
4011 	case htole16(IXL_AQ_RC_OK):
4012 	case htole16(IXL_AQ_RC_EEXIST):
4013 		break;
4014 	default:
4015 		return EIO;
4016 	}
4017 
4018 	return 0;
4019 }
4020 
4021 static int
4022 ixl_lldp_shut(struct ixl_softc *sc)
4023 {
4024 	struct ixl_aq_desc iaq;
4025 
4026 	memset(&iaq, 0, sizeof(iaq));
4027 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4028 	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4029 
4030 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4031 		aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4032 		return -1;
4033 	}
4034 
4035 	switch (iaq.iaq_retval) {
4036 	case htole16(IXL_AQ_RC_EMODE):
4037 	case htole16(IXL_AQ_RC_EPERM):
4038 		/* ignore silently */
4039 	default:
4040 		break;
4041 	}
4042 
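	/*
	 * The firmware may refuse with EMODE or EPERM when the LLDP agent
	 * cannot be stopped; any result other than a timeout is treated
	 * as success here.
	 */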
return 0; 4044 } 4045 4046 static void 4047 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap) 4048 { 4049 uint16_t id; 4050 uint32_t number, logical_id; 4051 4052 id = le16toh(cap->cap_id); 4053 number = le32toh(cap->number); 4054 logical_id = le32toh(cap->logical_id); 4055 4056 switch (id) { 4057 case IXL_AQ_CAP_RSS: 4058 sc->sc_rss_table_size = number; 4059 sc->sc_rss_table_entry_width = logical_id; 4060 break; 4061 case IXL_AQ_CAP_RXQ: 4062 case IXL_AQ_CAP_TXQ: 4063 sc->sc_nqueue_pairs_device = MIN(number, 4064 sc->sc_nqueue_pairs_device); 4065 break; 4066 } 4067 } 4068 4069 static int 4070 ixl_get_hw_capabilities(struct ixl_softc *sc) 4071 { 4072 struct ixl_dmamem idm; 4073 struct ixl_aq_desc iaq; 4074 struct ixl_aq_capability *caps; 4075 size_t i, ncaps; 4076 bus_size_t caps_size; 4077 uint16_t status; 4078 int rv; 4079 4080 caps_size = sizeof(caps[0]) * 40; 4081 memset(&iaq, 0, sizeof(iaq)); 4082 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP); 4083 4084 do { 4085 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) { 4086 return -1; 4087 } 4088 4089 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4090 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4091 iaq.iaq_datalen = htole16(caps_size); 4092 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4093 4094 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4095 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD); 4096 4097 rv = ixl_atq_poll(sc, &iaq, 250); 4098 4099 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4100 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD); 4101 4102 if (rv != 0) { 4103 aprint_error(", HW capabilities timeout\n"); 4104 goto done; 4105 } 4106 4107 status = le16toh(iaq.iaq_retval); 4108 4109 if (status == IXL_AQ_RC_ENOMEM) { 4110 caps_size = le16toh(iaq.iaq_datalen); 4111 ixl_dmamem_free(sc, &idm); 4112 } 4113 } while (status == IXL_AQ_RC_ENOMEM); 4114 4115 if (status != IXL_AQ_RC_OK) { 4116 aprint_error(", HW capabilities error\n"); 4117 goto done; 4118 } 4119 4120 caps = IXL_DMA_KVA(&idm); 4121 ncaps = le16toh(iaq.iaq_param[1]); 4122 4123 for (i = 0; i < ncaps; i++) { 4124 ixl_parse_hw_capability(sc, &caps[i]); 4125 } 4126 4127 done: 4128 ixl_dmamem_free(sc, &idm); 4129 return rv; 4130 } 4131 4132 static int 4133 ixl_get_mac(struct ixl_softc *sc) 4134 { 4135 struct ixl_dmamem idm; 4136 struct ixl_aq_desc iaq; 4137 struct ixl_aq_mac_addresses *addrs; 4138 int rv; 4139 4140 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) { 4141 aprint_error(", unable to allocate mac addresses\n"); 4142 return -1; 4143 } 4144 4145 memset(&iaq, 0, sizeof(iaq)); 4146 iaq.iaq_flags = htole16(IXL_AQ_BUF); 4147 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ); 4148 iaq.iaq_datalen = htole16(sizeof(*addrs)); 4149 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4150 4151 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4152 BUS_DMASYNC_PREREAD); 4153 4154 rv = ixl_atq_poll(sc, &iaq, 250); 4155 4156 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4157 BUS_DMASYNC_POSTREAD); 4158 4159 if (rv != 0) { 4160 aprint_error(", MAC ADDRESS READ timeout\n"); 4161 rv = -1; 4162 goto done; 4163 } 4164 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4165 aprint_error(", MAC ADDRESS READ error\n"); 4166 rv = -1; 4167 goto done; 4168 } 4169 4170 addrs = IXL_DMA_KVA(&idm); 4171 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) { 4172 printf(", port address is not valid\n"); 4173 goto done; 4174 } 4175 4176 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN); 4177 rv = 0; 4178 4179 done: 4180 
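/*
 * The port address is only copied into sc_enaddr when the firmware
 * marks it valid; the temporary DMA buffer is released on every path.
 */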
ixl_dmamem_free(sc, &idm); 4181 return rv; 4182 } 4183 4184 static int 4185 ixl_get_switch_config(struct ixl_softc *sc) 4186 { 4187 struct ixl_dmamem idm; 4188 struct ixl_aq_desc iaq; 4189 struct ixl_aq_switch_config *hdr; 4190 struct ixl_aq_switch_config_element *elms, *elm; 4191 unsigned int nelm, i; 4192 int rv; 4193 4194 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4195 aprint_error_dev(sc->sc_dev, 4196 "unable to allocate switch config buffer\n"); 4197 return -1; 4198 } 4199 4200 memset(&iaq, 0, sizeof(iaq)); 4201 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4202 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4203 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG); 4204 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN); 4205 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4206 4207 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4208 BUS_DMASYNC_PREREAD); 4209 4210 rv = ixl_atq_poll(sc, &iaq, 250); 4211 4212 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4213 BUS_DMASYNC_POSTREAD); 4214 4215 if (rv != 0) { 4216 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n"); 4217 rv = -1; 4218 goto done; 4219 } 4220 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4221 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n"); 4222 rv = -1; 4223 goto done; 4224 } 4225 4226 hdr = IXL_DMA_KVA(&idm); 4227 elms = (struct ixl_aq_switch_config_element *)(hdr + 1); 4228 4229 nelm = le16toh(hdr->num_reported); 4230 if (nelm < 1) { 4231 aprint_error_dev(sc->sc_dev, "no switch config available\n"); 4232 rv = -1; 4233 goto done; 4234 } 4235 4236 for (i = 0; i < nelm; i++) { 4237 elm = &elms[i]; 4238 4239 aprint_debug_dev(sc->sc_dev, 4240 "type %x revision %u seid %04x\n", 4241 elm->type, elm->revision, le16toh(elm->seid)); 4242 aprint_debug_dev(sc->sc_dev, 4243 "uplink %04x downlink %04x\n", 4244 le16toh(elm->uplink_seid), 4245 le16toh(elm->downlink_seid)); 4246 aprint_debug_dev(sc->sc_dev, 4247 "conntype %x scheduler %04x extra %04x\n", 4248 elm->connection_type, 4249 le16toh(elm->scheduler_id), 4250 le16toh(elm->element_info)); 4251 } 4252 4253 elm = &elms[0]; 4254 4255 sc->sc_uplink_seid = elm->uplink_seid; 4256 sc->sc_downlink_seid = elm->downlink_seid; 4257 sc->sc_seid = elm->seid; 4258 4259 if ((sc->sc_uplink_seid == htole16(0)) != 4260 (sc->sc_downlink_seid == htole16(0))) { 4261 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n"); 4262 rv = -1; 4263 goto done; 4264 } 4265 4266 done: 4267 ixl_dmamem_free(sc, &idm); 4268 return rv; 4269 } 4270 4271 static int 4272 ixl_phy_mask_ints(struct ixl_softc *sc) 4273 { 4274 struct ixl_aq_desc iaq; 4275 4276 memset(&iaq, 0, sizeof(iaq)); 4277 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK); 4278 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK & 4279 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL | 4280 IXL_AQ_PHY_EV_MEDIA_NA)); 4281 4282 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4283 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n"); 4284 return -1; 4285 } 4286 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4287 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n"); 4288 return -1; 4289 } 4290 4291 return 0; 4292 } 4293 4294 static int 4295 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm) 4296 { 4297 struct ixl_aq_desc iaq; 4298 int rv; 4299 4300 memset(&iaq, 0, sizeof(iaq)); 4301 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4302 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? 
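/* indirect buffers larger than I40E_AQ_LARGE_BUF are flagged IXL_AQ_LB */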
IXL_AQ_LB : 0)); 4303 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES); 4304 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm)); 4305 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT); 4306 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 4307 4308 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4309 BUS_DMASYNC_PREREAD); 4310 4311 rv = ixl_atq_poll(sc, &iaq, 250); 4312 4313 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4314 BUS_DMASYNC_POSTREAD); 4315 4316 if (rv != 0) 4317 return -1; 4318 4319 return le16toh(iaq.iaq_retval); 4320 } 4321 4322 static int 4323 ixl_get_phy_info(struct ixl_softc *sc) 4324 { 4325 struct ixl_dmamem idm; 4326 struct ixl_aq_phy_abilities *phy; 4327 int rv; 4328 4329 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4330 aprint_error_dev(sc->sc_dev, 4331 "unable to allocate phy abilities buffer\n"); 4332 return -1; 4333 } 4334 4335 rv = ixl_get_phy_abilities(sc, &idm); 4336 switch (rv) { 4337 case -1: 4338 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n"); 4339 goto done; 4340 case IXL_AQ_RC_OK: 4341 break; 4342 case IXL_AQ_RC_EIO: 4343 aprint_error_dev(sc->sc_dev,"unable to query phy types\n"); 4344 goto done; 4345 default: 4346 aprint_error_dev(sc->sc_dev, 4347 "GET PHY ABILITIIES error %u\n", rv); 4348 goto done; 4349 } 4350 4351 phy = IXL_DMA_KVA(&idm); 4352 4353 sc->sc_phy_types = le32toh(phy->phy_type); 4354 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32; 4355 4356 sc->sc_phy_abilities = phy->abilities; 4357 sc->sc_phy_linkspeed = phy->link_speed; 4358 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info & 4359 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS | 4360 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS); 4361 sc->sc_eee_cap = phy->eee_capability; 4362 sc->sc_eeer_val = phy->eeer_val; 4363 sc->sc_d3_lpan = phy->d3_lpan; 4364 4365 rv = 0; 4366 4367 done: 4368 ixl_dmamem_free(sc, &idm); 4369 return rv; 4370 } 4371 4372 static int 4373 ixl_set_phy_config(struct ixl_softc *sc, 4374 uint8_t link_speed, uint8_t abilities, bool polling) 4375 { 4376 struct ixl_aq_phy_param *param; 4377 struct ixl_atq iatq; 4378 struct ixl_aq_desc *iaq; 4379 int error; 4380 4381 memset(&iatq, 0, sizeof(iatq)); 4382 4383 iaq = &iatq.iatq_desc; 4384 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG); 4385 param = (struct ixl_aq_phy_param *)&iaq->iaq_param; 4386 param->phy_types = htole32((uint32_t)sc->sc_phy_types); 4387 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32); 4388 param->link_speed = link_speed; 4389 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK; 4390 param->fec_cfg = sc->sc_phy_fec_cfg; 4391 param->eee_capability = sc->sc_eee_cap; 4392 param->eeer_val = sc->sc_eeer_val; 4393 param->d3_lpan = sc->sc_d3_lpan; 4394 4395 if (polling) 4396 error = ixl_atq_poll(sc, iaq, 250); 4397 else 4398 error = ixl_atq_exec(sc, &iatq); 4399 4400 if (error != 0) 4401 return error; 4402 4403 switch (le16toh(iaq->iaq_retval)) { 4404 case IXL_AQ_RC_OK: 4405 break; 4406 case IXL_AQ_RC_EPERM: 4407 return EPERM; 4408 default: 4409 return EIO; 4410 } 4411 4412 return 0; 4413 } 4414 4415 static int 4416 ixl_set_phy_autoselect(struct ixl_softc *sc) 4417 { 4418 uint8_t link_speed, abilities; 4419 4420 link_speed = sc->sc_phy_linkspeed; 4421 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO; 4422 4423 return ixl_set_phy_config(sc, link_speed, abilities, true); 4424 } 4425 4426 static int 4427 ixl_get_link_status_poll(struct ixl_softc *sc, int *l) 4428 { 4429 struct ixl_aq_desc iaq; 4430 struct 
ixl_aq_link_param *param; 4431 int link; 4432 4433 memset(&iaq, 0, sizeof(iaq)); 4434 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 4435 param = (struct ixl_aq_link_param *)iaq.iaq_param; 4436 param->notify = IXL_AQ_LINK_NOTIFY; 4437 4438 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4439 return ETIMEDOUT; 4440 } 4441 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4442 return EIO; 4443 } 4444 4445 /* It is unneccessary to hold lock */ 4446 link = ixl_set_link_status_locked(sc, &iaq); 4447 4448 if (l != NULL) 4449 *l = link; 4450 4451 return 0; 4452 } 4453 4454 static int 4455 ixl_get_vsi(struct ixl_softc *sc) 4456 { 4457 struct ixl_dmamem *vsi = &sc->sc_scratch; 4458 struct ixl_aq_desc iaq; 4459 struct ixl_aq_vsi_param *param; 4460 struct ixl_aq_vsi_reply *reply; 4461 struct ixl_aq_vsi_data *data; 4462 int rv; 4463 4464 /* grumble, vsi info isn't "known" at compile time */ 4465 4466 memset(&iaq, 0, sizeof(iaq)); 4467 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4468 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4469 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS); 4470 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4471 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4472 4473 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4474 param->uplink_seid = sc->sc_seid; 4475 4476 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4477 BUS_DMASYNC_PREREAD); 4478 4479 rv = ixl_atq_poll(sc, &iaq, 250); 4480 4481 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4482 BUS_DMASYNC_POSTREAD); 4483 4484 if (rv != 0) { 4485 return ETIMEDOUT; 4486 } 4487 4488 switch (le16toh(iaq.iaq_retval)) { 4489 case IXL_AQ_RC_OK: 4490 break; 4491 case IXL_AQ_RC_ENOENT: 4492 return ENOENT; 4493 case IXL_AQ_RC_EACCES: 4494 return EACCES; 4495 default: 4496 return EIO; 4497 } 4498 4499 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param; 4500 sc->sc_vsi_number = le16toh(reply->vsi_number); 4501 data = IXL_DMA_KVA(vsi); 4502 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx); 4503 4504 return 0; 4505 } 4506 4507 static int 4508 ixl_set_vsi(struct ixl_softc *sc) 4509 { 4510 struct ixl_dmamem *vsi = &sc->sc_scratch; 4511 struct ixl_aq_desc iaq; 4512 struct ixl_aq_vsi_param *param; 4513 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi); 4514 unsigned int qnum; 4515 uint16_t val; 4516 int rv; 4517 4518 qnum = sc->sc_nqueue_pairs - 1; 4519 4520 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP | 4521 IXL_AQ_VSI_VALID_VLAN); 4522 4523 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK)); 4524 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG)); 4525 data->queue_mapping[0] = htole16(0); 4526 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) | 4527 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)); 4528 4529 val = le16toh(data->port_vlan_flags); 4530 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK); 4531 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL); 4532 4533 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) { 4534 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH); 4535 } else { 4536 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING); 4537 } 4538 4539 data->port_vlan_flags = htole16(val); 4540 4541 /* grumble, vsi info isn't "known" at compile time */ 4542 4543 memset(&iaq, 0, sizeof(iaq)); 4544 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4545 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? 
IXL_AQ_LB : 0)); 4546 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS); 4547 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4548 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4549 4550 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4551 param->uplink_seid = sc->sc_seid; 4552 4553 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4554 BUS_DMASYNC_PREWRITE); 4555 4556 rv = ixl_atq_poll(sc, &iaq, 250); 4557 4558 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4559 BUS_DMASYNC_POSTWRITE); 4560 4561 if (rv != 0) { 4562 return ETIMEDOUT; 4563 } 4564 4565 switch (le16toh(iaq.iaq_retval)) { 4566 case IXL_AQ_RC_OK: 4567 break; 4568 case IXL_AQ_RC_ENOENT: 4569 return ENOENT; 4570 case IXL_AQ_RC_EACCES: 4571 return EACCES; 4572 default: 4573 return EIO; 4574 } 4575 4576 return 0; 4577 } 4578 4579 static void 4580 ixl_set_filter_control(struct ixl_softc *sc) 4581 { 4582 uint32_t reg; 4583 4584 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0); 4585 4586 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK); 4587 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT); 4588 4589 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK); 4590 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK); 4591 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK); 4592 4593 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg); 4594 } 4595 4596 static inline void 4597 ixl_get_default_rss_key(uint32_t *buf, size_t len) 4598 { 4599 size_t cplen; 4600 uint8_t rss_seed[RSS_KEYSIZE]; 4601 4602 rss_getkey(rss_seed); 4603 memset(buf, 0, len); 4604 4605 cplen = MIN(len, sizeof(rss_seed)); 4606 memcpy(buf, rss_seed, cplen); 4607 } 4608 4609 static int 4610 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen) 4611 { 4612 struct ixl_dmamem *idm; 4613 struct ixl_atq iatq; 4614 struct ixl_aq_desc *iaq; 4615 struct ixl_aq_rss_key_param *param; 4616 struct ixl_aq_rss_key_data *data; 4617 size_t len, datalen, stdlen, extlen; 4618 uint16_t vsi_id; 4619 int rv; 4620 4621 memset(&iatq, 0, sizeof(iatq)); 4622 iaq = &iatq.iatq_desc; 4623 idm = &sc->sc_aqbuf; 4624 4625 datalen = sizeof(*data); 4626 4627 /*XXX The buf size has to be less than the size of the register */ 4628 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen); 4629 4630 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4631 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4632 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY); 4633 iaq->iaq_datalen = htole16(datalen); 4634 4635 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param; 4636 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) | 4637 IXL_AQ_RSSKEY_VSI_VALID; 4638 param->vsi_id = htole16(vsi_id); 4639 4640 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 4641 data = IXL_DMA_KVA(idm); 4642 4643 len = MIN(keylen, datalen); 4644 stdlen = MIN(sizeof(data->standard_rss_key), len); 4645 memcpy(data->standard_rss_key, key, stdlen); 4646 len = (len > stdlen) ? (len - stdlen) : 0; 4647 4648 extlen = MIN(sizeof(data->extended_hash_key), len); 4649 extlen = (stdlen < keylen) ? 
MIN(extlen, keylen - stdlen) : 0; 4650 memcpy(data->extended_hash_key, key + stdlen, extlen); 4651 4652 ixl_aq_dva(iaq, IXL_DMA_DVA(idm)); 4653 4654 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4655 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE); 4656 4657 rv = ixl_atq_exec(sc, &iatq); 4658 4659 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4660 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE); 4661 4662 if (rv != 0) { 4663 return ETIMEDOUT; 4664 } 4665 4666 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) { 4667 return EIO; 4668 } 4669 4670 return 0; 4671 } 4672 4673 static int 4674 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen) 4675 { 4676 struct ixl_dmamem *idm; 4677 struct ixl_atq iatq; 4678 struct ixl_aq_desc *iaq; 4679 struct ixl_aq_rss_lut_param *param; 4680 uint16_t vsi_id; 4681 uint8_t *data; 4682 size_t dmalen; 4683 int rv; 4684 4685 memset(&iatq, 0, sizeof(iatq)); 4686 iaq = &iatq.iatq_desc; 4687 idm = &sc->sc_aqbuf; 4688 4689 dmalen = MIN(lutlen, IXL_DMA_LEN(idm)); 4690 4691 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4692 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4693 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT); 4694 iaq->iaq_datalen = htole16(dmalen); 4695 4696 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 4697 data = IXL_DMA_KVA(idm); 4698 memcpy(data, lut, dmalen); 4699 ixl_aq_dva(iaq, IXL_DMA_DVA(idm)); 4700 4701 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param; 4702 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) | 4703 IXL_AQ_RSSLUT_VSI_VALID; 4704 param->vsi_id = htole16(vsi_id); 4705 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF << 4706 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT); 4707 4708 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4709 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE); 4710 4711 rv = ixl_atq_exec(sc, &iatq); 4712 4713 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4714 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE); 4715 4716 if (rv != 0) { 4717 return ETIMEDOUT; 4718 } 4719 4720 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) { 4721 return EIO; 4722 } 4723 4724 return 0; 4725 } 4726 4727 static int 4728 ixl_register_rss_key(struct ixl_softc *sc) 4729 { 4730 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG]; 4731 int rv; 4732 size_t i; 4733 4734 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed)); 4735 4736 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) { 4737 rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed, 4738 sizeof(rss_seed)); 4739 } else { 4740 rv = 0; 4741 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 4742 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]); 4743 } 4744 } 4745 4746 return rv; 4747 } 4748 4749 static void 4750 ixl_register_rss_pctype(struct ixl_softc *sc) 4751 { 4752 uint64_t set_hena = 0; 4753 uint32_t hena0, hena1; 4754 4755 /* 4756 * We use TCP/UDP with IPv4/IPv6 by default. 4757 * Note: the device cannot use just the IP header of each 4758 * TCP/UDP packet for the RSS hash calculation.
4759 */ 4760 if (sc->sc_mac_type == I40E_MAC_X722) 4761 set_hena = IXL_RSS_HENA_DEFAULT_X722; 4762 else 4763 set_hena = IXL_RSS_HENA_DEFAULT_XL710; 4764 4765 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0)); 4766 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1)); 4767 4768 SET(hena0, set_hena); 4769 SET(hena1, set_hena >> 32); 4770 4771 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0); 4772 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1); 4773 } 4774 4775 static int 4776 ixl_register_rss_hlut(struct ixl_softc *sc) 4777 { 4778 unsigned int qid; 4779 uint8_t hlut_buf[512], lut_mask; 4780 uint32_t *hluts; 4781 size_t i, hluts_num; 4782 int rv; 4783 4784 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1; 4785 4786 for (i = 0; i < sc->sc_rss_table_size; i++) { 4787 qid = i % sc->sc_nqueue_pairs; 4788 hlut_buf[i] = qid & lut_mask; 4789 } 4790 4791 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) { 4792 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf)); 4793 } else { 4794 rv = 0; 4795 hluts = (uint32_t *)hlut_buf; 4796 hluts_num = sc->sc_rss_table_size >> 2; 4797 for (i = 0; i < hluts_num; i++) { 4798 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]); 4799 } 4800 ixl_flush(sc); 4801 } 4802 4803 return rv; 4804 } 4805 4806 static void 4807 ixl_config_rss(struct ixl_softc *sc) 4808 { 4809 4810 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 4811 4812 ixl_register_rss_key(sc); 4813 ixl_register_rss_pctype(sc); 4814 ixl_register_rss_hlut(sc); 4815 } 4816 4817 static const struct ixl_phy_type * 4818 ixl_search_phy_type(uint8_t phy_type) 4819 { 4820 const struct ixl_phy_type *itype; 4821 uint64_t mask; 4822 unsigned int i; 4823 4824 if (phy_type >= 64) 4825 return NULL; 4826 4827 mask = 1ULL << phy_type; 4828 4829 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 4830 itype = &ixl_phy_type_map[i]; 4831 4832 if (ISSET(itype->phy_type, mask)) 4833 return itype; 4834 } 4835 4836 return NULL; 4837 } 4838 4839 static uint64_t 4840 ixl_search_link_speed(uint8_t link_speed) 4841 { 4842 const struct ixl_speed_type *type; 4843 unsigned int i; 4844 4845 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4846 type = &ixl_speed_type_map[i]; 4847 4848 if (ISSET(type->dev_speed, link_speed)) 4849 return type->net_speed; 4850 } 4851 4852 return 0; 4853 } 4854 4855 static uint8_t 4856 ixl_search_baudrate(uint64_t baudrate) 4857 { 4858 const struct ixl_speed_type *type; 4859 unsigned int i; 4860 4861 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4862 type = &ixl_speed_type_map[i]; 4863 4864 if (type->net_speed == baudrate) { 4865 return type->dev_speed; 4866 } 4867 } 4868 4869 return 0; 4870 } 4871 4872 static int 4873 ixl_restart_an(struct ixl_softc *sc) 4874 { 4875 struct ixl_aq_desc iaq; 4876 4877 memset(&iaq, 0, sizeof(iaq)); 4878 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN); 4879 iaq.iaq_param[0] = 4880 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE); 4881 4882 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4883 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n"); 4884 return -1; 4885 } 4886 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4887 aprint_error_dev(sc->sc_dev, "RESTART AN error\n"); 4888 return -1; 4889 } 4890 4891 return 0; 4892 } 4893 4894 static int 4895 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 4896 uint16_t vlan, uint16_t flags) 4897 { 4898 struct ixl_aq_desc iaq; 4899 struct ixl_aq_add_macvlan *param; 4900 struct ixl_aq_add_macvlan_elem *elem; 4901 4902 memset(&iaq, 0, sizeof(iaq)); 4903 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 4904 iaq.iaq_opcode = 
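/*
 * ADD_MACVLAN installs a perfect-match MAC/VLAN filter on this VSI.
 * The single filter element (address, VLAN id, match flags) is written
 * to the scratch DMA buffer referenced by the descriptor.
 */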
htole16(IXL_AQ_OP_ADD_MACVLAN); 4905 iaq.iaq_datalen = htole16(sizeof(*elem)); 4906 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 4907 4908 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param; 4909 param->num_addrs = htole16(1); 4910 param->seid0 = htole16(0x8000) | sc->sc_seid; 4911 param->seid1 = 0; 4912 param->seid2 = 0; 4913 4914 elem = IXL_DMA_KVA(&sc->sc_scratch); 4915 memset(elem, 0, sizeof(*elem)); 4916 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 4917 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags); 4918 elem->vlan = htole16(vlan); 4919 4920 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4921 return IXL_AQ_RC_EINVAL; 4922 } 4923 4924 switch (le16toh(iaq.iaq_retval)) { 4925 case IXL_AQ_RC_OK: 4926 break; 4927 case IXL_AQ_RC_ENOSPC: 4928 return ENOSPC; 4929 case IXL_AQ_RC_ENOENT: 4930 return ENOENT; 4931 case IXL_AQ_RC_EACCES: 4932 return EACCES; 4933 case IXL_AQ_RC_EEXIST: 4934 return EEXIST; 4935 case IXL_AQ_RC_EINVAL: 4936 return EINVAL; 4937 default: 4938 return EIO; 4939 } 4940 4941 return 0; 4942 } 4943 4944 static int 4945 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 4946 uint16_t vlan, uint16_t flags) 4947 { 4948 struct ixl_aq_desc iaq; 4949 struct ixl_aq_remove_macvlan *param; 4950 struct ixl_aq_remove_macvlan_elem *elem; 4951 4952 memset(&iaq, 0, sizeof(iaq)); 4953 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 4954 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN); 4955 iaq.iaq_datalen = htole16(sizeof(*elem)); 4956 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 4957 4958 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param; 4959 param->num_addrs = htole16(1); 4960 param->seid0 = htole16(0x8000) | sc->sc_seid; 4961 param->seid1 = 0; 4962 param->seid2 = 0; 4963 4964 elem = IXL_DMA_KVA(&sc->sc_scratch); 4965 memset(elem, 0, sizeof(*elem)); 4966 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 4967 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags); 4968 elem->vlan = htole16(vlan); 4969 4970 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4971 return EINVAL; 4972 } 4973 4974 switch (le16toh(iaq.iaq_retval)) { 4975 case IXL_AQ_RC_OK: 4976 break; 4977 case IXL_AQ_RC_ENOENT: 4978 return ENOENT; 4979 case IXL_AQ_RC_EACCES: 4980 return EACCES; 4981 case IXL_AQ_RC_EINVAL: 4982 return EINVAL; 4983 default: 4984 return EIO; 4985 } 4986 4987 return 0; 4988 } 4989 4990 static int 4991 ixl_hmc(struct ixl_softc *sc) 4992 { 4993 struct { 4994 uint32_t count; 4995 uint32_t minsize; 4996 bus_size_t objsiz; 4997 bus_size_t setoff; 4998 bus_size_t setcnt; 4999 } regs[] = { 5000 { 5001 0, 5002 IXL_HMC_TXQ_MINSIZE, 5003 I40E_GLHMC_LANTXOBJSZ, 5004 I40E_GLHMC_LANTXBASE(sc->sc_pf_id), 5005 I40E_GLHMC_LANTXCNT(sc->sc_pf_id), 5006 }, 5007 { 5008 0, 5009 IXL_HMC_RXQ_MINSIZE, 5010 I40E_GLHMC_LANRXOBJSZ, 5011 I40E_GLHMC_LANRXBASE(sc->sc_pf_id), 5012 I40E_GLHMC_LANRXCNT(sc->sc_pf_id), 5013 }, 5014 { 5015 0, 5016 0, 5017 I40E_GLHMC_FCOEDDPOBJSZ, 5018 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id), 5019 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id), 5020 }, 5021 { 5022 0, 5023 0, 5024 I40E_GLHMC_FCOEFOBJSZ, 5025 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id), 5026 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id), 5027 }, 5028 }; 5029 struct ixl_hmc_entry *e; 5030 uint64_t size, dva; 5031 uint8_t *kva; 5032 uint64_t *sdpage; 5033 unsigned int i; 5034 int npages, tables; 5035 uint32_t reg; 5036 5037 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries)); 5038 5039 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count = 5040 ixl_rd(sc, I40E_GLHMC_LANQMAX); 5041 5042 size = 0; 5043 for (i 
= 0; i < __arraycount(regs); i++) { 5044 e = &sc->sc_hmc_entries[i]; 5045 5046 e->hmc_count = regs[i].count; 5047 reg = ixl_rd(sc, regs[i].objsiz); 5048 e->hmc_size = IXL_BIT_ULL(0x3F & reg); 5049 e->hmc_base = size; 5050 5051 if ((e->hmc_size * 8) < regs[i].minsize) { 5052 aprint_error_dev(sc->sc_dev, 5053 "kernel hmc entry is too big\n"); 5054 return -1; 5055 } 5056 5057 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP); 5058 } 5059 size = roundup(size, IXL_HMC_PGSIZE); 5060 npages = size / IXL_HMC_PGSIZE; 5061 5062 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ; 5063 5064 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) { 5065 aprint_error_dev(sc->sc_dev, 5066 "unable to allocate hmc pd memory\n"); 5067 return -1; 5068 } 5069 5070 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE, 5071 IXL_HMC_PGSIZE) != 0) { 5072 aprint_error_dev(sc->sc_dev, 5073 "unable to allocate hmc sd memory\n"); 5074 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5075 return -1; 5076 } 5077 5078 kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 5079 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd)); 5080 5081 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 5082 0, IXL_DMA_LEN(&sc->sc_hmc_pd), 5083 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5084 5085 dva = IXL_DMA_DVA(&sc->sc_hmc_pd); 5086 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd); 5087 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd)); 5088 5089 for (i = 0; (int)i < npages; i++) { 5090 *sdpage = htole64(dva | IXL_HMC_PDVALID); 5091 sdpage++; 5092 5093 dva += IXL_HMC_PGSIZE; 5094 } 5095 5096 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd), 5097 0, IXL_DMA_LEN(&sc->sc_hmc_sd), 5098 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5099 5100 dva = IXL_DMA_DVA(&sc->sc_hmc_sd); 5101 for (i = 0; (int)i < tables; i++) { 5102 uint32_t count; 5103 5104 KASSERT(npages >= 0); 5105 5106 count = ((unsigned int)npages > IXL_HMC_PGS) ? 
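/* a segment descriptor maps at most IXL_HMC_PGS backing pages */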
5107 IXL_HMC_PGS : (unsigned int)npages; 5108 5109 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32); 5110 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva | 5111 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | 5112 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)); 5113 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 5114 ixl_wr(sc, I40E_PFHMC_SDCMD, 5115 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i); 5116 5117 npages -= IXL_HMC_PGS; 5118 dva += IXL_HMC_PGSIZE; 5119 } 5120 5121 for (i = 0; i < __arraycount(regs); i++) { 5122 e = &sc->sc_hmc_entries[i]; 5123 5124 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP); 5125 ixl_wr(sc, regs[i].setcnt, e->hmc_count); 5126 } 5127 5128 return 0; 5129 } 5130 5131 static void 5132 ixl_hmc_free(struct ixl_softc *sc) 5133 { 5134 ixl_dmamem_free(sc, &sc->sc_hmc_sd); 5135 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5136 } 5137 5138 static void 5139 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing, 5140 unsigned int npacking) 5141 { 5142 uint8_t *dst = d; 5143 const uint8_t *src = s; 5144 unsigned int i; 5145 5146 for (i = 0; i < npacking; i++) { 5147 const struct ixl_hmc_pack *pack = &packing[i]; 5148 unsigned int offset = pack->lsb / 8; 5149 unsigned int align = pack->lsb % 8; 5150 const uint8_t *in = src + pack->offset; 5151 uint8_t *out = dst + offset; 5152 int width = pack->width; 5153 unsigned int inbits = 0; 5154 5155 if (align) { 5156 inbits = (*in++) << align; 5157 *out++ |= (inbits & 0xff); 5158 inbits >>= 8; 5159 5160 width -= 8 - align; 5161 } 5162 5163 while (width >= 8) { 5164 inbits |= (*in++) << align; 5165 *out++ = (inbits & 0xff); 5166 inbits >>= 8; 5167 5168 width -= 8; 5169 } 5170 5171 if (width > 0) { 5172 inbits |= (*in) << align; 5173 *out |= (inbits & ((1 << width) - 1)); 5174 } 5175 } 5176 } 5177 5178 static struct ixl_aq_buf * 5179 ixl_aqb_alloc(struct ixl_softc *sc) 5180 { 5181 struct ixl_aq_buf *aqb; 5182 5183 aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP); 5184 5185 aqb->aqb_size = IXL_AQ_BUFLEN; 5186 5187 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1, 5188 aqb->aqb_size, 0, 5189 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0) 5190 goto free; 5191 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size, 5192 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs, 5193 BUS_DMA_WAITOK) != 0) 5194 goto destroy; 5195 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs, 5196 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0) 5197 goto dma_free; 5198 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data, 5199 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0) 5200 goto unmap; 5201 5202 return aqb; 5203 unmap: 5204 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5205 dma_free: 5206 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5207 destroy: 5208 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5209 free: 5210 kmem_free(aqb, sizeof(*aqb)); 5211 5212 return NULL; 5213 } 5214 5215 static void 5216 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb) 5217 { 5218 5219 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map); 5220 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5221 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5222 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5223 kmem_free(aqb, sizeof(*aqb)); 5224 } 5225 5226 static int 5227 ixl_arq_fill(struct ixl_softc *sc) 5228 { 5229 struct ixl_aq_buf *aqb; 5230 struct ixl_aq_desc *arq, *iaq; 5231 unsigned int prod = sc->sc_arq_prod; 5232 unsigned int n; 5233 int post = 0; 5234 5235 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons, 
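/* number of unfilled slots between producer and consumer in the admin receive ring */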
5236 IXL_AQ_NUM); 5237 arq = IXL_DMA_KVA(&sc->sc_arq); 5238 5239 if (__predict_false(n <= 0)) 5240 return 0; 5241 5242 do { 5243 aqb = sc->sc_arq_live[prod]; 5244 iaq = &arq[prod]; 5245 5246 if (aqb == NULL) { 5247 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle); 5248 if (aqb != NULL) { 5249 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5250 ixl_aq_buf, aqb_entry); 5251 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) { 5252 break; 5253 } 5254 5255 sc->sc_arq_live[prod] = aqb; 5256 memset(aqb->aqb_data, 0, aqb->aqb_size); 5257 5258 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, 5259 aqb->aqb_size, BUS_DMASYNC_PREREAD); 5260 5261 iaq->iaq_flags = htole16(IXL_AQ_BUF | 5262 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? 5263 IXL_AQ_LB : 0)); 5264 iaq->iaq_opcode = 0; 5265 iaq->iaq_datalen = htole16(aqb->aqb_size); 5266 iaq->iaq_retval = 0; 5267 iaq->iaq_cookie = 0; 5268 iaq->iaq_param[0] = 0; 5269 iaq->iaq_param[1] = 0; 5270 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr); 5271 } 5272 5273 prod++; 5274 prod &= IXL_AQ_MASK; 5275 5276 post = 1; 5277 5278 } while (--n); 5279 5280 if (post) { 5281 sc->sc_arq_prod = prod; 5282 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 5283 } 5284 5285 return post; 5286 } 5287 5288 static void 5289 ixl_arq_unfill(struct ixl_softc *sc) 5290 { 5291 struct ixl_aq_buf *aqb; 5292 unsigned int i; 5293 5294 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) { 5295 aqb = sc->sc_arq_live[i]; 5296 if (aqb == NULL) 5297 continue; 5298 5299 sc->sc_arq_live[i] = NULL; 5300 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size, 5301 BUS_DMASYNC_POSTREAD); 5302 ixl_aqb_free(sc, aqb); 5303 } 5304 5305 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) { 5306 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5307 ixl_aq_buf, aqb_entry); 5308 ixl_aqb_free(sc, aqb); 5309 } 5310 } 5311 5312 static void 5313 ixl_clear_hw(struct ixl_softc *sc) 5314 { 5315 uint32_t num_queues, base_queue; 5316 uint32_t num_pf_int; 5317 uint32_t num_vf_int; 5318 uint32_t num_vfs; 5319 uint32_t i, j; 5320 uint32_t val; 5321 uint32_t eol = 0x7ff; 5322 5323 /* get number of interrupts, queues, and vfs */ 5324 val = ixl_rd(sc, I40E_GLPCI_CNF2); 5325 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 5326 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 5327 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> 5328 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; 5329 5330 val = ixl_rd(sc, I40E_PFLAN_QALLOC); 5331 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> 5332 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 5333 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> 5334 I40E_PFLAN_QALLOC_LASTQ_SHIFT; 5335 if (val & I40E_PFLAN_QALLOC_VALID_MASK) 5336 num_queues = (j - base_queue) + 1; 5337 else 5338 num_queues = 0; 5339 5340 val = ixl_rd(sc, I40E_PF_VT_PFALLOC); 5341 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> 5342 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; 5343 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> 5344 I40E_PF_VT_PFALLOC_LASTVF_SHIFT; 5345 if (val & I40E_PF_VT_PFALLOC_VALID_MASK) 5346 num_vfs = (j - i) + 1; 5347 else 5348 num_vfs = 0; 5349 5350 /* stop all the interrupts */ 5351 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5352 ixl_flush(sc); 5353 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 5354 for (i = 0; i < num_pf_int - 2; i++) 5355 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val); 5356 ixl_flush(sc); 5357 5358 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ 5359 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5360 ixl_wr(sc, I40E_PFINT_LNKLST0, val); 5361 for (i = 0; i < num_pf_int - 2; i++) 5362 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val); 5363 val = eol << 
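/* terminate the VF interrupt cause lists with the end-of-list index as well */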
I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5364 for (i = 0; i < num_vfs; i++) 5365 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val); 5366 for (i = 0; i < num_vf_int - 2; i++) 5367 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val); 5368 5369 /* warn the HW of the coming Tx disables */ 5370 for (i = 0; i < num_queues; i++) { 5371 uint32_t abs_queue_idx = base_queue + i; 5372 uint32_t reg_block = 0; 5373 5374 if (abs_queue_idx >= 128) { 5375 reg_block = abs_queue_idx / 128; 5376 abs_queue_idx %= 128; 5377 } 5378 5379 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block)); 5380 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 5381 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 5382 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 5383 5384 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val); 5385 } 5386 delaymsec(400); 5387 5388 /* stop all the queues */ 5389 for (i = 0; i < num_queues; i++) { 5390 ixl_wr(sc, I40E_QINT_TQCTL(i), 0); 5391 ixl_wr(sc, I40E_QTX_ENA(i), 0); 5392 ixl_wr(sc, I40E_QINT_RQCTL(i), 0); 5393 ixl_wr(sc, I40E_QRX_ENA(i), 0); 5394 } 5395 5396 /* short wait for all queue disables to settle */ 5397 delaymsec(50); 5398 } 5399 5400 static int 5401 ixl_pf_reset(struct ixl_softc *sc) 5402 { 5403 uint32_t cnt = 0; 5404 uint32_t cnt1 = 0; 5405 uint32_t reg = 0, reg0 = 0; 5406 uint32_t grst_del; 5407 5408 /* 5409 * Poll for Global Reset steady state in case of recent GRST. 5410 * The grst delay value is in 100ms units, and we'll wait a 5411 * couple counts longer to be sure we don't just miss the end. 5412 */ 5413 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL); 5414 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK; 5415 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 5416 5417 grst_del = grst_del * 20; 5418 5419 for (cnt = 0; cnt < grst_del; cnt++) { 5420 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 5421 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 5422 break; 5423 delaymsec(100); 5424 } 5425 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5426 aprint_error(", Global reset polling failed to complete\n"); 5427 return -1; 5428 } 5429 5430 /* Now Wait for the FW to be ready */ 5431 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { 5432 reg = ixl_rd(sc, I40E_GLNVM_ULD); 5433 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5434 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); 5435 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5436 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) 5437 break; 5438 5439 delaymsec(10); 5440 } 5441 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5442 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { 5443 aprint_error(", wait for FW Reset complete timed out " 5444 "(I40E_GLNVM_ULD = 0x%x)\n", reg); 5445 return -1; 5446 } 5447 5448 /* 5449 * If there was a Global Reset in progress when we got here, 5450 * we don't need to do the PF Reset 5451 */ 5452 if (cnt == 0) { 5453 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5454 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK); 5455 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { 5456 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5457 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) 5458 break; 5459 delaymsec(1); 5460 5461 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT); 5462 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5463 aprint_error(", Core reset upcoming." 
5464 " Skipping PF reset reset request\n"); 5465 return -1; 5466 } 5467 } 5468 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { 5469 aprint_error(", PF reset polling failed to complete" 5470 "(I40E_PFGEN_CTRL= 0x%x)\n", reg); 5471 return -1; 5472 } 5473 } 5474 5475 return 0; 5476 } 5477 5478 static int 5479 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm, 5480 bus_size_t size, bus_size_t align) 5481 { 5482 ixm->ixm_size = size; 5483 5484 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1, 5485 ixm->ixm_size, 0, 5486 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 5487 &ixm->ixm_map) != 0) 5488 return 1; 5489 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size, 5490 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs, 5491 BUS_DMA_WAITOK) != 0) 5492 goto destroy; 5493 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs, 5494 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0) 5495 goto free; 5496 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva, 5497 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0) 5498 goto unmap; 5499 5500 memset(ixm->ixm_kva, 0, ixm->ixm_size); 5501 5502 return 0; 5503 unmap: 5504 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5505 free: 5506 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5507 destroy: 5508 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5509 return 1; 5510 } 5511 5512 static void 5513 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm) 5514 { 5515 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map); 5516 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5517 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5518 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5519 } 5520 5521 static int 5522 ixl_setup_vlan_hwfilter(struct ixl_softc *sc) 5523 { 5524 struct ethercom *ec = &sc->sc_ec; 5525 struct vlanid_list *vlanidp; 5526 int rv; 5527 5528 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5529 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5530 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5531 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5532 5533 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5534 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5535 if (rv != 0) 5536 return rv; 5537 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5538 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5539 if (rv != 0) 5540 return rv; 5541 5542 ETHER_LOCK(ec); 5543 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5544 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 5545 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5546 if (rv != 0) 5547 break; 5548 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 5549 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5550 if (rv != 0) 5551 break; 5552 } 5553 ETHER_UNLOCK(ec); 5554 5555 return rv; 5556 } 5557 5558 static void 5559 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc) 5560 { 5561 struct vlanid_list *vlanidp; 5562 struct ethercom *ec = &sc->sc_ec; 5563 5564 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5565 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5566 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5567 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5568 5569 ETHER_LOCK(ec); 5570 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5571 ixl_remove_macvlan(sc, sc->sc_enaddr, 5572 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5573 ixl_remove_macvlan(sc, etherbroadcastaddr, 5574 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5575 } 5576 ETHER_UNLOCK(ec); 5577 5578 ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5579 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5580 ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5581 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5582 } 5583 5584 static int 
5585 ixl_update_macvlan(struct ixl_softc *sc) 5586 { 5587 int rv = 0; 5588 int next_ec_capenable = sc->sc_ec.ec_capenable; 5589 5590 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 5591 rv = ixl_setup_vlan_hwfilter(sc); 5592 if (rv != 0) 5593 ixl_teardown_vlan_hwfilter(sc); 5594 } else { 5595 ixl_teardown_vlan_hwfilter(sc); 5596 } 5597 5598 return rv; 5599 } 5600 5601 static int 5602 ixl_ifflags_cb(struct ethercom *ec) 5603 { 5604 struct ifnet *ifp = &ec->ec_if; 5605 struct ixl_softc *sc = ifp->if_softc; 5606 int rv, change; 5607 5608 mutex_enter(&sc->sc_cfg_lock); 5609 5610 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable; 5611 5612 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) { 5613 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 5614 rv = ENETRESET; 5615 goto out; 5616 } 5617 5618 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) { 5619 rv = ixl_update_macvlan(sc); 5620 if (rv == 0) { 5621 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 5622 } else { 5623 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER); 5624 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 5625 } 5626 } 5627 5628 rv = ixl_iff(sc); 5629 out: 5630 mutex_exit(&sc->sc_cfg_lock); 5631 5632 return rv; 5633 } 5634 5635 static int 5636 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 5637 { 5638 const struct ixl_aq_link_status *status; 5639 const struct ixl_phy_type *itype; 5640 5641 uint64_t ifm_active = IFM_ETHER; 5642 uint64_t ifm_status = IFM_AVALID; 5643 int link_state = LINK_STATE_DOWN; 5644 uint64_t baudrate = 0; 5645 5646 status = (const struct ixl_aq_link_status *)iaq->iaq_param; 5647 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) { 5648 ifm_active |= IFM_NONE; 5649 goto done; 5650 } 5651 5652 ifm_active |= IFM_FDX; 5653 ifm_status |= IFM_ACTIVE; 5654 link_state = LINK_STATE_UP; 5655 5656 itype = ixl_search_phy_type(status->phy_type); 5657 if (itype != NULL) 5658 ifm_active |= itype->ifm_type; 5659 5660 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX)) 5661 ifm_active |= IFM_ETH_TXPAUSE; 5662 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX)) 5663 ifm_active |= IFM_ETH_RXPAUSE; 5664 5665 baudrate = ixl_search_link_speed(status->link_speed); 5666 5667 done: 5668 /* sc->sc_cfg_lock held expect during attach */ 5669 sc->sc_media_active = ifm_active; 5670 sc->sc_media_status = ifm_status; 5671 5672 sc->sc_ec.ec_if.if_baudrate = baudrate; 5673 5674 return link_state; 5675 } 5676 5677 static int 5678 ixl_establish_intx(struct ixl_softc *sc) 5679 { 5680 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5681 pci_intr_handle_t *intr; 5682 char xnamebuf[32]; 5683 char intrbuf[PCI_INTRSTR_LEN]; 5684 char const *intrstr; 5685 5686 KASSERT(sc->sc_nintrs == 1); 5687 5688 intr = &sc->sc_ihp[0]; 5689 5690 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 5691 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy", 5692 device_xname(sc->sc_dev)); 5693 5694 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr, 5695 sc, xnamebuf); 5696 5697 if (sc->sc_ihs[0] == NULL) { 5698 aprint_error_dev(sc->sc_dev, 5699 "unable to establish interrupt at %s\n", intrstr); 5700 return -1; 5701 } 5702 5703 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 5704 return 0; 5705 } 5706 5707 static int 5708 ixl_establish_msix(struct ixl_softc *sc) 5709 { 5710 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5711 kcpuset_t *affinity; 5712 unsigned int vector = 0; 5713 unsigned int i; 5714 int affinity_to, r; 5715 char xnamebuf[32]; 5716 char intrbuf[PCI_INTRSTR_LEN]; 5717 char const 
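/*
 * MSI-X vector layout, a sketch (the final CPU affinity depends on
 * ncpu and on what interrupt_distribute() accepts):
 *
 *	vector 0	admin queue and other miscellaneous causes
 *	vector 1..N	queue pair 0..N-1; TX and RX of a pair share
 *			one vector
 */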
*intrstr; 5718 5719 kcpuset_create(&affinity, false); 5720 5721 /* the "other" intr is mapped to vector 0 */ 5722 vector = 0; 5723 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5724 intrbuf, sizeof(intrbuf)); 5725 snprintf(xnamebuf, sizeof(xnamebuf), "%s others", 5726 device_xname(sc->sc_dev)); 5727 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5728 sc->sc_ihp[vector], IPL_NET, ixl_other_intr, 5729 sc, xnamebuf); 5730 if (sc->sc_ihs[vector] == NULL) { 5731 aprint_error_dev(sc->sc_dev, 5732 "unable to establish interrupt at %s\n", intrstr); 5733 goto fail; 5734 } 5735 5736 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr); 5737 5738 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5739 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu; 5740 5741 kcpuset_zero(affinity); 5742 kcpuset_set(affinity, affinity_to); 5743 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5744 if (r == 0) { 5745 aprint_normal(", affinity to %u", affinity_to); 5746 } 5747 aprint_normal("\n"); 5748 vector++; 5749 5750 sc->sc_msix_vector_queue = vector; 5751 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5752 5753 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 5754 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5755 intrbuf, sizeof(intrbuf)); 5756 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d", 5757 device_xname(sc->sc_dev), i); 5758 5759 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5760 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr, 5761 (void *)&sc->sc_qps[i], xnamebuf); 5762 5763 if (sc->sc_ihs[vector] == NULL) { 5764 aprint_error_dev(sc->sc_dev, 5765 "unable to establish interrupt at %s\n", intrstr); 5766 goto fail; 5767 } 5768 5769 aprint_normal_dev(sc->sc_dev, 5770 "for TXRX%d interrupt at %s", i, intrstr); 5771 5772 kcpuset_zero(affinity); 5773 kcpuset_set(affinity, affinity_to); 5774 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5775 if (r == 0) { 5776 aprint_normal(", affinity to %u", affinity_to); 5777 affinity_to = (affinity_to + 1) % ncpu; 5778 } 5779 aprint_normal("\n"); 5780 vector++; 5781 } 5782 5783 kcpuset_destroy(affinity); 5784 5785 return 0; 5786 fail: 5787 for (i = 0; i < vector; i++) { 5788 pci_intr_disestablish(pc, sc->sc_ihs[i]); 5789 } 5790 5791 sc->sc_msix_vector_queue = 0; 5792 sc->sc_msix_vector_queue = 0; 5793 kcpuset_destroy(affinity); 5794 5795 return -1; 5796 } 5797 5798 static void 5799 ixl_config_queue_intr(struct ixl_softc *sc) 5800 { 5801 unsigned int i, vector; 5802 5803 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5804 vector = sc->sc_msix_vector_queue; 5805 } else { 5806 vector = I40E_INTR_NOTX_INTR; 5807 5808 ixl_wr(sc, I40E_PFINT_LNKLST0, 5809 (I40E_INTR_NOTX_QUEUE << 5810 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) | 5811 (I40E_QUEUE_TYPE_RX << 5812 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5813 } 5814 5815 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 5816 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0); 5817 ixl_flush(sc); 5818 5819 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), 5820 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | 5821 (I40E_QUEUE_TYPE_RX << 5822 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5823 5824 ixl_wr(sc, I40E_QINT_RQCTL(i), 5825 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 5826 (I40E_ITR_INDEX_RX << 5827 I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 5828 (I40E_INTR_NOTX_RX_QUEUE << 5829 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) | 5830 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 5831 (I40E_QUEUE_TYPE_TX << 5832 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 5833 I40E_QINT_RQCTL_CAUSE_ENA_MASK); 5834 5835 ixl_wr(sc, 
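/*
 * TX queue cause: same vector as the RX side of the pair, TX ITR
 * index, and the end of this vector's cause chain (next index is EOL).
 */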
I40E_QINT_TQCTL(i), 5836 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 5837 (I40E_ITR_INDEX_TX << 5838 I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 5839 (I40E_INTR_NOTX_TX_QUEUE << 5840 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) | 5841 (I40E_QUEUE_TYPE_EOL << 5842 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | 5843 (I40E_QUEUE_TYPE_RX << 5844 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) | 5845 I40E_QINT_TQCTL_CAUSE_ENA_MASK); 5846 5847 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5848 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i), 5849 sc->sc_itr_rx); 5850 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i), 5851 sc->sc_itr_tx); 5852 vector++; 5853 } 5854 } 5855 ixl_flush(sc); 5856 5857 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx); 5858 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx); 5859 ixl_flush(sc); 5860 } 5861 5862 static void 5863 ixl_config_other_intr(struct ixl_softc *sc) 5864 { 5865 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5866 (void)ixl_rd(sc, I40E_PFINT_ICR0); 5867 5868 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 5869 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 5870 I40E_PFINT_ICR0_ENA_GRST_MASK | 5871 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 5872 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 5873 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 5874 I40E_PFINT_ICR0_ENA_VFLR_MASK | 5875 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 5876 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 5877 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK); 5878 5879 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF); 5880 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0); 5881 ixl_wr(sc, I40E_PFINT_STAT_CTL0, 5882 (I40E_ITR_INDEX_OTHER << 5883 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)); 5884 ixl_flush(sc); 5885 } 5886 5887 static int 5888 ixl_setup_interrupts(struct ixl_softc *sc) 5889 { 5890 struct pci_attach_args *pa = &sc->sc_pa; 5891 pci_intr_type_t max_type, intr_type; 5892 int counts[PCI_INTR_TYPE_SIZE]; 5893 int error; 5894 unsigned int i; 5895 bool retry; 5896 5897 memset(counts, 0, sizeof(counts)); 5898 max_type = PCI_INTR_TYPE_MSIX; 5899 /* QPs + other interrupt */ 5900 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1; 5901 counts[PCI_INTR_TYPE_INTX] = 1; 5902 5903 if (ixl_param_nomsix) 5904 counts[PCI_INTR_TYPE_MSIX] = 0; 5905 5906 do { 5907 retry = false; 5908 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type); 5909 if (error != 0) { 5910 aprint_error_dev(sc->sc_dev, 5911 "couldn't map interrupt\n"); 5912 break; 5913 } 5914 5915 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]); 5916 sc->sc_nintrs = counts[intr_type]; 5917 KASSERT(sc->sc_nintrs > 0); 5918 5919 for (i = 0; i < sc->sc_nintrs; i++) { 5920 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i], 5921 PCI_INTR_MPSAFE, true); 5922 } 5923 5924 sc->sc_ihs = kmem_zalloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs, 5925 KM_SLEEP); 5926 5927 if (intr_type == PCI_INTR_TYPE_MSIX) { 5928 error = ixl_establish_msix(sc); 5929 if (error) { 5930 counts[PCI_INTR_TYPE_MSIX] = 0; 5931 retry = true; 5932 } 5933 } else if (intr_type == PCI_INTR_TYPE_INTX) { 5934 error = ixl_establish_intx(sc); 5935 } else { 5936 error = -1; 5937 } 5938 5939 if (error) { 5940 kmem_free(sc->sc_ihs, 5941 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 5942 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 5943 } else { 5944 sc->sc_intrtype = intr_type; 5945 } 5946 } while (retry); 5947 5948 return error; 5949 } 5950 5951 static void 5952 ixl_teardown_interrupts(struct ixl_softc *sc) 5953 { 5954 struct pci_attach_args *pa = &sc->sc_pa; 5955 unsigned int i; 5956 5957 for (i = 0; i < sc->sc_nintrs; i++) { 5958 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]); 
5959 } 5960 5961 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 5962 5963 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 5964 sc->sc_ihs = NULL; 5965 sc->sc_nintrs = 0; 5966 } 5967 5968 static int 5969 ixl_setup_stats(struct ixl_softc *sc) 5970 { 5971 struct ixl_queue_pair *qp; 5972 struct ixl_tx_ring *txr; 5973 struct ixl_rx_ring *rxr; 5974 struct ixl_stats_counters *isc; 5975 unsigned int i; 5976 5977 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 5978 qp = &sc->sc_qps[i]; 5979 txr = qp->qp_txr; 5980 rxr = qp->qp_rxr; 5981 5982 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC, 5983 NULL, qp->qp_name, "m_defrag successed"); 5984 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC, 5985 NULL, qp->qp_name, "m_defrag_failed"); 5986 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC, 5987 NULL, qp->qp_name, "Dropped in pcq"); 5988 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC, 5989 NULL, qp->qp_name, "Deferred transmit"); 5990 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, 5991 NULL, qp->qp_name, "Interrupt on queue"); 5992 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC, 5993 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 5994 5995 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC, 5996 NULL, qp->qp_name, "MGETHDR failed"); 5997 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC, 5998 NULL, qp->qp_name, "MCLGET failed"); 5999 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed, 6000 EVCNT_TYPE_MISC, NULL, qp->qp_name, 6001 "bus_dmamap_load_mbuf failed"); 6002 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, 6003 NULL, qp->qp_name, "Interrupt on queue"); 6004 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC, 6005 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 6006 } 6007 6008 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR, 6009 NULL, device_xname(sc->sc_dev), "Interrupt for other events"); 6010 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC, 6011 NULL, device_xname(sc->sc_dev), "Link status event"); 6012 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC, 6013 NULL, device_xname(sc->sc_dev), "ECC error"); 6014 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC, 6015 NULL, device_xname(sc->sc_dev), "PCI exception"); 6016 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC, 6017 NULL, device_xname(sc->sc_dev), "Critical error"); 6018 6019 isc = &sc->sc_stats_counters; 6020 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC, 6021 NULL, device_xname(sc->sc_dev), "CRC errors"); 6022 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC, 6023 NULL, device_xname(sc->sc_dev), "Illegal bytes"); 6024 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC, 6025 NULL, device_xname(sc->sc_dev), "Mac local faults"); 6026 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC, 6027 NULL, device_xname(sc->sc_dev), "Mac remote faults"); 6028 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC, 6029 NULL, device_xname(sc->sc_dev), "Rx xon"); 6030 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC, 6031 NULL, device_xname(sc->sc_dev), "Tx xon"); 6032 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC, 6033 NULL, device_xname(sc->sc_dev), "Rx xoff"); 6034 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC, 6035 NULL, device_xname(sc->sc_dev), "Tx xoff"); 6036 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC, 6037 NULL, device_xname(sc->sc_dev), "Rx 
fragments"); 6038 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC, 6039 NULL, device_xname(sc->sc_dev), "Rx jabber"); 6040 6041 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC, 6042 NULL, device_xname(sc->sc_dev), "Rx size 64"); 6043 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC, 6044 NULL, device_xname(sc->sc_dev), "Rx size 127"); 6045 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC, 6046 NULL, device_xname(sc->sc_dev), "Rx size 255"); 6047 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC, 6048 NULL, device_xname(sc->sc_dev), "Rx size 511"); 6049 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC, 6050 NULL, device_xname(sc->sc_dev), "Rx size 1023"); 6051 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC, 6052 NULL, device_xname(sc->sc_dev), "Rx size 1522"); 6053 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC, 6054 NULL, device_xname(sc->sc_dev), "Rx jumbo packets"); 6055 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC, 6056 NULL, device_xname(sc->sc_dev), "Rx under size"); 6057 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC, 6058 NULL, device_xname(sc->sc_dev), "Rx over size"); 6059 6060 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC, 6061 NULL, device_xname(sc->sc_dev), "Rx bytes / port"); 6062 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC, 6063 NULL, device_xname(sc->sc_dev), "Rx discards / port"); 6064 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC, 6065 NULL, device_xname(sc->sc_dev), "Rx unicast / port"); 6066 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC, 6067 NULL, device_xname(sc->sc_dev), "Rx multicast / port"); 6068 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC, 6069 NULL, device_xname(sc->sc_dev), "Rx broadcast / port"); 6070 6071 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC, 6072 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi"); 6073 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC, 6074 NULL, device_xname(sc->sc_dev), "Rx discard / vsi"); 6075 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC, 6076 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi"); 6077 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC, 6078 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi"); 6079 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC, 6080 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi"); 6081 6082 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC, 6083 NULL, device_xname(sc->sc_dev), "Tx size 64"); 6084 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC, 6085 NULL, device_xname(sc->sc_dev), "Tx size 127"); 6086 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC, 6087 NULL, device_xname(sc->sc_dev), "Tx size 255"); 6088 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC, 6089 NULL, device_xname(sc->sc_dev), "Tx size 511"); 6090 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC, 6091 NULL, device_xname(sc->sc_dev), "Tx size 1023"); 6092 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC, 6093 NULL, device_xname(sc->sc_dev), "Tx size 1522"); 6094 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC, 6095 NULL, device_xname(sc->sc_dev), "Tx jumbo packets"); 6096 6097 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC, 6098 NULL, device_xname(sc->sc_dev), "Tx bytes / port"); 6099 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC, 6100 NULL, 
device_xname(sc->sc_dev), 6101 "Tx dropped due to link down / port"); 6102 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC, 6103 NULL, device_xname(sc->sc_dev), "Tx unicast / port"); 6104 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC, 6105 NULL, device_xname(sc->sc_dev), "Tx multicast / port"); 6106 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC, 6107 NULL, device_xname(sc->sc_dev), "Tx broadcast / port"); 6108 6109 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC, 6110 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi"); 6111 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC, 6112 NULL, device_xname(sc->sc_dev), "Tx errors / vsi"); 6113 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC, 6114 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi"); 6115 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC, 6116 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi"); 6117 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC, 6118 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi"); 6119 6120 sc->sc_stats_intval = ixl_param_stats_interval; 6121 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE); 6122 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc); 6123 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc); 6124 6125 return 0; 6126 } 6127 6128 static void 6129 ixl_teardown_stats(struct ixl_softc *sc) 6130 { 6131 struct ixl_tx_ring *txr; 6132 struct ixl_rx_ring *rxr; 6133 struct ixl_stats_counters *isc; 6134 unsigned int i; 6135 6136 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 6137 txr = sc->sc_qps[i].qp_txr; 6138 rxr = sc->sc_qps[i].qp_rxr; 6139 6140 evcnt_detach(&txr->txr_defragged); 6141 evcnt_detach(&txr->txr_defrag_failed); 6142 evcnt_detach(&txr->txr_pcqdrop); 6143 evcnt_detach(&txr->txr_transmitdef); 6144 evcnt_detach(&txr->txr_intr); 6145 evcnt_detach(&txr->txr_defer); 6146 6147 evcnt_detach(&rxr->rxr_mgethdr_failed); 6148 evcnt_detach(&rxr->rxr_mgetcl_failed); 6149 evcnt_detach(&rxr->rxr_mbuf_load_failed); 6150 evcnt_detach(&rxr->rxr_intr); 6151 evcnt_detach(&rxr->rxr_defer); 6152 } 6153 6154 isc = &sc->sc_stats_counters; 6155 evcnt_detach(&isc->isc_crc_errors); 6156 evcnt_detach(&isc->isc_illegal_bytes); 6157 evcnt_detach(&isc->isc_mac_local_faults); 6158 evcnt_detach(&isc->isc_mac_remote_faults); 6159 evcnt_detach(&isc->isc_link_xon_rx); 6160 evcnt_detach(&isc->isc_link_xon_tx); 6161 evcnt_detach(&isc->isc_link_xoff_rx); 6162 evcnt_detach(&isc->isc_link_xoff_tx); 6163 evcnt_detach(&isc->isc_rx_fragments); 6164 evcnt_detach(&isc->isc_rx_jabber); 6165 evcnt_detach(&isc->isc_rx_bytes); 6166 evcnt_detach(&isc->isc_rx_discards); 6167 evcnt_detach(&isc->isc_rx_unicast); 6168 evcnt_detach(&isc->isc_rx_multicast); 6169 evcnt_detach(&isc->isc_rx_broadcast); 6170 evcnt_detach(&isc->isc_rx_size_64); 6171 evcnt_detach(&isc->isc_rx_size_127); 6172 evcnt_detach(&isc->isc_rx_size_255); 6173 evcnt_detach(&isc->isc_rx_size_511); 6174 evcnt_detach(&isc->isc_rx_size_1023); 6175 evcnt_detach(&isc->isc_rx_size_1522); 6176 evcnt_detach(&isc->isc_rx_size_big); 6177 evcnt_detach(&isc->isc_rx_undersize); 6178 evcnt_detach(&isc->isc_rx_oversize); 6179 evcnt_detach(&isc->isc_tx_bytes); 6180 evcnt_detach(&isc->isc_tx_dropped_link_down); 6181 evcnt_detach(&isc->isc_tx_unicast); 6182 evcnt_detach(&isc->isc_tx_multicast); 6183 evcnt_detach(&isc->isc_tx_broadcast); 6184 evcnt_detach(&isc->isc_tx_size_64); 6185 evcnt_detach(&isc->isc_tx_size_127); 6186 evcnt_detach(&isc->isc_tx_size_255); 6187 
evcnt_detach(&isc->isc_tx_size_511); 6188 evcnt_detach(&isc->isc_tx_size_1023); 6189 evcnt_detach(&isc->isc_tx_size_1522); 6190 evcnt_detach(&isc->isc_tx_size_big); 6191 evcnt_detach(&isc->isc_vsi_rx_discards); 6192 evcnt_detach(&isc->isc_vsi_rx_bytes); 6193 evcnt_detach(&isc->isc_vsi_rx_unicast); 6194 evcnt_detach(&isc->isc_vsi_rx_multicast); 6195 evcnt_detach(&isc->isc_vsi_rx_broadcast); 6196 evcnt_detach(&isc->isc_vsi_tx_errors); 6197 evcnt_detach(&isc->isc_vsi_tx_bytes); 6198 evcnt_detach(&isc->isc_vsi_tx_unicast); 6199 evcnt_detach(&isc->isc_vsi_tx_multicast); 6200 evcnt_detach(&isc->isc_vsi_tx_broadcast); 6201 6202 evcnt_detach(&sc->sc_event_atq); 6203 evcnt_detach(&sc->sc_event_link); 6204 evcnt_detach(&sc->sc_event_ecc_err); 6205 evcnt_detach(&sc->sc_event_pci_exception); 6206 evcnt_detach(&sc->sc_event_crit_err); 6207 6208 callout_destroy(&sc->sc_stats_callout); 6209 } 6210 6211 static void 6212 ixl_stats_callout(void *xsc) 6213 { 6214 struct ixl_softc *sc = xsc; 6215 6216 ixl_work_add(sc->sc_workq, &sc->sc_stats_task); 6217 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 6218 } 6219 6220 static uint64_t 6221 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo, 6222 uint64_t *offset, bool has_offset) 6223 { 6224 uint64_t value, delta; 6225 int bitwidth; 6226 6227 bitwidth = reg_hi == 0 ? 32 : 48; 6228 6229 value = ixl_rd(sc, reg_lo); 6230 6231 if (bitwidth > 32) { 6232 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32); 6233 } 6234 6235 if (__predict_true(has_offset)) { 6236 delta = value; 6237 if (value < *offset) 6238 delta += ((uint64_t)1 << bitwidth); 6239 delta -= *offset; 6240 } else { 6241 delta = 0; 6242 } 6243 atomic_swap_64(offset, value); 6244 6245 return delta; 6246 } 6247 6248 static void 6249 ixl_stats_update(void *xsc) 6250 { 6251 struct ixl_softc *sc = xsc; 6252 struct ixl_stats_counters *isc; 6253 uint64_t delta; 6254 6255 isc = &sc->sc_stats_counters; 6256 6257 /* errors */ 6258 delta = ixl_stat_delta(sc, 6259 0, I40E_GLPRT_CRCERRS(sc->sc_port), 6260 &isc->isc_crc_errors_offset, isc->isc_has_offset); 6261 atomic_add_64(&isc->isc_crc_errors.ev_count, delta); 6262 6263 delta = ixl_stat_delta(sc, 6264 0, I40E_GLPRT_ILLERRC(sc->sc_port), 6265 &isc->isc_illegal_bytes_offset, isc->isc_has_offset); 6266 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta); 6267 6268 /* rx */ 6269 delta = ixl_stat_delta(sc, 6270 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port), 6271 &isc->isc_rx_bytes_offset, isc->isc_has_offset); 6272 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta); 6273 6274 delta = ixl_stat_delta(sc, 6275 0, I40E_GLPRT_RDPC(sc->sc_port), 6276 &isc->isc_rx_discards_offset, isc->isc_has_offset); 6277 atomic_add_64(&isc->isc_rx_discards.ev_count, delta); 6278 6279 delta = ixl_stat_delta(sc, 6280 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port), 6281 &isc->isc_rx_unicast_offset, isc->isc_has_offset); 6282 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta); 6283 6284 delta = ixl_stat_delta(sc, 6285 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port), 6286 &isc->isc_rx_multicast_offset, isc->isc_has_offset); 6287 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta); 6288 6289 delta = ixl_stat_delta(sc, 6290 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port), 6291 &isc->isc_rx_broadcast_offset, isc->isc_has_offset); 6292 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta); 6293 6294 /* Packet size stats rx */ 6295 delta = ixl_stat_delta(sc, 6296 I40E_GLPRT_PRC64H(sc->sc_port), 
I40E_GLPRT_PRC64L(sc->sc_port),
	    &isc->isc_rx_size_64_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
	    &isc->isc_rx_size_127_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
	    &isc->isc_rx_size_255_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
	    &isc->isc_rx_size_511_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
	    &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
	    &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
	    &isc->isc_rx_size_big_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_RUC(sc->sc_port),
	    &isc->isc_rx_undersize_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_ROC(sc->sc_port),
	    &isc->isc_rx_oversize_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);

	/* tx */
	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
	    &isc->isc_tx_bytes_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_TDOLD(sc->sc_port),
	    &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
	    &isc->isc_tx_unicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
	    &isc->isc_tx_multicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
	    &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);

	/* Packet size stats tx */
	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
	    &isc->isc_tx_size_64_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
	    &isc->isc_tx_size_127_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC255H(sc->sc_port),
I40E_GLPRT_PTC255L(sc->sc_port), 6379 &isc->isc_tx_size_255_offset, isc->isc_has_offset); 6380 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta); 6381 6382 delta = ixl_stat_delta(sc, 6383 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port), 6384 &isc->isc_tx_size_511_offset, isc->isc_has_offset); 6385 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta); 6386 6387 delta = ixl_stat_delta(sc, 6388 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port), 6389 &isc->isc_tx_size_1023_offset, isc->isc_has_offset); 6390 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta); 6391 6392 delta = ixl_stat_delta(sc, 6393 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port), 6394 &isc->isc_tx_size_1522_offset, isc->isc_has_offset); 6395 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta); 6396 6397 delta = ixl_stat_delta(sc, 6398 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port), 6399 &isc->isc_tx_size_big_offset, isc->isc_has_offset); 6400 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta); 6401 6402 /* mac faults */ 6403 delta = ixl_stat_delta(sc, 6404 0, I40E_GLPRT_MLFC(sc->sc_port), 6405 &isc->isc_mac_local_faults_offset, isc->isc_has_offset); 6406 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta); 6407 6408 delta = ixl_stat_delta(sc, 6409 0, I40E_GLPRT_MRFC(sc->sc_port), 6410 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset); 6411 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta); 6412 6413 /* Flow control (LFC) stats */ 6414 delta = ixl_stat_delta(sc, 6415 0, I40E_GLPRT_LXONRXC(sc->sc_port), 6416 &isc->isc_link_xon_rx_offset, isc->isc_has_offset); 6417 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta); 6418 6419 delta = ixl_stat_delta(sc, 6420 0, I40E_GLPRT_LXONTXC(sc->sc_port), 6421 &isc->isc_link_xon_tx_offset, isc->isc_has_offset); 6422 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta); 6423 6424 delta = ixl_stat_delta(sc, 6425 0, I40E_GLPRT_LXOFFRXC(sc->sc_port), 6426 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset); 6427 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta); 6428 6429 delta = ixl_stat_delta(sc, 6430 0, I40E_GLPRT_LXOFFTXC(sc->sc_port), 6431 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset); 6432 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta); 6433 6434 /* fragments */ 6435 delta = ixl_stat_delta(sc, 6436 0, I40E_GLPRT_RFC(sc->sc_port), 6437 &isc->isc_rx_fragments_offset, isc->isc_has_offset); 6438 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta); 6439 6440 delta = ixl_stat_delta(sc, 6441 0, I40E_GLPRT_RJC(sc->sc_port), 6442 &isc->isc_rx_jabber_offset, isc->isc_has_offset); 6443 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta); 6444 6445 /* VSI rx counters */ 6446 delta = ixl_stat_delta(sc, 6447 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx), 6448 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset); 6449 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta); 6450 6451 delta = ixl_stat_delta(sc, 6452 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx), 6453 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx), 6454 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset); 6455 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta); 6456 6457 delta = ixl_stat_delta(sc, 6458 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx), 6459 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx), 6460 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset); 6461 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta); 6462 6463 delta = ixl_stat_delta(sc, 6464 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx), 6465 
I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx), 6466 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset); 6467 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta); 6468 6469 delta = ixl_stat_delta(sc, 6470 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx), 6471 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx), 6472 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset); 6473 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta); 6474 6475 /* VSI tx counters */ 6476 delta = ixl_stat_delta(sc, 6477 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx), 6478 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset); 6479 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta); 6480 6481 delta = ixl_stat_delta(sc, 6482 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx), 6483 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx), 6484 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset); 6485 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta); 6486 6487 delta = ixl_stat_delta(sc, 6488 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx), 6489 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx), 6490 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset); 6491 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta); 6492 6493 delta = ixl_stat_delta(sc, 6494 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx), 6495 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx), 6496 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset); 6497 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta); 6498 6499 delta = ixl_stat_delta(sc, 6500 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx), 6501 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx), 6502 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset); 6503 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta); 6504 } 6505 6506 static int 6507 ixl_setup_sysctls(struct ixl_softc *sc) 6508 { 6509 const char *devname; 6510 struct sysctllog **log; 6511 const struct sysctlnode *rnode, *rxnode, *txnode; 6512 int error; 6513 6514 log = &sc->sc_sysctllog; 6515 devname = device_xname(sc->sc_dev); 6516 6517 error = sysctl_createv(log, 0, NULL, &rnode, 6518 0, CTLTYPE_NODE, devname, 6519 SYSCTL_DESCR("ixl information and settings"), 6520 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 6521 if (error) 6522 goto out; 6523 6524 error = sysctl_createv(log, 0, &rnode, NULL, 6525 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue", 6526 SYSCTL_DESCR("Use workqueue for packet processing"), 6527 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL); 6528 if (error) 6529 goto out; 6530 6531 error = sysctl_createv(log, 0, &rnode, NULL, 6532 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval", 6533 SYSCTL_DESCR("Statistics collection interval in milliseconds"), 6534 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL); 6535 6536 error = sysctl_createv(log, 0, &rnode, &rxnode, 6537 0, CTLTYPE_NODE, "rx", 6538 SYSCTL_DESCR("ixl information and settings for Rx"), 6539 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 6540 if (error) 6541 goto out; 6542 6543 error = sysctl_createv(log, 0, &rxnode, NULL, 6544 CTLFLAG_READWRITE, CTLTYPE_INT, "itr", 6545 SYSCTL_DESCR("Interrupt Throttling"), 6546 ixl_sysctl_itr_handler, 0, 6547 (void *)sc, 0, CTL_CREATE, CTL_EOL); 6548 if (error) 6549 goto out; 6550 6551 error = sysctl_createv(log, 0, &rxnode, NULL, 6552 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num", 6553 SYSCTL_DESCR("the number of rx descriptors"), 6554 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL); 6555 if (error) 6556 goto out; 6557 6558 error = sysctl_createv(log, 0, &rxnode, NULL, 6559 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 
	    SYSCTL_DESCR("max number of Rx packets"
	    " to process for interrupt processing"),
	    NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rxnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
	    SYSCTL_DESCR("max number of Rx packets"
	    " to process for deferred processing"),
	    NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rnode, &txnode,
	    0, CTLTYPE_NODE, "tx",
	    SYSCTL_DESCR("ixl information and settings for Tx"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
	    SYSCTL_DESCR("Interrupt Throttling"),
	    ixl_sysctl_itr_handler, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
	    SYSCTL_DESCR("the number of tx descriptors"),
	    NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
	    SYSCTL_DESCR("max number of Tx packets"
	    " to process for interrupt processing"),
	    NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
	    SYSCTL_DESCR("max number of Tx packets"
	    " to process for deferred processing"),
	    NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

out:
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create sysctl node\n");
		sysctl_teardown(log);
	}

	return error;
}

static void
ixl_teardown_sysctls(struct ixl_softc *sc)
{

	sysctl_teardown(&sc->sc_sysctllog);
}

static bool
ixl_sysctlnode_is_rx(struct sysctlnode *node)
{

	if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
		return true;

	return false;
}

static int
ixl_sysctl_itr_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t newitr, *itrptr;
	int error;

	if (ixl_sysctlnode_is_rx(&node)) {
		itrptr = &sc->sc_itr_rx;
	} else {
		itrptr = &sc->sc_itr_tx;
	}

	newitr = *itrptr;
	node.sysctl_data = &newitr;
	node.sysctl_size = sizeof(newitr);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	/* ITRs are applied in ixl_init() to keep the implementation simple */
	if (ISSET(ifp->if_flags, IFF_RUNNING))
		return EBUSY;

	if (newitr > 0x07ff)
		return EINVAL;

	*itrptr = newitr;

	return 0;
}

static struct workqueue *
ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	int error;

	error = workqueue_create(&wq, name, ixl_workq_work, NULL,
	    prio, ipl, flags);

	if (error)
		return NULL;

	return wq;
}

static void
ixl_workq_destroy(struct workqueue *wq)
{

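	/*
	 * workqueue_destroy(9) expects the queue to be empty.  Callers of
	 * this helper are expected to have drained any pending struct
	 * ixl_work first, for example via ixl_work_wait(), before the
	 * backing workqueue is torn down.
	 */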
6694 workqueue_destroy(wq); 6695 } 6696 6697 static void 6698 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg) 6699 { 6700 6701 memset(work, 0, sizeof(*work)); 6702 work->ixw_func = func; 6703 work->ixw_arg = arg; 6704 } 6705 6706 static void 6707 ixl_work_add(struct workqueue *wq, struct ixl_work *work) 6708 { 6709 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0) 6710 return; 6711 6712 kpreempt_disable(); 6713 workqueue_enqueue(wq, &work->ixw_cookie, NULL); 6714 kpreempt_enable(); 6715 } 6716 6717 static void 6718 ixl_work_wait(struct workqueue *wq, struct ixl_work *work) 6719 { 6720 6721 workqueue_wait(wq, &work->ixw_cookie); 6722 } 6723 6724 static void 6725 ixl_workq_work(struct work *wk, void *context) 6726 { 6727 struct ixl_work *work; 6728 6729 work = container_of(wk, struct ixl_work, ixw_cookie); 6730 6731 atomic_swap_uint(&work->ixw_added, 0); 6732 work->ixw_func(work->ixw_arg); 6733 } 6734 6735 static int 6736 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv) 6737 { 6738 struct ixl_aq_desc iaq; 6739 6740 memset(&iaq, 0, sizeof(iaq)); 6741 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ); 6742 iaq.iaq_param[1] = htole32(reg); 6743 6744 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6745 return ETIMEDOUT; 6746 6747 switch (htole16(iaq.iaq_retval)) { 6748 case IXL_AQ_RC_OK: 6749 /* success */ 6750 break; 6751 case IXL_AQ_RC_EACCES: 6752 return EPERM; 6753 case IXL_AQ_RC_EAGAIN: 6754 return EAGAIN; 6755 default: 6756 return EIO; 6757 } 6758 6759 *rv = htole32(iaq.iaq_param[3]); 6760 return 0; 6761 } 6762 6763 static uint32_t 6764 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg) 6765 { 6766 uint32_t val; 6767 int rv, retry, retry_limit; 6768 6769 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6770 retry_limit = 5; 6771 } else { 6772 retry_limit = 0; 6773 } 6774 6775 for (retry = 0; retry < retry_limit; retry++) { 6776 rv = ixl_rx_ctl_read(sc, reg, &val); 6777 if (rv == 0) 6778 return val; 6779 else if (rv == EAGAIN) 6780 delaymsec(1); 6781 else 6782 break; 6783 } 6784 6785 val = ixl_rd(sc, reg); 6786 6787 return val; 6788 } 6789 6790 static int 6791 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6792 { 6793 struct ixl_aq_desc iaq; 6794 6795 memset(&iaq, 0, sizeof(iaq)); 6796 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE); 6797 iaq.iaq_param[1] = htole32(reg); 6798 iaq.iaq_param[3] = htole32(value); 6799 6800 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6801 return ETIMEDOUT; 6802 6803 switch (htole16(iaq.iaq_retval)) { 6804 case IXL_AQ_RC_OK: 6805 /* success */ 6806 break; 6807 case IXL_AQ_RC_EACCES: 6808 return EPERM; 6809 case IXL_AQ_RC_EAGAIN: 6810 return EAGAIN; 6811 default: 6812 return EIO; 6813 } 6814 6815 return 0; 6816 } 6817 6818 static void 6819 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6820 { 6821 int rv, retry, retry_limit; 6822 6823 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6824 retry_limit = 5; 6825 } else { 6826 retry_limit = 0; 6827 } 6828 6829 for (retry = 0; retry < retry_limit; retry++) { 6830 rv = ixl_rx_ctl_write(sc, reg, value); 6831 if (rv == 0) 6832 return; 6833 else if (rv == EAGAIN) 6834 delaymsec(1); 6835 else 6836 break; 6837 } 6838 6839 ixl_wr(sc, reg, value); 6840 } 6841 6842 static int 6843 ixl_nvm_lock(struct ixl_softc *sc, char rw) 6844 { 6845 struct ixl_aq_desc iaq; 6846 struct ixl_aq_req_resource_param *param; 6847 int rv; 6848 6849 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6850 return 0; 6851 6852 memset(&iaq, 0, sizeof(iaq)); 6853 iaq.iaq_opcode 
= htole16(IXL_AQ_OP_REQUEST_RESOURCE); 6854 6855 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param; 6856 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 6857 if (rw == 'R') { 6858 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ); 6859 } else { 6860 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE); 6861 } 6862 6863 rv = ixl_atq_poll(sc, &iaq, 250); 6864 6865 if (rv != 0) 6866 return ETIMEDOUT; 6867 6868 switch (le16toh(iaq.iaq_retval)) { 6869 case IXL_AQ_RC_OK: 6870 break; 6871 case IXL_AQ_RC_EACCES: 6872 return EACCES; 6873 case IXL_AQ_RC_EBUSY: 6874 return EBUSY; 6875 case IXL_AQ_RC_EPERM: 6876 return EPERM; 6877 } 6878 6879 return 0; 6880 } 6881 6882 static int 6883 ixl_nvm_unlock(struct ixl_softc *sc) 6884 { 6885 struct ixl_aq_desc iaq; 6886 struct ixl_aq_rel_resource_param *param; 6887 int rv; 6888 6889 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6890 return 0; 6891 6892 memset(&iaq, 0, sizeof(iaq)); 6893 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE); 6894 6895 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param; 6896 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 6897 6898 rv = ixl_atq_poll(sc, &iaq, 250); 6899 6900 if (rv != 0) 6901 return ETIMEDOUT; 6902 6903 switch (le16toh(iaq.iaq_retval)) { 6904 case IXL_AQ_RC_OK: 6905 break; 6906 default: 6907 return EIO; 6908 } 6909 return 0; 6910 } 6911 6912 static int 6913 ixl_srdone_poll(struct ixl_softc *sc) 6914 { 6915 int wait_count; 6916 uint32_t reg; 6917 6918 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS; 6919 wait_count++) { 6920 reg = ixl_rd(sc, I40E_GLNVM_SRCTL); 6921 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK)) 6922 break; 6923 6924 delaymsec(5); 6925 } 6926 6927 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS) 6928 return -1; 6929 6930 return 0; 6931 } 6932 6933 static int 6934 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data) 6935 { 6936 uint32_t reg; 6937 6938 if (ixl_srdone_poll(sc) != 0) 6939 return ETIMEDOUT; 6940 6941 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | 6942 __BIT(I40E_GLNVM_SRCTL_START_SHIFT); 6943 ixl_wr(sc, I40E_GLNVM_SRCTL, reg); 6944 6945 if (ixl_srdone_poll(sc) != 0) { 6946 aprint_debug("NVM read error: couldn't access " 6947 "Shadow RAM address: 0x%x\n", offset); 6948 return ETIMEDOUT; 6949 } 6950 6951 reg = ixl_rd(sc, I40E_GLNVM_SRDATA); 6952 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK); 6953 6954 return 0; 6955 } 6956 6957 static int 6958 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word, 6959 void *data, size_t len) 6960 { 6961 struct ixl_dmamem *idm; 6962 struct ixl_aq_desc iaq; 6963 struct ixl_aq_nvm_param *param; 6964 uint32_t offset_bytes; 6965 int rv; 6966 6967 idm = &sc->sc_aqbuf; 6968 if (len > IXL_DMA_LEN(idm)) 6969 return ENOMEM; 6970 6971 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 6972 memset(&iaq, 0, sizeof(iaq)); 6973 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ); 6974 iaq.iaq_flags = htole16(IXL_AQ_BUF | 6975 ((len > I40E_AQ_LARGE_BUF) ? 
IXL_AQ_LB : 0)); 6976 iaq.iaq_datalen = htole16(len); 6977 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 6978 6979 param = (struct ixl_aq_nvm_param *)iaq.iaq_param; 6980 param->command_flags = IXL_AQ_NVM_LAST_CMD; 6981 param->module_pointer = 0; 6982 param->length = htole16(len); 6983 offset_bytes = (uint32_t)offset_word * 2; 6984 offset_bytes &= 0x00FFFFFF; 6985 param->offset = htole32(offset_bytes); 6986 6987 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 6988 BUS_DMASYNC_PREREAD); 6989 6990 rv = ixl_atq_poll(sc, &iaq, 250); 6991 6992 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 6993 BUS_DMASYNC_POSTREAD); 6994 6995 if (rv != 0) { 6996 return ETIMEDOUT; 6997 } 6998 6999 switch (le16toh(iaq.iaq_retval)) { 7000 case IXL_AQ_RC_OK: 7001 break; 7002 case IXL_AQ_RC_EPERM: 7003 return EPERM; 7004 case IXL_AQ_RC_EINVAL: 7005 return EINVAL; 7006 case IXL_AQ_RC_EBUSY: 7007 return EBUSY; 7008 case IXL_AQ_RC_EIO: 7009 default: 7010 return EIO; 7011 } 7012 7013 memcpy(data, IXL_DMA_KVA(idm), len); 7014 7015 return 0; 7016 } 7017 7018 static int 7019 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data) 7020 { 7021 int error; 7022 uint16_t buf; 7023 7024 error = ixl_nvm_lock(sc, 'R'); 7025 if (error) 7026 return error; 7027 7028 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) { 7029 error = ixl_nvm_read_aq(sc, offset, 7030 &buf, sizeof(buf)); 7031 if (error == 0) 7032 *data = le16toh(buf); 7033 } else { 7034 error = ixl_nvm_read_srctl(sc, offset, &buf); 7035 if (error == 0) 7036 *data = buf; 7037 } 7038 7039 ixl_nvm_unlock(sc); 7040 7041 return error; 7042 } 7043 7044 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci"); 7045 7046 #ifdef _MODULE 7047 #include "ioconf.c" 7048 #endif 7049 7050 #ifdef _MODULE 7051 static void 7052 ixl_parse_modprop(prop_dictionary_t dict) 7053 { 7054 prop_object_t obj; 7055 int64_t val; 7056 uint64_t uval; 7057 7058 if (dict == NULL) 7059 return; 7060 7061 obj = prop_dictionary_get(dict, "nomsix"); 7062 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) { 7063 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj); 7064 } 7065 7066 obj = prop_dictionary_get(dict, "stats_interval"); 7067 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7068 val = prop_number_signed_value((prop_number_t)obj); 7069 7070 /* the range has no reason */ 7071 if (100 < val && val < 180000) { 7072 ixl_param_stats_interval = val; 7073 } 7074 } 7075 7076 obj = prop_dictionary_get(dict, "nqps_limit"); 7077 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7078 val = prop_number_signed_value((prop_number_t)obj); 7079 7080 if (val <= INT32_MAX) 7081 ixl_param_nqps_limit = val; 7082 } 7083 7084 obj = prop_dictionary_get(dict, "rx_ndescs"); 7085 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7086 uval = prop_number_unsigned_integer_value((prop_number_t)obj); 7087 7088 if (uval > 8) 7089 ixl_param_rx_ndescs = uval; 7090 } 7091 7092 obj = prop_dictionary_get(dict, "tx_ndescs"); 7093 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7094 uval = prop_number_unsigned_integer_value((prop_number_t)obj); 7095 7096 if (uval > IXL_TX_PKT_DESCS) 7097 ixl_param_tx_ndescs = uval; 7098 } 7099 7100 } 7101 #endif 7102 7103 static int 7104 if_ixl_modcmd(modcmd_t cmd, void *opaque) 7105 { 7106 int error = 0; 7107 7108 #ifdef _MODULE 7109 switch (cmd) { 7110 case MODULE_CMD_INIT: 7111 ixl_parse_modprop((prop_dictionary_t)opaque); 7112 error = config_init_component(cfdriver_ioconf_if_ixl, 7113 cfattach_ioconf_if_ixl, 
cfdata_ioconf_if_ixl); 7114 break; 7115 case MODULE_CMD_FINI: 7116 error = config_fini_component(cfdriver_ioconf_if_ixl, 7117 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl); 7118 break; 7119 default: 7120 error = ENOTTY; 7121 break; 7122 } 7123 #endif 7124 7125 return error; 7126 } 7127
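
/*
 * Illustrative usage note: the module properties handled by
 * ixl_parse_modprop() above (nomsix, stats_interval, nqps_limit,
 * rx_ndescs and tx_ndescs) can be supplied from userland when the
 * driver is loaded as a module, e.g. with modload(8) property options
 * (a sketch, assuming the standard -b/-i flags):
 *
 *	modload -b nomsix=true -i nqps_limit=4 -i stats_interval=10000 if_ixl
 *
 * Values that fall outside the checks above (stats_interval strictly
 * between 100 and 180000 ms, rx_ndescs greater than 8, tx_ndescs
 * greater than IXL_TX_PKT_DESCS) are silently ignored and the
 * compiled-in defaults remain in effect.
 */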