1 /* $NetBSD: if_ixl.c,v 1.68 2020/07/16 01:20:38 yamaguchi Exp $ */ 2 3 /* 4 * Copyright (c) 2013-2015, Intel Corporation 5 * All rights reserved. 6 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 /* 51 * Copyright (c) 2019 Internet Initiative Japan, Inc. 52 * All rights reserved. 53 * 54 * Redistribution and use in source and binary forms, with or without 55 * modification, are permitted provided that the following conditions 56 * are met: 57 * 1. Redistributions of source code must retain the above copyright 58 * notice, this list of conditions and the following disclaimer. 59 * 2. Redistributions in binary form must reproduce the above copyright 60 * notice, this list of conditions and the following disclaimer in the 61 * documentation and/or other materials provided with the distribution. 62 * 63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 66 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 73 * POSSIBILITY OF SUCH DAMAGE. 74 */ 75 76 #include <sys/cdefs.h> 77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.68 2020/07/16 01:20:38 yamaguchi Exp $"); 78 79 #ifdef _KERNEL_OPT 80 #include "opt_net_mpsafe.h" 81 #include "opt_if_ixl.h" 82 #endif 83 84 #include <sys/param.h> 85 #include <sys/types.h> 86 87 #include <sys/cpu.h> 88 #include <sys/device.h> 89 #include <sys/evcnt.h> 90 #include <sys/interrupt.h> 91 #include <sys/kmem.h> 92 #include <sys/module.h> 93 #include <sys/mutex.h> 94 #include <sys/pcq.h> 95 #include <sys/syslog.h> 96 #include <sys/workqueue.h> 97 98 #include <sys/bus.h> 99 100 #include <net/bpf.h> 101 #include <net/if.h> 102 #include <net/if_dl.h> 103 #include <net/if_media.h> 104 #include <net/if_ether.h> 105 #include <net/rss_config.h> 106 107 #include <netinet/tcp.h> /* for struct tcphdr */ 108 #include <netinet/udp.h> /* for struct udphdr */ 109 110 #include <dev/pci/pcivar.h> 111 #include <dev/pci/pcidevs.h> 112 113 #include <dev/pci/if_ixlreg.h> 114 #include <dev/pci/if_ixlvar.h> 115 116 #include <prop/proplib.h> 117 118 struct ixl_softc; /* defined */ 119 120 #define I40E_PF_RESET_WAIT_COUNT 200 121 #define I40E_AQ_LARGE_BUF 512 122 123 /* bitfields for Tx queue mapping in QTX_CTL */ 124 #define I40E_QTX_CTL_VF_QUEUE 0x0 125 #define I40E_QTX_CTL_VM_QUEUE 0x1 126 #define I40E_QTX_CTL_PF_QUEUE 0x2 127 128 #define I40E_QUEUE_TYPE_EOL 0x7ff 129 #define I40E_INTR_NOTX_QUEUE 0 130 131 #define I40E_QUEUE_TYPE_RX 0x0 132 #define I40E_QUEUE_TYPE_TX 0x1 133 #define I40E_QUEUE_TYPE_PE_CEQ 0x2 134 #define I40E_QUEUE_TYPE_UNKNOWN 0x3 135 136 #define I40E_ITR_INDEX_RX 0x0 137 #define I40E_ITR_INDEX_TX 0x1 138 #define I40E_ITR_INDEX_OTHER 0x2 139 #define I40E_ITR_INDEX_NONE 0x3 140 #define IXL_ITR_RX 0x7a /* 4K intrs/sec */ 141 #define IXL_ITR_TX 0x7a /* 4K intrs/sec */ 142 143 #define I40E_INTR_NOTX_QUEUE 0 144 #define I40E_INTR_NOTX_INTR 0 145 #define I40E_INTR_NOTX_RX_QUEUE 0 146 #define I40E_INTR_NOTX_TX_QUEUE 1 147 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK 148 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK 149 150 #define BIT_ULL(a) (1ULL << (a)) 151 #define IXL_RSS_HENA_DEFAULT_BASE \ 152 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ 153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ 154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ 155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ 156 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ 157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ 158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ 159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ 160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ 161 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ 162 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) 163 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE 164 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \ 165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ 166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ 167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ 168 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ 169 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ 170 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) 171 #define I40E_HASH_LUT_SIZE_128 0 172 #define IXL_RSS_KEY_SIZE_REG 13 173 174 #define IXL_ICR0_CRIT_ERR_MASK \ 175 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \ 176 I40E_PFINT_ICR0_ECC_ERR_MASK | \ 177 I40E_PFINT_ICR0_PE_CRITERR_MASK) 178 179 #define IXL_QUEUE_MAX_XL710 64 180 #define IXL_QUEUE_MAX_X722 128 181 182 #define IXL_TX_PKT_DESCS 8 183 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS) 184 #define IXL_TX_QUEUE_ALIGN 128 185 #define IXL_RX_QUEUE_ALIGN 128 186 187 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN) 188 #define IXL_MTU_ETHERLEN ETHER_HDR_LEN \ 189 + ETHER_CRC_LEN 190 #if 0 191 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN) 192 #else 193 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */ 194 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN) 195 #endif 196 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN) 197 198 #define IXL_PCIREG PCI_MAPREG_START 199 200 #define IXL_ITR0 0x0 201 #define IXL_ITR1 0x1 202 #define IXL_ITR2 0x2 203 #define IXL_NOITR 0x3 204 205 #define IXL_AQ_NUM 256 206 #define IXL_AQ_MASK (IXL_AQ_NUM - 1) 207 #define IXL_AQ_ALIGN 64 /* lol */ 208 #define IXL_AQ_BUFLEN 4096 209 210 #define IXL_HMC_ROUNDUP 512 211 #define IXL_HMC_PGSIZE 4096 212 #define IXL_HMC_DVASZ sizeof(uint64_t) 213 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ) 214 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS) 215 #define IXL_HMC_PDVALID 1ULL 216 217 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz) 218 219 #define IXL_SRRD_SRCTL_ATTEMPTS 100000 220 221 struct ixl_aq_regs { 222 bus_size_t atq_tail; 223 bus_size_t atq_head; 224 bus_size_t atq_len; 225 bus_size_t atq_bal; 226 bus_size_t atq_bah; 227 228 bus_size_t arq_tail; 229 bus_size_t arq_head; 230 bus_size_t arq_len; 231 bus_size_t arq_bal; 232 bus_size_t arq_bah; 233 234 uint32_t atq_len_enable; 235 uint32_t atq_tail_mask; 236 uint32_t atq_head_mask; 237 238 uint32_t arq_len_enable; 239 uint32_t arq_tail_mask; 240 uint32_t arq_head_mask; 241 }; 242 243 struct ixl_phy_type { 244 uint64_t phy_type; 245 uint64_t ifm_type; 246 }; 247 248 struct ixl_speed_type { 249 uint8_t dev_speed; 250 uint64_t net_speed; 251 }; 252 253 struct ixl_aq_buf { 254 SIMPLEQ_ENTRY(ixl_aq_buf) 255 aqb_entry; 256 void *aqb_data; 257 bus_dmamap_t aqb_map; 258 bus_dma_segment_t aqb_seg; 259 size_t aqb_size; 260 int aqb_nsegs; 261 }; 262 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf); 263 264 struct ixl_dmamem { 265 bus_dmamap_t ixm_map; 266 bus_dma_segment_t ixm_seg; 267 int ixm_nsegs; 268 size_t ixm_size; 269 void *ixm_kva; 270 }; 271 272 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map) 273 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr) 274 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva) 275 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size) 276 277 struct ixl_hmc_entry { 278 uint64_t hmc_base; 279 uint32_t hmc_count; 280 uint64_t hmc_size; 281 }; 282 283 enum ixl_hmc_types { 284 IXL_HMC_LAN_TX = 0, 285 IXL_HMC_LAN_RX, 286 IXL_HMC_FCOE_CTX, 287 IXL_HMC_FCOE_FILTER, 288 IXL_HMC_COUNT 289 }; 290 291 struct ixl_hmc_pack { 292 uint16_t offset; 293 uint16_t width; 294 uint16_t lsb; 295 }; 296 297 /* 298 * these hmc objects have weird sizes and alignments, so these are abstract 299 * representations of them that are nice for c to populate. 
300 * 301 * the packing code relies on little-endian values being stored in the fields, 302 * no high bits in the fields being set, and the fields must be packed in the 303 * same order as they are in the ctx structure. 304 */ 305 306 struct ixl_hmc_rxq { 307 uint16_t head; 308 uint8_t cpuid; 309 uint64_t base; 310 #define IXL_HMC_RXQ_BASE_UNIT 128 311 uint16_t qlen; 312 uint16_t dbuff; 313 #define IXL_HMC_RXQ_DBUFF_UNIT 128 314 uint8_t hbuff; 315 #define IXL_HMC_RXQ_HBUFF_UNIT 64 316 uint8_t dtype; 317 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0 318 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1 319 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2 320 uint8_t dsize; 321 #define IXL_HMC_RXQ_DSIZE_16 0 322 #define IXL_HMC_RXQ_DSIZE_32 1 323 uint8_t crcstrip; 324 uint8_t fc_ena; 325 uint8_t l2sel; 326 uint8_t hsplit_0; 327 uint8_t hsplit_1; 328 uint8_t showiv; 329 uint16_t rxmax; 330 uint8_t tphrdesc_ena; 331 uint8_t tphwdesc_ena; 332 uint8_t tphdata_ena; 333 uint8_t tphhead_ena; 334 uint8_t lrxqthresh; 335 uint8_t prefena; 336 }; 337 338 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = { 339 { offsetof(struct ixl_hmc_rxq, head), 13, 0 }, 340 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 }, 341 { offsetof(struct ixl_hmc_rxq, base), 57, 32 }, 342 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 }, 343 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 }, 344 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 }, 345 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 }, 346 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 }, 347 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 }, 348 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 }, 349 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 }, 350 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 }, 351 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 }, 352 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 }, 353 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 }, 354 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 }, 355 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 }, 356 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 }, 357 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 }, 358 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 }, 359 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 }, 360 }; 361 362 #define IXL_HMC_RXQ_MINSIZE (201 + 1) 363 364 struct ixl_hmc_txq { 365 uint16_t head; 366 uint8_t new_context; 367 uint64_t base; 368 #define IXL_HMC_TXQ_BASE_UNIT 128 369 uint8_t fc_ena; 370 uint8_t timesync_ena; 371 uint8_t fd_ena; 372 uint8_t alt_vlan_ena; 373 uint8_t cpuid; 374 uint16_t thead_wb; 375 uint8_t head_wb_ena; 376 #define IXL_HMC_TXQ_DESC_WB 0 377 #define IXL_HMC_TXQ_HEAD_WB 1 378 uint16_t qlen; 379 uint8_t tphrdesc_ena; 380 uint8_t tphrpacket_ena; 381 uint8_t tphwdesc_ena; 382 uint64_t head_wb_addr; 383 uint32_t crc; 384 uint16_t rdylist; 385 uint8_t rdylist_act; 386 }; 387 388 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = { 389 { offsetof(struct ixl_hmc_txq, head), 13, 0 }, 390 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 }, 391 { offsetof(struct ixl_hmc_txq, base), 57, 32 }, 392 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 }, 393 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 }, 394 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 }, 395 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 }, 396 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 }, 397 /* line 1 */ 398 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 }, 399 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 }, 400 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 }, 401 { 
offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 }, 402 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 }, 403 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 }, 404 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 }, 405 /* line 7 */ 406 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) }, 407 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) }, 408 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) }, 409 }; 410 411 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1) 412 413 struct ixl_work { 414 struct work ixw_cookie; 415 void (*ixw_func)(void *); 416 void *ixw_arg; 417 unsigned int ixw_added; 418 }; 419 #define IXL_WORKQUEUE_PRI PRI_SOFTNET 420 421 struct ixl_tx_map { 422 struct mbuf *txm_m; 423 bus_dmamap_t txm_map; 424 unsigned int txm_eop; 425 }; 426 427 struct ixl_tx_ring { 428 kmutex_t txr_lock; 429 struct ixl_softc *txr_sc; 430 431 unsigned int txr_prod; 432 unsigned int txr_cons; 433 434 struct ixl_tx_map *txr_maps; 435 struct ixl_dmamem txr_mem; 436 437 bus_size_t txr_tail; 438 unsigned int txr_qid; 439 pcq_t *txr_intrq; 440 void *txr_si; 441 442 struct evcnt txr_defragged; 443 struct evcnt txr_defrag_failed; 444 struct evcnt txr_pcqdrop; 445 struct evcnt txr_transmitdef; 446 struct evcnt txr_intr; 447 struct evcnt txr_defer; 448 }; 449 450 struct ixl_rx_map { 451 struct mbuf *rxm_m; 452 bus_dmamap_t rxm_map; 453 }; 454 455 struct ixl_rx_ring { 456 kmutex_t rxr_lock; 457 458 unsigned int rxr_prod; 459 unsigned int rxr_cons; 460 461 struct ixl_rx_map *rxr_maps; 462 struct ixl_dmamem rxr_mem; 463 464 struct mbuf *rxr_m_head; 465 struct mbuf **rxr_m_tail; 466 467 bus_size_t rxr_tail; 468 unsigned int rxr_qid; 469 470 struct evcnt rxr_mgethdr_failed; 471 struct evcnt rxr_mgetcl_failed; 472 struct evcnt rxr_mbuf_load_failed; 473 struct evcnt rxr_intr; 474 struct evcnt rxr_defer; 475 }; 476 477 struct ixl_queue_pair { 478 struct ixl_softc *qp_sc; 479 struct ixl_tx_ring *qp_txr; 480 struct ixl_rx_ring *qp_rxr; 481 482 char qp_name[16]; 483 484 void *qp_si; 485 struct work qp_work; 486 bool qp_workqueue; 487 }; 488 489 struct ixl_atq { 490 struct ixl_aq_desc iatq_desc; 491 void (*iatq_fn)(struct ixl_softc *, 492 const struct ixl_aq_desc *); 493 }; 494 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq); 495 496 struct ixl_product { 497 unsigned int vendor_id; 498 unsigned int product_id; 499 }; 500 501 struct ixl_stats_counters { 502 bool isc_has_offset; 503 struct evcnt isc_crc_errors; 504 uint64_t isc_crc_errors_offset; 505 struct evcnt isc_illegal_bytes; 506 uint64_t isc_illegal_bytes_offset; 507 struct evcnt isc_rx_bytes; 508 uint64_t isc_rx_bytes_offset; 509 struct evcnt isc_rx_discards; 510 uint64_t isc_rx_discards_offset; 511 struct evcnt isc_rx_unicast; 512 uint64_t isc_rx_unicast_offset; 513 struct evcnt isc_rx_multicast; 514 uint64_t isc_rx_multicast_offset; 515 struct evcnt isc_rx_broadcast; 516 uint64_t isc_rx_broadcast_offset; 517 struct evcnt isc_rx_size_64; 518 uint64_t isc_rx_size_64_offset; 519 struct evcnt isc_rx_size_127; 520 uint64_t isc_rx_size_127_offset; 521 struct evcnt isc_rx_size_255; 522 uint64_t isc_rx_size_255_offset; 523 struct evcnt isc_rx_size_511; 524 uint64_t isc_rx_size_511_offset; 525 struct evcnt isc_rx_size_1023; 526 uint64_t isc_rx_size_1023_offset; 527 struct evcnt isc_rx_size_1522; 528 uint64_t isc_rx_size_1522_offset; 529 struct evcnt isc_rx_size_big; 530 uint64_t isc_rx_size_big_offset; 531 struct evcnt isc_rx_undersize; 532 uint64_t isc_rx_undersize_offset; 533 struct evcnt isc_rx_oversize; 534 uint64_t 
isc_rx_oversize_offset; 535 struct evcnt isc_rx_fragments; 536 uint64_t isc_rx_fragments_offset; 537 struct evcnt isc_rx_jabber; 538 uint64_t isc_rx_jabber_offset; 539 struct evcnt isc_tx_bytes; 540 uint64_t isc_tx_bytes_offset; 541 struct evcnt isc_tx_dropped_link_down; 542 uint64_t isc_tx_dropped_link_down_offset; 543 struct evcnt isc_tx_unicast; 544 uint64_t isc_tx_unicast_offset; 545 struct evcnt isc_tx_multicast; 546 uint64_t isc_tx_multicast_offset; 547 struct evcnt isc_tx_broadcast; 548 uint64_t isc_tx_broadcast_offset; 549 struct evcnt isc_tx_size_64; 550 uint64_t isc_tx_size_64_offset; 551 struct evcnt isc_tx_size_127; 552 uint64_t isc_tx_size_127_offset; 553 struct evcnt isc_tx_size_255; 554 uint64_t isc_tx_size_255_offset; 555 struct evcnt isc_tx_size_511; 556 uint64_t isc_tx_size_511_offset; 557 struct evcnt isc_tx_size_1023; 558 uint64_t isc_tx_size_1023_offset; 559 struct evcnt isc_tx_size_1522; 560 uint64_t isc_tx_size_1522_offset; 561 struct evcnt isc_tx_size_big; 562 uint64_t isc_tx_size_big_offset; 563 struct evcnt isc_mac_local_faults; 564 uint64_t isc_mac_local_faults_offset; 565 struct evcnt isc_mac_remote_faults; 566 uint64_t isc_mac_remote_faults_offset; 567 struct evcnt isc_link_xon_rx; 568 uint64_t isc_link_xon_rx_offset; 569 struct evcnt isc_link_xon_tx; 570 uint64_t isc_link_xon_tx_offset; 571 struct evcnt isc_link_xoff_rx; 572 uint64_t isc_link_xoff_rx_offset; 573 struct evcnt isc_link_xoff_tx; 574 uint64_t isc_link_xoff_tx_offset; 575 struct evcnt isc_vsi_rx_discards; 576 uint64_t isc_vsi_rx_discards_offset; 577 struct evcnt isc_vsi_rx_bytes; 578 uint64_t isc_vsi_rx_bytes_offset; 579 struct evcnt isc_vsi_rx_unicast; 580 uint64_t isc_vsi_rx_unicast_offset; 581 struct evcnt isc_vsi_rx_multicast; 582 uint64_t isc_vsi_rx_multicast_offset; 583 struct evcnt isc_vsi_rx_broadcast; 584 uint64_t isc_vsi_rx_broadcast_offset; 585 struct evcnt isc_vsi_tx_errors; 586 uint64_t isc_vsi_tx_errors_offset; 587 struct evcnt isc_vsi_tx_bytes; 588 uint64_t isc_vsi_tx_bytes_offset; 589 struct evcnt isc_vsi_tx_unicast; 590 uint64_t isc_vsi_tx_unicast_offset; 591 struct evcnt isc_vsi_tx_multicast; 592 uint64_t isc_vsi_tx_multicast_offset; 593 struct evcnt isc_vsi_tx_broadcast; 594 uint64_t isc_vsi_tx_broadcast_offset; 595 }; 596 597 /* 598 * Locking notes: 599 * + a field in ixl_tx_ring is protected by txr_lock (a spin mutex), and 600 * a field in ixl_rx_ring is protected by rxr_lock (a spin mutex). 601 * - more than one lock of them cannot be held at once. 602 * + a field named sc_atq_* in ixl_softc is protected by sc_atq_lock 603 * (a spin mutex). 604 * - the lock cannot held with txr_lock or rxr_lock. 605 * + a field named sc_arq_* is not protected by any lock. 606 * - operations for sc_arq_* is done in one context related to 607 * sc_arq_task. 608 * + other fields in ixl_softc is protected by sc_cfg_lock 609 * (an adaptive mutex) 610 * - It must be held before another lock is held, and It can be 611 * released after the other lock is released. 
612 * */ 613 614 struct ixl_softc { 615 device_t sc_dev; 616 struct ethercom sc_ec; 617 bool sc_attached; 618 bool sc_dead; 619 uint32_t sc_port; 620 struct sysctllog *sc_sysctllog; 621 struct workqueue *sc_workq; 622 struct workqueue *sc_workq_txrx; 623 int sc_stats_intval; 624 callout_t sc_stats_callout; 625 struct ixl_work sc_stats_task; 626 struct ixl_stats_counters 627 sc_stats_counters; 628 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 629 struct ifmedia sc_media; 630 uint64_t sc_media_status; 631 uint64_t sc_media_active; 632 uint64_t sc_phy_types; 633 uint8_t sc_phy_abilities; 634 uint8_t sc_phy_linkspeed; 635 uint8_t sc_phy_fec_cfg; 636 uint16_t sc_eee_cap; 637 uint32_t sc_eeer_val; 638 uint8_t sc_d3_lpan; 639 kmutex_t sc_cfg_lock; 640 enum i40e_mac_type sc_mac_type; 641 uint32_t sc_rss_table_size; 642 uint32_t sc_rss_table_entry_width; 643 bool sc_txrx_workqueue; 644 u_int sc_tx_process_limit; 645 u_int sc_rx_process_limit; 646 u_int sc_tx_intr_process_limit; 647 u_int sc_rx_intr_process_limit; 648 649 int sc_cur_ec_capenable; 650 651 struct pci_attach_args sc_pa; 652 pci_intr_handle_t *sc_ihp; 653 void **sc_ihs; 654 unsigned int sc_nintrs; 655 656 bus_dma_tag_t sc_dmat; 657 bus_space_tag_t sc_memt; 658 bus_space_handle_t sc_memh; 659 bus_size_t sc_mems; 660 661 uint8_t sc_pf_id; 662 uint16_t sc_uplink_seid; /* le */ 663 uint16_t sc_downlink_seid; /* le */ 664 uint16_t sc_vsi_number; 665 uint16_t sc_vsi_stat_counter_idx; 666 uint16_t sc_seid; 667 unsigned int sc_base_queue; 668 669 pci_intr_type_t sc_intrtype; 670 unsigned int sc_msix_vector_queue; 671 672 struct ixl_dmamem sc_scratch; 673 struct ixl_dmamem sc_aqbuf; 674 675 const struct ixl_aq_regs * 676 sc_aq_regs; 677 uint32_t sc_aq_flags; 678 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0) 679 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1) 680 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2) 681 #define IXL_SC_AQ_FLAG_RSS __BIT(3) 682 683 kmutex_t sc_atq_lock; 684 kcondvar_t sc_atq_cv; 685 struct ixl_dmamem sc_atq; 686 unsigned int sc_atq_prod; 687 unsigned int sc_atq_cons; 688 689 struct ixl_dmamem sc_arq; 690 struct ixl_work sc_arq_task; 691 struct ixl_aq_bufs sc_arq_idle; 692 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM]; 693 unsigned int sc_arq_prod; 694 unsigned int sc_arq_cons; 695 696 struct ixl_work sc_link_state_task; 697 struct ixl_atq sc_link_state_atq; 698 699 struct ixl_dmamem sc_hmc_sd; 700 struct ixl_dmamem sc_hmc_pd; 701 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT]; 702 703 unsigned int sc_tx_ring_ndescs; 704 unsigned int sc_rx_ring_ndescs; 705 unsigned int sc_nqueue_pairs; 706 unsigned int sc_nqueue_pairs_max; 707 unsigned int sc_nqueue_pairs_device; 708 struct ixl_queue_pair *sc_qps; 709 uint32_t sc_itr_rx; 710 uint32_t sc_itr_tx; 711 712 struct evcnt sc_event_atq; 713 struct evcnt sc_event_link; 714 struct evcnt sc_event_ecc_err; 715 struct evcnt sc_event_pci_exception; 716 struct evcnt sc_event_crit_err; 717 }; 718 719 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX 720 #define IXL_TX_PROCESS_LIMIT 256 721 #define IXL_RX_PROCESS_LIMIT 256 722 #define IXL_TX_INTR_PROCESS_LIMIT 256 723 #define IXL_RX_INTR_PROCESS_LIMIT 0U 724 725 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \ 726 IFCAP_CSUM_TCPv4_Rx | \ 727 IFCAP_CSUM_UDPv4_Rx | \ 728 IFCAP_CSUM_TCPv6_Rx | \ 729 IFCAP_CSUM_UDPv6_Rx) 730 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \ 731 IFCAP_CSUM_TCPv4_Tx | \ 732 IFCAP_CSUM_UDPv4_Tx | \ 733 IFCAP_CSUM_TCPv6_Tx | \ 734 IFCAP_CSUM_UDPv6_Tx) 735 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \ 736 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \ 737 M_CSUM_UDPv4 | 
M_CSUM_UDPv6) 738 739 #define delaymsec(_x) DELAY(1000 * (_x)) 740 #ifdef IXL_DEBUG 741 #define DDPRINTF(sc, fmt, args...) \ 742 do { \ 743 if ((sc) != NULL) { \ 744 device_printf( \ 745 ((struct ixl_softc *)(sc))->sc_dev, \ 746 ""); \ 747 } \ 748 printf("%s:\t" fmt, __func__, ##args); \ 749 } while (0) 750 #else 751 #define DDPRINTF(sc, fmt, args...) __nothing 752 #endif 753 #ifndef IXL_STATS_INTERVAL_MSEC 754 #define IXL_STATS_INTERVAL_MSEC 10000 755 #endif 756 #ifndef IXL_QUEUE_NUM 757 #define IXL_QUEUE_NUM 0 758 #endif 759 760 static bool ixl_param_nomsix = false; 761 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC; 762 static int ixl_param_nqps_limit = IXL_QUEUE_NUM; 763 static unsigned int ixl_param_tx_ndescs = 1024; 764 static unsigned int ixl_param_rx_ndescs = 1024; 765 766 static enum i40e_mac_type 767 ixl_mactype(pci_product_id_t); 768 static void ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t); 769 static void ixl_clear_hw(struct ixl_softc *); 770 static int ixl_pf_reset(struct ixl_softc *); 771 772 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *, 773 bus_size_t, bus_size_t); 774 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *); 775 776 static int ixl_arq_fill(struct ixl_softc *); 777 static void ixl_arq_unfill(struct ixl_softc *); 778 779 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *, 780 unsigned int); 781 static void ixl_atq_set(struct ixl_atq *, 782 void (*)(struct ixl_softc *, const struct ixl_aq_desc *)); 783 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *); 784 static void ixl_atq_done(struct ixl_softc *); 785 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *); 786 static int ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *); 787 static int ixl_get_version(struct ixl_softc *); 788 static int ixl_get_nvm_version(struct ixl_softc *); 789 static int ixl_get_hw_capabilities(struct ixl_softc *); 790 static int ixl_pxe_clear(struct ixl_softc *); 791 static int ixl_lldp_shut(struct ixl_softc *); 792 static int ixl_get_mac(struct ixl_softc *); 793 static int ixl_get_switch_config(struct ixl_softc *); 794 static int ixl_phy_mask_ints(struct ixl_softc *); 795 static int ixl_get_phy_info(struct ixl_softc *); 796 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool); 797 static int ixl_set_phy_autoselect(struct ixl_softc *); 798 static int ixl_restart_an(struct ixl_softc *); 799 static int ixl_hmc(struct ixl_softc *); 800 static void ixl_hmc_free(struct ixl_softc *); 801 static int ixl_get_vsi(struct ixl_softc *); 802 static int ixl_set_vsi(struct ixl_softc *); 803 static void ixl_set_filter_control(struct ixl_softc *); 804 static void ixl_get_link_status(void *); 805 static int ixl_get_link_status_poll(struct ixl_softc *, int *); 806 static void ixl_get_link_status_done(struct ixl_softc *, 807 const struct ixl_aq_desc *); 808 static int ixl_set_link_status_locked(struct ixl_softc *, 809 const struct ixl_aq_desc *); 810 static uint64_t ixl_search_link_speed(uint8_t); 811 static uint8_t ixl_search_baudrate(uint64_t); 812 static void ixl_config_rss(struct ixl_softc *); 813 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *, 814 uint16_t, uint16_t); 815 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *, 816 uint16_t, uint16_t); 817 static void ixl_arq(void *); 818 static void ixl_hmc_pack(void *, const void *, 819 const struct ixl_hmc_pack *, unsigned int); 820 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t); 821 
static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t); 822 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *); 823 824 static int ixl_match(device_t, cfdata_t, void *); 825 static void ixl_attach(device_t, device_t, void *); 826 static int ixl_detach(device_t, int); 827 828 static void ixl_media_add(struct ixl_softc *); 829 static int ixl_media_change(struct ifnet *); 830 static void ixl_media_status(struct ifnet *, struct ifmediareq *); 831 static void ixl_watchdog(struct ifnet *); 832 static int ixl_ioctl(struct ifnet *, u_long, void *); 833 static void ixl_start(struct ifnet *); 834 static int ixl_transmit(struct ifnet *, struct mbuf *); 835 static void ixl_deferred_transmit(void *); 836 static int ixl_intr(void *); 837 static int ixl_queue_intr(void *); 838 static int ixl_other_intr(void *); 839 static void ixl_handle_queue(void *); 840 static void ixl_handle_queue_wk(struct work *, void *); 841 static void ixl_sched_handle_queue(struct ixl_softc *, 842 struct ixl_queue_pair *); 843 static int ixl_init(struct ifnet *); 844 static int ixl_init_locked(struct ixl_softc *); 845 static void ixl_stop(struct ifnet *, int); 846 static void ixl_stop_locked(struct ixl_softc *); 847 static int ixl_iff(struct ixl_softc *); 848 static int ixl_ifflags_cb(struct ethercom *); 849 static int ixl_setup_interrupts(struct ixl_softc *); 850 static int ixl_establish_intx(struct ixl_softc *); 851 static int ixl_establish_msix(struct ixl_softc *); 852 static void ixl_enable_queue_intr(struct ixl_softc *, 853 struct ixl_queue_pair *); 854 static void ixl_disable_queue_intr(struct ixl_softc *, 855 struct ixl_queue_pair *); 856 static void ixl_enable_other_intr(struct ixl_softc *); 857 static void ixl_disable_other_intr(struct ixl_softc *); 858 static void ixl_config_queue_intr(struct ixl_softc *); 859 static void ixl_config_other_intr(struct ixl_softc *); 860 861 static struct ixl_tx_ring * 862 ixl_txr_alloc(struct ixl_softc *, unsigned int); 863 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int); 864 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *); 865 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *); 866 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *); 867 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *); 868 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *); 869 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *); 870 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int); 871 872 static struct ixl_rx_ring * 873 ixl_rxr_alloc(struct ixl_softc *, unsigned int); 874 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *); 875 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *); 876 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *); 877 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *); 878 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *); 879 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *); 880 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int); 881 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *); 882 883 static struct workqueue * 884 ixl_workq_create(const char *, pri_t, int, int); 885 static void ixl_workq_destroy(struct workqueue *); 886 static int ixl_workqs_teardown(device_t); 887 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *); 888 static void ixl_work_add(struct 
workqueue *, struct ixl_work *); 889 static void ixl_work_wait(struct workqueue *, struct ixl_work *); 890 static void ixl_workq_work(struct work *, void *); 891 static const struct ixl_product * 892 ixl_lookup(const struct pci_attach_args *pa); 893 static void ixl_link_state_update(struct ixl_softc *, 894 const struct ixl_aq_desc *); 895 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool); 896 static int ixl_setup_vlan_hwfilter(struct ixl_softc *); 897 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *); 898 static int ixl_update_macvlan(struct ixl_softc *); 899 static int ixl_setup_interrupts(struct ixl_softc *); 900 static void ixl_teardown_interrupts(struct ixl_softc *); 901 static int ixl_setup_stats(struct ixl_softc *); 902 static void ixl_teardown_stats(struct ixl_softc *); 903 static void ixl_stats_callout(void *); 904 static void ixl_stats_update(void *); 905 static int ixl_setup_sysctls(struct ixl_softc *); 906 static void ixl_teardown_sysctls(struct ixl_softc *); 907 static int ixl_queue_pairs_alloc(struct ixl_softc *); 908 static void ixl_queue_pairs_free(struct ixl_softc *); 909 910 static const struct ixl_phy_type ixl_phy_type_map[] = { 911 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII }, 912 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX }, 913 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 }, 914 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR }, 915 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 }, 916 { 1ULL << IXL_PHY_TYPE_XAUI | 917 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 }, 918 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI }, 919 { 1ULL << IXL_PHY_TYPE_XLAUI | 920 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI }, 921 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU | 922 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 }, 923 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU | 924 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 }, 925 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC }, 926 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC }, 927 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX }, 928 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL | 929 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T }, 930 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T }, 931 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR }, 932 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR }, 933 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX }, 934 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 }, 935 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 }, 936 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX }, 937 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX }, 938 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 }, 939 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR }, 940 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR }, 941 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR }, 942 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR }, 943 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC }, 944 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_ACC }, 945 }; 946 947 static const struct ixl_speed_type ixl_speed_type_map[] = { 948 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) }, 949 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) }, 950 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) }, 951 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) }, 952 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)}, 953 }; 954 955 static const struct ixl_aq_regs ixl_pf_aq_regs = { 956 .atq_tail = I40E_PF_ATQT, 957 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK, 958 .atq_head = I40E_PF_ATQH, 959 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK, 960 .atq_len = I40E_PF_ATQLEN, 961 .atq_bal = I40E_PF_ATQBAL, 962 .atq_bah = I40E_PF_ATQBAH, 963 
.atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK, 964 965 .arq_tail = I40E_PF_ARQT, 966 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK, 967 .arq_head = I40E_PF_ARQH, 968 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK, 969 .arq_len = I40E_PF_ARQLEN, 970 .arq_bal = I40E_PF_ARQBAL, 971 .arq_bah = I40E_PF_ARQBAH, 972 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK, 973 }; 974 975 #define ixl_rd(_s, _r) \ 976 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r)) 977 #define ixl_wr(_s, _r, _v) \ 978 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v)) 979 #define ixl_barrier(_s, _r, _l, _o) \ 980 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o)) 981 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT) 982 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1)) 983 984 static inline uint32_t 985 ixl_dmamem_hi(struct ixl_dmamem *ixm) 986 { 987 uint32_t retval; 988 uint64_t val; 989 990 if (sizeof(IXL_DMA_DVA(ixm)) > 4) { 991 val = (intptr_t)IXL_DMA_DVA(ixm); 992 retval = (uint32_t)(val >> 32); 993 } else { 994 retval = 0; 995 } 996 997 return retval; 998 } 999 1000 static inline uint32_t 1001 ixl_dmamem_lo(struct ixl_dmamem *ixm) 1002 { 1003 1004 return (uint32_t)IXL_DMA_DVA(ixm); 1005 } 1006 1007 static inline void 1008 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr) 1009 { 1010 uint64_t val; 1011 1012 if (sizeof(addr) > 4) { 1013 val = (intptr_t)addr; 1014 iaq->iaq_param[2] = htole32(val >> 32); 1015 } else { 1016 iaq->iaq_param[2] = htole32(0); 1017 } 1018 1019 iaq->iaq_param[3] = htole32(addr); 1020 } 1021 1022 static inline unsigned int 1023 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs) 1024 { 1025 unsigned int num; 1026 1027 if (prod < cons) 1028 num = cons - prod; 1029 else 1030 num = (ndescs - prod) + cons; 1031 1032 if (__predict_true(num > 0)) { 1033 /* device cannot receive packets if all descripter is filled */ 1034 num -= 1; 1035 } 1036 1037 return num; 1038 } 1039 1040 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc), 1041 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL, 1042 DVF_DETACH_SHUTDOWN); 1043 1044 static const struct ixl_product ixl_products[] = { 1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP }, 1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B }, 1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C }, 1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A }, 1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B }, 1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C }, 1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T }, 1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 }, 1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 }, 1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G }, 1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP }, 1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 }, 1057 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX }, 1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP }, 1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP }, 1060 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET }, 1061 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET }, 1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP }, 1063 /* required last entry */ 1064 {0, 0} 1065 }; 1066 1067 static const struct ixl_product * 1068 ixl_lookup(const struct pci_attach_args *pa) 1069 { 1070 const struct ixl_product *ixlp; 1071 1072 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) { 1073 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id && 1074 PCI_PRODUCT(pa->pa_id) == 
ixlp->product_id) 1075 return ixlp; 1076 } 1077 1078 return NULL; 1079 } 1080 1081 static int 1082 ixl_match(device_t parent, cfdata_t match, void *aux) 1083 { 1084 const struct pci_attach_args *pa = aux; 1085 1086 return (ixl_lookup(pa) != NULL) ? 1 : 0; 1087 } 1088 1089 static void 1090 ixl_attach(device_t parent, device_t self, void *aux) 1091 { 1092 struct ixl_softc *sc; 1093 struct pci_attach_args *pa = aux; 1094 struct ifnet *ifp; 1095 pcireg_t memtype; 1096 uint32_t firstq, port, ari, func; 1097 char xnamebuf[32]; 1098 int tries, rv, link; 1099 1100 sc = device_private(self); 1101 sc->sc_dev = self; 1102 ifp = &sc->sc_ec.ec_if; 1103 1104 sc->sc_pa = *pa; 1105 sc->sc_dmat = (pci_dma64_available(pa)) ? 1106 pa->pa_dmat64 : pa->pa_dmat; 1107 sc->sc_aq_regs = &ixl_pf_aq_regs; 1108 1109 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id)); 1110 1111 ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag); 1112 1113 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG); 1114 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0, 1115 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) { 1116 aprint_error(": unable to map registers\n"); 1117 return; 1118 } 1119 1120 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET); 1121 1122 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC); 1123 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK; 1124 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 1125 sc->sc_base_queue = firstq; 1126 1127 ixl_clear_hw(sc); 1128 if (ixl_pf_reset(sc) == -1) { 1129 /* error printed by ixl pf_reset */ 1130 goto unmap; 1131 } 1132 1133 port = ixl_rd(sc, I40E_PFGEN_PORTNUM); 1134 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK; 1135 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; 1136 sc->sc_port = port; 1137 aprint_normal(": port %u", sc->sc_port); 1138 1139 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP); 1140 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK; 1141 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; 1142 1143 func = ixl_rd(sc, I40E_PF_FUNC_RID); 1144 sc->sc_pf_id = func & (ari ? 
0xff : 0x7); 1145 1146 /* initialise the adminq */ 1147 1148 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET); 1149 1150 if (ixl_dmamem_alloc(sc, &sc->sc_atq, 1151 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1152 aprint_error("\n" "%s: unable to allocate atq\n", 1153 device_xname(self)); 1154 goto unmap; 1155 } 1156 1157 SIMPLEQ_INIT(&sc->sc_arq_idle); 1158 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc); 1159 sc->sc_arq_cons = 0; 1160 sc->sc_arq_prod = 0; 1161 1162 if (ixl_dmamem_alloc(sc, &sc->sc_arq, 1163 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1164 aprint_error("\n" "%s: unable to allocate arq\n", 1165 device_xname(self)); 1166 goto free_atq; 1167 } 1168 1169 if (!ixl_arq_fill(sc)) { 1170 aprint_error("\n" "%s: unable to fill arq descriptors\n", 1171 device_xname(self)); 1172 goto free_arq; 1173 } 1174 1175 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1176 0, IXL_DMA_LEN(&sc->sc_atq), 1177 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1178 1179 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1180 0, IXL_DMA_LEN(&sc->sc_arq), 1181 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1182 1183 for (tries = 0; tries < 10; tries++) { 1184 sc->sc_atq_cons = 0; 1185 sc->sc_atq_prod = 0; 1186 1187 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1188 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1189 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1190 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1191 1192 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 1193 1194 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 1195 ixl_dmamem_lo(&sc->sc_atq)); 1196 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 1197 ixl_dmamem_hi(&sc->sc_atq)); 1198 ixl_wr(sc, sc->sc_aq_regs->atq_len, 1199 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM); 1200 1201 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 1202 ixl_dmamem_lo(&sc->sc_arq)); 1203 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 1204 ixl_dmamem_hi(&sc->sc_arq)); 1205 ixl_wr(sc, sc->sc_aq_regs->arq_len, 1206 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM); 1207 1208 rv = ixl_get_version(sc); 1209 if (rv == 0) 1210 break; 1211 if (rv != ETIMEDOUT) { 1212 aprint_error(", unable to get firmware version\n"); 1213 goto shutdown; 1214 } 1215 1216 delaymsec(100); 1217 } 1218 1219 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 1220 1221 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) { 1222 aprint_error_dev(self, ", unable to allocate nvm buffer\n"); 1223 goto shutdown; 1224 } 1225 1226 ixl_get_nvm_version(sc); 1227 1228 if (sc->sc_mac_type == I40E_MAC_X722) 1229 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722; 1230 else 1231 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710; 1232 1233 rv = ixl_get_hw_capabilities(sc); 1234 if (rv != 0) { 1235 aprint_error(", GET HW CAPABILITIES %s\n", 1236 rv == ETIMEDOUT ? 
"timeout" : "error"); 1237 goto free_aqbuf; 1238 } 1239 1240 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu); 1241 if (ixl_param_nqps_limit > 0) { 1242 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max, 1243 ixl_param_nqps_limit); 1244 } 1245 1246 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 1247 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs; 1248 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs; 1249 1250 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs); 1251 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs); 1252 1253 if (ixl_get_mac(sc) != 0) { 1254 /* error printed by ixl_get_mac */ 1255 goto free_aqbuf; 1256 } 1257 1258 aprint_normal("\n"); 1259 aprint_naive("\n"); 1260 1261 aprint_normal_dev(self, "Ethernet address %s\n", 1262 ether_sprintf(sc->sc_enaddr)); 1263 1264 rv = ixl_pxe_clear(sc); 1265 if (rv != 0) { 1266 aprint_debug_dev(self, "CLEAR PXE MODE %s\n", 1267 rv == ETIMEDOUT ? "timeout" : "error"); 1268 } 1269 1270 ixl_set_filter_control(sc); 1271 1272 if (ixl_hmc(sc) != 0) { 1273 /* error printed by ixl_hmc */ 1274 goto free_aqbuf; 1275 } 1276 1277 if (ixl_lldp_shut(sc) != 0) { 1278 /* error printed by ixl_lldp_shut */ 1279 goto free_hmc; 1280 } 1281 1282 if (ixl_phy_mask_ints(sc) != 0) { 1283 /* error printed by ixl_phy_mask_ints */ 1284 goto free_hmc; 1285 } 1286 1287 if (ixl_restart_an(sc) != 0) { 1288 /* error printed by ixl_restart_an */ 1289 goto free_hmc; 1290 } 1291 1292 if (ixl_get_switch_config(sc) != 0) { 1293 /* error printed by ixl_get_switch_config */ 1294 goto free_hmc; 1295 } 1296 1297 rv = ixl_get_link_status_poll(sc, NULL); 1298 if (rv != 0) { 1299 aprint_error_dev(self, "GET LINK STATUS %s\n", 1300 rv == ETIMEDOUT ? "timeout" : "error"); 1301 goto free_hmc; 1302 } 1303 1304 /* 1305 * The FW often returns EIO in "Get PHY Abilities" command 1306 * if there is no delay 1307 */ 1308 DELAY(500); 1309 if (ixl_get_phy_info(sc) != 0) { 1310 /* error printed by ixl_get_phy_info */ 1311 goto free_hmc; 1312 } 1313 1314 if (ixl_dmamem_alloc(sc, &sc->sc_scratch, 1315 sizeof(struct ixl_aq_vsi_data), 8) != 0) { 1316 aprint_error_dev(self, "unable to allocate scratch buffer\n"); 1317 goto free_hmc; 1318 } 1319 1320 rv = ixl_get_vsi(sc); 1321 if (rv != 0) { 1322 aprint_error_dev(self, "GET VSI %s %d\n", 1323 rv == ETIMEDOUT ? "timeout" : "error", rv); 1324 goto free_scratch; 1325 } 1326 1327 rv = ixl_set_vsi(sc); 1328 if (rv != 0) { 1329 aprint_error_dev(self, "UPDATE VSI error %s %d\n", 1330 rv == ETIMEDOUT ? 
"timeout" : "error", rv); 1331 goto free_scratch; 1332 } 1333 1334 if (ixl_queue_pairs_alloc(sc) != 0) { 1335 /* error printed by ixl_queue_pairs_alloc */ 1336 goto free_scratch; 1337 } 1338 1339 if (ixl_setup_interrupts(sc) != 0) { 1340 /* error printed by ixl_setup_interrupts */ 1341 goto free_queue_pairs; 1342 } 1343 1344 if (ixl_setup_stats(sc) != 0) { 1345 aprint_error_dev(self, "failed to setup event counters\n"); 1346 goto teardown_intrs; 1347 } 1348 1349 if (ixl_setup_sysctls(sc) != 0) { 1350 /* error printed by ixl_setup_sysctls */ 1351 goto teardown_stats; 1352 } 1353 1354 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self)); 1355 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI, 1356 IPL_NET, WQ_MPSAFE); 1357 if (sc->sc_workq == NULL) 1358 goto teardown_sysctls; 1359 1360 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self)); 1361 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk, 1362 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE); 1363 if (rv != 0) { 1364 sc->sc_workq_txrx = NULL; 1365 goto teardown_wqs; 1366 } 1367 1368 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self)); 1369 cv_init(&sc->sc_atq_cv, xnamebuf); 1370 1371 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 1372 1373 ifp->if_softc = sc; 1374 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1375 ifp->if_extflags = IFEF_MPSAFE; 1376 ifp->if_ioctl = ixl_ioctl; 1377 ifp->if_start = ixl_start; 1378 ifp->if_transmit = ixl_transmit; 1379 ifp->if_watchdog = ixl_watchdog; 1380 ifp->if_init = ixl_init; 1381 ifp->if_stop = ixl_stop; 1382 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs); 1383 IFQ_SET_READY(&ifp->if_snd); 1384 ifp->if_capabilities |= IXL_IFCAP_RXCSUM; 1385 ifp->if_capabilities |= IXL_IFCAP_TXCSUM; 1386 #if 0 1387 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6; 1388 #endif 1389 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb); 1390 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1391 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 1392 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1393 1394 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities; 1395 /* Disable VLAN_HWFILTER by default */ 1396 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1397 1398 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable; 1399 1400 sc->sc_ec.ec_ifmedia = &sc->sc_media; 1401 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change, 1402 ixl_media_status, &sc->sc_cfg_lock); 1403 1404 ixl_media_add(sc); 1405 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 1406 if (ISSET(sc->sc_phy_abilities, 1407 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1408 ifmedia_add(&sc->sc_media, 1409 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL); 1410 } 1411 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL); 1412 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 1413 1414 if_attach(ifp); 1415 if_deferred_start_init(ifp, NULL); 1416 ether_ifattach(ifp, sc->sc_enaddr); 1417 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb); 1418 1419 rv = ixl_get_link_status_poll(sc, &link); 1420 if (rv != 0) 1421 link = LINK_STATE_UNKNOWN; 1422 if_link_state_change(ifp, link); 1423 1424 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 1425 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc); 1426 1427 ixl_config_other_intr(sc); 1428 ixl_enable_other_intr(sc); 1429 1430 ixl_set_phy_autoselect(sc); 1431 1432 /* remove default mac filter and replace it so we can see vlans */ 1433 rv = 
ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0); 1434 if (rv != ENOENT) { 1435 aprint_debug_dev(self, 1436 "unable to remove macvlan %u\n", rv); 1437 } 1438 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 1439 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1440 if (rv != ENOENT) { 1441 aprint_debug_dev(self, 1442 "unable to remove macvlan, ignore vlan %u\n", rv); 1443 } 1444 1445 if (ixl_update_macvlan(sc) != 0) { 1446 aprint_debug_dev(self, 1447 "couldn't enable vlan hardware filter\n"); 1448 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1449 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 1450 } 1451 1452 sc->sc_txrx_workqueue = true; 1453 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT; 1454 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT; 1455 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT; 1456 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT; 1457 1458 ixl_stats_update(sc); 1459 sc->sc_stats_counters.isc_has_offset = true; 1460 1461 if (pmf_device_register(self, NULL, NULL) != true) 1462 aprint_debug_dev(self, "couldn't establish power handler\n"); 1463 sc->sc_itr_rx = IXL_ITR_RX; 1464 sc->sc_itr_tx = IXL_ITR_TX; 1465 sc->sc_attached = true; 1466 return; 1467 1468 teardown_wqs: 1469 config_finalize_register(self, ixl_workqs_teardown); 1470 teardown_sysctls: 1471 ixl_teardown_sysctls(sc); 1472 teardown_stats: 1473 ixl_teardown_stats(sc); 1474 teardown_intrs: 1475 ixl_teardown_interrupts(sc); 1476 free_queue_pairs: 1477 ixl_queue_pairs_free(sc); 1478 free_scratch: 1479 ixl_dmamem_free(sc, &sc->sc_scratch); 1480 free_hmc: 1481 ixl_hmc_free(sc); 1482 free_aqbuf: 1483 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1484 shutdown: 1485 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1486 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1487 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1488 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1489 1490 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1491 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1492 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1493 1494 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1495 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1496 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1497 1498 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1499 0, IXL_DMA_LEN(&sc->sc_arq), 1500 BUS_DMASYNC_POSTREAD); 1501 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1502 0, IXL_DMA_LEN(&sc->sc_atq), 1503 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1504 1505 ixl_arq_unfill(sc); 1506 free_arq: 1507 ixl_dmamem_free(sc, &sc->sc_arq); 1508 free_atq: 1509 ixl_dmamem_free(sc, &sc->sc_atq); 1510 unmap: 1511 mutex_destroy(&sc->sc_atq_lock); 1512 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1513 mutex_destroy(&sc->sc_cfg_lock); 1514 sc->sc_mems = 0; 1515 1516 sc->sc_attached = false; 1517 } 1518 1519 static int 1520 ixl_detach(device_t self, int flags) 1521 { 1522 struct ixl_softc *sc = device_private(self); 1523 struct ifnet *ifp = &sc->sc_ec.ec_if; 1524 1525 if (!sc->sc_attached) 1526 return 0; 1527 1528 ixl_stop(ifp, 1); 1529 1530 ixl_disable_other_intr(sc); 1531 1532 callout_halt(&sc->sc_stats_callout, NULL); 1533 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task); 1534 1535 /* wait for ATQ handler */ 1536 mutex_enter(&sc->sc_atq_lock); 1537 mutex_exit(&sc->sc_atq_lock); 1538 1539 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task); 1540 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task); 1541 1542 if (sc->sc_workq != NULL) { 1543 ixl_workq_destroy(sc->sc_workq); 1544 sc->sc_workq = NULL; 1545 } 1546 1547 if (sc->sc_workq_txrx != NULL) { 1548 workqueue_destroy(sc->sc_workq_txrx); 1549 
sc->sc_workq_txrx = NULL; 1550 } 1551 1552 ether_ifdetach(ifp); 1553 if_detach(ifp); 1554 ifmedia_fini(&sc->sc_media); 1555 1556 ixl_teardown_interrupts(sc); 1557 ixl_teardown_stats(sc); 1558 ixl_teardown_sysctls(sc); 1559 1560 ixl_queue_pairs_free(sc); 1561 1562 ixl_dmamem_free(sc, &sc->sc_scratch); 1563 ixl_hmc_free(sc); 1564 1565 /* shutdown */ 1566 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1567 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1568 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1569 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1570 1571 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1572 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1573 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1574 1575 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1576 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1577 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1578 1579 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1580 0, IXL_DMA_LEN(&sc->sc_arq), 1581 BUS_DMASYNC_POSTREAD); 1582 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1583 0, IXL_DMA_LEN(&sc->sc_atq), 1584 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1585 1586 ixl_arq_unfill(sc); 1587 1588 ixl_dmamem_free(sc, &sc->sc_arq); 1589 ixl_dmamem_free(sc, &sc->sc_atq); 1590 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1591 1592 cv_destroy(&sc->sc_atq_cv); 1593 mutex_destroy(&sc->sc_atq_lock); 1594 1595 if (sc->sc_mems != 0) { 1596 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1597 sc->sc_mems = 0; 1598 } 1599 1600 mutex_destroy(&sc->sc_cfg_lock); 1601 1602 return 0; 1603 } 1604 1605 static int 1606 ixl_workqs_teardown(device_t self) 1607 { 1608 struct ixl_softc *sc = device_private(self); 1609 1610 if (sc->sc_workq != NULL) { 1611 ixl_workq_destroy(sc->sc_workq); 1612 sc->sc_workq = NULL; 1613 } 1614 1615 if (sc->sc_workq_txrx != NULL) { 1616 workqueue_destroy(sc->sc_workq_txrx); 1617 sc->sc_workq_txrx = NULL; 1618 } 1619 1620 return 0; 1621 } 1622 1623 static int 1624 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 1625 { 1626 struct ifnet *ifp = &ec->ec_if; 1627 struct ixl_softc *sc = ifp->if_softc; 1628 int rv; 1629 1630 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 1631 return 0; 1632 } 1633 1634 if (set) { 1635 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid, 1636 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1637 if (rv == 0) { 1638 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 1639 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1640 } 1641 } else { 1642 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid, 1643 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1644 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid, 1645 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1646 } 1647 1648 return rv; 1649 } 1650 1651 static void 1652 ixl_media_add(struct ixl_softc *sc) 1653 { 1654 struct ifmedia *ifm = &sc->sc_media; 1655 const struct ixl_phy_type *itype; 1656 unsigned int i; 1657 bool flow; 1658 1659 if (ISSET(sc->sc_phy_abilities, 1660 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1661 flow = true; 1662 } else { 1663 flow = false; 1664 } 1665 1666 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 1667 itype = &ixl_phy_type_map[i]; 1668 1669 if (ISSET(sc->sc_phy_types, itype->phy_type)) { 1670 ifmedia_add(ifm, 1671 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL); 1672 1673 if (flow) { 1674 ifmedia_add(ifm, 1675 IFM_ETHER | IFM_FDX | IFM_FLOW | 1676 itype->ifm_type, 0, NULL); 1677 } 1678 1679 if (itype->ifm_type != IFM_100_TX) 1680 continue; 1681 1682 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 1683 0, NULL); 1684 if (flow) { 1685 ifmedia_add(ifm, 1686 IFM_ETHER | 
IFM_FLOW | itype->ifm_type, 1687 0, NULL); 1688 } 1689 } 1690 } 1691 } 1692 1693 static void 1694 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1695 { 1696 struct ixl_softc *sc = ifp->if_softc; 1697 1698 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 1699 1700 ifmr->ifm_status = sc->sc_media_status; 1701 ifmr->ifm_active = sc->sc_media_active; 1702 } 1703 1704 static int 1705 ixl_media_change(struct ifnet *ifp) 1706 { 1707 struct ixl_softc *sc = ifp->if_softc; 1708 struct ifmedia *ifm = &sc->sc_media; 1709 uint64_t ifm_active = sc->sc_media_active; 1710 uint8_t link_speed, abilities; 1711 1712 switch (IFM_SUBTYPE(ifm_active)) { 1713 case IFM_1000_SGMII: 1714 case IFM_1000_KX: 1715 case IFM_10G_KX4: 1716 case IFM_10G_KR: 1717 case IFM_40G_KR4: 1718 case IFM_20G_KR2: 1719 case IFM_25G_KR: 1720 /* backplanes */ 1721 return EINVAL; 1722 } 1723 1724 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP; 1725 1726 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1727 case IFM_AUTO: 1728 link_speed = sc->sc_phy_linkspeed; 1729 break; 1730 case IFM_NONE: 1731 link_speed = 0; 1732 CLR(abilities, IXL_PHY_ABILITY_LINKUP); 1733 break; 1734 default: 1735 link_speed = ixl_search_baudrate( 1736 ifmedia_baudrate(ifm->ifm_media)); 1737 } 1738 1739 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) { 1740 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0) 1741 return EINVAL; 1742 } 1743 1744 if (ifm->ifm_media & IFM_FLOW) { 1745 abilities |= sc->sc_phy_abilities & 1746 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX); 1747 } 1748 1749 return ixl_set_phy_config(sc, link_speed, abilities, false); 1750 } 1751 1752 static void 1753 ixl_watchdog(struct ifnet *ifp) 1754 { 1755 1756 } 1757 1758 static void 1759 ixl_del_all_multiaddr(struct ixl_softc *sc) 1760 { 1761 struct ethercom *ec = &sc->sc_ec; 1762 struct ether_multi *enm; 1763 struct ether_multistep step; 1764 1765 ETHER_LOCK(ec); 1766 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1767 ETHER_NEXT_MULTI(step, enm)) { 1768 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1769 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1770 } 1771 ETHER_UNLOCK(ec); 1772 } 1773 1774 static int 1775 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1776 { 1777 struct ifnet *ifp = &sc->sc_ec.ec_if; 1778 int rv; 1779 1780 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) 1781 return 0; 1782 1783 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) { 1784 ixl_del_all_multiaddr(sc); 1785 SET(ifp->if_flags, IFF_ALLMULTI); 1786 return ENETRESET; 1787 } 1788 1789 /* multicast address can not use VLAN HWFILTER */ 1790 rv = ixl_add_macvlan(sc, addrlo, 0, 1791 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1792 1793 if (rv == ENOSPC) { 1794 ixl_del_all_multiaddr(sc); 1795 SET(ifp->if_flags, IFF_ALLMULTI); 1796 return ENETRESET; 1797 } 1798 1799 return rv; 1800 } 1801 1802 static int 1803 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1804 { 1805 struct ifnet *ifp = &sc->sc_ec.ec_if; 1806 struct ethercom *ec = &sc->sc_ec; 1807 struct ether_multi *enm, *enm_last; 1808 struct ether_multistep step; 1809 int error, rv = 0; 1810 1811 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) { 1812 ixl_remove_macvlan(sc, addrlo, 0, 1813 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1814 return 0; 1815 } 1816 1817 ETHER_LOCK(ec); 1818 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1819 ETHER_NEXT_MULTI(step, enm)) { 1820 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1821 ETHER_ADDR_LEN) != 0) { 1822 goto out; 1823 } 1824 } 1825 1826 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1827 
ETHER_NEXT_MULTI(step, enm)) { 1828 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0, 1829 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1830 if (error != 0) 1831 break; 1832 } 1833 1834 if (enm != NULL) { 1835 enm_last = enm; 1836 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1837 ETHER_NEXT_MULTI(step, enm)) { 1838 if (enm == enm_last) 1839 break; 1840 1841 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1842 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1843 } 1844 } else { 1845 CLR(ifp->if_flags, IFF_ALLMULTI); 1846 rv = ENETRESET; 1847 } 1848 1849 out: 1850 ETHER_UNLOCK(ec); 1851 return rv; 1852 } 1853 1854 static int 1855 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1856 { 1857 struct ifreq *ifr = (struct ifreq *)data; 1858 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc; 1859 const struct sockaddr *sa; 1860 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN]; 1861 int s, error = 0; 1862 unsigned int nmtu; 1863 1864 switch (cmd) { 1865 case SIOCSIFMTU: 1866 nmtu = ifr->ifr_mtu; 1867 1868 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) { 1869 error = EINVAL; 1870 break; 1871 } 1872 if (ifp->if_mtu != nmtu) { 1873 s = splnet(); 1874 error = ether_ioctl(ifp, cmd, data); 1875 splx(s); 1876 if (error == ENETRESET) 1877 error = ixl_init(ifp); 1878 } 1879 break; 1880 case SIOCADDMULTI: 1881 sa = ifreq_getaddr(SIOCADDMULTI, ifr); 1882 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) { 1883 error = ether_multiaddr(sa, addrlo, addrhi); 1884 if (error != 0) 1885 return error; 1886 1887 error = ixl_add_multi(sc, addrlo, addrhi); 1888 if (error != 0 && error != ENETRESET) { 1889 ether_delmulti(sa, &sc->sc_ec); 1890 error = EIO; 1891 } 1892 } 1893 break; 1894 1895 case SIOCDELMULTI: 1896 sa = ifreq_getaddr(SIOCDELMULTI, ifr); 1897 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) { 1898 error = ether_multiaddr(sa, addrlo, addrhi); 1899 if (error != 0) 1900 return error; 1901 1902 error = ixl_del_multi(sc, addrlo, addrhi); 1903 } 1904 break; 1905 1906 default: 1907 s = splnet(); 1908 error = ether_ioctl(ifp, cmd, data); 1909 splx(s); 1910 } 1911 1912 if (error == ENETRESET) 1913 error = ixl_iff(sc); 1914 1915 return error; 1916 } 1917 1918 static enum i40e_mac_type 1919 ixl_mactype(pci_product_id_t id) 1920 { 1921 1922 switch (id) { 1923 case PCI_PRODUCT_INTEL_XL710_SFP: 1924 case PCI_PRODUCT_INTEL_XL710_KX_B: 1925 case PCI_PRODUCT_INTEL_XL710_KX_C: 1926 case PCI_PRODUCT_INTEL_XL710_QSFP_A: 1927 case PCI_PRODUCT_INTEL_XL710_QSFP_B: 1928 case PCI_PRODUCT_INTEL_XL710_QSFP_C: 1929 case PCI_PRODUCT_INTEL_X710_10G_T: 1930 case PCI_PRODUCT_INTEL_XL710_20G_BP_1: 1931 case PCI_PRODUCT_INTEL_XL710_20G_BP_2: 1932 case PCI_PRODUCT_INTEL_X710_T4_10G: 1933 case PCI_PRODUCT_INTEL_XXV710_25G_BP: 1934 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28: 1935 return I40E_MAC_XL710; 1936 1937 case PCI_PRODUCT_INTEL_X722_KX: 1938 case PCI_PRODUCT_INTEL_X722_QSFP: 1939 case PCI_PRODUCT_INTEL_X722_SFP: 1940 case PCI_PRODUCT_INTEL_X722_1G_BASET: 1941 case PCI_PRODUCT_INTEL_X722_10G_BASET: 1942 case PCI_PRODUCT_INTEL_X722_I_SFP: 1943 return I40E_MAC_X722; 1944 } 1945 1946 return I40E_MAC_GENERIC; 1947 } 1948 1949 static void 1950 ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag) 1951 { 1952 pcireg_t csr; 1953 1954 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 1955 csr |= (PCI_COMMAND_MASTER_ENABLE | 1956 PCI_COMMAND_MEM_ENABLE); 1957 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 1958 } 1959 1960 static inline void * 1961 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i) 1962 { 1963 uint8_t 
*kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 1964 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1965 1966 if (i >= e->hmc_count) 1967 return NULL; 1968 1969 kva += e->hmc_base; 1970 kva += i * e->hmc_size; 1971 1972 return kva; 1973 } 1974 1975 static inline size_t 1976 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type) 1977 { 1978 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1979 1980 return e->hmc_size; 1981 } 1982 1983 static void 1984 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1985 { 1986 struct ixl_rx_ring *rxr = qp->qp_rxr; 1987 1988 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 1989 I40E_PFINT_DYN_CTLN_INTENA_MASK | 1990 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 1991 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 1992 ixl_flush(sc); 1993 } 1994 1995 static void 1996 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1997 { 1998 struct ixl_rx_ring *rxr = qp->qp_rxr; 1999 2000 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 2001 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 2002 ixl_flush(sc); 2003 } 2004 2005 static void 2006 ixl_enable_other_intr(struct ixl_softc *sc) 2007 { 2008 2009 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 2010 I40E_PFINT_DYN_CTL0_INTENA_MASK | 2011 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 2012 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 2013 ixl_flush(sc); 2014 } 2015 2016 static void 2017 ixl_disable_other_intr(struct ixl_softc *sc) 2018 { 2019 2020 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 2021 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 2022 ixl_flush(sc); 2023 } 2024 2025 static int 2026 ixl_reinit(struct ixl_softc *sc) 2027 { 2028 struct ixl_rx_ring *rxr; 2029 struct ixl_tx_ring *txr; 2030 unsigned int i; 2031 uint32_t reg; 2032 2033 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2034 2035 if (ixl_get_vsi(sc) != 0) 2036 return EIO; 2037 2038 if (ixl_set_vsi(sc) != 0) 2039 return EIO; 2040 2041 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2042 txr = sc->sc_qps[i].qp_txr; 2043 rxr = sc->sc_qps[i].qp_rxr; 2044 2045 ixl_txr_config(sc, txr); 2046 ixl_rxr_config(sc, rxr); 2047 } 2048 2049 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2050 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE); 2051 2052 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2053 txr = sc->sc_qps[i].qp_txr; 2054 rxr = sc->sc_qps[i].qp_rxr; 2055 2056 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE | 2057 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)); 2058 ixl_flush(sc); 2059 2060 ixl_wr(sc, txr->txr_tail, txr->txr_prod); 2061 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod); 2062 2063 /* ixl_rxfill() needs lock held */ 2064 mutex_enter(&rxr->rxr_lock); 2065 ixl_rxfill(sc, rxr); 2066 mutex_exit(&rxr->rxr_lock); 2067 2068 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2069 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2070 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2071 if (ixl_rxr_enabled(sc, rxr) != 0) 2072 goto stop; 2073 2074 ixl_txr_qdis(sc, txr, 1); 2075 2076 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2077 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2078 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2079 2080 if (ixl_txr_enabled(sc, txr) != 0) 2081 goto stop; 2082 } 2083 2084 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2085 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2086 2087 return 0; 2088 2089 stop: 2090 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2091 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2092 2093 return ETIMEDOUT; 2094 } 2095 2096 static int 2097 ixl_init_locked(struct ixl_softc *sc) 2098 { 2099 struct ifnet *ifp = 
&sc->sc_ec.ec_if; 2100 unsigned int i; 2101 int error, eccap_change; 2102 2103 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2104 2105 if (ISSET(ifp->if_flags, IFF_RUNNING)) 2106 ixl_stop_locked(sc); 2107 2108 if (sc->sc_dead) { 2109 return ENXIO; 2110 } 2111 2112 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable; 2113 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING)) 2114 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 2115 2116 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) { 2117 if (ixl_update_macvlan(sc) == 0) { 2118 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 2119 } else { 2120 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 2121 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 2122 } 2123 } 2124 2125 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX) 2126 sc->sc_nqueue_pairs = 1; 2127 else 2128 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 2129 2130 error = ixl_reinit(sc); 2131 if (error) { 2132 ixl_stop_locked(sc); 2133 return error; 2134 } 2135 2136 SET(ifp->if_flags, IFF_RUNNING); 2137 CLR(ifp->if_flags, IFF_OACTIVE); 2138 2139 ixl_config_rss(sc); 2140 ixl_config_queue_intr(sc); 2141 2142 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2143 ixl_enable_queue_intr(sc, &sc->sc_qps[i]); 2144 } 2145 2146 error = ixl_iff(sc); 2147 if (error) { 2148 ixl_stop_locked(sc); 2149 return error; 2150 } 2151 2152 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 2153 2154 return 0; 2155 } 2156 2157 static int 2158 ixl_init(struct ifnet *ifp) 2159 { 2160 struct ixl_softc *sc = ifp->if_softc; 2161 int error; 2162 2163 mutex_enter(&sc->sc_cfg_lock); 2164 error = ixl_init_locked(sc); 2165 mutex_exit(&sc->sc_cfg_lock); 2166 2167 if (error == 0) 2168 (void)ixl_get_link_status(sc); 2169 2170 return error; 2171 } 2172 2173 static int 2174 ixl_iff(struct ixl_softc *sc) 2175 { 2176 struct ifnet *ifp = &sc->sc_ec.ec_if; 2177 struct ixl_atq iatq; 2178 struct ixl_aq_desc *iaq; 2179 struct ixl_aq_vsi_promisc_param *param; 2180 uint16_t flag_add, flag_del; 2181 int error; 2182 2183 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 2184 return 0; 2185 2186 memset(&iatq, 0, sizeof(iatq)); 2187 2188 iaq = &iatq.iatq_desc; 2189 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC); 2190 2191 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param; 2192 param->flags = htole16(0); 2193 2194 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER) 2195 || ISSET(ifp->if_flags, IFF_PROMISC)) { 2196 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2197 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2198 } 2199 2200 if (ISSET(ifp->if_flags, IFF_PROMISC)) { 2201 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2202 IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2203 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) { 2204 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2205 } 2206 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2207 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2208 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2209 param->seid = sc->sc_seid; 2210 2211 error = ixl_atq_exec(sc, &iatq); 2212 if (error) 2213 return error; 2214 2215 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) 2216 return EIO; 2217 2218 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) { 2219 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 2220 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH; 2221 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH; 2222 } else { 2223 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN; 2224 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN; 2225 
} 2226 2227 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del); 2228 2229 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 2230 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add); 2231 } 2232 return 0; 2233 } 2234 2235 static void 2236 ixl_stop_rendezvous(struct ixl_softc *sc) 2237 { 2238 struct ixl_tx_ring *txr; 2239 struct ixl_rx_ring *rxr; 2240 unsigned int i; 2241 2242 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2243 txr = sc->sc_qps[i].qp_txr; 2244 rxr = sc->sc_qps[i].qp_rxr; 2245 2246 mutex_enter(&txr->txr_lock); 2247 mutex_exit(&txr->txr_lock); 2248 2249 mutex_enter(&rxr->rxr_lock); 2250 mutex_exit(&rxr->rxr_lock); 2251 2252 sc->sc_qps[i].qp_workqueue = false; 2253 workqueue_wait(sc->sc_workq_txrx, 2254 &sc->sc_qps[i].qp_work); 2255 } 2256 } 2257 2258 static void 2259 ixl_stop_locked(struct ixl_softc *sc) 2260 { 2261 struct ifnet *ifp = &sc->sc_ec.ec_if; 2262 struct ixl_rx_ring *rxr; 2263 struct ixl_tx_ring *txr; 2264 unsigned int i; 2265 uint32_t reg; 2266 2267 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2268 2269 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE); 2270 callout_stop(&sc->sc_stats_callout); 2271 2272 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2273 txr = sc->sc_qps[i].qp_txr; 2274 rxr = sc->sc_qps[i].qp_rxr; 2275 2276 ixl_disable_queue_intr(sc, &sc->sc_qps[i]); 2277 2278 mutex_enter(&txr->txr_lock); 2279 ixl_txr_qdis(sc, txr, 0); 2280 mutex_exit(&txr->txr_lock); 2281 } 2282 2283 /* XXX wait at least 400 usec for all tx queues in one go */ 2284 ixl_flush(sc); 2285 DELAY(500); 2286 2287 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2288 txr = sc->sc_qps[i].qp_txr; 2289 rxr = sc->sc_qps[i].qp_rxr; 2290 2291 mutex_enter(&txr->txr_lock); 2292 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2293 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2294 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2295 mutex_exit(&txr->txr_lock); 2296 2297 mutex_enter(&rxr->rxr_lock); 2298 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2299 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2300 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2301 mutex_exit(&rxr->rxr_lock); 2302 } 2303 2304 /* XXX short wait for all queue disables to settle */ 2305 ixl_flush(sc); 2306 DELAY(50); 2307 2308 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2309 txr = sc->sc_qps[i].qp_txr; 2310 rxr = sc->sc_qps[i].qp_rxr; 2311 2312 mutex_enter(&txr->txr_lock); 2313 if (ixl_txr_disabled(sc, txr) != 0) { 2314 mutex_exit(&txr->txr_lock); 2315 goto die; 2316 } 2317 mutex_exit(&txr->txr_lock); 2318 2319 mutex_enter(&rxr->rxr_lock); 2320 if (ixl_rxr_disabled(sc, rxr) != 0) { 2321 mutex_exit(&rxr->rxr_lock); 2322 goto die; 2323 } 2324 mutex_exit(&rxr->rxr_lock); 2325 } 2326 2327 ixl_stop_rendezvous(sc); 2328 2329 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2330 txr = sc->sc_qps[i].qp_txr; 2331 rxr = sc->sc_qps[i].qp_rxr; 2332 2333 mutex_enter(&txr->txr_lock); 2334 ixl_txr_unconfig(sc, txr); 2335 mutex_exit(&txr->txr_lock); 2336 2337 mutex_enter(&rxr->rxr_lock); 2338 ixl_rxr_unconfig(sc, rxr); 2339 mutex_exit(&rxr->rxr_lock); 2340 2341 ixl_txr_clean(sc, txr); 2342 ixl_rxr_clean(sc, rxr); 2343 } 2344 2345 return; 2346 die: 2347 sc->sc_dead = true; 2348 log(LOG_CRIT, "%s: failed to shut down rings", 2349 device_xname(sc->sc_dev)); 2350 return; 2351 } 2352 2353 static void 2354 ixl_stop(struct ifnet *ifp, int disable) 2355 { 2356 struct ixl_softc *sc = ifp->if_softc; 2357 2358 mutex_enter(&sc->sc_cfg_lock); 2359 ixl_stop_locked(sc); 2360 mutex_exit(&sc->sc_cfg_lock); 2361 } 2362 2363 static int 2364 ixl_queue_pairs_alloc(struct ixl_softc *sc) 2365 { 2366 struct ixl_queue_pair *qp; 2367 unsigned int i; 
2368 size_t sz; 2369 2370 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2371 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP); 2372 2373 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2374 qp = &sc->sc_qps[i]; 2375 2376 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2377 ixl_handle_queue, qp); 2378 if (qp->qp_si == NULL) 2379 goto free; 2380 2381 qp->qp_txr = ixl_txr_alloc(sc, i); 2382 if (qp->qp_txr == NULL) 2383 goto free; 2384 2385 qp->qp_rxr = ixl_rxr_alloc(sc, i); 2386 if (qp->qp_rxr == NULL) 2387 goto free; 2388 2389 qp->qp_sc = sc; 2390 snprintf(qp->qp_name, sizeof(qp->qp_name), 2391 "%s-TXRX%d", device_xname(sc->sc_dev), i); 2392 } 2393 2394 return 0; 2395 free: 2396 if (sc->sc_qps != NULL) { 2397 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2398 qp = &sc->sc_qps[i]; 2399 2400 if (qp->qp_txr != NULL) 2401 ixl_txr_free(sc, qp->qp_txr); 2402 if (qp->qp_rxr != NULL) 2403 ixl_rxr_free(sc, qp->qp_rxr); 2404 if (qp->qp_si != NULL) 2405 softint_disestablish(qp->qp_si); 2406 } 2407 2408 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2409 kmem_free(sc->sc_qps, sz); 2410 sc->sc_qps = NULL; 2411 } 2412 2413 return -1; 2414 } 2415 2416 static void 2417 ixl_queue_pairs_free(struct ixl_softc *sc) 2418 { 2419 struct ixl_queue_pair *qp; 2420 unsigned int i; 2421 size_t sz; 2422 2423 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2424 qp = &sc->sc_qps[i]; 2425 ixl_txr_free(sc, qp->qp_txr); 2426 ixl_rxr_free(sc, qp->qp_rxr); 2427 softint_disestablish(qp->qp_si); 2428 } 2429 2430 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2431 kmem_free(sc->sc_qps, sz); 2432 sc->sc_qps = NULL; 2433 } 2434 2435 static struct ixl_tx_ring * 2436 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid) 2437 { 2438 struct ixl_tx_ring *txr = NULL; 2439 struct ixl_tx_map *maps = NULL, *txm; 2440 unsigned int i; 2441 2442 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP); 2443 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs, 2444 KM_SLEEP); 2445 2446 if (ixl_dmamem_alloc(sc, &txr->txr_mem, 2447 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs, 2448 IXL_TX_QUEUE_ALIGN) != 0) 2449 goto free; 2450 2451 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2452 txm = &maps[i]; 2453 2454 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE, 2455 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0, 2456 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0) 2457 goto uncreate; 2458 2459 txm->txm_eop = -1; 2460 txm->txm_m = NULL; 2461 } 2462 2463 txr->txr_cons = txr->txr_prod = 0; 2464 txr->txr_maps = maps; 2465 2466 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP); 2467 if (txr->txr_intrq == NULL) 2468 goto uncreate; 2469 2470 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2471 ixl_deferred_transmit, txr); 2472 if (txr->txr_si == NULL) 2473 goto destroy_pcq; 2474 2475 txr->txr_tail = I40E_QTX_TAIL(qid); 2476 txr->txr_qid = qid; 2477 txr->txr_sc = sc; 2478 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET); 2479 2480 return txr; 2481 2482 destroy_pcq: 2483 pcq_destroy(txr->txr_intrq); 2484 uncreate: 2485 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2486 txm = &maps[i]; 2487 2488 if (txm->txm_map == NULL) 2489 continue; 2490 2491 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2492 } 2493 2494 ixl_dmamem_free(sc, &txr->txr_mem); 2495 free: 2496 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2497 kmem_free(txr, sizeof(*txr)); 2498 2499 return NULL; 2500 } 2501 2502 static void 2503 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable) 2504 { 2505 unsigned int 
qid; 2506 bus_size_t reg; 2507 uint32_t r; 2508 2509 qid = txr->txr_qid + sc->sc_base_queue; 2510 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128); 2511 qid %= 128; 2512 2513 r = ixl_rd(sc, reg); 2514 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK); 2515 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 2516 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK : 2517 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK); 2518 ixl_wr(sc, reg, r); 2519 } 2520 2521 static void 2522 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2523 { 2524 struct ixl_hmc_txq txq; 2525 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch); 2526 void *hmc; 2527 2528 memset(&txq, 0, sizeof(txq)); 2529 txq.head = htole16(txr->txr_cons); 2530 txq.new_context = 1; 2531 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT); 2532 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB; 2533 txq.qlen = htole16(sc->sc_tx_ring_ndescs); 2534 txq.tphrdesc_ena = 0; 2535 txq.tphrpacket_ena = 0; 2536 txq.tphwdesc_ena = 0; 2537 txq.rdylist = data->qs_handle[0]; 2538 2539 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2540 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2541 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, 2542 __arraycount(ixl_hmc_pack_txq)); 2543 } 2544 2545 static void 2546 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2547 { 2548 void *hmc; 2549 2550 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2551 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2552 txr->txr_cons = txr->txr_prod = 0; 2553 } 2554 2555 static void 2556 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2557 { 2558 struct ixl_tx_map *maps, *txm; 2559 bus_dmamap_t map; 2560 unsigned int i; 2561 2562 maps = txr->txr_maps; 2563 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2564 txm = &maps[i]; 2565 2566 if (txm->txm_m == NULL) 2567 continue; 2568 2569 map = txm->txm_map; 2570 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2571 BUS_DMASYNC_POSTWRITE); 2572 bus_dmamap_unload(sc->sc_dmat, map); 2573 2574 m_freem(txm->txm_m); 2575 txm->txm_m = NULL; 2576 } 2577 } 2578 2579 static int 2580 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2581 { 2582 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2583 uint32_t reg; 2584 int i; 2585 2586 for (i = 0; i < 10; i++) { 2587 reg = ixl_rd(sc, ena); 2588 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK)) 2589 return 0; 2590 2591 delaymsec(10); 2592 } 2593 2594 return ETIMEDOUT; 2595 } 2596 2597 static int 2598 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2599 { 2600 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2601 uint32_t reg; 2602 int i; 2603 2604 KASSERT(mutex_owned(&txr->txr_lock)); 2605 2606 for (i = 0; i < 10; i++) { 2607 reg = ixl_rd(sc, ena); 2608 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0) 2609 return 0; 2610 2611 delaymsec(10); 2612 } 2613 2614 return ETIMEDOUT; 2615 } 2616 2617 static void 2618 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2619 { 2620 struct ixl_tx_map *maps, *txm; 2621 struct mbuf *m; 2622 unsigned int i; 2623 2624 softint_disestablish(txr->txr_si); 2625 while ((m = pcq_get(txr->txr_intrq)) != NULL) 2626 m_freem(m); 2627 pcq_destroy(txr->txr_intrq); 2628 2629 maps = txr->txr_maps; 2630 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2631 txm = &maps[i]; 2632 2633 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2634 } 2635 2636 ixl_dmamem_free(sc, &txr->txr_mem); 2637 mutex_destroy(&txr->txr_lock); 2638 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2639 kmem_free(txr, sizeof(*txr)); 2640 } 2641 
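/*
 * ixl_load_mbuf: DMA-load an outgoing mbuf chain into a TX map.  If the
 * chain needs more segments than the map allows (EFBIG), it is compacted
 * once with m_defrag() and the load is retried; any other failure is
 * returned to the caller, which drops the packet and counts an output
 * error.  The defragmentation event counters are per-ring, which is why
 * txr_lock must be held here.
 */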
2642 static inline int 2643 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0, 2644 struct ixl_tx_ring *txr) 2645 { 2646 struct mbuf *m; 2647 int error; 2648 2649 KASSERT(mutex_owned(&txr->txr_lock)); 2650 2651 m = *m0; 2652 2653 error = bus_dmamap_load_mbuf(dmat, map, m, 2654 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2655 if (error != EFBIG) 2656 return error; 2657 2658 m = m_defrag(m, M_DONTWAIT); 2659 if (m != NULL) { 2660 *m0 = m; 2661 txr->txr_defragged.ev_count++; 2662 2663 error = bus_dmamap_load_mbuf(dmat, map, m, 2664 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2665 } else { 2666 txr->txr_defrag_failed.ev_count++; 2667 error = ENOBUFS; 2668 } 2669 2670 return error; 2671 } 2672 2673 static inline int 2674 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd) 2675 { 2676 struct ether_header *eh; 2677 size_t len; 2678 uint64_t cmd; 2679 2680 cmd = 0; 2681 2682 eh = mtod(m, struct ether_header *); 2683 switch (htons(eh->ether_type)) { 2684 case ETHERTYPE_IP: 2685 case ETHERTYPE_IPV6: 2686 len = ETHER_HDR_LEN; 2687 break; 2688 case ETHERTYPE_VLAN: 2689 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2690 break; 2691 default: 2692 len = 0; 2693 } 2694 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT); 2695 2696 if (m->m_pkthdr.csum_flags & 2697 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 2698 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4; 2699 } 2700 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) { 2701 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM; 2702 } 2703 2704 if (m->m_pkthdr.csum_flags & 2705 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 2706 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6; 2707 } 2708 2709 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) { 2710 case IXL_TX_DESC_CMD_IIPT_IPV4: 2711 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM: 2712 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2713 break; 2714 case IXL_TX_DESC_CMD_IIPT_IPV6: 2715 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2716 break; 2717 default: 2718 len = 0; 2719 } 2720 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT); 2721 2722 if (m->m_pkthdr.csum_flags & 2723 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) { 2724 len = sizeof(struct tcphdr); 2725 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP; 2726 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) { 2727 len = sizeof(struct udphdr); 2728 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP; 2729 } else { 2730 len = 0; 2731 } 2732 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT); 2733 2734 *cmd_txd |= cmd; 2735 return 0; 2736 } 2737 2738 static void 2739 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr, 2740 bool is_transmit) 2741 { 2742 struct ixl_softc *sc = ifp->if_softc; 2743 struct ixl_tx_desc *ring, *txd; 2744 struct ixl_tx_map *txm; 2745 bus_dmamap_t map; 2746 struct mbuf *m; 2747 uint64_t cmd, cmd_txd; 2748 unsigned int prod, free, last, i; 2749 unsigned int mask; 2750 int post = 0; 2751 2752 KASSERT(mutex_owned(&txr->txr_lock)); 2753 2754 if (!ISSET(ifp->if_flags, IFF_RUNNING) 2755 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) { 2756 if (!is_transmit) 2757 IFQ_PURGE(&ifp->if_snd); 2758 return; 2759 } 2760 2761 prod = txr->txr_prod; 2762 free = txr->txr_cons; 2763 if (free <= prod) 2764 free += sc->sc_tx_ring_ndescs; 2765 free -= prod; 2766 2767 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2768 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE); 2769 2770 ring = IXL_DMA_KVA(&txr->txr_mem); 2771 mask = sc->sc_tx_ring_ndescs - 1; 2772 last = prod; 2773 cmd = 0; 2774 txd = NULL; 2775 2776 for (;;) { 2777 if (free <= 
IXL_TX_PKT_DESCS) { 2778 if (!is_transmit) 2779 SET(ifp->if_flags, IFF_OACTIVE); 2780 break; 2781 } 2782 2783 if (is_transmit) 2784 m = pcq_get(txr->txr_intrq); 2785 else 2786 IFQ_DEQUEUE(&ifp->if_snd, m); 2787 2788 if (m == NULL) 2789 break; 2790 2791 txm = &txr->txr_maps[prod]; 2792 map = txm->txm_map; 2793 2794 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) { 2795 if_statinc(ifp, if_oerrors); 2796 m_freem(m); 2797 continue; 2798 } 2799 2800 cmd_txd = 0; 2801 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) { 2802 ixl_tx_setup_offloads(m, &cmd_txd); 2803 } 2804 2805 if (vlan_has_tag(m)) { 2806 cmd_txd |= (uint64_t)vlan_get_tag(m) << 2807 IXL_TX_DESC_L2TAG1_SHIFT; 2808 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1; 2809 } 2810 2811 bus_dmamap_sync(sc->sc_dmat, map, 0, 2812 map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2813 2814 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) { 2815 txd = &ring[prod]; 2816 2817 cmd = (uint64_t)map->dm_segs[i].ds_len << 2818 IXL_TX_DESC_BSIZE_SHIFT; 2819 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC; 2820 cmd |= cmd_txd; 2821 2822 txd->addr = htole64(map->dm_segs[i].ds_addr); 2823 txd->cmd = htole64(cmd); 2824 2825 last = prod; 2826 2827 prod++; 2828 prod &= mask; 2829 } 2830 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS; 2831 txd->cmd = htole64(cmd); 2832 2833 txm->txm_m = m; 2834 txm->txm_eop = last; 2835 2836 bpf_mtap(ifp, m, BPF_D_OUT); 2837 2838 free -= i; 2839 post = 1; 2840 } 2841 2842 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2843 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE); 2844 2845 if (post) { 2846 txr->txr_prod = prod; 2847 ixl_wr(sc, txr->txr_tail, prod); 2848 } 2849 } 2850 2851 static int 2852 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit) 2853 { 2854 struct ifnet *ifp = &sc->sc_ec.ec_if; 2855 struct ixl_tx_desc *ring, *txd; 2856 struct ixl_tx_map *txm; 2857 struct mbuf *m; 2858 bus_dmamap_t map; 2859 unsigned int cons, prod, last; 2860 unsigned int mask; 2861 uint64_t dtype; 2862 int done = 0, more = 0; 2863 2864 KASSERT(mutex_owned(&txr->txr_lock)); 2865 2866 prod = txr->txr_prod; 2867 cons = txr->txr_cons; 2868 2869 if (cons == prod) 2870 return 0; 2871 2872 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2873 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD); 2874 2875 ring = IXL_DMA_KVA(&txr->txr_mem); 2876 mask = sc->sc_tx_ring_ndescs - 1; 2877 2878 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2879 2880 do { 2881 if (txlimit-- <= 0) { 2882 more = 1; 2883 break; 2884 } 2885 2886 txm = &txr->txr_maps[cons]; 2887 last = txm->txm_eop; 2888 txd = &ring[last]; 2889 2890 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK); 2891 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE)) 2892 break; 2893 2894 map = txm->txm_map; 2895 2896 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2897 BUS_DMASYNC_POSTWRITE); 2898 bus_dmamap_unload(sc->sc_dmat, map); 2899 2900 m = txm->txm_m; 2901 if (m != NULL) { 2902 if_statinc_ref(nsr, if_opackets); 2903 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len); 2904 if (ISSET(m->m_flags, M_MCAST)) 2905 if_statinc_ref(nsr, if_omcasts); 2906 m_freem(m); 2907 } 2908 2909 txm->txm_m = NULL; 2910 txm->txm_eop = -1; 2911 2912 cons = last + 1; 2913 cons &= mask; 2914 done = 1; 2915 } while (cons != prod); 2916 2917 IF_STAT_PUTREF(ifp); 2918 2919 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2920 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD); 2921 2922 txr->txr_cons = cons; 2923 2924 if (done) { 2925 softint_schedule(txr->txr_si); 2926 if (txr->txr_qid == 
0) { 2927 CLR(ifp->if_flags, IFF_OACTIVE); 2928 if_schedule_deferred_start(ifp); 2929 } 2930 } 2931 2932 return more; 2933 } 2934 2935 static void 2936 ixl_start(struct ifnet *ifp) 2937 { 2938 struct ixl_softc *sc; 2939 struct ixl_tx_ring *txr; 2940 2941 sc = ifp->if_softc; 2942 txr = sc->sc_qps[0].qp_txr; 2943 2944 mutex_enter(&txr->txr_lock); 2945 ixl_tx_common_locked(ifp, txr, false); 2946 mutex_exit(&txr->txr_lock); 2947 } 2948 2949 static inline unsigned int 2950 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m) 2951 { 2952 u_int cpuid; 2953 2954 cpuid = cpu_index(curcpu()); 2955 2956 return (unsigned int)(cpuid % sc->sc_nqueue_pairs); 2957 } 2958 2959 static int 2960 ixl_transmit(struct ifnet *ifp, struct mbuf *m) 2961 { 2962 struct ixl_softc *sc; 2963 struct ixl_tx_ring *txr; 2964 unsigned int qid; 2965 2966 sc = ifp->if_softc; 2967 qid = ixl_select_txqueue(sc, m); 2968 2969 txr = sc->sc_qps[qid].qp_txr; 2970 2971 if (__predict_false(!pcq_put(txr->txr_intrq, m))) { 2972 mutex_enter(&txr->txr_lock); 2973 txr->txr_pcqdrop.ev_count++; 2974 mutex_exit(&txr->txr_lock); 2975 2976 m_freem(m); 2977 return ENOBUFS; 2978 } 2979 2980 if (mutex_tryenter(&txr->txr_lock)) { 2981 ixl_tx_common_locked(ifp, txr, true); 2982 mutex_exit(&txr->txr_lock); 2983 } else { 2984 kpreempt_disable(); 2985 softint_schedule(txr->txr_si); 2986 kpreempt_enable(); 2987 } 2988 2989 return 0; 2990 } 2991 2992 static void 2993 ixl_deferred_transmit(void *xtxr) 2994 { 2995 struct ixl_tx_ring *txr = xtxr; 2996 struct ixl_softc *sc = txr->txr_sc; 2997 struct ifnet *ifp = &sc->sc_ec.ec_if; 2998 2999 mutex_enter(&txr->txr_lock); 3000 txr->txr_transmitdef.ev_count++; 3001 if (pcq_peek(txr->txr_intrq) != NULL) 3002 ixl_tx_common_locked(ifp, txr, true); 3003 mutex_exit(&txr->txr_lock); 3004 } 3005 3006 static struct ixl_rx_ring * 3007 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid) 3008 { 3009 struct ixl_rx_ring *rxr = NULL; 3010 struct ixl_rx_map *maps = NULL, *rxm; 3011 unsigned int i; 3012 3013 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP); 3014 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs, 3015 KM_SLEEP); 3016 3017 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem, 3018 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs, 3019 IXL_RX_QUEUE_ALIGN) != 0) 3020 goto free; 3021 3022 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3023 rxm = &maps[i]; 3024 3025 if (bus_dmamap_create(sc->sc_dmat, 3026 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0, 3027 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0) 3028 goto uncreate; 3029 3030 rxm->rxm_m = NULL; 3031 } 3032 3033 rxr->rxr_cons = rxr->rxr_prod = 0; 3034 rxr->rxr_m_head = NULL; 3035 rxr->rxr_m_tail = &rxr->rxr_m_head; 3036 rxr->rxr_maps = maps; 3037 3038 rxr->rxr_tail = I40E_QRX_TAIL(qid); 3039 rxr->rxr_qid = qid; 3040 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET); 3041 3042 return rxr; 3043 3044 uncreate: 3045 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3046 rxm = &maps[i]; 3047 3048 if (rxm->rxm_map == NULL) 3049 continue; 3050 3051 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3052 } 3053 3054 ixl_dmamem_free(sc, &rxr->rxr_mem); 3055 free: 3056 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3057 kmem_free(rxr, sizeof(*rxr)); 3058 3059 return NULL; 3060 } 3061 3062 static void 3063 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3064 { 3065 struct ixl_rx_map *maps, *rxm; 3066 bus_dmamap_t map; 3067 unsigned int i; 3068 3069 maps = rxr->rxr_maps; 3070 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3071 rxm = &maps[i]; 3072 3073 if 
(rxm->rxm_m == NULL) 3074 continue; 3075 3076 map = rxm->rxm_map; 3077 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3078 BUS_DMASYNC_POSTWRITE); 3079 bus_dmamap_unload(sc->sc_dmat, map); 3080 3081 m_freem(rxm->rxm_m); 3082 rxm->rxm_m = NULL; 3083 } 3084 3085 m_freem(rxr->rxr_m_head); 3086 rxr->rxr_m_head = NULL; 3087 rxr->rxr_m_tail = &rxr->rxr_m_head; 3088 3089 rxr->rxr_prod = rxr->rxr_cons = 0; 3090 } 3091 3092 static int 3093 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3094 { 3095 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3096 uint32_t reg; 3097 int i; 3098 3099 for (i = 0; i < 10; i++) { 3100 reg = ixl_rd(sc, ena); 3101 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK)) 3102 return 0; 3103 3104 delaymsec(10); 3105 } 3106 3107 return ETIMEDOUT; 3108 } 3109 3110 static int 3111 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3112 { 3113 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3114 uint32_t reg; 3115 int i; 3116 3117 KASSERT(mutex_owned(&rxr->rxr_lock)); 3118 3119 for (i = 0; i < 10; i++) { 3120 reg = ixl_rd(sc, ena); 3121 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0) 3122 return 0; 3123 3124 delaymsec(10); 3125 } 3126 3127 return ETIMEDOUT; 3128 } 3129 3130 static void 3131 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3132 { 3133 struct ixl_hmc_rxq rxq; 3134 struct ifnet *ifp = &sc->sc_ec.ec_if; 3135 uint16_t rxmax; 3136 void *hmc; 3137 3138 memset(&rxq, 0, sizeof(rxq)); 3139 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN; 3140 3141 rxq.head = htole16(rxr->rxr_cons); 3142 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT); 3143 rxq.qlen = htole16(sc->sc_rx_ring_ndescs); 3144 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT); 3145 rxq.hbuff = 0; 3146 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT; 3147 rxq.dsize = IXL_HMC_RXQ_DSIZE_32; 3148 rxq.crcstrip = 1; 3149 rxq.l2sel = 1; 3150 rxq.showiv = 1; 3151 rxq.rxmax = htole16(rxmax); 3152 rxq.tphrdesc_ena = 0; 3153 rxq.tphwdesc_ena = 0; 3154 rxq.tphdata_ena = 0; 3155 rxq.tphhead_ena = 0; 3156 rxq.lrxqthresh = 0; 3157 rxq.prefena = 1; 3158 3159 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3160 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3161 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, 3162 __arraycount(ixl_hmc_pack_rxq)); 3163 } 3164 3165 static void 3166 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3167 { 3168 void *hmc; 3169 3170 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3171 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3172 rxr->rxr_cons = rxr->rxr_prod = 0; 3173 } 3174 3175 static void 3176 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3177 { 3178 struct ixl_rx_map *maps, *rxm; 3179 unsigned int i; 3180 3181 maps = rxr->rxr_maps; 3182 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3183 rxm = &maps[i]; 3184 3185 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3186 } 3187 3188 ixl_dmamem_free(sc, &rxr->rxr_mem); 3189 mutex_destroy(&rxr->rxr_lock); 3190 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3191 kmem_free(rxr, sizeof(*rxr)); 3192 } 3193 3194 static inline void 3195 ixl_rx_csum(struct mbuf *m, uint64_t qword) 3196 { 3197 int flags_mask; 3198 3199 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) { 3200 /* No L3 or L4 checksum was calculated */ 3201 return; 3202 } 3203 3204 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) { 3205 case IXL_RX_DESC_PTYPE_IPV4FRAG: 3206 case IXL_RX_DESC_PTYPE_IPV4: 3207 case IXL_RX_DESC_PTYPE_SCTPV4: 3208 case IXL_RX_DESC_PTYPE_ICMPV4: 3209 flags_mask = M_CSUM_IPv4 | 
M_CSUM_IPv4_BAD; 3210 break; 3211 case IXL_RX_DESC_PTYPE_TCPV4: 3212 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3213 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD; 3214 break; 3215 case IXL_RX_DESC_PTYPE_UDPV4: 3216 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3217 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD; 3218 break; 3219 case IXL_RX_DESC_PTYPE_TCPV6: 3220 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD; 3221 break; 3222 case IXL_RX_DESC_PTYPE_UDPV6: 3223 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD; 3224 break; 3225 default: 3226 flags_mask = 0; 3227 } 3228 3229 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 | 3230 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)); 3231 3232 if (ISSET(qword, IXL_RX_DESC_IPE)) { 3233 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD); 3234 } 3235 3236 if (ISSET(qword, IXL_RX_DESC_L4E)) { 3237 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD); 3238 } 3239 } 3240 3241 static int 3242 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit) 3243 { 3244 struct ifnet *ifp = &sc->sc_ec.ec_if; 3245 struct ixl_rx_wb_desc_32 *ring, *rxd; 3246 struct ixl_rx_map *rxm; 3247 bus_dmamap_t map; 3248 unsigned int cons, prod; 3249 struct mbuf *m; 3250 uint64_t word, word0; 3251 unsigned int len; 3252 unsigned int mask; 3253 int done = 0, more = 0; 3254 3255 KASSERT(mutex_owned(&rxr->rxr_lock)); 3256 3257 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 3258 return 0; 3259 3260 prod = rxr->rxr_prod; 3261 cons = rxr->rxr_cons; 3262 3263 if (cons == prod) 3264 return 0; 3265 3266 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3267 0, IXL_DMA_LEN(&rxr->rxr_mem), 3268 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3269 3270 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3271 mask = sc->sc_rx_ring_ndescs - 1; 3272 3273 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 3274 3275 do { 3276 if (rxlimit-- <= 0) { 3277 more = 1; 3278 break; 3279 } 3280 3281 rxd = &ring[cons]; 3282 3283 word = le64toh(rxd->qword1); 3284 3285 if (!ISSET(word, IXL_RX_DESC_DD)) 3286 break; 3287 3288 rxm = &rxr->rxr_maps[cons]; 3289 3290 map = rxm->rxm_map; 3291 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3292 BUS_DMASYNC_POSTREAD); 3293 bus_dmamap_unload(sc->sc_dmat, map); 3294 3295 m = rxm->rxm_m; 3296 rxm->rxm_m = NULL; 3297 3298 KASSERT(m != NULL); 3299 3300 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT; 3301 m->m_len = len; 3302 m->m_pkthdr.len = 0; 3303 3304 m->m_next = NULL; 3305 *rxr->rxr_m_tail = m; 3306 rxr->rxr_m_tail = &m->m_next; 3307 3308 m = rxr->rxr_m_head; 3309 m->m_pkthdr.len += len; 3310 3311 if (ISSET(word, IXL_RX_DESC_EOP)) { 3312 word0 = le64toh(rxd->qword0); 3313 3314 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) { 3315 vlan_set_tag(m, 3316 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK)); 3317 } 3318 3319 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0) 3320 ixl_rx_csum(m, word); 3321 3322 if (!ISSET(word, 3323 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) { 3324 m_set_rcvif(m, ifp); 3325 if_statinc_ref(nsr, if_ipackets); 3326 if_statadd_ref(nsr, if_ibytes, 3327 m->m_pkthdr.len); 3328 if_percpuq_enqueue(ifp->if_percpuq, m); 3329 } else { 3330 if_statinc_ref(nsr, if_ierrors); 3331 m_freem(m); 3332 } 3333 3334 rxr->rxr_m_head = NULL; 3335 rxr->rxr_m_tail = &rxr->rxr_m_head; 3336 } 3337 3338 cons++; 3339 cons &= mask; 3340 3341 done = 1; 3342 } while (cons != prod); 3343 3344 if (done) { 3345 rxr->rxr_cons = cons; 3346 if (ixl_rxfill(sc, rxr) == -1) 3347 if_statinc_ref(nsr, if_iqdrops); 3348 } 3349 3350 IF_STAT_PUTREF(ifp); 3351 3352 
bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3353 0, IXL_DMA_LEN(&rxr->rxr_mem), 3354 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3355 3356 return more; 3357 } 3358 3359 static int 3360 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3361 { 3362 struct ixl_rx_rd_desc_32 *ring, *rxd; 3363 struct ixl_rx_map *rxm; 3364 bus_dmamap_t map; 3365 struct mbuf *m; 3366 unsigned int prod; 3367 unsigned int slots; 3368 unsigned int mask; 3369 int post = 0, error = 0; 3370 3371 KASSERT(mutex_owned(&rxr->rxr_lock)); 3372 3373 prod = rxr->rxr_prod; 3374 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons, 3375 sc->sc_rx_ring_ndescs); 3376 3377 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3378 mask = sc->sc_rx_ring_ndescs - 1; 3379 3380 if (__predict_false(slots <= 0)) 3381 return -1; 3382 3383 do { 3384 rxm = &rxr->rxr_maps[prod]; 3385 3386 MGETHDR(m, M_DONTWAIT, MT_DATA); 3387 if (m == NULL) { 3388 rxr->rxr_mgethdr_failed.ev_count++; 3389 error = -1; 3390 break; 3391 } 3392 3393 MCLGET(m, M_DONTWAIT); 3394 if (!ISSET(m->m_flags, M_EXT)) { 3395 rxr->rxr_mgetcl_failed.ev_count++; 3396 error = -1; 3397 m_freem(m); 3398 break; 3399 } 3400 3401 m->m_len = m->m_pkthdr.len = MCLBYTES; 3402 m_adj(m, ETHER_ALIGN); 3403 3404 map = rxm->rxm_map; 3405 3406 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 3407 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) { 3408 rxr->rxr_mbuf_load_failed.ev_count++; 3409 error = -1; 3410 m_freem(m); 3411 break; 3412 } 3413 3414 rxm->rxm_m = m; 3415 3416 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3417 BUS_DMASYNC_PREREAD); 3418 3419 rxd = &ring[prod]; 3420 3421 rxd->paddr = htole64(map->dm_segs[0].ds_addr); 3422 rxd->haddr = htole64(0); 3423 3424 prod++; 3425 prod &= mask; 3426 3427 post = 1; 3428 3429 } while (--slots); 3430 3431 if (post) { 3432 rxr->rxr_prod = prod; 3433 ixl_wr(sc, rxr->rxr_tail, prod); 3434 } 3435 3436 return error; 3437 } 3438 3439 static inline int 3440 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp, 3441 u_int txlimit, struct evcnt *txevcnt, 3442 u_int rxlimit, struct evcnt *rxevcnt) 3443 { 3444 struct ixl_tx_ring *txr = qp->qp_txr; 3445 struct ixl_rx_ring *rxr = qp->qp_rxr; 3446 int txmore, rxmore; 3447 int rv; 3448 3449 mutex_enter(&txr->txr_lock); 3450 txevcnt->ev_count++; 3451 txmore = ixl_txeof(sc, txr, txlimit); 3452 mutex_exit(&txr->txr_lock); 3453 3454 mutex_enter(&rxr->rxr_lock); 3455 rxevcnt->ev_count++; 3456 rxmore = ixl_rxeof(sc, rxr, rxlimit); 3457 mutex_exit(&rxr->rxr_lock); 3458 3459 rv = txmore | (rxmore << 1); 3460 3461 return rv; 3462 } 3463 3464 static void 3465 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp) 3466 { 3467 3468 if (qp->qp_workqueue) 3469 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL); 3470 else 3471 softint_schedule(qp->qp_si); 3472 } 3473 3474 static int 3475 ixl_intr(void *xsc) 3476 { 3477 struct ixl_softc *sc = xsc; 3478 struct ixl_tx_ring *txr; 3479 struct ixl_rx_ring *rxr; 3480 uint32_t icr, rxintr, txintr; 3481 int rv = 0; 3482 unsigned int i; 3483 3484 KASSERT(sc != NULL); 3485 3486 ixl_enable_other_intr(sc); 3487 icr = ixl_rd(sc, I40E_PFINT_ICR0); 3488 3489 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) { 3490 atomic_inc_64(&sc->sc_event_atq.ev_count); 3491 ixl_atq_done(sc); 3492 ixl_work_add(sc->sc_workq, &sc->sc_arq_task); 3493 rv = 1; 3494 } 3495 3496 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) { 3497 atomic_inc_64(&sc->sc_event_link.ev_count); 3498 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task); 3499 rv = 1; 3500 } 3501 3502 
rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3503 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3504
3505 if (txintr || rxintr) {
3506 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3507 txr = sc->sc_qps[i].qp_txr;
3508 rxr = sc->sc_qps[i].qp_rxr;
3509
3510 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3511 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3512 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3513 }
3514 rv = 1;
3515 }
3516
3517 return rv;
3518 }
3519
3520 static int
3521 ixl_queue_intr(void *xqp)
3522 {
3523 struct ixl_queue_pair *qp = xqp;
3524 struct ixl_tx_ring *txr = qp->qp_txr;
3525 struct ixl_rx_ring *rxr = qp->qp_rxr;
3526 struct ixl_softc *sc = qp->qp_sc;
3527 u_int txlimit, rxlimit;
3528 int more;
3529
3530 txlimit = sc->sc_tx_intr_process_limit;
3531 rxlimit = sc->sc_rx_intr_process_limit;
3532 qp->qp_workqueue = sc->sc_txrx_workqueue;
3533
3534 more = ixl_handle_queue_common(sc, qp,
3535 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3536
3537 if (more != 0) {
3538 ixl_sched_handle_queue(sc, qp);
3539 } else {
3540 /* for ALTQ */
3541 if (txr->txr_qid == 0)
3542 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3543 softint_schedule(txr->txr_si);
3544
3545 ixl_enable_queue_intr(sc, qp);
3546 }
3547
3548 return 1;
3549 }
3550
3551 static void
3552 ixl_handle_queue_wk(struct work *wk, void *xsc)
3553 {
3554 struct ixl_queue_pair *qp;
3555
3556 qp = container_of(wk, struct ixl_queue_pair, qp_work);
3557 ixl_handle_queue(qp);
3558 }
3559
3560 static void
3561 ixl_handle_queue(void *xqp)
3562 {
3563 struct ixl_queue_pair *qp = xqp;
3564 struct ixl_softc *sc = qp->qp_sc;
3565 struct ixl_tx_ring *txr = qp->qp_txr;
3566 struct ixl_rx_ring *rxr = qp->qp_rxr;
3567 u_int txlimit, rxlimit;
3568 int more;
3569
3570 txlimit = sc->sc_tx_process_limit;
3571 rxlimit = sc->sc_rx_process_limit;
3572
3573 more = ixl_handle_queue_common(sc, qp,
3574 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3575
3576 if (more != 0)
3577 ixl_sched_handle_queue(sc, qp);
3578 else
3579 ixl_enable_queue_intr(sc, qp);
3580 }
3581
3582 static inline void
3583 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3584 {
3585 uint32_t hmc_idx, hmc_isvf;
3586 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3587
3588 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3589 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3590 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3591 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3592 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3593 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3594 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3595 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3596 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3597
3598 device_printf(sc->sc_dev,
3599 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3600 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3601 }
3602
3603 static int
3604 ixl_other_intr(void *xsc)
3605 {
3606 struct ixl_softc *sc = xsc;
3607 uint32_t icr, mask, reg;
3608 int rv = 0;
3609
3610 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3611 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3612
3613 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3614 atomic_inc_64(&sc->sc_event_atq.ev_count);
3615 ixl_atq_done(sc);
3616 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3617 rv = 1;
3618 }
3619
3620 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3621 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3622 device_printf(sc->sc_dev, "link
stat changed\n"); 3623 3624 atomic_inc_64(&sc->sc_event_link.ev_count); 3625 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task); 3626 rv = 1; 3627 } 3628 3629 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) { 3630 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK); 3631 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 3632 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK; 3633 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3634 3635 device_printf(sc->sc_dev, "GRST: %s\n", 3636 reg == I40E_RESET_CORER ? "CORER" : 3637 reg == I40E_RESET_GLOBR ? "GLOBR" : 3638 reg == I40E_RESET_EMPR ? "EMPR" : 3639 "POR"); 3640 } 3641 3642 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK)) 3643 atomic_inc_64(&sc->sc_event_ecc_err.ev_count); 3644 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)) 3645 atomic_inc_64(&sc->sc_event_pci_exception.ev_count); 3646 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK)) 3647 atomic_inc_64(&sc->sc_event_crit_err.ev_count); 3648 3649 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) { 3650 CLR(mask, IXL_ICR0_CRIT_ERR_MASK); 3651 device_printf(sc->sc_dev, "critical error\n"); 3652 } 3653 3654 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) { 3655 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO); 3656 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK)) 3657 ixl_print_hmc_error(sc, reg); 3658 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0); 3659 } 3660 3661 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask); 3662 ixl_flush(sc); 3663 ixl_enable_other_intr(sc); 3664 return rv; 3665 } 3666 3667 static void 3668 ixl_get_link_status_done(struct ixl_softc *sc, 3669 const struct ixl_aq_desc *iaq) 3670 { 3671 struct ixl_aq_desc iaq_buf; 3672 3673 memcpy(&iaq_buf, iaq, sizeof(iaq_buf)); 3674 3675 /* 3676 * The lock can be released here 3677 * because there is no post processing about ATQ 3678 */ 3679 mutex_exit(&sc->sc_atq_lock); 3680 ixl_link_state_update(sc, &iaq_buf); 3681 mutex_enter(&sc->sc_atq_lock); 3682 } 3683 3684 static void 3685 ixl_get_link_status(void *xsc) 3686 { 3687 struct ixl_softc *sc = xsc; 3688 struct ixl_aq_desc *iaq; 3689 struct ixl_aq_link_param *param; 3690 int error; 3691 3692 mutex_enter(&sc->sc_atq_lock); 3693 3694 iaq = &sc->sc_link_state_atq.iatq_desc; 3695 memset(iaq, 0, sizeof(*iaq)); 3696 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 3697 param = (struct ixl_aq_link_param *)iaq->iaq_param; 3698 param->notify = IXL_AQ_LINK_NOTIFY; 3699 3700 error = ixl_atq_exec_locked(sc, &sc->sc_link_state_atq); 3701 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 3702 3703 if (error == 0) { 3704 ixl_get_link_status_done(sc, iaq); 3705 } 3706 3707 mutex_exit(&sc->sc_atq_lock); 3708 } 3709 3710 static void 3711 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3712 { 3713 struct ifnet *ifp = &sc->sc_ec.ec_if; 3714 int link_state; 3715 3716 mutex_enter(&sc->sc_cfg_lock); 3717 link_state = ixl_set_link_status_locked(sc, iaq); 3718 mutex_exit(&sc->sc_cfg_lock); 3719 3720 if (ifp->if_link_state != link_state) 3721 if_link_state_change(ifp, link_state); 3722 3723 if (link_state != LINK_STATE_DOWN) { 3724 kpreempt_disable(); 3725 if_schedule_deferred_start(ifp); 3726 kpreempt_enable(); 3727 } 3728 } 3729 3730 static void 3731 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq, 3732 const char *msg) 3733 { 3734 char buf[512]; 3735 size_t len; 3736 3737 len = sizeof(buf); 3738 buf[--len] = '\0'; 3739 3740 device_printf(sc->sc_dev, "%s\n", msg); 3741 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags)); 3742 device_printf(sc->sc_dev, "flags %s opcode %04x\n", 3743 buf, 
le16toh(iaq->iaq_opcode)); 3744 device_printf(sc->sc_dev, "datalen %u retval %u\n", 3745 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval)); 3746 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie); 3747 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n", 3748 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]), 3749 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3])); 3750 } 3751 3752 static void 3753 ixl_arq(void *xsc) 3754 { 3755 struct ixl_softc *sc = xsc; 3756 struct ixl_aq_desc *arq, *iaq; 3757 struct ixl_aq_buf *aqb; 3758 unsigned int cons = sc->sc_arq_cons; 3759 unsigned int prod; 3760 int done = 0; 3761 3762 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) & 3763 sc->sc_aq_regs->arq_head_mask; 3764 3765 if (cons == prod) 3766 goto done; 3767 3768 arq = IXL_DMA_KVA(&sc->sc_arq); 3769 3770 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3771 0, IXL_DMA_LEN(&sc->sc_arq), 3772 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3773 3774 do { 3775 iaq = &arq[cons]; 3776 aqb = sc->sc_arq_live[cons]; 3777 3778 KASSERT(aqb != NULL); 3779 3780 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN, 3781 BUS_DMASYNC_POSTREAD); 3782 3783 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3784 ixl_aq_dump(sc, iaq, "arq event"); 3785 3786 switch (iaq->iaq_opcode) { 3787 case htole16(IXL_AQ_OP_PHY_LINK_STATUS): 3788 ixl_link_state_update(sc, iaq); 3789 break; 3790 } 3791 3792 memset(iaq, 0, sizeof(*iaq)); 3793 sc->sc_arq_live[cons] = NULL; 3794 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry); 3795 3796 cons++; 3797 cons &= IXL_AQ_MASK; 3798 3799 done = 1; 3800 } while (cons != prod); 3801 3802 if (done) { 3803 sc->sc_arq_cons = cons; 3804 ixl_arq_fill(sc); 3805 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3806 0, IXL_DMA_LEN(&sc->sc_arq), 3807 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3808 } 3809 3810 done: 3811 ixl_enable_other_intr(sc); 3812 } 3813 3814 static void 3815 ixl_atq_set(struct ixl_atq *iatq, 3816 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *)) 3817 { 3818 3819 iatq->iatq_fn = fn; 3820 } 3821 3822 static int 3823 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3824 { 3825 struct ixl_aq_desc *atq, *slot; 3826 unsigned int prod, cons, prod_next; 3827 3828 /* assert locked */ 3829 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3830 3831 atq = IXL_DMA_KVA(&sc->sc_atq); 3832 prod = sc->sc_atq_prod; 3833 cons = sc->sc_atq_cons; 3834 prod_next = (prod +1) & IXL_AQ_MASK; 3835 3836 if (cons == prod_next) 3837 return ENOMEM; 3838 3839 slot = &atq[prod]; 3840 3841 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3842 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3843 3844 KASSERT(iatq->iatq_fn != NULL); 3845 *slot = iatq->iatq_desc; 3846 slot->iaq_cookie = (uint64_t)((intptr_t)iatq); 3847 3848 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3849 ixl_aq_dump(sc, slot, "atq command"); 3850 3851 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3852 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3853 3854 sc->sc_atq_prod = prod_next; 3855 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod); 3856 3857 return 0; 3858 } 3859 3860 static void 3861 ixl_atq_done_locked(struct ixl_softc *sc) 3862 { 3863 struct ixl_aq_desc *atq, *slot; 3864 struct ixl_atq *iatq; 3865 unsigned int cons; 3866 unsigned int prod; 3867 3868 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3869 3870 prod = sc->sc_atq_prod; 3871 cons = sc->sc_atq_cons; 3872 3873 if (prod == cons) 3874 return; 3875 3876 atq = IXL_DMA_KVA(&sc->sc_atq); 3877 3878 
bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3879 0, IXL_DMA_LEN(&sc->sc_atq),
3880 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3881
3882 do {
3883 slot = &atq[cons];
3884 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3885 break;
3886
3887 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3888 iatq->iatq_desc = *slot;
3889
3890 memset(slot, 0, sizeof(*slot));
3891
3892 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3893 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3894
3895 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3896
3897 cons++;
3898 cons &= IXL_AQ_MASK;
3899 } while (cons != prod);
3900
3901 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3902 0, IXL_DMA_LEN(&sc->sc_atq),
3903 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3904
3905 sc->sc_atq_cons = cons;
3906 }
3907
3908 static void
3909 ixl_atq_done(struct ixl_softc *sc)
3910 {
3911
3912 mutex_enter(&sc->sc_atq_lock);
3913 ixl_atq_done_locked(sc);
3914 mutex_exit(&sc->sc_atq_lock);
3915 }
3916
3917 static void
3918 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3919 {
3920
3921 KASSERT(mutex_owned(&sc->sc_atq_lock));
3922
3923 cv_signal(&sc->sc_atq_cv);
3924 }
3925
3926 static int
3927 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3928 {
3929 int error;
3930
3931 mutex_enter(&sc->sc_atq_lock);
3932 error = ixl_atq_exec_locked(sc, iatq);
3933 mutex_exit(&sc->sc_atq_lock);
3934
3935 return error;
3936 }
3937
3938 static int
3939 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3940 {
3941 int error;
3942
3943 KASSERT(mutex_owned(&sc->sc_atq_lock));
3944 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3945
3946 ixl_atq_set(iatq, ixl_wakeup);
3947
3948 error = ixl_atq_post_locked(sc, iatq);
3949 if (error)
3950 return error;
3951
3952 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3953 IXL_ATQ_EXEC_TIMEOUT);
3954
3955 return error;
3956 }
3957
3958 static int
3959 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3960 {
3961 struct ixl_aq_desc *atq, *slot;
3962 unsigned int prod;
3963 unsigned int t = 0;
3964
3965 mutex_enter(&sc->sc_atq_lock);
3966
3967 atq = IXL_DMA_KVA(&sc->sc_atq);
3968 prod = sc->sc_atq_prod;
3969 slot = atq + prod;
3970
3971 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3972 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3973
3974 *slot = *iaq;
3975 slot->iaq_flags |= htole16(IXL_AQ_SI);
3976
3977 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3978 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3979
3980 prod++;
3981 prod &= IXL_AQ_MASK;
3982 sc->sc_atq_prod = prod;
3983 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3984
3985 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3986 delaymsec(1);
3987
3988 if (t++ > tm) {
3989 mutex_exit(&sc->sc_atq_lock);
3990 return ETIMEDOUT;
3991 }
3992 }
3993
3994 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3995 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3996 *iaq = *slot;
3997 memset(slot, 0, sizeof(*slot));
3998 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3999 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
4000
4001 sc->sc_atq_cons = prod;
4002
4003 mutex_exit(&sc->sc_atq_lock);
4004
4005 return 0;
4006 }
4007
4008 static int
4009 ixl_get_version(struct ixl_softc *sc)
4010 {
4011 struct ixl_aq_desc iaq;
4012 uint32_t fwbuild, fwver, apiver;
4013 uint16_t api_maj_ver, api_min_ver;
4014
4015 memset(&iaq, 0, sizeof(iaq));
4016 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
4017
4018
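/*
 * The reply is collected with ixl_atq_poll(), which busy-waits on the
 * admin transmit queue head register, so no admin queue interrupt is
 * needed for this command.
 */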
4019 4020 if (ixl_atq_poll(sc, &iaq, 2000) != 0) 4021 return ETIMEDOUT; 4022 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) 4023 return EIO; 4024 4025 fwbuild = le32toh(iaq.iaq_param[1]); 4026 fwver = le32toh(iaq.iaq_param[2]); 4027 apiver = le32toh(iaq.iaq_param[3]); 4028 4029 api_maj_ver = (uint16_t)apiver; 4030 api_min_ver = (uint16_t)(apiver >> 16); 4031 4032 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver, 4033 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver); 4034 4035 if (sc->sc_mac_type == I40E_MAC_X722) { 4036 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK | 4037 IXL_SC_AQ_FLAG_NVMREAD); 4038 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL); 4039 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS); 4040 } 4041 4042 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min)) 4043 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) { 4044 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL); 4045 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK); 4046 } 4047 #undef IXL_API_VER 4048 4049 return 0; 4050 } 4051 4052 static int 4053 ixl_get_nvm_version(struct ixl_softc *sc) 4054 { 4055 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo; 4056 uint32_t eetrack, oem; 4057 uint16_t nvm_maj_ver, nvm_min_ver, oem_build; 4058 uint8_t oem_ver, oem_patch; 4059 4060 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0; 4061 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver); 4062 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); 4063 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); 4064 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); 4065 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi); 4066 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo); 4067 4068 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK); 4069 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK); 4070 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo; 4071 oem = ((uint32_t)oem_hi << 16) | oem_lo; 4072 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK); 4073 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK); 4074 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK); 4075 4076 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d", 4077 nvm_maj_ver, nvm_min_ver, eetrack, 4078 oem_ver, oem_build, oem_patch); 4079 4080 return 0; 4081 } 4082 4083 static int 4084 ixl_pxe_clear(struct ixl_softc *sc) 4085 { 4086 struct ixl_aq_desc iaq; 4087 int rv; 4088 4089 memset(&iaq, 0, sizeof(iaq)); 4090 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE); 4091 iaq.iaq_param[0] = htole32(0x2); 4092 4093 rv = ixl_atq_poll(sc, &iaq, 250); 4094 4095 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1); 4096 4097 if (rv != 0) 4098 return ETIMEDOUT; 4099 4100 switch (iaq.iaq_retval) { 4101 case htole16(IXL_AQ_RC_OK): 4102 case htole16(IXL_AQ_RC_EEXIST): 4103 break; 4104 default: 4105 return EIO; 4106 } 4107 4108 return 0; 4109 } 4110 4111 static int 4112 ixl_lldp_shut(struct ixl_softc *sc) 4113 { 4114 struct ixl_aq_desc iaq; 4115 4116 memset(&iaq, 0, sizeof(iaq)); 4117 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT); 4118 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN); 4119 4120 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4121 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n"); 4122 return -1; 4123 } 4124 4125 switch (iaq.iaq_retval) { 4126 case htole16(IXL_AQ_RC_EMODE): 4127 case htole16(IXL_AQ_RC_EPERM): 4128 /* ignore silently */ 4129 default: 4130 break; 4131 } 4132 4133 return 0; 4134 } 4135 4136 static void 4137 ixl_parse_hw_capability(struct ixl_softc *sc, 
struct ixl_aq_capability *cap) 4138 { 4139 uint16_t id; 4140 uint32_t number, logical_id; 4141 4142 id = le16toh(cap->cap_id); 4143 number = le32toh(cap->number); 4144 logical_id = le32toh(cap->logical_id); 4145 4146 switch (id) { 4147 case IXL_AQ_CAP_RSS: 4148 sc->sc_rss_table_size = number; 4149 sc->sc_rss_table_entry_width = logical_id; 4150 break; 4151 case IXL_AQ_CAP_RXQ: 4152 case IXL_AQ_CAP_TXQ: 4153 sc->sc_nqueue_pairs_device = MIN(number, 4154 sc->sc_nqueue_pairs_device); 4155 break; 4156 } 4157 } 4158 4159 static int 4160 ixl_get_hw_capabilities(struct ixl_softc *sc) 4161 { 4162 struct ixl_dmamem idm; 4163 struct ixl_aq_desc iaq; 4164 struct ixl_aq_capability *caps; 4165 size_t i, ncaps; 4166 bus_size_t caps_size; 4167 uint16_t status; 4168 int rv; 4169 4170 caps_size = sizeof(caps[0]) * 40; 4171 memset(&iaq, 0, sizeof(iaq)); 4172 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP); 4173 4174 do { 4175 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) { 4176 return -1; 4177 } 4178 4179 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4180 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4181 iaq.iaq_datalen = htole16(caps_size); 4182 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4183 4184 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4185 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD); 4186 4187 rv = ixl_atq_poll(sc, &iaq, 250); 4188 4189 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4190 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD); 4191 4192 if (rv != 0) { 4193 aprint_error(", HW capabilities timeout\n"); 4194 goto done; 4195 } 4196 4197 status = le16toh(iaq.iaq_retval); 4198 4199 if (status == IXL_AQ_RC_ENOMEM) { 4200 caps_size = le16toh(iaq.iaq_datalen); 4201 ixl_dmamem_free(sc, &idm); 4202 } 4203 } while (status == IXL_AQ_RC_ENOMEM); 4204 4205 if (status != IXL_AQ_RC_OK) { 4206 aprint_error(", HW capabilities error\n"); 4207 goto done; 4208 } 4209 4210 caps = IXL_DMA_KVA(&idm); 4211 ncaps = le16toh(iaq.iaq_param[1]); 4212 4213 for (i = 0; i < ncaps; i++) { 4214 ixl_parse_hw_capability(sc, &caps[i]); 4215 } 4216 4217 done: 4218 ixl_dmamem_free(sc, &idm); 4219 return rv; 4220 } 4221 4222 static int 4223 ixl_get_mac(struct ixl_softc *sc) 4224 { 4225 struct ixl_dmamem idm; 4226 struct ixl_aq_desc iaq; 4227 struct ixl_aq_mac_addresses *addrs; 4228 int rv; 4229 4230 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) { 4231 aprint_error(", unable to allocate mac addresses\n"); 4232 return -1; 4233 } 4234 4235 memset(&iaq, 0, sizeof(iaq)); 4236 iaq.iaq_flags = htole16(IXL_AQ_BUF); 4237 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ); 4238 iaq.iaq_datalen = htole16(sizeof(*addrs)); 4239 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4240 4241 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4242 BUS_DMASYNC_PREREAD); 4243 4244 rv = ixl_atq_poll(sc, &iaq, 250); 4245 4246 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4247 BUS_DMASYNC_POSTREAD); 4248 4249 if (rv != 0) { 4250 aprint_error(", MAC ADDRESS READ timeout\n"); 4251 rv = -1; 4252 goto done; 4253 } 4254 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4255 aprint_error(", MAC ADDRESS READ error\n"); 4256 rv = -1; 4257 goto done; 4258 } 4259 4260 addrs = IXL_DMA_KVA(&idm); 4261 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) { 4262 printf(", port address is not valid\n"); 4263 goto done; 4264 } 4265 4266 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN); 4267 rv = 0; 4268 4269 done: 4270 ixl_dmamem_free(sc, &idm); 4271 return rv; 4272 } 4273 4274 static int 4275 ixl_get_switch_config(struct 
ixl_softc *sc) 4276 { 4277 struct ixl_dmamem idm; 4278 struct ixl_aq_desc iaq; 4279 struct ixl_aq_switch_config *hdr; 4280 struct ixl_aq_switch_config_element *elms, *elm; 4281 unsigned int nelm, i; 4282 int rv; 4283 4284 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4285 aprint_error_dev(sc->sc_dev, 4286 "unable to allocate switch config buffer\n"); 4287 return -1; 4288 } 4289 4290 memset(&iaq, 0, sizeof(iaq)); 4291 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4292 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4293 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG); 4294 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN); 4295 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4296 4297 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4298 BUS_DMASYNC_PREREAD); 4299 4300 rv = ixl_atq_poll(sc, &iaq, 250); 4301 4302 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4303 BUS_DMASYNC_POSTREAD); 4304 4305 if (rv != 0) { 4306 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n"); 4307 rv = -1; 4308 goto done; 4309 } 4310 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4311 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n"); 4312 rv = -1; 4313 goto done; 4314 } 4315 4316 hdr = IXL_DMA_KVA(&idm); 4317 elms = (struct ixl_aq_switch_config_element *)(hdr + 1); 4318 4319 nelm = le16toh(hdr->num_reported); 4320 if (nelm < 1) { 4321 aprint_error_dev(sc->sc_dev, "no switch config available\n"); 4322 rv = -1; 4323 goto done; 4324 } 4325 4326 for (i = 0; i < nelm; i++) { 4327 elm = &elms[i]; 4328 4329 aprint_debug_dev(sc->sc_dev, 4330 "type %x revision %u seid %04x\n", 4331 elm->type, elm->revision, le16toh(elm->seid)); 4332 aprint_debug_dev(sc->sc_dev, 4333 "uplink %04x downlink %04x\n", 4334 le16toh(elm->uplink_seid), 4335 le16toh(elm->downlink_seid)); 4336 aprint_debug_dev(sc->sc_dev, 4337 "conntype %x scheduler %04x extra %04x\n", 4338 elm->connection_type, 4339 le16toh(elm->scheduler_id), 4340 le16toh(elm->element_info)); 4341 } 4342 4343 elm = &elms[0]; 4344 4345 sc->sc_uplink_seid = elm->uplink_seid; 4346 sc->sc_downlink_seid = elm->downlink_seid; 4347 sc->sc_seid = elm->seid; 4348 4349 if ((sc->sc_uplink_seid == htole16(0)) != 4350 (sc->sc_downlink_seid == htole16(0))) { 4351 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n"); 4352 rv = -1; 4353 goto done; 4354 } 4355 4356 done: 4357 ixl_dmamem_free(sc, &idm); 4358 return rv; 4359 } 4360 4361 static int 4362 ixl_phy_mask_ints(struct ixl_softc *sc) 4363 { 4364 struct ixl_aq_desc iaq; 4365 4366 memset(&iaq, 0, sizeof(iaq)); 4367 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK); 4368 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK & 4369 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL | 4370 IXL_AQ_PHY_EV_MEDIA_NA)); 4371 4372 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4373 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n"); 4374 return -1; 4375 } 4376 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4377 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n"); 4378 return -1; 4379 } 4380 4381 return 0; 4382 } 4383 4384 static int 4385 ixl_get_phy_abilities(struct ixl_softc *sc,struct ixl_dmamem *idm) 4386 { 4387 struct ixl_aq_desc iaq; 4388 int rv; 4389 4390 memset(&iaq, 0, sizeof(iaq)); 4391 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4392 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? 
IXL_AQ_LB : 0)); 4393 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES); 4394 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm)); 4395 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT); 4396 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 4397 4398 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4399 BUS_DMASYNC_PREREAD); 4400 4401 rv = ixl_atq_poll(sc, &iaq, 250); 4402 4403 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4404 BUS_DMASYNC_POSTREAD); 4405 4406 if (rv != 0) 4407 return -1; 4408 4409 return le16toh(iaq.iaq_retval); 4410 } 4411 4412 static int 4413 ixl_get_phy_info(struct ixl_softc *sc) 4414 { 4415 struct ixl_dmamem idm; 4416 struct ixl_aq_phy_abilities *phy; 4417 int rv; 4418 4419 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4420 aprint_error_dev(sc->sc_dev, 4421 "unable to allocate phy abilities buffer\n"); 4422 return -1; 4423 } 4424 4425 rv = ixl_get_phy_abilities(sc, &idm); 4426 switch (rv) { 4427 case -1: 4428 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n"); 4429 goto done; 4430 case IXL_AQ_RC_OK: 4431 break; 4432 case IXL_AQ_RC_EIO: 4433 aprint_error_dev(sc->sc_dev,"unable to query phy types\n"); 4434 goto done; 4435 default: 4436 aprint_error_dev(sc->sc_dev, 4437 "GET PHY ABILITIIES error %u\n", rv); 4438 goto done; 4439 } 4440 4441 phy = IXL_DMA_KVA(&idm); 4442 4443 sc->sc_phy_types = le32toh(phy->phy_type); 4444 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32; 4445 4446 sc->sc_phy_abilities = phy->abilities; 4447 sc->sc_phy_linkspeed = phy->link_speed; 4448 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info & 4449 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS | 4450 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS); 4451 sc->sc_eee_cap = phy->eee_capability; 4452 sc->sc_eeer_val = phy->eeer_val; 4453 sc->sc_d3_lpan = phy->d3_lpan; 4454 4455 rv = 0; 4456 4457 done: 4458 ixl_dmamem_free(sc, &idm); 4459 return rv; 4460 } 4461 4462 static int 4463 ixl_set_phy_config(struct ixl_softc *sc, 4464 uint8_t link_speed, uint8_t abilities, bool polling) 4465 { 4466 struct ixl_aq_phy_param *param; 4467 struct ixl_atq iatq; 4468 struct ixl_aq_desc *iaq; 4469 int error; 4470 4471 memset(&iatq, 0, sizeof(iatq)); 4472 4473 iaq = &iatq.iatq_desc; 4474 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG); 4475 param = (struct ixl_aq_phy_param *)&iaq->iaq_param; 4476 param->phy_types = htole32((uint32_t)sc->sc_phy_types); 4477 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32); 4478 param->link_speed = link_speed; 4479 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK; 4480 param->fec_cfg = sc->sc_phy_fec_cfg; 4481 param->eee_capability = sc->sc_eee_cap; 4482 param->eeer_val = sc->sc_eeer_val; 4483 param->d3_lpan = sc->sc_d3_lpan; 4484 4485 if (polling) 4486 error = ixl_atq_poll(sc, iaq, 250); 4487 else 4488 error = ixl_atq_exec(sc, &iatq); 4489 4490 if (error != 0) 4491 return error; 4492 4493 switch (le16toh(iaq->iaq_retval)) { 4494 case IXL_AQ_RC_OK: 4495 break; 4496 case IXL_AQ_RC_EPERM: 4497 return EPERM; 4498 default: 4499 return EIO; 4500 } 4501 4502 return 0; 4503 } 4504 4505 static int 4506 ixl_set_phy_autoselect(struct ixl_softc *sc) 4507 { 4508 uint8_t link_speed, abilities; 4509 4510 link_speed = sc->sc_phy_linkspeed; 4511 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO; 4512 4513 return ixl_set_phy_config(sc, link_speed, abilities, true); 4514 } 4515 4516 static int 4517 ixl_get_link_status_poll(struct ixl_softc *sc, int *l) 4518 { 4519 struct ixl_aq_desc iaq; 4520 struct 
ixl_aq_link_param *param; 4521 int link; 4522 4523 memset(&iaq, 0, sizeof(iaq)); 4524 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 4525 param = (struct ixl_aq_link_param *)iaq.iaq_param; 4526 param->notify = IXL_AQ_LINK_NOTIFY; 4527 4528 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4529 return ETIMEDOUT; 4530 } 4531 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4532 return EIO; 4533 } 4534 4535 /* It is unneccessary to hold lock */ 4536 link = ixl_set_link_status_locked(sc, &iaq); 4537 4538 if (l != NULL) 4539 *l = link; 4540 4541 return 0; 4542 } 4543 4544 static int 4545 ixl_get_vsi(struct ixl_softc *sc) 4546 { 4547 struct ixl_dmamem *vsi = &sc->sc_scratch; 4548 struct ixl_aq_desc iaq; 4549 struct ixl_aq_vsi_param *param; 4550 struct ixl_aq_vsi_reply *reply; 4551 struct ixl_aq_vsi_data *data; 4552 int rv; 4553 4554 /* grumble, vsi info isn't "known" at compile time */ 4555 4556 memset(&iaq, 0, sizeof(iaq)); 4557 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4558 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4559 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS); 4560 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4561 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4562 4563 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4564 param->uplink_seid = sc->sc_seid; 4565 4566 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4567 BUS_DMASYNC_PREREAD); 4568 4569 rv = ixl_atq_poll(sc, &iaq, 250); 4570 4571 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4572 BUS_DMASYNC_POSTREAD); 4573 4574 if (rv != 0) { 4575 return ETIMEDOUT; 4576 } 4577 4578 switch (le16toh(iaq.iaq_retval)) { 4579 case IXL_AQ_RC_OK: 4580 break; 4581 case IXL_AQ_RC_ENOENT: 4582 return ENOENT; 4583 case IXL_AQ_RC_EACCES: 4584 return EACCES; 4585 default: 4586 return EIO; 4587 } 4588 4589 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param; 4590 sc->sc_vsi_number = le16toh(reply->vsi_number); 4591 data = IXL_DMA_KVA(vsi); 4592 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx); 4593 4594 return 0; 4595 } 4596 4597 static int 4598 ixl_set_vsi(struct ixl_softc *sc) 4599 { 4600 struct ixl_dmamem *vsi = &sc->sc_scratch; 4601 struct ixl_aq_desc iaq; 4602 struct ixl_aq_vsi_param *param; 4603 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi); 4604 unsigned int qnum; 4605 uint16_t val; 4606 int rv; 4607 4608 qnum = sc->sc_nqueue_pairs - 1; 4609 4610 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP | 4611 IXL_AQ_VSI_VALID_VLAN); 4612 4613 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK)); 4614 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG)); 4615 data->queue_mapping[0] = htole16(0); 4616 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) | 4617 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)); 4618 4619 val = le16toh(data->port_vlan_flags); 4620 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK); 4621 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL); 4622 4623 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) { 4624 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH); 4625 } else { 4626 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING); 4627 } 4628 4629 data->port_vlan_flags = htole16(val); 4630 4631 /* grumble, vsi info isn't "known" at compile time */ 4632 4633 memset(&iaq, 0, sizeof(iaq)); 4634 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4635 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? 
IXL_AQ_LB : 0)); 4636 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS); 4637 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4638 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4639 4640 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4641 param->uplink_seid = sc->sc_seid; 4642 4643 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4644 BUS_DMASYNC_PREWRITE); 4645 4646 rv = ixl_atq_poll(sc, &iaq, 250); 4647 4648 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4649 BUS_DMASYNC_POSTWRITE); 4650 4651 if (rv != 0) { 4652 return ETIMEDOUT; 4653 } 4654 4655 switch (le16toh(iaq.iaq_retval)) { 4656 case IXL_AQ_RC_OK: 4657 break; 4658 case IXL_AQ_RC_ENOENT: 4659 return ENOENT; 4660 case IXL_AQ_RC_EACCES: 4661 return EACCES; 4662 default: 4663 return EIO; 4664 } 4665 4666 return 0; 4667 } 4668 4669 static void 4670 ixl_set_filter_control(struct ixl_softc *sc) 4671 { 4672 uint32_t reg; 4673 4674 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0); 4675 4676 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK); 4677 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT); 4678 4679 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK); 4680 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK); 4681 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK); 4682 4683 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg); 4684 } 4685 4686 static inline void 4687 ixl_get_default_rss_key(uint32_t *buf, size_t len) 4688 { 4689 size_t cplen; 4690 uint8_t rss_seed[RSS_KEYSIZE]; 4691 4692 rss_getkey(rss_seed); 4693 memset(buf, 0, len); 4694 4695 cplen = MIN(len, sizeof(rss_seed)); 4696 memcpy(buf, rss_seed, cplen); 4697 } 4698 4699 static int 4700 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen) 4701 { 4702 struct ixl_dmamem *idm; 4703 struct ixl_atq iatq; 4704 struct ixl_aq_desc *iaq; 4705 struct ixl_aq_rss_key_param *param; 4706 struct ixl_aq_rss_key_data *data; 4707 size_t len, datalen, stdlen, extlen; 4708 uint16_t vsi_id; 4709 int rv; 4710 4711 memset(&iatq, 0, sizeof(iatq)); 4712 iaq = &iatq.iatq_desc; 4713 idm = &sc->sc_aqbuf; 4714 4715 datalen = sizeof(*data); 4716 4717 /*XXX The buf size has to be less than the size of the register */ 4718 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen); 4719 4720 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4721 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4722 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY); 4723 iaq->iaq_datalen = htole16(datalen); 4724 4725 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param; 4726 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) | 4727 IXL_AQ_RSSKEY_VSI_VALID; 4728 param->vsi_id = htole16(vsi_id); 4729 4730 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 4731 data = IXL_DMA_KVA(idm); 4732 4733 len = MIN(keylen, datalen); 4734 stdlen = MIN(sizeof(data->standard_rss_key), len); 4735 memcpy(data->standard_rss_key, key, stdlen); 4736 len = (len > stdlen) ? (len - stdlen) : 0; 4737 4738 extlen = MIN(sizeof(data->extended_hash_key), len); 4739 extlen = (stdlen < keylen) ? 
extlen : 0; 4740 memcpy(data->extended_hash_key, key + stdlen, extlen); 4741 4742 ixl_aq_dva(iaq, IXL_DMA_DVA(idm)); 4743 4744 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4745 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE); 4746 4747 rv = ixl_atq_exec(sc, &iatq); 4748 4749 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4750 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE); 4751 4752 if (rv != 0) { 4753 return ETIMEDOUT; 4754 } 4755 4756 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) { 4757 return EIO; 4758 } 4759 4760 return 0; 4761 } 4762 4763 static int 4764 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen) 4765 { 4766 struct ixl_dmamem *idm; 4767 struct ixl_atq iatq; 4768 struct ixl_aq_desc *iaq; 4769 struct ixl_aq_rss_lut_param *param; 4770 uint16_t vsi_id; 4771 uint8_t *data; 4772 size_t dmalen; 4773 int rv; 4774 4775 memset(&iatq, 0, sizeof(iatq)); 4776 iaq = &iatq.iatq_desc; 4777 idm = &sc->sc_aqbuf; 4778 4779 dmalen = MIN(lutlen, IXL_DMA_LEN(idm)); 4780 4781 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4782 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4783 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT); 4784 iaq->iaq_datalen = htole16(dmalen); 4785 4786 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 4787 data = IXL_DMA_KVA(idm); 4788 memcpy(data, lut, dmalen); 4789 ixl_aq_dva(iaq, IXL_DMA_DVA(idm)); 4790 4791 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param; 4792 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) | 4793 IXL_AQ_RSSLUT_VSI_VALID; 4794 param->vsi_id = htole16(vsi_id); 4795 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF << 4796 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT); 4797 4798 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4799 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE); 4800 4801 rv = ixl_atq_exec(sc, &iatq); 4802 4803 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4804 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE); 4805 4806 if (rv != 0) { 4807 return ETIMEDOUT; 4808 } 4809 4810 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) { 4811 return EIO; 4812 } 4813 4814 return 0; 4815 } 4816 4817 static int 4818 ixl_register_rss_key(struct ixl_softc *sc) 4819 { 4820 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG]; 4821 int rv; 4822 size_t i; 4823 4824 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed)); 4825 4826 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)){ 4827 rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed, 4828 sizeof(rss_seed)); 4829 } else { 4830 rv = 0; 4831 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 4832 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]); 4833 } 4834 } 4835 4836 return rv; 4837 } 4838 4839 static void 4840 ixl_register_rss_pctype(struct ixl_softc *sc) 4841 { 4842 uint64_t set_hena = 0; 4843 uint32_t hena0, hena1; 4844 4845 /* 4846 * We use TCP/UDP with IPv4/IPv6 by default. 4847 * Note: the device cannot use just the IP header of each 4848 * TCP/UDP packet for the RSS hash calculation.
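	 *
	 * The enabled packet classifier types form a 64bit bitmap
	 * (IXL_RSS_HENA_DEFAULT_XL710/_X722); its low 32bits are written
	 * to I40E_PFQF_HENA(0) and the upper 32bits to I40E_PFQF_HENA(1)
	 * below.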
4849 */ 4850 if (sc->sc_mac_type == I40E_MAC_X722) 4851 set_hena = IXL_RSS_HENA_DEFAULT_X722; 4852 else 4853 set_hena = IXL_RSS_HENA_DEFAULT_XL710; 4854 4855 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0)); 4856 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1)); 4857 4858 SET(hena0, set_hena); 4859 SET(hena1, set_hena >> 32); 4860 4861 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0); 4862 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1); 4863 } 4864 4865 static int 4866 ixl_register_rss_hlut(struct ixl_softc *sc) 4867 { 4868 unsigned int qid; 4869 uint8_t hlut_buf[512], lut_mask; 4870 uint32_t *hluts; 4871 size_t i, hluts_num; 4872 int rv; 4873 4874 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1; 4875 4876 for (i = 0; i < sc->sc_rss_table_size; i++) { 4877 qid = i % sc->sc_nqueue_pairs; 4878 hlut_buf[i] = qid & lut_mask; 4879 } 4880 4881 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) { 4882 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf)); 4883 } else { 4884 rv = 0; 4885 hluts = (uint32_t *)hlut_buf; 4886 hluts_num = sc->sc_rss_table_size >> 2; 4887 for (i = 0; i < hluts_num; i++) { 4888 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]); 4889 } 4890 ixl_flush(sc); 4891 } 4892 4893 return rv; 4894 } 4895 4896 static void 4897 ixl_config_rss(struct ixl_softc *sc) 4898 { 4899 4900 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 4901 4902 ixl_register_rss_key(sc); 4903 ixl_register_rss_pctype(sc); 4904 ixl_register_rss_hlut(sc); 4905 } 4906 4907 static const struct ixl_phy_type * 4908 ixl_search_phy_type(uint8_t phy_type) 4909 { 4910 const struct ixl_phy_type *itype; 4911 uint64_t mask; 4912 unsigned int i; 4913 4914 if (phy_type >= 64) 4915 return NULL; 4916 4917 mask = 1ULL << phy_type; 4918 4919 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 4920 itype = &ixl_phy_type_map[i]; 4921 4922 if (ISSET(itype->phy_type, mask)) 4923 return itype; 4924 } 4925 4926 return NULL; 4927 } 4928 4929 static uint64_t 4930 ixl_search_link_speed(uint8_t link_speed) 4931 { 4932 const struct ixl_speed_type *type; 4933 unsigned int i; 4934 4935 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4936 type = &ixl_speed_type_map[i]; 4937 4938 if (ISSET(type->dev_speed, link_speed)) 4939 return type->net_speed; 4940 } 4941 4942 return 0; 4943 } 4944 4945 static uint8_t 4946 ixl_search_baudrate(uint64_t baudrate) 4947 { 4948 const struct ixl_speed_type *type; 4949 unsigned int i; 4950 4951 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4952 type = &ixl_speed_type_map[i]; 4953 4954 if (type->net_speed == baudrate) { 4955 return type->dev_speed; 4956 } 4957 } 4958 4959 return 0; 4960 } 4961 4962 static int 4963 ixl_restart_an(struct ixl_softc *sc) 4964 { 4965 struct ixl_aq_desc iaq; 4966 4967 memset(&iaq, 0, sizeof(iaq)); 4968 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN); 4969 iaq.iaq_param[0] = 4970 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE); 4971 4972 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4973 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n"); 4974 return -1; 4975 } 4976 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4977 aprint_error_dev(sc->sc_dev, "RESTART AN error\n"); 4978 return -1; 4979 } 4980 4981 return 0; 4982 } 4983 4984 static int 4985 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 4986 uint16_t vlan, uint16_t flags) 4987 { 4988 struct ixl_aq_desc iaq; 4989 struct ixl_aq_add_macvlan *param; 4990 struct ixl_aq_add_macvlan_elem *elem; 4991 4992 memset(&iaq, 0, sizeof(iaq)); 4993 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 4994 iaq.iaq_opcode = 
htole16(IXL_AQ_OP_ADD_MACVLAN); 4995 iaq.iaq_datalen = htole16(sizeof(*elem)); 4996 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 4997 4998 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param; 4999 param->num_addrs = htole16(1); 5000 param->seid0 = htole16(0x8000) | sc->sc_seid; 5001 param->seid1 = 0; 5002 param->seid2 = 0; 5003 5004 elem = IXL_DMA_KVA(&sc->sc_scratch); 5005 memset(elem, 0, sizeof(*elem)); 5006 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 5007 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags); 5008 elem->vlan = htole16(vlan); 5009 5010 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 5011 return EINVAL; 5012 } 5013 5014 switch (le16toh(iaq.iaq_retval)) { 5015 case IXL_AQ_RC_OK: 5016 break; 5017 case IXL_AQ_RC_ENOSPC: 5018 return ENOSPC; 5019 case IXL_AQ_RC_ENOENT: 5020 return ENOENT; 5021 case IXL_AQ_RC_EACCES: 5022 return EACCES; 5023 case IXL_AQ_RC_EEXIST: 5024 return EEXIST; 5025 case IXL_AQ_RC_EINVAL: 5026 return EINVAL; 5027 default: 5028 return EIO; 5029 } 5030 5031 return 0; 5032 } 5033 5034 static int 5035 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 5036 uint16_t vlan, uint16_t flags) 5037 { 5038 struct ixl_aq_desc iaq; 5039 struct ixl_aq_remove_macvlan *param; 5040 struct ixl_aq_remove_macvlan_elem *elem; 5041 5042 memset(&iaq, 0, sizeof(iaq)); 5043 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 5044 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN); 5045 iaq.iaq_datalen = htole16(sizeof(*elem)); 5046 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 5047 5048 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param; 5049 param->num_addrs = htole16(1); 5050 param->seid0 = htole16(0x8000) | sc->sc_seid; 5051 param->seid1 = 0; 5052 param->seid2 = 0; 5053 5054 elem = IXL_DMA_KVA(&sc->sc_scratch); 5055 memset(elem, 0, sizeof(*elem)); 5056 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 5057 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags); 5058 elem->vlan = htole16(vlan); 5059 5060 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 5061 return EINVAL; 5062 } 5063 5064 switch (le16toh(iaq.iaq_retval)) { 5065 case IXL_AQ_RC_OK: 5066 break; 5067 case IXL_AQ_RC_ENOENT: 5068 return ENOENT; 5069 case IXL_AQ_RC_EACCES: 5070 return EACCES; 5071 case IXL_AQ_RC_EINVAL: 5072 return EINVAL; 5073 default: 5074 return EIO; 5075 } 5076 5077 return 0; 5078 } 5079 5080 static int 5081 ixl_hmc(struct ixl_softc *sc) 5082 { 5083 struct { 5084 uint32_t count; 5085 uint32_t minsize; 5086 bus_size_t objsiz; 5087 bus_size_t setoff; 5088 bus_size_t setcnt; 5089 } regs[] = { 5090 { 5091 0, 5092 IXL_HMC_TXQ_MINSIZE, 5093 I40E_GLHMC_LANTXOBJSZ, 5094 I40E_GLHMC_LANTXBASE(sc->sc_pf_id), 5095 I40E_GLHMC_LANTXCNT(sc->sc_pf_id), 5096 }, 5097 { 5098 0, 5099 IXL_HMC_RXQ_MINSIZE, 5100 I40E_GLHMC_LANRXOBJSZ, 5101 I40E_GLHMC_LANRXBASE(sc->sc_pf_id), 5102 I40E_GLHMC_LANRXCNT(sc->sc_pf_id), 5103 }, 5104 { 5105 0, 5106 0, 5107 I40E_GLHMC_FCOEDDPOBJSZ, 5108 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id), 5109 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id), 5110 }, 5111 { 5112 0, 5113 0, 5114 I40E_GLHMC_FCOEFOBJSZ, 5115 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id), 5116 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id), 5117 }, 5118 }; 5119 struct ixl_hmc_entry *e; 5120 uint64_t size, dva; 5121 uint8_t *kva; 5122 uint64_t *sdpage; 5123 unsigned int i; 5124 int npages, tables; 5125 uint32_t reg; 5126 5127 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries)); 5128 5129 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count = 5130 ixl_rd(sc, I40E_GLHMC_LANQMAX); 5131 5132 size = 0; 5133 for (i
= 0; i < __arraycount(regs); i++) { 5134 e = &sc->sc_hmc_entries[i]; 5135 5136 e->hmc_count = regs[i].count; 5137 reg = ixl_rd(sc, regs[i].objsiz); 5138 e->hmc_size = BIT_ULL(0x3F & reg); 5139 e->hmc_base = size; 5140 5141 if ((e->hmc_size * 8) < regs[i].minsize) { 5142 aprint_error_dev(sc->sc_dev, 5143 "kernel hmc entry is too big\n"); 5144 return -1; 5145 } 5146 5147 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP); 5148 } 5149 size = roundup(size, IXL_HMC_PGSIZE); 5150 npages = size / IXL_HMC_PGSIZE; 5151 5152 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ; 5153 5154 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) { 5155 aprint_error_dev(sc->sc_dev, 5156 "unable to allocate hmc pd memory\n"); 5157 return -1; 5158 } 5159 5160 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE, 5161 IXL_HMC_PGSIZE) != 0) { 5162 aprint_error_dev(sc->sc_dev, 5163 "unable to allocate hmc sd memory\n"); 5164 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5165 return -1; 5166 } 5167 5168 kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 5169 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd)); 5170 5171 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 5172 0, IXL_DMA_LEN(&sc->sc_hmc_pd), 5173 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5174 5175 dva = IXL_DMA_DVA(&sc->sc_hmc_pd); 5176 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd); 5177 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd)); 5178 5179 for (i = 0; (int)i < npages; i++) { 5180 *sdpage = htole64(dva | IXL_HMC_PDVALID); 5181 sdpage++; 5182 5183 dva += IXL_HMC_PGSIZE; 5184 } 5185 5186 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd), 5187 0, IXL_DMA_LEN(&sc->sc_hmc_sd), 5188 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5189 5190 dva = IXL_DMA_DVA(&sc->sc_hmc_sd); 5191 for (i = 0; (int)i < tables; i++) { 5192 uint32_t count; 5193 5194 KASSERT(npages >= 0); 5195 5196 count = ((unsigned int)npages > IXL_HMC_PGS) ? 
5197 IXL_HMC_PGS : (unsigned int)npages; 5198 5199 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32); 5200 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva | 5201 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | 5202 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)); 5203 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 5204 ixl_wr(sc, I40E_PFHMC_SDCMD, 5205 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i); 5206 5207 npages -= IXL_HMC_PGS; 5208 dva += IXL_HMC_PGSIZE; 5209 } 5210 5211 for (i = 0; i < __arraycount(regs); i++) { 5212 e = &sc->sc_hmc_entries[i]; 5213 5214 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP); 5215 ixl_wr(sc, regs[i].setcnt, e->hmc_count); 5216 } 5217 5218 return 0; 5219 } 5220 5221 static void 5222 ixl_hmc_free(struct ixl_softc *sc) 5223 { 5224 ixl_dmamem_free(sc, &sc->sc_hmc_sd); 5225 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5226 } 5227 5228 static void 5229 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing, 5230 unsigned int npacking) 5231 { 5232 uint8_t *dst = d; 5233 const uint8_t *src = s; 5234 unsigned int i; 5235 5236 for (i = 0; i < npacking; i++) { 5237 const struct ixl_hmc_pack *pack = &packing[i]; 5238 unsigned int offset = pack->lsb / 8; 5239 unsigned int align = pack->lsb % 8; 5240 const uint8_t *in = src + pack->offset; 5241 uint8_t *out = dst + offset; 5242 int width = pack->width; 5243 unsigned int inbits = 0; 5244 5245 if (align) { 5246 inbits = (*in++) << align; 5247 *out++ |= (inbits & 0xff); 5248 inbits >>= 8; 5249 5250 width -= 8 - align; 5251 } 5252 5253 while (width >= 8) { 5254 inbits |= (*in++) << align; 5255 *out++ = (inbits & 0xff); 5256 inbits >>= 8; 5257 5258 width -= 8; 5259 } 5260 5261 if (width > 0) { 5262 inbits |= (*in) << align; 5263 *out |= (inbits & ((1 << width) - 1)); 5264 } 5265 } 5266 } 5267 5268 static struct ixl_aq_buf * 5269 ixl_aqb_alloc(struct ixl_softc *sc) 5270 { 5271 struct ixl_aq_buf *aqb; 5272 5273 aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP); 5274 5275 aqb->aqb_size = IXL_AQ_BUFLEN; 5276 5277 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1, 5278 aqb->aqb_size, 0, 5279 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0) 5280 goto free; 5281 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size, 5282 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs, 5283 BUS_DMA_WAITOK) != 0) 5284 goto destroy; 5285 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs, 5286 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0) 5287 goto dma_free; 5288 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data, 5289 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0) 5290 goto unmap; 5291 5292 return aqb; 5293 unmap: 5294 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5295 dma_free: 5296 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5297 destroy: 5298 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5299 free: 5300 kmem_free(aqb, sizeof(*aqb)); 5301 5302 return NULL; 5303 } 5304 5305 static void 5306 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb) 5307 { 5308 5309 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map); 5310 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5311 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5312 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5313 kmem_free(aqb, sizeof(*aqb)); 5314 } 5315 5316 static int 5317 ixl_arq_fill(struct ixl_softc *sc) 5318 { 5319 struct ixl_aq_buf *aqb; 5320 struct ixl_aq_desc *arq, *iaq; 5321 unsigned int prod = sc->sc_arq_prod; 5322 unsigned int n; 5323 int post = 0; 5324 5325 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons, 
5326 IXL_AQ_NUM); 5327 arq = IXL_DMA_KVA(&sc->sc_arq); 5328 5329 if (__predict_false(n <= 0)) 5330 return 0; 5331 5332 do { 5333 aqb = sc->sc_arq_live[prod]; 5334 iaq = &arq[prod]; 5335 5336 if (aqb == NULL) { 5337 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle); 5338 if (aqb != NULL) { 5339 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5340 ixl_aq_buf, aqb_entry); 5341 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) { 5342 break; 5343 } 5344 5345 sc->sc_arq_live[prod] = aqb; 5346 memset(aqb->aqb_data, 0, aqb->aqb_size); 5347 5348 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, 5349 aqb->aqb_size, BUS_DMASYNC_PREREAD); 5350 5351 iaq->iaq_flags = htole16(IXL_AQ_BUF | 5352 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? 5353 IXL_AQ_LB : 0)); 5354 iaq->iaq_opcode = 0; 5355 iaq->iaq_datalen = htole16(aqb->aqb_size); 5356 iaq->iaq_retval = 0; 5357 iaq->iaq_cookie = 0; 5358 iaq->iaq_param[0] = 0; 5359 iaq->iaq_param[1] = 0; 5360 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr); 5361 } 5362 5363 prod++; 5364 prod &= IXL_AQ_MASK; 5365 5366 post = 1; 5367 5368 } while (--n); 5369 5370 if (post) { 5371 sc->sc_arq_prod = prod; 5372 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 5373 } 5374 5375 return post; 5376 } 5377 5378 static void 5379 ixl_arq_unfill(struct ixl_softc *sc) 5380 { 5381 struct ixl_aq_buf *aqb; 5382 unsigned int i; 5383 5384 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) { 5385 aqb = sc->sc_arq_live[i]; 5386 if (aqb == NULL) 5387 continue; 5388 5389 sc->sc_arq_live[i] = NULL; 5390 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size, 5391 BUS_DMASYNC_POSTREAD); 5392 ixl_aqb_free(sc, aqb); 5393 } 5394 5395 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) { 5396 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5397 ixl_aq_buf, aqb_entry); 5398 ixl_aqb_free(sc, aqb); 5399 } 5400 } 5401 5402 static void 5403 ixl_clear_hw(struct ixl_softc *sc) 5404 { 5405 uint32_t num_queues, base_queue; 5406 uint32_t num_pf_int; 5407 uint32_t num_vf_int; 5408 uint32_t num_vfs; 5409 uint32_t i, j; 5410 uint32_t val; 5411 uint32_t eol = 0x7ff; 5412 5413 /* get number of interrupts, queues, and vfs */ 5414 val = ixl_rd(sc, I40E_GLPCI_CNF2); 5415 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 5416 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 5417 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> 5418 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; 5419 5420 val = ixl_rd(sc, I40E_PFLAN_QALLOC); 5421 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> 5422 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 5423 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> 5424 I40E_PFLAN_QALLOC_LASTQ_SHIFT; 5425 if (val & I40E_PFLAN_QALLOC_VALID_MASK) 5426 num_queues = (j - base_queue) + 1; 5427 else 5428 num_queues = 0; 5429 5430 val = ixl_rd(sc, I40E_PF_VT_PFALLOC); 5431 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> 5432 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; 5433 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> 5434 I40E_PF_VT_PFALLOC_LASTVF_SHIFT; 5435 if (val & I40E_PF_VT_PFALLOC_VALID_MASK) 5436 num_vfs = (j - i) + 1; 5437 else 5438 num_vfs = 0; 5439 5440 /* stop all the interrupts */ 5441 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5442 ixl_flush(sc); 5443 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 5444 for (i = 0; i < num_pf_int - 2; i++) 5445 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val); 5446 ixl_flush(sc); 5447 5448 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ 5449 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5450 ixl_wr(sc, I40E_PFINT_LNKLST0, val); 5451 for (i = 0; i < num_pf_int - 2; i++) 5452 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val); 5453 val = eol << 
I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5454 for (i = 0; i < num_vfs; i++) 5455 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val); 5456 for (i = 0; i < num_vf_int - 2; i++) 5457 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val); 5458 5459 /* warn the HW of the coming Tx disables */ 5460 for (i = 0; i < num_queues; i++) { 5461 uint32_t abs_queue_idx = base_queue + i; 5462 uint32_t reg_block = 0; 5463 5464 if (abs_queue_idx >= 128) { 5465 reg_block = abs_queue_idx / 128; 5466 abs_queue_idx %= 128; 5467 } 5468 5469 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block)); 5470 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 5471 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 5472 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 5473 5474 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val); 5475 } 5476 delaymsec(400); 5477 5478 /* stop all the queues */ 5479 for (i = 0; i < num_queues; i++) { 5480 ixl_wr(sc, I40E_QINT_TQCTL(i), 0); 5481 ixl_wr(sc, I40E_QTX_ENA(i), 0); 5482 ixl_wr(sc, I40E_QINT_RQCTL(i), 0); 5483 ixl_wr(sc, I40E_QRX_ENA(i), 0); 5484 } 5485 5486 /* short wait for all queue disables to settle */ 5487 delaymsec(50); 5488 } 5489 5490 static int 5491 ixl_pf_reset(struct ixl_softc *sc) 5492 { 5493 uint32_t cnt = 0; 5494 uint32_t cnt1 = 0; 5495 uint32_t reg = 0, reg0 = 0; 5496 uint32_t grst_del; 5497 5498 /* 5499 * Poll for Global Reset steady state in case of recent GRST. 5500 * The grst delay value is in 100ms units, and we'll wait a 5501 * couple counts longer to be sure we don't just miss the end. 5502 */ 5503 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL); 5504 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK; 5505 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 5506 5507 grst_del = grst_del * 20; 5508 5509 for (cnt = 0; cnt < grst_del; cnt++) { 5510 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 5511 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 5512 break; 5513 delaymsec(100); 5514 } 5515 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5516 aprint_error(", Global reset polling failed to complete\n"); 5517 return -1; 5518 } 5519 5520 /* Now Wait for the FW to be ready */ 5521 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { 5522 reg = ixl_rd(sc, I40E_GLNVM_ULD); 5523 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5524 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); 5525 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5526 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) 5527 break; 5528 5529 delaymsec(10); 5530 } 5531 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5532 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { 5533 aprint_error(", wait for FW Reset complete timed out " 5534 "(I40E_GLNVM_ULD = 0x%x)\n", reg); 5535 return -1; 5536 } 5537 5538 /* 5539 * If there was a Global Reset in progress when we got here, 5540 * we don't need to do the PF Reset 5541 */ 5542 if (cnt == 0) { 5543 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5544 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK); 5545 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { 5546 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5547 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) 5548 break; 5549 delaymsec(1); 5550 5551 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT); 5552 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5553 aprint_error(", Core reset upcoming." 
5554 " Skipping PF reset reset request\n"); 5555 return -1; 5556 } 5557 } 5558 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { 5559 aprint_error(", PF reset polling failed to complete" 5560 "(I40E_PFGEN_CTRL= 0x%x)\n", reg); 5561 return -1; 5562 } 5563 } 5564 5565 return 0; 5566 } 5567 5568 static int 5569 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm, 5570 bus_size_t size, bus_size_t align) 5571 { 5572 ixm->ixm_size = size; 5573 5574 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1, 5575 ixm->ixm_size, 0, 5576 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 5577 &ixm->ixm_map) != 0) 5578 return 1; 5579 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size, 5580 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs, 5581 BUS_DMA_WAITOK) != 0) 5582 goto destroy; 5583 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs, 5584 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0) 5585 goto free; 5586 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva, 5587 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0) 5588 goto unmap; 5589 5590 memset(ixm->ixm_kva, 0, ixm->ixm_size); 5591 5592 return 0; 5593 unmap: 5594 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5595 free: 5596 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5597 destroy: 5598 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5599 return 1; 5600 } 5601 5602 static void 5603 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm) 5604 { 5605 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map); 5606 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5607 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5608 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5609 } 5610 5611 static int 5612 ixl_setup_vlan_hwfilter(struct ixl_softc *sc) 5613 { 5614 struct ethercom *ec = &sc->sc_ec; 5615 struct vlanid_list *vlanidp; 5616 int rv; 5617 5618 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5619 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5620 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5621 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5622 5623 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5624 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5625 if (rv != 0) 5626 return rv; 5627 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5628 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5629 if (rv != 0) 5630 return rv; 5631 5632 ETHER_LOCK(ec); 5633 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5634 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 5635 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5636 if (rv != 0) 5637 break; 5638 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 5639 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5640 if (rv != 0) 5641 break; 5642 } 5643 ETHER_UNLOCK(ec); 5644 5645 return rv; 5646 } 5647 5648 static void 5649 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc) 5650 { 5651 struct vlanid_list *vlanidp; 5652 struct ethercom *ec = &sc->sc_ec; 5653 5654 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5655 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5656 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5657 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5658 5659 ETHER_LOCK(ec); 5660 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5661 ixl_remove_macvlan(sc, sc->sc_enaddr, 5662 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5663 ixl_remove_macvlan(sc, etherbroadcastaddr, 5664 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5665 } 5666 ETHER_UNLOCK(ec); 5667 5668 ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5669 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5670 ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5671 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5672 } 5673 5674 static int 
5675 ixl_update_macvlan(struct ixl_softc *sc) 5676 { 5677 int rv = 0; 5678 int next_ec_capenable = sc->sc_ec.ec_capenable; 5679 5680 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 5681 rv = ixl_setup_vlan_hwfilter(sc); 5682 if (rv != 0) 5683 ixl_teardown_vlan_hwfilter(sc); 5684 } else { 5685 ixl_teardown_vlan_hwfilter(sc); 5686 } 5687 5688 return rv; 5689 } 5690 5691 static int 5692 ixl_ifflags_cb(struct ethercom *ec) 5693 { 5694 struct ifnet *ifp = &ec->ec_if; 5695 struct ixl_softc *sc = ifp->if_softc; 5696 int rv, change; 5697 5698 mutex_enter(&sc->sc_cfg_lock); 5699 5700 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable; 5701 5702 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) { 5703 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 5704 rv = ENETRESET; 5705 goto out; 5706 } 5707 5708 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) { 5709 rv = ixl_update_macvlan(sc); 5710 if (rv == 0) { 5711 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 5712 } else { 5713 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER); 5714 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 5715 } 5716 } 5717 5718 rv = ixl_iff(sc); 5719 out: 5720 mutex_exit(&sc->sc_cfg_lock); 5721 5722 return rv; 5723 } 5724 5725 static int 5726 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 5727 { 5728 const struct ixl_aq_link_status *status; 5729 const struct ixl_phy_type *itype; 5730 5731 uint64_t ifm_active = IFM_ETHER; 5732 uint64_t ifm_status = IFM_AVALID; 5733 int link_state = LINK_STATE_DOWN; 5734 uint64_t baudrate = 0; 5735 5736 status = (const struct ixl_aq_link_status *)iaq->iaq_param; 5737 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) { 5738 ifm_active |= IFM_NONE; 5739 goto done; 5740 } 5741 5742 ifm_active |= IFM_FDX; 5743 ifm_status |= IFM_ACTIVE; 5744 link_state = LINK_STATE_UP; 5745 5746 itype = ixl_search_phy_type(status->phy_type); 5747 if (itype != NULL) 5748 ifm_active |= itype->ifm_type; 5749 5750 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX)) 5751 ifm_active |= IFM_ETH_TXPAUSE; 5752 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX)) 5753 ifm_active |= IFM_ETH_RXPAUSE; 5754 5755 baudrate = ixl_search_link_speed(status->link_speed); 5756 5757 done: 5758 /* sc->sc_cfg_lock held expect during attach */ 5759 sc->sc_media_active = ifm_active; 5760 sc->sc_media_status = ifm_status; 5761 5762 sc->sc_ec.ec_if.if_baudrate = baudrate; 5763 5764 return link_state; 5765 } 5766 5767 static int 5768 ixl_establish_intx(struct ixl_softc *sc) 5769 { 5770 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5771 pci_intr_handle_t *intr; 5772 char xnamebuf[32]; 5773 char intrbuf[PCI_INTRSTR_LEN]; 5774 char const *intrstr; 5775 5776 KASSERT(sc->sc_nintrs == 1); 5777 5778 intr = &sc->sc_ihp[0]; 5779 5780 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 5781 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy", 5782 device_xname(sc->sc_dev)); 5783 5784 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr, 5785 sc, xnamebuf); 5786 5787 if (sc->sc_ihs[0] == NULL) { 5788 aprint_error_dev(sc->sc_dev, 5789 "unable to establish interrupt at %s\n", intrstr); 5790 return -1; 5791 } 5792 5793 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 5794 return 0; 5795 } 5796 5797 static int 5798 ixl_establish_msix(struct ixl_softc *sc) 5799 { 5800 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5801 kcpuset_t *affinity; 5802 unsigned int vector = 0; 5803 unsigned int i; 5804 int affinity_to, r; 5805 char xnamebuf[32]; 5806 char intrbuf[PCI_INTRSTR_LEN]; 5807 char const 
*intrstr; 5808 5809 kcpuset_create(&affinity, false); 5810 5811 /* the "other" intr is mapped to vector 0 */ 5812 vector = 0; 5813 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5814 intrbuf, sizeof(intrbuf)); 5815 snprintf(xnamebuf, sizeof(xnamebuf), "%s others", 5816 device_xname(sc->sc_dev)); 5817 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5818 sc->sc_ihp[vector], IPL_NET, ixl_other_intr, 5819 sc, xnamebuf); 5820 if (sc->sc_ihs[vector] == NULL) { 5821 aprint_error_dev(sc->sc_dev, 5822 "unable to establish interrupt at %s\n", intrstr); 5823 goto fail; 5824 } 5825 5826 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr); 5827 5828 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5829 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu; 5830 5831 kcpuset_zero(affinity); 5832 kcpuset_set(affinity, affinity_to); 5833 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5834 if (r == 0) { 5835 aprint_normal(", affinity to %u", affinity_to); 5836 } 5837 aprint_normal("\n"); 5838 vector++; 5839 5840 sc->sc_msix_vector_queue = vector; 5841 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5842 5843 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 5844 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5845 intrbuf, sizeof(intrbuf)); 5846 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d", 5847 device_xname(sc->sc_dev), i); 5848 5849 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5850 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr, 5851 (void *)&sc->sc_qps[i], xnamebuf); 5852 5853 if (sc->sc_ihs[vector] == NULL) { 5854 aprint_error_dev(sc->sc_dev, 5855 "unable to establish interrupt at %s\n", intrstr); 5856 goto fail; 5857 } 5858 5859 aprint_normal_dev(sc->sc_dev, 5860 "for TXRX%d interrupt at %s",i , intrstr); 5861 5862 kcpuset_zero(affinity); 5863 kcpuset_set(affinity, affinity_to); 5864 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5865 if (r == 0) { 5866 aprint_normal(", affinity to %u", affinity_to); 5867 affinity_to = (affinity_to + 1) % ncpu; 5868 } 5869 aprint_normal("\n"); 5870 vector++; 5871 } 5872 5873 kcpuset_destroy(affinity); 5874 5875 return 0; 5876 fail: 5877 for (i = 0; i < vector; i++) { 5878 pci_intr_disestablish(pc, sc->sc_ihs[i]); 5879 } 5880 5881 sc->sc_msix_vector_queue = 0; 5882 sc->sc_msix_vector_queue = 0; 5883 kcpuset_destroy(affinity); 5884 5885 return -1; 5886 } 5887 5888 static void 5889 ixl_config_queue_intr(struct ixl_softc *sc) 5890 { 5891 unsigned int i, vector; 5892 5893 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5894 vector = sc->sc_msix_vector_queue; 5895 } else { 5896 vector = I40E_INTR_NOTX_INTR; 5897 5898 ixl_wr(sc, I40E_PFINT_LNKLST0, 5899 (I40E_INTR_NOTX_QUEUE << 5900 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) | 5901 (I40E_QUEUE_TYPE_RX << 5902 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5903 } 5904 5905 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 5906 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0); 5907 ixl_flush(sc); 5908 5909 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), 5910 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | 5911 (I40E_QUEUE_TYPE_RX << 5912 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5913 5914 ixl_wr(sc, I40E_QINT_RQCTL(i), 5915 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 5916 (I40E_ITR_INDEX_RX << 5917 I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 5918 (I40E_INTR_NOTX_RX_QUEUE << 5919 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) | 5920 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 5921 (I40E_QUEUE_TYPE_TX << 5922 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 5923 I40E_QINT_RQCTL_CAUSE_ENA_MASK); 5924 5925 ixl_wr(sc, 
I40E_QINT_TQCTL(i), 5926 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 5927 (I40E_ITR_INDEX_TX << 5928 I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 5929 (I40E_INTR_NOTX_TX_QUEUE << 5930 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) | 5931 (I40E_QUEUE_TYPE_EOL << 5932 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | 5933 (I40E_QUEUE_TYPE_RX << 5934 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) | 5935 I40E_QINT_TQCTL_CAUSE_ENA_MASK); 5936 5937 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5938 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i), 5939 sc->sc_itr_rx); 5940 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i), 5941 sc->sc_itr_tx); 5942 vector++; 5943 } 5944 } 5945 ixl_flush(sc); 5946 5947 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx); 5948 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx); 5949 ixl_flush(sc); 5950 } 5951 5952 static void 5953 ixl_config_other_intr(struct ixl_softc *sc) 5954 { 5955 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5956 (void)ixl_rd(sc, I40E_PFINT_ICR0); 5957 5958 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 5959 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 5960 I40E_PFINT_ICR0_ENA_GRST_MASK | 5961 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 5962 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 5963 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 5964 I40E_PFINT_ICR0_ENA_VFLR_MASK | 5965 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 5966 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 5967 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK); 5968 5969 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF); 5970 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0); 5971 ixl_wr(sc, I40E_PFINT_STAT_CTL0, 5972 (I40E_ITR_INDEX_OTHER << 5973 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)); 5974 ixl_flush(sc); 5975 } 5976 5977 static int 5978 ixl_setup_interrupts(struct ixl_softc *sc) 5979 { 5980 struct pci_attach_args *pa = &sc->sc_pa; 5981 pci_intr_type_t max_type, intr_type; 5982 int counts[PCI_INTR_TYPE_SIZE]; 5983 int error; 5984 unsigned int i; 5985 bool retry; 5986 5987 memset(counts, 0, sizeof(counts)); 5988 max_type = PCI_INTR_TYPE_MSIX; 5989 /* QPs + other interrupt */ 5990 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1; 5991 counts[PCI_INTR_TYPE_INTX] = 1; 5992 5993 if (ixl_param_nomsix) 5994 counts[PCI_INTR_TYPE_MSIX] = 0; 5995 5996 do { 5997 retry = false; 5998 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type); 5999 if (error != 0) { 6000 aprint_error_dev(sc->sc_dev, 6001 "couldn't map interrupt\n"); 6002 break; 6003 } 6004 6005 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]); 6006 sc->sc_nintrs = counts[intr_type]; 6007 KASSERT(sc->sc_nintrs > 0); 6008 6009 for (i = 0; i < sc->sc_nintrs; i++) { 6010 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i], 6011 PCI_INTR_MPSAFE, true); 6012 } 6013 6014 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs, 6015 KM_SLEEP); 6016 6017 if (intr_type == PCI_INTR_TYPE_MSIX) { 6018 error = ixl_establish_msix(sc); 6019 if (error) { 6020 counts[PCI_INTR_TYPE_MSIX] = 0; 6021 retry = true; 6022 } 6023 } else if (intr_type == PCI_INTR_TYPE_INTX) { 6024 error = ixl_establish_intx(sc); 6025 } else { 6026 error = -1; 6027 } 6028 6029 if (error) { 6030 kmem_free(sc->sc_ihs, 6031 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 6032 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 6033 } else { 6034 sc->sc_intrtype = intr_type; 6035 } 6036 } while (retry); 6037 6038 return error; 6039 } 6040 6041 static void 6042 ixl_teardown_interrupts(struct ixl_softc *sc) 6043 { 6044 struct pci_attach_args *pa = &sc->sc_pa; 6045 unsigned int i; 6046 6047 for (i = 0; i < sc->sc_nintrs; i++) { 6048 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]); 
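		/*
		 * undo ixl_establish_msix()/ixl_establish_intx(); the
		 * vectors and the sc_ihs array itself are released once
		 * every handler has been disestablished
		 */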
6049 } 6050 6051 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 6052 6053 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 6054 sc->sc_ihs = NULL; 6055 sc->sc_nintrs = 0; 6056 } 6057 6058 static int 6059 ixl_setup_stats(struct ixl_softc *sc) 6060 { 6061 struct ixl_queue_pair *qp; 6062 struct ixl_tx_ring *txr; 6063 struct ixl_rx_ring *rxr; 6064 struct ixl_stats_counters *isc; 6065 unsigned int i; 6066 6067 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 6068 qp = &sc->sc_qps[i]; 6069 txr = qp->qp_txr; 6070 rxr = qp->qp_rxr; 6071 6072 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC, 6073 NULL, qp->qp_name, "m_defrag successed"); 6074 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC, 6075 NULL, qp->qp_name, "m_defrag_failed"); 6076 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC, 6077 NULL, qp->qp_name, "Dropped in pcq"); 6078 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC, 6079 NULL, qp->qp_name, "Deferred transmit"); 6080 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, 6081 NULL, qp->qp_name, "Interrupt on queue"); 6082 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC, 6083 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 6084 6085 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC, 6086 NULL, qp->qp_name, "MGETHDR failed"); 6087 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC, 6088 NULL, qp->qp_name, "MCLGET failed"); 6089 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed, 6090 EVCNT_TYPE_MISC, NULL, qp->qp_name, 6091 "bus_dmamap_load_mbuf failed"); 6092 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, 6093 NULL, qp->qp_name, "Interrupt on queue"); 6094 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC, 6095 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 6096 } 6097 6098 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR, 6099 NULL, device_xname(sc->sc_dev), "Interrupt for other events"); 6100 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC, 6101 NULL, device_xname(sc->sc_dev), "Link status event"); 6102 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC, 6103 NULL, device_xname(sc->sc_dev), "ECC error"); 6104 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC, 6105 NULL, device_xname(sc->sc_dev), "PCI exception"); 6106 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC, 6107 NULL, device_xname(sc->sc_dev), "Critical error"); 6108 6109 isc = &sc->sc_stats_counters; 6110 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC, 6111 NULL, device_xname(sc->sc_dev), "CRC errors"); 6112 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC, 6113 NULL, device_xname(sc->sc_dev), "Illegal bytes"); 6114 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC, 6115 NULL, device_xname(sc->sc_dev), "Mac local faults"); 6116 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC, 6117 NULL, device_xname(sc->sc_dev), "Mac remote faults"); 6118 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC, 6119 NULL, device_xname(sc->sc_dev), "Rx xon"); 6120 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC, 6121 NULL, device_xname(sc->sc_dev), "Tx xon"); 6122 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC, 6123 NULL, device_xname(sc->sc_dev), "Rx xoff"); 6124 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC, 6125 NULL, device_xname(sc->sc_dev), "Tx xoff"); 6126 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC, 6127 NULL, device_xname(sc->sc_dev), "Rx 
fragments"); 6128 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC, 6129 NULL, device_xname(sc->sc_dev), "Rx jabber"); 6130 6131 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC, 6132 NULL, device_xname(sc->sc_dev), "Rx size 64"); 6133 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC, 6134 NULL, device_xname(sc->sc_dev), "Rx size 127"); 6135 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC, 6136 NULL, device_xname(sc->sc_dev), "Rx size 255"); 6137 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC, 6138 NULL, device_xname(sc->sc_dev), "Rx size 511"); 6139 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC, 6140 NULL, device_xname(sc->sc_dev), "Rx size 1023"); 6141 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC, 6142 NULL, device_xname(sc->sc_dev), "Rx size 1522"); 6143 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC, 6144 NULL, device_xname(sc->sc_dev), "Rx jumbo packets"); 6145 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC, 6146 NULL, device_xname(sc->sc_dev), "Rx under size"); 6147 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC, 6148 NULL, device_xname(sc->sc_dev), "Rx over size"); 6149 6150 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC, 6151 NULL, device_xname(sc->sc_dev), "Rx bytes / port"); 6152 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC, 6153 NULL, device_xname(sc->sc_dev), "Rx discards / port"); 6154 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC, 6155 NULL, device_xname(sc->sc_dev), "Rx unicast / port"); 6156 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC, 6157 NULL, device_xname(sc->sc_dev), "Rx multicast / port"); 6158 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC, 6159 NULL, device_xname(sc->sc_dev), "Rx broadcast / port"); 6160 6161 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC, 6162 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi"); 6163 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC, 6164 NULL, device_xname(sc->sc_dev), "Rx discard / vsi"); 6165 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC, 6166 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi"); 6167 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC, 6168 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi"); 6169 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC, 6170 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi"); 6171 6172 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC, 6173 NULL, device_xname(sc->sc_dev), "Tx size 64"); 6174 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC, 6175 NULL, device_xname(sc->sc_dev), "Tx size 127"); 6176 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC, 6177 NULL, device_xname(sc->sc_dev), "Tx size 255"); 6178 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC, 6179 NULL, device_xname(sc->sc_dev), "Tx size 511"); 6180 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC, 6181 NULL, device_xname(sc->sc_dev), "Tx size 1023"); 6182 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC, 6183 NULL, device_xname(sc->sc_dev), "Tx size 1522"); 6184 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC, 6185 NULL, device_xname(sc->sc_dev), "Tx jumbo packets"); 6186 6187 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC, 6188 NULL, device_xname(sc->sc_dev), "Tx bytes / port"); 6189 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC, 6190 NULL, 
device_xname(sc->sc_dev), 6191 "Tx dropped due to link down / port"); 6192 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC, 6193 NULL, device_xname(sc->sc_dev), "Tx unicast / port"); 6194 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC, 6195 NULL, device_xname(sc->sc_dev), "Tx multicast / port"); 6196 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC, 6197 NULL, device_xname(sc->sc_dev), "Tx broadcast / port"); 6198 6199 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC, 6200 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi"); 6201 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC, 6202 NULL, device_xname(sc->sc_dev), "Tx errors / vsi"); 6203 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC, 6204 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi"); 6205 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC, 6206 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi"); 6207 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC, 6208 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi"); 6209 6210 sc->sc_stats_intval = ixl_param_stats_interval; 6211 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE); 6212 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc); 6213 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc); 6214 6215 return 0; 6216 } 6217 6218 static void 6219 ixl_teardown_stats(struct ixl_softc *sc) 6220 { 6221 struct ixl_tx_ring *txr; 6222 struct ixl_rx_ring *rxr; 6223 struct ixl_stats_counters *isc; 6224 unsigned int i; 6225 6226 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 6227 txr = sc->sc_qps[i].qp_txr; 6228 rxr = sc->sc_qps[i].qp_rxr; 6229 6230 evcnt_detach(&txr->txr_defragged); 6231 evcnt_detach(&txr->txr_defrag_failed); 6232 evcnt_detach(&txr->txr_pcqdrop); 6233 evcnt_detach(&txr->txr_transmitdef); 6234 evcnt_detach(&txr->txr_intr); 6235 evcnt_detach(&txr->txr_defer); 6236 6237 evcnt_detach(&rxr->rxr_mgethdr_failed); 6238 evcnt_detach(&rxr->rxr_mgetcl_failed); 6239 evcnt_detach(&rxr->rxr_mbuf_load_failed); 6240 evcnt_detach(&rxr->rxr_intr); 6241 evcnt_detach(&rxr->rxr_defer); 6242 } 6243 6244 isc = &sc->sc_stats_counters; 6245 evcnt_detach(&isc->isc_crc_errors); 6246 evcnt_detach(&isc->isc_illegal_bytes); 6247 evcnt_detach(&isc->isc_mac_local_faults); 6248 evcnt_detach(&isc->isc_mac_remote_faults); 6249 evcnt_detach(&isc->isc_link_xon_rx); 6250 evcnt_detach(&isc->isc_link_xon_tx); 6251 evcnt_detach(&isc->isc_link_xoff_rx); 6252 evcnt_detach(&isc->isc_link_xoff_tx); 6253 evcnt_detach(&isc->isc_rx_fragments); 6254 evcnt_detach(&isc->isc_rx_jabber); 6255 evcnt_detach(&isc->isc_rx_bytes); 6256 evcnt_detach(&isc->isc_rx_discards); 6257 evcnt_detach(&isc->isc_rx_unicast); 6258 evcnt_detach(&isc->isc_rx_multicast); 6259 evcnt_detach(&isc->isc_rx_broadcast); 6260 evcnt_detach(&isc->isc_rx_size_64); 6261 evcnt_detach(&isc->isc_rx_size_127); 6262 evcnt_detach(&isc->isc_rx_size_255); 6263 evcnt_detach(&isc->isc_rx_size_511); 6264 evcnt_detach(&isc->isc_rx_size_1023); 6265 evcnt_detach(&isc->isc_rx_size_1522); 6266 evcnt_detach(&isc->isc_rx_size_big); 6267 evcnt_detach(&isc->isc_rx_undersize); 6268 evcnt_detach(&isc->isc_rx_oversize); 6269 evcnt_detach(&isc->isc_tx_bytes); 6270 evcnt_detach(&isc->isc_tx_dropped_link_down); 6271 evcnt_detach(&isc->isc_tx_unicast); 6272 evcnt_detach(&isc->isc_tx_multicast); 6273 evcnt_detach(&isc->isc_tx_broadcast); 6274 evcnt_detach(&isc->isc_tx_size_64); 6275 evcnt_detach(&isc->isc_tx_size_127); 6276 evcnt_detach(&isc->isc_tx_size_255); 6277 
evcnt_detach(&isc->isc_tx_size_511); 6278 evcnt_detach(&isc->isc_tx_size_1023); 6279 evcnt_detach(&isc->isc_tx_size_1522); 6280 evcnt_detach(&isc->isc_tx_size_big); 6281 evcnt_detach(&isc->isc_vsi_rx_discards); 6282 evcnt_detach(&isc->isc_vsi_rx_bytes); 6283 evcnt_detach(&isc->isc_vsi_rx_unicast); 6284 evcnt_detach(&isc->isc_vsi_rx_multicast); 6285 evcnt_detach(&isc->isc_vsi_rx_broadcast); 6286 evcnt_detach(&isc->isc_vsi_tx_errors); 6287 evcnt_detach(&isc->isc_vsi_tx_bytes); 6288 evcnt_detach(&isc->isc_vsi_tx_unicast); 6289 evcnt_detach(&isc->isc_vsi_tx_multicast); 6290 evcnt_detach(&isc->isc_vsi_tx_broadcast); 6291 6292 evcnt_detach(&sc->sc_event_atq); 6293 evcnt_detach(&sc->sc_event_link); 6294 evcnt_detach(&sc->sc_event_ecc_err); 6295 evcnt_detach(&sc->sc_event_pci_exception); 6296 evcnt_detach(&sc->sc_event_crit_err); 6297 6298 callout_destroy(&sc->sc_stats_callout); 6299 } 6300 6301 static void 6302 ixl_stats_callout(void *xsc) 6303 { 6304 struct ixl_softc *sc = xsc; 6305 6306 ixl_work_add(sc->sc_workq, &sc->sc_stats_task); 6307 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 6308 } 6309 6310 static uint64_t 6311 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo, 6312 uint64_t *offset, bool has_offset) 6313 { 6314 uint64_t value, delta; 6315 int bitwidth; 6316 6317 bitwidth = reg_hi == 0 ? 32 : 48; 6318 6319 value = ixl_rd(sc, reg_lo); 6320 6321 if (bitwidth > 32) { 6322 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32); 6323 } 6324 6325 if (__predict_true(has_offset)) { 6326 delta = value; 6327 if (value < *offset) 6328 delta += ((uint64_t)1 << bitwidth); 6329 delta -= *offset; 6330 } else { 6331 delta = 0; 6332 } 6333 atomic_swap_64(offset, value); 6334 6335 return delta; 6336 } 6337 6338 static void 6339 ixl_stats_update(void *xsc) 6340 { 6341 struct ixl_softc *sc = xsc; 6342 struct ixl_stats_counters *isc; 6343 uint64_t delta; 6344 6345 isc = &sc->sc_stats_counters; 6346 6347 /* errors */ 6348 delta = ixl_stat_delta(sc, 6349 0, I40E_GLPRT_CRCERRS(sc->sc_port), 6350 &isc->isc_crc_errors_offset, isc->isc_has_offset); 6351 atomic_add_64(&isc->isc_crc_errors.ev_count, delta); 6352 6353 delta = ixl_stat_delta(sc, 6354 0, I40E_GLPRT_ILLERRC(sc->sc_port), 6355 &isc->isc_illegal_bytes_offset, isc->isc_has_offset); 6356 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta); 6357 6358 /* rx */ 6359 delta = ixl_stat_delta(sc, 6360 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port), 6361 &isc->isc_rx_bytes_offset, isc->isc_has_offset); 6362 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta); 6363 6364 delta = ixl_stat_delta(sc, 6365 0, I40E_GLPRT_RDPC(sc->sc_port), 6366 &isc->isc_rx_discards_offset, isc->isc_has_offset); 6367 atomic_add_64(&isc->isc_rx_discards.ev_count, delta); 6368 6369 delta = ixl_stat_delta(sc, 6370 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port), 6371 &isc->isc_rx_unicast_offset, isc->isc_has_offset); 6372 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta); 6373 6374 delta = ixl_stat_delta(sc, 6375 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port), 6376 &isc->isc_rx_multicast_offset, isc->isc_has_offset); 6377 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta); 6378 6379 delta = ixl_stat_delta(sc, 6380 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port), 6381 &isc->isc_rx_broadcast_offset, isc->isc_has_offset); 6382 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta); 6383 6384 /* Packet size stats rx */ 6385 delta = ixl_stat_delta(sc, 6386 I40E_GLPRT_PRC64H(sc->sc_port), 
            I40E_GLPRT_PRC64L(sc->sc_port),
            &isc->isc_rx_size_64_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
            &isc->isc_rx_size_127_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
            &isc->isc_rx_size_255_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
            &isc->isc_rx_size_511_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
            &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
            &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
            &isc->isc_rx_size_big_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);

        delta = ixl_stat_delta(sc,
            0, I40E_GLPRT_RUC(sc->sc_port),
            &isc->isc_rx_undersize_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);

        delta = ixl_stat_delta(sc,
            0, I40E_GLPRT_ROC(sc->sc_port),
            &isc->isc_rx_oversize_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);

        /* tx */
        delta = ixl_stat_delta(sc,
            I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
            &isc->isc_tx_bytes_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);

        delta = ixl_stat_delta(sc,
            0, I40E_GLPRT_TDOLD(sc->sc_port),
            &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
            &isc->isc_tx_unicast_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
            &isc->isc_tx_multicast_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
            &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);

        /* Packet size stats tx */
        delta = ixl_stat_delta(sc,
            I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
            &isc->isc_tx_size_64_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
            &isc->isc_tx_size_127_offset, isc->isc_has_offset);
        atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);

        delta = ixl_stat_delta(sc,
            I40E_GLPRT_PTC255H(sc->sc_port),
I40E_GLPRT_PTC255L(sc->sc_port), 6469 &isc->isc_tx_size_255_offset, isc->isc_has_offset); 6470 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta); 6471 6472 delta = ixl_stat_delta(sc, 6473 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port), 6474 &isc->isc_tx_size_511_offset, isc->isc_has_offset); 6475 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta); 6476 6477 delta = ixl_stat_delta(sc, 6478 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port), 6479 &isc->isc_tx_size_1023_offset, isc->isc_has_offset); 6480 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta); 6481 6482 delta = ixl_stat_delta(sc, 6483 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port), 6484 &isc->isc_tx_size_1522_offset, isc->isc_has_offset); 6485 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta); 6486 6487 delta = ixl_stat_delta(sc, 6488 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port), 6489 &isc->isc_tx_size_big_offset, isc->isc_has_offset); 6490 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta); 6491 6492 /* mac faults */ 6493 delta = ixl_stat_delta(sc, 6494 0, I40E_GLPRT_MLFC(sc->sc_port), 6495 &isc->isc_mac_local_faults_offset, isc->isc_has_offset); 6496 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta); 6497 6498 delta = ixl_stat_delta(sc, 6499 0, I40E_GLPRT_MRFC(sc->sc_port), 6500 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset); 6501 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta); 6502 6503 /* Flow control (LFC) stats */ 6504 delta = ixl_stat_delta(sc, 6505 0, I40E_GLPRT_LXONRXC(sc->sc_port), 6506 &isc->isc_link_xon_rx_offset, isc->isc_has_offset); 6507 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta); 6508 6509 delta = ixl_stat_delta(sc, 6510 0, I40E_GLPRT_LXONTXC(sc->sc_port), 6511 &isc->isc_link_xon_tx_offset, isc->isc_has_offset); 6512 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta); 6513 6514 delta = ixl_stat_delta(sc, 6515 0, I40E_GLPRT_LXOFFRXC(sc->sc_port), 6516 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset); 6517 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta); 6518 6519 delta = ixl_stat_delta(sc, 6520 0, I40E_GLPRT_LXOFFTXC(sc->sc_port), 6521 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset); 6522 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta); 6523 6524 /* fragments */ 6525 delta = ixl_stat_delta(sc, 6526 0, I40E_GLPRT_RFC(sc->sc_port), 6527 &isc->isc_rx_fragments_offset, isc->isc_has_offset); 6528 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta); 6529 6530 delta = ixl_stat_delta(sc, 6531 0, I40E_GLPRT_RJC(sc->sc_port), 6532 &isc->isc_rx_jabber_offset, isc->isc_has_offset); 6533 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta); 6534 6535 /* VSI rx counters */ 6536 delta = ixl_stat_delta(sc, 6537 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx), 6538 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset); 6539 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta); 6540 6541 delta = ixl_stat_delta(sc, 6542 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx), 6543 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx), 6544 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset); 6545 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta); 6546 6547 delta = ixl_stat_delta(sc, 6548 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx), 6549 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx), 6550 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset); 6551 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta); 6552 6553 delta = ixl_stat_delta(sc, 6554 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx), 6555 
I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx), 6556 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset); 6557 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta); 6558 6559 delta = ixl_stat_delta(sc, 6560 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx), 6561 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx), 6562 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset); 6563 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta); 6564 6565 /* VSI tx counters */ 6566 delta = ixl_stat_delta(sc, 6567 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx), 6568 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset); 6569 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta); 6570 6571 delta = ixl_stat_delta(sc, 6572 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx), 6573 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx), 6574 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset); 6575 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta); 6576 6577 delta = ixl_stat_delta(sc, 6578 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx), 6579 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx), 6580 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset); 6581 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta); 6582 6583 delta = ixl_stat_delta(sc, 6584 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx), 6585 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx), 6586 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset); 6587 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta); 6588 6589 delta = ixl_stat_delta(sc, 6590 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx), 6591 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx), 6592 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset); 6593 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta); 6594 } 6595 6596 static int 6597 ixl_setup_sysctls(struct ixl_softc *sc) 6598 { 6599 const char *devname; 6600 struct sysctllog **log; 6601 const struct sysctlnode *rnode, *rxnode, *txnode; 6602 int error; 6603 6604 log = &sc->sc_sysctllog; 6605 devname = device_xname(sc->sc_dev); 6606 6607 error = sysctl_createv(log, 0, NULL, &rnode, 6608 0, CTLTYPE_NODE, devname, 6609 SYSCTL_DESCR("ixl information and settings"), 6610 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 6611 if (error) 6612 goto out; 6613 6614 error = sysctl_createv(log, 0, &rnode, NULL, 6615 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue", 6616 SYSCTL_DESCR("Use workqueue for packet processing"), 6617 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL); 6618 if (error) 6619 goto out; 6620 6621 error = sysctl_createv(log, 0, &rnode, NULL, 6622 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval", 6623 SYSCTL_DESCR("Statistics collection interval in milliseconds"), 6624 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL); 6625 6626 error = sysctl_createv(log, 0, &rnode, &rxnode, 6627 0, CTLTYPE_NODE, "rx", 6628 SYSCTL_DESCR("ixl information and settings for Rx"), 6629 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 6630 if (error) 6631 goto out; 6632 6633 error = sysctl_createv(log, 0, &rxnode, NULL, 6634 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 6635 SYSCTL_DESCR("max number of Rx packets" 6636 " to process for interrupt processing"), 6637 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 6638 if (error) 6639 goto out; 6640 6641 error = sysctl_createv(log, 0, &rxnode, NULL, 6642 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 6643 SYSCTL_DESCR("max number of Rx packets" 6644 " to process for deferred processing"), 6645 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL); 6646 if (error) 6647 goto out; 6648 6649 error 
= sysctl_createv(log, 0, &rnode, &txnode, 6650 0, CTLTYPE_NODE, "tx", 6651 SYSCTL_DESCR("ixl information and settings for Tx"), 6652 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 6653 if (error) 6654 goto out; 6655 6656 error = sysctl_createv(log, 0, &txnode, NULL, 6657 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 6658 SYSCTL_DESCR("max number of Tx packets" 6659 " to process for interrupt processing"), 6660 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 6661 if (error) 6662 goto out; 6663 6664 error = sysctl_createv(log, 0, &txnode, NULL, 6665 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 6666 SYSCTL_DESCR("max number of Tx packets" 6667 " to process for deferred processing"), 6668 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL); 6669 if (error) 6670 goto out; 6671 6672 out: 6673 if (error) { 6674 aprint_error_dev(sc->sc_dev, 6675 "unable to create sysctl node\n"); 6676 sysctl_teardown(log); 6677 } 6678 6679 return error; 6680 } 6681 6682 static void 6683 ixl_teardown_sysctls(struct ixl_softc *sc) 6684 { 6685 6686 sysctl_teardown(&sc->sc_sysctllog); 6687 } 6688 6689 static struct workqueue * 6690 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags) 6691 { 6692 struct workqueue *wq; 6693 int error; 6694 6695 error = workqueue_create(&wq, name, ixl_workq_work, NULL, 6696 prio, ipl, flags); 6697 6698 if (error) 6699 return NULL; 6700 6701 return wq; 6702 } 6703 6704 static void 6705 ixl_workq_destroy(struct workqueue *wq) 6706 { 6707 6708 workqueue_destroy(wq); 6709 } 6710 6711 static void 6712 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg) 6713 { 6714 6715 memset(work, 0, sizeof(*work)); 6716 work->ixw_func = func; 6717 work->ixw_arg = arg; 6718 } 6719 6720 static void 6721 ixl_work_add(struct workqueue *wq, struct ixl_work *work) 6722 { 6723 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0) 6724 return; 6725 6726 kpreempt_disable(); 6727 workqueue_enqueue(wq, &work->ixw_cookie, NULL); 6728 kpreempt_enable(); 6729 } 6730 6731 static void 6732 ixl_work_wait(struct workqueue *wq, struct ixl_work *work) 6733 { 6734 6735 workqueue_wait(wq, &work->ixw_cookie); 6736 } 6737 6738 static void 6739 ixl_workq_work(struct work *wk, void *context) 6740 { 6741 struct ixl_work *work; 6742 6743 work = container_of(wk, struct ixl_work, ixw_cookie); 6744 6745 atomic_swap_uint(&work->ixw_added, 0); 6746 work->ixw_func(work->ixw_arg); 6747 } 6748 6749 static int 6750 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv) 6751 { 6752 struct ixl_aq_desc iaq; 6753 6754 memset(&iaq, 0, sizeof(iaq)); 6755 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ); 6756 iaq.iaq_param[1] = htole32(reg); 6757 6758 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6759 return ETIMEDOUT; 6760 6761 switch (htole16(iaq.iaq_retval)) { 6762 case IXL_AQ_RC_OK: 6763 /* success */ 6764 break; 6765 case IXL_AQ_RC_EACCES: 6766 return EPERM; 6767 case IXL_AQ_RC_EAGAIN: 6768 return EAGAIN; 6769 default: 6770 return EIO; 6771 } 6772 6773 *rv = htole32(iaq.iaq_param[3]); 6774 return 0; 6775 } 6776 6777 static uint32_t 6778 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg) 6779 { 6780 uint32_t val; 6781 int rv, retry, retry_limit; 6782 6783 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6784 retry_limit = 5; 6785 } else { 6786 retry_limit = 0; 6787 } 6788 6789 for (retry = 0; retry < retry_limit; retry++) { 6790 rv = ixl_rx_ctl_read(sc, reg, &val); 6791 if (rv == 0) 6792 return val; 6793 else if (rv == EAGAIN) 6794 delaymsec(1); 6795 else 6796 break; 6797 } 6798 6799 
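        /*
         * The RX_CTL admin queue read path was unavailable or did not
         * succeed within the retry budget, so fall back to reading the
         * register directly.
         */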
val = ixl_rd(sc, reg); 6800 6801 return val; 6802 } 6803 6804 static int 6805 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6806 { 6807 struct ixl_aq_desc iaq; 6808 6809 memset(&iaq, 0, sizeof(iaq)); 6810 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE); 6811 iaq.iaq_param[1] = htole32(reg); 6812 iaq.iaq_param[3] = htole32(value); 6813 6814 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6815 return ETIMEDOUT; 6816 6817 switch (htole16(iaq.iaq_retval)) { 6818 case IXL_AQ_RC_OK: 6819 /* success */ 6820 break; 6821 case IXL_AQ_RC_EACCES: 6822 return EPERM; 6823 case IXL_AQ_RC_EAGAIN: 6824 return EAGAIN; 6825 default: 6826 return EIO; 6827 } 6828 6829 return 0; 6830 } 6831 6832 static void 6833 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6834 { 6835 int rv, retry, retry_limit; 6836 6837 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6838 retry_limit = 5; 6839 } else { 6840 retry_limit = 0; 6841 } 6842 6843 for (retry = 0; retry < retry_limit; retry++) { 6844 rv = ixl_rx_ctl_write(sc, reg, value); 6845 if (rv == 0) 6846 return; 6847 else if (rv == EAGAIN) 6848 delaymsec(1); 6849 else 6850 break; 6851 } 6852 6853 ixl_wr(sc, reg, value); 6854 } 6855 6856 static int 6857 ixl_nvm_lock(struct ixl_softc *sc, char rw) 6858 { 6859 struct ixl_aq_desc iaq; 6860 struct ixl_aq_req_resource_param *param; 6861 int rv; 6862 6863 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6864 return 0; 6865 6866 memset(&iaq, 0, sizeof(iaq)); 6867 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE); 6868 6869 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param; 6870 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 6871 if (rw == 'R') { 6872 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ); 6873 } else { 6874 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE); 6875 } 6876 6877 rv = ixl_atq_poll(sc, &iaq, 250); 6878 6879 if (rv != 0) 6880 return ETIMEDOUT; 6881 6882 switch (le16toh(iaq.iaq_retval)) { 6883 case IXL_AQ_RC_OK: 6884 break; 6885 case IXL_AQ_RC_EACCES: 6886 return EACCES; 6887 case IXL_AQ_RC_EBUSY: 6888 return EBUSY; 6889 case IXL_AQ_RC_EPERM: 6890 return EPERM; 6891 } 6892 6893 return 0; 6894 } 6895 6896 static int 6897 ixl_nvm_unlock(struct ixl_softc *sc) 6898 { 6899 struct ixl_aq_desc iaq; 6900 struct ixl_aq_rel_resource_param *param; 6901 int rv; 6902 6903 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6904 return 0; 6905 6906 memset(&iaq, 0, sizeof(iaq)); 6907 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE); 6908 6909 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param; 6910 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 6911 6912 rv = ixl_atq_poll(sc, &iaq, 250); 6913 6914 if (rv != 0) 6915 return ETIMEDOUT; 6916 6917 switch (le16toh(iaq.iaq_retval)) { 6918 case IXL_AQ_RC_OK: 6919 break; 6920 default: 6921 return EIO; 6922 } 6923 return 0; 6924 } 6925 6926 static int 6927 ixl_srdone_poll(struct ixl_softc *sc) 6928 { 6929 int wait_count; 6930 uint32_t reg; 6931 6932 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS; 6933 wait_count++) { 6934 reg = ixl_rd(sc, I40E_GLNVM_SRCTL); 6935 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK)) 6936 break; 6937 6938 delaymsec(5); 6939 } 6940 6941 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS) 6942 return -1; 6943 6944 return 0; 6945 } 6946 6947 static int 6948 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data) 6949 { 6950 uint32_t reg; 6951 6952 if (ixl_srdone_poll(sc) != 0) 6953 return ETIMEDOUT; 6954 6955 reg = ((uint32_t)offset << 
I40E_GLNVM_SRCTL_ADDR_SHIFT) | 6956 __BIT(I40E_GLNVM_SRCTL_START_SHIFT); 6957 ixl_wr(sc, I40E_GLNVM_SRCTL, reg); 6958 6959 if (ixl_srdone_poll(sc) != 0) { 6960 aprint_debug("NVM read error: couldn't access " 6961 "Shadow RAM address: 0x%x\n", offset); 6962 return ETIMEDOUT; 6963 } 6964 6965 reg = ixl_rd(sc, I40E_GLNVM_SRDATA); 6966 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK); 6967 6968 return 0; 6969 } 6970 6971 static int 6972 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word, 6973 void *data, size_t len) 6974 { 6975 struct ixl_dmamem *idm; 6976 struct ixl_aq_desc iaq; 6977 struct ixl_aq_nvm_param *param; 6978 uint32_t offset_bytes; 6979 int rv; 6980 6981 idm = &sc->sc_aqbuf; 6982 if (len > IXL_DMA_LEN(idm)) 6983 return ENOMEM; 6984 6985 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 6986 memset(&iaq, 0, sizeof(iaq)); 6987 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ); 6988 iaq.iaq_flags = htole16(IXL_AQ_BUF | 6989 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0)); 6990 iaq.iaq_datalen = htole16(len); 6991 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 6992 6993 param = (struct ixl_aq_nvm_param *)iaq.iaq_param; 6994 param->command_flags = IXL_AQ_NVM_LAST_CMD; 6995 param->module_pointer = 0; 6996 param->length = htole16(len); 6997 offset_bytes = (uint32_t)offset_word * 2; 6998 offset_bytes &= 0x00FFFFFF; 6999 param->offset = htole32(offset_bytes); 7000 7001 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 7002 BUS_DMASYNC_PREREAD); 7003 7004 rv = ixl_atq_poll(sc, &iaq, 250); 7005 7006 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 7007 BUS_DMASYNC_POSTREAD); 7008 7009 if (rv != 0) { 7010 return ETIMEDOUT; 7011 } 7012 7013 switch (le16toh(iaq.iaq_retval)) { 7014 case IXL_AQ_RC_OK: 7015 break; 7016 case IXL_AQ_RC_EPERM: 7017 return EPERM; 7018 case IXL_AQ_RC_EINVAL: 7019 return EINVAL; 7020 case IXL_AQ_RC_EBUSY: 7021 return EBUSY; 7022 case IXL_AQ_RC_EIO: 7023 default: 7024 return EIO; 7025 } 7026 7027 memcpy(data, IXL_DMA_KVA(idm), len); 7028 7029 return 0; 7030 } 7031 7032 static int 7033 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data) 7034 { 7035 int error; 7036 uint16_t buf; 7037 7038 error = ixl_nvm_lock(sc, 'R'); 7039 if (error) 7040 return error; 7041 7042 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) { 7043 error = ixl_nvm_read_aq(sc, offset, 7044 &buf, sizeof(buf)); 7045 if (error == 0) 7046 *data = le16toh(buf); 7047 } else { 7048 error = ixl_nvm_read_srctl(sc, offset, &buf); 7049 if (error == 0) 7050 *data = buf; 7051 } 7052 7053 ixl_nvm_unlock(sc); 7054 7055 return error; 7056 } 7057 7058 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci"); 7059 7060 #ifdef _MODULE 7061 #include "ioconf.c" 7062 #endif 7063 7064 #ifdef _MODULE 7065 static void 7066 ixl_parse_modprop(prop_dictionary_t dict) 7067 { 7068 prop_object_t obj; 7069 int64_t val; 7070 uint64_t uval; 7071 7072 if (dict == NULL) 7073 return; 7074 7075 obj = prop_dictionary_get(dict, "nomsix"); 7076 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) { 7077 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj); 7078 } 7079 7080 obj = prop_dictionary_get(dict, "stats_interval"); 7081 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7082 val = prop_number_signed_value((prop_number_t)obj); 7083 7084 /* the range has no reason */ 7085 if (100 < val && val < 180000) { 7086 ixl_param_stats_interval = val; 7087 } 7088 } 7089 7090 obj = prop_dictionary_get(dict, "nqps_limit"); 7091 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 
7092 val = prop_number_signed_value((prop_number_t)obj); 7093 7094 if (val <= INT32_MAX) 7095 ixl_param_nqps_limit = val; 7096 } 7097 7098 obj = prop_dictionary_get(dict, "rx_ndescs"); 7099 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7100 uval = prop_number_unsigned_integer_value((prop_number_t)obj); 7101 7102 if (uval > 8) 7103 ixl_param_rx_ndescs = uval; 7104 } 7105 7106 obj = prop_dictionary_get(dict, "tx_ndescs"); 7107 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7108 uval = prop_number_unsigned_integer_value((prop_number_t)obj); 7109 7110 if (uval > IXL_TX_PKT_DESCS) 7111 ixl_param_tx_ndescs = uval; 7112 } 7113 7114 } 7115 #endif 7116 7117 static int 7118 if_ixl_modcmd(modcmd_t cmd, void *opaque) 7119 { 7120 int error = 0; 7121 7122 #ifdef _MODULE 7123 switch (cmd) { 7124 case MODULE_CMD_INIT: 7125 ixl_parse_modprop((prop_dictionary_t)opaque); 7126 error = config_init_component(cfdriver_ioconf_if_ixl, 7127 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl); 7128 break; 7129 case MODULE_CMD_FINI: 7130 error = config_fini_component(cfdriver_ioconf_if_ixl, 7131 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl); 7132 break; 7133 default: 7134 error = ENOTTY; 7135 break; 7136 } 7137 #endif 7138 7139 return error; 7140 } 7141