1 /* $NetBSD: if_ixl.c,v 1.71 2020/07/31 09:34:33 yamaguchi Exp $ */ 2 3 /* 4 * Copyright (c) 2013-2015, Intel Corporation 5 * All rights reserved. 6 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 /* 51 * Copyright (c) 2019 Internet Initiative Japan, Inc. 52 * All rights reserved. 53 * 54 * Redistribution and use in source and binary forms, with or without 55 * modification, are permitted provided that the following conditions 56 * are met: 57 * 1. Redistributions of source code must retain the above copyright 58 * notice, this list of conditions and the following disclaimer. 59 * 2. Redistributions in binary form must reproduce the above copyright 60 * notice, this list of conditions and the following disclaimer in the 61 * documentation and/or other materials provided with the distribution. 62 * 63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 66 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 73 * POSSIBILITY OF SUCH DAMAGE. 74 */ 75 76 #include <sys/cdefs.h> 77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.71 2020/07/31 09:34:33 yamaguchi Exp $"); 78 79 #ifdef _KERNEL_OPT 80 #include "opt_net_mpsafe.h" 81 #include "opt_if_ixl.h" 82 #endif 83 84 #include <sys/param.h> 85 #include <sys/types.h> 86 87 #include <sys/bitops.h> 88 #include <sys/cpu.h> 89 #include <sys/device.h> 90 #include <sys/evcnt.h> 91 #include <sys/interrupt.h> 92 #include <sys/kmem.h> 93 #include <sys/module.h> 94 #include <sys/mutex.h> 95 #include <sys/pcq.h> 96 #include <sys/syslog.h> 97 #include <sys/workqueue.h> 98 99 #include <sys/bus.h> 100 101 #include <net/bpf.h> 102 #include <net/if.h> 103 #include <net/if_dl.h> 104 #include <net/if_media.h> 105 #include <net/if_ether.h> 106 #include <net/rss_config.h> 107 108 #include <netinet/tcp.h> /* for struct tcphdr */ 109 #include <netinet/udp.h> /* for struct udphdr */ 110 111 #include <dev/pci/pcivar.h> 112 #include <dev/pci/pcidevs.h> 113 114 #include <dev/pci/if_ixlreg.h> 115 #include <dev/pci/if_ixlvar.h> 116 117 #include <prop/proplib.h> 118 119 struct ixl_softc; /* defined */ 120 121 #define I40E_PF_RESET_WAIT_COUNT 200 122 #define I40E_AQ_LARGE_BUF 512 123 124 /* bitfields for Tx queue mapping in QTX_CTL */ 125 #define I40E_QTX_CTL_VF_QUEUE 0x0 126 #define I40E_QTX_CTL_VM_QUEUE 0x1 127 #define I40E_QTX_CTL_PF_QUEUE 0x2 128 129 #define I40E_QUEUE_TYPE_EOL 0x7ff 130 #define I40E_INTR_NOTX_QUEUE 0 131 132 #define I40E_QUEUE_TYPE_RX 0x0 133 #define I40E_QUEUE_TYPE_TX 0x1 134 #define I40E_QUEUE_TYPE_PE_CEQ 0x2 135 #define I40E_QUEUE_TYPE_UNKNOWN 0x3 136 137 #define I40E_ITR_INDEX_RX 0x0 138 #define I40E_ITR_INDEX_TX 0x1 139 #define I40E_ITR_INDEX_OTHER 0x2 140 #define I40E_ITR_INDEX_NONE 0x3 141 #define IXL_ITR_RX 0x7a /* 4K intrs/sec */ 142 #define IXL_ITR_TX 0x7a /* 4K intrs/sec */ 143 144 #define I40E_INTR_NOTX_QUEUE 0 145 #define I40E_INTR_NOTX_INTR 0 146 #define I40E_INTR_NOTX_RX_QUEUE 0 147 #define I40E_INTR_NOTX_TX_QUEUE 1 148 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK 149 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK 150 151 #define BIT_ULL(a) (1ULL << (a)) 152 #define IXL_RSS_HENA_DEFAULT_BASE \ 153 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ 154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ 155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ 156 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ 157 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ 158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ 159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ 160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ 161 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ 162 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ 163 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) 164 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE 165 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \ 166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ 167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ 168 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ 169 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ 170 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ 171 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) 172 #define I40E_HASH_LUT_SIZE_128 0 173 #define IXL_RSS_KEY_SIZE_REG 13 174 175 #define IXL_ICR0_CRIT_ERR_MASK \ 176 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \ 177 I40E_PFINT_ICR0_ECC_ERR_MASK | \ 178 I40E_PFINT_ICR0_PE_CRITERR_MASK) 179 180 #define IXL_QUEUE_MAX_XL710 64 181 #define IXL_QUEUE_MAX_X722 128 182 183 #define IXL_TX_PKT_DESCS 8 184 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS) 185 #define IXL_TX_QUEUE_ALIGN 128 186 #define IXL_RX_QUEUE_ALIGN 128 187 188 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN) 189 #define IXL_MTU_ETHERLEN ETHER_HDR_LEN \ 190 + ETHER_CRC_LEN 191 #if 0 192 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN) 193 #else 194 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */ 195 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN) 196 #endif 197 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN) 198 199 #define IXL_PCIREG PCI_MAPREG_START 200 201 #define IXL_ITR0 0x0 202 #define IXL_ITR1 0x1 203 #define IXL_ITR2 0x2 204 #define IXL_NOITR 0x3 205 206 #define IXL_AQ_NUM 256 207 #define IXL_AQ_MASK (IXL_AQ_NUM - 1) 208 #define IXL_AQ_ALIGN 64 /* lol */ 209 #define IXL_AQ_BUFLEN 4096 210 211 #define IXL_HMC_ROUNDUP 512 212 #define IXL_HMC_PGSIZE 4096 213 #define IXL_HMC_DVASZ sizeof(uint64_t) 214 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ) 215 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS) 216 #define IXL_HMC_PDVALID 1ULL 217 218 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz) 219 220 #define IXL_SRRD_SRCTL_ATTEMPTS 100000 221 222 struct ixl_aq_regs { 223 bus_size_t atq_tail; 224 bus_size_t atq_head; 225 bus_size_t atq_len; 226 bus_size_t atq_bal; 227 bus_size_t atq_bah; 228 229 bus_size_t arq_tail; 230 bus_size_t arq_head; 231 bus_size_t arq_len; 232 bus_size_t arq_bal; 233 bus_size_t arq_bah; 234 235 uint32_t atq_len_enable; 236 uint32_t atq_tail_mask; 237 uint32_t atq_head_mask; 238 239 uint32_t arq_len_enable; 240 uint32_t arq_tail_mask; 241 uint32_t arq_head_mask; 242 }; 243 244 struct ixl_phy_type { 245 uint64_t phy_type; 246 uint64_t ifm_type; 247 }; 248 249 struct ixl_speed_type { 250 uint8_t dev_speed; 251 uint64_t net_speed; 252 }; 253 254 struct ixl_aq_buf { 255 SIMPLEQ_ENTRY(ixl_aq_buf) 256 aqb_entry; 257 void *aqb_data; 258 bus_dmamap_t aqb_map; 259 bus_dma_segment_t aqb_seg; 260 size_t aqb_size; 261 int aqb_nsegs; 262 }; 263 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf); 264 265 struct ixl_dmamem { 266 bus_dmamap_t ixm_map; 267 bus_dma_segment_t ixm_seg; 268 int ixm_nsegs; 269 size_t ixm_size; 270 void *ixm_kva; 271 }; 272 273 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map) 274 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr) 275 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva) 276 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size) 277 278 struct ixl_hmc_entry { 279 uint64_t hmc_base; 280 uint32_t hmc_count; 281 uint64_t hmc_size; 282 }; 283 284 enum ixl_hmc_types { 285 IXL_HMC_LAN_TX = 0, 286 IXL_HMC_LAN_RX, 287 IXL_HMC_FCOE_CTX, 288 IXL_HMC_FCOE_FILTER, 289 IXL_HMC_COUNT 290 }; 291 292 struct ixl_hmc_pack { 293 uint16_t offset; 294 uint16_t width; 295 uint16_t lsb; 296 }; 297 298 /* 299 * these hmc objects have weird sizes and alignments, so these are abstract 300 * representations of them that are nice for c to populate. 
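 *
 * for example, the ixl_hmc_pack_rxq[] table below puts the 57-bit 'base'
 * field at bit offset 32 of the rx queue context and the 13-bit 'qlen'
 * field right after it at bit offset 89; ixl_hmc_pack() uses such a table
 * of (offset, width, lsb) triples to copy each field from the C struct
 * into the packed context.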
301 * 302 * the packing code relies on little-endian values being stored in the fields, 303 * no high bits in the fields being set, and the fields must be packed in the 304 * same order as they are in the ctx structure. 305 */ 306 307 struct ixl_hmc_rxq { 308 uint16_t head; 309 uint8_t cpuid; 310 uint64_t base; 311 #define IXL_HMC_RXQ_BASE_UNIT 128 312 uint16_t qlen; 313 uint16_t dbuff; 314 #define IXL_HMC_RXQ_DBUFF_UNIT 128 315 uint8_t hbuff; 316 #define IXL_HMC_RXQ_HBUFF_UNIT 64 317 uint8_t dtype; 318 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0 319 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1 320 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2 321 uint8_t dsize; 322 #define IXL_HMC_RXQ_DSIZE_16 0 323 #define IXL_HMC_RXQ_DSIZE_32 1 324 uint8_t crcstrip; 325 uint8_t fc_ena; 326 uint8_t l2sel; 327 uint8_t hsplit_0; 328 uint8_t hsplit_1; 329 uint8_t showiv; 330 uint16_t rxmax; 331 uint8_t tphrdesc_ena; 332 uint8_t tphwdesc_ena; 333 uint8_t tphdata_ena; 334 uint8_t tphhead_ena; 335 uint8_t lrxqthresh; 336 uint8_t prefena; 337 }; 338 339 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = { 340 { offsetof(struct ixl_hmc_rxq, head), 13, 0 }, 341 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 }, 342 { offsetof(struct ixl_hmc_rxq, base), 57, 32 }, 343 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 }, 344 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 }, 345 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 }, 346 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 }, 347 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 }, 348 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 }, 349 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 }, 350 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 }, 351 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 }, 352 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 }, 353 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 }, 354 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 }, 355 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 }, 356 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 }, 357 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 }, 358 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 }, 359 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 }, 360 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 }, 361 }; 362 363 #define IXL_HMC_RXQ_MINSIZE (201 + 1) 364 365 struct ixl_hmc_txq { 366 uint16_t head; 367 uint8_t new_context; 368 uint64_t base; 369 #define IXL_HMC_TXQ_BASE_UNIT 128 370 uint8_t fc_ena; 371 uint8_t timesync_ena; 372 uint8_t fd_ena; 373 uint8_t alt_vlan_ena; 374 uint8_t cpuid; 375 uint16_t thead_wb; 376 uint8_t head_wb_ena; 377 #define IXL_HMC_TXQ_DESC_WB 0 378 #define IXL_HMC_TXQ_HEAD_WB 1 379 uint16_t qlen; 380 uint8_t tphrdesc_ena; 381 uint8_t tphrpacket_ena; 382 uint8_t tphwdesc_ena; 383 uint64_t head_wb_addr; 384 uint32_t crc; 385 uint16_t rdylist; 386 uint8_t rdylist_act; 387 }; 388 389 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = { 390 { offsetof(struct ixl_hmc_txq, head), 13, 0 }, 391 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 }, 392 { offsetof(struct ixl_hmc_txq, base), 57, 32 }, 393 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 }, 394 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 }, 395 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 }, 396 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 }, 397 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 }, 398 /* line 1 */ 399 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 }, 400 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 }, 401 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 }, 402 { 
offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 }, 403 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 }, 404 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 }, 405 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 }, 406 /* line 7 */ 407 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) }, 408 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) }, 409 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) }, 410 }; 411 412 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1) 413 414 struct ixl_work { 415 struct work ixw_cookie; 416 void (*ixw_func)(void *); 417 void *ixw_arg; 418 unsigned int ixw_added; 419 }; 420 #define IXL_WORKQUEUE_PRI PRI_SOFTNET 421 422 struct ixl_tx_map { 423 struct mbuf *txm_m; 424 bus_dmamap_t txm_map; 425 unsigned int txm_eop; 426 }; 427 428 struct ixl_tx_ring { 429 kmutex_t txr_lock; 430 struct ixl_softc *txr_sc; 431 432 unsigned int txr_prod; 433 unsigned int txr_cons; 434 435 struct ixl_tx_map *txr_maps; 436 struct ixl_dmamem txr_mem; 437 438 bus_size_t txr_tail; 439 unsigned int txr_qid; 440 pcq_t *txr_intrq; 441 void *txr_si; 442 443 struct evcnt txr_defragged; 444 struct evcnt txr_defrag_failed; 445 struct evcnt txr_pcqdrop; 446 struct evcnt txr_transmitdef; 447 struct evcnt txr_intr; 448 struct evcnt txr_defer; 449 }; 450 451 struct ixl_rx_map { 452 struct mbuf *rxm_m; 453 bus_dmamap_t rxm_map; 454 }; 455 456 struct ixl_rx_ring { 457 kmutex_t rxr_lock; 458 459 unsigned int rxr_prod; 460 unsigned int rxr_cons; 461 462 struct ixl_rx_map *rxr_maps; 463 struct ixl_dmamem rxr_mem; 464 465 struct mbuf *rxr_m_head; 466 struct mbuf **rxr_m_tail; 467 468 bus_size_t rxr_tail; 469 unsigned int rxr_qid; 470 471 struct evcnt rxr_mgethdr_failed; 472 struct evcnt rxr_mgetcl_failed; 473 struct evcnt rxr_mbuf_load_failed; 474 struct evcnt rxr_intr; 475 struct evcnt rxr_defer; 476 }; 477 478 struct ixl_queue_pair { 479 struct ixl_softc *qp_sc; 480 struct ixl_tx_ring *qp_txr; 481 struct ixl_rx_ring *qp_rxr; 482 483 char qp_name[16]; 484 485 void *qp_si; 486 struct work qp_work; 487 bool qp_workqueue; 488 }; 489 490 struct ixl_atq { 491 struct ixl_aq_desc iatq_desc; 492 void (*iatq_fn)(struct ixl_softc *, 493 const struct ixl_aq_desc *); 494 }; 495 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq); 496 497 struct ixl_product { 498 unsigned int vendor_id; 499 unsigned int product_id; 500 }; 501 502 struct ixl_stats_counters { 503 bool isc_has_offset; 504 struct evcnt isc_crc_errors; 505 uint64_t isc_crc_errors_offset; 506 struct evcnt isc_illegal_bytes; 507 uint64_t isc_illegal_bytes_offset; 508 struct evcnt isc_rx_bytes; 509 uint64_t isc_rx_bytes_offset; 510 struct evcnt isc_rx_discards; 511 uint64_t isc_rx_discards_offset; 512 struct evcnt isc_rx_unicast; 513 uint64_t isc_rx_unicast_offset; 514 struct evcnt isc_rx_multicast; 515 uint64_t isc_rx_multicast_offset; 516 struct evcnt isc_rx_broadcast; 517 uint64_t isc_rx_broadcast_offset; 518 struct evcnt isc_rx_size_64; 519 uint64_t isc_rx_size_64_offset; 520 struct evcnt isc_rx_size_127; 521 uint64_t isc_rx_size_127_offset; 522 struct evcnt isc_rx_size_255; 523 uint64_t isc_rx_size_255_offset; 524 struct evcnt isc_rx_size_511; 525 uint64_t isc_rx_size_511_offset; 526 struct evcnt isc_rx_size_1023; 527 uint64_t isc_rx_size_1023_offset; 528 struct evcnt isc_rx_size_1522; 529 uint64_t isc_rx_size_1522_offset; 530 struct evcnt isc_rx_size_big; 531 uint64_t isc_rx_size_big_offset; 532 struct evcnt isc_rx_undersize; 533 uint64_t isc_rx_undersize_offset; 534 struct evcnt isc_rx_oversize; 535 uint64_t 
				isc_rx_oversize_offset;
	struct evcnt		isc_rx_fragments;
	uint64_t		isc_rx_fragments_offset;
	struct evcnt		isc_rx_jabber;
	uint64_t		isc_rx_jabber_offset;
	struct evcnt		isc_tx_bytes;
	uint64_t		isc_tx_bytes_offset;
	struct evcnt		isc_tx_dropped_link_down;
	uint64_t		isc_tx_dropped_link_down_offset;
	struct evcnt		isc_tx_unicast;
	uint64_t		isc_tx_unicast_offset;
	struct evcnt		isc_tx_multicast;
	uint64_t		isc_tx_multicast_offset;
	struct evcnt		isc_tx_broadcast;
	uint64_t		isc_tx_broadcast_offset;
	struct evcnt		isc_tx_size_64;
	uint64_t		isc_tx_size_64_offset;
	struct evcnt		isc_tx_size_127;
	uint64_t		isc_tx_size_127_offset;
	struct evcnt		isc_tx_size_255;
	uint64_t		isc_tx_size_255_offset;
	struct evcnt		isc_tx_size_511;
	uint64_t		isc_tx_size_511_offset;
	struct evcnt		isc_tx_size_1023;
	uint64_t		isc_tx_size_1023_offset;
	struct evcnt		isc_tx_size_1522;
	uint64_t		isc_tx_size_1522_offset;
	struct evcnt		isc_tx_size_big;
	uint64_t		isc_tx_size_big_offset;
	struct evcnt		isc_mac_local_faults;
	uint64_t		isc_mac_local_faults_offset;
	struct evcnt		isc_mac_remote_faults;
	uint64_t		isc_mac_remote_faults_offset;
	struct evcnt		isc_link_xon_rx;
	uint64_t		isc_link_xon_rx_offset;
	struct evcnt		isc_link_xon_tx;
	uint64_t		isc_link_xon_tx_offset;
	struct evcnt		isc_link_xoff_rx;
	uint64_t		isc_link_xoff_rx_offset;
	struct evcnt		isc_link_xoff_tx;
	uint64_t		isc_link_xoff_tx_offset;
	struct evcnt		isc_vsi_rx_discards;
	uint64_t		isc_vsi_rx_discards_offset;
	struct evcnt		isc_vsi_rx_bytes;
	uint64_t		isc_vsi_rx_bytes_offset;
	struct evcnt		isc_vsi_rx_unicast;
	uint64_t		isc_vsi_rx_unicast_offset;
	struct evcnt		isc_vsi_rx_multicast;
	uint64_t		isc_vsi_rx_multicast_offset;
	struct evcnt		isc_vsi_rx_broadcast;
	uint64_t		isc_vsi_rx_broadcast_offset;
	struct evcnt		isc_vsi_tx_errors;
	uint64_t		isc_vsi_tx_errors_offset;
	struct evcnt		isc_vsi_tx_bytes;
	uint64_t		isc_vsi_tx_bytes_offset;
	struct evcnt		isc_vsi_tx_unicast;
	uint64_t		isc_vsi_tx_unicast_offset;
	struct evcnt		isc_vsi_tx_multicast;
	uint64_t		isc_vsi_tx_multicast_offset;
	struct evcnt		isc_vsi_tx_broadcast;
	uint64_t		isc_vsi_tx_broadcast_offset;
};

/*
 * Locking notes:
 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
 *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
 *    - no more than one of these per-ring locks may be held at once.
 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
 *   (a spin mutex).
 *    - this lock cannot be held together with txr_lock or rxr_lock.
 * + fields named sc_arq_* are not protected by any lock.
 *    - all operations on sc_arq_* are done in the single context of
 *      sc_arq_task.
 * + the other fields in ixl_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *    - it must be acquired before any other lock is taken, and may be
 *      released only after the other lock has been released.
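 *
 *   As a concrete example of this ordering, ixl_reinit() below runs with
 *   sc_cfg_lock held (it asserts so) and only takes rxr_lock briefly
 *   around ixl_rxfill().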
 */

struct ixl_softc {
	device_t		 sc_dev;
	struct ethercom		 sc_ec;
	bool			 sc_attached;
	bool			 sc_dead;
	uint32_t		 sc_port;
	struct sysctllog	*sc_sysctllog;
	struct workqueue	*sc_workq;
	struct workqueue	*sc_workq_txrx;
	int			 sc_stats_intval;
	callout_t		 sc_stats_callout;
	struct ixl_work		 sc_stats_task;
	struct ixl_stats_counters
				 sc_stats_counters;
	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;
	uint64_t		 sc_phy_types;
	uint8_t			 sc_phy_abilities;
	uint8_t			 sc_phy_linkspeed;
	uint8_t			 sc_phy_fec_cfg;
	uint16_t		 sc_eee_cap;
	uint32_t		 sc_eeer_val;
	uint8_t			 sc_d3_lpan;
	kmutex_t		 sc_cfg_lock;
	enum i40e_mac_type	 sc_mac_type;
	uint32_t		 sc_rss_table_size;
	uint32_t		 sc_rss_table_entry_width;
	bool			 sc_txrx_workqueue;
	u_int			 sc_tx_process_limit;
	u_int			 sc_rx_process_limit;
	u_int			 sc_tx_intr_process_limit;
	u_int			 sc_rx_intr_process_limit;

	int			 sc_cur_ec_capenable;

	struct pci_attach_args	 sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	unsigned int		 sc_nintrs;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	uint8_t			 sc_pf_id;
	uint16_t		 sc_uplink_seid;	/* le */
	uint16_t		 sc_downlink_seid;	/* le */
	uint16_t		 sc_vsi_number;
	uint16_t		 sc_vsi_stat_counter_idx;
	uint16_t		 sc_seid;
	unsigned int		 sc_base_queue;

	pci_intr_type_t		 sc_intrtype;
	unsigned int		 sc_msix_vector_queue;

	struct ixl_dmamem	 sc_scratch;
	struct ixl_dmamem	 sc_aqbuf;

	const struct ixl_aq_regs *
				 sc_aq_regs;
	uint32_t		 sc_aq_flags;
#define IXL_SC_AQ_FLAG_RXCTL	__BIT(0)
#define IXL_SC_AQ_FLAG_NVMLOCK	__BIT(1)
#define IXL_SC_AQ_FLAG_NVMREAD	__BIT(2)
#define IXL_SC_AQ_FLAG_RSS	__BIT(3)

	kmutex_t		 sc_atq_lock;
	kcondvar_t		 sc_atq_cv;
	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	struct ixl_dmamem	 sc_arq;
	struct ixl_work		 sc_arq_task;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_buf	*sc_arq_live[IXL_AQ_NUM];
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	struct ixl_work		 sc_link_state_task;
	struct ixl_atq		 sc_link_state_atq;

	struct ixl_dmamem	 sc_hmc_sd;
	struct ixl_dmamem	 sc_hmc_pd;
	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];

	struct if_percpuq	*sc_ipq;
	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueue_pairs;
	unsigned int		 sc_nqueue_pairs_max;
	unsigned int		 sc_nqueue_pairs_device;
	struct ixl_queue_pair	*sc_qps;
	uint32_t		 sc_itr_rx;
	uint32_t		 sc_itr_tx;

	struct evcnt		 sc_event_atq;
	struct evcnt		 sc_event_link;
	struct evcnt		 sc_event_ecc_err;
	struct evcnt		 sc_event_pci_exception;
	struct evcnt		 sc_event_crit_err;
};

#define IXL_TXRX_PROCESS_UNLIMIT	UINT_MAX
#define IXL_TX_PROCESS_LIMIT		256
#define IXL_RX_PROCESS_LIMIT		256
#define IXL_TX_INTR_PROCESS_LIMIT	256
#define IXL_RX_INTR_PROCESS_LIMIT	0U

#define IXL_IFCAP_RXCSUM	(IFCAP_CSUM_IPv4_Rx |	\
				 IFCAP_CSUM_TCPv4_Rx |	\
				 IFCAP_CSUM_UDPv4_Rx |	\
				 IFCAP_CSUM_TCPv6_Rx |	\
				 IFCAP_CSUM_UDPv6_Rx)
#define IXL_IFCAP_TXCSUM	(IFCAP_CSUM_IPv4_Tx |	\
				 IFCAP_CSUM_TCPv4_Tx |	\
				 IFCAP_CSUM_UDPv4_Tx |	\
				 IFCAP_CSUM_TCPv6_Tx |	\
				 IFCAP_CSUM_UDPv6_Tx)
#define IXL_CSUM_ALL_OFFLOAD	(M_CSUM_IPv4 |	\
				 M_CSUM_TCPv4 | \
M_CSUM_TCPv6 | \ 739 M_CSUM_UDPv4 | M_CSUM_UDPv6) 740 741 #define delaymsec(_x) DELAY(1000 * (_x)) 742 #ifdef IXL_DEBUG 743 #define DDPRINTF(sc, fmt, args...) \ 744 do { \ 745 if ((sc) != NULL) { \ 746 device_printf( \ 747 ((struct ixl_softc *)(sc))->sc_dev, \ 748 ""); \ 749 } \ 750 printf("%s:\t" fmt, __func__, ##args); \ 751 } while (0) 752 #else 753 #define DDPRINTF(sc, fmt, args...) __nothing 754 #endif 755 #ifndef IXL_STATS_INTERVAL_MSEC 756 #define IXL_STATS_INTERVAL_MSEC 10000 757 #endif 758 #ifndef IXL_QUEUE_NUM 759 #define IXL_QUEUE_NUM 0 760 #endif 761 762 static bool ixl_param_nomsix = false; 763 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC; 764 static int ixl_param_nqps_limit = IXL_QUEUE_NUM; 765 static unsigned int ixl_param_tx_ndescs = 1024; 766 static unsigned int ixl_param_rx_ndescs = 1024; 767 768 static enum i40e_mac_type 769 ixl_mactype(pci_product_id_t); 770 static void ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t); 771 static void ixl_clear_hw(struct ixl_softc *); 772 static int ixl_pf_reset(struct ixl_softc *); 773 774 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *, 775 bus_size_t, bus_size_t); 776 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *); 777 778 static int ixl_arq_fill(struct ixl_softc *); 779 static void ixl_arq_unfill(struct ixl_softc *); 780 781 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *, 782 unsigned int); 783 static void ixl_atq_set(struct ixl_atq *, 784 void (*)(struct ixl_softc *, const struct ixl_aq_desc *)); 785 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *); 786 static void ixl_atq_done(struct ixl_softc *); 787 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *); 788 static int ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *); 789 static int ixl_get_version(struct ixl_softc *); 790 static int ixl_get_nvm_version(struct ixl_softc *); 791 static int ixl_get_hw_capabilities(struct ixl_softc *); 792 static int ixl_pxe_clear(struct ixl_softc *); 793 static int ixl_lldp_shut(struct ixl_softc *); 794 static int ixl_get_mac(struct ixl_softc *); 795 static int ixl_get_switch_config(struct ixl_softc *); 796 static int ixl_phy_mask_ints(struct ixl_softc *); 797 static int ixl_get_phy_info(struct ixl_softc *); 798 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool); 799 static int ixl_set_phy_autoselect(struct ixl_softc *); 800 static int ixl_restart_an(struct ixl_softc *); 801 static int ixl_hmc(struct ixl_softc *); 802 static void ixl_hmc_free(struct ixl_softc *); 803 static int ixl_get_vsi(struct ixl_softc *); 804 static int ixl_set_vsi(struct ixl_softc *); 805 static void ixl_set_filter_control(struct ixl_softc *); 806 static void ixl_get_link_status(void *); 807 static int ixl_get_link_status_poll(struct ixl_softc *, int *); 808 static void ixl_get_link_status_done(struct ixl_softc *, 809 const struct ixl_aq_desc *); 810 static int ixl_set_link_status_locked(struct ixl_softc *, 811 const struct ixl_aq_desc *); 812 static uint64_t ixl_search_link_speed(uint8_t); 813 static uint8_t ixl_search_baudrate(uint64_t); 814 static void ixl_config_rss(struct ixl_softc *); 815 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *, 816 uint16_t, uint16_t); 817 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *, 818 uint16_t, uint16_t); 819 static void ixl_arq(void *); 820 static void ixl_hmc_pack(void *, const void *, 821 const struct ixl_hmc_pack *, unsigned int); 822 static uint32_t 
ixl_rd_rx_csr(struct ixl_softc *, uint32_t); 823 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t); 824 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *); 825 826 static int ixl_match(device_t, cfdata_t, void *); 827 static void ixl_attach(device_t, device_t, void *); 828 static int ixl_detach(device_t, int); 829 830 static void ixl_media_add(struct ixl_softc *); 831 static int ixl_media_change(struct ifnet *); 832 static void ixl_media_status(struct ifnet *, struct ifmediareq *); 833 static void ixl_watchdog(struct ifnet *); 834 static int ixl_ioctl(struct ifnet *, u_long, void *); 835 static void ixl_start(struct ifnet *); 836 static int ixl_transmit(struct ifnet *, struct mbuf *); 837 static void ixl_deferred_transmit(void *); 838 static int ixl_intr(void *); 839 static int ixl_queue_intr(void *); 840 static int ixl_other_intr(void *); 841 static void ixl_handle_queue(void *); 842 static void ixl_handle_queue_wk(struct work *, void *); 843 static void ixl_sched_handle_queue(struct ixl_softc *, 844 struct ixl_queue_pair *); 845 static int ixl_init(struct ifnet *); 846 static int ixl_init_locked(struct ixl_softc *); 847 static void ixl_stop(struct ifnet *, int); 848 static void ixl_stop_locked(struct ixl_softc *); 849 static int ixl_iff(struct ixl_softc *); 850 static int ixl_ifflags_cb(struct ethercom *); 851 static int ixl_setup_interrupts(struct ixl_softc *); 852 static int ixl_establish_intx(struct ixl_softc *); 853 static int ixl_establish_msix(struct ixl_softc *); 854 static void ixl_enable_queue_intr(struct ixl_softc *, 855 struct ixl_queue_pair *); 856 static void ixl_disable_queue_intr(struct ixl_softc *, 857 struct ixl_queue_pair *); 858 static void ixl_enable_other_intr(struct ixl_softc *); 859 static void ixl_disable_other_intr(struct ixl_softc *); 860 static void ixl_config_queue_intr(struct ixl_softc *); 861 static void ixl_config_other_intr(struct ixl_softc *); 862 863 static struct ixl_tx_ring * 864 ixl_txr_alloc(struct ixl_softc *, unsigned int); 865 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int); 866 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *); 867 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *); 868 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *); 869 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *); 870 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *); 871 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *); 872 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int); 873 874 static struct ixl_rx_ring * 875 ixl_rxr_alloc(struct ixl_softc *, unsigned int); 876 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *); 877 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *); 878 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *); 879 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *); 880 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *); 881 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *); 882 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int); 883 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *); 884 885 static struct workqueue * 886 ixl_workq_create(const char *, pri_t, int, int); 887 static void ixl_workq_destroy(struct workqueue *); 888 static int ixl_workqs_teardown(device_t); 889 static void ixl_work_set(struct ixl_work *, void (*)(void 
*), void *); 890 static void ixl_work_add(struct workqueue *, struct ixl_work *); 891 static void ixl_work_wait(struct workqueue *, struct ixl_work *); 892 static void ixl_workq_work(struct work *, void *); 893 static const struct ixl_product * 894 ixl_lookup(const struct pci_attach_args *pa); 895 static void ixl_link_state_update(struct ixl_softc *, 896 const struct ixl_aq_desc *); 897 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool); 898 static int ixl_setup_vlan_hwfilter(struct ixl_softc *); 899 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *); 900 static int ixl_update_macvlan(struct ixl_softc *); 901 static int ixl_setup_interrupts(struct ixl_softc *); 902 static void ixl_teardown_interrupts(struct ixl_softc *); 903 static int ixl_setup_stats(struct ixl_softc *); 904 static void ixl_teardown_stats(struct ixl_softc *); 905 static void ixl_stats_callout(void *); 906 static void ixl_stats_update(void *); 907 static int ixl_setup_sysctls(struct ixl_softc *); 908 static void ixl_teardown_sysctls(struct ixl_softc *); 909 static int ixl_sysctl_itr_handler(SYSCTLFN_PROTO); 910 static int ixl_sysctl_ndescs_handler(SYSCTLFN_PROTO); 911 static int ixl_queue_pairs_alloc(struct ixl_softc *); 912 static void ixl_queue_pairs_free(struct ixl_softc *); 913 914 static const struct ixl_phy_type ixl_phy_type_map[] = { 915 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII }, 916 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX }, 917 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 }, 918 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR }, 919 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 }, 920 { 1ULL << IXL_PHY_TYPE_XAUI | 921 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 }, 922 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI }, 923 { 1ULL << IXL_PHY_TYPE_XLAUI | 924 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI }, 925 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU | 926 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 }, 927 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU | 928 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 }, 929 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC }, 930 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC }, 931 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX }, 932 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL | 933 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T }, 934 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T }, 935 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR }, 936 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR }, 937 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX }, 938 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 }, 939 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 }, 940 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX }, 941 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX }, 942 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 }, 943 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR }, 944 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR }, 945 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR }, 946 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR }, 947 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC }, 948 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_ACC }, 949 }; 950 951 static const struct ixl_speed_type ixl_speed_type_map[] = { 952 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) }, 953 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) }, 954 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) }, 955 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) }, 956 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)}, 957 }; 958 959 static const struct ixl_aq_regs ixl_pf_aq_regs = { 960 .atq_tail = I40E_PF_ATQT, 961 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK, 962 
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#define ixl_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_flush(_s)		(void)ixl_rd((_s), I40E_GLGEN_STAT)
#define ixl_nqueues(_sc)	(1 << ((_sc)->sc_nqueue_pairs - 1))

static inline uint32_t
ixl_dmamem_hi(struct ixl_dmamem *ixm)
{
	uint32_t retval;
	uint64_t val;

	if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
		val = (intptr_t)IXL_DMA_DVA(ixm);
		retval = (uint32_t)(val >> 32);
	} else {
		retval = 0;
	}

	return retval;
}

static inline uint32_t
ixl_dmamem_lo(struct ixl_dmamem *ixm)
{

	return (uint32_t)IXL_DMA_DVA(ixm);
}

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
	uint64_t val;

	if (sizeof(addr) > 4) {
		val = (intptr_t)addr;
		iaq->iaq_param[2] = htole32(val >> 32);
	} else {
		iaq->iaq_param[2] = htole32(0);
	}

	iaq->iaq_param[3] = htole32(addr);
}

static inline unsigned int
ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
{
	unsigned int num;

	if (prod < cons)
		num = cons - prod;
	else
		num = (ndescs - prod) + cons;

	if (__predict_true(num > 0)) {
		/* the device cannot receive packets if all descriptors are filled */
		num -= 1;
	}

	return num;
}

CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
    ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static const struct ixl_product ixl_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_KX_B },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_KX_C },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_A },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_B },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_C },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_T4_10G },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_KX },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_QSFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_1G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_I_SFP },
	/* required last entry */
	{0, 0}
};

static const struct ixl_product *
ixl_lookup(const struct pci_attach_args *pa)
{
	const struct
ixl_product *ixlp; 1075 1076 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) { 1077 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id && 1078 PCI_PRODUCT(pa->pa_id) == ixlp->product_id) 1079 return ixlp; 1080 } 1081 1082 return NULL; 1083 } 1084 1085 static int 1086 ixl_match(device_t parent, cfdata_t match, void *aux) 1087 { 1088 const struct pci_attach_args *pa = aux; 1089 1090 return (ixl_lookup(pa) != NULL) ? 1 : 0; 1091 } 1092 1093 static void 1094 ixl_attach(device_t parent, device_t self, void *aux) 1095 { 1096 struct ixl_softc *sc; 1097 struct pci_attach_args *pa = aux; 1098 struct ifnet *ifp; 1099 pcireg_t memtype; 1100 uint32_t firstq, port, ari, func; 1101 char xnamebuf[32]; 1102 int tries, rv, link; 1103 1104 sc = device_private(self); 1105 sc->sc_dev = self; 1106 ifp = &sc->sc_ec.ec_if; 1107 1108 sc->sc_pa = *pa; 1109 sc->sc_dmat = (pci_dma64_available(pa)) ? 1110 pa->pa_dmat64 : pa->pa_dmat; 1111 sc->sc_aq_regs = &ixl_pf_aq_regs; 1112 1113 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id)); 1114 1115 ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag); 1116 1117 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG); 1118 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0, 1119 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) { 1120 aprint_error(": unable to map registers\n"); 1121 return; 1122 } 1123 1124 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET); 1125 1126 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC); 1127 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK; 1128 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 1129 sc->sc_base_queue = firstq; 1130 1131 ixl_clear_hw(sc); 1132 if (ixl_pf_reset(sc) == -1) { 1133 /* error printed by ixl pf_reset */ 1134 goto unmap; 1135 } 1136 1137 port = ixl_rd(sc, I40E_PFGEN_PORTNUM); 1138 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK; 1139 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; 1140 sc->sc_port = port; 1141 aprint_normal(": port %u", sc->sc_port); 1142 1143 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP); 1144 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK; 1145 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; 1146 1147 func = ixl_rd(sc, I40E_PF_FUNC_RID); 1148 sc->sc_pf_id = func & (ari ? 
0xff : 0x7); 1149 1150 /* initialise the adminq */ 1151 1152 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET); 1153 1154 if (ixl_dmamem_alloc(sc, &sc->sc_atq, 1155 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1156 aprint_error("\n" "%s: unable to allocate atq\n", 1157 device_xname(self)); 1158 goto unmap; 1159 } 1160 1161 SIMPLEQ_INIT(&sc->sc_arq_idle); 1162 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc); 1163 sc->sc_arq_cons = 0; 1164 sc->sc_arq_prod = 0; 1165 1166 if (ixl_dmamem_alloc(sc, &sc->sc_arq, 1167 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1168 aprint_error("\n" "%s: unable to allocate arq\n", 1169 device_xname(self)); 1170 goto free_atq; 1171 } 1172 1173 if (!ixl_arq_fill(sc)) { 1174 aprint_error("\n" "%s: unable to fill arq descriptors\n", 1175 device_xname(self)); 1176 goto free_arq; 1177 } 1178 1179 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1180 0, IXL_DMA_LEN(&sc->sc_atq), 1181 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1182 1183 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1184 0, IXL_DMA_LEN(&sc->sc_arq), 1185 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1186 1187 for (tries = 0; tries < 10; tries++) { 1188 sc->sc_atq_cons = 0; 1189 sc->sc_atq_prod = 0; 1190 1191 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1192 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1193 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1194 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1195 1196 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 1197 1198 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 1199 ixl_dmamem_lo(&sc->sc_atq)); 1200 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 1201 ixl_dmamem_hi(&sc->sc_atq)); 1202 ixl_wr(sc, sc->sc_aq_regs->atq_len, 1203 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM); 1204 1205 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 1206 ixl_dmamem_lo(&sc->sc_arq)); 1207 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 1208 ixl_dmamem_hi(&sc->sc_arq)); 1209 ixl_wr(sc, sc->sc_aq_regs->arq_len, 1210 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM); 1211 1212 rv = ixl_get_version(sc); 1213 if (rv == 0) 1214 break; 1215 if (rv != ETIMEDOUT) { 1216 aprint_error(", unable to get firmware version\n"); 1217 goto shutdown; 1218 } 1219 1220 delaymsec(100); 1221 } 1222 1223 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 1224 1225 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) { 1226 aprint_error_dev(self, ", unable to allocate nvm buffer\n"); 1227 goto shutdown; 1228 } 1229 1230 ixl_get_nvm_version(sc); 1231 1232 if (sc->sc_mac_type == I40E_MAC_X722) 1233 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722; 1234 else 1235 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710; 1236 1237 rv = ixl_get_hw_capabilities(sc); 1238 if (rv != 0) { 1239 aprint_error(", GET HW CAPABILITIES %s\n", 1240 rv == ETIMEDOUT ? 
"timeout" : "error"); 1241 goto free_aqbuf; 1242 } 1243 1244 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu); 1245 if (ixl_param_nqps_limit > 0) { 1246 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max, 1247 ixl_param_nqps_limit); 1248 } 1249 1250 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 1251 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs; 1252 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs; 1253 1254 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs); 1255 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs); 1256 KASSERT(sc->sc_rx_ring_ndescs == 1257 (1U << (fls32(sc->sc_rx_ring_ndescs) - 1))); 1258 KASSERT(sc->sc_tx_ring_ndescs == 1259 (1U << (fls32(sc->sc_tx_ring_ndescs) - 1))); 1260 1261 if (ixl_get_mac(sc) != 0) { 1262 /* error printed by ixl_get_mac */ 1263 goto free_aqbuf; 1264 } 1265 1266 aprint_normal("\n"); 1267 aprint_naive("\n"); 1268 1269 aprint_normal_dev(self, "Ethernet address %s\n", 1270 ether_sprintf(sc->sc_enaddr)); 1271 1272 rv = ixl_pxe_clear(sc); 1273 if (rv != 0) { 1274 aprint_debug_dev(self, "CLEAR PXE MODE %s\n", 1275 rv == ETIMEDOUT ? "timeout" : "error"); 1276 } 1277 1278 ixl_set_filter_control(sc); 1279 1280 if (ixl_hmc(sc) != 0) { 1281 /* error printed by ixl_hmc */ 1282 goto free_aqbuf; 1283 } 1284 1285 if (ixl_lldp_shut(sc) != 0) { 1286 /* error printed by ixl_lldp_shut */ 1287 goto free_hmc; 1288 } 1289 1290 if (ixl_phy_mask_ints(sc) != 0) { 1291 /* error printed by ixl_phy_mask_ints */ 1292 goto free_hmc; 1293 } 1294 1295 if (ixl_restart_an(sc) != 0) { 1296 /* error printed by ixl_restart_an */ 1297 goto free_hmc; 1298 } 1299 1300 if (ixl_get_switch_config(sc) != 0) { 1301 /* error printed by ixl_get_switch_config */ 1302 goto free_hmc; 1303 } 1304 1305 rv = ixl_get_link_status_poll(sc, NULL); 1306 if (rv != 0) { 1307 aprint_error_dev(self, "GET LINK STATUS %s\n", 1308 rv == ETIMEDOUT ? "timeout" : "error"); 1309 goto free_hmc; 1310 } 1311 1312 /* 1313 * The FW often returns EIO in "Get PHY Abilities" command 1314 * if there is no delay 1315 */ 1316 DELAY(500); 1317 if (ixl_get_phy_info(sc) != 0) { 1318 /* error printed by ixl_get_phy_info */ 1319 goto free_hmc; 1320 } 1321 1322 if (ixl_dmamem_alloc(sc, &sc->sc_scratch, 1323 sizeof(struct ixl_aq_vsi_data), 8) != 0) { 1324 aprint_error_dev(self, "unable to allocate scratch buffer\n"); 1325 goto free_hmc; 1326 } 1327 1328 rv = ixl_get_vsi(sc); 1329 if (rv != 0) { 1330 aprint_error_dev(self, "GET VSI %s %d\n", 1331 rv == ETIMEDOUT ? "timeout" : "error", rv); 1332 goto free_scratch; 1333 } 1334 1335 rv = ixl_set_vsi(sc); 1336 if (rv != 0) { 1337 aprint_error_dev(self, "UPDATE VSI error %s %d\n", 1338 rv == ETIMEDOUT ? 
"timeout" : "error", rv); 1339 goto free_scratch; 1340 } 1341 1342 if (ixl_queue_pairs_alloc(sc) != 0) { 1343 /* error printed by ixl_queue_pairs_alloc */ 1344 goto free_scratch; 1345 } 1346 1347 if (ixl_setup_interrupts(sc) != 0) { 1348 /* error printed by ixl_setup_interrupts */ 1349 goto free_queue_pairs; 1350 } 1351 1352 if (ixl_setup_stats(sc) != 0) { 1353 aprint_error_dev(self, "failed to setup event counters\n"); 1354 goto teardown_intrs; 1355 } 1356 1357 if (ixl_setup_sysctls(sc) != 0) { 1358 /* error printed by ixl_setup_sysctls */ 1359 goto teardown_stats; 1360 } 1361 1362 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self)); 1363 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI, 1364 IPL_NET, WQ_MPSAFE); 1365 if (sc->sc_workq == NULL) 1366 goto teardown_sysctls; 1367 1368 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self)); 1369 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk, 1370 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE); 1371 if (rv != 0) { 1372 sc->sc_workq_txrx = NULL; 1373 goto teardown_wqs; 1374 } 1375 1376 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self)); 1377 cv_init(&sc->sc_atq_cv, xnamebuf); 1378 1379 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 1380 1381 ifp->if_softc = sc; 1382 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1383 ifp->if_extflags = IFEF_MPSAFE; 1384 ifp->if_ioctl = ixl_ioctl; 1385 ifp->if_start = ixl_start; 1386 ifp->if_transmit = ixl_transmit; 1387 ifp->if_watchdog = ixl_watchdog; 1388 ifp->if_init = ixl_init; 1389 ifp->if_stop = ixl_stop; 1390 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs); 1391 IFQ_SET_READY(&ifp->if_snd); 1392 ifp->if_capabilities |= IXL_IFCAP_RXCSUM; 1393 ifp->if_capabilities |= IXL_IFCAP_TXCSUM; 1394 #if 0 1395 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6; 1396 #endif 1397 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb); 1398 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1399 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 1400 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1401 1402 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities; 1403 /* Disable VLAN_HWFILTER by default */ 1404 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1405 1406 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable; 1407 1408 sc->sc_ec.ec_ifmedia = &sc->sc_media; 1409 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change, 1410 ixl_media_status, &sc->sc_cfg_lock); 1411 1412 ixl_media_add(sc); 1413 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 1414 if (ISSET(sc->sc_phy_abilities, 1415 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1416 ifmedia_add(&sc->sc_media, 1417 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL); 1418 } 1419 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL); 1420 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 1421 1422 rv = if_initialize(ifp); 1423 if (rv != 0) { 1424 aprint_error_dev(self, "if_initialize failed=%d\n", rv); 1425 goto teardown_wqs; 1426 } 1427 1428 sc->sc_ipq = if_percpuq_create(ifp); 1429 if_deferred_start_init(ifp, NULL); 1430 ether_ifattach(ifp, sc->sc_enaddr); 1431 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb); 1432 1433 rv = ixl_get_link_status_poll(sc, &link); 1434 if (rv != 0) 1435 link = LINK_STATE_UNKNOWN; 1436 if_link_state_change(ifp, link); 1437 1438 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 1439 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc); 1440 1441 ixl_config_other_intr(sc); 
1442 ixl_enable_other_intr(sc); 1443 1444 ixl_set_phy_autoselect(sc); 1445 1446 /* remove default mac filter and replace it so we can see vlans */ 1447 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0); 1448 if (rv != ENOENT) { 1449 aprint_debug_dev(self, 1450 "unable to remove macvlan %u\n", rv); 1451 } 1452 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 1453 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1454 if (rv != ENOENT) { 1455 aprint_debug_dev(self, 1456 "unable to remove macvlan, ignore vlan %u\n", rv); 1457 } 1458 1459 if (ixl_update_macvlan(sc) != 0) { 1460 aprint_debug_dev(self, 1461 "couldn't enable vlan hardware filter\n"); 1462 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1463 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 1464 } 1465 1466 sc->sc_txrx_workqueue = true; 1467 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT; 1468 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT; 1469 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT; 1470 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT; 1471 1472 ixl_stats_update(sc); 1473 sc->sc_stats_counters.isc_has_offset = true; 1474 1475 if (pmf_device_register(self, NULL, NULL) != true) 1476 aprint_debug_dev(self, "couldn't establish power handler\n"); 1477 sc->sc_itr_rx = IXL_ITR_RX; 1478 sc->sc_itr_tx = IXL_ITR_TX; 1479 sc->sc_attached = true; 1480 if_register(ifp); 1481 1482 return; 1483 1484 teardown_wqs: 1485 config_finalize_register(self, ixl_workqs_teardown); 1486 teardown_sysctls: 1487 ixl_teardown_sysctls(sc); 1488 teardown_stats: 1489 ixl_teardown_stats(sc); 1490 teardown_intrs: 1491 ixl_teardown_interrupts(sc); 1492 free_queue_pairs: 1493 ixl_queue_pairs_free(sc); 1494 free_scratch: 1495 ixl_dmamem_free(sc, &sc->sc_scratch); 1496 free_hmc: 1497 ixl_hmc_free(sc); 1498 free_aqbuf: 1499 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1500 shutdown: 1501 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1502 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1503 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1504 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1505 1506 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1507 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1508 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1509 1510 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1511 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1512 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1513 1514 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1515 0, IXL_DMA_LEN(&sc->sc_arq), 1516 BUS_DMASYNC_POSTREAD); 1517 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1518 0, IXL_DMA_LEN(&sc->sc_atq), 1519 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1520 1521 ixl_arq_unfill(sc); 1522 free_arq: 1523 ixl_dmamem_free(sc, &sc->sc_arq); 1524 free_atq: 1525 ixl_dmamem_free(sc, &sc->sc_atq); 1526 unmap: 1527 mutex_destroy(&sc->sc_atq_lock); 1528 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1529 mutex_destroy(&sc->sc_cfg_lock); 1530 sc->sc_mems = 0; 1531 1532 sc->sc_attached = false; 1533 } 1534 1535 static int 1536 ixl_detach(device_t self, int flags) 1537 { 1538 struct ixl_softc *sc = device_private(self); 1539 struct ifnet *ifp = &sc->sc_ec.ec_if; 1540 1541 if (!sc->sc_attached) 1542 return 0; 1543 1544 ixl_stop(ifp, 1); 1545 1546 ixl_disable_other_intr(sc); 1547 1548 callout_halt(&sc->sc_stats_callout, NULL); 1549 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task); 1550 1551 /* wait for ATQ handler */ 1552 mutex_enter(&sc->sc_atq_lock); 1553 mutex_exit(&sc->sc_atq_lock); 1554 1555 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task); 1556 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task); 1557 1558 if 
(sc->sc_workq != NULL) { 1559 ixl_workq_destroy(sc->sc_workq); 1560 sc->sc_workq = NULL; 1561 } 1562 1563 if (sc->sc_workq_txrx != NULL) { 1564 workqueue_destroy(sc->sc_workq_txrx); 1565 sc->sc_workq_txrx = NULL; 1566 } 1567 1568 if_percpuq_destroy(sc->sc_ipq); 1569 ether_ifdetach(ifp); 1570 if_detach(ifp); 1571 ifmedia_fini(&sc->sc_media); 1572 1573 ixl_teardown_interrupts(sc); 1574 ixl_teardown_stats(sc); 1575 ixl_teardown_sysctls(sc); 1576 1577 ixl_queue_pairs_free(sc); 1578 1579 ixl_dmamem_free(sc, &sc->sc_scratch); 1580 ixl_hmc_free(sc); 1581 1582 /* shutdown */ 1583 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1584 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1585 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1586 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1587 1588 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1589 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1590 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1591 1592 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1593 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1594 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1595 1596 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1597 0, IXL_DMA_LEN(&sc->sc_arq), 1598 BUS_DMASYNC_POSTREAD); 1599 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1600 0, IXL_DMA_LEN(&sc->sc_atq), 1601 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1602 1603 ixl_arq_unfill(sc); 1604 1605 ixl_dmamem_free(sc, &sc->sc_arq); 1606 ixl_dmamem_free(sc, &sc->sc_atq); 1607 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1608 1609 cv_destroy(&sc->sc_atq_cv); 1610 mutex_destroy(&sc->sc_atq_lock); 1611 1612 if (sc->sc_mems != 0) { 1613 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1614 sc->sc_mems = 0; 1615 } 1616 1617 mutex_destroy(&sc->sc_cfg_lock); 1618 1619 return 0; 1620 } 1621 1622 static int 1623 ixl_workqs_teardown(device_t self) 1624 { 1625 struct ixl_softc *sc = device_private(self); 1626 1627 if (sc->sc_workq != NULL) { 1628 ixl_workq_destroy(sc->sc_workq); 1629 sc->sc_workq = NULL; 1630 } 1631 1632 if (sc->sc_workq_txrx != NULL) { 1633 workqueue_destroy(sc->sc_workq_txrx); 1634 sc->sc_workq_txrx = NULL; 1635 } 1636 1637 return 0; 1638 } 1639 1640 static int 1641 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 1642 { 1643 struct ifnet *ifp = &ec->ec_if; 1644 struct ixl_softc *sc = ifp->if_softc; 1645 int rv; 1646 1647 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 1648 return 0; 1649 } 1650 1651 if (set) { 1652 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid, 1653 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1654 if (rv == 0) { 1655 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 1656 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1657 } 1658 } else { 1659 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid, 1660 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1661 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid, 1662 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1663 } 1664 1665 return rv; 1666 } 1667 1668 static void 1669 ixl_media_add(struct ixl_softc *sc) 1670 { 1671 struct ifmedia *ifm = &sc->sc_media; 1672 const struct ixl_phy_type *itype; 1673 unsigned int i; 1674 bool flow; 1675 1676 if (ISSET(sc->sc_phy_abilities, 1677 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1678 flow = true; 1679 } else { 1680 flow = false; 1681 } 1682 1683 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 1684 itype = &ixl_phy_type_map[i]; 1685 1686 if (ISSET(sc->sc_phy_types, itype->phy_type)) { 1687 ifmedia_add(ifm, 1688 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL); 1689 1690 if (flow) { 1691 ifmedia_add(ifm, 1692 IFM_ETHER | IFM_FDX | IFM_FLOW | 1693 
itype->ifm_type, 0, NULL); 1694 } 1695 1696 if (itype->ifm_type != IFM_100_TX) 1697 continue; 1698 1699 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 1700 0, NULL); 1701 if (flow) { 1702 ifmedia_add(ifm, 1703 IFM_ETHER | IFM_FLOW | itype->ifm_type, 1704 0, NULL); 1705 } 1706 } 1707 } 1708 } 1709 1710 static void 1711 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1712 { 1713 struct ixl_softc *sc = ifp->if_softc; 1714 1715 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 1716 1717 ifmr->ifm_status = sc->sc_media_status; 1718 ifmr->ifm_active = sc->sc_media_active; 1719 } 1720 1721 static int 1722 ixl_media_change(struct ifnet *ifp) 1723 { 1724 struct ixl_softc *sc = ifp->if_softc; 1725 struct ifmedia *ifm = &sc->sc_media; 1726 uint64_t ifm_active = sc->sc_media_active; 1727 uint8_t link_speed, abilities; 1728 1729 switch (IFM_SUBTYPE(ifm_active)) { 1730 case IFM_1000_SGMII: 1731 case IFM_1000_KX: 1732 case IFM_10G_KX4: 1733 case IFM_10G_KR: 1734 case IFM_40G_KR4: 1735 case IFM_20G_KR2: 1736 case IFM_25G_KR: 1737 /* backplanes */ 1738 return EINVAL; 1739 } 1740 1741 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP; 1742 1743 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1744 case IFM_AUTO: 1745 link_speed = sc->sc_phy_linkspeed; 1746 break; 1747 case IFM_NONE: 1748 link_speed = 0; 1749 CLR(abilities, IXL_PHY_ABILITY_LINKUP); 1750 break; 1751 default: 1752 link_speed = ixl_search_baudrate( 1753 ifmedia_baudrate(ifm->ifm_media)); 1754 } 1755 1756 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) { 1757 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0) 1758 return EINVAL; 1759 } 1760 1761 if (ifm->ifm_media & IFM_FLOW) { 1762 abilities |= sc->sc_phy_abilities & 1763 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX); 1764 } 1765 1766 return ixl_set_phy_config(sc, link_speed, abilities, false); 1767 } 1768 1769 static void 1770 ixl_watchdog(struct ifnet *ifp) 1771 { 1772 1773 } 1774 1775 static void 1776 ixl_del_all_multiaddr(struct ixl_softc *sc) 1777 { 1778 struct ethercom *ec = &sc->sc_ec; 1779 struct ether_multi *enm; 1780 struct ether_multistep step; 1781 1782 ETHER_LOCK(ec); 1783 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1784 ETHER_NEXT_MULTI(step, enm)) { 1785 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1786 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1787 } 1788 ETHER_UNLOCK(ec); 1789 } 1790 1791 static int 1792 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1793 { 1794 struct ifnet *ifp = &sc->sc_ec.ec_if; 1795 int rv; 1796 1797 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) 1798 return 0; 1799 1800 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) { 1801 ixl_del_all_multiaddr(sc); 1802 SET(ifp->if_flags, IFF_ALLMULTI); 1803 return ENETRESET; 1804 } 1805 1806 /* multicast address can not use VLAN HWFILTER */ 1807 rv = ixl_add_macvlan(sc, addrlo, 0, 1808 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1809 1810 if (rv == ENOSPC) { 1811 ixl_del_all_multiaddr(sc); 1812 SET(ifp->if_flags, IFF_ALLMULTI); 1813 return ENETRESET; 1814 } 1815 1816 return rv; 1817 } 1818 1819 static int 1820 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1821 { 1822 struct ifnet *ifp = &sc->sc_ec.ec_if; 1823 struct ethercom *ec = &sc->sc_ec; 1824 struct ether_multi *enm, *enm_last; 1825 struct ether_multistep step; 1826 int error, rv = 0; 1827 1828 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) { 1829 ixl_remove_macvlan(sc, addrlo, 0, 1830 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1831 return 0; 1832 } 1833 1834 ETHER_LOCK(ec); 1835 for (ETHER_FIRST_MULTI(step, ec, enm); enm != 
NULL; 1836 ETHER_NEXT_MULTI(step, enm)) { 1837 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1838 ETHER_ADDR_LEN) != 0) { 1839 goto out; 1840 } 1841 } 1842 1843 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1844 ETHER_NEXT_MULTI(step, enm)) { 1845 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0, 1846 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1847 if (error != 0) 1848 break; 1849 } 1850 1851 if (enm != NULL) { 1852 enm_last = enm; 1853 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1854 ETHER_NEXT_MULTI(step, enm)) { 1855 if (enm == enm_last) 1856 break; 1857 1858 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1859 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1860 } 1861 } else { 1862 CLR(ifp->if_flags, IFF_ALLMULTI); 1863 rv = ENETRESET; 1864 } 1865 1866 out: 1867 ETHER_UNLOCK(ec); 1868 return rv; 1869 } 1870 1871 static int 1872 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1873 { 1874 struct ifreq *ifr = (struct ifreq *)data; 1875 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc; 1876 const struct sockaddr *sa; 1877 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN]; 1878 int s, error = 0; 1879 unsigned int nmtu; 1880 1881 switch (cmd) { 1882 case SIOCSIFMTU: 1883 nmtu = ifr->ifr_mtu; 1884 1885 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) { 1886 error = EINVAL; 1887 break; 1888 } 1889 if (ifp->if_mtu != nmtu) { 1890 s = splnet(); 1891 error = ether_ioctl(ifp, cmd, data); 1892 splx(s); 1893 if (error == ENETRESET) 1894 error = ixl_init(ifp); 1895 } 1896 break; 1897 case SIOCADDMULTI: 1898 sa = ifreq_getaddr(SIOCADDMULTI, ifr); 1899 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) { 1900 error = ether_multiaddr(sa, addrlo, addrhi); 1901 if (error != 0) 1902 return error; 1903 1904 error = ixl_add_multi(sc, addrlo, addrhi); 1905 if (error != 0 && error != ENETRESET) { 1906 ether_delmulti(sa, &sc->sc_ec); 1907 error = EIO; 1908 } 1909 } 1910 break; 1911 1912 case SIOCDELMULTI: 1913 sa = ifreq_getaddr(SIOCDELMULTI, ifr); 1914 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) { 1915 error = ether_multiaddr(sa, addrlo, addrhi); 1916 if (error != 0) 1917 return error; 1918 1919 error = ixl_del_multi(sc, addrlo, addrhi); 1920 } 1921 break; 1922 1923 default: 1924 s = splnet(); 1925 error = ether_ioctl(ifp, cmd, data); 1926 splx(s); 1927 } 1928 1929 if (error == ENETRESET) 1930 error = ixl_iff(sc); 1931 1932 return error; 1933 } 1934 1935 static enum i40e_mac_type 1936 ixl_mactype(pci_product_id_t id) 1937 { 1938 1939 switch (id) { 1940 case PCI_PRODUCT_INTEL_XL710_SFP: 1941 case PCI_PRODUCT_INTEL_XL710_KX_B: 1942 case PCI_PRODUCT_INTEL_XL710_KX_C: 1943 case PCI_PRODUCT_INTEL_XL710_QSFP_A: 1944 case PCI_PRODUCT_INTEL_XL710_QSFP_B: 1945 case PCI_PRODUCT_INTEL_XL710_QSFP_C: 1946 case PCI_PRODUCT_INTEL_X710_10G_T: 1947 case PCI_PRODUCT_INTEL_XL710_20G_BP_1: 1948 case PCI_PRODUCT_INTEL_XL710_20G_BP_2: 1949 case PCI_PRODUCT_INTEL_X710_T4_10G: 1950 case PCI_PRODUCT_INTEL_XXV710_25G_BP: 1951 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28: 1952 return I40E_MAC_XL710; 1953 1954 case PCI_PRODUCT_INTEL_X722_KX: 1955 case PCI_PRODUCT_INTEL_X722_QSFP: 1956 case PCI_PRODUCT_INTEL_X722_SFP: 1957 case PCI_PRODUCT_INTEL_X722_1G_BASET: 1958 case PCI_PRODUCT_INTEL_X722_10G_BASET: 1959 case PCI_PRODUCT_INTEL_X722_I_SFP: 1960 return I40E_MAC_X722; 1961 } 1962 1963 return I40E_MAC_GENERIC; 1964 } 1965 1966 static void 1967 ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag) 1968 { 1969 pcireg_t csr; 1970 1971 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 1972 csr |= (PCI_COMMAND_MASTER_ENABLE | 1973 
PCI_COMMAND_MEM_ENABLE); 1974 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 1975 } 1976 1977 static inline void * 1978 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i) 1979 { 1980 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 1981 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1982 1983 if (i >= e->hmc_count) 1984 return NULL; 1985 1986 kva += e->hmc_base; 1987 kva += i * e->hmc_size; 1988 1989 return kva; 1990 } 1991 1992 static inline size_t 1993 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type) 1994 { 1995 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1996 1997 return e->hmc_size; 1998 } 1999 2000 static void 2001 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 2002 { 2003 struct ixl_rx_ring *rxr = qp->qp_rxr; 2004 2005 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 2006 I40E_PFINT_DYN_CTLN_INTENA_MASK | 2007 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 2008 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 2009 ixl_flush(sc); 2010 } 2011 2012 static void 2013 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 2014 { 2015 struct ixl_rx_ring *rxr = qp->qp_rxr; 2016 2017 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 2018 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 2019 ixl_flush(sc); 2020 } 2021 2022 static void 2023 ixl_enable_other_intr(struct ixl_softc *sc) 2024 { 2025 2026 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 2027 I40E_PFINT_DYN_CTL0_INTENA_MASK | 2028 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 2029 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 2030 ixl_flush(sc); 2031 } 2032 2033 static void 2034 ixl_disable_other_intr(struct ixl_softc *sc) 2035 { 2036 2037 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 2038 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 2039 ixl_flush(sc); 2040 } 2041 2042 static int 2043 ixl_reinit(struct ixl_softc *sc) 2044 { 2045 struct ixl_rx_ring *rxr; 2046 struct ixl_tx_ring *txr; 2047 unsigned int i; 2048 uint32_t reg; 2049 2050 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2051 2052 if (ixl_get_vsi(sc) != 0) 2053 return EIO; 2054 2055 if (ixl_set_vsi(sc) != 0) 2056 return EIO; 2057 2058 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2059 txr = sc->sc_qps[i].qp_txr; 2060 rxr = sc->sc_qps[i].qp_rxr; 2061 2062 ixl_txr_config(sc, txr); 2063 ixl_rxr_config(sc, rxr); 2064 } 2065 2066 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2067 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE); 2068 2069 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2070 txr = sc->sc_qps[i].qp_txr; 2071 rxr = sc->sc_qps[i].qp_rxr; 2072 2073 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE | 2074 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)); 2075 ixl_flush(sc); 2076 2077 ixl_wr(sc, txr->txr_tail, txr->txr_prod); 2078 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod); 2079 2080 /* ixl_rxfill() needs lock held */ 2081 mutex_enter(&rxr->rxr_lock); 2082 ixl_rxfill(sc, rxr); 2083 mutex_exit(&rxr->rxr_lock); 2084 2085 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2086 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2087 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2088 if (ixl_rxr_enabled(sc, rxr) != 0) 2089 goto stop; 2090 2091 ixl_txr_qdis(sc, txr, 1); 2092 2093 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2094 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2095 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2096 2097 if (ixl_txr_enabled(sc, txr) != 0) 2098 goto stop; 2099 } 2100 2101 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2102 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2103 2104 return 0; 2105 2106 stop: 2107 bus_dmamap_sync(sc->sc_dmat, 
IXL_DMA_MAP(&sc->sc_hmc_pd), 2108 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2109 2110 return ETIMEDOUT; 2111 } 2112 2113 static int 2114 ixl_init_locked(struct ixl_softc *sc) 2115 { 2116 struct ifnet *ifp = &sc->sc_ec.ec_if; 2117 unsigned int i; 2118 int error, eccap_change; 2119 2120 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2121 2122 if (ISSET(ifp->if_flags, IFF_RUNNING)) 2123 ixl_stop_locked(sc); 2124 2125 if (sc->sc_dead) { 2126 return ENXIO; 2127 } 2128 2129 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable; 2130 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING)) 2131 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 2132 2133 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) { 2134 if (ixl_update_macvlan(sc) == 0) { 2135 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 2136 } else { 2137 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 2138 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 2139 } 2140 } 2141 2142 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX) 2143 sc->sc_nqueue_pairs = 1; 2144 else 2145 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 2146 2147 error = ixl_reinit(sc); 2148 if (error) { 2149 ixl_stop_locked(sc); 2150 return error; 2151 } 2152 2153 SET(ifp->if_flags, IFF_RUNNING); 2154 CLR(ifp->if_flags, IFF_OACTIVE); 2155 2156 ixl_config_rss(sc); 2157 ixl_config_queue_intr(sc); 2158 2159 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2160 ixl_enable_queue_intr(sc, &sc->sc_qps[i]); 2161 } 2162 2163 error = ixl_iff(sc); 2164 if (error) { 2165 ixl_stop_locked(sc); 2166 return error; 2167 } 2168 2169 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 2170 2171 return 0; 2172 } 2173 2174 static int 2175 ixl_init(struct ifnet *ifp) 2176 { 2177 struct ixl_softc *sc = ifp->if_softc; 2178 int error; 2179 2180 mutex_enter(&sc->sc_cfg_lock); 2181 error = ixl_init_locked(sc); 2182 mutex_exit(&sc->sc_cfg_lock); 2183 2184 if (error == 0) 2185 (void)ixl_get_link_status(sc); 2186 2187 return error; 2188 } 2189 2190 static int 2191 ixl_iff(struct ixl_softc *sc) 2192 { 2193 struct ifnet *ifp = &sc->sc_ec.ec_if; 2194 struct ixl_atq iatq; 2195 struct ixl_aq_desc *iaq; 2196 struct ixl_aq_vsi_promisc_param *param; 2197 uint16_t flag_add, flag_del; 2198 int error; 2199 2200 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 2201 return 0; 2202 2203 memset(&iatq, 0, sizeof(iatq)); 2204 2205 iaq = &iatq.iatq_desc; 2206 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC); 2207 2208 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param; 2209 param->flags = htole16(0); 2210 2211 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER) 2212 || ISSET(ifp->if_flags, IFF_PROMISC)) { 2213 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2214 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2215 } 2216 2217 if (ISSET(ifp->if_flags, IFF_PROMISC)) { 2218 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2219 IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2220 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) { 2221 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2222 } 2223 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2224 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2225 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2226 param->seid = sc->sc_seid; 2227 2228 error = ixl_atq_exec(sc, &iatq); 2229 if (error) 2230 return error; 2231 2232 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) 2233 return EIO; 2234 2235 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) { 2236 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 2237 
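				/*
				 * VLAN HW filtering is active: the station
				 * address is programmed as perfect MAC+VLAN
				 * match filters.
				 */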
flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH; 2238 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH; 2239 } else { 2240 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN; 2241 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN; 2242 } 2243 2244 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del); 2245 2246 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 2247 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add); 2248 } 2249 return 0; 2250 } 2251 2252 static void 2253 ixl_stop_rendezvous(struct ixl_softc *sc) 2254 { 2255 struct ixl_tx_ring *txr; 2256 struct ixl_rx_ring *rxr; 2257 unsigned int i; 2258 2259 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2260 txr = sc->sc_qps[i].qp_txr; 2261 rxr = sc->sc_qps[i].qp_rxr; 2262 2263 mutex_enter(&txr->txr_lock); 2264 mutex_exit(&txr->txr_lock); 2265 2266 mutex_enter(&rxr->rxr_lock); 2267 mutex_exit(&rxr->rxr_lock); 2268 2269 sc->sc_qps[i].qp_workqueue = false; 2270 workqueue_wait(sc->sc_workq_txrx, 2271 &sc->sc_qps[i].qp_work); 2272 } 2273 } 2274 2275 static void 2276 ixl_stop_locked(struct ixl_softc *sc) 2277 { 2278 struct ifnet *ifp = &sc->sc_ec.ec_if; 2279 struct ixl_rx_ring *rxr; 2280 struct ixl_tx_ring *txr; 2281 unsigned int i; 2282 uint32_t reg; 2283 2284 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2285 2286 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE); 2287 callout_stop(&sc->sc_stats_callout); 2288 2289 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2290 txr = sc->sc_qps[i].qp_txr; 2291 rxr = sc->sc_qps[i].qp_rxr; 2292 2293 ixl_disable_queue_intr(sc, &sc->sc_qps[i]); 2294 2295 mutex_enter(&txr->txr_lock); 2296 ixl_txr_qdis(sc, txr, 0); 2297 mutex_exit(&txr->txr_lock); 2298 } 2299 2300 /* XXX wait at least 400 usec for all tx queues in one go */ 2301 ixl_flush(sc); 2302 DELAY(500); 2303 2304 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2305 txr = sc->sc_qps[i].qp_txr; 2306 rxr = sc->sc_qps[i].qp_rxr; 2307 2308 mutex_enter(&txr->txr_lock); 2309 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2310 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2311 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2312 mutex_exit(&txr->txr_lock); 2313 2314 mutex_enter(&rxr->rxr_lock); 2315 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2316 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2317 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2318 mutex_exit(&rxr->rxr_lock); 2319 } 2320 2321 /* XXX short wait for all queue disables to settle */ 2322 ixl_flush(sc); 2323 DELAY(50); 2324 2325 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2326 txr = sc->sc_qps[i].qp_txr; 2327 rxr = sc->sc_qps[i].qp_rxr; 2328 2329 mutex_enter(&txr->txr_lock); 2330 if (ixl_txr_disabled(sc, txr) != 0) { 2331 mutex_exit(&txr->txr_lock); 2332 goto die; 2333 } 2334 mutex_exit(&txr->txr_lock); 2335 2336 mutex_enter(&rxr->rxr_lock); 2337 if (ixl_rxr_disabled(sc, rxr) != 0) { 2338 mutex_exit(&rxr->rxr_lock); 2339 goto die; 2340 } 2341 mutex_exit(&rxr->rxr_lock); 2342 } 2343 2344 ixl_stop_rendezvous(sc); 2345 2346 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2347 txr = sc->sc_qps[i].qp_txr; 2348 rxr = sc->sc_qps[i].qp_rxr; 2349 2350 mutex_enter(&txr->txr_lock); 2351 ixl_txr_unconfig(sc, txr); 2352 mutex_exit(&txr->txr_lock); 2353 2354 mutex_enter(&rxr->rxr_lock); 2355 ixl_rxr_unconfig(sc, rxr); 2356 mutex_exit(&rxr->rxr_lock); 2357 2358 ixl_txr_clean(sc, txr); 2359 ixl_rxr_clean(sc, rxr); 2360 } 2361 2362 return; 2363 die: 2364 sc->sc_dead = true; 2365 log(LOG_CRIT, "%s: failed to shut down rings", 2366 device_xname(sc->sc_dev)); 2367 return; 2368 } 2369 2370 static void 2371 ixl_stop(struct ifnet *ifp, int disable) 2372 { 2373 struct ixl_softc *sc = ifp->if_softc; 2374 2375 
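	/* sc_cfg_lock serializes stop against ixl_init() and ioctl reconfiguration */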
mutex_enter(&sc->sc_cfg_lock); 2376 ixl_stop_locked(sc); 2377 mutex_exit(&sc->sc_cfg_lock); 2378 } 2379 2380 static int 2381 ixl_queue_pairs_alloc(struct ixl_softc *sc) 2382 { 2383 struct ixl_queue_pair *qp; 2384 unsigned int i; 2385 size_t sz; 2386 2387 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2388 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP); 2389 2390 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2391 qp = &sc->sc_qps[i]; 2392 2393 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2394 ixl_handle_queue, qp); 2395 if (qp->qp_si == NULL) 2396 goto free; 2397 2398 qp->qp_txr = ixl_txr_alloc(sc, i); 2399 if (qp->qp_txr == NULL) 2400 goto free; 2401 2402 qp->qp_rxr = ixl_rxr_alloc(sc, i); 2403 if (qp->qp_rxr == NULL) 2404 goto free; 2405 2406 qp->qp_sc = sc; 2407 snprintf(qp->qp_name, sizeof(qp->qp_name), 2408 "%s-TXRX%d", device_xname(sc->sc_dev), i); 2409 } 2410 2411 return 0; 2412 free: 2413 if (sc->sc_qps != NULL) { 2414 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2415 qp = &sc->sc_qps[i]; 2416 2417 if (qp->qp_txr != NULL) 2418 ixl_txr_free(sc, qp->qp_txr); 2419 if (qp->qp_rxr != NULL) 2420 ixl_rxr_free(sc, qp->qp_rxr); 2421 if (qp->qp_si != NULL) 2422 softint_disestablish(qp->qp_si); 2423 } 2424 2425 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2426 kmem_free(sc->sc_qps, sz); 2427 sc->sc_qps = NULL; 2428 } 2429 2430 return -1; 2431 } 2432 2433 static void 2434 ixl_queue_pairs_free(struct ixl_softc *sc) 2435 { 2436 struct ixl_queue_pair *qp; 2437 unsigned int i; 2438 size_t sz; 2439 2440 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2441 qp = &sc->sc_qps[i]; 2442 ixl_txr_free(sc, qp->qp_txr); 2443 ixl_rxr_free(sc, qp->qp_rxr); 2444 softint_disestablish(qp->qp_si); 2445 } 2446 2447 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2448 kmem_free(sc->sc_qps, sz); 2449 sc->sc_qps = NULL; 2450 } 2451 2452 static struct ixl_tx_ring * 2453 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid) 2454 { 2455 struct ixl_tx_ring *txr = NULL; 2456 struct ixl_tx_map *maps = NULL, *txm; 2457 unsigned int i; 2458 2459 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP); 2460 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs, 2461 KM_SLEEP); 2462 2463 if (ixl_dmamem_alloc(sc, &txr->txr_mem, 2464 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs, 2465 IXL_TX_QUEUE_ALIGN) != 0) 2466 goto free; 2467 2468 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2469 txm = &maps[i]; 2470 2471 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE, 2472 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0, 2473 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0) 2474 goto uncreate; 2475 2476 txm->txm_eop = -1; 2477 txm->txm_m = NULL; 2478 } 2479 2480 txr->txr_cons = txr->txr_prod = 0; 2481 txr->txr_maps = maps; 2482 2483 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP); 2484 if (txr->txr_intrq == NULL) 2485 goto uncreate; 2486 2487 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2488 ixl_deferred_transmit, txr); 2489 if (txr->txr_si == NULL) 2490 goto destroy_pcq; 2491 2492 txr->txr_tail = I40E_QTX_TAIL(qid); 2493 txr->txr_qid = qid; 2494 txr->txr_sc = sc; 2495 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET); 2496 2497 return txr; 2498 2499 destroy_pcq: 2500 pcq_destroy(txr->txr_intrq); 2501 uncreate: 2502 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2503 txm = &maps[i]; 2504 2505 if (txm->txm_map == NULL) 2506 continue; 2507 2508 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2509 } 2510 2511 ixl_dmamem_free(sc, &txr->txr_mem); 2512 free: 2513 kmem_free(maps, 
sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2514 kmem_free(txr, sizeof(*txr)); 2515 2516 return NULL; 2517 } 2518 2519 static void 2520 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable) 2521 { 2522 unsigned int qid; 2523 bus_size_t reg; 2524 uint32_t r; 2525 2526 qid = txr->txr_qid + sc->sc_base_queue; 2527 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128); 2528 qid %= 128; 2529 2530 r = ixl_rd(sc, reg); 2531 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK); 2532 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 2533 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK : 2534 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK); 2535 ixl_wr(sc, reg, r); 2536 } 2537 2538 static void 2539 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2540 { 2541 struct ixl_hmc_txq txq; 2542 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch); 2543 void *hmc; 2544 2545 memset(&txq, 0, sizeof(txq)); 2546 txq.head = htole16(txr->txr_cons); 2547 txq.new_context = 1; 2548 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT); 2549 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB; 2550 txq.qlen = htole16(sc->sc_tx_ring_ndescs); 2551 txq.tphrdesc_ena = 0; 2552 txq.tphrpacket_ena = 0; 2553 txq.tphwdesc_ena = 0; 2554 txq.rdylist = data->qs_handle[0]; 2555 2556 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2557 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2558 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, 2559 __arraycount(ixl_hmc_pack_txq)); 2560 } 2561 2562 static void 2563 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2564 { 2565 void *hmc; 2566 2567 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2568 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2569 txr->txr_cons = txr->txr_prod = 0; 2570 } 2571 2572 static void 2573 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2574 { 2575 struct ixl_tx_map *maps, *txm; 2576 bus_dmamap_t map; 2577 unsigned int i; 2578 2579 maps = txr->txr_maps; 2580 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2581 txm = &maps[i]; 2582 2583 if (txm->txm_m == NULL) 2584 continue; 2585 2586 map = txm->txm_map; 2587 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2588 BUS_DMASYNC_POSTWRITE); 2589 bus_dmamap_unload(sc->sc_dmat, map); 2590 2591 m_freem(txm->txm_m); 2592 txm->txm_m = NULL; 2593 } 2594 } 2595 2596 static int 2597 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2598 { 2599 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2600 uint32_t reg; 2601 int i; 2602 2603 for (i = 0; i < 10; i++) { 2604 reg = ixl_rd(sc, ena); 2605 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK)) 2606 return 0; 2607 2608 delaymsec(10); 2609 } 2610 2611 return ETIMEDOUT; 2612 } 2613 2614 static int 2615 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2616 { 2617 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2618 uint32_t reg; 2619 int i; 2620 2621 KASSERT(mutex_owned(&txr->txr_lock)); 2622 2623 for (i = 0; i < 10; i++) { 2624 reg = ixl_rd(sc, ena); 2625 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0) 2626 return 0; 2627 2628 delaymsec(10); 2629 } 2630 2631 return ETIMEDOUT; 2632 } 2633 2634 static void 2635 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2636 { 2637 struct ixl_tx_map *maps, *txm; 2638 struct mbuf *m; 2639 unsigned int i; 2640 2641 softint_disestablish(txr->txr_si); 2642 while ((m = pcq_get(txr->txr_intrq)) != NULL) 2643 m_freem(m); 2644 pcq_destroy(txr->txr_intrq); 2645 2646 maps = txr->txr_maps; 2647 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2648 txm = &maps[i]; 2649 2650 
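		/* mbufs were already released by ixl_txr_clean(); only the DMA maps are left to destroy */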
bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2651 } 2652 2653 ixl_dmamem_free(sc, &txr->txr_mem); 2654 mutex_destroy(&txr->txr_lock); 2655 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2656 kmem_free(txr, sizeof(*txr)); 2657 } 2658 2659 static inline int 2660 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0, 2661 struct ixl_tx_ring *txr) 2662 { 2663 struct mbuf *m; 2664 int error; 2665 2666 KASSERT(mutex_owned(&txr->txr_lock)); 2667 2668 m = *m0; 2669 2670 error = bus_dmamap_load_mbuf(dmat, map, m, 2671 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2672 if (error != EFBIG) 2673 return error; 2674 2675 m = m_defrag(m, M_DONTWAIT); 2676 if (m != NULL) { 2677 *m0 = m; 2678 txr->txr_defragged.ev_count++; 2679 2680 error = bus_dmamap_load_mbuf(dmat, map, m, 2681 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2682 } else { 2683 txr->txr_defrag_failed.ev_count++; 2684 error = ENOBUFS; 2685 } 2686 2687 return error; 2688 } 2689 2690 static inline int 2691 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd) 2692 { 2693 struct ether_header *eh; 2694 size_t len; 2695 uint64_t cmd; 2696 2697 cmd = 0; 2698 2699 eh = mtod(m, struct ether_header *); 2700 switch (htons(eh->ether_type)) { 2701 case ETHERTYPE_IP: 2702 case ETHERTYPE_IPV6: 2703 len = ETHER_HDR_LEN; 2704 break; 2705 case ETHERTYPE_VLAN: 2706 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2707 break; 2708 default: 2709 len = 0; 2710 } 2711 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT); 2712 2713 if (m->m_pkthdr.csum_flags & 2714 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 2715 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4; 2716 } 2717 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) { 2718 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM; 2719 } 2720 2721 if (m->m_pkthdr.csum_flags & 2722 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 2723 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6; 2724 } 2725 2726 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) { 2727 case IXL_TX_DESC_CMD_IIPT_IPV4: 2728 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM: 2729 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2730 break; 2731 case IXL_TX_DESC_CMD_IIPT_IPV6: 2732 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2733 break; 2734 default: 2735 len = 0; 2736 } 2737 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT); 2738 2739 if (m->m_pkthdr.csum_flags & 2740 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) { 2741 len = sizeof(struct tcphdr); 2742 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP; 2743 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) { 2744 len = sizeof(struct udphdr); 2745 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP; 2746 } else { 2747 len = 0; 2748 } 2749 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT); 2750 2751 *cmd_txd |= cmd; 2752 return 0; 2753 } 2754 2755 static void 2756 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr, 2757 bool is_transmit) 2758 { 2759 struct ixl_softc *sc = ifp->if_softc; 2760 struct ixl_tx_desc *ring, *txd; 2761 struct ixl_tx_map *txm; 2762 bus_dmamap_t map; 2763 struct mbuf *m; 2764 uint64_t cmd, cmd_txd; 2765 unsigned int prod, free, last, i; 2766 unsigned int mask; 2767 int post = 0; 2768 2769 KASSERT(mutex_owned(&txr->txr_lock)); 2770 2771 if (!ISSET(ifp->if_flags, IFF_RUNNING) 2772 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) { 2773 if (!is_transmit) 2774 IFQ_PURGE(&ifp->if_snd); 2775 return; 2776 } 2777 2778 prod = txr->txr_prod; 2779 free = txr->txr_cons; 2780 if (free <= prod) 2781 free += sc->sc_tx_ring_ndescs; 2782 free -= prod; 2783 2784 bus_dmamap_sync(sc->sc_dmat, 
IXL_DMA_MAP(&txr->txr_mem), 2785 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE); 2786 2787 ring = IXL_DMA_KVA(&txr->txr_mem); 2788 mask = sc->sc_tx_ring_ndescs - 1; 2789 last = prod; 2790 cmd = 0; 2791 txd = NULL; 2792 2793 for (;;) { 2794 if (free <= IXL_TX_PKT_DESCS) { 2795 if (!is_transmit) 2796 SET(ifp->if_flags, IFF_OACTIVE); 2797 break; 2798 } 2799 2800 if (is_transmit) 2801 m = pcq_get(txr->txr_intrq); 2802 else 2803 IFQ_DEQUEUE(&ifp->if_snd, m); 2804 2805 if (m == NULL) 2806 break; 2807 2808 txm = &txr->txr_maps[prod]; 2809 map = txm->txm_map; 2810 2811 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) { 2812 if_statinc(ifp, if_oerrors); 2813 m_freem(m); 2814 continue; 2815 } 2816 2817 cmd_txd = 0; 2818 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) { 2819 ixl_tx_setup_offloads(m, &cmd_txd); 2820 } 2821 2822 if (vlan_has_tag(m)) { 2823 cmd_txd |= (uint64_t)vlan_get_tag(m) << 2824 IXL_TX_DESC_L2TAG1_SHIFT; 2825 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1; 2826 } 2827 2828 bus_dmamap_sync(sc->sc_dmat, map, 0, 2829 map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2830 2831 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) { 2832 txd = &ring[prod]; 2833 2834 cmd = (uint64_t)map->dm_segs[i].ds_len << 2835 IXL_TX_DESC_BSIZE_SHIFT; 2836 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC; 2837 cmd |= cmd_txd; 2838 2839 txd->addr = htole64(map->dm_segs[i].ds_addr); 2840 txd->cmd = htole64(cmd); 2841 2842 last = prod; 2843 2844 prod++; 2845 prod &= mask; 2846 } 2847 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS; 2848 txd->cmd = htole64(cmd); 2849 2850 txm->txm_m = m; 2851 txm->txm_eop = last; 2852 2853 bpf_mtap(ifp, m, BPF_D_OUT); 2854 2855 free -= i; 2856 post = 1; 2857 } 2858 2859 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2860 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE); 2861 2862 if (post) { 2863 txr->txr_prod = prod; 2864 ixl_wr(sc, txr->txr_tail, prod); 2865 } 2866 } 2867 2868 static int 2869 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit) 2870 { 2871 struct ifnet *ifp = &sc->sc_ec.ec_if; 2872 struct ixl_tx_desc *ring, *txd; 2873 struct ixl_tx_map *txm; 2874 struct mbuf *m; 2875 bus_dmamap_t map; 2876 unsigned int cons, prod, last; 2877 unsigned int mask; 2878 uint64_t dtype; 2879 int done = 0, more = 0; 2880 2881 KASSERT(mutex_owned(&txr->txr_lock)); 2882 2883 prod = txr->txr_prod; 2884 cons = txr->txr_cons; 2885 2886 if (cons == prod) 2887 return 0; 2888 2889 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2890 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD); 2891 2892 ring = IXL_DMA_KVA(&txr->txr_mem); 2893 mask = sc->sc_tx_ring_ndescs - 1; 2894 2895 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2896 2897 do { 2898 if (txlimit-- <= 0) { 2899 more = 1; 2900 break; 2901 } 2902 2903 txm = &txr->txr_maps[cons]; 2904 last = txm->txm_eop; 2905 txd = &ring[last]; 2906 2907 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK); 2908 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE)) 2909 break; 2910 2911 map = txm->txm_map; 2912 2913 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2914 BUS_DMASYNC_POSTWRITE); 2915 bus_dmamap_unload(sc->sc_dmat, map); 2916 2917 m = txm->txm_m; 2918 if (m != NULL) { 2919 if_statinc_ref(nsr, if_opackets); 2920 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len); 2921 if (ISSET(m->m_flags, M_MCAST)) 2922 if_statinc_ref(nsr, if_omcasts); 2923 m_freem(m); 2924 } 2925 2926 txm->txm_m = NULL; 2927 txm->txm_eop = -1; 2928 2929 cons = last + 1; 2930 cons &= mask; 2931 done = 1; 2932 } while (cons != prod); 2933 2934 
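	/* cons now points at the first descriptor the chip has not yet completed */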
IF_STAT_PUTREF(ifp); 2935 2936 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2937 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD); 2938 2939 txr->txr_cons = cons; 2940 2941 if (done) { 2942 softint_schedule(txr->txr_si); 2943 if (txr->txr_qid == 0) { 2944 CLR(ifp->if_flags, IFF_OACTIVE); 2945 if_schedule_deferred_start(ifp); 2946 } 2947 } 2948 2949 return more; 2950 } 2951 2952 static void 2953 ixl_start(struct ifnet *ifp) 2954 { 2955 struct ixl_softc *sc; 2956 struct ixl_tx_ring *txr; 2957 2958 sc = ifp->if_softc; 2959 txr = sc->sc_qps[0].qp_txr; 2960 2961 mutex_enter(&txr->txr_lock); 2962 ixl_tx_common_locked(ifp, txr, false); 2963 mutex_exit(&txr->txr_lock); 2964 } 2965 2966 static inline unsigned int 2967 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m) 2968 { 2969 u_int cpuid; 2970 2971 cpuid = cpu_index(curcpu()); 2972 2973 return (unsigned int)(cpuid % sc->sc_nqueue_pairs); 2974 } 2975 2976 static int 2977 ixl_transmit(struct ifnet *ifp, struct mbuf *m) 2978 { 2979 struct ixl_softc *sc; 2980 struct ixl_tx_ring *txr; 2981 unsigned int qid; 2982 2983 sc = ifp->if_softc; 2984 qid = ixl_select_txqueue(sc, m); 2985 2986 txr = sc->sc_qps[qid].qp_txr; 2987 2988 if (__predict_false(!pcq_put(txr->txr_intrq, m))) { 2989 mutex_enter(&txr->txr_lock); 2990 txr->txr_pcqdrop.ev_count++; 2991 mutex_exit(&txr->txr_lock); 2992 2993 m_freem(m); 2994 return ENOBUFS; 2995 } 2996 2997 if (mutex_tryenter(&txr->txr_lock)) { 2998 ixl_tx_common_locked(ifp, txr, true); 2999 mutex_exit(&txr->txr_lock); 3000 } else { 3001 kpreempt_disable(); 3002 softint_schedule(txr->txr_si); 3003 kpreempt_enable(); 3004 } 3005 3006 return 0; 3007 } 3008 3009 static void 3010 ixl_deferred_transmit(void *xtxr) 3011 { 3012 struct ixl_tx_ring *txr = xtxr; 3013 struct ixl_softc *sc = txr->txr_sc; 3014 struct ifnet *ifp = &sc->sc_ec.ec_if; 3015 3016 mutex_enter(&txr->txr_lock); 3017 txr->txr_transmitdef.ev_count++; 3018 if (pcq_peek(txr->txr_intrq) != NULL) 3019 ixl_tx_common_locked(ifp, txr, true); 3020 mutex_exit(&txr->txr_lock); 3021 } 3022 3023 static struct ixl_rx_ring * 3024 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid) 3025 { 3026 struct ixl_rx_ring *rxr = NULL; 3027 struct ixl_rx_map *maps = NULL, *rxm; 3028 unsigned int i; 3029 3030 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP); 3031 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs, 3032 KM_SLEEP); 3033 3034 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem, 3035 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs, 3036 IXL_RX_QUEUE_ALIGN) != 0) 3037 goto free; 3038 3039 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3040 rxm = &maps[i]; 3041 3042 if (bus_dmamap_create(sc->sc_dmat, 3043 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0, 3044 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0) 3045 goto uncreate; 3046 3047 rxm->rxm_m = NULL; 3048 } 3049 3050 rxr->rxr_cons = rxr->rxr_prod = 0; 3051 rxr->rxr_m_head = NULL; 3052 rxr->rxr_m_tail = &rxr->rxr_m_head; 3053 rxr->rxr_maps = maps; 3054 3055 rxr->rxr_tail = I40E_QRX_TAIL(qid); 3056 rxr->rxr_qid = qid; 3057 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET); 3058 3059 return rxr; 3060 3061 uncreate: 3062 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3063 rxm = &maps[i]; 3064 3065 if (rxm->rxm_map == NULL) 3066 continue; 3067 3068 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3069 } 3070 3071 ixl_dmamem_free(sc, &rxr->rxr_mem); 3072 free: 3073 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3074 kmem_free(rxr, sizeof(*rxr)); 3075 3076 return NULL; 3077 } 3078 3079 static void 3080 
ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3081 { 3082 struct ixl_rx_map *maps, *rxm; 3083 bus_dmamap_t map; 3084 unsigned int i; 3085 3086 maps = rxr->rxr_maps; 3087 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3088 rxm = &maps[i]; 3089 3090 if (rxm->rxm_m == NULL) 3091 continue; 3092 3093 map = rxm->rxm_map; 3094 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3095 BUS_DMASYNC_POSTWRITE); 3096 bus_dmamap_unload(sc->sc_dmat, map); 3097 3098 m_freem(rxm->rxm_m); 3099 rxm->rxm_m = NULL; 3100 } 3101 3102 m_freem(rxr->rxr_m_head); 3103 rxr->rxr_m_head = NULL; 3104 rxr->rxr_m_tail = &rxr->rxr_m_head; 3105 3106 rxr->rxr_prod = rxr->rxr_cons = 0; 3107 } 3108 3109 static int 3110 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3111 { 3112 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3113 uint32_t reg; 3114 int i; 3115 3116 for (i = 0; i < 10; i++) { 3117 reg = ixl_rd(sc, ena); 3118 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK)) 3119 return 0; 3120 3121 delaymsec(10); 3122 } 3123 3124 return ETIMEDOUT; 3125 } 3126 3127 static int 3128 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3129 { 3130 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3131 uint32_t reg; 3132 int i; 3133 3134 KASSERT(mutex_owned(&rxr->rxr_lock)); 3135 3136 for (i = 0; i < 10; i++) { 3137 reg = ixl_rd(sc, ena); 3138 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0) 3139 return 0; 3140 3141 delaymsec(10); 3142 } 3143 3144 return ETIMEDOUT; 3145 } 3146 3147 static void 3148 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3149 { 3150 struct ixl_hmc_rxq rxq; 3151 struct ifnet *ifp = &sc->sc_ec.ec_if; 3152 uint16_t rxmax; 3153 void *hmc; 3154 3155 memset(&rxq, 0, sizeof(rxq)); 3156 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN; 3157 3158 rxq.head = htole16(rxr->rxr_cons); 3159 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT); 3160 rxq.qlen = htole16(sc->sc_rx_ring_ndescs); 3161 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT); 3162 rxq.hbuff = 0; 3163 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT; 3164 rxq.dsize = IXL_HMC_RXQ_DSIZE_32; 3165 rxq.crcstrip = 1; 3166 rxq.l2sel = 1; 3167 rxq.showiv = 1; 3168 rxq.rxmax = htole16(rxmax); 3169 rxq.tphrdesc_ena = 0; 3170 rxq.tphwdesc_ena = 0; 3171 rxq.tphdata_ena = 0; 3172 rxq.tphhead_ena = 0; 3173 rxq.lrxqthresh = 0; 3174 rxq.prefena = 1; 3175 3176 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3177 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3178 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, 3179 __arraycount(ixl_hmc_pack_rxq)); 3180 } 3181 3182 static void 3183 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3184 { 3185 void *hmc; 3186 3187 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3188 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3189 rxr->rxr_cons = rxr->rxr_prod = 0; 3190 } 3191 3192 static void 3193 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3194 { 3195 struct ixl_rx_map *maps, *rxm; 3196 unsigned int i; 3197 3198 maps = rxr->rxr_maps; 3199 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3200 rxm = &maps[i]; 3201 3202 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3203 } 3204 3205 ixl_dmamem_free(sc, &rxr->rxr_mem); 3206 mutex_destroy(&rxr->rxr_lock); 3207 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3208 kmem_free(rxr, sizeof(*rxr)); 3209 } 3210 3211 static inline void 3212 ixl_rx_csum(struct mbuf *m, uint64_t qword) 3213 { 3214 int flags_mask; 3215 3216 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) { 3217 /* No L3 or L4 checksum was calculated 
*/ 3218 return; 3219 } 3220 3221 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) { 3222 case IXL_RX_DESC_PTYPE_IPV4FRAG: 3223 case IXL_RX_DESC_PTYPE_IPV4: 3224 case IXL_RX_DESC_PTYPE_SCTPV4: 3225 case IXL_RX_DESC_PTYPE_ICMPV4: 3226 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3227 break; 3228 case IXL_RX_DESC_PTYPE_TCPV4: 3229 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3230 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD; 3231 break; 3232 case IXL_RX_DESC_PTYPE_UDPV4: 3233 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3234 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD; 3235 break; 3236 case IXL_RX_DESC_PTYPE_TCPV6: 3237 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD; 3238 break; 3239 case IXL_RX_DESC_PTYPE_UDPV6: 3240 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD; 3241 break; 3242 default: 3243 flags_mask = 0; 3244 } 3245 3246 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 | 3247 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)); 3248 3249 if (ISSET(qword, IXL_RX_DESC_IPE)) { 3250 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD); 3251 } 3252 3253 if (ISSET(qword, IXL_RX_DESC_L4E)) { 3254 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD); 3255 } 3256 } 3257 3258 static int 3259 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit) 3260 { 3261 struct ifnet *ifp = &sc->sc_ec.ec_if; 3262 struct ixl_rx_wb_desc_32 *ring, *rxd; 3263 struct ixl_rx_map *rxm; 3264 bus_dmamap_t map; 3265 unsigned int cons, prod; 3266 struct mbuf *m; 3267 uint64_t word, word0; 3268 unsigned int len; 3269 unsigned int mask; 3270 int done = 0, more = 0; 3271 3272 KASSERT(mutex_owned(&rxr->rxr_lock)); 3273 3274 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 3275 return 0; 3276 3277 prod = rxr->rxr_prod; 3278 cons = rxr->rxr_cons; 3279 3280 if (cons == prod) 3281 return 0; 3282 3283 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3284 0, IXL_DMA_LEN(&rxr->rxr_mem), 3285 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3286 3287 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3288 mask = sc->sc_rx_ring_ndescs - 1; 3289 3290 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 3291 3292 do { 3293 if (rxlimit-- <= 0) { 3294 more = 1; 3295 break; 3296 } 3297 3298 rxd = &ring[cons]; 3299 3300 word = le64toh(rxd->qword1); 3301 3302 if (!ISSET(word, IXL_RX_DESC_DD)) 3303 break; 3304 3305 rxm = &rxr->rxr_maps[cons]; 3306 3307 map = rxm->rxm_map; 3308 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3309 BUS_DMASYNC_POSTREAD); 3310 bus_dmamap_unload(sc->sc_dmat, map); 3311 3312 m = rxm->rxm_m; 3313 rxm->rxm_m = NULL; 3314 3315 KASSERT(m != NULL); 3316 3317 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT; 3318 m->m_len = len; 3319 m->m_pkthdr.len = 0; 3320 3321 m->m_next = NULL; 3322 *rxr->rxr_m_tail = m; 3323 rxr->rxr_m_tail = &m->m_next; 3324 3325 m = rxr->rxr_m_head; 3326 m->m_pkthdr.len += len; 3327 3328 if (ISSET(word, IXL_RX_DESC_EOP)) { 3329 word0 = le64toh(rxd->qword0); 3330 3331 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) { 3332 vlan_set_tag(m, 3333 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK)); 3334 } 3335 3336 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0) 3337 ixl_rx_csum(m, word); 3338 3339 if (!ISSET(word, 3340 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) { 3341 m_set_rcvif(m, ifp); 3342 if_statinc_ref(nsr, if_ipackets); 3343 if_statadd_ref(nsr, if_ibytes, 3344 m->m_pkthdr.len); 3345 if_percpuq_enqueue(sc->sc_ipq, m); 3346 } else { 3347 if_statinc_ref(nsr, if_ierrors); 3348 m_freem(m); 3349 } 3350 3351 rxr->rxr_m_head = NULL; 3352 rxr->rxr_m_tail = &rxr->rxr_m_head; 3353 } 3354 
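		/* advance the consumer index; the ring size is a power of two, so masking wraps it */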
3355 cons++; 3356 cons &= mask; 3357 3358 done = 1; 3359 } while (cons != prod); 3360 3361 if (done) { 3362 rxr->rxr_cons = cons; 3363 if (ixl_rxfill(sc, rxr) == -1) 3364 if_statinc_ref(nsr, if_iqdrops); 3365 } 3366 3367 IF_STAT_PUTREF(ifp); 3368 3369 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3370 0, IXL_DMA_LEN(&rxr->rxr_mem), 3371 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3372 3373 return more; 3374 } 3375 3376 static int 3377 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3378 { 3379 struct ixl_rx_rd_desc_32 *ring, *rxd; 3380 struct ixl_rx_map *rxm; 3381 bus_dmamap_t map; 3382 struct mbuf *m; 3383 unsigned int prod; 3384 unsigned int slots; 3385 unsigned int mask; 3386 int post = 0, error = 0; 3387 3388 KASSERT(mutex_owned(&rxr->rxr_lock)); 3389 3390 prod = rxr->rxr_prod; 3391 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons, 3392 sc->sc_rx_ring_ndescs); 3393 3394 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3395 mask = sc->sc_rx_ring_ndescs - 1; 3396 3397 if (__predict_false(slots <= 0)) 3398 return -1; 3399 3400 do { 3401 rxm = &rxr->rxr_maps[prod]; 3402 3403 MGETHDR(m, M_DONTWAIT, MT_DATA); 3404 if (m == NULL) { 3405 rxr->rxr_mgethdr_failed.ev_count++; 3406 error = -1; 3407 break; 3408 } 3409 3410 MCLGET(m, M_DONTWAIT); 3411 if (!ISSET(m->m_flags, M_EXT)) { 3412 rxr->rxr_mgetcl_failed.ev_count++; 3413 error = -1; 3414 m_freem(m); 3415 break; 3416 } 3417 3418 m->m_len = m->m_pkthdr.len = MCLBYTES; 3419 m_adj(m, ETHER_ALIGN); 3420 3421 map = rxm->rxm_map; 3422 3423 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 3424 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) { 3425 rxr->rxr_mbuf_load_failed.ev_count++; 3426 error = -1; 3427 m_freem(m); 3428 break; 3429 } 3430 3431 rxm->rxm_m = m; 3432 3433 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3434 BUS_DMASYNC_PREREAD); 3435 3436 rxd = &ring[prod]; 3437 3438 rxd->paddr = htole64(map->dm_segs[0].ds_addr); 3439 rxd->haddr = htole64(0); 3440 3441 prod++; 3442 prod &= mask; 3443 3444 post = 1; 3445 3446 } while (--slots); 3447 3448 if (post) { 3449 rxr->rxr_prod = prod; 3450 ixl_wr(sc, rxr->rxr_tail, prod); 3451 } 3452 3453 return error; 3454 } 3455 3456 static inline int 3457 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp, 3458 u_int txlimit, struct evcnt *txevcnt, 3459 u_int rxlimit, struct evcnt *rxevcnt) 3460 { 3461 struct ixl_tx_ring *txr = qp->qp_txr; 3462 struct ixl_rx_ring *rxr = qp->qp_rxr; 3463 int txmore, rxmore; 3464 int rv; 3465 3466 mutex_enter(&txr->txr_lock); 3467 txevcnt->ev_count++; 3468 txmore = ixl_txeof(sc, txr, txlimit); 3469 mutex_exit(&txr->txr_lock); 3470 3471 mutex_enter(&rxr->rxr_lock); 3472 rxevcnt->ev_count++; 3473 rxmore = ixl_rxeof(sc, rxr, rxlimit); 3474 mutex_exit(&rxr->rxr_lock); 3475 3476 rv = txmore | (rxmore << 1); 3477 3478 return rv; 3479 } 3480 3481 static void 3482 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp) 3483 { 3484 3485 if (qp->qp_workqueue) 3486 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL); 3487 else 3488 softint_schedule(qp->qp_si); 3489 } 3490 3491 static int 3492 ixl_intr(void *xsc) 3493 { 3494 struct ixl_softc *sc = xsc; 3495 struct ixl_tx_ring *txr; 3496 struct ixl_rx_ring *rxr; 3497 uint32_t icr, rxintr, txintr; 3498 int rv = 0; 3499 unsigned int i; 3500 3501 KASSERT(sc != NULL); 3502 3503 ixl_enable_other_intr(sc); 3504 icr = ixl_rd(sc, I40E_PFINT_ICR0); 3505 3506 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) { 3507 atomic_inc_64(&sc->sc_event_atq.ev_count); 3508 ixl_atq_done(sc); 3509 
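		/* ARQ events are drained later from the per-device workqueue */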
		ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3510 		rv = 1;
3511 	}
3512 
3513 	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3514 		atomic_inc_64(&sc->sc_event_link.ev_count);
3515 		ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3516 		rv = 1;
3517 	}
3518 
3519 	rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3520 	txintr = icr & I40E_INTR_NOTX_TX_MASK;
3521 
3522 	if (txintr || rxintr) {
3523 		for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3524 			txr = sc->sc_qps[i].qp_txr;
3525 			rxr = sc->sc_qps[i].qp_rxr;
3526 
3527 			ixl_handle_queue_common(sc, &sc->sc_qps[i],
3528 			    IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3529 			    IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3530 		}
3531 		rv = 1;
3532 	}
3533 
3534 	return rv;
3535 }
3536 
3537 static int
3538 ixl_queue_intr(void *xqp)
3539 {
3540 	struct ixl_queue_pair *qp = xqp;
3541 	struct ixl_tx_ring *txr = qp->qp_txr;
3542 	struct ixl_rx_ring *rxr = qp->qp_rxr;
3543 	struct ixl_softc *sc = qp->qp_sc;
3544 	u_int txlimit, rxlimit;
3545 	int more;
3546 
3547 	txlimit = sc->sc_tx_intr_process_limit;
3548 	rxlimit = sc->sc_rx_intr_process_limit;
3549 	qp->qp_workqueue = sc->sc_txrx_workqueue;
3550 
3551 	more = ixl_handle_queue_common(sc, qp,
3552 	    txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3553 
3554 	if (more != 0) {
3555 		ixl_sched_handle_queue(sc, qp);
3556 	} else {
3557 		/* for ALTQ */
3558 		if (txr->txr_qid == 0)
3559 			if_schedule_deferred_start(&sc->sc_ec.ec_if);
3560 		softint_schedule(txr->txr_si);
3561 
3562 		ixl_enable_queue_intr(sc, qp);
3563 	}
3564 
3565 	return 1;
3566 }
3567 
3568 static void
3569 ixl_handle_queue_wk(struct work *wk, void *xsc)
3570 {
3571 	struct ixl_queue_pair *qp;
3572 
3573 	qp = container_of(wk, struct ixl_queue_pair, qp_work);
3574 	ixl_handle_queue(qp);
3575 }
3576 
3577 static void
3578 ixl_handle_queue(void *xqp)
3579 {
3580 	struct ixl_queue_pair *qp = xqp;
3581 	struct ixl_softc *sc = qp->qp_sc;
3582 	struct ixl_tx_ring *txr = qp->qp_txr;
3583 	struct ixl_rx_ring *rxr = qp->qp_rxr;
3584 	u_int txlimit, rxlimit;
3585 	int more;
3586 
3587 	txlimit = sc->sc_tx_process_limit;
3588 	rxlimit = sc->sc_rx_process_limit;
3589 
3590 	more = ixl_handle_queue_common(sc, qp,
3591 	    txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3592 
3593 	if (more != 0)
3594 		ixl_sched_handle_queue(sc, qp);
3595 	else
3596 		ixl_enable_queue_intr(sc, qp);
3597 }
3598 
3599 static inline void
3600 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3601 {
3602 	uint32_t hmc_idx, hmc_isvf;
3603 	uint32_t hmc_errtype, hmc_objtype, hmc_data;
3604 
3605 	hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3606 	hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3607 	hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3608 	hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3609 	hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3610 	hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3611 	hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3612 	hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3613 	hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3614 
3615 	device_printf(sc->sc_dev,
3616 	    "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3617 	    hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3618 }
3619 
3620 static int
3621 ixl_other_intr(void *xsc)
3622 {
3623 	struct ixl_softc *sc = xsc;
3624 	uint32_t icr, mask, reg;
3625 	int rv = 0;
3626 
3627 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
3628 	mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3629 
3630 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3631
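		/* admin-queue interrupt: complete pending ATQ commands and defer ARQ processing */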
atomic_inc_64(&sc->sc_event_atq.ev_count); 3632 ixl_atq_done(sc); 3633 ixl_work_add(sc->sc_workq, &sc->sc_arq_task); 3634 rv = 1; 3635 } 3636 3637 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) { 3638 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3639 device_printf(sc->sc_dev, "link stat changed\n"); 3640 3641 atomic_inc_64(&sc->sc_event_link.ev_count); 3642 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task); 3643 rv = 1; 3644 } 3645 3646 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) { 3647 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK); 3648 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 3649 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK; 3650 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3651 3652 device_printf(sc->sc_dev, "GRST: %s\n", 3653 reg == I40E_RESET_CORER ? "CORER" : 3654 reg == I40E_RESET_GLOBR ? "GLOBR" : 3655 reg == I40E_RESET_EMPR ? "EMPR" : 3656 "POR"); 3657 } 3658 3659 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK)) 3660 atomic_inc_64(&sc->sc_event_ecc_err.ev_count); 3661 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)) 3662 atomic_inc_64(&sc->sc_event_pci_exception.ev_count); 3663 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK)) 3664 atomic_inc_64(&sc->sc_event_crit_err.ev_count); 3665 3666 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) { 3667 CLR(mask, IXL_ICR0_CRIT_ERR_MASK); 3668 device_printf(sc->sc_dev, "critical error\n"); 3669 } 3670 3671 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) { 3672 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO); 3673 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK)) 3674 ixl_print_hmc_error(sc, reg); 3675 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0); 3676 } 3677 3678 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask); 3679 ixl_flush(sc); 3680 ixl_enable_other_intr(sc); 3681 return rv; 3682 } 3683 3684 static void 3685 ixl_get_link_status_done(struct ixl_softc *sc, 3686 const struct ixl_aq_desc *iaq) 3687 { 3688 struct ixl_aq_desc iaq_buf; 3689 3690 memcpy(&iaq_buf, iaq, sizeof(iaq_buf)); 3691 3692 /* 3693 * The lock can be released here 3694 * because there is no post processing about ATQ 3695 */ 3696 mutex_exit(&sc->sc_atq_lock); 3697 ixl_link_state_update(sc, &iaq_buf); 3698 mutex_enter(&sc->sc_atq_lock); 3699 } 3700 3701 static void 3702 ixl_get_link_status(void *xsc) 3703 { 3704 struct ixl_softc *sc = xsc; 3705 struct ixl_aq_desc *iaq; 3706 struct ixl_aq_link_param *param; 3707 int error; 3708 3709 mutex_enter(&sc->sc_atq_lock); 3710 3711 iaq = &sc->sc_link_state_atq.iatq_desc; 3712 memset(iaq, 0, sizeof(*iaq)); 3713 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 3714 param = (struct ixl_aq_link_param *)iaq->iaq_param; 3715 param->notify = IXL_AQ_LINK_NOTIFY; 3716 3717 error = ixl_atq_exec_locked(sc, &sc->sc_link_state_atq); 3718 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 3719 3720 if (error == 0) { 3721 ixl_get_link_status_done(sc, iaq); 3722 } 3723 3724 mutex_exit(&sc->sc_atq_lock); 3725 } 3726 3727 static void 3728 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3729 { 3730 struct ifnet *ifp = &sc->sc_ec.ec_if; 3731 int link_state; 3732 3733 mutex_enter(&sc->sc_cfg_lock); 3734 link_state = ixl_set_link_status_locked(sc, iaq); 3735 mutex_exit(&sc->sc_cfg_lock); 3736 3737 if (ifp->if_link_state != link_state) 3738 if_link_state_change(ifp, link_state); 3739 3740 if (link_state != LINK_STATE_DOWN) { 3741 kpreempt_disable(); 3742 if_schedule_deferred_start(ifp); 3743 kpreempt_enable(); 3744 } 3745 } 3746 3747 static void 3748 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq, 3749 const char *msg) 
3750 { 3751 char buf[512]; 3752 size_t len; 3753 3754 len = sizeof(buf); 3755 buf[--len] = '\0'; 3756 3757 device_printf(sc->sc_dev, "%s\n", msg); 3758 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags)); 3759 device_printf(sc->sc_dev, "flags %s opcode %04x\n", 3760 buf, le16toh(iaq->iaq_opcode)); 3761 device_printf(sc->sc_dev, "datalen %u retval %u\n", 3762 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval)); 3763 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie); 3764 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n", 3765 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]), 3766 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3])); 3767 } 3768 3769 static void 3770 ixl_arq(void *xsc) 3771 { 3772 struct ixl_softc *sc = xsc; 3773 struct ixl_aq_desc *arq, *iaq; 3774 struct ixl_aq_buf *aqb; 3775 unsigned int cons = sc->sc_arq_cons; 3776 unsigned int prod; 3777 int done = 0; 3778 3779 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) & 3780 sc->sc_aq_regs->arq_head_mask; 3781 3782 if (cons == prod) 3783 goto done; 3784 3785 arq = IXL_DMA_KVA(&sc->sc_arq); 3786 3787 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3788 0, IXL_DMA_LEN(&sc->sc_arq), 3789 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3790 3791 do { 3792 iaq = &arq[cons]; 3793 aqb = sc->sc_arq_live[cons]; 3794 3795 KASSERT(aqb != NULL); 3796 3797 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN, 3798 BUS_DMASYNC_POSTREAD); 3799 3800 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3801 ixl_aq_dump(sc, iaq, "arq event"); 3802 3803 switch (iaq->iaq_opcode) { 3804 case htole16(IXL_AQ_OP_PHY_LINK_STATUS): 3805 ixl_link_state_update(sc, iaq); 3806 break; 3807 } 3808 3809 memset(iaq, 0, sizeof(*iaq)); 3810 sc->sc_arq_live[cons] = NULL; 3811 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry); 3812 3813 cons++; 3814 cons &= IXL_AQ_MASK; 3815 3816 done = 1; 3817 } while (cons != prod); 3818 3819 if (done) { 3820 sc->sc_arq_cons = cons; 3821 ixl_arq_fill(sc); 3822 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3823 0, IXL_DMA_LEN(&sc->sc_arq), 3824 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3825 } 3826 3827 done: 3828 ixl_enable_other_intr(sc); 3829 } 3830 3831 static void 3832 ixl_atq_set(struct ixl_atq *iatq, 3833 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *)) 3834 { 3835 3836 iatq->iatq_fn = fn; 3837 } 3838 3839 static int 3840 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3841 { 3842 struct ixl_aq_desc *atq, *slot; 3843 unsigned int prod, cons, prod_next; 3844 3845 /* assert locked */ 3846 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3847 3848 atq = IXL_DMA_KVA(&sc->sc_atq); 3849 prod = sc->sc_atq_prod; 3850 cons = sc->sc_atq_cons; 3851 prod_next = (prod +1) & IXL_AQ_MASK; 3852 3853 if (cons == prod_next) 3854 return ENOMEM; 3855 3856 slot = &atq[prod]; 3857 3858 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3859 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3860 3861 KASSERT(iatq->iatq_fn != NULL); 3862 *slot = iatq->iatq_desc; 3863 slot->iaq_cookie = (uint64_t)((intptr_t)iatq); 3864 3865 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3866 ixl_aq_dump(sc, slot, "atq command"); 3867 3868 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3869 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3870 3871 sc->sc_atq_prod = prod_next; 3872 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod); 3873 3874 return 0; 3875 } 3876 3877 static void 3878 ixl_atq_done_locked(struct ixl_softc *sc) 3879 { 3880 struct ixl_aq_desc *atq, *slot; 3881 
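	/* the descriptor cookie carries the struct ixl_atq pointer stored by ixl_atq_post_locked() */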
struct ixl_atq *iatq; 3882 unsigned int cons; 3883 unsigned int prod; 3884 3885 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3886 3887 prod = sc->sc_atq_prod; 3888 cons = sc->sc_atq_cons; 3889 3890 if (prod == cons) 3891 return; 3892 3893 atq = IXL_DMA_KVA(&sc->sc_atq); 3894 3895 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3896 0, IXL_DMA_LEN(&sc->sc_atq), 3897 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3898 3899 do { 3900 slot = &atq[cons]; 3901 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD))) 3902 break; 3903 3904 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie); 3905 iatq->iatq_desc = *slot; 3906 3907 memset(slot, 0, sizeof(*slot)); 3908 3909 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3910 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response"); 3911 3912 (*iatq->iatq_fn)(sc, &iatq->iatq_desc); 3913 3914 cons++; 3915 cons &= IXL_AQ_MASK; 3916 } while (cons != prod); 3917 3918 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3919 0, IXL_DMA_LEN(&sc->sc_atq), 3920 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3921 3922 sc->sc_atq_cons = cons; 3923 } 3924 3925 static void 3926 ixl_atq_done(struct ixl_softc *sc) 3927 { 3928 3929 mutex_enter(&sc->sc_atq_lock); 3930 ixl_atq_done_locked(sc); 3931 mutex_exit(&sc->sc_atq_lock); 3932 } 3933 3934 static void 3935 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3936 { 3937 3938 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3939 3940 cv_signal(&sc->sc_atq_cv); 3941 } 3942 3943 static int 3944 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq) 3945 { 3946 int error; 3947 3948 mutex_enter(&sc->sc_atq_lock); 3949 error = ixl_atq_exec_locked(sc, iatq); 3950 mutex_exit(&sc->sc_atq_lock); 3951 3952 return error; 3953 } 3954 3955 static int 3956 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3957 { 3958 int error; 3959 3960 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3961 KASSERT(iatq->iatq_desc.iaq_cookie == 0); 3962 3963 ixl_atq_set(iatq, ixl_wakeup); 3964 3965 error = ixl_atq_post_locked(sc, iatq); 3966 if (error) 3967 return error; 3968 3969 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock, 3970 IXL_ATQ_EXEC_TIMEOUT); 3971 3972 return error; 3973 } 3974 3975 static int 3976 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm) 3977 { 3978 struct ixl_aq_desc *atq, *slot; 3979 unsigned int prod; 3980 unsigned int t = 0; 3981 3982 mutex_enter(&sc->sc_atq_lock); 3983 3984 atq = IXL_DMA_KVA(&sc->sc_atq); 3985 prod = sc->sc_atq_prod; 3986 slot = atq + prod; 3987 3988 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3989 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3990 3991 *slot = *iaq; 3992 slot->iaq_flags |= htole16(IXL_AQ_SI); 3993 3994 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3995 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3996 3997 prod++; 3998 prod &= IXL_AQ_MASK; 3999 sc->sc_atq_prod = prod; 4000 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod); 4001 4002 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) { 4003 delaymsec(1); 4004 4005 if (t++ > tm) { 4006 mutex_exit(&sc->sc_atq_lock); 4007 return ETIMEDOUT; 4008 } 4009 } 4010 4011 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 4012 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD); 4013 *iaq = *slot; 4014 memset(slot, 0, sizeof(*slot)); 4015 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 4016 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD); 4017 4018 sc->sc_atq_cons = prod; 4019 4020 mutex_exit(&sc->sc_atq_lock); 4021 4022 return 0; 4023 } 4024 4025 static int 4026 
ixl_get_version(struct ixl_softc *sc)
4027 {
4028 	struct ixl_aq_desc iaq;
4029 	uint32_t fwbuild, fwver, apiver;
4030 	uint16_t api_maj_ver, api_min_ver;
4031 
4032 	memset(&iaq, 0, sizeof(iaq));
4033 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
4034 
4035 
4036 
4037 	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
4038 		return ETIMEDOUT;
4039 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
4040 		return EIO;
4041 
4042 	fwbuild = le32toh(iaq.iaq_param[1]);
4043 	fwver = le32toh(iaq.iaq_param[2]);
4044 	apiver = le32toh(iaq.iaq_param[3]);
4045 
4046 	api_maj_ver = (uint16_t)apiver;
4047 	api_min_ver = (uint16_t)(apiver >> 16);
4048 
4049 	aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
4050 	    (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
4051 
4052 	if (sc->sc_mac_type == I40E_MAC_X722) {
4053 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
4054 		    IXL_SC_AQ_FLAG_NVMREAD);
4055 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4056 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
4057 	}
4058 
4059 #define IXL_API_VER(maj, min)	(((uint32_t)(maj) << 16) | (min))
4060 	if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
4061 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4062 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
4063 	}
4064 #undef IXL_API_VER
4065 
4066 	return 0;
4067 }
4068 
4069 static int
4070 ixl_get_nvm_version(struct ixl_softc *sc)
4071 {
4072 	uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
4073 	uint32_t eetrack, oem;
4074 	uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
4075 	uint8_t oem_ver, oem_patch;
4076 
4077 	nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
4078 	ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
4079 	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
4080 	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
4081 	ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
4082 	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
4083 	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
4084 
4085 	nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
4086 	nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
4087 	eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4088 	oem = ((uint32_t)oem_hi << 16) | oem_lo;
4089 	oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4090 	oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4091 	oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4092 
4093 	aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4094 	    nvm_maj_ver, nvm_min_ver, eetrack,
4095 	    oem_ver, oem_build, oem_patch);
4096 
4097 	return 0;
4098 }
4099 
4100 static int
4101 ixl_pxe_clear(struct ixl_softc *sc)
4102 {
4103 	struct ixl_aq_desc iaq;
4104 	int rv;
4105 
4106 	memset(&iaq, 0, sizeof(iaq));
4107 	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4108 	iaq.iaq_param[0] = htole32(0x2);
4109 
4110 	rv = ixl_atq_poll(sc, &iaq, 250);
4111 
4112 	ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4113 
4114 	if (rv != 0)
4115 		return ETIMEDOUT;
4116 
4117 	switch (iaq.iaq_retval) {
4118 	case htole16(IXL_AQ_RC_OK):
4119 	case htole16(IXL_AQ_RC_EEXIST):
4120 		break;
4121 	default:
4122 		return EIO;
4123 	}
4124 
4125 	return 0;
4126 }
4127 
4128 static int
4129 ixl_lldp_shut(struct ixl_softc *sc)
4130 {
4131 	struct ixl_aq_desc iaq;
4132 
4133 	memset(&iaq, 0, sizeof(iaq));
4134 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4135 	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4136 
4137 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4138 		aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4139
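		/* the firmware LLDP agent could not be stopped; report the failure to the caller */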
return -1; 4140 } 4141 4142 switch (iaq.iaq_retval) { 4143 case htole16(IXL_AQ_RC_EMODE): 4144 case htole16(IXL_AQ_RC_EPERM): 4145 /* ignore silently */ 4146 default: 4147 break; 4148 } 4149 4150 return 0; 4151 } 4152 4153 static void 4154 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap) 4155 { 4156 uint16_t id; 4157 uint32_t number, logical_id; 4158 4159 id = le16toh(cap->cap_id); 4160 number = le32toh(cap->number); 4161 logical_id = le32toh(cap->logical_id); 4162 4163 switch (id) { 4164 case IXL_AQ_CAP_RSS: 4165 sc->sc_rss_table_size = number; 4166 sc->sc_rss_table_entry_width = logical_id; 4167 break; 4168 case IXL_AQ_CAP_RXQ: 4169 case IXL_AQ_CAP_TXQ: 4170 sc->sc_nqueue_pairs_device = MIN(number, 4171 sc->sc_nqueue_pairs_device); 4172 break; 4173 } 4174 } 4175 4176 static int 4177 ixl_get_hw_capabilities(struct ixl_softc *sc) 4178 { 4179 struct ixl_dmamem idm; 4180 struct ixl_aq_desc iaq; 4181 struct ixl_aq_capability *caps; 4182 size_t i, ncaps; 4183 bus_size_t caps_size; 4184 uint16_t status; 4185 int rv; 4186 4187 caps_size = sizeof(caps[0]) * 40; 4188 memset(&iaq, 0, sizeof(iaq)); 4189 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP); 4190 4191 do { 4192 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) { 4193 return -1; 4194 } 4195 4196 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4197 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4198 iaq.iaq_datalen = htole16(caps_size); 4199 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4200 4201 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4202 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD); 4203 4204 rv = ixl_atq_poll(sc, &iaq, 250); 4205 4206 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4207 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD); 4208 4209 if (rv != 0) { 4210 aprint_error(", HW capabilities timeout\n"); 4211 goto done; 4212 } 4213 4214 status = le16toh(iaq.iaq_retval); 4215 4216 if (status == IXL_AQ_RC_ENOMEM) { 4217 caps_size = le16toh(iaq.iaq_datalen); 4218 ixl_dmamem_free(sc, &idm); 4219 } 4220 } while (status == IXL_AQ_RC_ENOMEM); 4221 4222 if (status != IXL_AQ_RC_OK) { 4223 aprint_error(", HW capabilities error\n"); 4224 goto done; 4225 } 4226 4227 caps = IXL_DMA_KVA(&idm); 4228 ncaps = le16toh(iaq.iaq_param[1]); 4229 4230 for (i = 0; i < ncaps; i++) { 4231 ixl_parse_hw_capability(sc, &caps[i]); 4232 } 4233 4234 done: 4235 ixl_dmamem_free(sc, &idm); 4236 return rv; 4237 } 4238 4239 static int 4240 ixl_get_mac(struct ixl_softc *sc) 4241 { 4242 struct ixl_dmamem idm; 4243 struct ixl_aq_desc iaq; 4244 struct ixl_aq_mac_addresses *addrs; 4245 int rv; 4246 4247 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) { 4248 aprint_error(", unable to allocate mac addresses\n"); 4249 return -1; 4250 } 4251 4252 memset(&iaq, 0, sizeof(iaq)); 4253 iaq.iaq_flags = htole16(IXL_AQ_BUF); 4254 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ); 4255 iaq.iaq_datalen = htole16(sizeof(*addrs)); 4256 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4257 4258 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4259 BUS_DMASYNC_PREREAD); 4260 4261 rv = ixl_atq_poll(sc, &iaq, 250); 4262 4263 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4264 BUS_DMASYNC_POSTREAD); 4265 4266 if (rv != 0) { 4267 aprint_error(", MAC ADDRESS READ timeout\n"); 4268 rv = -1; 4269 goto done; 4270 } 4271 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4272 aprint_error(", MAC ADDRESS READ error\n"); 4273 rv = -1; 4274 goto done; 4275 } 4276 4277 addrs = IXL_DMA_KVA(&idm); 4278 if (!ISSET(iaq.iaq_param[0], 
htole32(IXL_AQ_MAC_PORT_VALID))) { 4279 printf(", port address is not valid\n"); 4280 goto done; 4281 } 4282 4283 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN); 4284 rv = 0; 4285 4286 done: 4287 ixl_dmamem_free(sc, &idm); 4288 return rv; 4289 } 4290 4291 static int 4292 ixl_get_switch_config(struct ixl_softc *sc) 4293 { 4294 struct ixl_dmamem idm; 4295 struct ixl_aq_desc iaq; 4296 struct ixl_aq_switch_config *hdr; 4297 struct ixl_aq_switch_config_element *elms, *elm; 4298 unsigned int nelm, i; 4299 int rv; 4300 4301 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4302 aprint_error_dev(sc->sc_dev, 4303 "unable to allocate switch config buffer\n"); 4304 return -1; 4305 } 4306 4307 memset(&iaq, 0, sizeof(iaq)); 4308 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4309 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4310 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG); 4311 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN); 4312 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4313 4314 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4315 BUS_DMASYNC_PREREAD); 4316 4317 rv = ixl_atq_poll(sc, &iaq, 250); 4318 4319 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4320 BUS_DMASYNC_POSTREAD); 4321 4322 if (rv != 0) { 4323 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n"); 4324 rv = -1; 4325 goto done; 4326 } 4327 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4328 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n"); 4329 rv = -1; 4330 goto done; 4331 } 4332 4333 hdr = IXL_DMA_KVA(&idm); 4334 elms = (struct ixl_aq_switch_config_element *)(hdr + 1); 4335 4336 nelm = le16toh(hdr->num_reported); 4337 if (nelm < 1) { 4338 aprint_error_dev(sc->sc_dev, "no switch config available\n"); 4339 rv = -1; 4340 goto done; 4341 } 4342 4343 for (i = 0; i < nelm; i++) { 4344 elm = &elms[i]; 4345 4346 aprint_debug_dev(sc->sc_dev, 4347 "type %x revision %u seid %04x\n", 4348 elm->type, elm->revision, le16toh(elm->seid)); 4349 aprint_debug_dev(sc->sc_dev, 4350 "uplink %04x downlink %04x\n", 4351 le16toh(elm->uplink_seid), 4352 le16toh(elm->downlink_seid)); 4353 aprint_debug_dev(sc->sc_dev, 4354 "conntype %x scheduler %04x extra %04x\n", 4355 elm->connection_type, 4356 le16toh(elm->scheduler_id), 4357 le16toh(elm->element_info)); 4358 } 4359 4360 elm = &elms[0]; 4361 4362 sc->sc_uplink_seid = elm->uplink_seid; 4363 sc->sc_downlink_seid = elm->downlink_seid; 4364 sc->sc_seid = elm->seid; 4365 4366 if ((sc->sc_uplink_seid == htole16(0)) != 4367 (sc->sc_downlink_seid == htole16(0))) { 4368 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n"); 4369 rv = -1; 4370 goto done; 4371 } 4372 4373 done: 4374 ixl_dmamem_free(sc, &idm); 4375 return rv; 4376 } 4377 4378 static int 4379 ixl_phy_mask_ints(struct ixl_softc *sc) 4380 { 4381 struct ixl_aq_desc iaq; 4382 4383 memset(&iaq, 0, sizeof(iaq)); 4384 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK); 4385 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK & 4386 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL | 4387 IXL_AQ_PHY_EV_MEDIA_NA)); 4388 4389 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4390 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n"); 4391 return -1; 4392 } 4393 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4394 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n"); 4395 return -1; 4396 } 4397 4398 return 0; 4399 } 4400 4401 static int 4402 ixl_get_phy_abilities(struct ixl_softc *sc,struct ixl_dmamem *idm) 4403 { 4404 struct ixl_aq_desc iaq; 4405 int rv; 4406 4407 memset(&iaq, 0, 
sizeof(iaq)); 4408 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4409 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4410 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES); 4411 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm)); 4412 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT); 4413 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 4414 4415 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4416 BUS_DMASYNC_PREREAD); 4417 4418 rv = ixl_atq_poll(sc, &iaq, 250); 4419 4420 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4421 BUS_DMASYNC_POSTREAD); 4422 4423 if (rv != 0) 4424 return -1; 4425 4426 return le16toh(iaq.iaq_retval); 4427 } 4428 4429 static int 4430 ixl_get_phy_info(struct ixl_softc *sc) 4431 { 4432 struct ixl_dmamem idm; 4433 struct ixl_aq_phy_abilities *phy; 4434 int rv; 4435 4436 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4437 aprint_error_dev(sc->sc_dev, 4438 "unable to allocate phy abilities buffer\n"); 4439 return -1; 4440 } 4441 4442 rv = ixl_get_phy_abilities(sc, &idm); 4443 switch (rv) { 4444 case -1: 4445 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n"); 4446 goto done; 4447 case IXL_AQ_RC_OK: 4448 break; 4449 case IXL_AQ_RC_EIO: 4450 aprint_error_dev(sc->sc_dev,"unable to query phy types\n"); 4451 goto done; 4452 default: 4453 aprint_error_dev(sc->sc_dev, 4454 "GET PHY ABILITIIES error %u\n", rv); 4455 goto done; 4456 } 4457 4458 phy = IXL_DMA_KVA(&idm); 4459 4460 sc->sc_phy_types = le32toh(phy->phy_type); 4461 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32; 4462 4463 sc->sc_phy_abilities = phy->abilities; 4464 sc->sc_phy_linkspeed = phy->link_speed; 4465 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info & 4466 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS | 4467 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS); 4468 sc->sc_eee_cap = phy->eee_capability; 4469 sc->sc_eeer_val = phy->eeer_val; 4470 sc->sc_d3_lpan = phy->d3_lpan; 4471 4472 rv = 0; 4473 4474 done: 4475 ixl_dmamem_free(sc, &idm); 4476 return rv; 4477 } 4478 4479 static int 4480 ixl_set_phy_config(struct ixl_softc *sc, 4481 uint8_t link_speed, uint8_t abilities, bool polling) 4482 { 4483 struct ixl_aq_phy_param *param; 4484 struct ixl_atq iatq; 4485 struct ixl_aq_desc *iaq; 4486 int error; 4487 4488 memset(&iatq, 0, sizeof(iatq)); 4489 4490 iaq = &iatq.iatq_desc; 4491 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG); 4492 param = (struct ixl_aq_phy_param *)&iaq->iaq_param; 4493 param->phy_types = htole32((uint32_t)sc->sc_phy_types); 4494 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32); 4495 param->link_speed = link_speed; 4496 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK; 4497 param->fec_cfg = sc->sc_phy_fec_cfg; 4498 param->eee_capability = sc->sc_eee_cap; 4499 param->eeer_val = sc->sc_eeer_val; 4500 param->d3_lpan = sc->sc_d3_lpan; 4501 4502 if (polling) 4503 error = ixl_atq_poll(sc, iaq, 250); 4504 else 4505 error = ixl_atq_exec(sc, &iatq); 4506 4507 if (error != 0) 4508 return error; 4509 4510 switch (le16toh(iaq->iaq_retval)) { 4511 case IXL_AQ_RC_OK: 4512 break; 4513 case IXL_AQ_RC_EPERM: 4514 return EPERM; 4515 default: 4516 return EIO; 4517 } 4518 4519 return 0; 4520 } 4521 4522 static int 4523 ixl_set_phy_autoselect(struct ixl_softc *sc) 4524 { 4525 uint8_t link_speed, abilities; 4526 4527 link_speed = sc->sc_phy_linkspeed; 4528 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO; 4529 4530 return ixl_set_phy_config(sc, link_speed, abilities, true); 4531 } 4532 4533 static int 4534 
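/*
 * ixl_get_link_status_poll: issue a polled PHY_LINK_STATUS admin
 * command (with IXL_AQ_LINK_NOTIFY set in the request) and fold the
 * reply into the softc via ixl_set_link_status_locked(); the
 * resulting link state is handed back through *l when the caller
 * asks for it.
 */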
ixl_get_link_status_poll(struct ixl_softc *sc, int *l) 4535 { 4536 struct ixl_aq_desc iaq; 4537 struct ixl_aq_link_param *param; 4538 int link; 4539 4540 memset(&iaq, 0, sizeof(iaq)); 4541 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 4542 param = (struct ixl_aq_link_param *)iaq.iaq_param; 4543 param->notify = IXL_AQ_LINK_NOTIFY; 4544 4545 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4546 return ETIMEDOUT; 4547 } 4548 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4549 return EIO; 4550 } 4551 4552 /* It is unneccessary to hold lock */ 4553 link = ixl_set_link_status_locked(sc, &iaq); 4554 4555 if (l != NULL) 4556 *l = link; 4557 4558 return 0; 4559 } 4560 4561 static int 4562 ixl_get_vsi(struct ixl_softc *sc) 4563 { 4564 struct ixl_dmamem *vsi = &sc->sc_scratch; 4565 struct ixl_aq_desc iaq; 4566 struct ixl_aq_vsi_param *param; 4567 struct ixl_aq_vsi_reply *reply; 4568 struct ixl_aq_vsi_data *data; 4569 int rv; 4570 4571 /* grumble, vsi info isn't "known" at compile time */ 4572 4573 memset(&iaq, 0, sizeof(iaq)); 4574 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4575 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4576 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS); 4577 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4578 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4579 4580 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4581 param->uplink_seid = sc->sc_seid; 4582 4583 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4584 BUS_DMASYNC_PREREAD); 4585 4586 rv = ixl_atq_poll(sc, &iaq, 250); 4587 4588 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4589 BUS_DMASYNC_POSTREAD); 4590 4591 if (rv != 0) { 4592 return ETIMEDOUT; 4593 } 4594 4595 switch (le16toh(iaq.iaq_retval)) { 4596 case IXL_AQ_RC_OK: 4597 break; 4598 case IXL_AQ_RC_ENOENT: 4599 return ENOENT; 4600 case IXL_AQ_RC_EACCES: 4601 return EACCES; 4602 default: 4603 return EIO; 4604 } 4605 4606 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param; 4607 sc->sc_vsi_number = le16toh(reply->vsi_number); 4608 data = IXL_DMA_KVA(vsi); 4609 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx); 4610 4611 return 0; 4612 } 4613 4614 static int 4615 ixl_set_vsi(struct ixl_softc *sc) 4616 { 4617 struct ixl_dmamem *vsi = &sc->sc_scratch; 4618 struct ixl_aq_desc iaq; 4619 struct ixl_aq_vsi_param *param; 4620 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi); 4621 unsigned int qnum; 4622 uint16_t val; 4623 int rv; 4624 4625 qnum = sc->sc_nqueue_pairs - 1; 4626 4627 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP | 4628 IXL_AQ_VSI_VALID_VLAN); 4629 4630 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK)); 4631 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG)); 4632 data->queue_mapping[0] = htole16(0); 4633 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) | 4634 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)); 4635 4636 val = le16toh(data->port_vlan_flags); 4637 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK); 4638 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL); 4639 4640 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) { 4641 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH); 4642 } else { 4643 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING); 4644 } 4645 4646 data->port_vlan_flags = htole16(val); 4647 4648 /* grumble, vsi info isn't "known" at compile time */ 4649 4650 memset(&iaq, 0, sizeof(iaq)); 4651 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4652 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? 
IXL_AQ_LB : 0)); 4653 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS); 4654 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4655 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4656 4657 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4658 param->uplink_seid = sc->sc_seid; 4659 4660 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4661 BUS_DMASYNC_PREWRITE); 4662 4663 rv = ixl_atq_poll(sc, &iaq, 250); 4664 4665 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4666 BUS_DMASYNC_POSTWRITE); 4667 4668 if (rv != 0) { 4669 return ETIMEDOUT; 4670 } 4671 4672 switch (le16toh(iaq.iaq_retval)) { 4673 case IXL_AQ_RC_OK: 4674 break; 4675 case IXL_AQ_RC_ENOENT: 4676 return ENOENT; 4677 case IXL_AQ_RC_EACCES: 4678 return EACCES; 4679 default: 4680 return EIO; 4681 } 4682 4683 return 0; 4684 } 4685 4686 static void 4687 ixl_set_filter_control(struct ixl_softc *sc) 4688 { 4689 uint32_t reg; 4690 4691 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0); 4692 4693 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK); 4694 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT); 4695 4696 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK); 4697 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK); 4698 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK); 4699 4700 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg); 4701 } 4702 4703 static inline void 4704 ixl_get_default_rss_key(uint32_t *buf, size_t len) 4705 { 4706 size_t cplen; 4707 uint8_t rss_seed[RSS_KEYSIZE]; 4708 4709 rss_getkey(rss_seed); 4710 memset(buf, 0, len); 4711 4712 cplen = MIN(len, sizeof(rss_seed)); 4713 memcpy(buf, rss_seed, cplen); 4714 } 4715 4716 static int 4717 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen) 4718 { 4719 struct ixl_dmamem *idm; 4720 struct ixl_atq iatq; 4721 struct ixl_aq_desc *iaq; 4722 struct ixl_aq_rss_key_param *param; 4723 struct ixl_aq_rss_key_data *data; 4724 size_t len, datalen, stdlen, extlen; 4725 uint16_t vsi_id; 4726 int rv; 4727 4728 memset(&iatq, 0, sizeof(iatq)); 4729 iaq = &iatq.iatq_desc; 4730 idm = &sc->sc_aqbuf; 4731 4732 datalen = sizeof(*data); 4733 4734 /*XXX The buf size has to be less than the size of the register */ 4735 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen); 4736 4737 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4738 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4739 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY); 4740 iaq->iaq_datalen = htole16(datalen); 4741 4742 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param; 4743 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) | 4744 IXL_AQ_RSSKEY_VSI_VALID; 4745 param->vsi_id = htole16(vsi_id); 4746 4747 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 4748 data = IXL_DMA_KVA(idm); 4749 4750 len = MIN(keylen, datalen); 4751 stdlen = MIN(sizeof(data->standard_rss_key), len); 4752 memcpy(data->standard_rss_key, key, stdlen); 4753 len = (len > stdlen) ? (len - stdlen) : 0; 4754 4755 extlen = MIN(sizeof(data->extended_hash_key), len); 4756 extlen = (stdlen < keylen) ? 
	    keylen - stdlen : 0;
	memcpy(data->extended_hash_key, key + stdlen, extlen);

	ixl_aq_dva(iaq, IXL_DMA_DVA(idm));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);

	rv = ixl_atq_exec(sc, &iatq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);

	if (rv != 0) {
		return ETIMEDOUT;
	}

	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
		return EIO;
	}

	return 0;
}

static int
ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
{
	struct ixl_dmamem *idm;
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	struct ixl_aq_rss_lut_param *param;
	uint16_t vsi_id;
	uint8_t *data;
	size_t dmalen;
	int rv;

	memset(&iatq, 0, sizeof(iatq));
	iaq = &iatq.iatq_desc;
	idm = &sc->sc_aqbuf;

	dmalen = MIN(lutlen, IXL_DMA_LEN(idm));

	iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
	    (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
	iaq->iaq_datalen = htole16(dmalen);

	memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
	data = IXL_DMA_KVA(idm);
	memcpy(data, lut, dmalen);
	ixl_aq_dva(iaq, IXL_DMA_DVA(idm));

	param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
	vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
	    IXL_AQ_RSSLUT_VSI_VALID;
	param->vsi_id = htole16(vsi_id);
	param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
	    IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);

	rv = ixl_atq_exec(sc, &iatq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);

	if (rv != 0) {
		return ETIMEDOUT;
	}

	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
		return EIO;
	}

	return 0;
}

static int
ixl_register_rss_key(struct ixl_softc *sc)
{
	uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
	int rv;
	size_t i;

	ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));

	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
		rv = ixl_set_rss_key(sc, (uint8_t *)rss_seed,
		    sizeof(rss_seed));
	} else {
		rv = 0;
		for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
			ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
		}
	}

	return rv;
}

static void
ixl_register_rss_pctype(struct ixl_softc *sc)
{
	uint64_t set_hena = 0;
	uint32_t hena0, hena1;

	/*
	 * We use TCP/UDP with IPv4/IPv6 by default.
	 * Note: the device cannot use just the IP header of TCP/UDP
	 * packets for the RSS hash calculation.
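	 *
	 * Each bit in the 64-bit HENA value enables hashing for one
	 * packet classification type (PCTYPE); the value is wider than
	 * a single CSR, so it is programmed below as two 32-bit halves,
	 * with I40E_PFQF_HENA(0) taking the low word and
	 * I40E_PFQF_HENA(1) the high word of the selected
	 * IXL_RSS_HENA_DEFAULT_* mask.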
4866 */ 4867 if (sc->sc_mac_type == I40E_MAC_X722) 4868 set_hena = IXL_RSS_HENA_DEFAULT_X722; 4869 else 4870 set_hena = IXL_RSS_HENA_DEFAULT_XL710; 4871 4872 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0)); 4873 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1)); 4874 4875 SET(hena0, set_hena); 4876 SET(hena1, set_hena >> 32); 4877 4878 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0); 4879 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1); 4880 } 4881 4882 static int 4883 ixl_register_rss_hlut(struct ixl_softc *sc) 4884 { 4885 unsigned int qid; 4886 uint8_t hlut_buf[512], lut_mask; 4887 uint32_t *hluts; 4888 size_t i, hluts_num; 4889 int rv; 4890 4891 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1; 4892 4893 for (i = 0; i < sc->sc_rss_table_size; i++) { 4894 qid = i % sc->sc_nqueue_pairs; 4895 hlut_buf[i] = qid & lut_mask; 4896 } 4897 4898 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) { 4899 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf)); 4900 } else { 4901 rv = 0; 4902 hluts = (uint32_t *)hlut_buf; 4903 hluts_num = sc->sc_rss_table_size >> 2; 4904 for (i = 0; i < hluts_num; i++) { 4905 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]); 4906 } 4907 ixl_flush(sc); 4908 } 4909 4910 return rv; 4911 } 4912 4913 static void 4914 ixl_config_rss(struct ixl_softc *sc) 4915 { 4916 4917 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 4918 4919 ixl_register_rss_key(sc); 4920 ixl_register_rss_pctype(sc); 4921 ixl_register_rss_hlut(sc); 4922 } 4923 4924 static const struct ixl_phy_type * 4925 ixl_search_phy_type(uint8_t phy_type) 4926 { 4927 const struct ixl_phy_type *itype; 4928 uint64_t mask; 4929 unsigned int i; 4930 4931 if (phy_type >= 64) 4932 return NULL; 4933 4934 mask = 1ULL << phy_type; 4935 4936 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 4937 itype = &ixl_phy_type_map[i]; 4938 4939 if (ISSET(itype->phy_type, mask)) 4940 return itype; 4941 } 4942 4943 return NULL; 4944 } 4945 4946 static uint64_t 4947 ixl_search_link_speed(uint8_t link_speed) 4948 { 4949 const struct ixl_speed_type *type; 4950 unsigned int i; 4951 4952 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4953 type = &ixl_speed_type_map[i]; 4954 4955 if (ISSET(type->dev_speed, link_speed)) 4956 return type->net_speed; 4957 } 4958 4959 return 0; 4960 } 4961 4962 static uint8_t 4963 ixl_search_baudrate(uint64_t baudrate) 4964 { 4965 const struct ixl_speed_type *type; 4966 unsigned int i; 4967 4968 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4969 type = &ixl_speed_type_map[i]; 4970 4971 if (type->net_speed == baudrate) { 4972 return type->dev_speed; 4973 } 4974 } 4975 4976 return 0; 4977 } 4978 4979 static int 4980 ixl_restart_an(struct ixl_softc *sc) 4981 { 4982 struct ixl_aq_desc iaq; 4983 4984 memset(&iaq, 0, sizeof(iaq)); 4985 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN); 4986 iaq.iaq_param[0] = 4987 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE); 4988 4989 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4990 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n"); 4991 return -1; 4992 } 4993 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4994 aprint_error_dev(sc->sc_dev, "RESTART AN error\n"); 4995 return -1; 4996 } 4997 4998 return 0; 4999 } 5000 5001 static int 5002 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 5003 uint16_t vlan, uint16_t flags) 5004 { 5005 struct ixl_aq_desc iaq; 5006 struct ixl_aq_add_macvlan *param; 5007 struct ixl_aq_add_macvlan_elem *elem; 5008 5009 memset(&iaq, 0, sizeof(iaq)); 5010 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 5011 iaq.iaq_opcode = 
htole16(IXL_AQ_OP_ADD_MACVLAN); 5012 iaq.iaq_datalen = htole16(sizeof(*elem)); 5013 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 5014 5015 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param; 5016 param->num_addrs = htole16(1); 5017 param->seid0 = htole16(0x8000) | sc->sc_seid; 5018 param->seid1 = 0; 5019 param->seid2 = 0; 5020 5021 elem = IXL_DMA_KVA(&sc->sc_scratch); 5022 memset(elem, 0, sizeof(*elem)); 5023 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 5024 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags); 5025 elem->vlan = htole16(vlan); 5026 5027 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 5028 return IXL_AQ_RC_EINVAL; 5029 } 5030 5031 switch (le16toh(iaq.iaq_retval)) { 5032 case IXL_AQ_RC_OK: 5033 break; 5034 case IXL_AQ_RC_ENOSPC: 5035 return ENOSPC; 5036 case IXL_AQ_RC_ENOENT: 5037 return ENOENT; 5038 case IXL_AQ_RC_EACCES: 5039 return EACCES; 5040 case IXL_AQ_RC_EEXIST: 5041 return EEXIST; 5042 case IXL_AQ_RC_EINVAL: 5043 return EINVAL; 5044 default: 5045 return EIO; 5046 } 5047 5048 return 0; 5049 } 5050 5051 static int 5052 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 5053 uint16_t vlan, uint16_t flags) 5054 { 5055 struct ixl_aq_desc iaq; 5056 struct ixl_aq_remove_macvlan *param; 5057 struct ixl_aq_remove_macvlan_elem *elem; 5058 5059 memset(&iaq, 0, sizeof(iaq)); 5060 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 5061 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN); 5062 iaq.iaq_datalen = htole16(sizeof(*elem)); 5063 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 5064 5065 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param; 5066 param->num_addrs = htole16(1); 5067 param->seid0 = htole16(0x8000) | sc->sc_seid; 5068 param->seid1 = 0; 5069 param->seid2 = 0; 5070 5071 elem = IXL_DMA_KVA(&sc->sc_scratch); 5072 memset(elem, 0, sizeof(*elem)); 5073 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 5074 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags); 5075 elem->vlan = htole16(vlan); 5076 5077 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 5078 return EINVAL; 5079 } 5080 5081 switch (le16toh(iaq.iaq_retval)) { 5082 case IXL_AQ_RC_OK: 5083 break; 5084 case IXL_AQ_RC_ENOENT: 5085 return ENOENT; 5086 case IXL_AQ_RC_EACCES: 5087 return EACCES; 5088 case IXL_AQ_RC_EINVAL: 5089 return EINVAL; 5090 default: 5091 return EIO; 5092 } 5093 5094 return 0; 5095 } 5096 5097 static int 5098 ixl_hmc(struct ixl_softc *sc) 5099 { 5100 struct { 5101 uint32_t count; 5102 uint32_t minsize; 5103 bus_size_t objsiz; 5104 bus_size_t setoff; 5105 bus_size_t setcnt; 5106 } regs[] = { 5107 { 5108 0, 5109 IXL_HMC_TXQ_MINSIZE, 5110 I40E_GLHMC_LANTXOBJSZ, 5111 I40E_GLHMC_LANTXBASE(sc->sc_pf_id), 5112 I40E_GLHMC_LANTXCNT(sc->sc_pf_id), 5113 }, 5114 { 5115 0, 5116 IXL_HMC_RXQ_MINSIZE, 5117 I40E_GLHMC_LANRXOBJSZ, 5118 I40E_GLHMC_LANRXBASE(sc->sc_pf_id), 5119 I40E_GLHMC_LANRXCNT(sc->sc_pf_id), 5120 }, 5121 { 5122 0, 5123 0, 5124 I40E_GLHMC_FCOEDDPOBJSZ, 5125 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id), 5126 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id), 5127 }, 5128 { 5129 0, 5130 0, 5131 I40E_GLHMC_FCOEFOBJSZ, 5132 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id), 5133 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id), 5134 }, 5135 }; 5136 struct ixl_hmc_entry *e; 5137 uint64_t size, dva; 5138 uint8_t *kva; 5139 uint64_t *sdpage; 5140 unsigned int i; 5141 int npages, tables; 5142 uint32_t reg; 5143 5144 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries)); 5145 5146 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count = 5147 ixl_rd(sc, I40E_GLHMC_LANQMAX); 5148 5149 size = 0; 5150 for (i 
= 0; i < __arraycount(regs); i++) { 5151 e = &sc->sc_hmc_entries[i]; 5152 5153 e->hmc_count = regs[i].count; 5154 reg = ixl_rd(sc, regs[i].objsiz); 5155 e->hmc_size = BIT_ULL(0x3F & reg); 5156 e->hmc_base = size; 5157 5158 if ((e->hmc_size * 8) < regs[i].minsize) { 5159 aprint_error_dev(sc->sc_dev, 5160 "kernel hmc entry is too big\n"); 5161 return -1; 5162 } 5163 5164 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP); 5165 } 5166 size = roundup(size, IXL_HMC_PGSIZE); 5167 npages = size / IXL_HMC_PGSIZE; 5168 5169 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ; 5170 5171 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) { 5172 aprint_error_dev(sc->sc_dev, 5173 "unable to allocate hmc pd memory\n"); 5174 return -1; 5175 } 5176 5177 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE, 5178 IXL_HMC_PGSIZE) != 0) { 5179 aprint_error_dev(sc->sc_dev, 5180 "unable to allocate hmc sd memory\n"); 5181 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5182 return -1; 5183 } 5184 5185 kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 5186 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd)); 5187 5188 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 5189 0, IXL_DMA_LEN(&sc->sc_hmc_pd), 5190 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5191 5192 dva = IXL_DMA_DVA(&sc->sc_hmc_pd); 5193 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd); 5194 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd)); 5195 5196 for (i = 0; (int)i < npages; i++) { 5197 *sdpage = htole64(dva | IXL_HMC_PDVALID); 5198 sdpage++; 5199 5200 dva += IXL_HMC_PGSIZE; 5201 } 5202 5203 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd), 5204 0, IXL_DMA_LEN(&sc->sc_hmc_sd), 5205 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5206 5207 dva = IXL_DMA_DVA(&sc->sc_hmc_sd); 5208 for (i = 0; (int)i < tables; i++) { 5209 uint32_t count; 5210 5211 KASSERT(npages >= 0); 5212 5213 count = ((unsigned int)npages > IXL_HMC_PGS) ? 
5214 IXL_HMC_PGS : (unsigned int)npages; 5215 5216 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32); 5217 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva | 5218 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | 5219 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)); 5220 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 5221 ixl_wr(sc, I40E_PFHMC_SDCMD, 5222 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i); 5223 5224 npages -= IXL_HMC_PGS; 5225 dva += IXL_HMC_PGSIZE; 5226 } 5227 5228 for (i = 0; i < __arraycount(regs); i++) { 5229 e = &sc->sc_hmc_entries[i]; 5230 5231 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP); 5232 ixl_wr(sc, regs[i].setcnt, e->hmc_count); 5233 } 5234 5235 return 0; 5236 } 5237 5238 static void 5239 ixl_hmc_free(struct ixl_softc *sc) 5240 { 5241 ixl_dmamem_free(sc, &sc->sc_hmc_sd); 5242 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5243 } 5244 5245 static void 5246 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing, 5247 unsigned int npacking) 5248 { 5249 uint8_t *dst = d; 5250 const uint8_t *src = s; 5251 unsigned int i; 5252 5253 for (i = 0; i < npacking; i++) { 5254 const struct ixl_hmc_pack *pack = &packing[i]; 5255 unsigned int offset = pack->lsb / 8; 5256 unsigned int align = pack->lsb % 8; 5257 const uint8_t *in = src + pack->offset; 5258 uint8_t *out = dst + offset; 5259 int width = pack->width; 5260 unsigned int inbits = 0; 5261 5262 if (align) { 5263 inbits = (*in++) << align; 5264 *out++ |= (inbits & 0xff); 5265 inbits >>= 8; 5266 5267 width -= 8 - align; 5268 } 5269 5270 while (width >= 8) { 5271 inbits |= (*in++) << align; 5272 *out++ = (inbits & 0xff); 5273 inbits >>= 8; 5274 5275 width -= 8; 5276 } 5277 5278 if (width > 0) { 5279 inbits |= (*in) << align; 5280 *out |= (inbits & ((1 << width) - 1)); 5281 } 5282 } 5283 } 5284 5285 static struct ixl_aq_buf * 5286 ixl_aqb_alloc(struct ixl_softc *sc) 5287 { 5288 struct ixl_aq_buf *aqb; 5289 5290 aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP); 5291 5292 aqb->aqb_size = IXL_AQ_BUFLEN; 5293 5294 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1, 5295 aqb->aqb_size, 0, 5296 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0) 5297 goto free; 5298 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size, 5299 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs, 5300 BUS_DMA_WAITOK) != 0) 5301 goto destroy; 5302 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs, 5303 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0) 5304 goto dma_free; 5305 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data, 5306 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0) 5307 goto unmap; 5308 5309 return aqb; 5310 unmap: 5311 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5312 dma_free: 5313 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5314 destroy: 5315 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5316 free: 5317 kmem_free(aqb, sizeof(*aqb)); 5318 5319 return NULL; 5320 } 5321 5322 static void 5323 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb) 5324 { 5325 5326 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map); 5327 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5328 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5329 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5330 kmem_free(aqb, sizeof(*aqb)); 5331 } 5332 5333 static int 5334 ixl_arq_fill(struct ixl_softc *sc) 5335 { 5336 struct ixl_aq_buf *aqb; 5337 struct ixl_aq_desc *arq, *iaq; 5338 unsigned int prod = sc->sc_arq_prod; 5339 unsigned int n; 5340 int post = 0; 5341 5342 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons, 
5343 IXL_AQ_NUM); 5344 arq = IXL_DMA_KVA(&sc->sc_arq); 5345 5346 if (__predict_false(n <= 0)) 5347 return 0; 5348 5349 do { 5350 aqb = sc->sc_arq_live[prod]; 5351 iaq = &arq[prod]; 5352 5353 if (aqb == NULL) { 5354 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle); 5355 if (aqb != NULL) { 5356 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5357 ixl_aq_buf, aqb_entry); 5358 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) { 5359 break; 5360 } 5361 5362 sc->sc_arq_live[prod] = aqb; 5363 memset(aqb->aqb_data, 0, aqb->aqb_size); 5364 5365 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, 5366 aqb->aqb_size, BUS_DMASYNC_PREREAD); 5367 5368 iaq->iaq_flags = htole16(IXL_AQ_BUF | 5369 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? 5370 IXL_AQ_LB : 0)); 5371 iaq->iaq_opcode = 0; 5372 iaq->iaq_datalen = htole16(aqb->aqb_size); 5373 iaq->iaq_retval = 0; 5374 iaq->iaq_cookie = 0; 5375 iaq->iaq_param[0] = 0; 5376 iaq->iaq_param[1] = 0; 5377 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr); 5378 } 5379 5380 prod++; 5381 prod &= IXL_AQ_MASK; 5382 5383 post = 1; 5384 5385 } while (--n); 5386 5387 if (post) { 5388 sc->sc_arq_prod = prod; 5389 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 5390 } 5391 5392 return post; 5393 } 5394 5395 static void 5396 ixl_arq_unfill(struct ixl_softc *sc) 5397 { 5398 struct ixl_aq_buf *aqb; 5399 unsigned int i; 5400 5401 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) { 5402 aqb = sc->sc_arq_live[i]; 5403 if (aqb == NULL) 5404 continue; 5405 5406 sc->sc_arq_live[i] = NULL; 5407 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size, 5408 BUS_DMASYNC_POSTREAD); 5409 ixl_aqb_free(sc, aqb); 5410 } 5411 5412 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) { 5413 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5414 ixl_aq_buf, aqb_entry); 5415 ixl_aqb_free(sc, aqb); 5416 } 5417 } 5418 5419 static void 5420 ixl_clear_hw(struct ixl_softc *sc) 5421 { 5422 uint32_t num_queues, base_queue; 5423 uint32_t num_pf_int; 5424 uint32_t num_vf_int; 5425 uint32_t num_vfs; 5426 uint32_t i, j; 5427 uint32_t val; 5428 uint32_t eol = 0x7ff; 5429 5430 /* get number of interrupts, queues, and vfs */ 5431 val = ixl_rd(sc, I40E_GLPCI_CNF2); 5432 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 5433 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 5434 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> 5435 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; 5436 5437 val = ixl_rd(sc, I40E_PFLAN_QALLOC); 5438 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> 5439 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 5440 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> 5441 I40E_PFLAN_QALLOC_LASTQ_SHIFT; 5442 if (val & I40E_PFLAN_QALLOC_VALID_MASK) 5443 num_queues = (j - base_queue) + 1; 5444 else 5445 num_queues = 0; 5446 5447 val = ixl_rd(sc, I40E_PF_VT_PFALLOC); 5448 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> 5449 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; 5450 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> 5451 I40E_PF_VT_PFALLOC_LASTVF_SHIFT; 5452 if (val & I40E_PF_VT_PFALLOC_VALID_MASK) 5453 num_vfs = (j - i) + 1; 5454 else 5455 num_vfs = 0; 5456 5457 /* stop all the interrupts */ 5458 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5459 ixl_flush(sc); 5460 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 5461 for (i = 0; i < num_pf_int - 2; i++) 5462 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val); 5463 ixl_flush(sc); 5464 5465 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ 5466 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5467 ixl_wr(sc, I40E_PFINT_LNKLST0, val); 5468 for (i = 0; i < num_pf_int - 2; i++) 5469 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val); 5470 val = eol << 
I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5471 for (i = 0; i < num_vfs; i++) 5472 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val); 5473 for (i = 0; i < num_vf_int - 2; i++) 5474 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val); 5475 5476 /* warn the HW of the coming Tx disables */ 5477 for (i = 0; i < num_queues; i++) { 5478 uint32_t abs_queue_idx = base_queue + i; 5479 uint32_t reg_block = 0; 5480 5481 if (abs_queue_idx >= 128) { 5482 reg_block = abs_queue_idx / 128; 5483 abs_queue_idx %= 128; 5484 } 5485 5486 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block)); 5487 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 5488 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 5489 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 5490 5491 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val); 5492 } 5493 delaymsec(400); 5494 5495 /* stop all the queues */ 5496 for (i = 0; i < num_queues; i++) { 5497 ixl_wr(sc, I40E_QINT_TQCTL(i), 0); 5498 ixl_wr(sc, I40E_QTX_ENA(i), 0); 5499 ixl_wr(sc, I40E_QINT_RQCTL(i), 0); 5500 ixl_wr(sc, I40E_QRX_ENA(i), 0); 5501 } 5502 5503 /* short wait for all queue disables to settle */ 5504 delaymsec(50); 5505 } 5506 5507 static int 5508 ixl_pf_reset(struct ixl_softc *sc) 5509 { 5510 uint32_t cnt = 0; 5511 uint32_t cnt1 = 0; 5512 uint32_t reg = 0, reg0 = 0; 5513 uint32_t grst_del; 5514 5515 /* 5516 * Poll for Global Reset steady state in case of recent GRST. 5517 * The grst delay value is in 100ms units, and we'll wait a 5518 * couple counts longer to be sure we don't just miss the end. 5519 */ 5520 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL); 5521 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK; 5522 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 5523 5524 grst_del = grst_del * 20; 5525 5526 for (cnt = 0; cnt < grst_del; cnt++) { 5527 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 5528 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 5529 break; 5530 delaymsec(100); 5531 } 5532 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5533 aprint_error(", Global reset polling failed to complete\n"); 5534 return -1; 5535 } 5536 5537 /* Now Wait for the FW to be ready */ 5538 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { 5539 reg = ixl_rd(sc, I40E_GLNVM_ULD); 5540 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5541 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); 5542 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5543 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) 5544 break; 5545 5546 delaymsec(10); 5547 } 5548 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5549 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { 5550 aprint_error(", wait for FW Reset complete timed out " 5551 "(I40E_GLNVM_ULD = 0x%x)\n", reg); 5552 return -1; 5553 } 5554 5555 /* 5556 * If there was a Global Reset in progress when we got here, 5557 * we don't need to do the PF Reset 5558 */ 5559 if (cnt == 0) { 5560 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5561 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK); 5562 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { 5563 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5564 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) 5565 break; 5566 delaymsec(1); 5567 5568 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT); 5569 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5570 aprint_error(", Core reset upcoming." 
				    " Skipping PF reset request\n");
				return -1;
			}
		}
		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			aprint_error(", PF reset polling failed to complete"
			    " (I40E_PFGEN_CTRL = 0x%x)\n", reg);
			return -1;
		}
	}

	return 0;
}

static int
ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
    bus_size_t size, bus_size_t align)
{
	ixm->ixm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
	    ixm->ixm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &ixm->ixm_map) != 0)
		return 1;
	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
	    BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	memset(ixm->ixm_kva, 0, ixm->ixm_size);

	return 0;
unmap:
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
	return 1;
}

static void
ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
{
	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
}

static int
ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct vlanid_list *vlanidp;
	int rv;

	ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
	ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);

	rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
	    IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
	if (rv != 0)
		return rv;
	rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
	    IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
	if (rv != 0)
		return rv;

	ETHER_LOCK(ec);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		rv = ixl_add_macvlan(sc, sc->sc_enaddr,
		    vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
		if (rv != 0)
			break;
		rv = ixl_add_macvlan(sc, etherbroadcastaddr,
		    vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
		if (rv != 0)
			break;
	}
	ETHER_UNLOCK(ec);

	return rv;
}

static void
ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
{
	struct vlanid_list *vlanidp;
	struct ethercom *ec = &sc->sc_ec;

	ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
	    IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
	ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
	    IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);

	ETHER_LOCK(ec);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		ixl_remove_macvlan(sc, sc->sc_enaddr,
		    vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
		ixl_remove_macvlan(sc, etherbroadcastaddr,
		    vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
	}
	ETHER_UNLOCK(ec);

	ixl_add_macvlan(sc, sc->sc_enaddr, 0,
	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
	ixl_add_macvlan(sc, etherbroadcastaddr, 0,
	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
}

static int
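/*
 * ixl_update_macvlan: switch between the two MAC/VLAN filter layouts
 * above.  With ETHERCAP_VLAN_HWFILTER enabled, the unicast and
 * broadcast addresses each get one perfect-match filter per
 * configured VLAN ID; otherwise a single VLAN-agnostic filter per
 * address is installed.  Falls back to the VLAN-agnostic layout if
 * programming the per-VLAN filters fails.
 */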
5692 ixl_update_macvlan(struct ixl_softc *sc) 5693 { 5694 int rv = 0; 5695 int next_ec_capenable = sc->sc_ec.ec_capenable; 5696 5697 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 5698 rv = ixl_setup_vlan_hwfilter(sc); 5699 if (rv != 0) 5700 ixl_teardown_vlan_hwfilter(sc); 5701 } else { 5702 ixl_teardown_vlan_hwfilter(sc); 5703 } 5704 5705 return rv; 5706 } 5707 5708 static int 5709 ixl_ifflags_cb(struct ethercom *ec) 5710 { 5711 struct ifnet *ifp = &ec->ec_if; 5712 struct ixl_softc *sc = ifp->if_softc; 5713 int rv, change; 5714 5715 mutex_enter(&sc->sc_cfg_lock); 5716 5717 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable; 5718 5719 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) { 5720 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 5721 rv = ENETRESET; 5722 goto out; 5723 } 5724 5725 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) { 5726 rv = ixl_update_macvlan(sc); 5727 if (rv == 0) { 5728 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 5729 } else { 5730 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER); 5731 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 5732 } 5733 } 5734 5735 rv = ixl_iff(sc); 5736 out: 5737 mutex_exit(&sc->sc_cfg_lock); 5738 5739 return rv; 5740 } 5741 5742 static int 5743 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 5744 { 5745 const struct ixl_aq_link_status *status; 5746 const struct ixl_phy_type *itype; 5747 5748 uint64_t ifm_active = IFM_ETHER; 5749 uint64_t ifm_status = IFM_AVALID; 5750 int link_state = LINK_STATE_DOWN; 5751 uint64_t baudrate = 0; 5752 5753 status = (const struct ixl_aq_link_status *)iaq->iaq_param; 5754 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) { 5755 ifm_active |= IFM_NONE; 5756 goto done; 5757 } 5758 5759 ifm_active |= IFM_FDX; 5760 ifm_status |= IFM_ACTIVE; 5761 link_state = LINK_STATE_UP; 5762 5763 itype = ixl_search_phy_type(status->phy_type); 5764 if (itype != NULL) 5765 ifm_active |= itype->ifm_type; 5766 5767 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX)) 5768 ifm_active |= IFM_ETH_TXPAUSE; 5769 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX)) 5770 ifm_active |= IFM_ETH_RXPAUSE; 5771 5772 baudrate = ixl_search_link_speed(status->link_speed); 5773 5774 done: 5775 /* sc->sc_cfg_lock held expect during attach */ 5776 sc->sc_media_active = ifm_active; 5777 sc->sc_media_status = ifm_status; 5778 5779 sc->sc_ec.ec_if.if_baudrate = baudrate; 5780 5781 return link_state; 5782 } 5783 5784 static int 5785 ixl_establish_intx(struct ixl_softc *sc) 5786 { 5787 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5788 pci_intr_handle_t *intr; 5789 char xnamebuf[32]; 5790 char intrbuf[PCI_INTRSTR_LEN]; 5791 char const *intrstr; 5792 5793 KASSERT(sc->sc_nintrs == 1); 5794 5795 intr = &sc->sc_ihp[0]; 5796 5797 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 5798 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy", 5799 device_xname(sc->sc_dev)); 5800 5801 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr, 5802 sc, xnamebuf); 5803 5804 if (sc->sc_ihs[0] == NULL) { 5805 aprint_error_dev(sc->sc_dev, 5806 "unable to establish interrupt at %s\n", intrstr); 5807 return -1; 5808 } 5809 5810 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 5811 return 0; 5812 } 5813 5814 static int 5815 ixl_establish_msix(struct ixl_softc *sc) 5816 { 5817 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5818 kcpuset_t *affinity; 5819 unsigned int vector = 0; 5820 unsigned int i; 5821 int affinity_to, r; 5822 char xnamebuf[32]; 5823 char intrbuf[PCI_INTRSTR_LEN]; 5824 char const 
*intrstr; 5825 5826 kcpuset_create(&affinity, false); 5827 5828 /* the "other" intr is mapped to vector 0 */ 5829 vector = 0; 5830 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5831 intrbuf, sizeof(intrbuf)); 5832 snprintf(xnamebuf, sizeof(xnamebuf), "%s others", 5833 device_xname(sc->sc_dev)); 5834 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5835 sc->sc_ihp[vector], IPL_NET, ixl_other_intr, 5836 sc, xnamebuf); 5837 if (sc->sc_ihs[vector] == NULL) { 5838 aprint_error_dev(sc->sc_dev, 5839 "unable to establish interrupt at %s\n", intrstr); 5840 goto fail; 5841 } 5842 5843 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr); 5844 5845 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5846 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu; 5847 5848 kcpuset_zero(affinity); 5849 kcpuset_set(affinity, affinity_to); 5850 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5851 if (r == 0) { 5852 aprint_normal(", affinity to %u", affinity_to); 5853 } 5854 aprint_normal("\n"); 5855 vector++; 5856 5857 sc->sc_msix_vector_queue = vector; 5858 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5859 5860 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 5861 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5862 intrbuf, sizeof(intrbuf)); 5863 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d", 5864 device_xname(sc->sc_dev), i); 5865 5866 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5867 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr, 5868 (void *)&sc->sc_qps[i], xnamebuf); 5869 5870 if (sc->sc_ihs[vector] == NULL) { 5871 aprint_error_dev(sc->sc_dev, 5872 "unable to establish interrupt at %s\n", intrstr); 5873 goto fail; 5874 } 5875 5876 aprint_normal_dev(sc->sc_dev, 5877 "for TXRX%d interrupt at %s",i , intrstr); 5878 5879 kcpuset_zero(affinity); 5880 kcpuset_set(affinity, affinity_to); 5881 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5882 if (r == 0) { 5883 aprint_normal(", affinity to %u", affinity_to); 5884 affinity_to = (affinity_to + 1) % ncpu; 5885 } 5886 aprint_normal("\n"); 5887 vector++; 5888 } 5889 5890 kcpuset_destroy(affinity); 5891 5892 return 0; 5893 fail: 5894 for (i = 0; i < vector; i++) { 5895 pci_intr_disestablish(pc, sc->sc_ihs[i]); 5896 } 5897 5898 sc->sc_msix_vector_queue = 0; 5899 sc->sc_msix_vector_queue = 0; 5900 kcpuset_destroy(affinity); 5901 5902 return -1; 5903 } 5904 5905 static void 5906 ixl_config_queue_intr(struct ixl_softc *sc) 5907 { 5908 unsigned int i, vector; 5909 5910 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5911 vector = sc->sc_msix_vector_queue; 5912 } else { 5913 vector = I40E_INTR_NOTX_INTR; 5914 5915 ixl_wr(sc, I40E_PFINT_LNKLST0, 5916 (I40E_INTR_NOTX_QUEUE << 5917 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) | 5918 (I40E_QUEUE_TYPE_RX << 5919 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5920 } 5921 5922 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 5923 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0); 5924 ixl_flush(sc); 5925 5926 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), 5927 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | 5928 (I40E_QUEUE_TYPE_RX << 5929 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5930 5931 ixl_wr(sc, I40E_QINT_RQCTL(i), 5932 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 5933 (I40E_ITR_INDEX_RX << 5934 I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 5935 (I40E_INTR_NOTX_RX_QUEUE << 5936 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) | 5937 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 5938 (I40E_QUEUE_TYPE_TX << 5939 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 5940 I40E_QINT_RQCTL_CAUSE_ENA_MASK); 5941 5942 ixl_wr(sc, 
I40E_QINT_TQCTL(i), 5943 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 5944 (I40E_ITR_INDEX_TX << 5945 I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 5946 (I40E_INTR_NOTX_TX_QUEUE << 5947 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) | 5948 (I40E_QUEUE_TYPE_EOL << 5949 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | 5950 (I40E_QUEUE_TYPE_RX << 5951 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) | 5952 I40E_QINT_TQCTL_CAUSE_ENA_MASK); 5953 5954 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5955 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i), 5956 sc->sc_itr_rx); 5957 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i), 5958 sc->sc_itr_tx); 5959 vector++; 5960 } 5961 } 5962 ixl_flush(sc); 5963 5964 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx); 5965 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx); 5966 ixl_flush(sc); 5967 } 5968 5969 static void 5970 ixl_config_other_intr(struct ixl_softc *sc) 5971 { 5972 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5973 (void)ixl_rd(sc, I40E_PFINT_ICR0); 5974 5975 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 5976 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 5977 I40E_PFINT_ICR0_ENA_GRST_MASK | 5978 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 5979 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 5980 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 5981 I40E_PFINT_ICR0_ENA_VFLR_MASK | 5982 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 5983 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 5984 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK); 5985 5986 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF); 5987 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0); 5988 ixl_wr(sc, I40E_PFINT_STAT_CTL0, 5989 (I40E_ITR_INDEX_OTHER << 5990 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)); 5991 ixl_flush(sc); 5992 } 5993 5994 static int 5995 ixl_setup_interrupts(struct ixl_softc *sc) 5996 { 5997 struct pci_attach_args *pa = &sc->sc_pa; 5998 pci_intr_type_t max_type, intr_type; 5999 int counts[PCI_INTR_TYPE_SIZE]; 6000 int error; 6001 unsigned int i; 6002 bool retry; 6003 6004 memset(counts, 0, sizeof(counts)); 6005 max_type = PCI_INTR_TYPE_MSIX; 6006 /* QPs + other interrupt */ 6007 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1; 6008 counts[PCI_INTR_TYPE_INTX] = 1; 6009 6010 if (ixl_param_nomsix) 6011 counts[PCI_INTR_TYPE_MSIX] = 0; 6012 6013 do { 6014 retry = false; 6015 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type); 6016 if (error != 0) { 6017 aprint_error_dev(sc->sc_dev, 6018 "couldn't map interrupt\n"); 6019 break; 6020 } 6021 6022 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]); 6023 sc->sc_nintrs = counts[intr_type]; 6024 KASSERT(sc->sc_nintrs > 0); 6025 6026 for (i = 0; i < sc->sc_nintrs; i++) { 6027 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i], 6028 PCI_INTR_MPSAFE, true); 6029 } 6030 6031 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs, 6032 KM_SLEEP); 6033 6034 if (intr_type == PCI_INTR_TYPE_MSIX) { 6035 error = ixl_establish_msix(sc); 6036 if (error) { 6037 counts[PCI_INTR_TYPE_MSIX] = 0; 6038 retry = true; 6039 } 6040 } else if (intr_type == PCI_INTR_TYPE_INTX) { 6041 error = ixl_establish_intx(sc); 6042 } else { 6043 error = -1; 6044 } 6045 6046 if (error) { 6047 kmem_free(sc->sc_ihs, 6048 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 6049 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 6050 } else { 6051 sc->sc_intrtype = intr_type; 6052 } 6053 } while (retry); 6054 6055 return error; 6056 } 6057 6058 static void 6059 ixl_teardown_interrupts(struct ixl_softc *sc) 6060 { 6061 struct pci_attach_args *pa = &sc->sc_pa; 6062 unsigned int i; 6063 6064 for (i = 0; i < sc->sc_nintrs; i++) { 6065 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]); 
static int
ixl_setup_stats(struct ixl_softc *sc)
{
	struct ixl_queue_pair *qp;
	struct ixl_tx_ring *txr;
	struct ixl_rx_ring *rxr;
	struct ixl_stats_counters *isc;
	unsigned int i;

	for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
		qp = &sc->sc_qps[i];
		txr = qp->qp_txr;
		rxr = qp->qp_rxr;

		evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
		    NULL, qp->qp_name, "m_defrag succeeded");
		evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
		    NULL, qp->qp_name, "m_defrag_failed");
		evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
		    NULL, qp->qp_name, "Dropped in pcq");
		evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
		    NULL, qp->qp_name, "Deferred transmit");
		evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
		    NULL, qp->qp_name, "Interrupt on queue");
		evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
		    NULL, qp->qp_name, "Handled queue in softint/workqueue");

		evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
		    NULL, qp->qp_name, "MGETHDR failed");
		evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
		    NULL, qp->qp_name, "MCLGET failed");
		evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
		    EVCNT_TYPE_MISC, NULL, qp->qp_name,
		    "bus_dmamap_load_mbuf failed");
		evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
		    NULL, qp->qp_name, "Interrupt on queue");
		evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
		    NULL, qp->qp_name, "Handled queue in softint/workqueue");
	}

	evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "Interrupt for other events");
	evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Link status event");
	evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "ECC error");
	evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "PCI exception");
	evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Critical error");

	isc = &sc->sc_stats_counters;
	evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "CRC errors");
	evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Illegal bytes");
	evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Mac local faults");
	evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Mac remote faults");
	evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx xon");
	evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx xon");
	evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx xoff");
	evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx xoff");
	evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx fragments");
	evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx jabber");

	evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx size 64");
	evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx size 127");
	evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx size 255");
	evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx size 511");
	evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx size 1023");
	evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx size 1522");
	evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
	evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx under size");
	evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx over size");

	evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx bytes / port");
	evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx discards / port");
	evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx unicast / port");
	evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx multicast / port");
	evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx broadcast / port");

	evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
	evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx discard / vsi");
	evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
	evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
	evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");

	evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx size 64");
	evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx size 127");
	evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx size 255");
	evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx size 511");
	evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx size 1023");
	evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx size 1522");
	evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx jumbo packets");

	evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx bytes / port");
	evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev),
	    "Tx dropped due to link down / port");
	evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx unicast / port");
	evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx multicast / port");
	evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx broadcast / port");

	evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
	evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
	evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
	evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
	evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");

	sc->sc_stats_intval = ixl_param_stats_interval;
	callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
	ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);

	return 0;
}

static void
ixl_teardown_stats(struct ixl_softc *sc)
{
	struct ixl_tx_ring *txr;
	struct ixl_rx_ring *rxr;
	struct ixl_stats_counters *isc;
	unsigned int i;

	for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
		txr = sc->sc_qps[i].qp_txr;
		rxr = sc->sc_qps[i].qp_rxr;

		evcnt_detach(&txr->txr_defragged);
		evcnt_detach(&txr->txr_defrag_failed);
		evcnt_detach(&txr->txr_pcqdrop);
		evcnt_detach(&txr->txr_transmitdef);
		evcnt_detach(&txr->txr_intr);
		evcnt_detach(&txr->txr_defer);

		evcnt_detach(&rxr->rxr_mgethdr_failed);
		evcnt_detach(&rxr->rxr_mgetcl_failed);
		evcnt_detach(&rxr->rxr_mbuf_load_failed);
		evcnt_detach(&rxr->rxr_intr);
		evcnt_detach(&rxr->rxr_defer);
	}

	isc = &sc->sc_stats_counters;
	evcnt_detach(&isc->isc_crc_errors);
	evcnt_detach(&isc->isc_illegal_bytes);
	evcnt_detach(&isc->isc_mac_local_faults);
	evcnt_detach(&isc->isc_mac_remote_faults);
	evcnt_detach(&isc->isc_link_xon_rx);
	evcnt_detach(&isc->isc_link_xon_tx);
	evcnt_detach(&isc->isc_link_xoff_rx);
	evcnt_detach(&isc->isc_link_xoff_tx);
	evcnt_detach(&isc->isc_rx_fragments);
	evcnt_detach(&isc->isc_rx_jabber);
	evcnt_detach(&isc->isc_rx_bytes);
	evcnt_detach(&isc->isc_rx_discards);
	evcnt_detach(&isc->isc_rx_unicast);
	evcnt_detach(&isc->isc_rx_multicast);
	evcnt_detach(&isc->isc_rx_broadcast);
	evcnt_detach(&isc->isc_rx_size_64);
	evcnt_detach(&isc->isc_rx_size_127);
	evcnt_detach(&isc->isc_rx_size_255);
	evcnt_detach(&isc->isc_rx_size_511);
	evcnt_detach(&isc->isc_rx_size_1023);
	evcnt_detach(&isc->isc_rx_size_1522);
	evcnt_detach(&isc->isc_rx_size_big);
	evcnt_detach(&isc->isc_rx_undersize);
	evcnt_detach(&isc->isc_rx_oversize);
	evcnt_detach(&isc->isc_tx_bytes);
	evcnt_detach(&isc->isc_tx_dropped_link_down);
	evcnt_detach(&isc->isc_tx_unicast);
	evcnt_detach(&isc->isc_tx_multicast);
	evcnt_detach(&isc->isc_tx_broadcast);
	evcnt_detach(&isc->isc_tx_size_64);
	evcnt_detach(&isc->isc_tx_size_127);
	evcnt_detach(&isc->isc_tx_size_255);
	evcnt_detach(&isc->isc_tx_size_511);
	evcnt_detach(&isc->isc_tx_size_1023);
	evcnt_detach(&isc->isc_tx_size_1522);
	evcnt_detach(&isc->isc_tx_size_big);
	evcnt_detach(&isc->isc_vsi_rx_discards);
	evcnt_detach(&isc->isc_vsi_rx_bytes);
	evcnt_detach(&isc->isc_vsi_rx_unicast);
	evcnt_detach(&isc->isc_vsi_rx_multicast);
	evcnt_detach(&isc->isc_vsi_rx_broadcast);
	evcnt_detach(&isc->isc_vsi_tx_errors);
	evcnt_detach(&isc->isc_vsi_tx_bytes);
	evcnt_detach(&isc->isc_vsi_tx_unicast);
	evcnt_detach(&isc->isc_vsi_tx_multicast);
	evcnt_detach(&isc->isc_vsi_tx_broadcast);

	evcnt_detach(&sc->sc_event_atq);
	evcnt_detach(&sc->sc_event_link);
	evcnt_detach(&sc->sc_event_ecc_err);
	evcnt_detach(&sc->sc_event_pci_exception);
	evcnt_detach(&sc->sc_event_crit_err);

	callout_destroy(&sc->sc_stats_callout);
}

static void
ixl_stats_callout(void *xsc)
{
	struct ixl_softc *sc = xsc;

	ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
	callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
}

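/*
 * Read a hardware statistics counter (32-bit, or 48-bit split across a
 * high/low register pair when reg_hi is non-zero) and return the change
 * since the previously recorded value, compensating for counter wrap.
 * The raw reading is stored back into *offset for the next pass.
 */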
static uint64_t
ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
    uint64_t *offset, bool has_offset)
{
	uint64_t value, delta;
	int bitwidth;

	bitwidth = reg_hi == 0 ? 32 : 48;

	value = ixl_rd(sc, reg_lo);

	if (bitwidth > 32) {
		value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
	}

	if (__predict_true(has_offset)) {
		delta = value;
		if (value < *offset)
			delta += ((uint64_t)1 << bitwidth);
		delta -= *offset;
	} else {
		delta = 0;
	}
	atomic_swap_64(offset, value);

	return delta;
}

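/*
 * Periodic statistics refresh, run from the driver workqueue: read the
 * per-port and per-VSI hardware counters and accumulate the deltas into
 * the corresponding event counters.
 */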
static void
ixl_stats_update(void *xsc)
{
	struct ixl_softc *sc = xsc;
	struct ixl_stats_counters *isc;
	uint64_t delta;

	isc = &sc->sc_stats_counters;

	/* errors */
	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_CRCERRS(sc->sc_port),
	    &isc->isc_crc_errors_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_crc_errors.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_ILLERRC(sc->sc_port),
	    &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);

	/* rx */
	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
	    &isc->isc_rx_bytes_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_RDPC(sc->sc_port),
	    &isc->isc_rx_discards_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_discards.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
	    &isc->isc_rx_unicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
	    &isc->isc_rx_multicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
	    &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);

	/* Packet size stats rx */
	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
	    &isc->isc_rx_size_64_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
	    &isc->isc_rx_size_127_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
	    &isc->isc_rx_size_255_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
	    &isc->isc_rx_size_511_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
	    &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
	    &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
	    &isc->isc_rx_size_big_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_RUC(sc->sc_port),
	    &isc->isc_rx_undersize_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_ROC(sc->sc_port),
	    &isc->isc_rx_oversize_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);

	/* tx */
	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
	    &isc->isc_tx_bytes_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_TDOLD(sc->sc_port),
	    &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
	    &isc->isc_tx_unicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
	    &isc->isc_tx_multicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
	    &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);

	/* Packet size stats tx */
	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
	    &isc->isc_tx_size_64_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
	    &isc->isc_tx_size_127_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
	    &isc->isc_tx_size_255_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
	    &isc->isc_tx_size_511_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
	    &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
	    &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
	    &isc->isc_tx_size_big_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);

	/* mac faults */
	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_MLFC(sc->sc_port),
	    &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_MRFC(sc->sc_port),
	    &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);

	/* Flow control (LFC) stats */
	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_LXONRXC(sc->sc_port),
	    &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_LXONTXC(sc->sc_port),
	    &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
	    &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
	    &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);

	/* fragments */
	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_RFC(sc->sc_port),
	    &isc->isc_rx_fragments_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    0, I40E_GLPRT_RJC(sc->sc_port),
	    &isc->isc_rx_jabber_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);

	/* VSI rx counters */
	delta = ixl_stat_delta(sc,
	    0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
	    I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
	    I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
	    I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
	    I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);

	/* VSI tx counters */
	delta = ixl_stat_delta(sc,
	    0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
	    I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
	    I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
	    I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);

	delta = ixl_stat_delta(sc,
	    I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
	    I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
	    &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
	atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
}

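/*
 * Create the hw.ixlN sysctl tree: global knobs (txrx_workqueue,
 * stats_interval) plus per-direction rx/tx nodes for the interrupt
 * throttling rate, descriptor counts and packet processing limits.
 */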
static int
ixl_setup_sysctls(struct ixl_softc *sc)
{
	const char *devname;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *rxnode, *txnode;
	int error;

	log = &sc->sc_sysctllog;
	devname = device_xname(sc->sc_dev);

	error = sysctl_createv(log, 0, NULL, &rnode,
	    0, CTLTYPE_NODE, devname,
	    SYSCTL_DESCR("ixl information and settings"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rnode, NULL,
	    CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
	    SYSCTL_DESCR("Statistics collection interval in milliseconds"),
	    NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rnode, &rxnode,
	    0, CTLTYPE_NODE, "rx",
	    SYSCTL_DESCR("ixl information and settings for Rx"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rxnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
	    SYSCTL_DESCR("Interrupt Throttling"),
	    ixl_sysctl_itr_handler, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rxnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "descriptor_num",
	    SYSCTL_DESCR("the number of rx descriptors"),
	    ixl_sysctl_ndescs_handler, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rxnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
	    SYSCTL_DESCR("max number of Rx packets"
	    " to process for interrupt processing"),
	    NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rxnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
	    SYSCTL_DESCR("max number of Rx packets"
	    " to process for deferred processing"),
	    NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &rnode, &txnode,
	    0, CTLTYPE_NODE, "tx",
	    SYSCTL_DESCR("ixl information and settings for Tx"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
	    SYSCTL_DESCR("Interrupt Throttling"),
	    ixl_sysctl_itr_handler, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "descriptor_num",
	    SYSCTL_DESCR("the number of tx descriptors"),
	    ixl_sysctl_ndescs_handler, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
	    SYSCTL_DESCR("max number of Tx packets"
	    " to process for interrupt processing"),
	    NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

	error = sysctl_createv(log, 0, &txnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
	    SYSCTL_DESCR("max number of Tx packets"
	    " to process for deferred processing"),
	    NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

out:
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create sysctl node\n");
		sysctl_teardown(log);
	}

	return error;
}

static void
ixl_teardown_sysctls(struct ixl_softc *sc)
{

	sysctl_teardown(&sc->sc_sysctllog);
}

static bool
ixl_sysctlnode_is_rx(struct sysctlnode *node)
{

	if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
		return true;

	return false;
}

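/*
 * Sysctl handler for the per-direction "itr" nodes.  Changes are rejected
 * while the interface is running (the new value is applied on the next
 * ixl_init()) and are limited to the 11-bit ITR register range.
 */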
"intr_process_limit", 6668 SYSCTL_DESCR("max number of Rx packets" 6669 " to process for interrupt processing"), 6670 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 6671 if (error) 6672 goto out; 6673 6674 error = sysctl_createv(log, 0, &rxnode, NULL, 6675 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 6676 SYSCTL_DESCR("max number of Rx packets" 6677 " to process for deferred processing"), 6678 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL); 6679 if (error) 6680 goto out; 6681 6682 error = sysctl_createv(log, 0, &rnode, &txnode, 6683 0, CTLTYPE_NODE, "tx", 6684 SYSCTL_DESCR("ixl information and settings for Tx"), 6685 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 6686 if (error) 6687 goto out; 6688 6689 error = sysctl_createv(log, 0, &txnode, NULL, 6690 CTLFLAG_READWRITE, CTLTYPE_INT, "itr", 6691 SYSCTL_DESCR("Interrupt Throttling"), 6692 ixl_sysctl_itr_handler, 0, 6693 (void *)sc, 0, CTL_CREATE, CTL_EOL); 6694 if (error) 6695 goto out; 6696 6697 error = sysctl_createv(log, 0, &txnode, NULL, 6698 CTLFLAG_READWRITE, CTLTYPE_INT, "descriptor_num", 6699 SYSCTL_DESCR("the number of tx descriptors"), 6700 ixl_sysctl_ndescs_handler, 0, 6701 (void *)sc, 0, CTL_CREATE, CTL_EOL); 6702 if (error) 6703 goto out; 6704 6705 error = sysctl_createv(log, 0, &txnode, NULL, 6706 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 6707 SYSCTL_DESCR("max number of Tx packets" 6708 " to process for interrupt processing"), 6709 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 6710 if (error) 6711 goto out; 6712 6713 error = sysctl_createv(log, 0, &txnode, NULL, 6714 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 6715 SYSCTL_DESCR("max number of Tx packets" 6716 " to process for deferred processing"), 6717 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL); 6718 if (error) 6719 goto out; 6720 6721 out: 6722 if (error) { 6723 aprint_error_dev(sc->sc_dev, 6724 "unable to create sysctl node\n"); 6725 sysctl_teardown(log); 6726 } 6727 6728 return error; 6729 } 6730 6731 static void 6732 ixl_teardown_sysctls(struct ixl_softc *sc) 6733 { 6734 6735 sysctl_teardown(&sc->sc_sysctllog); 6736 } 6737 6738 static bool 6739 ixl_sysctlnode_is_rx(struct sysctlnode *node) 6740 { 6741 6742 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL) 6743 return true; 6744 6745 return false; 6746 } 6747 6748 static int 6749 ixl_sysctl_itr_handler(SYSCTLFN_ARGS) 6750 { 6751 struct sysctlnode node = *rnode; 6752 struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data; 6753 struct ifnet *ifp = &sc->sc_ec.ec_if; 6754 uint32_t newitr, *itrptr; 6755 int error; 6756 6757 if (ixl_sysctlnode_is_rx(&node)) { 6758 itrptr = &sc->sc_itr_rx; 6759 } else { 6760 itrptr = &sc->sc_itr_tx; 6761 } 6762 6763 newitr = *itrptr; 6764 node.sysctl_data = &newitr; 6765 node.sysctl_size = sizeof(newitr); 6766 6767 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6768 6769 if (error || newp == NULL) 6770 return error; 6771 6772 /* ITRs are applied in ixl_init() for simple implementaion */ 6773 if (ISSET(ifp->if_flags, IFF_RUNNING)) 6774 return EBUSY; 6775 6776 if (newitr > 0x07ff) 6777 return EINVAL; 6778 6779 *itrptr = newitr; 6780 6781 return 0; 6782 } 6783 6784 static int 6785 ixl_sysctl_ndescs_handler(SYSCTLFN_ARGS) 6786 { 6787 struct sysctlnode node = *rnode; 6788 struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data; 6789 struct ifnet *ifp = &sc->sc_ec.ec_if; 6790 unsigned int *ndescs_ptr, ndescs, n; 6791 int error; 6792 6793 if (ixl_sysctlnode_is_rx(&node)) { 6794 ndescs_ptr = 
static struct workqueue *
ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	int error;

	error = workqueue_create(&wq, name, ixl_workq_work, NULL,
	    prio, ipl, flags);

	if (error)
		return NULL;

	return wq;
}

static void
ixl_workq_destroy(struct workqueue *wq)
{

	workqueue_destroy(wq);
}

static void
ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
{

	memset(work, 0, sizeof(*work));
	work->ixw_func = func;
	work->ixw_arg = arg;
}

static void
ixl_work_add(struct workqueue *wq, struct ixl_work *work)
{
	if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
		return;

	kpreempt_disable();
	workqueue_enqueue(wq, &work->ixw_cookie, NULL);
	kpreempt_enable();
}

static void
ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
{

	workqueue_wait(wq, &work->ixw_cookie);
}

static void
ixl_workq_work(struct work *wk, void *context)
{
	struct ixl_work *work;

	work = container_of(wk, struct ixl_work, ixw_cookie);

	atomic_swap_uint(&work->ixw_added, 0);
	work->ixw_func(work->ixw_arg);
}

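/*
 * Accessors for the Rx control registers.  When the firmware advertises
 * IXL_SC_AQ_FLAG_RXCTL, the registers are read and written through the
 * admin queue (retrying briefly on EAGAIN); otherwise, and as a fallback,
 * plain register access is used.
 */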
static int
ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
	iaq.iaq_param[1] = htole32(reg);

	if (ixl_atq_poll(sc, &iaq, 250) != 0)
		return ETIMEDOUT;

	switch (htole16(iaq.iaq_retval)) {
	case IXL_AQ_RC_OK:
		/* success */
		break;
	case IXL_AQ_RC_EACCES:
		return EPERM;
	case IXL_AQ_RC_EAGAIN:
		return EAGAIN;
	default:
		return EIO;
	}

	*rv = htole32(iaq.iaq_param[3]);
	return 0;
}

static uint32_t
ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
{
	uint32_t val;
	int rv, retry, retry_limit;

	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
		retry_limit = 5;
	} else {
		retry_limit = 0;
	}

	for (retry = 0; retry < retry_limit; retry++) {
		rv = ixl_rx_ctl_read(sc, reg, &val);
		if (rv == 0)
			return val;
		else if (rv == EAGAIN)
			delaymsec(1);
		else
			break;
	}

	val = ixl_rd(sc, reg);

	return val;
}

static int
ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
	iaq.iaq_param[1] = htole32(reg);
	iaq.iaq_param[3] = htole32(value);

	if (ixl_atq_poll(sc, &iaq, 250) != 0)
		return ETIMEDOUT;

	switch (htole16(iaq.iaq_retval)) {
	case IXL_AQ_RC_OK:
		/* success */
		break;
	case IXL_AQ_RC_EACCES:
		return EPERM;
	case IXL_AQ_RC_EAGAIN:
		return EAGAIN;
	default:
		return EIO;
	}

	return 0;
}

static void
ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
{
	int rv, retry, retry_limit;

	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
		retry_limit = 5;
	} else {
		retry_limit = 0;
	}

	for (retry = 0; retry < retry_limit; retry++) {
		rv = ixl_rx_ctl_write(sc, reg, value);
		if (rv == 0)
			return;
		else if (rv == EAGAIN)
			delaymsec(1);
		else
			break;
	}

	ixl_wr(sc, reg, value);
}

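/*
 * NVM (Shadow RAM) access.  When the firmware requires it, the NVM
 * resource is acquired and released through the admin queue
 * (ixl_nvm_lock()/ixl_nvm_unlock()); reads then go either through the
 * NVM read admin command or directly via the GLNVM_SRCTL/SRDATA
 * registers, depending on IXL_SC_AQ_FLAG_NVMREAD.
 */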
static int
ixl_nvm_lock(struct ixl_softc *sc, char rw)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_req_resource_param *param;
	int rv;

	if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
		return 0;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);

	param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
	param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
	if (rw == 'R') {
		param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
	} else {
		param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
	}

	rv = ixl_atq_poll(sc, &iaq, 250);

	if (rv != 0)
		return ETIMEDOUT;

	switch (le16toh(iaq.iaq_retval)) {
	case IXL_AQ_RC_OK:
		break;
	case IXL_AQ_RC_EACCES:
		return EACCES;
	case IXL_AQ_RC_EBUSY:
		return EBUSY;
	case IXL_AQ_RC_EPERM:
		return EPERM;
	}

	return 0;
}

static int
ixl_nvm_unlock(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_rel_resource_param *param;
	int rv;

	if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
		return 0;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);

	param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
	param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);

	rv = ixl_atq_poll(sc, &iaq, 250);

	if (rv != 0)
		return ETIMEDOUT;

	switch (le16toh(iaq.iaq_retval)) {
	case IXL_AQ_RC_OK:
		break;
	default:
		return EIO;
	}
	return 0;
}

static int
ixl_srdone_poll(struct ixl_softc *sc)
{
	int wait_count;
	uint32_t reg;

	for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
	    wait_count++) {
		reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
		if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
			break;

		delaymsec(5);
	}

	if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
		return -1;

	return 0;
}

static int
ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
{
	uint32_t reg;

	if (ixl_srdone_poll(sc) != 0)
		return ETIMEDOUT;

	reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
	    __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
	ixl_wr(sc, I40E_GLNVM_SRCTL, reg);

	if (ixl_srdone_poll(sc) != 0) {
		aprint_debug("NVM read error: couldn't access "
		    "Shadow RAM address: 0x%x\n", offset);
		return ETIMEDOUT;
	}

	reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
	*data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);

	return 0;
}

static int
ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
    void *data, size_t len)
{
	struct ixl_dmamem *idm;
	struct ixl_aq_desc iaq;
	struct ixl_aq_nvm_param *param;
	uint32_t offset_bytes;
	int rv;

	idm = &sc->sc_aqbuf;
	if (len > IXL_DMA_LEN(idm))
		return ENOMEM;

	memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
	iaq.iaq_flags = htole16(IXL_AQ_BUF |
	    ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
	iaq.iaq_datalen = htole16(len);
	ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));

	param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
	param->command_flags = IXL_AQ_NVM_LAST_CMD;
	param->module_pointer = 0;
	param->length = htole16(len);
	offset_bytes = (uint32_t)offset_word * 2;
	offset_bytes &= 0x00FFFFFF;
	param->offset = htole32(offset_bytes);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
	    BUS_DMASYNC_PREREAD);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
	    BUS_DMASYNC_POSTREAD);

	if (rv != 0) {
		return ETIMEDOUT;
	}

	switch (le16toh(iaq.iaq_retval)) {
	case IXL_AQ_RC_OK:
		break;
	case IXL_AQ_RC_EPERM:
		return EPERM;
	case IXL_AQ_RC_EINVAL:
		return EINVAL;
	case IXL_AQ_RC_EBUSY:
		return EBUSY;
	case IXL_AQ_RC_EIO:
	default:
		return EIO;
	}

	memcpy(data, IXL_DMA_KVA(idm), len);

	return 0;
}

static int
ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
{
	int error;
	uint16_t buf;

	error = ixl_nvm_lock(sc, 'R');
	if (error)
		return error;

	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
		error = ixl_nvm_read_aq(sc, offset,
		    &buf, sizeof(buf));
		if (error == 0)
			*data = le16toh(buf);
	} else {
		error = ixl_nvm_read_srctl(sc, offset, &buf);
		if (error == 0)
			*data = buf;
	}

	ixl_nvm_unlock(sc);

	return error;
}

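/*
 * Loadable module glue.  When built as a module, tunables may be supplied
 * through the module property dictionary and are parsed by
 * ixl_parse_modprop() before the component is configured.
 */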
MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");

#ifdef _MODULE
#include "ioconf.c"
#endif

#ifdef _MODULE
static void
ixl_parse_modprop(prop_dictionary_t dict)
{
	prop_object_t obj;
	int64_t val;
	uint64_t uval;

	if (dict == NULL)
		return;

	obj = prop_dictionary_get(dict, "nomsix");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
		ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
	}

	obj = prop_dictionary_get(dict, "stats_interval");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		val = prop_number_signed_value((prop_number_t)obj);

		/* the bounds of this range are arbitrary */
		if (100 < val && val < 180000) {
			ixl_param_stats_interval = val;
		}
	}

	obj = prop_dictionary_get(dict, "nqps_limit");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		val = prop_number_signed_value((prop_number_t)obj);

		if (val <= INT32_MAX)
			ixl_param_nqps_limit = val;
	}

	obj = prop_dictionary_get(dict, "rx_ndescs");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		uval = prop_number_unsigned_integer_value((prop_number_t)obj);

		if (uval > 8)
			ixl_param_rx_ndescs = uval;
	}

	obj = prop_dictionary_get(dict, "tx_ndescs");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		uval = prop_number_unsigned_integer_value((prop_number_t)obj);

		if (uval > IXL_TX_PKT_DESCS)
			ixl_param_tx_ndescs = uval;
	}
}
#endif

static int
if_ixl_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		ixl_parse_modprop((prop_dictionary_t)opaque);
		error = config_init_component(cfdriver_ioconf_if_ixl,
		    cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_if_ixl,
		    cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}