1 /* $NetBSD: if_ixl.c,v 1.74 2020/08/19 09:22:05 yamaguchi Exp $ */ 2 3 /* 4 * Copyright (c) 2013-2015, Intel Corporation 5 * All rights reserved. 6 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 /* 51 * Copyright (c) 2019 Internet Initiative Japan, Inc. 52 * All rights reserved. 53 * 54 * Redistribution and use in source and binary forms, with or without 55 * modification, are permitted provided that the following conditions 56 * are met: 57 * 1. Redistributions of source code must retain the above copyright 58 * notice, this list of conditions and the following disclaimer. 59 * 2. Redistributions in binary form must reproduce the above copyright 60 * notice, this list of conditions and the following disclaimer in the 61 * documentation and/or other materials provided with the distribution. 62 * 63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 66 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 73 * POSSIBILITY OF SUCH DAMAGE. 74 */ 75 76 #include <sys/cdefs.h> 77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.74 2020/08/19 09:22:05 yamaguchi Exp $"); 78 79 #ifdef _KERNEL_OPT 80 #include "opt_net_mpsafe.h" 81 #include "opt_if_ixl.h" 82 #endif 83 84 #include <sys/param.h> 85 #include <sys/types.h> 86 87 #include <sys/bitops.h> 88 #include <sys/cpu.h> 89 #include <sys/device.h> 90 #include <sys/evcnt.h> 91 #include <sys/interrupt.h> 92 #include <sys/kmem.h> 93 #include <sys/module.h> 94 #include <sys/mutex.h> 95 #include <sys/pcq.h> 96 #include <sys/syslog.h> 97 #include <sys/workqueue.h> 98 99 #include <sys/bus.h> 100 101 #include <net/bpf.h> 102 #include <net/if.h> 103 #include <net/if_dl.h> 104 #include <net/if_media.h> 105 #include <net/if_ether.h> 106 #include <net/rss_config.h> 107 108 #include <netinet/tcp.h> /* for struct tcphdr */ 109 #include <netinet/udp.h> /* for struct udphdr */ 110 111 #include <dev/pci/pcivar.h> 112 #include <dev/pci/pcidevs.h> 113 114 #include <dev/pci/if_ixlreg.h> 115 #include <dev/pci/if_ixlvar.h> 116 117 #include <prop/proplib.h> 118 119 struct ixl_softc; /* defined */ 120 121 #define I40E_PF_RESET_WAIT_COUNT 200 122 #define I40E_AQ_LARGE_BUF 512 123 124 /* bitfields for Tx queue mapping in QTX_CTL */ 125 #define I40E_QTX_CTL_VF_QUEUE 0x0 126 #define I40E_QTX_CTL_VM_QUEUE 0x1 127 #define I40E_QTX_CTL_PF_QUEUE 0x2 128 129 #define I40E_QUEUE_TYPE_EOL 0x7ff 130 #define I40E_INTR_NOTX_QUEUE 0 131 132 #define I40E_QUEUE_TYPE_RX 0x0 133 #define I40E_QUEUE_TYPE_TX 0x1 134 #define I40E_QUEUE_TYPE_PE_CEQ 0x2 135 #define I40E_QUEUE_TYPE_UNKNOWN 0x3 136 137 #define I40E_ITR_INDEX_RX 0x0 138 #define I40E_ITR_INDEX_TX 0x1 139 #define I40E_ITR_INDEX_OTHER 0x2 140 #define I40E_ITR_INDEX_NONE 0x3 141 #define IXL_ITR_RX 0x7a /* 4K intrs/sec */ 142 #define IXL_ITR_TX 0x7a /* 4K intrs/sec */ 143 144 #define I40E_INTR_NOTX_QUEUE 0 145 #define I40E_INTR_NOTX_INTR 0 146 #define I40E_INTR_NOTX_RX_QUEUE 0 147 #define I40E_INTR_NOTX_TX_QUEUE 1 148 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK 149 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK 150 151 #define BIT_ULL(a) (1ULL << (a)) 152 #define IXL_RSS_HENA_DEFAULT_BASE \ 153 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ 154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ 155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ 156 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ 157 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ 158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ 159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ 160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ 161 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ 162 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ 163 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) 164 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE 165 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \ 166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ 167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ 168 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ 169 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ 170 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ 171 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) 172 #define I40E_HASH_LUT_SIZE_128 0 173 #define IXL_RSS_KEY_SIZE_REG 13 174 175 #define IXL_ICR0_CRIT_ERR_MASK \ 176 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \ 177 I40E_PFINT_ICR0_ECC_ERR_MASK | \ 178 I40E_PFINT_ICR0_PE_CRITERR_MASK) 179 180 #define IXL_QUEUE_MAX_XL710 64 181 #define IXL_QUEUE_MAX_X722 128 182 183 #define IXL_TX_PKT_DESCS 8 184 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS) 185 #define IXL_TX_QUEUE_ALIGN 128 186 #define IXL_RX_QUEUE_ALIGN 128 187 188 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN) 189 #define IXL_MTU_ETHERLEN ETHER_HDR_LEN \ 190 + ETHER_CRC_LEN 191 #if 0 192 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN) 193 #else 194 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */ 195 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN) 196 #endif 197 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN) 198 199 #define IXL_PCIREG PCI_MAPREG_START 200 201 #define IXL_ITR0 0x0 202 #define IXL_ITR1 0x1 203 #define IXL_ITR2 0x2 204 #define IXL_NOITR 0x3 205 206 #define IXL_AQ_NUM 256 207 #define IXL_AQ_MASK (IXL_AQ_NUM - 1) 208 #define IXL_AQ_ALIGN 64 /* lol */ 209 #define IXL_AQ_BUFLEN 4096 210 211 #define IXL_HMC_ROUNDUP 512 212 #define IXL_HMC_PGSIZE 4096 213 #define IXL_HMC_DVASZ sizeof(uint64_t) 214 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ) 215 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS) 216 #define IXL_HMC_PDVALID 1ULL 217 218 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz) 219 220 #define IXL_SRRD_SRCTL_ATTEMPTS 100000 221 222 struct ixl_aq_regs { 223 bus_size_t atq_tail; 224 bus_size_t atq_head; 225 bus_size_t atq_len; 226 bus_size_t atq_bal; 227 bus_size_t atq_bah; 228 229 bus_size_t arq_tail; 230 bus_size_t arq_head; 231 bus_size_t arq_len; 232 bus_size_t arq_bal; 233 bus_size_t arq_bah; 234 235 uint32_t atq_len_enable; 236 uint32_t atq_tail_mask; 237 uint32_t atq_head_mask; 238 239 uint32_t arq_len_enable; 240 uint32_t arq_tail_mask; 241 uint32_t arq_head_mask; 242 }; 243 244 struct ixl_phy_type { 245 uint64_t phy_type; 246 uint64_t ifm_type; 247 }; 248 249 struct ixl_speed_type { 250 uint8_t dev_speed; 251 uint64_t net_speed; 252 }; 253 254 struct ixl_aq_buf { 255 SIMPLEQ_ENTRY(ixl_aq_buf) 256 aqb_entry; 257 void *aqb_data; 258 bus_dmamap_t aqb_map; 259 bus_dma_segment_t aqb_seg; 260 size_t aqb_size; 261 int aqb_nsegs; 262 }; 263 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf); 264 265 struct ixl_dmamem { 266 bus_dmamap_t ixm_map; 267 bus_dma_segment_t ixm_seg; 268 int ixm_nsegs; 269 size_t ixm_size; 270 void *ixm_kva; 271 }; 272 273 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map) 274 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr) 275 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva) 276 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size) 277 278 struct ixl_hmc_entry { 279 uint64_t hmc_base; 280 uint32_t hmc_count; 281 uint64_t hmc_size; 282 }; 283 284 enum ixl_hmc_types { 285 IXL_HMC_LAN_TX = 0, 286 IXL_HMC_LAN_RX, 287 IXL_HMC_FCOE_CTX, 288 IXL_HMC_FCOE_FILTER, 289 IXL_HMC_COUNT 290 }; 291 292 struct ixl_hmc_pack { 293 uint16_t offset; 294 uint16_t width; 295 uint16_t lsb; 296 }; 297 298 /* 299 * these hmc objects have weird sizes and alignments, so these are abstract 300 * representations of them that are nice for c to populate. 
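 *
 * worked example (illustration only, derived from the pack tables below):
 * ixl_hmc_pack_rxq[] lists the qlen field with width 13 and lsb 89, so the
 * low 13 bits of the little-endian qlen member are copied into bits 89..101
 * of the packed context image, i.e. starting at bit 1 of byte 11.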
301 * 302 * the packing code relies on little-endian values being stored in the fields, 303 * no high bits in the fields being set, and the fields must be packed in the 304 * same order as they are in the ctx structure. 305 */ 306 307 struct ixl_hmc_rxq { 308 uint16_t head; 309 uint8_t cpuid; 310 uint64_t base; 311 #define IXL_HMC_RXQ_BASE_UNIT 128 312 uint16_t qlen; 313 uint16_t dbuff; 314 #define IXL_HMC_RXQ_DBUFF_UNIT 128 315 uint8_t hbuff; 316 #define IXL_HMC_RXQ_HBUFF_UNIT 64 317 uint8_t dtype; 318 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0 319 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1 320 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2 321 uint8_t dsize; 322 #define IXL_HMC_RXQ_DSIZE_16 0 323 #define IXL_HMC_RXQ_DSIZE_32 1 324 uint8_t crcstrip; 325 uint8_t fc_ena; 326 uint8_t l2sel; 327 uint8_t hsplit_0; 328 uint8_t hsplit_1; 329 uint8_t showiv; 330 uint16_t rxmax; 331 uint8_t tphrdesc_ena; 332 uint8_t tphwdesc_ena; 333 uint8_t tphdata_ena; 334 uint8_t tphhead_ena; 335 uint8_t lrxqthresh; 336 uint8_t prefena; 337 }; 338 339 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = { 340 { offsetof(struct ixl_hmc_rxq, head), 13, 0 }, 341 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 }, 342 { offsetof(struct ixl_hmc_rxq, base), 57, 32 }, 343 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 }, 344 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 }, 345 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 }, 346 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 }, 347 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 }, 348 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 }, 349 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 }, 350 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 }, 351 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 }, 352 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 }, 353 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 }, 354 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 }, 355 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 }, 356 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 }, 357 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 }, 358 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 }, 359 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 }, 360 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 }, 361 }; 362 363 #define IXL_HMC_RXQ_MINSIZE (201 + 1) 364 365 struct ixl_hmc_txq { 366 uint16_t head; 367 uint8_t new_context; 368 uint64_t base; 369 #define IXL_HMC_TXQ_BASE_UNIT 128 370 uint8_t fc_ena; 371 uint8_t timesync_ena; 372 uint8_t fd_ena; 373 uint8_t alt_vlan_ena; 374 uint8_t cpuid; 375 uint16_t thead_wb; 376 uint8_t head_wb_ena; 377 #define IXL_HMC_TXQ_DESC_WB 0 378 #define IXL_HMC_TXQ_HEAD_WB 1 379 uint16_t qlen; 380 uint8_t tphrdesc_ena; 381 uint8_t tphrpacket_ena; 382 uint8_t tphwdesc_ena; 383 uint64_t head_wb_addr; 384 uint32_t crc; 385 uint16_t rdylist; 386 uint8_t rdylist_act; 387 }; 388 389 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = { 390 { offsetof(struct ixl_hmc_txq, head), 13, 0 }, 391 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 }, 392 { offsetof(struct ixl_hmc_txq, base), 57, 32 }, 393 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 }, 394 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 }, 395 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 }, 396 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 }, 397 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 }, 398 /* line 1 */ 399 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 }, 400 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 }, 401 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 }, 402 { 
offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 }, 403 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 }, 404 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 }, 405 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 }, 406 /* line 7 */ 407 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) }, 408 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) }, 409 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) }, 410 }; 411 412 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1) 413 414 struct ixl_work { 415 struct work ixw_cookie; 416 void (*ixw_func)(void *); 417 void *ixw_arg; 418 unsigned int ixw_added; 419 }; 420 #define IXL_WORKQUEUE_PRI PRI_SOFTNET 421 422 struct ixl_tx_map { 423 struct mbuf *txm_m; 424 bus_dmamap_t txm_map; 425 unsigned int txm_eop; 426 }; 427 428 struct ixl_tx_ring { 429 kmutex_t txr_lock; 430 struct ixl_softc *txr_sc; 431 432 unsigned int txr_prod; 433 unsigned int txr_cons; 434 435 struct ixl_tx_map *txr_maps; 436 struct ixl_dmamem txr_mem; 437 438 bus_size_t txr_tail; 439 unsigned int txr_qid; 440 pcq_t *txr_intrq; 441 void *txr_si; 442 443 struct evcnt txr_defragged; 444 struct evcnt txr_defrag_failed; 445 struct evcnt txr_pcqdrop; 446 struct evcnt txr_transmitdef; 447 struct evcnt txr_intr; 448 struct evcnt txr_defer; 449 }; 450 451 struct ixl_rx_map { 452 struct mbuf *rxm_m; 453 bus_dmamap_t rxm_map; 454 }; 455 456 struct ixl_rx_ring { 457 kmutex_t rxr_lock; 458 459 unsigned int rxr_prod; 460 unsigned int rxr_cons; 461 462 struct ixl_rx_map *rxr_maps; 463 struct ixl_dmamem rxr_mem; 464 465 struct mbuf *rxr_m_head; 466 struct mbuf **rxr_m_tail; 467 468 bus_size_t rxr_tail; 469 unsigned int rxr_qid; 470 471 struct evcnt rxr_mgethdr_failed; 472 struct evcnt rxr_mgetcl_failed; 473 struct evcnt rxr_mbuf_load_failed; 474 struct evcnt rxr_intr; 475 struct evcnt rxr_defer; 476 }; 477 478 struct ixl_queue_pair { 479 struct ixl_softc *qp_sc; 480 struct ixl_tx_ring *qp_txr; 481 struct ixl_rx_ring *qp_rxr; 482 483 char qp_name[16]; 484 485 void *qp_si; 486 struct work qp_work; 487 bool qp_workqueue; 488 }; 489 490 struct ixl_atq { 491 struct ixl_aq_desc iatq_desc; 492 void (*iatq_fn)(struct ixl_softc *, 493 const struct ixl_aq_desc *); 494 }; 495 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq); 496 497 struct ixl_product { 498 unsigned int vendor_id; 499 unsigned int product_id; 500 }; 501 502 struct ixl_stats_counters { 503 bool isc_has_offset; 504 struct evcnt isc_crc_errors; 505 uint64_t isc_crc_errors_offset; 506 struct evcnt isc_illegal_bytes; 507 uint64_t isc_illegal_bytes_offset; 508 struct evcnt isc_rx_bytes; 509 uint64_t isc_rx_bytes_offset; 510 struct evcnt isc_rx_discards; 511 uint64_t isc_rx_discards_offset; 512 struct evcnt isc_rx_unicast; 513 uint64_t isc_rx_unicast_offset; 514 struct evcnt isc_rx_multicast; 515 uint64_t isc_rx_multicast_offset; 516 struct evcnt isc_rx_broadcast; 517 uint64_t isc_rx_broadcast_offset; 518 struct evcnt isc_rx_size_64; 519 uint64_t isc_rx_size_64_offset; 520 struct evcnt isc_rx_size_127; 521 uint64_t isc_rx_size_127_offset; 522 struct evcnt isc_rx_size_255; 523 uint64_t isc_rx_size_255_offset; 524 struct evcnt isc_rx_size_511; 525 uint64_t isc_rx_size_511_offset; 526 struct evcnt isc_rx_size_1023; 527 uint64_t isc_rx_size_1023_offset; 528 struct evcnt isc_rx_size_1522; 529 uint64_t isc_rx_size_1522_offset; 530 struct evcnt isc_rx_size_big; 531 uint64_t isc_rx_size_big_offset; 532 struct evcnt isc_rx_undersize; 533 uint64_t isc_rx_undersize_offset; 534 struct evcnt isc_rx_oversize; 535 uint64_t 
                         isc_rx_oversize_offset;
        struct evcnt     isc_rx_fragments;
        uint64_t         isc_rx_fragments_offset;
        struct evcnt     isc_rx_jabber;
        uint64_t         isc_rx_jabber_offset;
        struct evcnt     isc_tx_bytes;
        uint64_t         isc_tx_bytes_offset;
        struct evcnt     isc_tx_dropped_link_down;
        uint64_t         isc_tx_dropped_link_down_offset;
        struct evcnt     isc_tx_unicast;
        uint64_t         isc_tx_unicast_offset;
        struct evcnt     isc_tx_multicast;
        uint64_t         isc_tx_multicast_offset;
        struct evcnt     isc_tx_broadcast;
        uint64_t         isc_tx_broadcast_offset;
        struct evcnt     isc_tx_size_64;
        uint64_t         isc_tx_size_64_offset;
        struct evcnt     isc_tx_size_127;
        uint64_t         isc_tx_size_127_offset;
        struct evcnt     isc_tx_size_255;
        uint64_t         isc_tx_size_255_offset;
        struct evcnt     isc_tx_size_511;
        uint64_t         isc_tx_size_511_offset;
        struct evcnt     isc_tx_size_1023;
        uint64_t         isc_tx_size_1023_offset;
        struct evcnt     isc_tx_size_1522;
        uint64_t         isc_tx_size_1522_offset;
        struct evcnt     isc_tx_size_big;
        uint64_t         isc_tx_size_big_offset;
        struct evcnt     isc_mac_local_faults;
        uint64_t         isc_mac_local_faults_offset;
        struct evcnt     isc_mac_remote_faults;
        uint64_t         isc_mac_remote_faults_offset;
        struct evcnt     isc_link_xon_rx;
        uint64_t         isc_link_xon_rx_offset;
        struct evcnt     isc_link_xon_tx;
        uint64_t         isc_link_xon_tx_offset;
        struct evcnt     isc_link_xoff_rx;
        uint64_t         isc_link_xoff_rx_offset;
        struct evcnt     isc_link_xoff_tx;
        uint64_t         isc_link_xoff_tx_offset;
        struct evcnt     isc_vsi_rx_discards;
        uint64_t         isc_vsi_rx_discards_offset;
        struct evcnt     isc_vsi_rx_bytes;
        uint64_t         isc_vsi_rx_bytes_offset;
        struct evcnt     isc_vsi_rx_unicast;
        uint64_t         isc_vsi_rx_unicast_offset;
        struct evcnt     isc_vsi_rx_multicast;
        uint64_t         isc_vsi_rx_multicast_offset;
        struct evcnt     isc_vsi_rx_broadcast;
        uint64_t         isc_vsi_rx_broadcast_offset;
        struct evcnt     isc_vsi_tx_errors;
        uint64_t         isc_vsi_tx_errors_offset;
        struct evcnt     isc_vsi_tx_bytes;
        uint64_t         isc_vsi_tx_bytes_offset;
        struct evcnt     isc_vsi_tx_unicast;
        uint64_t         isc_vsi_tx_unicast_offset;
        struct evcnt     isc_vsi_tx_multicast;
        uint64_t         isc_vsi_tx_multicast_offset;
        struct evcnt     isc_vsi_tx_broadcast;
        uint64_t         isc_vsi_tx_broadcast_offset;
};

/*
 * Locking notes:
 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
 *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
 *    - no two of these ring locks may be held at the same time.
 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
 *   (a spin mutex).
 *    - this lock cannot be held together with txr_lock or rxr_lock.
 * + fields named sc_arq_* are not protected by any lock.
 *    - all operations on sc_arq_* are performed in the single context
 *      associated with sc_arq_task.
 * + the other fields in ixl_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *    - sc_cfg_lock must be acquired before any of the locks above and
 *      may only be released after the other lock has been released.
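 *
 * (illustrative summary of the resulting lock order, derived from the
 *  rules above: sc_cfg_lock -> sc_atq_lock, and sc_cfg_lock -> txr_lock
 *  or rxr_lock, are fine; holding txr_lock together with rxr_lock, or
 *  sc_atq_lock together with a ring lock, is never allowed.)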
613 * */ 614 615 struct ixl_softc { 616 device_t sc_dev; 617 struct ethercom sc_ec; 618 bool sc_attached; 619 bool sc_dead; 620 uint32_t sc_port; 621 struct sysctllog *sc_sysctllog; 622 struct workqueue *sc_workq; 623 struct workqueue *sc_workq_txrx; 624 int sc_stats_intval; 625 callout_t sc_stats_callout; 626 struct ixl_work sc_stats_task; 627 struct ixl_stats_counters 628 sc_stats_counters; 629 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 630 struct ifmedia sc_media; 631 uint64_t sc_media_status; 632 uint64_t sc_media_active; 633 uint64_t sc_phy_types; 634 uint8_t sc_phy_abilities; 635 uint8_t sc_phy_linkspeed; 636 uint8_t sc_phy_fec_cfg; 637 uint16_t sc_eee_cap; 638 uint32_t sc_eeer_val; 639 uint8_t sc_d3_lpan; 640 kmutex_t sc_cfg_lock; 641 enum i40e_mac_type sc_mac_type; 642 uint32_t sc_rss_table_size; 643 uint32_t sc_rss_table_entry_width; 644 bool sc_txrx_workqueue; 645 u_int sc_tx_process_limit; 646 u_int sc_rx_process_limit; 647 u_int sc_tx_intr_process_limit; 648 u_int sc_rx_intr_process_limit; 649 650 int sc_cur_ec_capenable; 651 652 struct pci_attach_args sc_pa; 653 pci_intr_handle_t *sc_ihp; 654 void **sc_ihs; 655 unsigned int sc_nintrs; 656 657 bus_dma_tag_t sc_dmat; 658 bus_space_tag_t sc_memt; 659 bus_space_handle_t sc_memh; 660 bus_size_t sc_mems; 661 662 uint8_t sc_pf_id; 663 uint16_t sc_uplink_seid; /* le */ 664 uint16_t sc_downlink_seid; /* le */ 665 uint16_t sc_vsi_number; 666 uint16_t sc_vsi_stat_counter_idx; 667 uint16_t sc_seid; 668 unsigned int sc_base_queue; 669 670 pci_intr_type_t sc_intrtype; 671 unsigned int sc_msix_vector_queue; 672 673 struct ixl_dmamem sc_scratch; 674 struct ixl_dmamem sc_aqbuf; 675 676 const struct ixl_aq_regs * 677 sc_aq_regs; 678 uint32_t sc_aq_flags; 679 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0) 680 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1) 681 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2) 682 #define IXL_SC_AQ_FLAG_RSS __BIT(3) 683 684 kmutex_t sc_atq_lock; 685 kcondvar_t sc_atq_cv; 686 struct ixl_dmamem sc_atq; 687 unsigned int sc_atq_prod; 688 unsigned int sc_atq_cons; 689 690 struct ixl_dmamem sc_arq; 691 struct ixl_work sc_arq_task; 692 struct ixl_aq_bufs sc_arq_idle; 693 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM]; 694 unsigned int sc_arq_prod; 695 unsigned int sc_arq_cons; 696 697 struct ixl_work sc_link_state_task; 698 struct ixl_atq sc_link_state_atq; 699 700 struct ixl_dmamem sc_hmc_sd; 701 struct ixl_dmamem sc_hmc_pd; 702 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT]; 703 704 struct if_percpuq *sc_ipq; 705 unsigned int sc_tx_ring_ndescs; 706 unsigned int sc_rx_ring_ndescs; 707 unsigned int sc_nqueue_pairs; 708 unsigned int sc_nqueue_pairs_max; 709 unsigned int sc_nqueue_pairs_device; 710 struct ixl_queue_pair *sc_qps; 711 uint32_t sc_itr_rx; 712 uint32_t sc_itr_tx; 713 714 struct evcnt sc_event_atq; 715 struct evcnt sc_event_link; 716 struct evcnt sc_event_ecc_err; 717 struct evcnt sc_event_pci_exception; 718 struct evcnt sc_event_crit_err; 719 }; 720 721 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX 722 #define IXL_TX_PROCESS_LIMIT 256 723 #define IXL_RX_PROCESS_LIMIT 256 724 #define IXL_TX_INTR_PROCESS_LIMIT 256 725 #define IXL_RX_INTR_PROCESS_LIMIT 0U 726 727 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \ 728 IFCAP_CSUM_TCPv4_Rx | \ 729 IFCAP_CSUM_UDPv4_Rx | \ 730 IFCAP_CSUM_TCPv6_Rx | \ 731 IFCAP_CSUM_UDPv6_Rx) 732 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \ 733 IFCAP_CSUM_TCPv4_Tx | \ 734 IFCAP_CSUM_UDPv4_Tx | \ 735 IFCAP_CSUM_TCPv6_Tx | \ 736 IFCAP_CSUM_UDPv6_Tx) 737 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \ 738 M_CSUM_TCPv4 | 
M_CSUM_TCPv6 | \ 739 M_CSUM_UDPv4 | M_CSUM_UDPv6) 740 741 #define delaymsec(_x) DELAY(1000 * (_x)) 742 #ifdef IXL_DEBUG 743 #define DDPRINTF(sc, fmt, args...) \ 744 do { \ 745 if ((sc) != NULL) { \ 746 device_printf( \ 747 ((struct ixl_softc *)(sc))->sc_dev, \ 748 ""); \ 749 } \ 750 printf("%s:\t" fmt, __func__, ##args); \ 751 } while (0) 752 #else 753 #define DDPRINTF(sc, fmt, args...) __nothing 754 #endif 755 #ifndef IXL_STATS_INTERVAL_MSEC 756 #define IXL_STATS_INTERVAL_MSEC 10000 757 #endif 758 #ifndef IXL_QUEUE_NUM 759 #define IXL_QUEUE_NUM 0 760 #endif 761 762 static bool ixl_param_nomsix = false; 763 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC; 764 static int ixl_param_nqps_limit = IXL_QUEUE_NUM; 765 static unsigned int ixl_param_tx_ndescs = 512; 766 static unsigned int ixl_param_rx_ndescs = 256; 767 768 static enum i40e_mac_type 769 ixl_mactype(pci_product_id_t); 770 static void ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t); 771 static void ixl_clear_hw(struct ixl_softc *); 772 static int ixl_pf_reset(struct ixl_softc *); 773 774 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *, 775 bus_size_t, bus_size_t); 776 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *); 777 778 static int ixl_arq_fill(struct ixl_softc *); 779 static void ixl_arq_unfill(struct ixl_softc *); 780 781 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *, 782 unsigned int); 783 static void ixl_atq_set(struct ixl_atq *, 784 void (*)(struct ixl_softc *, const struct ixl_aq_desc *)); 785 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *); 786 static void ixl_atq_done(struct ixl_softc *); 787 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *); 788 static int ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *); 789 static int ixl_get_version(struct ixl_softc *); 790 static int ixl_get_nvm_version(struct ixl_softc *); 791 static int ixl_get_hw_capabilities(struct ixl_softc *); 792 static int ixl_pxe_clear(struct ixl_softc *); 793 static int ixl_lldp_shut(struct ixl_softc *); 794 static int ixl_get_mac(struct ixl_softc *); 795 static int ixl_get_switch_config(struct ixl_softc *); 796 static int ixl_phy_mask_ints(struct ixl_softc *); 797 static int ixl_get_phy_info(struct ixl_softc *); 798 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool); 799 static int ixl_set_phy_autoselect(struct ixl_softc *); 800 static int ixl_restart_an(struct ixl_softc *); 801 static int ixl_hmc(struct ixl_softc *); 802 static void ixl_hmc_free(struct ixl_softc *); 803 static int ixl_get_vsi(struct ixl_softc *); 804 static int ixl_set_vsi(struct ixl_softc *); 805 static void ixl_set_filter_control(struct ixl_softc *); 806 static void ixl_get_link_status(void *); 807 static int ixl_get_link_status_poll(struct ixl_softc *, int *); 808 static void ixl_get_link_status_done(struct ixl_softc *, 809 const struct ixl_aq_desc *); 810 static int ixl_set_link_status_locked(struct ixl_softc *, 811 const struct ixl_aq_desc *); 812 static uint64_t ixl_search_link_speed(uint8_t); 813 static uint8_t ixl_search_baudrate(uint64_t); 814 static void ixl_config_rss(struct ixl_softc *); 815 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *, 816 uint16_t, uint16_t); 817 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *, 818 uint16_t, uint16_t); 819 static void ixl_arq(void *); 820 static void ixl_hmc_pack(void *, const void *, 821 const struct ixl_hmc_pack *, unsigned int); 822 static uint32_t ixl_rd_rx_csr(struct 
ixl_softc *, uint32_t); 823 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t); 824 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *); 825 826 static int ixl_match(device_t, cfdata_t, void *); 827 static void ixl_attach(device_t, device_t, void *); 828 static int ixl_detach(device_t, int); 829 830 static void ixl_media_add(struct ixl_softc *); 831 static int ixl_media_change(struct ifnet *); 832 static void ixl_media_status(struct ifnet *, struct ifmediareq *); 833 static void ixl_watchdog(struct ifnet *); 834 static int ixl_ioctl(struct ifnet *, u_long, void *); 835 static void ixl_start(struct ifnet *); 836 static int ixl_transmit(struct ifnet *, struct mbuf *); 837 static void ixl_deferred_transmit(void *); 838 static int ixl_intr(void *); 839 static int ixl_queue_intr(void *); 840 static int ixl_other_intr(void *); 841 static void ixl_handle_queue(void *); 842 static void ixl_handle_queue_wk(struct work *, void *); 843 static void ixl_sched_handle_queue(struct ixl_softc *, 844 struct ixl_queue_pair *); 845 static int ixl_init(struct ifnet *); 846 static int ixl_init_locked(struct ixl_softc *); 847 static void ixl_stop(struct ifnet *, int); 848 static void ixl_stop_locked(struct ixl_softc *); 849 static int ixl_iff(struct ixl_softc *); 850 static int ixl_ifflags_cb(struct ethercom *); 851 static int ixl_setup_interrupts(struct ixl_softc *); 852 static int ixl_establish_intx(struct ixl_softc *); 853 static int ixl_establish_msix(struct ixl_softc *); 854 static void ixl_enable_queue_intr(struct ixl_softc *, 855 struct ixl_queue_pair *); 856 static void ixl_disable_queue_intr(struct ixl_softc *, 857 struct ixl_queue_pair *); 858 static void ixl_enable_other_intr(struct ixl_softc *); 859 static void ixl_disable_other_intr(struct ixl_softc *); 860 static void ixl_config_queue_intr(struct ixl_softc *); 861 static void ixl_config_other_intr(struct ixl_softc *); 862 863 static struct ixl_tx_ring * 864 ixl_txr_alloc(struct ixl_softc *, unsigned int); 865 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int); 866 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *); 867 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *); 868 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *); 869 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *); 870 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *); 871 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *); 872 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int); 873 874 static struct ixl_rx_ring * 875 ixl_rxr_alloc(struct ixl_softc *, unsigned int); 876 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *); 877 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *); 878 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *); 879 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *); 880 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *); 881 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *); 882 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int); 883 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *); 884 885 static struct workqueue * 886 ixl_workq_create(const char *, pri_t, int, int); 887 static void ixl_workq_destroy(struct workqueue *); 888 static int ixl_workqs_teardown(device_t); 889 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *); 890 
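/*
 * The ixl_work_*() helpers wrap workqueue(9): ixl_work_set() binds a
 * callback and argument to an ixl_work, ixl_work_add() enqueues it
 * (ixw_added presumably guards against enqueueing the same work twice),
 * and ixl_work_wait() drains a pending item.  Sketch of the usage
 * pattern seen elsewhere in this file:
 *
 *	ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
 *	ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
 *	...
 *	ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
 */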
static void ixl_work_add(struct workqueue *, struct ixl_work *); 891 static void ixl_work_wait(struct workqueue *, struct ixl_work *); 892 static void ixl_workq_work(struct work *, void *); 893 static const struct ixl_product * 894 ixl_lookup(const struct pci_attach_args *pa); 895 static void ixl_link_state_update(struct ixl_softc *, 896 const struct ixl_aq_desc *); 897 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool); 898 static int ixl_setup_vlan_hwfilter(struct ixl_softc *); 899 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *); 900 static int ixl_update_macvlan(struct ixl_softc *); 901 static int ixl_setup_interrupts(struct ixl_softc *); 902 static void ixl_teardown_interrupts(struct ixl_softc *); 903 static int ixl_setup_stats(struct ixl_softc *); 904 static void ixl_teardown_stats(struct ixl_softc *); 905 static void ixl_stats_callout(void *); 906 static void ixl_stats_update(void *); 907 static int ixl_setup_sysctls(struct ixl_softc *); 908 static void ixl_teardown_sysctls(struct ixl_softc *); 909 static int ixl_sysctl_itr_handler(SYSCTLFN_PROTO); 910 static int ixl_queue_pairs_alloc(struct ixl_softc *); 911 static void ixl_queue_pairs_free(struct ixl_softc *); 912 913 static const struct ixl_phy_type ixl_phy_type_map[] = { 914 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII }, 915 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX }, 916 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 }, 917 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR }, 918 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 }, 919 { 1ULL << IXL_PHY_TYPE_XAUI | 920 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 }, 921 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI }, 922 { 1ULL << IXL_PHY_TYPE_XLAUI | 923 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI }, 924 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU | 925 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 }, 926 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU | 927 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 }, 928 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC }, 929 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC }, 930 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX }, 931 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL | 932 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T }, 933 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T }, 934 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR }, 935 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR }, 936 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX }, 937 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 }, 938 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 }, 939 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX }, 940 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX }, 941 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 }, 942 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR }, 943 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR }, 944 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR }, 945 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR }, 946 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC }, 947 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_ACC }, 948 }; 949 950 static const struct ixl_speed_type ixl_speed_type_map[] = { 951 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) }, 952 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) }, 953 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) }, 954 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) }, 955 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)}, 956 }; 957 958 static const struct ixl_aq_regs ixl_pf_aq_regs = { 959 .atq_tail = I40E_PF_ATQT, 960 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK, 961 .atq_head = I40E_PF_ATQH, 962 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK, 963 .atq_len 
                        = I40E_PF_ATQLEN,
        .atq_bal        = I40E_PF_ATQBAL,
        .atq_bah        = I40E_PF_ATQBAH,
        .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,

        .arq_tail       = I40E_PF_ARQT,
        .arq_tail_mask  = I40E_PF_ARQT_ARQT_MASK,
        .arq_head       = I40E_PF_ARQH,
        .arq_head_mask  = I40E_PF_ARQH_ARQH_MASK,
        .arq_len        = I40E_PF_ARQLEN,
        .arq_bal        = I40E_PF_ARQBAL,
        .arq_bah        = I40E_PF_ARQBAH,
        .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#define ixl_rd(_s, _r) \
        bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
        bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
        bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_flush(_s)           (void)ixl_rd((_s), I40E_GLGEN_STAT)
#define ixl_nqueues(_sc)        (1 << ((_sc)->sc_nqueue_pairs - 1))

static inline uint32_t
ixl_dmamem_hi(struct ixl_dmamem *ixm)
{
        uint32_t retval;
        uint64_t val;

        if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
                val = (intptr_t)IXL_DMA_DVA(ixm);
                retval = (uint32_t)(val >> 32);
        } else {
                retval = 0;
        }

        return retval;
}

static inline uint32_t
ixl_dmamem_lo(struct ixl_dmamem *ixm)
{

        return (uint32_t)IXL_DMA_DVA(ixm);
}

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
        uint64_t val;

        if (sizeof(addr) > 4) {
                val = (intptr_t)addr;
                iaq->iaq_param[2] = htole32(val >> 32);
        } else {
                iaq->iaq_param[2] = htole32(0);
        }

        iaq->iaq_param[3] = htole32(addr);
}

static inline unsigned int
ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
{
        unsigned int num;

        if (prod < cons)
                num = cons - prod;
        else
                num = (ndescs - prod) + cons;

        if (__predict_true(num > 0)) {
                /* the device cannot receive packets if all descriptors are filled */
                num -= 1;
        }

        return num;
}

CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
    ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static const struct ixl_product ixl_products[] = {
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XL710_SFP },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XL710_KX_B },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XL710_KX_C },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XL710_QSFP_A },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XL710_QSFP_B },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XL710_QSFP_C },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_X710_10G_T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_X710_T4_10G },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XXV710_25G_BP },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_X722_KX },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_X722_QSFP },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_X722_SFP },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_X722_1G_BASET },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_X722_10G_BASET },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_X722_I_SFP },
        /* required last entry */
        {0, 0}
};

static const struct ixl_product *
ixl_lookup(const struct pci_attach_args *pa)
{
        const struct ixl_product *ixlp;

        for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
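                /*
                 * the scan stops at the { 0, 0 } sentinel that terminates
                 * ixl_products[] above, so no explicit table length is needed.
                 */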
1076 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id && 1077 PCI_PRODUCT(pa->pa_id) == ixlp->product_id) 1078 return ixlp; 1079 } 1080 1081 return NULL; 1082 } 1083 1084 static int 1085 ixl_match(device_t parent, cfdata_t match, void *aux) 1086 { 1087 const struct pci_attach_args *pa = aux; 1088 1089 return (ixl_lookup(pa) != NULL) ? 1 : 0; 1090 } 1091 1092 static void 1093 ixl_attach(device_t parent, device_t self, void *aux) 1094 { 1095 struct ixl_softc *sc; 1096 struct pci_attach_args *pa = aux; 1097 struct ifnet *ifp; 1098 pcireg_t memtype; 1099 uint32_t firstq, port, ari, func; 1100 char xnamebuf[32]; 1101 int tries, rv, link; 1102 1103 sc = device_private(self); 1104 sc->sc_dev = self; 1105 ifp = &sc->sc_ec.ec_if; 1106 1107 sc->sc_pa = *pa; 1108 sc->sc_dmat = (pci_dma64_available(pa)) ? 1109 pa->pa_dmat64 : pa->pa_dmat; 1110 sc->sc_aq_regs = &ixl_pf_aq_regs; 1111 1112 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id)); 1113 1114 ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag); 1115 1116 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG); 1117 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0, 1118 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) { 1119 aprint_error(": unable to map registers\n"); 1120 return; 1121 } 1122 1123 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET); 1124 1125 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC); 1126 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK; 1127 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 1128 sc->sc_base_queue = firstq; 1129 1130 ixl_clear_hw(sc); 1131 if (ixl_pf_reset(sc) == -1) { 1132 /* error printed by ixl pf_reset */ 1133 goto unmap; 1134 } 1135 1136 port = ixl_rd(sc, I40E_PFGEN_PORTNUM); 1137 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK; 1138 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; 1139 sc->sc_port = port; 1140 aprint_normal(": port %u", sc->sc_port); 1141 1142 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP); 1143 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK; 1144 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; 1145 1146 func = ixl_rd(sc, I40E_PF_FUNC_RID); 1147 sc->sc_pf_id = func & (ari ? 
0xff : 0x7); 1148 1149 /* initialise the adminq */ 1150 1151 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET); 1152 1153 if (ixl_dmamem_alloc(sc, &sc->sc_atq, 1154 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1155 aprint_error("\n" "%s: unable to allocate atq\n", 1156 device_xname(self)); 1157 goto unmap; 1158 } 1159 1160 SIMPLEQ_INIT(&sc->sc_arq_idle); 1161 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc); 1162 sc->sc_arq_cons = 0; 1163 sc->sc_arq_prod = 0; 1164 1165 if (ixl_dmamem_alloc(sc, &sc->sc_arq, 1166 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1167 aprint_error("\n" "%s: unable to allocate arq\n", 1168 device_xname(self)); 1169 goto free_atq; 1170 } 1171 1172 if (!ixl_arq_fill(sc)) { 1173 aprint_error("\n" "%s: unable to fill arq descriptors\n", 1174 device_xname(self)); 1175 goto free_arq; 1176 } 1177 1178 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1179 0, IXL_DMA_LEN(&sc->sc_atq), 1180 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1181 1182 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1183 0, IXL_DMA_LEN(&sc->sc_arq), 1184 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1185 1186 for (tries = 0; tries < 10; tries++) { 1187 sc->sc_atq_cons = 0; 1188 sc->sc_atq_prod = 0; 1189 1190 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1191 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1192 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1193 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1194 1195 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 1196 1197 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 1198 ixl_dmamem_lo(&sc->sc_atq)); 1199 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 1200 ixl_dmamem_hi(&sc->sc_atq)); 1201 ixl_wr(sc, sc->sc_aq_regs->atq_len, 1202 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM); 1203 1204 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 1205 ixl_dmamem_lo(&sc->sc_arq)); 1206 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 1207 ixl_dmamem_hi(&sc->sc_arq)); 1208 ixl_wr(sc, sc->sc_aq_regs->arq_len, 1209 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM); 1210 1211 rv = ixl_get_version(sc); 1212 if (rv == 0) 1213 break; 1214 if (rv != ETIMEDOUT) { 1215 aprint_error(", unable to get firmware version\n"); 1216 goto shutdown; 1217 } 1218 1219 delaymsec(100); 1220 } 1221 1222 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 1223 1224 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) { 1225 aprint_error_dev(self, ", unable to allocate nvm buffer\n"); 1226 goto shutdown; 1227 } 1228 1229 ixl_get_nvm_version(sc); 1230 1231 if (sc->sc_mac_type == I40E_MAC_X722) 1232 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722; 1233 else 1234 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710; 1235 1236 rv = ixl_get_hw_capabilities(sc); 1237 if (rv != 0) { 1238 aprint_error(", GET HW CAPABILITIES %s\n", 1239 rv == ETIMEDOUT ? 
"timeout" : "error"); 1240 goto free_aqbuf; 1241 } 1242 1243 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu); 1244 if (ixl_param_nqps_limit > 0) { 1245 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max, 1246 ixl_param_nqps_limit); 1247 } 1248 1249 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 1250 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs; 1251 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs; 1252 1253 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs); 1254 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs); 1255 KASSERT(sc->sc_rx_ring_ndescs == 1256 (1U << (fls32(sc->sc_rx_ring_ndescs) - 1))); 1257 KASSERT(sc->sc_tx_ring_ndescs == 1258 (1U << (fls32(sc->sc_tx_ring_ndescs) - 1))); 1259 1260 if (ixl_get_mac(sc) != 0) { 1261 /* error printed by ixl_get_mac */ 1262 goto free_aqbuf; 1263 } 1264 1265 aprint_normal("\n"); 1266 aprint_naive("\n"); 1267 1268 aprint_normal_dev(self, "Ethernet address %s\n", 1269 ether_sprintf(sc->sc_enaddr)); 1270 1271 rv = ixl_pxe_clear(sc); 1272 if (rv != 0) { 1273 aprint_debug_dev(self, "CLEAR PXE MODE %s\n", 1274 rv == ETIMEDOUT ? "timeout" : "error"); 1275 } 1276 1277 ixl_set_filter_control(sc); 1278 1279 if (ixl_hmc(sc) != 0) { 1280 /* error printed by ixl_hmc */ 1281 goto free_aqbuf; 1282 } 1283 1284 if (ixl_lldp_shut(sc) != 0) { 1285 /* error printed by ixl_lldp_shut */ 1286 goto free_hmc; 1287 } 1288 1289 if (ixl_phy_mask_ints(sc) != 0) { 1290 /* error printed by ixl_phy_mask_ints */ 1291 goto free_hmc; 1292 } 1293 1294 if (ixl_restart_an(sc) != 0) { 1295 /* error printed by ixl_restart_an */ 1296 goto free_hmc; 1297 } 1298 1299 if (ixl_get_switch_config(sc) != 0) { 1300 /* error printed by ixl_get_switch_config */ 1301 goto free_hmc; 1302 } 1303 1304 rv = ixl_get_link_status_poll(sc, NULL); 1305 if (rv != 0) { 1306 aprint_error_dev(self, "GET LINK STATUS %s\n", 1307 rv == ETIMEDOUT ? "timeout" : "error"); 1308 goto free_hmc; 1309 } 1310 1311 /* 1312 * The FW often returns EIO in "Get PHY Abilities" command 1313 * if there is no delay 1314 */ 1315 DELAY(500); 1316 if (ixl_get_phy_info(sc) != 0) { 1317 /* error printed by ixl_get_phy_info */ 1318 goto free_hmc; 1319 } 1320 1321 if (ixl_dmamem_alloc(sc, &sc->sc_scratch, 1322 sizeof(struct ixl_aq_vsi_data), 8) != 0) { 1323 aprint_error_dev(self, "unable to allocate scratch buffer\n"); 1324 goto free_hmc; 1325 } 1326 1327 rv = ixl_get_vsi(sc); 1328 if (rv != 0) { 1329 aprint_error_dev(self, "GET VSI %s %d\n", 1330 rv == ETIMEDOUT ? "timeout" : "error", rv); 1331 goto free_scratch; 1332 } 1333 1334 rv = ixl_set_vsi(sc); 1335 if (rv != 0) { 1336 aprint_error_dev(self, "UPDATE VSI error %s %d\n", 1337 rv == ETIMEDOUT ? 
"timeout" : "error", rv); 1338 goto free_scratch; 1339 } 1340 1341 if (ixl_queue_pairs_alloc(sc) != 0) { 1342 /* error printed by ixl_queue_pairs_alloc */ 1343 goto free_scratch; 1344 } 1345 1346 if (ixl_setup_interrupts(sc) != 0) { 1347 /* error printed by ixl_setup_interrupts */ 1348 goto free_queue_pairs; 1349 } 1350 1351 if (ixl_setup_stats(sc) != 0) { 1352 aprint_error_dev(self, "failed to setup event counters\n"); 1353 goto teardown_intrs; 1354 } 1355 1356 if (ixl_setup_sysctls(sc) != 0) { 1357 /* error printed by ixl_setup_sysctls */ 1358 goto teardown_stats; 1359 } 1360 1361 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self)); 1362 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI, 1363 IPL_NET, WQ_MPSAFE); 1364 if (sc->sc_workq == NULL) 1365 goto teardown_sysctls; 1366 1367 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self)); 1368 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk, 1369 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE); 1370 if (rv != 0) { 1371 sc->sc_workq_txrx = NULL; 1372 goto teardown_wqs; 1373 } 1374 1375 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self)); 1376 cv_init(&sc->sc_atq_cv, xnamebuf); 1377 1378 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 1379 1380 ifp->if_softc = sc; 1381 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1382 ifp->if_extflags = IFEF_MPSAFE; 1383 ifp->if_ioctl = ixl_ioctl; 1384 ifp->if_start = ixl_start; 1385 ifp->if_transmit = ixl_transmit; 1386 ifp->if_watchdog = ixl_watchdog; 1387 ifp->if_init = ixl_init; 1388 ifp->if_stop = ixl_stop; 1389 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs); 1390 IFQ_SET_READY(&ifp->if_snd); 1391 ifp->if_capabilities |= IXL_IFCAP_RXCSUM; 1392 ifp->if_capabilities |= IXL_IFCAP_TXCSUM; 1393 #if 0 1394 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6; 1395 #endif 1396 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb); 1397 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1398 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 1399 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1400 1401 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities; 1402 /* Disable VLAN_HWFILTER by default */ 1403 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1404 1405 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable; 1406 1407 sc->sc_ec.ec_ifmedia = &sc->sc_media; 1408 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change, 1409 ixl_media_status, &sc->sc_cfg_lock); 1410 1411 ixl_media_add(sc); 1412 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 1413 if (ISSET(sc->sc_phy_abilities, 1414 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1415 ifmedia_add(&sc->sc_media, 1416 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL); 1417 } 1418 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL); 1419 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 1420 1421 rv = if_initialize(ifp); 1422 if (rv != 0) { 1423 aprint_error_dev(self, "if_initialize failed=%d\n", rv); 1424 goto teardown_wqs; 1425 } 1426 1427 sc->sc_ipq = if_percpuq_create(ifp); 1428 if_deferred_start_init(ifp, NULL); 1429 ether_ifattach(ifp, sc->sc_enaddr); 1430 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb); 1431 1432 rv = ixl_get_link_status_poll(sc, &link); 1433 if (rv != 0) 1434 link = LINK_STATE_UNKNOWN; 1435 if_link_state_change(ifp, link); 1436 1437 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 1438 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc); 1439 1440 ixl_config_other_intr(sc); 
1441 ixl_enable_other_intr(sc); 1442 1443 ixl_set_phy_autoselect(sc); 1444 1445 /* remove default mac filter and replace it so we can see vlans */ 1446 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0); 1447 if (rv != ENOENT) { 1448 aprint_debug_dev(self, 1449 "unable to remove macvlan %u\n", rv); 1450 } 1451 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 1452 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1453 if (rv != ENOENT) { 1454 aprint_debug_dev(self, 1455 "unable to remove macvlan, ignore vlan %u\n", rv); 1456 } 1457 1458 if (ixl_update_macvlan(sc) != 0) { 1459 aprint_debug_dev(self, 1460 "couldn't enable vlan hardware filter\n"); 1461 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1462 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 1463 } 1464 1465 sc->sc_txrx_workqueue = true; 1466 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT; 1467 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT; 1468 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT; 1469 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT; 1470 1471 ixl_stats_update(sc); 1472 sc->sc_stats_counters.isc_has_offset = true; 1473 1474 if (pmf_device_register(self, NULL, NULL) != true) 1475 aprint_debug_dev(self, "couldn't establish power handler\n"); 1476 sc->sc_itr_rx = IXL_ITR_RX; 1477 sc->sc_itr_tx = IXL_ITR_TX; 1478 sc->sc_attached = true; 1479 if_register(ifp); 1480 1481 return; 1482 1483 teardown_wqs: 1484 config_finalize_register(self, ixl_workqs_teardown); 1485 teardown_sysctls: 1486 ixl_teardown_sysctls(sc); 1487 teardown_stats: 1488 ixl_teardown_stats(sc); 1489 teardown_intrs: 1490 ixl_teardown_interrupts(sc); 1491 free_queue_pairs: 1492 ixl_queue_pairs_free(sc); 1493 free_scratch: 1494 ixl_dmamem_free(sc, &sc->sc_scratch); 1495 free_hmc: 1496 ixl_hmc_free(sc); 1497 free_aqbuf: 1498 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1499 shutdown: 1500 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1501 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1502 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1503 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1504 1505 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1506 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1507 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1508 1509 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1510 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1511 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1512 1513 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1514 0, IXL_DMA_LEN(&sc->sc_arq), 1515 BUS_DMASYNC_POSTREAD); 1516 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1517 0, IXL_DMA_LEN(&sc->sc_atq), 1518 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1519 1520 ixl_arq_unfill(sc); 1521 free_arq: 1522 ixl_dmamem_free(sc, &sc->sc_arq); 1523 free_atq: 1524 ixl_dmamem_free(sc, &sc->sc_atq); 1525 unmap: 1526 mutex_destroy(&sc->sc_atq_lock); 1527 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1528 mutex_destroy(&sc->sc_cfg_lock); 1529 sc->sc_mems = 0; 1530 1531 sc->sc_attached = false; 1532 } 1533 1534 static int 1535 ixl_detach(device_t self, int flags) 1536 { 1537 struct ixl_softc *sc = device_private(self); 1538 struct ifnet *ifp = &sc->sc_ec.ec_if; 1539 1540 if (!sc->sc_attached) 1541 return 0; 1542 1543 ixl_stop(ifp, 1); 1544 1545 ixl_disable_other_intr(sc); 1546 1547 callout_halt(&sc->sc_stats_callout, NULL); 1548 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task); 1549 1550 /* wait for ATQ handler */ 1551 mutex_enter(&sc->sc_atq_lock); 1552 mutex_exit(&sc->sc_atq_lock); 1553 1554 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task); 1555 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task); 1556 1557 if 
(sc->sc_workq != NULL) { 1558 ixl_workq_destroy(sc->sc_workq); 1559 sc->sc_workq = NULL; 1560 } 1561 1562 if (sc->sc_workq_txrx != NULL) { 1563 workqueue_destroy(sc->sc_workq_txrx); 1564 sc->sc_workq_txrx = NULL; 1565 } 1566 1567 if_percpuq_destroy(sc->sc_ipq); 1568 ether_ifdetach(ifp); 1569 if_detach(ifp); 1570 ifmedia_fini(&sc->sc_media); 1571 1572 ixl_teardown_interrupts(sc); 1573 ixl_teardown_stats(sc); 1574 ixl_teardown_sysctls(sc); 1575 1576 ixl_queue_pairs_free(sc); 1577 1578 ixl_dmamem_free(sc, &sc->sc_scratch); 1579 ixl_hmc_free(sc); 1580 1581 /* shutdown */ 1582 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1583 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1584 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1585 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1586 1587 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1588 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1589 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1590 1591 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1592 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1593 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1594 1595 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1596 0, IXL_DMA_LEN(&sc->sc_arq), 1597 BUS_DMASYNC_POSTREAD); 1598 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1599 0, IXL_DMA_LEN(&sc->sc_atq), 1600 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1601 1602 ixl_arq_unfill(sc); 1603 1604 ixl_dmamem_free(sc, &sc->sc_arq); 1605 ixl_dmamem_free(sc, &sc->sc_atq); 1606 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1607 1608 cv_destroy(&sc->sc_atq_cv); 1609 mutex_destroy(&sc->sc_atq_lock); 1610 1611 if (sc->sc_mems != 0) { 1612 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1613 sc->sc_mems = 0; 1614 } 1615 1616 mutex_destroy(&sc->sc_cfg_lock); 1617 1618 return 0; 1619 } 1620 1621 static int 1622 ixl_workqs_teardown(device_t self) 1623 { 1624 struct ixl_softc *sc = device_private(self); 1625 1626 if (sc->sc_workq != NULL) { 1627 ixl_workq_destroy(sc->sc_workq); 1628 sc->sc_workq = NULL; 1629 } 1630 1631 if (sc->sc_workq_txrx != NULL) { 1632 workqueue_destroy(sc->sc_workq_txrx); 1633 sc->sc_workq_txrx = NULL; 1634 } 1635 1636 return 0; 1637 } 1638 1639 static int 1640 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 1641 { 1642 struct ifnet *ifp = &ec->ec_if; 1643 struct ixl_softc *sc = ifp->if_softc; 1644 int rv; 1645 1646 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 1647 return 0; 1648 } 1649 1650 if (set) { 1651 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid, 1652 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1653 if (rv == 0) { 1654 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 1655 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1656 } 1657 } else { 1658 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid, 1659 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1660 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid, 1661 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1662 } 1663 1664 return rv; 1665 } 1666 1667 static void 1668 ixl_media_add(struct ixl_softc *sc) 1669 { 1670 struct ifmedia *ifm = &sc->sc_media; 1671 const struct ixl_phy_type *itype; 1672 unsigned int i; 1673 bool flow; 1674 1675 if (ISSET(sc->sc_phy_abilities, 1676 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1677 flow = true; 1678 } else { 1679 flow = false; 1680 } 1681 1682 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 1683 itype = &ixl_phy_type_map[i]; 1684 1685 if (ISSET(sc->sc_phy_types, itype->phy_type)) { 1686 ifmedia_add(ifm, 1687 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL); 1688 1689 if (flow) { 1690 ifmedia_add(ifm, 1691 IFM_ETHER | IFM_FDX | IFM_FLOW | 1692 
itype->ifm_type, 0, NULL); 1693 } 1694 1695 if (itype->ifm_type != IFM_100_TX) 1696 continue; 1697 1698 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 1699 0, NULL); 1700 if (flow) { 1701 ifmedia_add(ifm, 1702 IFM_ETHER | IFM_FLOW | itype->ifm_type, 1703 0, NULL); 1704 } 1705 } 1706 } 1707 } 1708 1709 static void 1710 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1711 { 1712 struct ixl_softc *sc = ifp->if_softc; 1713 1714 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 1715 1716 ifmr->ifm_status = sc->sc_media_status; 1717 ifmr->ifm_active = sc->sc_media_active; 1718 } 1719 1720 static int 1721 ixl_media_change(struct ifnet *ifp) 1722 { 1723 struct ixl_softc *sc = ifp->if_softc; 1724 struct ifmedia *ifm = &sc->sc_media; 1725 uint64_t ifm_active = sc->sc_media_active; 1726 uint8_t link_speed, abilities; 1727 1728 switch (IFM_SUBTYPE(ifm_active)) { 1729 case IFM_1000_SGMII: 1730 case IFM_1000_KX: 1731 case IFM_10G_KX4: 1732 case IFM_10G_KR: 1733 case IFM_40G_KR4: 1734 case IFM_20G_KR2: 1735 case IFM_25G_KR: 1736 /* backplanes */ 1737 return EINVAL; 1738 } 1739 1740 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP; 1741 1742 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1743 case IFM_AUTO: 1744 link_speed = sc->sc_phy_linkspeed; 1745 break; 1746 case IFM_NONE: 1747 link_speed = 0; 1748 CLR(abilities, IXL_PHY_ABILITY_LINKUP); 1749 break; 1750 default: 1751 link_speed = ixl_search_baudrate( 1752 ifmedia_baudrate(ifm->ifm_media)); 1753 } 1754 1755 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) { 1756 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0) 1757 return EINVAL; 1758 } 1759 1760 if (ifm->ifm_media & IFM_FLOW) { 1761 abilities |= sc->sc_phy_abilities & 1762 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX); 1763 } 1764 1765 return ixl_set_phy_config(sc, link_speed, abilities, false); 1766 } 1767 1768 static void 1769 ixl_watchdog(struct ifnet *ifp) 1770 { 1771 1772 } 1773 1774 static void 1775 ixl_del_all_multiaddr(struct ixl_softc *sc) 1776 { 1777 struct ethercom *ec = &sc->sc_ec; 1778 struct ether_multi *enm; 1779 struct ether_multistep step; 1780 1781 ETHER_LOCK(ec); 1782 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1783 ETHER_NEXT_MULTI(step, enm)) { 1784 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1785 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1786 } 1787 ETHER_UNLOCK(ec); 1788 } 1789 1790 static int 1791 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1792 { 1793 struct ifnet *ifp = &sc->sc_ec.ec_if; 1794 int rv; 1795 1796 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) 1797 return 0; 1798 1799 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) { 1800 ixl_del_all_multiaddr(sc); 1801 SET(ifp->if_flags, IFF_ALLMULTI); 1802 return ENETRESET; 1803 } 1804 1805 /* multicast address can not use VLAN HWFILTER */ 1806 rv = ixl_add_macvlan(sc, addrlo, 0, 1807 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1808 1809 if (rv == ENOSPC) { 1810 ixl_del_all_multiaddr(sc); 1811 SET(ifp->if_flags, IFF_ALLMULTI); 1812 return ENETRESET; 1813 } 1814 1815 return rv; 1816 } 1817 1818 static int 1819 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1820 { 1821 struct ifnet *ifp = &sc->sc_ec.ec_if; 1822 struct ethercom *ec = &sc->sc_ec; 1823 struct ether_multi *enm, *enm_last; 1824 struct ether_multistep step; 1825 int error, rv = 0; 1826 1827 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) { 1828 ixl_remove_macvlan(sc, addrlo, 0, 1829 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1830 return 0; 1831 } 1832 1833 ETHER_LOCK(ec); 1834 for (ETHER_FIRST_MULTI(step, ec, enm); enm != 
NULL; 1835 ETHER_NEXT_MULTI(step, enm)) { 1836 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1837 ETHER_ADDR_LEN) != 0) { 1838 goto out; 1839 } 1840 } 1841 1842 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1843 ETHER_NEXT_MULTI(step, enm)) { 1844 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0, 1845 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1846 if (error != 0) 1847 break; 1848 } 1849 1850 if (enm != NULL) { 1851 enm_last = enm; 1852 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1853 ETHER_NEXT_MULTI(step, enm)) { 1854 if (enm == enm_last) 1855 break; 1856 1857 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1858 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1859 } 1860 } else { 1861 CLR(ifp->if_flags, IFF_ALLMULTI); 1862 rv = ENETRESET; 1863 } 1864 1865 out: 1866 ETHER_UNLOCK(ec); 1867 return rv; 1868 } 1869 1870 static int 1871 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1872 { 1873 struct ifreq *ifr = (struct ifreq *)data; 1874 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc; 1875 const struct sockaddr *sa; 1876 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN]; 1877 int s, error = 0; 1878 unsigned int nmtu; 1879 1880 switch (cmd) { 1881 case SIOCSIFMTU: 1882 nmtu = ifr->ifr_mtu; 1883 1884 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) { 1885 error = EINVAL; 1886 break; 1887 } 1888 if (ifp->if_mtu != nmtu) { 1889 s = splnet(); 1890 error = ether_ioctl(ifp, cmd, data); 1891 splx(s); 1892 if (error == ENETRESET) 1893 error = ixl_init(ifp); 1894 } 1895 break; 1896 case SIOCADDMULTI: 1897 sa = ifreq_getaddr(SIOCADDMULTI, ifr); 1898 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) { 1899 error = ether_multiaddr(sa, addrlo, addrhi); 1900 if (error != 0) 1901 return error; 1902 1903 error = ixl_add_multi(sc, addrlo, addrhi); 1904 if (error != 0 && error != ENETRESET) { 1905 ether_delmulti(sa, &sc->sc_ec); 1906 error = EIO; 1907 } 1908 } 1909 break; 1910 1911 case SIOCDELMULTI: 1912 sa = ifreq_getaddr(SIOCDELMULTI, ifr); 1913 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) { 1914 error = ether_multiaddr(sa, addrlo, addrhi); 1915 if (error != 0) 1916 return error; 1917 1918 error = ixl_del_multi(sc, addrlo, addrhi); 1919 } 1920 break; 1921 1922 default: 1923 s = splnet(); 1924 error = ether_ioctl(ifp, cmd, data); 1925 splx(s); 1926 } 1927 1928 if (error == ENETRESET) 1929 error = ixl_iff(sc); 1930 1931 return error; 1932 } 1933 1934 static enum i40e_mac_type 1935 ixl_mactype(pci_product_id_t id) 1936 { 1937 1938 switch (id) { 1939 case PCI_PRODUCT_INTEL_XL710_SFP: 1940 case PCI_PRODUCT_INTEL_XL710_KX_B: 1941 case PCI_PRODUCT_INTEL_XL710_KX_C: 1942 case PCI_PRODUCT_INTEL_XL710_QSFP_A: 1943 case PCI_PRODUCT_INTEL_XL710_QSFP_B: 1944 case PCI_PRODUCT_INTEL_XL710_QSFP_C: 1945 case PCI_PRODUCT_INTEL_X710_10G_T: 1946 case PCI_PRODUCT_INTEL_XL710_20G_BP_1: 1947 case PCI_PRODUCT_INTEL_XL710_20G_BP_2: 1948 case PCI_PRODUCT_INTEL_X710_T4_10G: 1949 case PCI_PRODUCT_INTEL_XXV710_25G_BP: 1950 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28: 1951 return I40E_MAC_XL710; 1952 1953 case PCI_PRODUCT_INTEL_X722_KX: 1954 case PCI_PRODUCT_INTEL_X722_QSFP: 1955 case PCI_PRODUCT_INTEL_X722_SFP: 1956 case PCI_PRODUCT_INTEL_X722_1G_BASET: 1957 case PCI_PRODUCT_INTEL_X722_10G_BASET: 1958 case PCI_PRODUCT_INTEL_X722_I_SFP: 1959 return I40E_MAC_X722; 1960 } 1961 1962 return I40E_MAC_GENERIC; 1963 } 1964 1965 static void 1966 ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag) 1967 { 1968 pcireg_t csr; 1969 1970 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 1971 csr |= (PCI_COMMAND_MASTER_ENABLE | 1972 
PCI_COMMAND_MEM_ENABLE); 1973 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 1974 } 1975 1976 static inline void * 1977 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i) 1978 { 1979 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 1980 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1981 1982 if (i >= e->hmc_count) 1983 return NULL; 1984 1985 kva += e->hmc_base; 1986 kva += i * e->hmc_size; 1987 1988 return kva; 1989 } 1990 1991 static inline size_t 1992 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type) 1993 { 1994 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1995 1996 return e->hmc_size; 1997 } 1998 1999 static void 2000 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 2001 { 2002 struct ixl_rx_ring *rxr = qp->qp_rxr; 2003 2004 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 2005 I40E_PFINT_DYN_CTLN_INTENA_MASK | 2006 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 2007 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 2008 ixl_flush(sc); 2009 } 2010 2011 static void 2012 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 2013 { 2014 struct ixl_rx_ring *rxr = qp->qp_rxr; 2015 2016 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 2017 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 2018 ixl_flush(sc); 2019 } 2020 2021 static void 2022 ixl_enable_other_intr(struct ixl_softc *sc) 2023 { 2024 2025 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 2026 I40E_PFINT_DYN_CTL0_INTENA_MASK | 2027 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 2028 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 2029 ixl_flush(sc); 2030 } 2031 2032 static void 2033 ixl_disable_other_intr(struct ixl_softc *sc) 2034 { 2035 2036 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 2037 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 2038 ixl_flush(sc); 2039 } 2040 2041 static int 2042 ixl_reinit(struct ixl_softc *sc) 2043 { 2044 struct ixl_rx_ring *rxr; 2045 struct ixl_tx_ring *txr; 2046 unsigned int i; 2047 uint32_t reg; 2048 2049 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2050 2051 if (ixl_get_vsi(sc) != 0) 2052 return EIO; 2053 2054 if (ixl_set_vsi(sc) != 0) 2055 return EIO; 2056 2057 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2058 txr = sc->sc_qps[i].qp_txr; 2059 rxr = sc->sc_qps[i].qp_rxr; 2060 2061 ixl_txr_config(sc, txr); 2062 ixl_rxr_config(sc, rxr); 2063 } 2064 2065 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2066 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE); 2067 2068 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2069 txr = sc->sc_qps[i].qp_txr; 2070 rxr = sc->sc_qps[i].qp_rxr; 2071 2072 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE | 2073 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)); 2074 ixl_flush(sc); 2075 2076 ixl_wr(sc, txr->txr_tail, txr->txr_prod); 2077 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod); 2078 2079 /* ixl_rxfill() needs lock held */ 2080 mutex_enter(&rxr->rxr_lock); 2081 ixl_rxfill(sc, rxr); 2082 mutex_exit(&rxr->rxr_lock); 2083 2084 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2085 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2086 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2087 if (ixl_rxr_enabled(sc, rxr) != 0) 2088 goto stop; 2089 2090 ixl_txr_qdis(sc, txr, 1); 2091 2092 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2093 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2094 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2095 2096 if (ixl_txr_enabled(sc, txr) != 0) 2097 goto stop; 2098 } 2099 2100 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2101 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2102 2103 return 0; 2104 2105 stop: 2106 bus_dmamap_sync(sc->sc_dmat, 
IXL_DMA_MAP(&sc->sc_hmc_pd), 2107 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2108 2109 return ETIMEDOUT; 2110 } 2111 2112 static int 2113 ixl_init_locked(struct ixl_softc *sc) 2114 { 2115 struct ifnet *ifp = &sc->sc_ec.ec_if; 2116 unsigned int i; 2117 int error, eccap_change; 2118 2119 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2120 2121 if (ISSET(ifp->if_flags, IFF_RUNNING)) 2122 ixl_stop_locked(sc); 2123 2124 if (sc->sc_dead) { 2125 return ENXIO; 2126 } 2127 2128 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable; 2129 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING)) 2130 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 2131 2132 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) { 2133 if (ixl_update_macvlan(sc) == 0) { 2134 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 2135 } else { 2136 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 2137 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 2138 } 2139 } 2140 2141 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX) 2142 sc->sc_nqueue_pairs = 1; 2143 else 2144 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 2145 2146 error = ixl_reinit(sc); 2147 if (error) { 2148 ixl_stop_locked(sc); 2149 return error; 2150 } 2151 2152 SET(ifp->if_flags, IFF_RUNNING); 2153 CLR(ifp->if_flags, IFF_OACTIVE); 2154 2155 ixl_config_rss(sc); 2156 ixl_config_queue_intr(sc); 2157 2158 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2159 ixl_enable_queue_intr(sc, &sc->sc_qps[i]); 2160 } 2161 2162 error = ixl_iff(sc); 2163 if (error) { 2164 ixl_stop_locked(sc); 2165 return error; 2166 } 2167 2168 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 2169 2170 return 0; 2171 } 2172 2173 static int 2174 ixl_init(struct ifnet *ifp) 2175 { 2176 struct ixl_softc *sc = ifp->if_softc; 2177 int error; 2178 2179 mutex_enter(&sc->sc_cfg_lock); 2180 error = ixl_init_locked(sc); 2181 mutex_exit(&sc->sc_cfg_lock); 2182 2183 if (error == 0) 2184 (void)ixl_get_link_status(sc); 2185 2186 return error; 2187 } 2188 2189 static int 2190 ixl_iff(struct ixl_softc *sc) 2191 { 2192 struct ifnet *ifp = &sc->sc_ec.ec_if; 2193 struct ixl_atq iatq; 2194 struct ixl_aq_desc *iaq; 2195 struct ixl_aq_vsi_promisc_param *param; 2196 uint16_t flag_add, flag_del; 2197 int error; 2198 2199 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 2200 return 0; 2201 2202 memset(&iatq, 0, sizeof(iatq)); 2203 2204 iaq = &iatq.iatq_desc; 2205 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC); 2206 2207 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param; 2208 param->flags = htole16(0); 2209 2210 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER) 2211 || ISSET(ifp->if_flags, IFF_PROMISC)) { 2212 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2213 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2214 } 2215 2216 if (ISSET(ifp->if_flags, IFF_PROMISC)) { 2217 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2218 IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2219 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) { 2220 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2221 } 2222 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2223 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2224 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2225 param->seid = sc->sc_seid; 2226 2227 error = ixl_atq_exec(sc, &iatq); 2228 if (error) 2229 return error; 2230 2231 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) 2232 return EIO; 2233 2234 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) { 2235 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 2236 
flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH; 2237 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH; 2238 } else { 2239 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN; 2240 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN; 2241 } 2242 2243 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del); 2244 2245 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 2246 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add); 2247 } 2248 return 0; 2249 } 2250 2251 static void 2252 ixl_stop_rendezvous(struct ixl_softc *sc) 2253 { 2254 struct ixl_tx_ring *txr; 2255 struct ixl_rx_ring *rxr; 2256 unsigned int i; 2257 2258 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2259 txr = sc->sc_qps[i].qp_txr; 2260 rxr = sc->sc_qps[i].qp_rxr; 2261 2262 mutex_enter(&txr->txr_lock); 2263 mutex_exit(&txr->txr_lock); 2264 2265 mutex_enter(&rxr->rxr_lock); 2266 mutex_exit(&rxr->rxr_lock); 2267 2268 sc->sc_qps[i].qp_workqueue = false; 2269 workqueue_wait(sc->sc_workq_txrx, 2270 &sc->sc_qps[i].qp_work); 2271 } 2272 } 2273 2274 static void 2275 ixl_stop_locked(struct ixl_softc *sc) 2276 { 2277 struct ifnet *ifp = &sc->sc_ec.ec_if; 2278 struct ixl_rx_ring *rxr; 2279 struct ixl_tx_ring *txr; 2280 unsigned int i; 2281 uint32_t reg; 2282 2283 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2284 2285 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE); 2286 callout_stop(&sc->sc_stats_callout); 2287 2288 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2289 txr = sc->sc_qps[i].qp_txr; 2290 rxr = sc->sc_qps[i].qp_rxr; 2291 2292 ixl_disable_queue_intr(sc, &sc->sc_qps[i]); 2293 2294 mutex_enter(&txr->txr_lock); 2295 ixl_txr_qdis(sc, txr, 0); 2296 mutex_exit(&txr->txr_lock); 2297 } 2298 2299 /* XXX wait at least 400 usec for all tx queues in one go */ 2300 ixl_flush(sc); 2301 DELAY(500); 2302 2303 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2304 txr = sc->sc_qps[i].qp_txr; 2305 rxr = sc->sc_qps[i].qp_rxr; 2306 2307 mutex_enter(&txr->txr_lock); 2308 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2309 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2310 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2311 mutex_exit(&txr->txr_lock); 2312 2313 mutex_enter(&rxr->rxr_lock); 2314 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2315 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2316 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2317 mutex_exit(&rxr->rxr_lock); 2318 } 2319 2320 /* XXX short wait for all queue disables to settle */ 2321 ixl_flush(sc); 2322 DELAY(50); 2323 2324 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2325 txr = sc->sc_qps[i].qp_txr; 2326 rxr = sc->sc_qps[i].qp_rxr; 2327 2328 mutex_enter(&txr->txr_lock); 2329 if (ixl_txr_disabled(sc, txr) != 0) { 2330 mutex_exit(&txr->txr_lock); 2331 goto die; 2332 } 2333 mutex_exit(&txr->txr_lock); 2334 2335 mutex_enter(&rxr->rxr_lock); 2336 if (ixl_rxr_disabled(sc, rxr) != 0) { 2337 mutex_exit(&rxr->rxr_lock); 2338 goto die; 2339 } 2340 mutex_exit(&rxr->rxr_lock); 2341 } 2342 2343 ixl_stop_rendezvous(sc); 2344 2345 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2346 txr = sc->sc_qps[i].qp_txr; 2347 rxr = sc->sc_qps[i].qp_rxr; 2348 2349 mutex_enter(&txr->txr_lock); 2350 ixl_txr_unconfig(sc, txr); 2351 mutex_exit(&txr->txr_lock); 2352 2353 mutex_enter(&rxr->rxr_lock); 2354 ixl_rxr_unconfig(sc, rxr); 2355 mutex_exit(&rxr->rxr_lock); 2356 2357 ixl_txr_clean(sc, txr); 2358 ixl_rxr_clean(sc, rxr); 2359 } 2360 2361 return; 2362 die: 2363 sc->sc_dead = true; 2364 log(LOG_CRIT, "%s: failed to shut down rings", 2365 device_xname(sc->sc_dev)); 2366 return; 2367 } 2368 2369 static void 2370 ixl_stop(struct ifnet *ifp, int disable) 2371 { 2372 struct ixl_softc *sc = ifp->if_softc; 2373 2374 
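	/* serialize ring teardown against ixl_init() and ioctl reconfiguration */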
mutex_enter(&sc->sc_cfg_lock); 2375 ixl_stop_locked(sc); 2376 mutex_exit(&sc->sc_cfg_lock); 2377 } 2378 2379 static int 2380 ixl_queue_pairs_alloc(struct ixl_softc *sc) 2381 { 2382 struct ixl_queue_pair *qp; 2383 unsigned int i; 2384 size_t sz; 2385 2386 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2387 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP); 2388 2389 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2390 qp = &sc->sc_qps[i]; 2391 2392 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2393 ixl_handle_queue, qp); 2394 if (qp->qp_si == NULL) 2395 goto free; 2396 2397 qp->qp_txr = ixl_txr_alloc(sc, i); 2398 if (qp->qp_txr == NULL) 2399 goto free; 2400 2401 qp->qp_rxr = ixl_rxr_alloc(sc, i); 2402 if (qp->qp_rxr == NULL) 2403 goto free; 2404 2405 qp->qp_sc = sc; 2406 snprintf(qp->qp_name, sizeof(qp->qp_name), 2407 "%s-TXRX%d", device_xname(sc->sc_dev), i); 2408 } 2409 2410 return 0; 2411 free: 2412 if (sc->sc_qps != NULL) { 2413 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2414 qp = &sc->sc_qps[i]; 2415 2416 if (qp->qp_txr != NULL) 2417 ixl_txr_free(sc, qp->qp_txr); 2418 if (qp->qp_rxr != NULL) 2419 ixl_rxr_free(sc, qp->qp_rxr); 2420 if (qp->qp_si != NULL) 2421 softint_disestablish(qp->qp_si); 2422 } 2423 2424 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2425 kmem_free(sc->sc_qps, sz); 2426 sc->sc_qps = NULL; 2427 } 2428 2429 return -1; 2430 } 2431 2432 static void 2433 ixl_queue_pairs_free(struct ixl_softc *sc) 2434 { 2435 struct ixl_queue_pair *qp; 2436 unsigned int i; 2437 size_t sz; 2438 2439 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2440 qp = &sc->sc_qps[i]; 2441 ixl_txr_free(sc, qp->qp_txr); 2442 ixl_rxr_free(sc, qp->qp_rxr); 2443 softint_disestablish(qp->qp_si); 2444 } 2445 2446 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2447 kmem_free(sc->sc_qps, sz); 2448 sc->sc_qps = NULL; 2449 } 2450 2451 static struct ixl_tx_ring * 2452 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid) 2453 { 2454 struct ixl_tx_ring *txr = NULL; 2455 struct ixl_tx_map *maps = NULL, *txm; 2456 unsigned int i; 2457 2458 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP); 2459 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs, 2460 KM_SLEEP); 2461 2462 if (ixl_dmamem_alloc(sc, &txr->txr_mem, 2463 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs, 2464 IXL_TX_QUEUE_ALIGN) != 0) 2465 goto free; 2466 2467 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2468 txm = &maps[i]; 2469 2470 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE, 2471 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0, 2472 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0) 2473 goto uncreate; 2474 2475 txm->txm_eop = -1; 2476 txm->txm_m = NULL; 2477 } 2478 2479 txr->txr_cons = txr->txr_prod = 0; 2480 txr->txr_maps = maps; 2481 2482 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP); 2483 if (txr->txr_intrq == NULL) 2484 goto uncreate; 2485 2486 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2487 ixl_deferred_transmit, txr); 2488 if (txr->txr_si == NULL) 2489 goto destroy_pcq; 2490 2491 txr->txr_tail = I40E_QTX_TAIL(qid); 2492 txr->txr_qid = qid; 2493 txr->txr_sc = sc; 2494 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET); 2495 2496 return txr; 2497 2498 destroy_pcq: 2499 pcq_destroy(txr->txr_intrq); 2500 uncreate: 2501 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2502 txm = &maps[i]; 2503 2504 if (txm->txm_map == NULL) 2505 continue; 2506 2507 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2508 } 2509 2510 ixl_dmamem_free(sc, &txr->txr_mem); 2511 free: 2512 kmem_free(maps, 
sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2513 kmem_free(txr, sizeof(*txr)); 2514 2515 return NULL; 2516 } 2517 2518 static void 2519 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable) 2520 { 2521 unsigned int qid; 2522 bus_size_t reg; 2523 uint32_t r; 2524 2525 qid = txr->txr_qid + sc->sc_base_queue; 2526 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128); 2527 qid %= 128; 2528 2529 r = ixl_rd(sc, reg); 2530 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK); 2531 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 2532 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK : 2533 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK); 2534 ixl_wr(sc, reg, r); 2535 } 2536 2537 static void 2538 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2539 { 2540 struct ixl_hmc_txq txq; 2541 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch); 2542 void *hmc; 2543 2544 memset(&txq, 0, sizeof(txq)); 2545 txq.head = htole16(txr->txr_cons); 2546 txq.new_context = 1; 2547 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT); 2548 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB; 2549 txq.qlen = htole16(sc->sc_tx_ring_ndescs); 2550 txq.tphrdesc_ena = 0; 2551 txq.tphrpacket_ena = 0; 2552 txq.tphwdesc_ena = 0; 2553 txq.rdylist = data->qs_handle[0]; 2554 2555 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2556 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2557 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, 2558 __arraycount(ixl_hmc_pack_txq)); 2559 } 2560 2561 static void 2562 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2563 { 2564 void *hmc; 2565 2566 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2567 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2568 txr->txr_cons = txr->txr_prod = 0; 2569 } 2570 2571 static void 2572 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2573 { 2574 struct ixl_tx_map *maps, *txm; 2575 bus_dmamap_t map; 2576 unsigned int i; 2577 2578 maps = txr->txr_maps; 2579 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2580 txm = &maps[i]; 2581 2582 if (txm->txm_m == NULL) 2583 continue; 2584 2585 map = txm->txm_map; 2586 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2587 BUS_DMASYNC_POSTWRITE); 2588 bus_dmamap_unload(sc->sc_dmat, map); 2589 2590 m_freem(txm->txm_m); 2591 txm->txm_m = NULL; 2592 } 2593 } 2594 2595 static int 2596 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2597 { 2598 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2599 uint32_t reg; 2600 int i; 2601 2602 for (i = 0; i < 10; i++) { 2603 reg = ixl_rd(sc, ena); 2604 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK)) 2605 return 0; 2606 2607 delaymsec(10); 2608 } 2609 2610 return ETIMEDOUT; 2611 } 2612 2613 static int 2614 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2615 { 2616 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2617 uint32_t reg; 2618 int i; 2619 2620 KASSERT(mutex_owned(&txr->txr_lock)); 2621 2622 for (i = 0; i < 10; i++) { 2623 reg = ixl_rd(sc, ena); 2624 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0) 2625 return 0; 2626 2627 delaymsec(10); 2628 } 2629 2630 return ETIMEDOUT; 2631 } 2632 2633 static void 2634 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2635 { 2636 struct ixl_tx_map *maps, *txm; 2637 struct mbuf *m; 2638 unsigned int i; 2639 2640 softint_disestablish(txr->txr_si); 2641 while ((m = pcq_get(txr->txr_intrq)) != NULL) 2642 m_freem(m); 2643 pcq_destroy(txr->txr_intrq); 2644 2645 maps = txr->txr_maps; 2646 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2647 txm = &maps[i]; 2648 2649 
bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2650 } 2651 2652 ixl_dmamem_free(sc, &txr->txr_mem); 2653 mutex_destroy(&txr->txr_lock); 2654 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2655 kmem_free(txr, sizeof(*txr)); 2656 } 2657 2658 static inline int 2659 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0, 2660 struct ixl_tx_ring *txr) 2661 { 2662 struct mbuf *m; 2663 int error; 2664 2665 KASSERT(mutex_owned(&txr->txr_lock)); 2666 2667 m = *m0; 2668 2669 error = bus_dmamap_load_mbuf(dmat, map, m, 2670 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2671 if (error != EFBIG) 2672 return error; 2673 2674 m = m_defrag(m, M_DONTWAIT); 2675 if (m != NULL) { 2676 *m0 = m; 2677 txr->txr_defragged.ev_count++; 2678 2679 error = bus_dmamap_load_mbuf(dmat, map, m, 2680 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2681 } else { 2682 txr->txr_defrag_failed.ev_count++; 2683 error = ENOBUFS; 2684 } 2685 2686 return error; 2687 } 2688 2689 static inline int 2690 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd) 2691 { 2692 struct ether_header *eh; 2693 size_t len; 2694 uint64_t cmd; 2695 2696 cmd = 0; 2697 2698 eh = mtod(m, struct ether_header *); 2699 switch (htons(eh->ether_type)) { 2700 case ETHERTYPE_IP: 2701 case ETHERTYPE_IPV6: 2702 len = ETHER_HDR_LEN; 2703 break; 2704 case ETHERTYPE_VLAN: 2705 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2706 break; 2707 default: 2708 len = 0; 2709 } 2710 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT); 2711 2712 if (m->m_pkthdr.csum_flags & 2713 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 2714 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4; 2715 } 2716 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) { 2717 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM; 2718 } 2719 2720 if (m->m_pkthdr.csum_flags & 2721 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 2722 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6; 2723 } 2724 2725 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) { 2726 case IXL_TX_DESC_CMD_IIPT_IPV4: 2727 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM: 2728 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2729 break; 2730 case IXL_TX_DESC_CMD_IIPT_IPV6: 2731 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2732 break; 2733 default: 2734 len = 0; 2735 } 2736 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT); 2737 2738 if (m->m_pkthdr.csum_flags & 2739 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) { 2740 len = sizeof(struct tcphdr); 2741 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP; 2742 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) { 2743 len = sizeof(struct udphdr); 2744 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP; 2745 } else { 2746 len = 0; 2747 } 2748 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT); 2749 2750 *cmd_txd |= cmd; 2751 return 0; 2752 } 2753 2754 static void 2755 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr, 2756 bool is_transmit) 2757 { 2758 struct ixl_softc *sc = ifp->if_softc; 2759 struct ixl_tx_desc *ring, *txd; 2760 struct ixl_tx_map *txm; 2761 bus_dmamap_t map; 2762 struct mbuf *m; 2763 uint64_t cmd, cmd_txd; 2764 unsigned int prod, free, last, i; 2765 unsigned int mask; 2766 int post = 0; 2767 2768 KASSERT(mutex_owned(&txr->txr_lock)); 2769 2770 if (!ISSET(ifp->if_flags, IFF_RUNNING) 2771 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) { 2772 if (!is_transmit) 2773 IFQ_PURGE(&ifp->if_snd); 2774 return; 2775 } 2776 2777 prod = txr->txr_prod; 2778 free = txr->txr_cons; 2779 if (free <= prod) 2780 free += sc->sc_tx_ring_ndescs; 2781 free -= prod; 2782 2783 bus_dmamap_sync(sc->sc_dmat, 
IXL_DMA_MAP(&txr->txr_mem), 2784 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE); 2785 2786 ring = IXL_DMA_KVA(&txr->txr_mem); 2787 mask = sc->sc_tx_ring_ndescs - 1; 2788 last = prod; 2789 cmd = 0; 2790 txd = NULL; 2791 2792 for (;;) { 2793 if (free <= IXL_TX_PKT_DESCS) { 2794 if (!is_transmit) 2795 SET(ifp->if_flags, IFF_OACTIVE); 2796 break; 2797 } 2798 2799 if (is_transmit) 2800 m = pcq_get(txr->txr_intrq); 2801 else 2802 IFQ_DEQUEUE(&ifp->if_snd, m); 2803 2804 if (m == NULL) 2805 break; 2806 2807 txm = &txr->txr_maps[prod]; 2808 map = txm->txm_map; 2809 2810 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) { 2811 if_statinc(ifp, if_oerrors); 2812 m_freem(m); 2813 continue; 2814 } 2815 2816 cmd_txd = 0; 2817 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) { 2818 ixl_tx_setup_offloads(m, &cmd_txd); 2819 } 2820 2821 if (vlan_has_tag(m)) { 2822 cmd_txd |= (uint64_t)vlan_get_tag(m) << 2823 IXL_TX_DESC_L2TAG1_SHIFT; 2824 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1; 2825 } 2826 2827 bus_dmamap_sync(sc->sc_dmat, map, 0, 2828 map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2829 2830 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) { 2831 txd = &ring[prod]; 2832 2833 cmd = (uint64_t)map->dm_segs[i].ds_len << 2834 IXL_TX_DESC_BSIZE_SHIFT; 2835 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC; 2836 cmd |= cmd_txd; 2837 2838 txd->addr = htole64(map->dm_segs[i].ds_addr); 2839 txd->cmd = htole64(cmd); 2840 2841 last = prod; 2842 2843 prod++; 2844 prod &= mask; 2845 } 2846 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS; 2847 txd->cmd = htole64(cmd); 2848 2849 txm->txm_m = m; 2850 txm->txm_eop = last; 2851 2852 bpf_mtap(ifp, m, BPF_D_OUT); 2853 2854 free -= i; 2855 post = 1; 2856 } 2857 2858 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2859 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE); 2860 2861 if (post) { 2862 txr->txr_prod = prod; 2863 ixl_wr(sc, txr->txr_tail, prod); 2864 } 2865 } 2866 2867 static int 2868 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit) 2869 { 2870 struct ifnet *ifp = &sc->sc_ec.ec_if; 2871 struct ixl_tx_desc *ring, *txd; 2872 struct ixl_tx_map *txm; 2873 struct mbuf *m; 2874 bus_dmamap_t map; 2875 unsigned int cons, prod, last; 2876 unsigned int mask; 2877 uint64_t dtype; 2878 int done = 0, more = 0; 2879 2880 KASSERT(mutex_owned(&txr->txr_lock)); 2881 2882 prod = txr->txr_prod; 2883 cons = txr->txr_cons; 2884 2885 if (cons == prod) 2886 return 0; 2887 2888 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2889 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD); 2890 2891 ring = IXL_DMA_KVA(&txr->txr_mem); 2892 mask = sc->sc_tx_ring_ndescs - 1; 2893 2894 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2895 2896 do { 2897 if (txlimit-- <= 0) { 2898 more = 1; 2899 break; 2900 } 2901 2902 txm = &txr->txr_maps[cons]; 2903 last = txm->txm_eop; 2904 txd = &ring[last]; 2905 2906 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK); 2907 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE)) 2908 break; 2909 2910 map = txm->txm_map; 2911 2912 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2913 BUS_DMASYNC_POSTWRITE); 2914 bus_dmamap_unload(sc->sc_dmat, map); 2915 2916 m = txm->txm_m; 2917 if (m != NULL) { 2918 if_statinc_ref(nsr, if_opackets); 2919 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len); 2920 if (ISSET(m->m_flags, M_MCAST)) 2921 if_statinc_ref(nsr, if_omcasts); 2922 m_freem(m); 2923 } 2924 2925 txm->txm_m = NULL; 2926 txm->txm_eop = -1; 2927 2928 cons = last + 1; 2929 cons &= mask; 2930 done = 1; 2931 } while (cons != prod); 2932 2933 
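	/* release the interface stats reference taken with IF_STAT_GETREF() above */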
IF_STAT_PUTREF(ifp); 2934 2935 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2936 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD); 2937 2938 txr->txr_cons = cons; 2939 2940 if (done) { 2941 softint_schedule(txr->txr_si); 2942 if (txr->txr_qid == 0) { 2943 CLR(ifp->if_flags, IFF_OACTIVE); 2944 if_schedule_deferred_start(ifp); 2945 } 2946 } 2947 2948 return more; 2949 } 2950 2951 static void 2952 ixl_start(struct ifnet *ifp) 2953 { 2954 struct ixl_softc *sc; 2955 struct ixl_tx_ring *txr; 2956 2957 sc = ifp->if_softc; 2958 txr = sc->sc_qps[0].qp_txr; 2959 2960 mutex_enter(&txr->txr_lock); 2961 ixl_tx_common_locked(ifp, txr, false); 2962 mutex_exit(&txr->txr_lock); 2963 } 2964 2965 static inline unsigned int 2966 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m) 2967 { 2968 u_int cpuid; 2969 2970 cpuid = cpu_index(curcpu()); 2971 2972 return (unsigned int)(cpuid % sc->sc_nqueue_pairs); 2973 } 2974 2975 static int 2976 ixl_transmit(struct ifnet *ifp, struct mbuf *m) 2977 { 2978 struct ixl_softc *sc; 2979 struct ixl_tx_ring *txr; 2980 unsigned int qid; 2981 2982 sc = ifp->if_softc; 2983 qid = ixl_select_txqueue(sc, m); 2984 2985 txr = sc->sc_qps[qid].qp_txr; 2986 2987 if (__predict_false(!pcq_put(txr->txr_intrq, m))) { 2988 mutex_enter(&txr->txr_lock); 2989 txr->txr_pcqdrop.ev_count++; 2990 mutex_exit(&txr->txr_lock); 2991 2992 m_freem(m); 2993 return ENOBUFS; 2994 } 2995 2996 if (mutex_tryenter(&txr->txr_lock)) { 2997 ixl_tx_common_locked(ifp, txr, true); 2998 mutex_exit(&txr->txr_lock); 2999 } else { 3000 kpreempt_disable(); 3001 softint_schedule(txr->txr_si); 3002 kpreempt_enable(); 3003 } 3004 3005 return 0; 3006 } 3007 3008 static void 3009 ixl_deferred_transmit(void *xtxr) 3010 { 3011 struct ixl_tx_ring *txr = xtxr; 3012 struct ixl_softc *sc = txr->txr_sc; 3013 struct ifnet *ifp = &sc->sc_ec.ec_if; 3014 3015 mutex_enter(&txr->txr_lock); 3016 txr->txr_transmitdef.ev_count++; 3017 if (pcq_peek(txr->txr_intrq) != NULL) 3018 ixl_tx_common_locked(ifp, txr, true); 3019 mutex_exit(&txr->txr_lock); 3020 } 3021 3022 static struct ixl_rx_ring * 3023 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid) 3024 { 3025 struct ixl_rx_ring *rxr = NULL; 3026 struct ixl_rx_map *maps = NULL, *rxm; 3027 unsigned int i; 3028 3029 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP); 3030 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs, 3031 KM_SLEEP); 3032 3033 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem, 3034 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs, 3035 IXL_RX_QUEUE_ALIGN) != 0) 3036 goto free; 3037 3038 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3039 rxm = &maps[i]; 3040 3041 if (bus_dmamap_create(sc->sc_dmat, 3042 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0, 3043 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0) 3044 goto uncreate; 3045 3046 rxm->rxm_m = NULL; 3047 } 3048 3049 rxr->rxr_cons = rxr->rxr_prod = 0; 3050 rxr->rxr_m_head = NULL; 3051 rxr->rxr_m_tail = &rxr->rxr_m_head; 3052 rxr->rxr_maps = maps; 3053 3054 rxr->rxr_tail = I40E_QRX_TAIL(qid); 3055 rxr->rxr_qid = qid; 3056 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET); 3057 3058 return rxr; 3059 3060 uncreate: 3061 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3062 rxm = &maps[i]; 3063 3064 if (rxm->rxm_map == NULL) 3065 continue; 3066 3067 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3068 } 3069 3070 ixl_dmamem_free(sc, &rxr->rxr_mem); 3071 free: 3072 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3073 kmem_free(rxr, sizeof(*rxr)); 3074 3075 return NULL; 3076 } 3077 3078 static void 3079 
ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3080 { 3081 struct ixl_rx_map *maps, *rxm; 3082 bus_dmamap_t map; 3083 unsigned int i; 3084 3085 maps = rxr->rxr_maps; 3086 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3087 rxm = &maps[i]; 3088 3089 if (rxm->rxm_m == NULL) 3090 continue; 3091 3092 map = rxm->rxm_map; 3093 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3094 BUS_DMASYNC_POSTWRITE); 3095 bus_dmamap_unload(sc->sc_dmat, map); 3096 3097 m_freem(rxm->rxm_m); 3098 rxm->rxm_m = NULL; 3099 } 3100 3101 m_freem(rxr->rxr_m_head); 3102 rxr->rxr_m_head = NULL; 3103 rxr->rxr_m_tail = &rxr->rxr_m_head; 3104 3105 rxr->rxr_prod = rxr->rxr_cons = 0; 3106 } 3107 3108 static int 3109 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3110 { 3111 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3112 uint32_t reg; 3113 int i; 3114 3115 for (i = 0; i < 10; i++) { 3116 reg = ixl_rd(sc, ena); 3117 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK)) 3118 return 0; 3119 3120 delaymsec(10); 3121 } 3122 3123 return ETIMEDOUT; 3124 } 3125 3126 static int 3127 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3128 { 3129 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3130 uint32_t reg; 3131 int i; 3132 3133 KASSERT(mutex_owned(&rxr->rxr_lock)); 3134 3135 for (i = 0; i < 10; i++) { 3136 reg = ixl_rd(sc, ena); 3137 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0) 3138 return 0; 3139 3140 delaymsec(10); 3141 } 3142 3143 return ETIMEDOUT; 3144 } 3145 3146 static void 3147 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3148 { 3149 struct ixl_hmc_rxq rxq; 3150 struct ifnet *ifp = &sc->sc_ec.ec_if; 3151 uint16_t rxmax; 3152 void *hmc; 3153 3154 memset(&rxq, 0, sizeof(rxq)); 3155 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN; 3156 3157 rxq.head = htole16(rxr->rxr_cons); 3158 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT); 3159 rxq.qlen = htole16(sc->sc_rx_ring_ndescs); 3160 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT); 3161 rxq.hbuff = 0; 3162 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT; 3163 rxq.dsize = IXL_HMC_RXQ_DSIZE_32; 3164 rxq.crcstrip = 1; 3165 rxq.l2sel = 1; 3166 rxq.showiv = 1; 3167 rxq.rxmax = htole16(rxmax); 3168 rxq.tphrdesc_ena = 0; 3169 rxq.tphwdesc_ena = 0; 3170 rxq.tphdata_ena = 0; 3171 rxq.tphhead_ena = 0; 3172 rxq.lrxqthresh = 0; 3173 rxq.prefena = 1; 3174 3175 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3176 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3177 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, 3178 __arraycount(ixl_hmc_pack_rxq)); 3179 } 3180 3181 static void 3182 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3183 { 3184 void *hmc; 3185 3186 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3187 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3188 rxr->rxr_cons = rxr->rxr_prod = 0; 3189 } 3190 3191 static void 3192 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3193 { 3194 struct ixl_rx_map *maps, *rxm; 3195 unsigned int i; 3196 3197 maps = rxr->rxr_maps; 3198 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3199 rxm = &maps[i]; 3200 3201 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3202 } 3203 3204 ixl_dmamem_free(sc, &rxr->rxr_mem); 3205 mutex_destroy(&rxr->rxr_lock); 3206 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3207 kmem_free(rxr, sizeof(*rxr)); 3208 } 3209 3210 static inline void 3211 ixl_rx_csum(struct mbuf *m, uint64_t qword) 3212 { 3213 int flags_mask; 3214 3215 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) { 3216 /* No L3 or L4 checksum was calculated 
*/ 3217 return; 3218 } 3219 3220 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) { 3221 case IXL_RX_DESC_PTYPE_IPV4FRAG: 3222 case IXL_RX_DESC_PTYPE_IPV4: 3223 case IXL_RX_DESC_PTYPE_SCTPV4: 3224 case IXL_RX_DESC_PTYPE_ICMPV4: 3225 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3226 break; 3227 case IXL_RX_DESC_PTYPE_TCPV4: 3228 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3229 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD; 3230 break; 3231 case IXL_RX_DESC_PTYPE_UDPV4: 3232 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3233 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD; 3234 break; 3235 case IXL_RX_DESC_PTYPE_TCPV6: 3236 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD; 3237 break; 3238 case IXL_RX_DESC_PTYPE_UDPV6: 3239 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD; 3240 break; 3241 default: 3242 flags_mask = 0; 3243 } 3244 3245 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 | 3246 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)); 3247 3248 if (ISSET(qword, IXL_RX_DESC_IPE)) { 3249 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD); 3250 } 3251 3252 if (ISSET(qword, IXL_RX_DESC_L4E)) { 3253 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD); 3254 } 3255 } 3256 3257 static int 3258 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit) 3259 { 3260 struct ifnet *ifp = &sc->sc_ec.ec_if; 3261 struct ixl_rx_wb_desc_32 *ring, *rxd; 3262 struct ixl_rx_map *rxm; 3263 bus_dmamap_t map; 3264 unsigned int cons, prod; 3265 struct mbuf *m; 3266 uint64_t word, word0; 3267 unsigned int len; 3268 unsigned int mask; 3269 int done = 0, more = 0; 3270 3271 KASSERT(mutex_owned(&rxr->rxr_lock)); 3272 3273 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 3274 return 0; 3275 3276 prod = rxr->rxr_prod; 3277 cons = rxr->rxr_cons; 3278 3279 if (cons == prod) 3280 return 0; 3281 3282 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3283 0, IXL_DMA_LEN(&rxr->rxr_mem), 3284 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3285 3286 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3287 mask = sc->sc_rx_ring_ndescs - 1; 3288 3289 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 3290 3291 do { 3292 if (rxlimit-- <= 0) { 3293 more = 1; 3294 break; 3295 } 3296 3297 rxd = &ring[cons]; 3298 3299 word = le64toh(rxd->qword1); 3300 3301 if (!ISSET(word, IXL_RX_DESC_DD)) 3302 break; 3303 3304 rxm = &rxr->rxr_maps[cons]; 3305 3306 map = rxm->rxm_map; 3307 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3308 BUS_DMASYNC_POSTREAD); 3309 bus_dmamap_unload(sc->sc_dmat, map); 3310 3311 m = rxm->rxm_m; 3312 rxm->rxm_m = NULL; 3313 3314 KASSERT(m != NULL); 3315 3316 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT; 3317 m->m_len = len; 3318 m->m_pkthdr.len = 0; 3319 3320 m->m_next = NULL; 3321 *rxr->rxr_m_tail = m; 3322 rxr->rxr_m_tail = &m->m_next; 3323 3324 m = rxr->rxr_m_head; 3325 m->m_pkthdr.len += len; 3326 3327 if (ISSET(word, IXL_RX_DESC_EOP)) { 3328 word0 = le64toh(rxd->qword0); 3329 3330 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) { 3331 vlan_set_tag(m, 3332 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK)); 3333 } 3334 3335 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0) 3336 ixl_rx_csum(m, word); 3337 3338 if (!ISSET(word, 3339 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) { 3340 m_set_rcvif(m, ifp); 3341 if_statinc_ref(nsr, if_ipackets); 3342 if_statadd_ref(nsr, if_ibytes, 3343 m->m_pkthdr.len); 3344 if_percpuq_enqueue(sc->sc_ipq, m); 3345 } else { 3346 if_statinc_ref(nsr, if_ierrors); 3347 m_freem(m); 3348 } 3349 3350 rxr->rxr_m_head = NULL; 3351 rxr->rxr_m_tail = &rxr->rxr_m_head; 3352 } 3353 
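		/* the ring size is a power of two, so wrap the index with a mask */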
3354 cons++; 3355 cons &= mask; 3356 3357 done = 1; 3358 } while (cons != prod); 3359 3360 if (done) { 3361 rxr->rxr_cons = cons; 3362 if (ixl_rxfill(sc, rxr) == -1) 3363 if_statinc_ref(nsr, if_iqdrops); 3364 } 3365 3366 IF_STAT_PUTREF(ifp); 3367 3368 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3369 0, IXL_DMA_LEN(&rxr->rxr_mem), 3370 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3371 3372 return more; 3373 } 3374 3375 static int 3376 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3377 { 3378 struct ixl_rx_rd_desc_32 *ring, *rxd; 3379 struct ixl_rx_map *rxm; 3380 bus_dmamap_t map; 3381 struct mbuf *m; 3382 unsigned int prod; 3383 unsigned int slots; 3384 unsigned int mask; 3385 int post = 0, error = 0; 3386 3387 KASSERT(mutex_owned(&rxr->rxr_lock)); 3388 3389 prod = rxr->rxr_prod; 3390 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons, 3391 sc->sc_rx_ring_ndescs); 3392 3393 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3394 mask = sc->sc_rx_ring_ndescs - 1; 3395 3396 if (__predict_false(slots <= 0)) 3397 return -1; 3398 3399 do { 3400 rxm = &rxr->rxr_maps[prod]; 3401 3402 MGETHDR(m, M_DONTWAIT, MT_DATA); 3403 if (m == NULL) { 3404 rxr->rxr_mgethdr_failed.ev_count++; 3405 error = -1; 3406 break; 3407 } 3408 3409 MCLGET(m, M_DONTWAIT); 3410 if (!ISSET(m->m_flags, M_EXT)) { 3411 rxr->rxr_mgetcl_failed.ev_count++; 3412 error = -1; 3413 m_freem(m); 3414 break; 3415 } 3416 3417 m->m_len = m->m_pkthdr.len = MCLBYTES; 3418 m_adj(m, ETHER_ALIGN); 3419 3420 map = rxm->rxm_map; 3421 3422 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 3423 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) { 3424 rxr->rxr_mbuf_load_failed.ev_count++; 3425 error = -1; 3426 m_freem(m); 3427 break; 3428 } 3429 3430 rxm->rxm_m = m; 3431 3432 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3433 BUS_DMASYNC_PREREAD); 3434 3435 rxd = &ring[prod]; 3436 3437 rxd->paddr = htole64(map->dm_segs[0].ds_addr); 3438 rxd->haddr = htole64(0); 3439 3440 prod++; 3441 prod &= mask; 3442 3443 post = 1; 3444 3445 } while (--slots); 3446 3447 if (post) { 3448 rxr->rxr_prod = prod; 3449 ixl_wr(sc, rxr->rxr_tail, prod); 3450 } 3451 3452 return error; 3453 } 3454 3455 static inline int 3456 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp, 3457 u_int txlimit, struct evcnt *txevcnt, 3458 u_int rxlimit, struct evcnt *rxevcnt) 3459 { 3460 struct ixl_tx_ring *txr = qp->qp_txr; 3461 struct ixl_rx_ring *rxr = qp->qp_rxr; 3462 int txmore, rxmore; 3463 int rv; 3464 3465 mutex_enter(&txr->txr_lock); 3466 txevcnt->ev_count++; 3467 txmore = ixl_txeof(sc, txr, txlimit); 3468 mutex_exit(&txr->txr_lock); 3469 3470 mutex_enter(&rxr->rxr_lock); 3471 rxevcnt->ev_count++; 3472 rxmore = ixl_rxeof(sc, rxr, rxlimit); 3473 mutex_exit(&rxr->rxr_lock); 3474 3475 rv = txmore | (rxmore << 1); 3476 3477 return rv; 3478 } 3479 3480 static void 3481 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp) 3482 { 3483 3484 if (qp->qp_workqueue) 3485 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL); 3486 else 3487 softint_schedule(qp->qp_si); 3488 } 3489 3490 static int 3491 ixl_intr(void *xsc) 3492 { 3493 struct ixl_softc *sc = xsc; 3494 struct ixl_tx_ring *txr; 3495 struct ixl_rx_ring *rxr; 3496 uint32_t icr, rxintr, txintr; 3497 int rv = 0; 3498 unsigned int i; 3499 3500 KASSERT(sc != NULL); 3501 3502 ixl_enable_other_intr(sc); 3503 icr = ixl_rd(sc, I40E_PFINT_ICR0); 3504 3505 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) { 3506 atomic_inc_64(&sc->sc_event_atq.ev_count); 3507 ixl_atq_done(sc); 3508 
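		/* defer ARQ (admin receive queue) event processing to the workqueue */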
		ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
		rv = 1;
	}

	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
		atomic_inc_64(&sc->sc_event_link.ev_count);
		ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
		rv = 1;
	}

	rxintr = icr & I40E_INTR_NOTX_RX_MASK;
	txintr = icr & I40E_INTR_NOTX_TX_MASK;

	if (txintr || rxintr) {
		for (i = 0; i < sc->sc_nqueue_pairs; i++) {
			txr = sc->sc_qps[i].qp_txr;
			rxr = sc->sc_qps[i].qp_rxr;

			ixl_handle_queue_common(sc, &sc->sc_qps[i],
			    IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
			    IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
		}
		rv = 1;
	}

	return rv;
}

static int
ixl_queue_intr(void *xqp)
{
	struct ixl_queue_pair *qp = xqp;
	struct ixl_tx_ring *txr = qp->qp_txr;
	struct ixl_rx_ring *rxr = qp->qp_rxr;
	struct ixl_softc *sc = qp->qp_sc;
	u_int txlimit, rxlimit;
	int more;

	txlimit = sc->sc_tx_intr_process_limit;
	rxlimit = sc->sc_rx_intr_process_limit;
	qp->qp_workqueue = sc->sc_txrx_workqueue;

	more = ixl_handle_queue_common(sc, qp,
	    txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);

	if (more != 0) {
		ixl_sched_handle_queue(sc, qp);
	} else {
		/* for ALTQ */
		if (txr->txr_qid == 0)
			if_schedule_deferred_start(&sc->sc_ec.ec_if);
		softint_schedule(txr->txr_si);

		ixl_enable_queue_intr(sc, qp);
	}

	return 1;
}

static void
ixl_handle_queue_wk(struct work *wk, void *xsc)
{
	struct ixl_queue_pair *qp;

	qp = container_of(wk, struct ixl_queue_pair, qp_work);
	ixl_handle_queue(qp);
}

static void
ixl_handle_queue(void *xqp)
{
	struct ixl_queue_pair *qp = xqp;
	struct ixl_softc *sc = qp->qp_sc;
	struct ixl_tx_ring *txr = qp->qp_txr;
	struct ixl_rx_ring *rxr = qp->qp_rxr;
	u_int txlimit, rxlimit;
	int more;

	txlimit = sc->sc_tx_process_limit;
	rxlimit = sc->sc_rx_process_limit;

	more = ixl_handle_queue_common(sc, qp,
	    txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);

	if (more != 0)
		ixl_sched_handle_queue(sc, qp);
	else
		ixl_enable_queue_intr(sc, qp);
}

static inline void
ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
{
	uint32_t hmc_idx, hmc_isvf;
	uint32_t hmc_errtype, hmc_objtype, hmc_data;

	hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
	hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
	hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
	hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
	hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
	hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
	hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
	hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
	hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);

	device_printf(sc->sc_dev,
	    "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
	    hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
}

static int
ixl_other_intr(void *xsc)
{
	struct ixl_softc *sc = xsc;
	uint32_t icr, mask, reg;
	int rv = 0;

	icr = ixl_rd(sc, I40E_PFINT_ICR0);
	mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);

	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
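		/* admin queue interrupt: reap ATQ completions and defer ARQ events */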
atomic_inc_64(&sc->sc_event_atq.ev_count); 3631 ixl_atq_done(sc); 3632 ixl_work_add(sc->sc_workq, &sc->sc_arq_task); 3633 rv = 1; 3634 } 3635 3636 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) { 3637 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3638 device_printf(sc->sc_dev, "link stat changed\n"); 3639 3640 atomic_inc_64(&sc->sc_event_link.ev_count); 3641 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task); 3642 rv = 1; 3643 } 3644 3645 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) { 3646 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK); 3647 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 3648 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK; 3649 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3650 3651 device_printf(sc->sc_dev, "GRST: %s\n", 3652 reg == I40E_RESET_CORER ? "CORER" : 3653 reg == I40E_RESET_GLOBR ? "GLOBR" : 3654 reg == I40E_RESET_EMPR ? "EMPR" : 3655 "POR"); 3656 } 3657 3658 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK)) 3659 atomic_inc_64(&sc->sc_event_ecc_err.ev_count); 3660 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)) 3661 atomic_inc_64(&sc->sc_event_pci_exception.ev_count); 3662 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK)) 3663 atomic_inc_64(&sc->sc_event_crit_err.ev_count); 3664 3665 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) { 3666 CLR(mask, IXL_ICR0_CRIT_ERR_MASK); 3667 device_printf(sc->sc_dev, "critical error\n"); 3668 } 3669 3670 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) { 3671 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO); 3672 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK)) 3673 ixl_print_hmc_error(sc, reg); 3674 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0); 3675 } 3676 3677 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask); 3678 ixl_flush(sc); 3679 ixl_enable_other_intr(sc); 3680 return rv; 3681 } 3682 3683 static void 3684 ixl_get_link_status_done(struct ixl_softc *sc, 3685 const struct ixl_aq_desc *iaq) 3686 { 3687 struct ixl_aq_desc iaq_buf; 3688 3689 memcpy(&iaq_buf, iaq, sizeof(iaq_buf)); 3690 3691 /* 3692 * The lock can be released here 3693 * because there is no post processing about ATQ 3694 */ 3695 mutex_exit(&sc->sc_atq_lock); 3696 ixl_link_state_update(sc, &iaq_buf); 3697 mutex_enter(&sc->sc_atq_lock); 3698 } 3699 3700 static void 3701 ixl_get_link_status(void *xsc) 3702 { 3703 struct ixl_softc *sc = xsc; 3704 struct ixl_aq_desc *iaq; 3705 struct ixl_aq_link_param *param; 3706 int error; 3707 3708 mutex_enter(&sc->sc_atq_lock); 3709 3710 iaq = &sc->sc_link_state_atq.iatq_desc; 3711 memset(iaq, 0, sizeof(*iaq)); 3712 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 3713 param = (struct ixl_aq_link_param *)iaq->iaq_param; 3714 param->notify = IXL_AQ_LINK_NOTIFY; 3715 3716 error = ixl_atq_exec_locked(sc, &sc->sc_link_state_atq); 3717 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 3718 3719 if (error == 0) { 3720 ixl_get_link_status_done(sc, iaq); 3721 } 3722 3723 mutex_exit(&sc->sc_atq_lock); 3724 } 3725 3726 static void 3727 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3728 { 3729 struct ifnet *ifp = &sc->sc_ec.ec_if; 3730 int link_state; 3731 3732 mutex_enter(&sc->sc_cfg_lock); 3733 link_state = ixl_set_link_status_locked(sc, iaq); 3734 mutex_exit(&sc->sc_cfg_lock); 3735 3736 if (ifp->if_link_state != link_state) 3737 if_link_state_change(ifp, link_state); 3738 3739 if (link_state != LINK_STATE_DOWN) { 3740 kpreempt_disable(); 3741 if_schedule_deferred_start(ifp); 3742 kpreempt_enable(); 3743 } 3744 } 3745 3746 static void 3747 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq, 3748 const char *msg) 
3749 { 3750 char buf[512]; 3751 size_t len; 3752 3753 len = sizeof(buf); 3754 buf[--len] = '\0'; 3755 3756 device_printf(sc->sc_dev, "%s\n", msg); 3757 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags)); 3758 device_printf(sc->sc_dev, "flags %s opcode %04x\n", 3759 buf, le16toh(iaq->iaq_opcode)); 3760 device_printf(sc->sc_dev, "datalen %u retval %u\n", 3761 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval)); 3762 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie); 3763 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n", 3764 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]), 3765 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3])); 3766 } 3767 3768 static void 3769 ixl_arq(void *xsc) 3770 { 3771 struct ixl_softc *sc = xsc; 3772 struct ixl_aq_desc *arq, *iaq; 3773 struct ixl_aq_buf *aqb; 3774 unsigned int cons = sc->sc_arq_cons; 3775 unsigned int prod; 3776 int done = 0; 3777 3778 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) & 3779 sc->sc_aq_regs->arq_head_mask; 3780 3781 if (cons == prod) 3782 goto done; 3783 3784 arq = IXL_DMA_KVA(&sc->sc_arq); 3785 3786 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3787 0, IXL_DMA_LEN(&sc->sc_arq), 3788 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3789 3790 do { 3791 iaq = &arq[cons]; 3792 aqb = sc->sc_arq_live[cons]; 3793 3794 KASSERT(aqb != NULL); 3795 3796 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN, 3797 BUS_DMASYNC_POSTREAD); 3798 3799 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3800 ixl_aq_dump(sc, iaq, "arq event"); 3801 3802 switch (iaq->iaq_opcode) { 3803 case htole16(IXL_AQ_OP_PHY_LINK_STATUS): 3804 ixl_link_state_update(sc, iaq); 3805 break; 3806 } 3807 3808 memset(iaq, 0, sizeof(*iaq)); 3809 sc->sc_arq_live[cons] = NULL; 3810 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry); 3811 3812 cons++; 3813 cons &= IXL_AQ_MASK; 3814 3815 done = 1; 3816 } while (cons != prod); 3817 3818 if (done) { 3819 sc->sc_arq_cons = cons; 3820 ixl_arq_fill(sc); 3821 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3822 0, IXL_DMA_LEN(&sc->sc_arq), 3823 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3824 } 3825 3826 done: 3827 ixl_enable_other_intr(sc); 3828 } 3829 3830 static void 3831 ixl_atq_set(struct ixl_atq *iatq, 3832 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *)) 3833 { 3834 3835 iatq->iatq_fn = fn; 3836 } 3837 3838 static int 3839 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3840 { 3841 struct ixl_aq_desc *atq, *slot; 3842 unsigned int prod, cons, prod_next; 3843 3844 /* assert locked */ 3845 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3846 3847 atq = IXL_DMA_KVA(&sc->sc_atq); 3848 prod = sc->sc_atq_prod; 3849 cons = sc->sc_atq_cons; 3850 prod_next = (prod +1) & IXL_AQ_MASK; 3851 3852 if (cons == prod_next) 3853 return ENOMEM; 3854 3855 slot = &atq[prod]; 3856 3857 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3858 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3859 3860 KASSERT(iatq->iatq_fn != NULL); 3861 *slot = iatq->iatq_desc; 3862 slot->iaq_cookie = (uint64_t)((intptr_t)iatq); 3863 3864 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3865 ixl_aq_dump(sc, slot, "atq command"); 3866 3867 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3868 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3869 3870 sc->sc_atq_prod = prod_next; 3871 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod); 3872 3873 return 0; 3874 } 3875 3876 static void 3877 ixl_atq_done_locked(struct ixl_softc *sc) 3878 { 3879 struct ixl_aq_desc *atq, *slot; 3880 
struct ixl_atq *iatq; 3881 unsigned int cons; 3882 unsigned int prod; 3883 3884 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3885 3886 prod = sc->sc_atq_prod; 3887 cons = sc->sc_atq_cons; 3888 3889 if (prod == cons) 3890 return; 3891 3892 atq = IXL_DMA_KVA(&sc->sc_atq); 3893 3894 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3895 0, IXL_DMA_LEN(&sc->sc_atq), 3896 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3897 3898 do { 3899 slot = &atq[cons]; 3900 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD))) 3901 break; 3902 3903 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie); 3904 iatq->iatq_desc = *slot; 3905 3906 memset(slot, 0, sizeof(*slot)); 3907 3908 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3909 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response"); 3910 3911 (*iatq->iatq_fn)(sc, &iatq->iatq_desc); 3912 3913 cons++; 3914 cons &= IXL_AQ_MASK; 3915 } while (cons != prod); 3916 3917 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3918 0, IXL_DMA_LEN(&sc->sc_atq), 3919 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3920 3921 sc->sc_atq_cons = cons; 3922 } 3923 3924 static void 3925 ixl_atq_done(struct ixl_softc *sc) 3926 { 3927 3928 mutex_enter(&sc->sc_atq_lock); 3929 ixl_atq_done_locked(sc); 3930 mutex_exit(&sc->sc_atq_lock); 3931 } 3932 3933 static void 3934 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3935 { 3936 3937 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3938 3939 cv_signal(&sc->sc_atq_cv); 3940 } 3941 3942 static int 3943 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq) 3944 { 3945 int error; 3946 3947 mutex_enter(&sc->sc_atq_lock); 3948 error = ixl_atq_exec_locked(sc, iatq); 3949 mutex_exit(&sc->sc_atq_lock); 3950 3951 return error; 3952 } 3953 3954 static int 3955 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3956 { 3957 int error; 3958 3959 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3960 KASSERT(iatq->iatq_desc.iaq_cookie == 0); 3961 3962 ixl_atq_set(iatq, ixl_wakeup); 3963 3964 error = ixl_atq_post_locked(sc, iatq); 3965 if (error) 3966 return error; 3967 3968 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock, 3969 IXL_ATQ_EXEC_TIMEOUT); 3970 3971 return error; 3972 } 3973 3974 static int 3975 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm) 3976 { 3977 struct ixl_aq_desc *atq, *slot; 3978 unsigned int prod; 3979 unsigned int t = 0; 3980 3981 mutex_enter(&sc->sc_atq_lock); 3982 3983 atq = IXL_DMA_KVA(&sc->sc_atq); 3984 prod = sc->sc_atq_prod; 3985 slot = atq + prod; 3986 3987 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3988 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3989 3990 *slot = *iaq; 3991 slot->iaq_flags |= htole16(IXL_AQ_SI); 3992 3993 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3994 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3995 3996 prod++; 3997 prod &= IXL_AQ_MASK; 3998 sc->sc_atq_prod = prod; 3999 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod); 4000 4001 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) { 4002 delaymsec(1); 4003 4004 if (t++ > tm) { 4005 mutex_exit(&sc->sc_atq_lock); 4006 return ETIMEDOUT; 4007 } 4008 } 4009 4010 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 4011 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD); 4012 *iaq = *slot; 4013 memset(slot, 0, sizeof(*slot)); 4014 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 4015 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD); 4016 4017 sc->sc_atq_cons = prod; 4018 4019 mutex_exit(&sc->sc_atq_lock); 4020 4021 return 0; 4022 } 4023 4024 static int 4025 
ixl_get_version(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;
	uint32_t fwbuild, fwver, apiver;
	uint16_t api_maj_ver, api_min_ver;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);

	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
		return ETIMEDOUT;
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
		return EIO;

	fwbuild = le32toh(iaq.iaq_param[1]);
	fwver = le32toh(iaq.iaq_param[2]);
	apiver = le32toh(iaq.iaq_param[3]);

	api_maj_ver = (uint16_t)apiver;
	api_min_ver = (uint16_t)(apiver >> 16);

	aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
	    (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);

	if (sc->sc_mac_type == I40E_MAC_X722) {
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
		    IXL_SC_AQ_FLAG_NVMREAD);
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
	}

#define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
	if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
	}
#undef IXL_API_VER

	return 0;
}

static int
ixl_get_nvm_version(struct ixl_softc *sc)
{
	uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
	uint32_t eetrack, oem;
	uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
	uint8_t oem_ver, oem_patch;

	nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
	ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);

	nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
	nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
	eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
	oem = ((uint32_t)oem_hi << 16) | oem_lo;
	oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
	oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
	oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);

	aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
	    nvm_maj_ver, nvm_min_ver, eetrack,
	    oem_ver, oem_build, oem_patch);

	return 0;
}

static int
ixl_pxe_clear(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;
	int rv;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
	iaq.iaq_param[0] = htole32(0x2);

	rv = ixl_atq_poll(sc, &iaq, 250);

	ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);

	if (rv != 0)
		return ETIMEDOUT;

	switch (iaq.iaq_retval) {
	case htole16(IXL_AQ_RC_OK):
	case htole16(IXL_AQ_RC_EEXIST):
		break;
	default:
		return EIO;
	}

	return 0;
}

static int
ixl_lldp_shut(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
return -1; 4139 } 4140 4141 switch (iaq.iaq_retval) { 4142 case htole16(IXL_AQ_RC_EMODE): 4143 case htole16(IXL_AQ_RC_EPERM): 4144 /* ignore silently */ 4145 default: 4146 break; 4147 } 4148 4149 return 0; 4150 } 4151 4152 static void 4153 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap) 4154 { 4155 uint16_t id; 4156 uint32_t number, logical_id; 4157 4158 id = le16toh(cap->cap_id); 4159 number = le32toh(cap->number); 4160 logical_id = le32toh(cap->logical_id); 4161 4162 switch (id) { 4163 case IXL_AQ_CAP_RSS: 4164 sc->sc_rss_table_size = number; 4165 sc->sc_rss_table_entry_width = logical_id; 4166 break; 4167 case IXL_AQ_CAP_RXQ: 4168 case IXL_AQ_CAP_TXQ: 4169 sc->sc_nqueue_pairs_device = MIN(number, 4170 sc->sc_nqueue_pairs_device); 4171 break; 4172 } 4173 } 4174 4175 static int 4176 ixl_get_hw_capabilities(struct ixl_softc *sc) 4177 { 4178 struct ixl_dmamem idm; 4179 struct ixl_aq_desc iaq; 4180 struct ixl_aq_capability *caps; 4181 size_t i, ncaps; 4182 bus_size_t caps_size; 4183 uint16_t status; 4184 int rv; 4185 4186 caps_size = sizeof(caps[0]) * 40; 4187 memset(&iaq, 0, sizeof(iaq)); 4188 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP); 4189 4190 do { 4191 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) { 4192 return -1; 4193 } 4194 4195 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4196 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4197 iaq.iaq_datalen = htole16(caps_size); 4198 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4199 4200 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4201 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD); 4202 4203 rv = ixl_atq_poll(sc, &iaq, 250); 4204 4205 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4206 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD); 4207 4208 if (rv != 0) { 4209 aprint_error(", HW capabilities timeout\n"); 4210 goto done; 4211 } 4212 4213 status = le16toh(iaq.iaq_retval); 4214 4215 if (status == IXL_AQ_RC_ENOMEM) { 4216 caps_size = le16toh(iaq.iaq_datalen); 4217 ixl_dmamem_free(sc, &idm); 4218 } 4219 } while (status == IXL_AQ_RC_ENOMEM); 4220 4221 if (status != IXL_AQ_RC_OK) { 4222 aprint_error(", HW capabilities error\n"); 4223 goto done; 4224 } 4225 4226 caps = IXL_DMA_KVA(&idm); 4227 ncaps = le16toh(iaq.iaq_param[1]); 4228 4229 for (i = 0; i < ncaps; i++) { 4230 ixl_parse_hw_capability(sc, &caps[i]); 4231 } 4232 4233 done: 4234 ixl_dmamem_free(sc, &idm); 4235 return rv; 4236 } 4237 4238 static int 4239 ixl_get_mac(struct ixl_softc *sc) 4240 { 4241 struct ixl_dmamem idm; 4242 struct ixl_aq_desc iaq; 4243 struct ixl_aq_mac_addresses *addrs; 4244 int rv; 4245 4246 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) { 4247 aprint_error(", unable to allocate mac addresses\n"); 4248 return -1; 4249 } 4250 4251 memset(&iaq, 0, sizeof(iaq)); 4252 iaq.iaq_flags = htole16(IXL_AQ_BUF); 4253 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ); 4254 iaq.iaq_datalen = htole16(sizeof(*addrs)); 4255 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4256 4257 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4258 BUS_DMASYNC_PREREAD); 4259 4260 rv = ixl_atq_poll(sc, &iaq, 250); 4261 4262 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4263 BUS_DMASYNC_POSTREAD); 4264 4265 if (rv != 0) { 4266 aprint_error(", MAC ADDRESS READ timeout\n"); 4267 rv = -1; 4268 goto done; 4269 } 4270 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4271 aprint_error(", MAC ADDRESS READ error\n"); 4272 rv = -1; 4273 goto done; 4274 } 4275 4276 addrs = IXL_DMA_KVA(&idm); 4277 if (!ISSET(iaq.iaq_param[0], 
htole32(IXL_AQ_MAC_PORT_VALID))) { 4278 printf(", port address is not valid\n"); 4279 goto done; 4280 } 4281 4282 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN); 4283 rv = 0; 4284 4285 done: 4286 ixl_dmamem_free(sc, &idm); 4287 return rv; 4288 } 4289 4290 static int 4291 ixl_get_switch_config(struct ixl_softc *sc) 4292 { 4293 struct ixl_dmamem idm; 4294 struct ixl_aq_desc iaq; 4295 struct ixl_aq_switch_config *hdr; 4296 struct ixl_aq_switch_config_element *elms, *elm; 4297 unsigned int nelm, i; 4298 int rv; 4299 4300 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4301 aprint_error_dev(sc->sc_dev, 4302 "unable to allocate switch config buffer\n"); 4303 return -1; 4304 } 4305 4306 memset(&iaq, 0, sizeof(iaq)); 4307 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4308 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4309 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG); 4310 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN); 4311 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4312 4313 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4314 BUS_DMASYNC_PREREAD); 4315 4316 rv = ixl_atq_poll(sc, &iaq, 250); 4317 4318 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4319 BUS_DMASYNC_POSTREAD); 4320 4321 if (rv != 0) { 4322 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n"); 4323 rv = -1; 4324 goto done; 4325 } 4326 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4327 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n"); 4328 rv = -1; 4329 goto done; 4330 } 4331 4332 hdr = IXL_DMA_KVA(&idm); 4333 elms = (struct ixl_aq_switch_config_element *)(hdr + 1); 4334 4335 nelm = le16toh(hdr->num_reported); 4336 if (nelm < 1) { 4337 aprint_error_dev(sc->sc_dev, "no switch config available\n"); 4338 rv = -1; 4339 goto done; 4340 } 4341 4342 for (i = 0; i < nelm; i++) { 4343 elm = &elms[i]; 4344 4345 aprint_debug_dev(sc->sc_dev, 4346 "type %x revision %u seid %04x\n", 4347 elm->type, elm->revision, le16toh(elm->seid)); 4348 aprint_debug_dev(sc->sc_dev, 4349 "uplink %04x downlink %04x\n", 4350 le16toh(elm->uplink_seid), 4351 le16toh(elm->downlink_seid)); 4352 aprint_debug_dev(sc->sc_dev, 4353 "conntype %x scheduler %04x extra %04x\n", 4354 elm->connection_type, 4355 le16toh(elm->scheduler_id), 4356 le16toh(elm->element_info)); 4357 } 4358 4359 elm = &elms[0]; 4360 4361 sc->sc_uplink_seid = elm->uplink_seid; 4362 sc->sc_downlink_seid = elm->downlink_seid; 4363 sc->sc_seid = elm->seid; 4364 4365 if ((sc->sc_uplink_seid == htole16(0)) != 4366 (sc->sc_downlink_seid == htole16(0))) { 4367 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n"); 4368 rv = -1; 4369 goto done; 4370 } 4371 4372 done: 4373 ixl_dmamem_free(sc, &idm); 4374 return rv; 4375 } 4376 4377 static int 4378 ixl_phy_mask_ints(struct ixl_softc *sc) 4379 { 4380 struct ixl_aq_desc iaq; 4381 4382 memset(&iaq, 0, sizeof(iaq)); 4383 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK); 4384 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK & 4385 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL | 4386 IXL_AQ_PHY_EV_MEDIA_NA)); 4387 4388 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4389 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n"); 4390 return -1; 4391 } 4392 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4393 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n"); 4394 return -1; 4395 } 4396 4397 return 0; 4398 } 4399 4400 static int 4401 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm) 4402 { 4403 struct ixl_aq_desc iaq; 4404 int rv; 4405 4406 memset(&iaq, 0, 
sizeof(iaq)); 4407 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4408 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4409 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES); 4410 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm)); 4411 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT); 4412 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 4413 4414 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4415 BUS_DMASYNC_PREREAD); 4416 4417 rv = ixl_atq_poll(sc, &iaq, 250); 4418 4419 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4420 BUS_DMASYNC_POSTREAD); 4421 4422 if (rv != 0) 4423 return -1; 4424 4425 return le16toh(iaq.iaq_retval); 4426 } 4427 4428 static int 4429 ixl_get_phy_info(struct ixl_softc *sc) 4430 { 4431 struct ixl_dmamem idm; 4432 struct ixl_aq_phy_abilities *phy; 4433 int rv; 4434 4435 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4436 aprint_error_dev(sc->sc_dev, 4437 "unable to allocate phy abilities buffer\n"); 4438 return -1; 4439 } 4440 4441 rv = ixl_get_phy_abilities(sc, &idm); 4442 switch (rv) { 4443 case -1: 4444 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n"); 4445 goto done; 4446 case IXL_AQ_RC_OK: 4447 break; 4448 case IXL_AQ_RC_EIO: 4449 aprint_error_dev(sc->sc_dev,"unable to query phy types\n"); 4450 goto done; 4451 default: 4452 aprint_error_dev(sc->sc_dev, 4453 "GET PHY ABILITIIES error %u\n", rv); 4454 goto done; 4455 } 4456 4457 phy = IXL_DMA_KVA(&idm); 4458 4459 sc->sc_phy_types = le32toh(phy->phy_type); 4460 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32; 4461 4462 sc->sc_phy_abilities = phy->abilities; 4463 sc->sc_phy_linkspeed = phy->link_speed; 4464 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info & 4465 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS | 4466 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS); 4467 sc->sc_eee_cap = phy->eee_capability; 4468 sc->sc_eeer_val = phy->eeer_val; 4469 sc->sc_d3_lpan = phy->d3_lpan; 4470 4471 rv = 0; 4472 4473 done: 4474 ixl_dmamem_free(sc, &idm); 4475 return rv; 4476 } 4477 4478 static int 4479 ixl_set_phy_config(struct ixl_softc *sc, 4480 uint8_t link_speed, uint8_t abilities, bool polling) 4481 { 4482 struct ixl_aq_phy_param *param; 4483 struct ixl_atq iatq; 4484 struct ixl_aq_desc *iaq; 4485 int error; 4486 4487 memset(&iatq, 0, sizeof(iatq)); 4488 4489 iaq = &iatq.iatq_desc; 4490 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG); 4491 param = (struct ixl_aq_phy_param *)&iaq->iaq_param; 4492 param->phy_types = htole32((uint32_t)sc->sc_phy_types); 4493 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32); 4494 param->link_speed = link_speed; 4495 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK; 4496 param->fec_cfg = sc->sc_phy_fec_cfg; 4497 param->eee_capability = sc->sc_eee_cap; 4498 param->eeer_val = sc->sc_eeer_val; 4499 param->d3_lpan = sc->sc_d3_lpan; 4500 4501 if (polling) 4502 error = ixl_atq_poll(sc, iaq, 250); 4503 else 4504 error = ixl_atq_exec(sc, &iatq); 4505 4506 if (error != 0) 4507 return error; 4508 4509 switch (le16toh(iaq->iaq_retval)) { 4510 case IXL_AQ_RC_OK: 4511 break; 4512 case IXL_AQ_RC_EPERM: 4513 return EPERM; 4514 default: 4515 return EIO; 4516 } 4517 4518 return 0; 4519 } 4520 4521 static int 4522 ixl_set_phy_autoselect(struct ixl_softc *sc) 4523 { 4524 uint8_t link_speed, abilities; 4525 4526 link_speed = sc->sc_phy_linkspeed; 4527 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO; 4528 4529 return ixl_set_phy_config(sc, link_speed, abilities, true); 4530 } 4531 4532 static int 4533 
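/*
 * Fetch the current link status from the firmware with a polled admin
 * queue command and translate it into ifmedia/link state through
 * ixl_set_link_status_locked().
 */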
ixl_get_link_status_poll(struct ixl_softc *sc, int *l) 4534 { 4535 struct ixl_aq_desc iaq; 4536 struct ixl_aq_link_param *param; 4537 int link; 4538 4539 memset(&iaq, 0, sizeof(iaq)); 4540 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 4541 param = (struct ixl_aq_link_param *)iaq.iaq_param; 4542 param->notify = IXL_AQ_LINK_NOTIFY; 4543 4544 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4545 return ETIMEDOUT; 4546 } 4547 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4548 return EIO; 4549 } 4550 4551 /* It is unneccessary to hold lock */ 4552 link = ixl_set_link_status_locked(sc, &iaq); 4553 4554 if (l != NULL) 4555 *l = link; 4556 4557 return 0; 4558 } 4559 4560 static int 4561 ixl_get_vsi(struct ixl_softc *sc) 4562 { 4563 struct ixl_dmamem *vsi = &sc->sc_scratch; 4564 struct ixl_aq_desc iaq; 4565 struct ixl_aq_vsi_param *param; 4566 struct ixl_aq_vsi_reply *reply; 4567 struct ixl_aq_vsi_data *data; 4568 int rv; 4569 4570 /* grumble, vsi info isn't "known" at compile time */ 4571 4572 memset(&iaq, 0, sizeof(iaq)); 4573 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4574 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4575 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS); 4576 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4577 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4578 4579 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4580 param->uplink_seid = sc->sc_seid; 4581 4582 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4583 BUS_DMASYNC_PREREAD); 4584 4585 rv = ixl_atq_poll(sc, &iaq, 250); 4586 4587 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4588 BUS_DMASYNC_POSTREAD); 4589 4590 if (rv != 0) { 4591 return ETIMEDOUT; 4592 } 4593 4594 switch (le16toh(iaq.iaq_retval)) { 4595 case IXL_AQ_RC_OK: 4596 break; 4597 case IXL_AQ_RC_ENOENT: 4598 return ENOENT; 4599 case IXL_AQ_RC_EACCES: 4600 return EACCES; 4601 default: 4602 return EIO; 4603 } 4604 4605 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param; 4606 sc->sc_vsi_number = le16toh(reply->vsi_number); 4607 data = IXL_DMA_KVA(vsi); 4608 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx); 4609 4610 return 0; 4611 } 4612 4613 static int 4614 ixl_set_vsi(struct ixl_softc *sc) 4615 { 4616 struct ixl_dmamem *vsi = &sc->sc_scratch; 4617 struct ixl_aq_desc iaq; 4618 struct ixl_aq_vsi_param *param; 4619 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi); 4620 unsigned int qnum; 4621 uint16_t val; 4622 int rv; 4623 4624 qnum = sc->sc_nqueue_pairs - 1; 4625 4626 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP | 4627 IXL_AQ_VSI_VALID_VLAN); 4628 4629 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK)); 4630 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG)); 4631 data->queue_mapping[0] = htole16(0); 4632 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) | 4633 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)); 4634 4635 val = le16toh(data->port_vlan_flags); 4636 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK); 4637 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL); 4638 4639 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) { 4640 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH); 4641 } else { 4642 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING); 4643 } 4644 4645 data->port_vlan_flags = htole16(val); 4646 4647 /* grumble, vsi info isn't "known" at compile time */ 4648 4649 memset(&iaq, 0, sizeof(iaq)); 4650 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4651 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? 
IXL_AQ_LB : 0)); 4652 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS); 4653 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4654 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4655 4656 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4657 param->uplink_seid = sc->sc_seid; 4658 4659 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4660 BUS_DMASYNC_PREWRITE); 4661 4662 rv = ixl_atq_poll(sc, &iaq, 250); 4663 4664 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4665 BUS_DMASYNC_POSTWRITE); 4666 4667 if (rv != 0) { 4668 return ETIMEDOUT; 4669 } 4670 4671 switch (le16toh(iaq.iaq_retval)) { 4672 case IXL_AQ_RC_OK: 4673 break; 4674 case IXL_AQ_RC_ENOENT: 4675 return ENOENT; 4676 case IXL_AQ_RC_EACCES: 4677 return EACCES; 4678 default: 4679 return EIO; 4680 } 4681 4682 return 0; 4683 } 4684 4685 static void 4686 ixl_set_filter_control(struct ixl_softc *sc) 4687 { 4688 uint32_t reg; 4689 4690 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0); 4691 4692 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK); 4693 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT); 4694 4695 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK); 4696 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK); 4697 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK); 4698 4699 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg); 4700 } 4701 4702 static inline void 4703 ixl_get_default_rss_key(uint32_t *buf, size_t len) 4704 { 4705 size_t cplen; 4706 uint8_t rss_seed[RSS_KEYSIZE]; 4707 4708 rss_getkey(rss_seed); 4709 memset(buf, 0, len); 4710 4711 cplen = MIN(len, sizeof(rss_seed)); 4712 memcpy(buf, rss_seed, cplen); 4713 } 4714 4715 static int 4716 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen) 4717 { 4718 struct ixl_dmamem *idm; 4719 struct ixl_atq iatq; 4720 struct ixl_aq_desc *iaq; 4721 struct ixl_aq_rss_key_param *param; 4722 struct ixl_aq_rss_key_data *data; 4723 size_t len, datalen, stdlen, extlen; 4724 uint16_t vsi_id; 4725 int rv; 4726 4727 memset(&iatq, 0, sizeof(iatq)); 4728 iaq = &iatq.iatq_desc; 4729 idm = &sc->sc_aqbuf; 4730 4731 datalen = sizeof(*data); 4732 4733 /*XXX The buf size has to be less than the size of the register */ 4734 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen); 4735 4736 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4737 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4738 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY); 4739 iaq->iaq_datalen = htole16(datalen); 4740 4741 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param; 4742 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) | 4743 IXL_AQ_RSSKEY_VSI_VALID; 4744 param->vsi_id = htole16(vsi_id); 4745 4746 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 4747 data = IXL_DMA_KVA(idm); 4748 4749 len = MIN(keylen, datalen); 4750 stdlen = MIN(sizeof(data->standard_rss_key), len); 4751 memcpy(data->standard_rss_key, key, stdlen); 4752 len = (len > stdlen) ? (len - stdlen) : 0; 4753 4754 extlen = MIN(sizeof(data->extended_hash_key), len); 4755 extlen = (stdlen < keylen) ? 
MIN(sizeof(data->extended_hash_key), keylen - stdlen) : 0;
	memcpy(data->extended_hash_key, key + stdlen, extlen);

	ixl_aq_dva(iaq, IXL_DMA_DVA(idm));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);

	rv = ixl_atq_exec(sc, &iatq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);

	if (rv != 0) {
		return ETIMEDOUT;
	}

	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
		return EIO;
	}

	return 0;
}

static int
ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
{
	struct ixl_dmamem *idm;
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	struct ixl_aq_rss_lut_param *param;
	uint16_t vsi_id;
	uint8_t *data;
	size_t dmalen;
	int rv;

	memset(&iatq, 0, sizeof(iatq));
	iaq = &iatq.iatq_desc;
	idm = &sc->sc_aqbuf;

	dmalen = MIN(lutlen, IXL_DMA_LEN(idm));

	iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
	    (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
	iaq->iaq_datalen = htole16(dmalen);

	memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
	data = IXL_DMA_KVA(idm);
	memcpy(data, lut, dmalen);
	ixl_aq_dva(iaq, IXL_DMA_DVA(idm));

	param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
	vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
	    IXL_AQ_RSSLUT_VSI_VALID;
	param->vsi_id = htole16(vsi_id);
	param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
	    IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);

	rv = ixl_atq_exec(sc, &iatq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
	    IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);

	if (rv != 0) {
		return ETIMEDOUT;
	}

	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
		return EIO;
	}

	return 0;
}

static int
ixl_register_rss_key(struct ixl_softc *sc)
{
	uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
	int rv;
	size_t i;

	ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));

	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
		rv = ixl_set_rss_key(sc, (uint8_t *)rss_seed,
		    sizeof(rss_seed));
	} else {
		rv = 0;
		for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
			ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
		}
	}

	return rv;
}

static void
ixl_register_rss_pctype(struct ixl_softc *sc)
{
	uint64_t set_hena = 0;
	uint32_t hena0, hena1;

	/*
	 * We use TCP/UDP with IPv4/IPv6 by default.
	 * Note: the device cannot use just the IP header of each
	 * TCP/UDP packet for the RSS hash calculation.
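	 * The 64-bit HENA mask chosen below is programmed as two
	 * 32-bit halves into I40E_PFQF_HENA(0) and I40E_PFQF_HENA(1).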
4865 */ 4866 if (sc->sc_mac_type == I40E_MAC_X722) 4867 set_hena = IXL_RSS_HENA_DEFAULT_X722; 4868 else 4869 set_hena = IXL_RSS_HENA_DEFAULT_XL710; 4870 4871 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0)); 4872 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1)); 4873 4874 SET(hena0, set_hena); 4875 SET(hena1, set_hena >> 32); 4876 4877 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0); 4878 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1); 4879 } 4880 4881 static int 4882 ixl_register_rss_hlut(struct ixl_softc *sc) 4883 { 4884 unsigned int qid; 4885 uint8_t hlut_buf[512], lut_mask; 4886 uint32_t *hluts; 4887 size_t i, hluts_num; 4888 int rv; 4889 4890 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1; 4891 4892 for (i = 0; i < sc->sc_rss_table_size; i++) { 4893 qid = i % sc->sc_nqueue_pairs; 4894 hlut_buf[i] = qid & lut_mask; 4895 } 4896 4897 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) { 4898 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf)); 4899 } else { 4900 rv = 0; 4901 hluts = (uint32_t *)hlut_buf; 4902 hluts_num = sc->sc_rss_table_size >> 2; 4903 for (i = 0; i < hluts_num; i++) { 4904 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]); 4905 } 4906 ixl_flush(sc); 4907 } 4908 4909 return rv; 4910 } 4911 4912 static void 4913 ixl_config_rss(struct ixl_softc *sc) 4914 { 4915 4916 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 4917 4918 ixl_register_rss_key(sc); 4919 ixl_register_rss_pctype(sc); 4920 ixl_register_rss_hlut(sc); 4921 } 4922 4923 static const struct ixl_phy_type * 4924 ixl_search_phy_type(uint8_t phy_type) 4925 { 4926 const struct ixl_phy_type *itype; 4927 uint64_t mask; 4928 unsigned int i; 4929 4930 if (phy_type >= 64) 4931 return NULL; 4932 4933 mask = 1ULL << phy_type; 4934 4935 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 4936 itype = &ixl_phy_type_map[i]; 4937 4938 if (ISSET(itype->phy_type, mask)) 4939 return itype; 4940 } 4941 4942 return NULL; 4943 } 4944 4945 static uint64_t 4946 ixl_search_link_speed(uint8_t link_speed) 4947 { 4948 const struct ixl_speed_type *type; 4949 unsigned int i; 4950 4951 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4952 type = &ixl_speed_type_map[i]; 4953 4954 if (ISSET(type->dev_speed, link_speed)) 4955 return type->net_speed; 4956 } 4957 4958 return 0; 4959 } 4960 4961 static uint8_t 4962 ixl_search_baudrate(uint64_t baudrate) 4963 { 4964 const struct ixl_speed_type *type; 4965 unsigned int i; 4966 4967 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4968 type = &ixl_speed_type_map[i]; 4969 4970 if (type->net_speed == baudrate) { 4971 return type->dev_speed; 4972 } 4973 } 4974 4975 return 0; 4976 } 4977 4978 static int 4979 ixl_restart_an(struct ixl_softc *sc) 4980 { 4981 struct ixl_aq_desc iaq; 4982 4983 memset(&iaq, 0, sizeof(iaq)); 4984 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN); 4985 iaq.iaq_param[0] = 4986 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE); 4987 4988 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4989 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n"); 4990 return -1; 4991 } 4992 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4993 aprint_error_dev(sc->sc_dev, "RESTART AN error\n"); 4994 return -1; 4995 } 4996 4997 return 0; 4998 } 4999 5000 static int 5001 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 5002 uint16_t vlan, uint16_t flags) 5003 { 5004 struct ixl_aq_desc iaq; 5005 struct ixl_aq_add_macvlan *param; 5006 struct ixl_aq_add_macvlan_elem *elem; 5007 5008 memset(&iaq, 0, sizeof(iaq)); 5009 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 5010 iaq.iaq_opcode = 
htole16(IXL_AQ_OP_ADD_MACVLAN); 5011 iaq.iaq_datalen = htole16(sizeof(*elem)); 5012 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 5013 5014 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param; 5015 param->num_addrs = htole16(1); 5016 param->seid0 = htole16(0x8000) | sc->sc_seid; 5017 param->seid1 = 0; 5018 param->seid2 = 0; 5019 5020 elem = IXL_DMA_KVA(&sc->sc_scratch); 5021 memset(elem, 0, sizeof(*elem)); 5022 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 5023 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags); 5024 elem->vlan = htole16(vlan); 5025 5026 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 5027 return IXL_AQ_RC_EINVAL; 5028 } 5029 5030 switch (le16toh(iaq.iaq_retval)) { 5031 case IXL_AQ_RC_OK: 5032 break; 5033 case IXL_AQ_RC_ENOSPC: 5034 return ENOSPC; 5035 case IXL_AQ_RC_ENOENT: 5036 return ENOENT; 5037 case IXL_AQ_RC_EACCES: 5038 return EACCES; 5039 case IXL_AQ_RC_EEXIST: 5040 return EEXIST; 5041 case IXL_AQ_RC_EINVAL: 5042 return EINVAL; 5043 default: 5044 return EIO; 5045 } 5046 5047 return 0; 5048 } 5049 5050 static int 5051 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 5052 uint16_t vlan, uint16_t flags) 5053 { 5054 struct ixl_aq_desc iaq; 5055 struct ixl_aq_remove_macvlan *param; 5056 struct ixl_aq_remove_macvlan_elem *elem; 5057 5058 memset(&iaq, 0, sizeof(iaq)); 5059 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 5060 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN); 5061 iaq.iaq_datalen = htole16(sizeof(*elem)); 5062 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 5063 5064 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param; 5065 param->num_addrs = htole16(1); 5066 param->seid0 = htole16(0x8000) | sc->sc_seid; 5067 param->seid1 = 0; 5068 param->seid2 = 0; 5069 5070 elem = IXL_DMA_KVA(&sc->sc_scratch); 5071 memset(elem, 0, sizeof(*elem)); 5072 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 5073 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags); 5074 elem->vlan = htole16(vlan); 5075 5076 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 5077 return EINVAL; 5078 } 5079 5080 switch (le16toh(iaq.iaq_retval)) { 5081 case IXL_AQ_RC_OK: 5082 break; 5083 case IXL_AQ_RC_ENOENT: 5084 return ENOENT; 5085 case IXL_AQ_RC_EACCES: 5086 return EACCES; 5087 case IXL_AQ_RC_EINVAL: 5088 return EINVAL; 5089 default: 5090 return EIO; 5091 } 5092 5093 return 0; 5094 } 5095 5096 static int 5097 ixl_hmc(struct ixl_softc *sc) 5098 { 5099 struct { 5100 uint32_t count; 5101 uint32_t minsize; 5102 bus_size_t objsiz; 5103 bus_size_t setoff; 5104 bus_size_t setcnt; 5105 } regs[] = { 5106 { 5107 0, 5108 IXL_HMC_TXQ_MINSIZE, 5109 I40E_GLHMC_LANTXOBJSZ, 5110 I40E_GLHMC_LANTXBASE(sc->sc_pf_id), 5111 I40E_GLHMC_LANTXCNT(sc->sc_pf_id), 5112 }, 5113 { 5114 0, 5115 IXL_HMC_RXQ_MINSIZE, 5116 I40E_GLHMC_LANRXOBJSZ, 5117 I40E_GLHMC_LANRXBASE(sc->sc_pf_id), 5118 I40E_GLHMC_LANRXCNT(sc->sc_pf_id), 5119 }, 5120 { 5121 0, 5122 0, 5123 I40E_GLHMC_FCOEDDPOBJSZ, 5124 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id), 5125 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id), 5126 }, 5127 { 5128 0, 5129 0, 5130 I40E_GLHMC_FCOEFOBJSZ, 5131 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id), 5132 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id), 5133 }, 5134 }; 5135 struct ixl_hmc_entry *e; 5136 uint64_t size, dva; 5137 uint8_t *kva; 5138 uint64_t *sdpage; 5139 unsigned int i; 5140 int npages, tables; 5141 uint32_t reg; 5142 5143 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries)); 5144 5145 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count = 5146 ixl_rd(sc, I40E_GLHMC_LANQMAX); 5147 5148 size = 0; 5149 for (i 
= 0; i < __arraycount(regs); i++) { 5150 e = &sc->sc_hmc_entries[i]; 5151 5152 e->hmc_count = regs[i].count; 5153 reg = ixl_rd(sc, regs[i].objsiz); 5154 e->hmc_size = BIT_ULL(0x3F & reg); 5155 e->hmc_base = size; 5156 5157 if ((e->hmc_size * 8) < regs[i].minsize) { 5158 aprint_error_dev(sc->sc_dev, 5159 "kernel hmc entry is too big\n"); 5160 return -1; 5161 } 5162 5163 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP); 5164 } 5165 size = roundup(size, IXL_HMC_PGSIZE); 5166 npages = size / IXL_HMC_PGSIZE; 5167 5168 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ; 5169 5170 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) { 5171 aprint_error_dev(sc->sc_dev, 5172 "unable to allocate hmc pd memory\n"); 5173 return -1; 5174 } 5175 5176 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE, 5177 IXL_HMC_PGSIZE) != 0) { 5178 aprint_error_dev(sc->sc_dev, 5179 "unable to allocate hmc sd memory\n"); 5180 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5181 return -1; 5182 } 5183 5184 kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 5185 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd)); 5186 5187 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 5188 0, IXL_DMA_LEN(&sc->sc_hmc_pd), 5189 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5190 5191 dva = IXL_DMA_DVA(&sc->sc_hmc_pd); 5192 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd); 5193 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd)); 5194 5195 for (i = 0; (int)i < npages; i++) { 5196 *sdpage = htole64(dva | IXL_HMC_PDVALID); 5197 sdpage++; 5198 5199 dva += IXL_HMC_PGSIZE; 5200 } 5201 5202 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd), 5203 0, IXL_DMA_LEN(&sc->sc_hmc_sd), 5204 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5205 5206 dva = IXL_DMA_DVA(&sc->sc_hmc_sd); 5207 for (i = 0; (int)i < tables; i++) { 5208 uint32_t count; 5209 5210 KASSERT(npages >= 0); 5211 5212 count = ((unsigned int)npages > IXL_HMC_PGS) ? 
5213 IXL_HMC_PGS : (unsigned int)npages; 5214 5215 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32); 5216 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva | 5217 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | 5218 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)); 5219 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 5220 ixl_wr(sc, I40E_PFHMC_SDCMD, 5221 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i); 5222 5223 npages -= IXL_HMC_PGS; 5224 dva += IXL_HMC_PGSIZE; 5225 } 5226 5227 for (i = 0; i < __arraycount(regs); i++) { 5228 e = &sc->sc_hmc_entries[i]; 5229 5230 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP); 5231 ixl_wr(sc, regs[i].setcnt, e->hmc_count); 5232 } 5233 5234 return 0; 5235 } 5236 5237 static void 5238 ixl_hmc_free(struct ixl_softc *sc) 5239 { 5240 ixl_dmamem_free(sc, &sc->sc_hmc_sd); 5241 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5242 } 5243 5244 static void 5245 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing, 5246 unsigned int npacking) 5247 { 5248 uint8_t *dst = d; 5249 const uint8_t *src = s; 5250 unsigned int i; 5251 5252 for (i = 0; i < npacking; i++) { 5253 const struct ixl_hmc_pack *pack = &packing[i]; 5254 unsigned int offset = pack->lsb / 8; 5255 unsigned int align = pack->lsb % 8; 5256 const uint8_t *in = src + pack->offset; 5257 uint8_t *out = dst + offset; 5258 int width = pack->width; 5259 unsigned int inbits = 0; 5260 5261 if (align) { 5262 inbits = (*in++) << align; 5263 *out++ |= (inbits & 0xff); 5264 inbits >>= 8; 5265 5266 width -= 8 - align; 5267 } 5268 5269 while (width >= 8) { 5270 inbits |= (*in++) << align; 5271 *out++ = (inbits & 0xff); 5272 inbits >>= 8; 5273 5274 width -= 8; 5275 } 5276 5277 if (width > 0) { 5278 inbits |= (*in) << align; 5279 *out |= (inbits & ((1 << width) - 1)); 5280 } 5281 } 5282 } 5283 5284 static struct ixl_aq_buf * 5285 ixl_aqb_alloc(struct ixl_softc *sc) 5286 { 5287 struct ixl_aq_buf *aqb; 5288 5289 aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP); 5290 5291 aqb->aqb_size = IXL_AQ_BUFLEN; 5292 5293 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1, 5294 aqb->aqb_size, 0, 5295 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0) 5296 goto free; 5297 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size, 5298 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs, 5299 BUS_DMA_WAITOK) != 0) 5300 goto destroy; 5301 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs, 5302 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0) 5303 goto dma_free; 5304 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data, 5305 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0) 5306 goto unmap; 5307 5308 return aqb; 5309 unmap: 5310 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5311 dma_free: 5312 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5313 destroy: 5314 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5315 free: 5316 kmem_free(aqb, sizeof(*aqb)); 5317 5318 return NULL; 5319 } 5320 5321 static void 5322 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb) 5323 { 5324 5325 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map); 5326 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5327 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5328 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5329 kmem_free(aqb, sizeof(*aqb)); 5330 } 5331 5332 static int 5333 ixl_arq_fill(struct ixl_softc *sc) 5334 { 5335 struct ixl_aq_buf *aqb; 5336 struct ixl_aq_desc *arq, *iaq; 5337 unsigned int prod = sc->sc_arq_prod; 5338 unsigned int n; 5339 int post = 0; 5340 5341 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons, 
5342 IXL_AQ_NUM); 5343 arq = IXL_DMA_KVA(&sc->sc_arq); 5344 5345 if (__predict_false(n <= 0)) 5346 return 0; 5347 5348 do { 5349 aqb = sc->sc_arq_live[prod]; 5350 iaq = &arq[prod]; 5351 5352 if (aqb == NULL) { 5353 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle); 5354 if (aqb != NULL) { 5355 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5356 ixl_aq_buf, aqb_entry); 5357 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) { 5358 break; 5359 } 5360 5361 sc->sc_arq_live[prod] = aqb; 5362 memset(aqb->aqb_data, 0, aqb->aqb_size); 5363 5364 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, 5365 aqb->aqb_size, BUS_DMASYNC_PREREAD); 5366 5367 iaq->iaq_flags = htole16(IXL_AQ_BUF | 5368 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? 5369 IXL_AQ_LB : 0)); 5370 iaq->iaq_opcode = 0; 5371 iaq->iaq_datalen = htole16(aqb->aqb_size); 5372 iaq->iaq_retval = 0; 5373 iaq->iaq_cookie = 0; 5374 iaq->iaq_param[0] = 0; 5375 iaq->iaq_param[1] = 0; 5376 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr); 5377 } 5378 5379 prod++; 5380 prod &= IXL_AQ_MASK; 5381 5382 post = 1; 5383 5384 } while (--n); 5385 5386 if (post) { 5387 sc->sc_arq_prod = prod; 5388 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 5389 } 5390 5391 return post; 5392 } 5393 5394 static void 5395 ixl_arq_unfill(struct ixl_softc *sc) 5396 { 5397 struct ixl_aq_buf *aqb; 5398 unsigned int i; 5399 5400 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) { 5401 aqb = sc->sc_arq_live[i]; 5402 if (aqb == NULL) 5403 continue; 5404 5405 sc->sc_arq_live[i] = NULL; 5406 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size, 5407 BUS_DMASYNC_POSTREAD); 5408 ixl_aqb_free(sc, aqb); 5409 } 5410 5411 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) { 5412 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5413 ixl_aq_buf, aqb_entry); 5414 ixl_aqb_free(sc, aqb); 5415 } 5416 } 5417 5418 static void 5419 ixl_clear_hw(struct ixl_softc *sc) 5420 { 5421 uint32_t num_queues, base_queue; 5422 uint32_t num_pf_int; 5423 uint32_t num_vf_int; 5424 uint32_t num_vfs; 5425 uint32_t i, j; 5426 uint32_t val; 5427 uint32_t eol = 0x7ff; 5428 5429 /* get number of interrupts, queues, and vfs */ 5430 val = ixl_rd(sc, I40E_GLPCI_CNF2); 5431 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 5432 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 5433 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> 5434 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; 5435 5436 val = ixl_rd(sc, I40E_PFLAN_QALLOC); 5437 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> 5438 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 5439 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> 5440 I40E_PFLAN_QALLOC_LASTQ_SHIFT; 5441 if (val & I40E_PFLAN_QALLOC_VALID_MASK) 5442 num_queues = (j - base_queue) + 1; 5443 else 5444 num_queues = 0; 5445 5446 val = ixl_rd(sc, I40E_PF_VT_PFALLOC); 5447 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> 5448 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; 5449 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> 5450 I40E_PF_VT_PFALLOC_LASTVF_SHIFT; 5451 if (val & I40E_PF_VT_PFALLOC_VALID_MASK) 5452 num_vfs = (j - i) + 1; 5453 else 5454 num_vfs = 0; 5455 5456 /* stop all the interrupts */ 5457 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5458 ixl_flush(sc); 5459 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 5460 for (i = 0; i < num_pf_int - 2; i++) 5461 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val); 5462 ixl_flush(sc); 5463 5464 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ 5465 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5466 ixl_wr(sc, I40E_PFINT_LNKLST0, val); 5467 for (i = 0; i < num_pf_int - 2; i++) 5468 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val); 5469 val = eol << 
I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5470 for (i = 0; i < num_vfs; i++) 5471 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val); 5472 for (i = 0; i < num_vf_int - 2; i++) 5473 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val); 5474 5475 /* warn the HW of the coming Tx disables */ 5476 for (i = 0; i < num_queues; i++) { 5477 uint32_t abs_queue_idx = base_queue + i; 5478 uint32_t reg_block = 0; 5479 5480 if (abs_queue_idx >= 128) { 5481 reg_block = abs_queue_idx / 128; 5482 abs_queue_idx %= 128; 5483 } 5484 5485 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block)); 5486 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 5487 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 5488 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 5489 5490 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val); 5491 } 5492 delaymsec(400); 5493 5494 /* stop all the queues */ 5495 for (i = 0; i < num_queues; i++) { 5496 ixl_wr(sc, I40E_QINT_TQCTL(i), 0); 5497 ixl_wr(sc, I40E_QTX_ENA(i), 0); 5498 ixl_wr(sc, I40E_QINT_RQCTL(i), 0); 5499 ixl_wr(sc, I40E_QRX_ENA(i), 0); 5500 } 5501 5502 /* short wait for all queue disables to settle */ 5503 delaymsec(50); 5504 } 5505 5506 static int 5507 ixl_pf_reset(struct ixl_softc *sc) 5508 { 5509 uint32_t cnt = 0; 5510 uint32_t cnt1 = 0; 5511 uint32_t reg = 0, reg0 = 0; 5512 uint32_t grst_del; 5513 5514 /* 5515 * Poll for Global Reset steady state in case of recent GRST. 5516 * The grst delay value is in 100ms units, and we'll wait a 5517 * couple counts longer to be sure we don't just miss the end. 5518 */ 5519 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL); 5520 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK; 5521 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 5522 5523 grst_del = grst_del * 20; 5524 5525 for (cnt = 0; cnt < grst_del; cnt++) { 5526 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 5527 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 5528 break; 5529 delaymsec(100); 5530 } 5531 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5532 aprint_error(", Global reset polling failed to complete\n"); 5533 return -1; 5534 } 5535 5536 /* Now Wait for the FW to be ready */ 5537 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { 5538 reg = ixl_rd(sc, I40E_GLNVM_ULD); 5539 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5540 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); 5541 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5542 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) 5543 break; 5544 5545 delaymsec(10); 5546 } 5547 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5548 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { 5549 aprint_error(", wait for FW Reset complete timed out " 5550 "(I40E_GLNVM_ULD = 0x%x)\n", reg); 5551 return -1; 5552 } 5553 5554 /* 5555 * If there was a Global Reset in progress when we got here, 5556 * we don't need to do the PF Reset 5557 */ 5558 if (cnt == 0) { 5559 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5560 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK); 5561 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { 5562 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5563 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) 5564 break; 5565 delaymsec(1); 5566 5567 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT); 5568 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5569 aprint_error(", Core reset upcoming." 
5570 " Skipping PF reset reset request\n"); 5571 return -1; 5572 } 5573 } 5574 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { 5575 aprint_error(", PF reset polling failed to complete" 5576 "(I40E_PFGEN_CTRL= 0x%x)\n", reg); 5577 return -1; 5578 } 5579 } 5580 5581 return 0; 5582 } 5583 5584 static int 5585 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm, 5586 bus_size_t size, bus_size_t align) 5587 { 5588 ixm->ixm_size = size; 5589 5590 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1, 5591 ixm->ixm_size, 0, 5592 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 5593 &ixm->ixm_map) != 0) 5594 return 1; 5595 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size, 5596 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs, 5597 BUS_DMA_WAITOK) != 0) 5598 goto destroy; 5599 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs, 5600 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0) 5601 goto free; 5602 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva, 5603 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0) 5604 goto unmap; 5605 5606 memset(ixm->ixm_kva, 0, ixm->ixm_size); 5607 5608 return 0; 5609 unmap: 5610 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5611 free: 5612 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5613 destroy: 5614 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5615 return 1; 5616 } 5617 5618 static void 5619 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm) 5620 { 5621 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map); 5622 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5623 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5624 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5625 } 5626 5627 static int 5628 ixl_setup_vlan_hwfilter(struct ixl_softc *sc) 5629 { 5630 struct ethercom *ec = &sc->sc_ec; 5631 struct vlanid_list *vlanidp; 5632 int rv; 5633 5634 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5635 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5636 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5637 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5638 5639 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5640 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5641 if (rv != 0) 5642 return rv; 5643 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5644 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5645 if (rv != 0) 5646 return rv; 5647 5648 ETHER_LOCK(ec); 5649 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5650 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 5651 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5652 if (rv != 0) 5653 break; 5654 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 5655 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5656 if (rv != 0) 5657 break; 5658 } 5659 ETHER_UNLOCK(ec); 5660 5661 return rv; 5662 } 5663 5664 static void 5665 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc) 5666 { 5667 struct vlanid_list *vlanidp; 5668 struct ethercom *ec = &sc->sc_ec; 5669 5670 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5671 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5672 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5673 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5674 5675 ETHER_LOCK(ec); 5676 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5677 ixl_remove_macvlan(sc, sc->sc_enaddr, 5678 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5679 ixl_remove_macvlan(sc, etherbroadcastaddr, 5680 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5681 } 5682 ETHER_UNLOCK(ec); 5683 5684 ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5685 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5686 ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5687 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5688 } 5689 5690 static int 
5691 ixl_update_macvlan(struct ixl_softc *sc) 5692 { 5693 int rv = 0; 5694 int next_ec_capenable = sc->sc_ec.ec_capenable; 5695 5696 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 5697 rv = ixl_setup_vlan_hwfilter(sc); 5698 if (rv != 0) 5699 ixl_teardown_vlan_hwfilter(sc); 5700 } else { 5701 ixl_teardown_vlan_hwfilter(sc); 5702 } 5703 5704 return rv; 5705 } 5706 5707 static int 5708 ixl_ifflags_cb(struct ethercom *ec) 5709 { 5710 struct ifnet *ifp = &ec->ec_if; 5711 struct ixl_softc *sc = ifp->if_softc; 5712 int rv, change; 5713 5714 mutex_enter(&sc->sc_cfg_lock); 5715 5716 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable; 5717 5718 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) { 5719 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 5720 rv = ENETRESET; 5721 goto out; 5722 } 5723 5724 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) { 5725 rv = ixl_update_macvlan(sc); 5726 if (rv == 0) { 5727 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 5728 } else { 5729 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER); 5730 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 5731 } 5732 } 5733 5734 rv = ixl_iff(sc); 5735 out: 5736 mutex_exit(&sc->sc_cfg_lock); 5737 5738 return rv; 5739 } 5740 5741 static int 5742 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 5743 { 5744 const struct ixl_aq_link_status *status; 5745 const struct ixl_phy_type *itype; 5746 5747 uint64_t ifm_active = IFM_ETHER; 5748 uint64_t ifm_status = IFM_AVALID; 5749 int link_state = LINK_STATE_DOWN; 5750 uint64_t baudrate = 0; 5751 5752 status = (const struct ixl_aq_link_status *)iaq->iaq_param; 5753 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) { 5754 ifm_active |= IFM_NONE; 5755 goto done; 5756 } 5757 5758 ifm_active |= IFM_FDX; 5759 ifm_status |= IFM_ACTIVE; 5760 link_state = LINK_STATE_UP; 5761 5762 itype = ixl_search_phy_type(status->phy_type); 5763 if (itype != NULL) 5764 ifm_active |= itype->ifm_type; 5765 5766 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX)) 5767 ifm_active |= IFM_ETH_TXPAUSE; 5768 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX)) 5769 ifm_active |= IFM_ETH_RXPAUSE; 5770 5771 baudrate = ixl_search_link_speed(status->link_speed); 5772 5773 done: 5774 /* sc->sc_cfg_lock held expect during attach */ 5775 sc->sc_media_active = ifm_active; 5776 sc->sc_media_status = ifm_status; 5777 5778 sc->sc_ec.ec_if.if_baudrate = baudrate; 5779 5780 return link_state; 5781 } 5782 5783 static int 5784 ixl_establish_intx(struct ixl_softc *sc) 5785 { 5786 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5787 pci_intr_handle_t *intr; 5788 char xnamebuf[32]; 5789 char intrbuf[PCI_INTRSTR_LEN]; 5790 char const *intrstr; 5791 5792 KASSERT(sc->sc_nintrs == 1); 5793 5794 intr = &sc->sc_ihp[0]; 5795 5796 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 5797 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy", 5798 device_xname(sc->sc_dev)); 5799 5800 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr, 5801 sc, xnamebuf); 5802 5803 if (sc->sc_ihs[0] == NULL) { 5804 aprint_error_dev(sc->sc_dev, 5805 "unable to establish interrupt at %s\n", intrstr); 5806 return -1; 5807 } 5808 5809 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 5810 return 0; 5811 } 5812 5813 static int 5814 ixl_establish_msix(struct ixl_softc *sc) 5815 { 5816 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5817 kcpuset_t *affinity; 5818 unsigned int vector = 0; 5819 unsigned int i; 5820 int affinity_to, r; 5821 char xnamebuf[32]; 5822 char intrbuf[PCI_INTRSTR_LEN]; 5823 char const 
*intrstr; 5824 5825 kcpuset_create(&affinity, false); 5826 5827 /* the "other" intr is mapped to vector 0 */ 5828 vector = 0; 5829 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5830 intrbuf, sizeof(intrbuf)); 5831 snprintf(xnamebuf, sizeof(xnamebuf), "%s others", 5832 device_xname(sc->sc_dev)); 5833 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5834 sc->sc_ihp[vector], IPL_NET, ixl_other_intr, 5835 sc, xnamebuf); 5836 if (sc->sc_ihs[vector] == NULL) { 5837 aprint_error_dev(sc->sc_dev, 5838 "unable to establish interrupt at %s\n", intrstr); 5839 goto fail; 5840 } 5841 5842 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr); 5843 5844 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5845 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu; 5846 5847 kcpuset_zero(affinity); 5848 kcpuset_set(affinity, affinity_to); 5849 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5850 if (r == 0) { 5851 aprint_normal(", affinity to %u", affinity_to); 5852 } 5853 aprint_normal("\n"); 5854 vector++; 5855 5856 sc->sc_msix_vector_queue = vector; 5857 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5858 5859 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 5860 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5861 intrbuf, sizeof(intrbuf)); 5862 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d", 5863 device_xname(sc->sc_dev), i); 5864 5865 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5866 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr, 5867 (void *)&sc->sc_qps[i], xnamebuf); 5868 5869 if (sc->sc_ihs[vector] == NULL) { 5870 aprint_error_dev(sc->sc_dev, 5871 "unable to establish interrupt at %s\n", intrstr); 5872 goto fail; 5873 } 5874 5875 aprint_normal_dev(sc->sc_dev, 5876 "for TXRX%d interrupt at %s", i, intrstr); 5877 5878 kcpuset_zero(affinity); 5879 kcpuset_set(affinity, affinity_to); 5880 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5881 if (r == 0) { 5882 aprint_normal(", affinity to %u", affinity_to); 5883 affinity_to = (affinity_to + 1) % ncpu; 5884 } 5885 aprint_normal("\n"); 5886 vector++; 5887 } 5888 5889 kcpuset_destroy(affinity); 5890 5891 return 0; 5892 fail: 5893 for (i = 0; i < vector; i++) { 5894 pci_intr_disestablish(pc, sc->sc_ihs[i]); 5895 } 5896 5897 sc->sc_msix_vector_queue = 0; 5898 sc->sc_msix_vector_queue = 0; 5899 kcpuset_destroy(affinity); 5900 5901 return -1; 5902 } 5903 5904 static void 5905 ixl_config_queue_intr(struct ixl_softc *sc) 5906 { 5907 unsigned int i, vector; 5908 5909 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5910 vector = sc->sc_msix_vector_queue; 5911 } else { 5912 vector = I40E_INTR_NOTX_INTR; 5913 5914 ixl_wr(sc, I40E_PFINT_LNKLST0, 5915 (I40E_INTR_NOTX_QUEUE << 5916 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) | 5917 (I40E_QUEUE_TYPE_RX << 5918 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5919 } 5920 5921 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 5922 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0); 5923 ixl_flush(sc); 5924 5925 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), 5926 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | 5927 (I40E_QUEUE_TYPE_RX << 5928 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5929 5930 ixl_wr(sc, I40E_QINT_RQCTL(i), 5931 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 5932 (I40E_ITR_INDEX_RX << 5933 I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 5934 (I40E_INTR_NOTX_RX_QUEUE << 5935 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) | 5936 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 5937 (I40E_QUEUE_TYPE_TX << 5938 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 5939 I40E_QINT_RQCTL_CAUSE_ENA_MASK); 5940 5941 ixl_wr(sc, 
I40E_QINT_TQCTL(i), 5942 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 5943 (I40E_ITR_INDEX_TX << 5944 I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 5945 (I40E_INTR_NOTX_TX_QUEUE << 5946 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) | 5947 (I40E_QUEUE_TYPE_EOL << 5948 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | 5949 (I40E_QUEUE_TYPE_RX << 5950 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) | 5951 I40E_QINT_TQCTL_CAUSE_ENA_MASK); 5952 5953 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5954 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i), 5955 sc->sc_itr_rx); 5956 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i), 5957 sc->sc_itr_tx); 5958 vector++; 5959 } 5960 } 5961 ixl_flush(sc); 5962 5963 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx); 5964 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx); 5965 ixl_flush(sc); 5966 } 5967 5968 static void 5969 ixl_config_other_intr(struct ixl_softc *sc) 5970 { 5971 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5972 (void)ixl_rd(sc, I40E_PFINT_ICR0); 5973 5974 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 5975 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 5976 I40E_PFINT_ICR0_ENA_GRST_MASK | 5977 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 5978 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 5979 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 5980 I40E_PFINT_ICR0_ENA_VFLR_MASK | 5981 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 5982 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 5983 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK); 5984 5985 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF); 5986 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0); 5987 ixl_wr(sc, I40E_PFINT_STAT_CTL0, 5988 (I40E_ITR_INDEX_OTHER << 5989 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)); 5990 ixl_flush(sc); 5991 } 5992 5993 static int 5994 ixl_setup_interrupts(struct ixl_softc *sc) 5995 { 5996 struct pci_attach_args *pa = &sc->sc_pa; 5997 pci_intr_type_t max_type, intr_type; 5998 int counts[PCI_INTR_TYPE_SIZE]; 5999 int error; 6000 unsigned int i; 6001 bool retry; 6002 6003 memset(counts, 0, sizeof(counts)); 6004 max_type = PCI_INTR_TYPE_MSIX; 6005 /* QPs + other interrupt */ 6006 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1; 6007 counts[PCI_INTR_TYPE_INTX] = 1; 6008 6009 if (ixl_param_nomsix) 6010 counts[PCI_INTR_TYPE_MSIX] = 0; 6011 6012 do { 6013 retry = false; 6014 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type); 6015 if (error != 0) { 6016 aprint_error_dev(sc->sc_dev, 6017 "couldn't map interrupt\n"); 6018 break; 6019 } 6020 6021 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]); 6022 sc->sc_nintrs = counts[intr_type]; 6023 KASSERT(sc->sc_nintrs > 0); 6024 6025 for (i = 0; i < sc->sc_nintrs; i++) { 6026 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i], 6027 PCI_INTR_MPSAFE, true); 6028 } 6029 6030 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs, 6031 KM_SLEEP); 6032 6033 if (intr_type == PCI_INTR_TYPE_MSIX) { 6034 error = ixl_establish_msix(sc); 6035 if (error) { 6036 counts[PCI_INTR_TYPE_MSIX] = 0; 6037 retry = true; 6038 } 6039 } else if (intr_type == PCI_INTR_TYPE_INTX) { 6040 error = ixl_establish_intx(sc); 6041 } else { 6042 error = -1; 6043 } 6044 6045 if (error) { 6046 kmem_free(sc->sc_ihs, 6047 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 6048 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 6049 } else { 6050 sc->sc_intrtype = intr_type; 6051 } 6052 } while (retry); 6053 6054 return error; 6055 } 6056 6057 static void 6058 ixl_teardown_interrupts(struct ixl_softc *sc) 6059 { 6060 struct pci_attach_args *pa = &sc->sc_pa; 6061 unsigned int i; 6062 6063 for (i = 0; i < sc->sc_nintrs; i++) { 6064 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]); 
6065 } 6066 6067 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 6068 6069 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 6070 sc->sc_ihs = NULL; 6071 sc->sc_nintrs = 0; 6072 } 6073 6074 static int 6075 ixl_setup_stats(struct ixl_softc *sc) 6076 { 6077 struct ixl_queue_pair *qp; 6078 struct ixl_tx_ring *txr; 6079 struct ixl_rx_ring *rxr; 6080 struct ixl_stats_counters *isc; 6081 unsigned int i; 6082 6083 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 6084 qp = &sc->sc_qps[i]; 6085 txr = qp->qp_txr; 6086 rxr = qp->qp_rxr; 6087 6088 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC, 6089 NULL, qp->qp_name, "m_defrag successed"); 6090 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC, 6091 NULL, qp->qp_name, "m_defrag_failed"); 6092 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC, 6093 NULL, qp->qp_name, "Dropped in pcq"); 6094 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC, 6095 NULL, qp->qp_name, "Deferred transmit"); 6096 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, 6097 NULL, qp->qp_name, "Interrupt on queue"); 6098 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC, 6099 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 6100 6101 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC, 6102 NULL, qp->qp_name, "MGETHDR failed"); 6103 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC, 6104 NULL, qp->qp_name, "MCLGET failed"); 6105 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed, 6106 EVCNT_TYPE_MISC, NULL, qp->qp_name, 6107 "bus_dmamap_load_mbuf failed"); 6108 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, 6109 NULL, qp->qp_name, "Interrupt on queue"); 6110 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC, 6111 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 6112 } 6113 6114 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR, 6115 NULL, device_xname(sc->sc_dev), "Interrupt for other events"); 6116 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC, 6117 NULL, device_xname(sc->sc_dev), "Link status event"); 6118 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC, 6119 NULL, device_xname(sc->sc_dev), "ECC error"); 6120 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC, 6121 NULL, device_xname(sc->sc_dev), "PCI exception"); 6122 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC, 6123 NULL, device_xname(sc->sc_dev), "Critical error"); 6124 6125 isc = &sc->sc_stats_counters; 6126 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC, 6127 NULL, device_xname(sc->sc_dev), "CRC errors"); 6128 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC, 6129 NULL, device_xname(sc->sc_dev), "Illegal bytes"); 6130 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC, 6131 NULL, device_xname(sc->sc_dev), "Mac local faults"); 6132 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC, 6133 NULL, device_xname(sc->sc_dev), "Mac remote faults"); 6134 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC, 6135 NULL, device_xname(sc->sc_dev), "Rx xon"); 6136 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC, 6137 NULL, device_xname(sc->sc_dev), "Tx xon"); 6138 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC, 6139 NULL, device_xname(sc->sc_dev), "Rx xoff"); 6140 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC, 6141 NULL, device_xname(sc->sc_dev), "Tx xoff"); 6142 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC, 6143 NULL, device_xname(sc->sc_dev), "Rx 
fragments"); 6144 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC, 6145 NULL, device_xname(sc->sc_dev), "Rx jabber"); 6146 6147 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC, 6148 NULL, device_xname(sc->sc_dev), "Rx size 64"); 6149 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC, 6150 NULL, device_xname(sc->sc_dev), "Rx size 127"); 6151 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC, 6152 NULL, device_xname(sc->sc_dev), "Rx size 255"); 6153 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC, 6154 NULL, device_xname(sc->sc_dev), "Rx size 511"); 6155 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC, 6156 NULL, device_xname(sc->sc_dev), "Rx size 1023"); 6157 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC, 6158 NULL, device_xname(sc->sc_dev), "Rx size 1522"); 6159 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC, 6160 NULL, device_xname(sc->sc_dev), "Rx jumbo packets"); 6161 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC, 6162 NULL, device_xname(sc->sc_dev), "Rx under size"); 6163 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC, 6164 NULL, device_xname(sc->sc_dev), "Rx over size"); 6165 6166 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC, 6167 NULL, device_xname(sc->sc_dev), "Rx bytes / port"); 6168 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC, 6169 NULL, device_xname(sc->sc_dev), "Rx discards / port"); 6170 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC, 6171 NULL, device_xname(sc->sc_dev), "Rx unicast / port"); 6172 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC, 6173 NULL, device_xname(sc->sc_dev), "Rx multicast / port"); 6174 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC, 6175 NULL, device_xname(sc->sc_dev), "Rx broadcast / port"); 6176 6177 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC, 6178 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi"); 6179 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC, 6180 NULL, device_xname(sc->sc_dev), "Rx discard / vsi"); 6181 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC, 6182 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi"); 6183 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC, 6184 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi"); 6185 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC, 6186 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi"); 6187 6188 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC, 6189 NULL, device_xname(sc->sc_dev), "Tx size 64"); 6190 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC, 6191 NULL, device_xname(sc->sc_dev), "Tx size 127"); 6192 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC, 6193 NULL, device_xname(sc->sc_dev), "Tx size 255"); 6194 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC, 6195 NULL, device_xname(sc->sc_dev), "Tx size 511"); 6196 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC, 6197 NULL, device_xname(sc->sc_dev), "Tx size 1023"); 6198 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC, 6199 NULL, device_xname(sc->sc_dev), "Tx size 1522"); 6200 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC, 6201 NULL, device_xname(sc->sc_dev), "Tx jumbo packets"); 6202 6203 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC, 6204 NULL, device_xname(sc->sc_dev), "Tx bytes / port"); 6205 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC, 6206 NULL, 
device_xname(sc->sc_dev), 6207 "Tx dropped due to link down / port"); 6208 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC, 6209 NULL, device_xname(sc->sc_dev), "Tx unicast / port"); 6210 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC, 6211 NULL, device_xname(sc->sc_dev), "Tx multicast / port"); 6212 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC, 6213 NULL, device_xname(sc->sc_dev), "Tx broadcast / port"); 6214 6215 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC, 6216 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi"); 6217 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC, 6218 NULL, device_xname(sc->sc_dev), "Tx errors / vsi"); 6219 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC, 6220 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi"); 6221 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC, 6222 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi"); 6223 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC, 6224 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi"); 6225 6226 sc->sc_stats_intval = ixl_param_stats_interval; 6227 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE); 6228 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc); 6229 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc); 6230 6231 return 0; 6232 } 6233 6234 static void 6235 ixl_teardown_stats(struct ixl_softc *sc) 6236 { 6237 struct ixl_tx_ring *txr; 6238 struct ixl_rx_ring *rxr; 6239 struct ixl_stats_counters *isc; 6240 unsigned int i; 6241 6242 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 6243 txr = sc->sc_qps[i].qp_txr; 6244 rxr = sc->sc_qps[i].qp_rxr; 6245 6246 evcnt_detach(&txr->txr_defragged); 6247 evcnt_detach(&txr->txr_defrag_failed); 6248 evcnt_detach(&txr->txr_pcqdrop); 6249 evcnt_detach(&txr->txr_transmitdef); 6250 evcnt_detach(&txr->txr_intr); 6251 evcnt_detach(&txr->txr_defer); 6252 6253 evcnt_detach(&rxr->rxr_mgethdr_failed); 6254 evcnt_detach(&rxr->rxr_mgetcl_failed); 6255 evcnt_detach(&rxr->rxr_mbuf_load_failed); 6256 evcnt_detach(&rxr->rxr_intr); 6257 evcnt_detach(&rxr->rxr_defer); 6258 } 6259 6260 isc = &sc->sc_stats_counters; 6261 evcnt_detach(&isc->isc_crc_errors); 6262 evcnt_detach(&isc->isc_illegal_bytes); 6263 evcnt_detach(&isc->isc_mac_local_faults); 6264 evcnt_detach(&isc->isc_mac_remote_faults); 6265 evcnt_detach(&isc->isc_link_xon_rx); 6266 evcnt_detach(&isc->isc_link_xon_tx); 6267 evcnt_detach(&isc->isc_link_xoff_rx); 6268 evcnt_detach(&isc->isc_link_xoff_tx); 6269 evcnt_detach(&isc->isc_rx_fragments); 6270 evcnt_detach(&isc->isc_rx_jabber); 6271 evcnt_detach(&isc->isc_rx_bytes); 6272 evcnt_detach(&isc->isc_rx_discards); 6273 evcnt_detach(&isc->isc_rx_unicast); 6274 evcnt_detach(&isc->isc_rx_multicast); 6275 evcnt_detach(&isc->isc_rx_broadcast); 6276 evcnt_detach(&isc->isc_rx_size_64); 6277 evcnt_detach(&isc->isc_rx_size_127); 6278 evcnt_detach(&isc->isc_rx_size_255); 6279 evcnt_detach(&isc->isc_rx_size_511); 6280 evcnt_detach(&isc->isc_rx_size_1023); 6281 evcnt_detach(&isc->isc_rx_size_1522); 6282 evcnt_detach(&isc->isc_rx_size_big); 6283 evcnt_detach(&isc->isc_rx_undersize); 6284 evcnt_detach(&isc->isc_rx_oversize); 6285 evcnt_detach(&isc->isc_tx_bytes); 6286 evcnt_detach(&isc->isc_tx_dropped_link_down); 6287 evcnt_detach(&isc->isc_tx_unicast); 6288 evcnt_detach(&isc->isc_tx_multicast); 6289 evcnt_detach(&isc->isc_tx_broadcast); 6290 evcnt_detach(&isc->isc_tx_size_64); 6291 evcnt_detach(&isc->isc_tx_size_127); 6292 evcnt_detach(&isc->isc_tx_size_255); 6293 
evcnt_detach(&isc->isc_tx_size_511); 6294 evcnt_detach(&isc->isc_tx_size_1023); 6295 evcnt_detach(&isc->isc_tx_size_1522); 6296 evcnt_detach(&isc->isc_tx_size_big); 6297 evcnt_detach(&isc->isc_vsi_rx_discards); 6298 evcnt_detach(&isc->isc_vsi_rx_bytes); 6299 evcnt_detach(&isc->isc_vsi_rx_unicast); 6300 evcnt_detach(&isc->isc_vsi_rx_multicast); 6301 evcnt_detach(&isc->isc_vsi_rx_broadcast); 6302 evcnt_detach(&isc->isc_vsi_tx_errors); 6303 evcnt_detach(&isc->isc_vsi_tx_bytes); 6304 evcnt_detach(&isc->isc_vsi_tx_unicast); 6305 evcnt_detach(&isc->isc_vsi_tx_multicast); 6306 evcnt_detach(&isc->isc_vsi_tx_broadcast); 6307 6308 evcnt_detach(&sc->sc_event_atq); 6309 evcnt_detach(&sc->sc_event_link); 6310 evcnt_detach(&sc->sc_event_ecc_err); 6311 evcnt_detach(&sc->sc_event_pci_exception); 6312 evcnt_detach(&sc->sc_event_crit_err); 6313 6314 callout_destroy(&sc->sc_stats_callout); 6315 } 6316 6317 static void 6318 ixl_stats_callout(void *xsc) 6319 { 6320 struct ixl_softc *sc = xsc; 6321 6322 ixl_work_add(sc->sc_workq, &sc->sc_stats_task); 6323 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 6324 } 6325 6326 static uint64_t 6327 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo, 6328 uint64_t *offset, bool has_offset) 6329 { 6330 uint64_t value, delta; 6331 int bitwidth; 6332 6333 bitwidth = reg_hi == 0 ? 32 : 48; 6334 6335 value = ixl_rd(sc, reg_lo); 6336 6337 if (bitwidth > 32) { 6338 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32); 6339 } 6340 6341 if (__predict_true(has_offset)) { 6342 delta = value; 6343 if (value < *offset) 6344 delta += ((uint64_t)1 << bitwidth); 6345 delta -= *offset; 6346 } else { 6347 delta = 0; 6348 } 6349 atomic_swap_64(offset, value); 6350 6351 return delta; 6352 } 6353 6354 static void 6355 ixl_stats_update(void *xsc) 6356 { 6357 struct ixl_softc *sc = xsc; 6358 struct ixl_stats_counters *isc; 6359 uint64_t delta; 6360 6361 isc = &sc->sc_stats_counters; 6362 6363 /* errors */ 6364 delta = ixl_stat_delta(sc, 6365 0, I40E_GLPRT_CRCERRS(sc->sc_port), 6366 &isc->isc_crc_errors_offset, isc->isc_has_offset); 6367 atomic_add_64(&isc->isc_crc_errors.ev_count, delta); 6368 6369 delta = ixl_stat_delta(sc, 6370 0, I40E_GLPRT_ILLERRC(sc->sc_port), 6371 &isc->isc_illegal_bytes_offset, isc->isc_has_offset); 6372 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta); 6373 6374 /* rx */ 6375 delta = ixl_stat_delta(sc, 6376 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port), 6377 &isc->isc_rx_bytes_offset, isc->isc_has_offset); 6378 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta); 6379 6380 delta = ixl_stat_delta(sc, 6381 0, I40E_GLPRT_RDPC(sc->sc_port), 6382 &isc->isc_rx_discards_offset, isc->isc_has_offset); 6383 atomic_add_64(&isc->isc_rx_discards.ev_count, delta); 6384 6385 delta = ixl_stat_delta(sc, 6386 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port), 6387 &isc->isc_rx_unicast_offset, isc->isc_has_offset); 6388 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta); 6389 6390 delta = ixl_stat_delta(sc, 6391 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port), 6392 &isc->isc_rx_multicast_offset, isc->isc_has_offset); 6393 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta); 6394 6395 delta = ixl_stat_delta(sc, 6396 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port), 6397 &isc->isc_rx_broadcast_offset, isc->isc_has_offset); 6398 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta); 6399 6400 /* Packet size stats rx */ 6401 delta = ixl_stat_delta(sc, 6402 I40E_GLPRT_PRC64H(sc->sc_port), 
I40E_GLPRT_PRC64L(sc->sc_port),
6403 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6404 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6405
6406 delta = ixl_stat_delta(sc,
6407 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6408 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6409 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6410
6411 delta = ixl_stat_delta(sc,
6412 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6413 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6414 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6415
6416 delta = ixl_stat_delta(sc,
6417 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6418 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6419 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6420
6421 delta = ixl_stat_delta(sc,
6422 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6423 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6424 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6425
6426 delta = ixl_stat_delta(sc,
6427 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6428 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6429 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6430
6431 delta = ixl_stat_delta(sc,
6432 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6433 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6434 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6435
6436 delta = ixl_stat_delta(sc,
6437 0, I40E_GLPRT_RUC(sc->sc_port),
6438 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6439 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6440
6441 delta = ixl_stat_delta(sc,
6442 0, I40E_GLPRT_ROC(sc->sc_port),
6443 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6444 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6445
6446 /* tx */
6447 delta = ixl_stat_delta(sc,
6448 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6449 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6450 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6451
6452 delta = ixl_stat_delta(sc,
6453 0, I40E_GLPRT_TDOLD(sc->sc_port),
6454 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6455 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6456
6457 delta = ixl_stat_delta(sc,
6458 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6459 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6460 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6461
6462 delta = ixl_stat_delta(sc,
6463 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6464 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6465 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6466
6467 delta = ixl_stat_delta(sc,
6468 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6469 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6470 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6471
6472 /* Packet size stats tx */
6473 delta = ixl_stat_delta(sc,
6474 I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6475 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6476 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6477
6478 delta = ixl_stat_delta(sc,
6479 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6480 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6481 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6482
6483 delta = ixl_stat_delta(sc,
6484 I40E_GLPRT_PTC255H(sc->sc_port), 
I40E_GLPRT_PTC255L(sc->sc_port), 6485 &isc->isc_tx_size_255_offset, isc->isc_has_offset); 6486 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta); 6487 6488 delta = ixl_stat_delta(sc, 6489 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port), 6490 &isc->isc_tx_size_511_offset, isc->isc_has_offset); 6491 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta); 6492 6493 delta = ixl_stat_delta(sc, 6494 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port), 6495 &isc->isc_tx_size_1023_offset, isc->isc_has_offset); 6496 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta); 6497 6498 delta = ixl_stat_delta(sc, 6499 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port), 6500 &isc->isc_tx_size_1522_offset, isc->isc_has_offset); 6501 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta); 6502 6503 delta = ixl_stat_delta(sc, 6504 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port), 6505 &isc->isc_tx_size_big_offset, isc->isc_has_offset); 6506 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta); 6507 6508 /* mac faults */ 6509 delta = ixl_stat_delta(sc, 6510 0, I40E_GLPRT_MLFC(sc->sc_port), 6511 &isc->isc_mac_local_faults_offset, isc->isc_has_offset); 6512 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta); 6513 6514 delta = ixl_stat_delta(sc, 6515 0, I40E_GLPRT_MRFC(sc->sc_port), 6516 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset); 6517 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta); 6518 6519 /* Flow control (LFC) stats */ 6520 delta = ixl_stat_delta(sc, 6521 0, I40E_GLPRT_LXONRXC(sc->sc_port), 6522 &isc->isc_link_xon_rx_offset, isc->isc_has_offset); 6523 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta); 6524 6525 delta = ixl_stat_delta(sc, 6526 0, I40E_GLPRT_LXONTXC(sc->sc_port), 6527 &isc->isc_link_xon_tx_offset, isc->isc_has_offset); 6528 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta); 6529 6530 delta = ixl_stat_delta(sc, 6531 0, I40E_GLPRT_LXOFFRXC(sc->sc_port), 6532 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset); 6533 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta); 6534 6535 delta = ixl_stat_delta(sc, 6536 0, I40E_GLPRT_LXOFFTXC(sc->sc_port), 6537 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset); 6538 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta); 6539 6540 /* fragments */ 6541 delta = ixl_stat_delta(sc, 6542 0, I40E_GLPRT_RFC(sc->sc_port), 6543 &isc->isc_rx_fragments_offset, isc->isc_has_offset); 6544 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta); 6545 6546 delta = ixl_stat_delta(sc, 6547 0, I40E_GLPRT_RJC(sc->sc_port), 6548 &isc->isc_rx_jabber_offset, isc->isc_has_offset); 6549 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta); 6550 6551 /* VSI rx counters */ 6552 delta = ixl_stat_delta(sc, 6553 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx), 6554 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset); 6555 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta); 6556 6557 delta = ixl_stat_delta(sc, 6558 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx), 6559 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx), 6560 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset); 6561 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta); 6562 6563 delta = ixl_stat_delta(sc, 6564 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx), 6565 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx), 6566 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset); 6567 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta); 6568 6569 delta = ixl_stat_delta(sc, 6570 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx), 6571 
I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx), 6572 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset); 6573 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta); 6574 6575 delta = ixl_stat_delta(sc, 6576 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx), 6577 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx), 6578 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset); 6579 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta); 6580 6581 /* VSI tx counters */ 6582 delta = ixl_stat_delta(sc, 6583 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx), 6584 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset); 6585 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta); 6586 6587 delta = ixl_stat_delta(sc, 6588 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx), 6589 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx), 6590 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset); 6591 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta); 6592 6593 delta = ixl_stat_delta(sc, 6594 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx), 6595 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx), 6596 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset); 6597 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta); 6598 6599 delta = ixl_stat_delta(sc, 6600 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx), 6601 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx), 6602 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset); 6603 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta); 6604 6605 delta = ixl_stat_delta(sc, 6606 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx), 6607 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx), 6608 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset); 6609 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta); 6610 } 6611 6612 static int 6613 ixl_setup_sysctls(struct ixl_softc *sc) 6614 { 6615 const char *devname; 6616 struct sysctllog **log; 6617 const struct sysctlnode *rnode, *rxnode, *txnode; 6618 int error; 6619 6620 log = &sc->sc_sysctllog; 6621 devname = device_xname(sc->sc_dev); 6622 6623 error = sysctl_createv(log, 0, NULL, &rnode, 6624 0, CTLTYPE_NODE, devname, 6625 SYSCTL_DESCR("ixl information and settings"), 6626 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 6627 if (error) 6628 goto out; 6629 6630 error = sysctl_createv(log, 0, &rnode, NULL, 6631 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue", 6632 SYSCTL_DESCR("Use workqueue for packet processing"), 6633 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL); 6634 if (error) 6635 goto out; 6636 6637 error = sysctl_createv(log, 0, &rnode, NULL, 6638 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval", 6639 SYSCTL_DESCR("Statistics collection interval in milliseconds"), 6640 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL); 6641 6642 error = sysctl_createv(log, 0, &rnode, &rxnode, 6643 0, CTLTYPE_NODE, "rx", 6644 SYSCTL_DESCR("ixl information and settings for Rx"), 6645 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 6646 if (error) 6647 goto out; 6648 6649 error = sysctl_createv(log, 0, &rxnode, NULL, 6650 CTLFLAG_READWRITE, CTLTYPE_INT, "itr", 6651 SYSCTL_DESCR("Interrupt Throttling"), 6652 ixl_sysctl_itr_handler, 0, 6653 (void *)sc, 0, CTL_CREATE, CTL_EOL); 6654 if (error) 6655 goto out; 6656 6657 error = sysctl_createv(log, 0, &rxnode, NULL, 6658 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num", 6659 SYSCTL_DESCR("the number of rx descriptors"), 6660 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL); 6661 if (error) 6662 goto out; 6663 6664 error = sysctl_createv(log, 0, &rxnode, NULL, 6665 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 
6666 SYSCTL_DESCR("max number of Rx packets"
6667 " to process for interrupt processing"),
6668 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6669 if (error)
6670 goto out;
6671
6672 error = sysctl_createv(log, 0, &rxnode, NULL,
6673 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6674 SYSCTL_DESCR("max number of Rx packets"
6675 " to process for deferred processing"),
6676 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6677 if (error)
6678 goto out;
6679
6680 error = sysctl_createv(log, 0, &rnode, &txnode,
6681 0, CTLTYPE_NODE, "tx",
6682 SYSCTL_DESCR("ixl information and settings for Tx"),
6683 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6684 if (error)
6685 goto out;
6686
6687 error = sysctl_createv(log, 0, &txnode, NULL,
6688 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6689 SYSCTL_DESCR("Interrupt Throttling"),
6690 ixl_sysctl_itr_handler, 0,
6691 (void *)sc, 0, CTL_CREATE, CTL_EOL);
6692 if (error)
6693 goto out;
6694
6695 error = sysctl_createv(log, 0, &txnode, NULL,
6696 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
6697 SYSCTL_DESCR("the number of tx descriptors"),
6698 NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
6699 if (error)
6700 goto out;
6701
6702 error = sysctl_createv(log, 0, &txnode, NULL,
6703 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6704 SYSCTL_DESCR("max number of Tx packets"
6705 " to process for interrupt processing"),
6706 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6707 if (error)
6708 goto out;
6709
6710 error = sysctl_createv(log, 0, &txnode, NULL,
6711 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6712 SYSCTL_DESCR("max number of Tx packets"
6713 " to process for deferred processing"),
6714 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6715 if (error)
6716 goto out;
6717
6718 out:
6719 if (error) {
6720 aprint_error_dev(sc->sc_dev,
6721 "unable to create sysctl node\n");
6722 sysctl_teardown(log);
6723 }
6724
6725 return error;
6726 }
6727
6728 static void
6729 ixl_teardown_sysctls(struct ixl_softc *sc)
6730 {
6731
6732 sysctl_teardown(&sc->sc_sysctllog);
6733 }
6734
6735 static bool
6736 ixl_sysctlnode_is_rx(struct sysctlnode *node)
6737 {
6738
6739 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
6740 return true;
6741
6742 return false;
6743 }
6744
6745 static int
6746 ixl_sysctl_itr_handler(SYSCTLFN_ARGS)
6747 {
6748 struct sysctlnode node = *rnode;
6749 struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data;
6750 struct ifnet *ifp = &sc->sc_ec.ec_if;
6751 uint32_t newitr, *itrptr;
6752 int error;
6753
6754 if (ixl_sysctlnode_is_rx(&node)) {
6755 itrptr = &sc->sc_itr_rx;
6756 } else {
6757 itrptr = &sc->sc_itr_tx;
6758 }
6759
6760 newitr = *itrptr;
6761 node.sysctl_data = &newitr;
6762 node.sysctl_size = sizeof(newitr);
6763
6764 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6765
6766 if (error || newp == NULL)
6767 return error;
6768
6769 /* ITRs are applied in ixl_init() to keep the implementation simple */
6770 if (ISSET(ifp->if_flags, IFF_RUNNING))
6771 return EBUSY;
6772
6773 if (newitr > 0x07ff)
6774 return EINVAL;
6775
6776 *itrptr = newitr;
6777
6778 return 0;
6779 }
6780
6781 static struct workqueue *
6782 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6783 {
6784 struct workqueue *wq;
6785 int error;
6786
6787 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6788 prio, ipl, flags);
6789
6790 if (error)
6791 return NULL;
6792
6793 return wq;
6794 }
6795
6796 static void
6797 ixl_workq_destroy(struct workqueue *wq)
6798 {
6799
6800 workqueue_destroy(wq); 6801 } 6802 6803 static void 6804 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg) 6805 { 6806 6807 memset(work, 0, sizeof(*work)); 6808 work->ixw_func = func; 6809 work->ixw_arg = arg; 6810 } 6811 6812 static void 6813 ixl_work_add(struct workqueue *wq, struct ixl_work *work) 6814 { 6815 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0) 6816 return; 6817 6818 kpreempt_disable(); 6819 workqueue_enqueue(wq, &work->ixw_cookie, NULL); 6820 kpreempt_enable(); 6821 } 6822 6823 static void 6824 ixl_work_wait(struct workqueue *wq, struct ixl_work *work) 6825 { 6826 6827 workqueue_wait(wq, &work->ixw_cookie); 6828 } 6829 6830 static void 6831 ixl_workq_work(struct work *wk, void *context) 6832 { 6833 struct ixl_work *work; 6834 6835 work = container_of(wk, struct ixl_work, ixw_cookie); 6836 6837 atomic_swap_uint(&work->ixw_added, 0); 6838 work->ixw_func(work->ixw_arg); 6839 } 6840 6841 static int 6842 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv) 6843 { 6844 struct ixl_aq_desc iaq; 6845 6846 memset(&iaq, 0, sizeof(iaq)); 6847 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ); 6848 iaq.iaq_param[1] = htole32(reg); 6849 6850 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6851 return ETIMEDOUT; 6852 6853 switch (htole16(iaq.iaq_retval)) { 6854 case IXL_AQ_RC_OK: 6855 /* success */ 6856 break; 6857 case IXL_AQ_RC_EACCES: 6858 return EPERM; 6859 case IXL_AQ_RC_EAGAIN: 6860 return EAGAIN; 6861 default: 6862 return EIO; 6863 } 6864 6865 *rv = htole32(iaq.iaq_param[3]); 6866 return 0; 6867 } 6868 6869 static uint32_t 6870 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg) 6871 { 6872 uint32_t val; 6873 int rv, retry, retry_limit; 6874 6875 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6876 retry_limit = 5; 6877 } else { 6878 retry_limit = 0; 6879 } 6880 6881 for (retry = 0; retry < retry_limit; retry++) { 6882 rv = ixl_rx_ctl_read(sc, reg, &val); 6883 if (rv == 0) 6884 return val; 6885 else if (rv == EAGAIN) 6886 delaymsec(1); 6887 else 6888 break; 6889 } 6890 6891 val = ixl_rd(sc, reg); 6892 6893 return val; 6894 } 6895 6896 static int 6897 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6898 { 6899 struct ixl_aq_desc iaq; 6900 6901 memset(&iaq, 0, sizeof(iaq)); 6902 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE); 6903 iaq.iaq_param[1] = htole32(reg); 6904 iaq.iaq_param[3] = htole32(value); 6905 6906 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6907 return ETIMEDOUT; 6908 6909 switch (htole16(iaq.iaq_retval)) { 6910 case IXL_AQ_RC_OK: 6911 /* success */ 6912 break; 6913 case IXL_AQ_RC_EACCES: 6914 return EPERM; 6915 case IXL_AQ_RC_EAGAIN: 6916 return EAGAIN; 6917 default: 6918 return EIO; 6919 } 6920 6921 return 0; 6922 } 6923 6924 static void 6925 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6926 { 6927 int rv, retry, retry_limit; 6928 6929 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6930 retry_limit = 5; 6931 } else { 6932 retry_limit = 0; 6933 } 6934 6935 for (retry = 0; retry < retry_limit; retry++) { 6936 rv = ixl_rx_ctl_write(sc, reg, value); 6937 if (rv == 0) 6938 return; 6939 else if (rv == EAGAIN) 6940 delaymsec(1); 6941 else 6942 break; 6943 } 6944 6945 ixl_wr(sc, reg, value); 6946 } 6947 6948 static int 6949 ixl_nvm_lock(struct ixl_softc *sc, char rw) 6950 { 6951 struct ixl_aq_desc iaq; 6952 struct ixl_aq_req_resource_param *param; 6953 int rv; 6954 6955 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6956 return 0; 6957 6958 memset(&iaq, 0, sizeof(iaq)); 6959 iaq.iaq_opcode 
= htole16(IXL_AQ_OP_REQUEST_RESOURCE); 6960 6961 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param; 6962 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 6963 if (rw == 'R') { 6964 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ); 6965 } else { 6966 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE); 6967 } 6968 6969 rv = ixl_atq_poll(sc, &iaq, 250); 6970 6971 if (rv != 0) 6972 return ETIMEDOUT; 6973 6974 switch (le16toh(iaq.iaq_retval)) { 6975 case IXL_AQ_RC_OK: 6976 break; 6977 case IXL_AQ_RC_EACCES: 6978 return EACCES; 6979 case IXL_AQ_RC_EBUSY: 6980 return EBUSY; 6981 case IXL_AQ_RC_EPERM: 6982 return EPERM; 6983 } 6984 6985 return 0; 6986 } 6987 6988 static int 6989 ixl_nvm_unlock(struct ixl_softc *sc) 6990 { 6991 struct ixl_aq_desc iaq; 6992 struct ixl_aq_rel_resource_param *param; 6993 int rv; 6994 6995 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6996 return 0; 6997 6998 memset(&iaq, 0, sizeof(iaq)); 6999 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE); 7000 7001 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param; 7002 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 7003 7004 rv = ixl_atq_poll(sc, &iaq, 250); 7005 7006 if (rv != 0) 7007 return ETIMEDOUT; 7008 7009 switch (le16toh(iaq.iaq_retval)) { 7010 case IXL_AQ_RC_OK: 7011 break; 7012 default: 7013 return EIO; 7014 } 7015 return 0; 7016 } 7017 7018 static int 7019 ixl_srdone_poll(struct ixl_softc *sc) 7020 { 7021 int wait_count; 7022 uint32_t reg; 7023 7024 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS; 7025 wait_count++) { 7026 reg = ixl_rd(sc, I40E_GLNVM_SRCTL); 7027 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK)) 7028 break; 7029 7030 delaymsec(5); 7031 } 7032 7033 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS) 7034 return -1; 7035 7036 return 0; 7037 } 7038 7039 static int 7040 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data) 7041 { 7042 uint32_t reg; 7043 7044 if (ixl_srdone_poll(sc) != 0) 7045 return ETIMEDOUT; 7046 7047 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | 7048 __BIT(I40E_GLNVM_SRCTL_START_SHIFT); 7049 ixl_wr(sc, I40E_GLNVM_SRCTL, reg); 7050 7051 if (ixl_srdone_poll(sc) != 0) { 7052 aprint_debug("NVM read error: couldn't access " 7053 "Shadow RAM address: 0x%x\n", offset); 7054 return ETIMEDOUT; 7055 } 7056 7057 reg = ixl_rd(sc, I40E_GLNVM_SRDATA); 7058 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK); 7059 7060 return 0; 7061 } 7062 7063 static int 7064 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word, 7065 void *data, size_t len) 7066 { 7067 struct ixl_dmamem *idm; 7068 struct ixl_aq_desc iaq; 7069 struct ixl_aq_nvm_param *param; 7070 uint32_t offset_bytes; 7071 int rv; 7072 7073 idm = &sc->sc_aqbuf; 7074 if (len > IXL_DMA_LEN(idm)) 7075 return ENOMEM; 7076 7077 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 7078 memset(&iaq, 0, sizeof(iaq)); 7079 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ); 7080 iaq.iaq_flags = htole16(IXL_AQ_BUF | 7081 ((len > I40E_AQ_LARGE_BUF) ? 
IXL_AQ_LB : 0));
7082 iaq.iaq_datalen = htole16(len);
7083 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
7084
7085 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
7086 param->command_flags = IXL_AQ_NVM_LAST_CMD;
7087 param->module_pointer = 0;
7088 param->length = htole16(len);
7089 offset_bytes = (uint32_t)offset_word * 2;
7090 offset_bytes &= 0x00FFFFFF;
7091 param->offset = htole32(offset_bytes);
7092
7093 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
7094 BUS_DMASYNC_PREREAD);
7095
7096 rv = ixl_atq_poll(sc, &iaq, 250);
7097
7098 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
7099 BUS_DMASYNC_POSTREAD);
7100
7101 if (rv != 0) {
7102 return ETIMEDOUT;
7103 }
7104
7105 switch (le16toh(iaq.iaq_retval)) {
7106 case IXL_AQ_RC_OK:
7107 break;
7108 case IXL_AQ_RC_EPERM:
7109 return EPERM;
7110 case IXL_AQ_RC_EINVAL:
7111 return EINVAL;
7112 case IXL_AQ_RC_EBUSY:
7113 return EBUSY;
7114 case IXL_AQ_RC_EIO:
7115 default:
7116 return EIO;
7117 }
7118
7119 memcpy(data, IXL_DMA_KVA(idm), len);
7120
7121 return 0;
7122 }
7123
7124 static int
7125 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
7126 {
7127 int error;
7128 uint16_t buf;
7129
7130 error = ixl_nvm_lock(sc, 'R');
7131 if (error)
7132 return error;
7133
7134 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
7135 error = ixl_nvm_read_aq(sc, offset,
7136 &buf, sizeof(buf));
7137 if (error == 0)
7138 *data = le16toh(buf);
7139 } else {
7140 error = ixl_nvm_read_srctl(sc, offset, &buf);
7141 if (error == 0)
7142 *data = buf;
7143 }
7144
7145 ixl_nvm_unlock(sc);
7146
7147 return error;
7148 }
7149
7150 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
7151
7152 #ifdef _MODULE
7153 #include "ioconf.c"
7154 #endif
7155
7156 #ifdef _MODULE
7157 static void
7158 ixl_parse_modprop(prop_dictionary_t dict)
7159 {
7160 prop_object_t obj;
7161 int64_t val;
7162 uint64_t uval;
7163
7164 if (dict == NULL)
7165 return;
7166
7167 obj = prop_dictionary_get(dict, "nomsix");
7168 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
7169 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
7170 }
7171
7172 obj = prop_dictionary_get(dict, "stats_interval");
7173 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7174 val = prop_number_signed_value((prop_number_t)obj);
7175
7176 /* the accepted range is an arbitrary sanity limit */
7177 if (100 < val && val < 180000) {
7178 ixl_param_stats_interval = val;
7179 }
7180 }
7181
7182 obj = prop_dictionary_get(dict, "nqps_limit");
7183 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7184 val = prop_number_signed_value((prop_number_t)obj);
7185
7186 if (val <= INT32_MAX)
7187 ixl_param_nqps_limit = val;
7188 }
7189
7190 obj = prop_dictionary_get(dict, "rx_ndescs");
7191 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7192 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7193
7194 if (uval > 8)
7195 ixl_param_rx_ndescs = uval;
7196 }
7197
7198 obj = prop_dictionary_get(dict, "tx_ndescs");
7199 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7200 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7201
7202 if (uval > IXL_TX_PKT_DESCS)
7203 ixl_param_tx_ndescs = uval;
7204 }
7205
7206 }
7207 #endif
7208
7209 static int
7210 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7211 {
7212 int error = 0;
7213
7214 #ifdef _MODULE
7215 switch (cmd) {
7216 case MODULE_CMD_INIT:
7217 ixl_parse_modprop((prop_dictionary_t)opaque);
7218 error = config_init_component(cfdriver_ioconf_if_ixl,
7219 cfattach_ioconf_if_ixl, 
cfdata_ioconf_if_ixl); 7220 break; 7221 case MODULE_CMD_FINI: 7222 error = config_fini_component(cfdriver_ioconf_if_ixl, 7223 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl); 7224 break; 7225 default: 7226 error = ENOTTY; 7227 break; 7228 } 7229 #endif 7230 7231 return error; 7232 } 7233
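/*
 * Example: when the driver is built as a module, the properties consumed by
 * ixl_parse_modprop() above ("nomsix", "stats_interval", "nqps_limit",
 * "rx_ndescs", "tx_ndescs") can be supplied at load time, e.g. via
 * modload(8)'s -b/-i property options.  The command below is only a sketch;
 * the values are arbitrary and merely illustrate the expected types and the
 * ranges accepted by the parser:
 *
 *	modload -b nomsix=true -i stats_interval=10000 -i nqps_limit=4 \
 *	    -i rx_ndescs=2048 -i tx_ndescs=2048 if_ixl
 */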