1 /* $NetBSD: if_ixl.c,v 1.65 2020/03/19 03:11:23 yamaguchi Exp $ */ 2 3 /* 4 * Copyright (c) 2013-2015, Intel Corporation 5 * All rights reserved. 6 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 /* 51 * Copyright (c) 2019 Internet Initiative Japan, Inc. 52 * All rights reserved. 53 * 54 * Redistribution and use in source and binary forms, with or without 55 * modification, are permitted provided that the following conditions 56 * are met: 57 * 1. Redistributions of source code must retain the above copyright 58 * notice, this list of conditions and the following disclaimer. 59 * 2. Redistributions in binary form must reproduce the above copyright 60 * notice, this list of conditions and the following disclaimer in the 61 * documentation and/or other materials provided with the distribution. 62 * 63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 66 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 73 * POSSIBILITY OF SUCH DAMAGE. 74 */ 75 76 #include <sys/cdefs.h> 77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.65 2020/03/19 03:11:23 yamaguchi Exp $"); 78 79 #ifdef _KERNEL_OPT 80 #include "opt_net_mpsafe.h" 81 #include "opt_if_ixl.h" 82 #endif 83 84 #include <sys/param.h> 85 #include <sys/types.h> 86 87 #include <sys/cpu.h> 88 #include <sys/device.h> 89 #include <sys/evcnt.h> 90 #include <sys/interrupt.h> 91 #include <sys/kmem.h> 92 #include <sys/module.h> 93 #include <sys/mutex.h> 94 #include <sys/pcq.h> 95 #include <sys/syslog.h> 96 #include <sys/workqueue.h> 97 98 #include <sys/bus.h> 99 100 #include <net/bpf.h> 101 #include <net/if.h> 102 #include <net/if_dl.h> 103 #include <net/if_media.h> 104 #include <net/if_ether.h> 105 #include <net/rss_config.h> 106 107 #include <netinet/tcp.h> /* for struct tcphdr */ 108 #include <netinet/udp.h> /* for struct udphdr */ 109 110 #include <dev/pci/pcivar.h> 111 #include <dev/pci/pcidevs.h> 112 113 #include <dev/pci/if_ixlreg.h> 114 #include <dev/pci/if_ixlvar.h> 115 116 #include <prop/proplib.h> 117 118 struct ixl_softc; /* defined */ 119 120 #define I40E_PF_RESET_WAIT_COUNT 200 121 #define I40E_AQ_LARGE_BUF 512 122 123 /* bitfields for Tx queue mapping in QTX_CTL */ 124 #define I40E_QTX_CTL_VF_QUEUE 0x0 125 #define I40E_QTX_CTL_VM_QUEUE 0x1 126 #define I40E_QTX_CTL_PF_QUEUE 0x2 127 128 #define I40E_QUEUE_TYPE_EOL 0x7ff 129 #define I40E_INTR_NOTX_QUEUE 0 130 131 #define I40E_QUEUE_TYPE_RX 0x0 132 #define I40E_QUEUE_TYPE_TX 0x1 133 #define I40E_QUEUE_TYPE_PE_CEQ 0x2 134 #define I40E_QUEUE_TYPE_UNKNOWN 0x3 135 136 #define I40E_ITR_INDEX_RX 0x0 137 #define I40E_ITR_INDEX_TX 0x1 138 #define I40E_ITR_INDEX_OTHER 0x2 139 #define I40E_ITR_INDEX_NONE 0x3 140 #define IXL_ITR_RX 0x7a /* 4K intrs/sec */ 141 #define IXL_ITR_TX 0x7a /* 4K intrs/sec */ 142 143 #define I40E_INTR_NOTX_QUEUE 0 144 #define I40E_INTR_NOTX_INTR 0 145 #define I40E_INTR_NOTX_RX_QUEUE 0 146 #define I40E_INTR_NOTX_TX_QUEUE 1 147 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK 148 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK 149 150 #define BIT_ULL(a) (1ULL << (a)) 151 #define IXL_RSS_HENA_DEFAULT_BASE \ 152 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ 153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ 154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ 155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ 156 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ 157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ 158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ 159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ 160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ 161 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ 162 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) 163 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE 164 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \ 165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ 166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ 167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ 168 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ 169 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ 170 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) 171 #define I40E_HASH_LUT_SIZE_128 0 172 #define IXL_RSS_KEY_SIZE_REG 13 173 174 #define IXL_ICR0_CRIT_ERR_MASK \ 175 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \ 176 I40E_PFINT_ICR0_ECC_ERR_MASK | \ 177 I40E_PFINT_ICR0_PE_CRITERR_MASK) 178 179 #define IXL_QUEUE_MAX_XL710 64 180 #define IXL_QUEUE_MAX_X722 128 181 182 #define IXL_TX_PKT_DESCS 8 183 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS) 184 #define IXL_TX_QUEUE_ALIGN 128 185 #define IXL_RX_QUEUE_ALIGN 128 186 187 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN) 188 #define IXL_MTU_ETHERLEN ETHER_HDR_LEN \ 189 + ETHER_CRC_LEN 190 #if 0 191 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN) 192 #else 193 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */ 194 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN) 195 #endif 196 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN) 197 198 #define IXL_PCIREG PCI_MAPREG_START 199 200 #define IXL_ITR0 0x0 201 #define IXL_ITR1 0x1 202 #define IXL_ITR2 0x2 203 #define IXL_NOITR 0x3 204 205 #define IXL_AQ_NUM 256 206 #define IXL_AQ_MASK (IXL_AQ_NUM - 1) 207 #define IXL_AQ_ALIGN 64 /* lol */ 208 #define IXL_AQ_BUFLEN 4096 209 210 #define IXL_HMC_ROUNDUP 512 211 #define IXL_HMC_PGSIZE 4096 212 #define IXL_HMC_DVASZ sizeof(uint64_t) 213 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ) 214 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS) 215 #define IXL_HMC_PDVALID 1ULL 216 217 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz) 218 219 #define IXL_SRRD_SRCTL_ATTEMPTS 100000 220 221 struct ixl_aq_regs { 222 bus_size_t atq_tail; 223 bus_size_t atq_head; 224 bus_size_t atq_len; 225 bus_size_t atq_bal; 226 bus_size_t atq_bah; 227 228 bus_size_t arq_tail; 229 bus_size_t arq_head; 230 bus_size_t arq_len; 231 bus_size_t arq_bal; 232 bus_size_t arq_bah; 233 234 uint32_t atq_len_enable; 235 uint32_t atq_tail_mask; 236 uint32_t atq_head_mask; 237 238 uint32_t arq_len_enable; 239 uint32_t arq_tail_mask; 240 uint32_t arq_head_mask; 241 }; 242 243 struct ixl_phy_type { 244 uint64_t phy_type; 245 uint64_t ifm_type; 246 }; 247 248 struct ixl_speed_type { 249 uint8_t dev_speed; 250 uint64_t net_speed; 251 }; 252 253 struct ixl_aq_buf { 254 SIMPLEQ_ENTRY(ixl_aq_buf) 255 aqb_entry; 256 void *aqb_data; 257 bus_dmamap_t aqb_map; 258 bus_dma_segment_t aqb_seg; 259 size_t aqb_size; 260 int aqb_nsegs; 261 }; 262 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf); 263 264 struct ixl_dmamem { 265 bus_dmamap_t ixm_map; 266 bus_dma_segment_t ixm_seg; 267 int ixm_nsegs; 268 size_t ixm_size; 269 void *ixm_kva; 270 }; 271 272 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map) 273 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr) 274 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva) 275 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size) 276 277 struct ixl_hmc_entry { 278 uint64_t hmc_base; 279 uint32_t hmc_count; 280 uint64_t hmc_size; 281 }; 282 283 enum ixl_hmc_types { 284 IXL_HMC_LAN_TX = 0, 285 IXL_HMC_LAN_RX, 286 IXL_HMC_FCOE_CTX, 287 IXL_HMC_FCOE_FILTER, 288 IXL_HMC_COUNT 289 }; 290 291 struct ixl_hmc_pack { 292 uint16_t offset; 293 uint16_t width; 294 uint16_t lsb; 295 }; 296 297 /* 298 * these hmc objects have weird sizes and alignments, so these are abstract 299 * representations of them that are nice for c to populate. 
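 *
 * as an editorial illustration (not part of the original comment): each
 * ixl_hmc_pack entry below is { offset, width, lsb }, so the rxq table
 * entry
 *
 *	{ offsetof(struct ixl_hmc_rxq, qlen), 13, 89 }
 *
 * tells ixl_hmc_pack() to copy the low 13 bits of the qlen field into
 * bits 89..101 of the raw context that is written to the hmc.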
300 * 301 * the packing code relies on little-endian values being stored in the fields, 302 * no high bits in the fields being set, and the fields must be packed in the 303 * same order as they are in the ctx structure. 304 */ 305 306 struct ixl_hmc_rxq { 307 uint16_t head; 308 uint8_t cpuid; 309 uint64_t base; 310 #define IXL_HMC_RXQ_BASE_UNIT 128 311 uint16_t qlen; 312 uint16_t dbuff; 313 #define IXL_HMC_RXQ_DBUFF_UNIT 128 314 uint8_t hbuff; 315 #define IXL_HMC_RXQ_HBUFF_UNIT 64 316 uint8_t dtype; 317 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0 318 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1 319 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2 320 uint8_t dsize; 321 #define IXL_HMC_RXQ_DSIZE_16 0 322 #define IXL_HMC_RXQ_DSIZE_32 1 323 uint8_t crcstrip; 324 uint8_t fc_ena; 325 uint8_t l2sel; 326 uint8_t hsplit_0; 327 uint8_t hsplit_1; 328 uint8_t showiv; 329 uint16_t rxmax; 330 uint8_t tphrdesc_ena; 331 uint8_t tphwdesc_ena; 332 uint8_t tphdata_ena; 333 uint8_t tphhead_ena; 334 uint8_t lrxqthresh; 335 uint8_t prefena; 336 }; 337 338 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = { 339 { offsetof(struct ixl_hmc_rxq, head), 13, 0 }, 340 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 }, 341 { offsetof(struct ixl_hmc_rxq, base), 57, 32 }, 342 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 }, 343 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 }, 344 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 }, 345 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 }, 346 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 }, 347 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 }, 348 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 }, 349 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 }, 350 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 }, 351 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 }, 352 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 }, 353 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 }, 354 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 }, 355 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 }, 356 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 }, 357 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 }, 358 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 }, 359 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 }, 360 }; 361 362 #define IXL_HMC_RXQ_MINSIZE (201 + 1) 363 364 struct ixl_hmc_txq { 365 uint16_t head; 366 uint8_t new_context; 367 uint64_t base; 368 #define IXL_HMC_TXQ_BASE_UNIT 128 369 uint8_t fc_ena; 370 uint8_t timesync_ena; 371 uint8_t fd_ena; 372 uint8_t alt_vlan_ena; 373 uint8_t cpuid; 374 uint16_t thead_wb; 375 uint8_t head_wb_ena; 376 #define IXL_HMC_TXQ_DESC_WB 0 377 #define IXL_HMC_TXQ_HEAD_WB 1 378 uint16_t qlen; 379 uint8_t tphrdesc_ena; 380 uint8_t tphrpacket_ena; 381 uint8_t tphwdesc_ena; 382 uint64_t head_wb_addr; 383 uint32_t crc; 384 uint16_t rdylist; 385 uint8_t rdylist_act; 386 }; 387 388 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = { 389 { offsetof(struct ixl_hmc_txq, head), 13, 0 }, 390 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 }, 391 { offsetof(struct ixl_hmc_txq, base), 57, 32 }, 392 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 }, 393 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 }, 394 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 }, 395 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 }, 396 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 }, 397 /* line 1 */ 398 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 }, 399 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 }, 400 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 }, 401 { 
offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 }, 402 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 }, 403 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 }, 404 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 }, 405 /* line 7 */ 406 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) }, 407 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) }, 408 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) }, 409 }; 410 411 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1) 412 413 struct ixl_work { 414 struct work ixw_cookie; 415 void (*ixw_func)(void *); 416 void *ixw_arg; 417 unsigned int ixw_added; 418 }; 419 #define IXL_WORKQUEUE_PRI PRI_SOFTNET 420 421 struct ixl_tx_map { 422 struct mbuf *txm_m; 423 bus_dmamap_t txm_map; 424 unsigned int txm_eop; 425 }; 426 427 struct ixl_tx_ring { 428 kmutex_t txr_lock; 429 struct ixl_softc *txr_sc; 430 431 unsigned int txr_prod; 432 unsigned int txr_cons; 433 434 struct ixl_tx_map *txr_maps; 435 struct ixl_dmamem txr_mem; 436 437 bus_size_t txr_tail; 438 unsigned int txr_qid; 439 pcq_t *txr_intrq; 440 void *txr_si; 441 442 struct evcnt txr_defragged; 443 struct evcnt txr_defrag_failed; 444 struct evcnt txr_pcqdrop; 445 struct evcnt txr_transmitdef; 446 struct evcnt txr_intr; 447 struct evcnt txr_defer; 448 }; 449 450 struct ixl_rx_map { 451 struct mbuf *rxm_m; 452 bus_dmamap_t rxm_map; 453 }; 454 455 struct ixl_rx_ring { 456 kmutex_t rxr_lock; 457 458 unsigned int rxr_prod; 459 unsigned int rxr_cons; 460 461 struct ixl_rx_map *rxr_maps; 462 struct ixl_dmamem rxr_mem; 463 464 struct mbuf *rxr_m_head; 465 struct mbuf **rxr_m_tail; 466 467 bus_size_t rxr_tail; 468 unsigned int rxr_qid; 469 470 struct evcnt rxr_mgethdr_failed; 471 struct evcnt rxr_mgetcl_failed; 472 struct evcnt rxr_mbuf_load_failed; 473 struct evcnt rxr_intr; 474 struct evcnt rxr_defer; 475 }; 476 477 struct ixl_queue_pair { 478 struct ixl_softc *qp_sc; 479 struct ixl_tx_ring *qp_txr; 480 struct ixl_rx_ring *qp_rxr; 481 482 char qp_name[16]; 483 484 void *qp_si; 485 struct work qp_work; 486 bool qp_workqueue; 487 }; 488 489 struct ixl_atq { 490 struct ixl_aq_desc iatq_desc; 491 void (*iatq_fn)(struct ixl_softc *, 492 const struct ixl_aq_desc *); 493 }; 494 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq); 495 496 struct ixl_product { 497 unsigned int vendor_id; 498 unsigned int product_id; 499 }; 500 501 struct ixl_stats_counters { 502 bool isc_has_offset; 503 struct evcnt isc_crc_errors; 504 uint64_t isc_crc_errors_offset; 505 struct evcnt isc_illegal_bytes; 506 uint64_t isc_illegal_bytes_offset; 507 struct evcnt isc_rx_bytes; 508 uint64_t isc_rx_bytes_offset; 509 struct evcnt isc_rx_discards; 510 uint64_t isc_rx_discards_offset; 511 struct evcnt isc_rx_unicast; 512 uint64_t isc_rx_unicast_offset; 513 struct evcnt isc_rx_multicast; 514 uint64_t isc_rx_multicast_offset; 515 struct evcnt isc_rx_broadcast; 516 uint64_t isc_rx_broadcast_offset; 517 struct evcnt isc_rx_size_64; 518 uint64_t isc_rx_size_64_offset; 519 struct evcnt isc_rx_size_127; 520 uint64_t isc_rx_size_127_offset; 521 struct evcnt isc_rx_size_255; 522 uint64_t isc_rx_size_255_offset; 523 struct evcnt isc_rx_size_511; 524 uint64_t isc_rx_size_511_offset; 525 struct evcnt isc_rx_size_1023; 526 uint64_t isc_rx_size_1023_offset; 527 struct evcnt isc_rx_size_1522; 528 uint64_t isc_rx_size_1522_offset; 529 struct evcnt isc_rx_size_big; 530 uint64_t isc_rx_size_big_offset; 531 struct evcnt isc_rx_undersize; 532 uint64_t isc_rx_undersize_offset; 533 struct evcnt isc_rx_oversize; 534 uint64_t 
isc_rx_oversize_offset;
	struct evcnt		isc_rx_fragments;
	uint64_t		isc_rx_fragments_offset;
	struct evcnt		isc_rx_jabber;
	uint64_t		isc_rx_jabber_offset;
	struct evcnt		isc_tx_bytes;
	uint64_t		isc_tx_bytes_offset;
	struct evcnt		isc_tx_dropped_link_down;
	uint64_t		isc_tx_dropped_link_down_offset;
	struct evcnt		isc_tx_unicast;
	uint64_t		isc_tx_unicast_offset;
	struct evcnt		isc_tx_multicast;
	uint64_t		isc_tx_multicast_offset;
	struct evcnt		isc_tx_broadcast;
	uint64_t		isc_tx_broadcast_offset;
	struct evcnt		isc_tx_size_64;
	uint64_t		isc_tx_size_64_offset;
	struct evcnt		isc_tx_size_127;
	uint64_t		isc_tx_size_127_offset;
	struct evcnt		isc_tx_size_255;
	uint64_t		isc_tx_size_255_offset;
	struct evcnt		isc_tx_size_511;
	uint64_t		isc_tx_size_511_offset;
	struct evcnt		isc_tx_size_1023;
	uint64_t		isc_tx_size_1023_offset;
	struct evcnt		isc_tx_size_1522;
	uint64_t		isc_tx_size_1522_offset;
	struct evcnt		isc_tx_size_big;
	uint64_t		isc_tx_size_big_offset;
	struct evcnt		isc_mac_local_faults;
	uint64_t		isc_mac_local_faults_offset;
	struct evcnt		isc_mac_remote_faults;
	uint64_t		isc_mac_remote_faults_offset;
	struct evcnt		isc_link_xon_rx;
	uint64_t		isc_link_xon_rx_offset;
	struct evcnt		isc_link_xon_tx;
	uint64_t		isc_link_xon_tx_offset;
	struct evcnt		isc_link_xoff_rx;
	uint64_t		isc_link_xoff_rx_offset;
	struct evcnt		isc_link_xoff_tx;
	uint64_t		isc_link_xoff_tx_offset;
	struct evcnt		isc_vsi_rx_discards;
	uint64_t		isc_vsi_rx_discards_offset;
	struct evcnt		isc_vsi_rx_bytes;
	uint64_t		isc_vsi_rx_bytes_offset;
	struct evcnt		isc_vsi_rx_unicast;
	uint64_t		isc_vsi_rx_unicast_offset;
	struct evcnt		isc_vsi_rx_multicast;
	uint64_t		isc_vsi_rx_multicast_offset;
	struct evcnt		isc_vsi_rx_broadcast;
	uint64_t		isc_vsi_rx_broadcast_offset;
	struct evcnt		isc_vsi_tx_errors;
	uint64_t		isc_vsi_tx_errors_offset;
	struct evcnt		isc_vsi_tx_bytes;
	uint64_t		isc_vsi_tx_bytes_offset;
	struct evcnt		isc_vsi_tx_unicast;
	uint64_t		isc_vsi_tx_unicast_offset;
	struct evcnt		isc_vsi_tx_multicast;
	uint64_t		isc_vsi_tx_multicast_offset;
	struct evcnt		isc_vsi_tx_broadcast;
	uint64_t		isc_vsi_tx_broadcast_offset;
};

/*
 * Locking notes:
 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
 *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
 *   - no more than one of these ring locks may be held at once.
 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
 *   (a spin mutex).
 *   - this lock may not be held together with txr_lock or rxr_lock.
 * + fields named sc_arq_* are not protected by any lock.
 *   - all operations on sc_arq_* are done in a single context related to
 *     sc_arq_task.
 * + the other fields in ixl_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *   - it must be taken before any of the other locks, and it may be
 *     released after the other lock is released.
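 *
 * as an illustrative sketch of this ordering (an editorial example, not
 * code taken from the driver), a path that needs both the configuration
 * lock and a ring lock does:
 *
 *	mutex_enter(&sc->sc_cfg_lock);		adaptive lock, taken first
 *	mutex_enter(&txr->txr_lock);		spin lock, taken second
 *	...
 *	mutex_exit(&txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);
 *
 * and it never holds txr_lock and rxr_lock at the same time.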
612 * */ 613 614 struct ixl_softc { 615 device_t sc_dev; 616 struct ethercom sc_ec; 617 bool sc_attached; 618 bool sc_dead; 619 uint32_t sc_port; 620 struct sysctllog *sc_sysctllog; 621 struct workqueue *sc_workq; 622 struct workqueue *sc_workq_txrx; 623 int sc_stats_intval; 624 callout_t sc_stats_callout; 625 struct ixl_work sc_stats_task; 626 struct ixl_stats_counters 627 sc_stats_counters; 628 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 629 struct ifmedia sc_media; 630 uint64_t sc_media_status; 631 uint64_t sc_media_active; 632 uint64_t sc_phy_types; 633 uint8_t sc_phy_abilities; 634 uint8_t sc_phy_linkspeed; 635 uint8_t sc_phy_fec_cfg; 636 uint16_t sc_eee_cap; 637 uint32_t sc_eeer_val; 638 uint8_t sc_d3_lpan; 639 kmutex_t sc_cfg_lock; 640 enum i40e_mac_type sc_mac_type; 641 uint32_t sc_rss_table_size; 642 uint32_t sc_rss_table_entry_width; 643 bool sc_txrx_workqueue; 644 u_int sc_tx_process_limit; 645 u_int sc_rx_process_limit; 646 u_int sc_tx_intr_process_limit; 647 u_int sc_rx_intr_process_limit; 648 649 int sc_cur_ec_capenable; 650 651 struct pci_attach_args sc_pa; 652 pci_intr_handle_t *sc_ihp; 653 void **sc_ihs; 654 unsigned int sc_nintrs; 655 656 bus_dma_tag_t sc_dmat; 657 bus_space_tag_t sc_memt; 658 bus_space_handle_t sc_memh; 659 bus_size_t sc_mems; 660 661 uint8_t sc_pf_id; 662 uint16_t sc_uplink_seid; /* le */ 663 uint16_t sc_downlink_seid; /* le */ 664 uint16_t sc_vsi_number; 665 uint16_t sc_vsi_stat_counter_idx; 666 uint16_t sc_seid; 667 unsigned int sc_base_queue; 668 669 pci_intr_type_t sc_intrtype; 670 unsigned int sc_msix_vector_queue; 671 672 struct ixl_dmamem sc_scratch; 673 struct ixl_dmamem sc_aqbuf; 674 675 const struct ixl_aq_regs * 676 sc_aq_regs; 677 uint32_t sc_aq_flags; 678 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0) 679 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1) 680 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2) 681 #define IXL_SC_AQ_FLAG_RSS __BIT(3) 682 683 kmutex_t sc_atq_lock; 684 kcondvar_t sc_atq_cv; 685 struct ixl_dmamem sc_atq; 686 unsigned int sc_atq_prod; 687 unsigned int sc_atq_cons; 688 689 struct ixl_dmamem sc_arq; 690 struct ixl_work sc_arq_task; 691 struct ixl_aq_bufs sc_arq_idle; 692 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM]; 693 unsigned int sc_arq_prod; 694 unsigned int sc_arq_cons; 695 696 struct ixl_work sc_link_state_task; 697 struct ixl_atq sc_link_state_atq; 698 699 struct ixl_dmamem sc_hmc_sd; 700 struct ixl_dmamem sc_hmc_pd; 701 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT]; 702 703 unsigned int sc_tx_ring_ndescs; 704 unsigned int sc_rx_ring_ndescs; 705 unsigned int sc_nqueue_pairs; 706 unsigned int sc_nqueue_pairs_max; 707 unsigned int sc_nqueue_pairs_device; 708 struct ixl_queue_pair *sc_qps; 709 uint32_t sc_itr_rx; 710 uint32_t sc_itr_tx; 711 712 struct evcnt sc_event_atq; 713 struct evcnt sc_event_link; 714 struct evcnt sc_event_ecc_err; 715 struct evcnt sc_event_pci_exception; 716 struct evcnt sc_event_crit_err; 717 }; 718 719 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX 720 #define IXL_TX_PROCESS_LIMIT 256 721 #define IXL_RX_PROCESS_LIMIT 256 722 #define IXL_TX_INTR_PROCESS_LIMIT 256 723 #define IXL_RX_INTR_PROCESS_LIMIT 0U 724 725 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \ 726 IFCAP_CSUM_TCPv4_Rx | \ 727 IFCAP_CSUM_UDPv4_Rx | \ 728 IFCAP_CSUM_TCPv6_Rx | \ 729 IFCAP_CSUM_UDPv6_Rx) 730 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \ 731 IFCAP_CSUM_TCPv4_Tx | \ 732 IFCAP_CSUM_UDPv4_Tx | \ 733 IFCAP_CSUM_TCPv6_Tx | \ 734 IFCAP_CSUM_UDPv6_Tx) 735 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \ 736 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \ 737 M_CSUM_UDPv4 | 
M_CSUM_UDPv6) 738 739 #define delaymsec(_x) DELAY(1000 * (_x)) 740 #ifdef IXL_DEBUG 741 #define DDPRINTF(sc, fmt, args...) \ 742 do { \ 743 if ((sc) != NULL) { \ 744 device_printf( \ 745 ((struct ixl_softc *)(sc))->sc_dev, \ 746 ""); \ 747 } \ 748 printf("%s:\t" fmt, __func__, ##args); \ 749 } while (0) 750 #else 751 #define DDPRINTF(sc, fmt, args...) __nothing 752 #endif 753 #ifndef IXL_STATS_INTERVAL_MSEC 754 #define IXL_STATS_INTERVAL_MSEC 10000 755 #endif 756 #ifndef IXL_QUEUE_NUM 757 #define IXL_QUEUE_NUM 0 758 #endif 759 760 static bool ixl_param_nomsix = false; 761 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC; 762 static int ixl_param_nqps_limit = IXL_QUEUE_NUM; 763 static unsigned int ixl_param_tx_ndescs = 1024; 764 static unsigned int ixl_param_rx_ndescs = 1024; 765 766 static enum i40e_mac_type 767 ixl_mactype(pci_product_id_t); 768 static void ixl_clear_hw(struct ixl_softc *); 769 static int ixl_pf_reset(struct ixl_softc *); 770 771 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *, 772 bus_size_t, bus_size_t); 773 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *); 774 775 static int ixl_arq_fill(struct ixl_softc *); 776 static void ixl_arq_unfill(struct ixl_softc *); 777 778 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *, 779 unsigned int); 780 static void ixl_atq_set(struct ixl_atq *, 781 void (*)(struct ixl_softc *, const struct ixl_aq_desc *)); 782 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *); 783 static void ixl_atq_done(struct ixl_softc *); 784 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *); 785 static int ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *); 786 static int ixl_get_version(struct ixl_softc *); 787 static int ixl_get_nvm_version(struct ixl_softc *); 788 static int ixl_get_hw_capabilities(struct ixl_softc *); 789 static int ixl_pxe_clear(struct ixl_softc *); 790 static int ixl_lldp_shut(struct ixl_softc *); 791 static int ixl_get_mac(struct ixl_softc *); 792 static int ixl_get_switch_config(struct ixl_softc *); 793 static int ixl_phy_mask_ints(struct ixl_softc *); 794 static int ixl_get_phy_info(struct ixl_softc *); 795 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool); 796 static int ixl_set_phy_autoselect(struct ixl_softc *); 797 static int ixl_restart_an(struct ixl_softc *); 798 static int ixl_hmc(struct ixl_softc *); 799 static void ixl_hmc_free(struct ixl_softc *); 800 static int ixl_get_vsi(struct ixl_softc *); 801 static int ixl_set_vsi(struct ixl_softc *); 802 static void ixl_set_filter_control(struct ixl_softc *); 803 static void ixl_get_link_status(void *); 804 static int ixl_get_link_status_poll(struct ixl_softc *, int *); 805 static void ixl_get_link_status_done(struct ixl_softc *, 806 const struct ixl_aq_desc *); 807 static int ixl_set_link_status_locked(struct ixl_softc *, 808 const struct ixl_aq_desc *); 809 static uint64_t ixl_search_link_speed(uint8_t); 810 static uint8_t ixl_search_baudrate(uint64_t); 811 static void ixl_config_rss(struct ixl_softc *); 812 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *, 813 uint16_t, uint16_t); 814 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *, 815 uint16_t, uint16_t); 816 static void ixl_arq(void *); 817 static void ixl_hmc_pack(void *, const void *, 818 const struct ixl_hmc_pack *, unsigned int); 819 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t); 820 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t); 
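
/*
 * Editorial sketch (not part of the original file) of how the admin
 * queue helpers declared above are meant to be combined, using only
 * names visible in this file; "some_done_cb" is a hypothetical callback
 * matching the function-pointer type taken by ixl_atq_set():
 *
 *	struct ixl_atq iatq;
 *
 *	memset(&iatq, 0, sizeof(iatq));
 *	(fill in iatq.iatq_desc with the command to post)
 *	ixl_atq_set(&iatq, some_done_cb);
 *	error = ixl_atq_exec(sc, &iatq);
 */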
821 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *); 822 823 static int ixl_match(device_t, cfdata_t, void *); 824 static void ixl_attach(device_t, device_t, void *); 825 static int ixl_detach(device_t, int); 826 827 static void ixl_media_add(struct ixl_softc *); 828 static int ixl_media_change(struct ifnet *); 829 static void ixl_media_status(struct ifnet *, struct ifmediareq *); 830 static void ixl_watchdog(struct ifnet *); 831 static int ixl_ioctl(struct ifnet *, u_long, void *); 832 static void ixl_start(struct ifnet *); 833 static int ixl_transmit(struct ifnet *, struct mbuf *); 834 static void ixl_deferred_transmit(void *); 835 static int ixl_intr(void *); 836 static int ixl_queue_intr(void *); 837 static int ixl_other_intr(void *); 838 static void ixl_handle_queue(void *); 839 static void ixl_handle_queue_wk(struct work *, void *); 840 static void ixl_sched_handle_queue(struct ixl_softc *, 841 struct ixl_queue_pair *); 842 static int ixl_init(struct ifnet *); 843 static int ixl_init_locked(struct ixl_softc *); 844 static void ixl_stop(struct ifnet *, int); 845 static void ixl_stop_locked(struct ixl_softc *); 846 static int ixl_iff(struct ixl_softc *); 847 static int ixl_ifflags_cb(struct ethercom *); 848 static int ixl_setup_interrupts(struct ixl_softc *); 849 static int ixl_establish_intx(struct ixl_softc *); 850 static int ixl_establish_msix(struct ixl_softc *); 851 static void ixl_enable_queue_intr(struct ixl_softc *, 852 struct ixl_queue_pair *); 853 static void ixl_disable_queue_intr(struct ixl_softc *, 854 struct ixl_queue_pair *); 855 static void ixl_enable_other_intr(struct ixl_softc *); 856 static void ixl_disable_other_intr(struct ixl_softc *); 857 static void ixl_config_queue_intr(struct ixl_softc *); 858 static void ixl_config_other_intr(struct ixl_softc *); 859 860 static struct ixl_tx_ring * 861 ixl_txr_alloc(struct ixl_softc *, unsigned int); 862 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int); 863 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *); 864 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *); 865 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *); 866 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *); 867 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *); 868 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *); 869 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int); 870 871 static struct ixl_rx_ring * 872 ixl_rxr_alloc(struct ixl_softc *, unsigned int); 873 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *); 874 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *); 875 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *); 876 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *); 877 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *); 878 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *); 879 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int); 880 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *); 881 882 static struct workqueue * 883 ixl_workq_create(const char *, pri_t, int, int); 884 static void ixl_workq_destroy(struct workqueue *); 885 static int ixl_workqs_teardown(device_t); 886 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *); 887 static void ixl_work_add(struct workqueue *, struct ixl_work *); 888 static void 
	ixl_work_wait(struct workqueue *, struct ixl_work *);
static void	ixl_workq_work(struct work *, void *);
static const struct ixl_product *
		ixl_lookup(const struct pci_attach_args *pa);
static void	ixl_link_state_update(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static int	ixl_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixl_setup_vlan_hwfilter(struct ixl_softc *);
static void	ixl_teardown_vlan_hwfilter(struct ixl_softc *);
static int	ixl_update_macvlan(struct ixl_softc *);
static void	ixl_teardown_interrupts(struct ixl_softc *);
static int	ixl_setup_stats(struct ixl_softc *);
static void	ixl_teardown_stats(struct ixl_softc *);
static void	ixl_stats_callout(void *);
static void	ixl_stats_update(void *);
static int	ixl_setup_sysctls(struct ixl_softc *);
static void	ixl_teardown_sysctls(struct ixl_softc *);
static int	ixl_queue_pairs_alloc(struct ixl_softc *);
static void	ixl_queue_pairs_free(struct ixl_softc *);

static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_TWINAX },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_ACC },
};

static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_LINK_SPEED_40GB,	IF_Gbps(40) },
	{ IXL_AQ_LINK_SPEED_25GB,	IF_Gbps(25) },
	{ IXL_AQ_LINK_SPEED_10GB,	IF_Gbps(10) },
	{ IXL_AQ_LINK_SPEED_1000MB,	IF_Mbps(1000) },
	{ IXL_AQ_LINK_SPEED_100MB,	IF_Mbps(100) },
};

static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail	= I40E_PF_ATQT,
	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#define ixl_rd(_s, _r)			\
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v)		\
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_flush(_s)	(void)ixl_rd((_s), I40E_GLGEN_STAT)
#define ixl_nqueues(_sc)	(1 << ((_sc)->sc_nqueue_pairs - 1))

static inline uint32_t
ixl_dmamem_hi(struct ixl_dmamem *ixm)
{
	uint32_t retval;
	uint64_t val;

	if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
		val = (intptr_t)IXL_DMA_DVA(ixm);
		retval = (uint32_t)(val >> 32);
	} else {
		retval = 0;
	}

	return retval;
}

static inline uint32_t
ixl_dmamem_lo(struct ixl_dmamem *ixm)
{

	return (uint32_t)IXL_DMA_DVA(ixm);
}

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
	uint64_t val;

	if (sizeof(addr) > 4) {
		val = (intptr_t)addr;
		iaq->iaq_param[2] = htole32(val >> 32);
	} else {
		iaq->iaq_param[2] = htole32(0);
	}

	iaq->iaq_param[3] = htole32(addr);
}

static inline unsigned int
ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
{
	unsigned int num;

	if (prod < cons)
		num = cons - prod;
	else
		num = (ndescs - prod) + cons;

	if (__predict_true(num > 0)) {
		/* device cannot receive packets if all descriptors are filled */
		num -= 1;
	}

	return num;
}

CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
    ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static const struct ixl_product ixl_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_KX_B },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_KX_C },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_A },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_B },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_C },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_T4_10G },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_KX },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_QSFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_1G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_I_SFP },
	/* required last entry */
	{0, 0}
};

static const struct ixl_product *
ixl_lookup(const struct pci_attach_args *pa)
{
	const struct ixl_product *ixlp;

	for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
		if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
		    PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
			return ixlp;
	}

return NULL; 1078 } 1079 1080 static int 1081 ixl_match(device_t parent, cfdata_t match, void *aux) 1082 { 1083 const struct pci_attach_args *pa = aux; 1084 1085 return (ixl_lookup(pa) != NULL) ? 1 : 0; 1086 } 1087 1088 static void 1089 ixl_attach(device_t parent, device_t self, void *aux) 1090 { 1091 struct ixl_softc *sc; 1092 struct pci_attach_args *pa = aux; 1093 struct ifnet *ifp; 1094 pcireg_t memtype; 1095 uint32_t firstq, port, ari, func; 1096 char xnamebuf[32]; 1097 int tries, rv, link; 1098 1099 sc = device_private(self); 1100 sc->sc_dev = self; 1101 ifp = &sc->sc_ec.ec_if; 1102 1103 sc->sc_pa = *pa; 1104 sc->sc_dmat = (pci_dma64_available(pa)) ? 1105 pa->pa_dmat64 : pa->pa_dmat; 1106 sc->sc_aq_regs = &ixl_pf_aq_regs; 1107 1108 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id)); 1109 1110 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG); 1111 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0, 1112 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) { 1113 aprint_error(": unable to map registers\n"); 1114 return; 1115 } 1116 1117 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET); 1118 1119 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC); 1120 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK; 1121 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 1122 sc->sc_base_queue = firstq; 1123 1124 ixl_clear_hw(sc); 1125 if (ixl_pf_reset(sc) == -1) { 1126 /* error printed by ixl pf_reset */ 1127 goto unmap; 1128 } 1129 1130 port = ixl_rd(sc, I40E_PFGEN_PORTNUM); 1131 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK; 1132 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; 1133 sc->sc_port = port; 1134 aprint_normal(": port %u", sc->sc_port); 1135 1136 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP); 1137 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK; 1138 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; 1139 1140 func = ixl_rd(sc, I40E_PF_FUNC_RID); 1141 sc->sc_pf_id = func & (ari ? 
0xff : 0x7); 1142 1143 /* initialise the adminq */ 1144 1145 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET); 1146 1147 if (ixl_dmamem_alloc(sc, &sc->sc_atq, 1148 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1149 aprint_error("\n" "%s: unable to allocate atq\n", 1150 device_xname(self)); 1151 goto unmap; 1152 } 1153 1154 SIMPLEQ_INIT(&sc->sc_arq_idle); 1155 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc); 1156 sc->sc_arq_cons = 0; 1157 sc->sc_arq_prod = 0; 1158 1159 if (ixl_dmamem_alloc(sc, &sc->sc_arq, 1160 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1161 aprint_error("\n" "%s: unable to allocate arq\n", 1162 device_xname(self)); 1163 goto free_atq; 1164 } 1165 1166 if (!ixl_arq_fill(sc)) { 1167 aprint_error("\n" "%s: unable to fill arq descriptors\n", 1168 device_xname(self)); 1169 goto free_arq; 1170 } 1171 1172 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1173 0, IXL_DMA_LEN(&sc->sc_atq), 1174 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1175 1176 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1177 0, IXL_DMA_LEN(&sc->sc_arq), 1178 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1179 1180 for (tries = 0; tries < 10; tries++) { 1181 sc->sc_atq_cons = 0; 1182 sc->sc_atq_prod = 0; 1183 1184 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1185 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1186 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1187 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1188 1189 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 1190 1191 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 1192 ixl_dmamem_lo(&sc->sc_atq)); 1193 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 1194 ixl_dmamem_hi(&sc->sc_atq)); 1195 ixl_wr(sc, sc->sc_aq_regs->atq_len, 1196 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM); 1197 1198 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 1199 ixl_dmamem_lo(&sc->sc_arq)); 1200 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 1201 ixl_dmamem_hi(&sc->sc_arq)); 1202 ixl_wr(sc, sc->sc_aq_regs->arq_len, 1203 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM); 1204 1205 rv = ixl_get_version(sc); 1206 if (rv == 0) 1207 break; 1208 if (rv != ETIMEDOUT) { 1209 aprint_error(", unable to get firmware version\n"); 1210 goto shutdown; 1211 } 1212 1213 delaymsec(100); 1214 } 1215 1216 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 1217 1218 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) { 1219 aprint_error_dev(self, ", unable to allocate nvm buffer\n"); 1220 goto shutdown; 1221 } 1222 1223 ixl_get_nvm_version(sc); 1224 1225 if (sc->sc_mac_type == I40E_MAC_X722) 1226 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722; 1227 else 1228 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710; 1229 1230 rv = ixl_get_hw_capabilities(sc); 1231 if (rv != 0) { 1232 aprint_error(", GET HW CAPABILITIES %s\n", 1233 rv == ETIMEDOUT ? 
"timeout" : "error"); 1234 goto free_aqbuf; 1235 } 1236 1237 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu); 1238 if (ixl_param_nqps_limit > 0) { 1239 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max, 1240 ixl_param_nqps_limit); 1241 } 1242 1243 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 1244 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs; 1245 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs; 1246 1247 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs); 1248 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs); 1249 1250 if (ixl_get_mac(sc) != 0) { 1251 /* error printed by ixl_get_mac */ 1252 goto free_aqbuf; 1253 } 1254 1255 aprint_normal("\n"); 1256 aprint_naive("\n"); 1257 1258 aprint_normal_dev(self, "Ethernet address %s\n", 1259 ether_sprintf(sc->sc_enaddr)); 1260 1261 rv = ixl_pxe_clear(sc); 1262 if (rv != 0) { 1263 aprint_debug_dev(self, "CLEAR PXE MODE %s\n", 1264 rv == ETIMEDOUT ? "timeout" : "error"); 1265 } 1266 1267 ixl_set_filter_control(sc); 1268 1269 if (ixl_hmc(sc) != 0) { 1270 /* error printed by ixl_hmc */ 1271 goto free_aqbuf; 1272 } 1273 1274 if (ixl_lldp_shut(sc) != 0) { 1275 /* error printed by ixl_lldp_shut */ 1276 goto free_hmc; 1277 } 1278 1279 if (ixl_phy_mask_ints(sc) != 0) { 1280 /* error printed by ixl_phy_mask_ints */ 1281 goto free_hmc; 1282 } 1283 1284 if (ixl_restart_an(sc) != 0) { 1285 /* error printed by ixl_restart_an */ 1286 goto free_hmc; 1287 } 1288 1289 if (ixl_get_switch_config(sc) != 0) { 1290 /* error printed by ixl_get_switch_config */ 1291 goto free_hmc; 1292 } 1293 1294 rv = ixl_get_link_status_poll(sc, NULL); 1295 if (rv != 0) { 1296 aprint_error_dev(self, "GET LINK STATUS %s\n", 1297 rv == ETIMEDOUT ? "timeout" : "error"); 1298 goto free_hmc; 1299 } 1300 1301 /* 1302 * The FW often returns EIO in "Get PHY Abilities" command 1303 * if there is no delay 1304 */ 1305 DELAY(500); 1306 if (ixl_get_phy_info(sc) != 0) { 1307 /* error printed by ixl_get_phy_info */ 1308 goto free_hmc; 1309 } 1310 1311 if (ixl_dmamem_alloc(sc, &sc->sc_scratch, 1312 sizeof(struct ixl_aq_vsi_data), 8) != 0) { 1313 aprint_error_dev(self, "unable to allocate scratch buffer\n"); 1314 goto free_hmc; 1315 } 1316 1317 rv = ixl_get_vsi(sc); 1318 if (rv != 0) { 1319 aprint_error_dev(self, "GET VSI %s %d\n", 1320 rv == ETIMEDOUT ? "timeout" : "error", rv); 1321 goto free_scratch; 1322 } 1323 1324 rv = ixl_set_vsi(sc); 1325 if (rv != 0) { 1326 aprint_error_dev(self, "UPDATE VSI error %s %d\n", 1327 rv == ETIMEDOUT ? 
"timeout" : "error", rv); 1328 goto free_scratch; 1329 } 1330 1331 if (ixl_queue_pairs_alloc(sc) != 0) { 1332 /* error printed by ixl_queue_pairs_alloc */ 1333 goto free_scratch; 1334 } 1335 1336 if (ixl_setup_interrupts(sc) != 0) { 1337 /* error printed by ixl_setup_interrupts */ 1338 goto free_queue_pairs; 1339 } 1340 1341 if (ixl_setup_stats(sc) != 0) { 1342 aprint_error_dev(self, "failed to setup event counters\n"); 1343 goto teardown_intrs; 1344 } 1345 1346 if (ixl_setup_sysctls(sc) != 0) { 1347 /* error printed by ixl_setup_sysctls */ 1348 goto teardown_stats; 1349 } 1350 1351 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self)); 1352 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI, 1353 IPL_NET, WQ_MPSAFE); 1354 if (sc->sc_workq == NULL) 1355 goto teardown_sysctls; 1356 1357 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self)); 1358 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk, 1359 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE); 1360 if (rv != 0) { 1361 sc->sc_workq_txrx = NULL; 1362 goto teardown_wqs; 1363 } 1364 1365 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self)); 1366 cv_init(&sc->sc_atq_cv, xnamebuf); 1367 1368 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 1369 1370 ifp->if_softc = sc; 1371 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1372 ifp->if_extflags = IFEF_MPSAFE; 1373 ifp->if_ioctl = ixl_ioctl; 1374 ifp->if_start = ixl_start; 1375 ifp->if_transmit = ixl_transmit; 1376 ifp->if_watchdog = ixl_watchdog; 1377 ifp->if_init = ixl_init; 1378 ifp->if_stop = ixl_stop; 1379 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs); 1380 IFQ_SET_READY(&ifp->if_snd); 1381 ifp->if_capabilities |= IXL_IFCAP_RXCSUM; 1382 ifp->if_capabilities |= IXL_IFCAP_TXCSUM; 1383 #if 0 1384 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6; 1385 #endif 1386 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb); 1387 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1388 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 1389 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1390 1391 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities; 1392 /* Disable VLAN_HWFILTER by default */ 1393 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1394 1395 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable; 1396 1397 sc->sc_ec.ec_ifmedia = &sc->sc_media; 1398 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change, 1399 ixl_media_status, &sc->sc_cfg_lock); 1400 1401 ixl_media_add(sc); 1402 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 1403 if (ISSET(sc->sc_phy_abilities, 1404 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1405 ifmedia_add(&sc->sc_media, 1406 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL); 1407 } 1408 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL); 1409 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 1410 1411 if_attach(ifp); 1412 if_deferred_start_init(ifp, NULL); 1413 ether_ifattach(ifp, sc->sc_enaddr); 1414 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb); 1415 1416 rv = ixl_get_link_status_poll(sc, &link); 1417 if (rv != 0) 1418 link = LINK_STATE_UNKNOWN; 1419 if_link_state_change(ifp, link); 1420 1421 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 1422 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc); 1423 1424 ixl_config_other_intr(sc); 1425 ixl_enable_other_intr(sc); 1426 1427 ixl_set_phy_autoselect(sc); 1428 1429 /* remove default mac filter and replace it so we can see vlans */ 1430 rv = 
ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0); 1431 if (rv != ENOENT) { 1432 aprint_debug_dev(self, 1433 "unable to remove macvlan %u\n", rv); 1434 } 1435 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 1436 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1437 if (rv != ENOENT) { 1438 aprint_debug_dev(self, 1439 "unable to remove macvlan, ignore vlan %u\n", rv); 1440 } 1441 1442 if (ixl_update_macvlan(sc) != 0) { 1443 aprint_debug_dev(self, 1444 "couldn't enable vlan hardware filter\n"); 1445 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1446 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 1447 } 1448 1449 sc->sc_txrx_workqueue = true; 1450 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT; 1451 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT; 1452 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT; 1453 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT; 1454 1455 ixl_stats_update(sc); 1456 sc->sc_stats_counters.isc_has_offset = true; 1457 1458 if (pmf_device_register(self, NULL, NULL) != true) 1459 aprint_debug_dev(self, "couldn't establish power handler\n"); 1460 sc->sc_itr_rx = IXL_ITR_RX; 1461 sc->sc_itr_tx = IXL_ITR_TX; 1462 sc->sc_attached = true; 1463 return; 1464 1465 teardown_wqs: 1466 config_finalize_register(self, ixl_workqs_teardown); 1467 teardown_sysctls: 1468 ixl_teardown_sysctls(sc); 1469 teardown_stats: 1470 ixl_teardown_stats(sc); 1471 teardown_intrs: 1472 ixl_teardown_interrupts(sc); 1473 free_queue_pairs: 1474 ixl_queue_pairs_free(sc); 1475 free_scratch: 1476 ixl_dmamem_free(sc, &sc->sc_scratch); 1477 free_hmc: 1478 ixl_hmc_free(sc); 1479 free_aqbuf: 1480 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1481 shutdown: 1482 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1483 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1484 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1485 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1486 1487 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1488 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1489 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1490 1491 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1492 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1493 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1494 1495 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1496 0, IXL_DMA_LEN(&sc->sc_arq), 1497 BUS_DMASYNC_POSTREAD); 1498 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1499 0, IXL_DMA_LEN(&sc->sc_atq), 1500 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1501 1502 ixl_arq_unfill(sc); 1503 free_arq: 1504 ixl_dmamem_free(sc, &sc->sc_arq); 1505 free_atq: 1506 ixl_dmamem_free(sc, &sc->sc_atq); 1507 unmap: 1508 mutex_destroy(&sc->sc_atq_lock); 1509 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1510 mutex_destroy(&sc->sc_cfg_lock); 1511 sc->sc_mems = 0; 1512 1513 sc->sc_attached = false; 1514 } 1515 1516 static int 1517 ixl_detach(device_t self, int flags) 1518 { 1519 struct ixl_softc *sc = device_private(self); 1520 struct ifnet *ifp = &sc->sc_ec.ec_if; 1521 1522 if (!sc->sc_attached) 1523 return 0; 1524 1525 ixl_stop(ifp, 1); 1526 1527 ixl_disable_other_intr(sc); 1528 1529 callout_halt(&sc->sc_stats_callout, NULL); 1530 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task); 1531 1532 /* wait for ATQ handler */ 1533 mutex_enter(&sc->sc_atq_lock); 1534 mutex_exit(&sc->sc_atq_lock); 1535 1536 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task); 1537 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task); 1538 1539 if (sc->sc_workq != NULL) { 1540 ixl_workq_destroy(sc->sc_workq); 1541 sc->sc_workq = NULL; 1542 } 1543 1544 if (sc->sc_workq_txrx != NULL) { 1545 workqueue_destroy(sc->sc_workq_txrx); 1546 
sc->sc_workq_txrx = NULL; 1547 } 1548 1549 ether_ifdetach(ifp); 1550 if_detach(ifp); 1551 ifmedia_fini(&sc->sc_media); 1552 1553 ixl_teardown_interrupts(sc); 1554 ixl_teardown_stats(sc); 1555 ixl_teardown_sysctls(sc); 1556 1557 ixl_queue_pairs_free(sc); 1558 1559 ixl_dmamem_free(sc, &sc->sc_scratch); 1560 ixl_hmc_free(sc); 1561 1562 /* shutdown */ 1563 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1564 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1565 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1566 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1567 1568 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1569 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1570 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1571 1572 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1573 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1574 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1575 1576 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1577 0, IXL_DMA_LEN(&sc->sc_arq), 1578 BUS_DMASYNC_POSTREAD); 1579 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1580 0, IXL_DMA_LEN(&sc->sc_atq), 1581 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1582 1583 ixl_arq_unfill(sc); 1584 1585 ixl_dmamem_free(sc, &sc->sc_arq); 1586 ixl_dmamem_free(sc, &sc->sc_atq); 1587 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1588 1589 cv_destroy(&sc->sc_atq_cv); 1590 mutex_destroy(&sc->sc_atq_lock); 1591 1592 if (sc->sc_mems != 0) { 1593 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1594 sc->sc_mems = 0; 1595 } 1596 1597 mutex_destroy(&sc->sc_cfg_lock); 1598 1599 return 0; 1600 } 1601 1602 static int 1603 ixl_workqs_teardown(device_t self) 1604 { 1605 struct ixl_softc *sc = device_private(self); 1606 1607 if (sc->sc_workq != NULL) { 1608 ixl_workq_destroy(sc->sc_workq); 1609 sc->sc_workq = NULL; 1610 } 1611 1612 if (sc->sc_workq_txrx != NULL) { 1613 workqueue_destroy(sc->sc_workq_txrx); 1614 sc->sc_workq_txrx = NULL; 1615 } 1616 1617 return 0; 1618 } 1619 1620 static int 1621 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 1622 { 1623 struct ifnet *ifp = &ec->ec_if; 1624 struct ixl_softc *sc = ifp->if_softc; 1625 int rv; 1626 1627 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 1628 return 0; 1629 } 1630 1631 if (set) { 1632 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid, 1633 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1634 if (rv == 0) { 1635 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 1636 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1637 } 1638 } else { 1639 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid, 1640 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1641 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid, 1642 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1643 } 1644 1645 return rv; 1646 } 1647 1648 static void 1649 ixl_media_add(struct ixl_softc *sc) 1650 { 1651 struct ifmedia *ifm = &sc->sc_media; 1652 const struct ixl_phy_type *itype; 1653 unsigned int i; 1654 bool flow; 1655 1656 if (ISSET(sc->sc_phy_abilities, 1657 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1658 flow = true; 1659 } else { 1660 flow = false; 1661 } 1662 1663 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 1664 itype = &ixl_phy_type_map[i]; 1665 1666 if (ISSET(sc->sc_phy_types, itype->phy_type)) { 1667 ifmedia_add(ifm, 1668 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL); 1669 1670 if (flow) { 1671 ifmedia_add(ifm, 1672 IFM_ETHER | IFM_FDX | IFM_FLOW | 1673 itype->ifm_type, 0, NULL); 1674 } 1675 1676 if (itype->ifm_type != IFM_100_TX) 1677 continue; 1678 1679 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 1680 0, NULL); 1681 if (flow) { 1682 ifmedia_add(ifm, 1683 IFM_ETHER | 
IFM_FLOW | itype->ifm_type, 1684 0, NULL); 1685 } 1686 } 1687 } 1688 } 1689 1690 static void 1691 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1692 { 1693 struct ixl_softc *sc = ifp->if_softc; 1694 1695 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 1696 1697 ifmr->ifm_status = sc->sc_media_status; 1698 ifmr->ifm_active = sc->sc_media_active; 1699 } 1700 1701 static int 1702 ixl_media_change(struct ifnet *ifp) 1703 { 1704 struct ixl_softc *sc = ifp->if_softc; 1705 struct ifmedia *ifm = &sc->sc_media; 1706 uint64_t ifm_active = sc->sc_media_active; 1707 uint8_t link_speed, abilities; 1708 1709 switch (IFM_SUBTYPE(ifm_active)) { 1710 case IFM_1000_SGMII: 1711 case IFM_1000_KX: 1712 case IFM_10G_KX4: 1713 case IFM_10G_KR: 1714 case IFM_40G_KR4: 1715 case IFM_20G_KR2: 1716 case IFM_25G_KR: 1717 /* backplanes */ 1718 return EINVAL; 1719 } 1720 1721 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP; 1722 1723 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1724 case IFM_AUTO: 1725 link_speed = sc->sc_phy_linkspeed; 1726 break; 1727 case IFM_NONE: 1728 link_speed = 0; 1729 CLR(abilities, IXL_PHY_ABILITY_LINKUP); 1730 break; 1731 default: 1732 link_speed = ixl_search_baudrate( 1733 ifmedia_baudrate(ifm->ifm_media)); 1734 } 1735 1736 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) { 1737 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0) 1738 return EINVAL; 1739 } 1740 1741 if (ifm->ifm_media & IFM_FLOW) { 1742 abilities |= sc->sc_phy_abilities & 1743 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX); 1744 } 1745 1746 return ixl_set_phy_config(sc, link_speed, abilities, false); 1747 } 1748 1749 static void 1750 ixl_watchdog(struct ifnet *ifp) 1751 { 1752 1753 } 1754 1755 static void 1756 ixl_del_all_multiaddr(struct ixl_softc *sc) 1757 { 1758 struct ethercom *ec = &sc->sc_ec; 1759 struct ether_multi *enm; 1760 struct ether_multistep step; 1761 1762 ETHER_LOCK(ec); 1763 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1764 ETHER_NEXT_MULTI(step, enm)) { 1765 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1766 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1767 } 1768 ETHER_UNLOCK(ec); 1769 } 1770 1771 static int 1772 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1773 { 1774 struct ifnet *ifp = &sc->sc_ec.ec_if; 1775 int rv; 1776 1777 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) 1778 return 0; 1779 1780 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) { 1781 ixl_del_all_multiaddr(sc); 1782 SET(ifp->if_flags, IFF_ALLMULTI); 1783 return ENETRESET; 1784 } 1785 1786 /* multicast address can not use VLAN HWFILTER */ 1787 rv = ixl_add_macvlan(sc, addrlo, 0, 1788 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1789 1790 if (rv == ENOSPC) { 1791 ixl_del_all_multiaddr(sc); 1792 SET(ifp->if_flags, IFF_ALLMULTI); 1793 return ENETRESET; 1794 } 1795 1796 return rv; 1797 } 1798 1799 static int 1800 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1801 { 1802 struct ifnet *ifp = &sc->sc_ec.ec_if; 1803 struct ethercom *ec = &sc->sc_ec; 1804 struct ether_multi *enm, *enm_last; 1805 struct ether_multistep step; 1806 int error, rv = 0; 1807 1808 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) { 1809 ixl_remove_macvlan(sc, addrlo, 0, 1810 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1811 return 0; 1812 } 1813 1814 ETHER_LOCK(ec); 1815 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1816 ETHER_NEXT_MULTI(step, enm)) { 1817 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1818 ETHER_ADDR_LEN) != 0) { 1819 goto out; 1820 } 1821 } 1822 1823 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1824 
ETHER_NEXT_MULTI(step, enm)) { 1825 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0, 1826 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1827 if (error != 0) 1828 break; 1829 } 1830 1831 if (enm != NULL) { 1832 enm_last = enm; 1833 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1834 ETHER_NEXT_MULTI(step, enm)) { 1835 if (enm == enm_last) 1836 break; 1837 1838 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1839 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1840 } 1841 } else { 1842 CLR(ifp->if_flags, IFF_ALLMULTI); 1843 rv = ENETRESET; 1844 } 1845 1846 out: 1847 ETHER_UNLOCK(ec); 1848 return rv; 1849 } 1850 1851 static int 1852 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1853 { 1854 struct ifreq *ifr = (struct ifreq *)data; 1855 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc; 1856 const struct sockaddr *sa; 1857 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN]; 1858 int s, error = 0; 1859 unsigned int nmtu; 1860 1861 switch (cmd) { 1862 case SIOCSIFMTU: 1863 nmtu = ifr->ifr_mtu; 1864 1865 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) { 1866 error = EINVAL; 1867 break; 1868 } 1869 if (ifp->if_mtu != nmtu) { 1870 s = splnet(); 1871 error = ether_ioctl(ifp, cmd, data); 1872 splx(s); 1873 if (error == ENETRESET) 1874 error = ixl_init(ifp); 1875 } 1876 break; 1877 case SIOCADDMULTI: 1878 sa = ifreq_getaddr(SIOCADDMULTI, ifr); 1879 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) { 1880 error = ether_multiaddr(sa, addrlo, addrhi); 1881 if (error != 0) 1882 return error; 1883 1884 error = ixl_add_multi(sc, addrlo, addrhi); 1885 if (error != 0 && error != ENETRESET) { 1886 ether_delmulti(sa, &sc->sc_ec); 1887 error = EIO; 1888 } 1889 } 1890 break; 1891 1892 case SIOCDELMULTI: 1893 sa = ifreq_getaddr(SIOCDELMULTI, ifr); 1894 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) { 1895 error = ether_multiaddr(sa, addrlo, addrhi); 1896 if (error != 0) 1897 return error; 1898 1899 error = ixl_del_multi(sc, addrlo, addrhi); 1900 } 1901 break; 1902 1903 default: 1904 s = splnet(); 1905 error = ether_ioctl(ifp, cmd, data); 1906 splx(s); 1907 } 1908 1909 if (error == ENETRESET) 1910 error = ixl_iff(sc); 1911 1912 return error; 1913 } 1914 1915 static enum i40e_mac_type 1916 ixl_mactype(pci_product_id_t id) 1917 { 1918 1919 switch (id) { 1920 case PCI_PRODUCT_INTEL_XL710_SFP: 1921 case PCI_PRODUCT_INTEL_XL710_KX_B: 1922 case PCI_PRODUCT_INTEL_XL710_KX_C: 1923 case PCI_PRODUCT_INTEL_XL710_QSFP_A: 1924 case PCI_PRODUCT_INTEL_XL710_QSFP_B: 1925 case PCI_PRODUCT_INTEL_XL710_QSFP_C: 1926 case PCI_PRODUCT_INTEL_X710_10G_T: 1927 case PCI_PRODUCT_INTEL_XL710_20G_BP_1: 1928 case PCI_PRODUCT_INTEL_XL710_20G_BP_2: 1929 case PCI_PRODUCT_INTEL_X710_T4_10G: 1930 case PCI_PRODUCT_INTEL_XXV710_25G_BP: 1931 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28: 1932 return I40E_MAC_XL710; 1933 1934 case PCI_PRODUCT_INTEL_X722_KX: 1935 case PCI_PRODUCT_INTEL_X722_QSFP: 1936 case PCI_PRODUCT_INTEL_X722_SFP: 1937 case PCI_PRODUCT_INTEL_X722_1G_BASET: 1938 case PCI_PRODUCT_INTEL_X722_10G_BASET: 1939 case PCI_PRODUCT_INTEL_X722_I_SFP: 1940 return I40E_MAC_X722; 1941 } 1942 1943 return I40E_MAC_GENERIC; 1944 } 1945 1946 static inline void * 1947 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i) 1948 { 1949 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 1950 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1951 1952 if (i >= e->hmc_count) 1953 return NULL; 1954 1955 kva += e->hmc_base; 1956 kva += i * e->hmc_size; 1957 1958 return kva; 1959 } 1960 1961 static inline size_t 1962 ixl_hmc_len(struct ixl_softc *sc, enum 
ixl_hmc_types type) 1963 { 1964 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1965 1966 return e->hmc_size; 1967 } 1968 1969 static void 1970 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1971 { 1972 struct ixl_rx_ring *rxr = qp->qp_rxr; 1973 1974 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 1975 I40E_PFINT_DYN_CTLN_INTENA_MASK | 1976 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 1977 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 1978 ixl_flush(sc); 1979 } 1980 1981 static void 1982 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1983 { 1984 struct ixl_rx_ring *rxr = qp->qp_rxr; 1985 1986 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 1987 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 1988 ixl_flush(sc); 1989 } 1990 1991 static void 1992 ixl_enable_other_intr(struct ixl_softc *sc) 1993 { 1994 1995 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 1996 I40E_PFINT_DYN_CTL0_INTENA_MASK | 1997 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 1998 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 1999 ixl_flush(sc); 2000 } 2001 2002 static void 2003 ixl_disable_other_intr(struct ixl_softc *sc) 2004 { 2005 2006 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 2007 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 2008 ixl_flush(sc); 2009 } 2010 2011 static int 2012 ixl_reinit(struct ixl_softc *sc) 2013 { 2014 struct ixl_rx_ring *rxr; 2015 struct ixl_tx_ring *txr; 2016 unsigned int i; 2017 uint32_t reg; 2018 2019 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2020 2021 if (ixl_get_vsi(sc) != 0) 2022 return EIO; 2023 2024 if (ixl_set_vsi(sc) != 0) 2025 return EIO; 2026 2027 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2028 txr = sc->sc_qps[i].qp_txr; 2029 rxr = sc->sc_qps[i].qp_rxr; 2030 2031 ixl_txr_config(sc, txr); 2032 ixl_rxr_config(sc, rxr); 2033 } 2034 2035 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2036 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE); 2037 2038 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2039 txr = sc->sc_qps[i].qp_txr; 2040 rxr = sc->sc_qps[i].qp_rxr; 2041 2042 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE | 2043 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)); 2044 ixl_flush(sc); 2045 2046 ixl_wr(sc, txr->txr_tail, txr->txr_prod); 2047 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod); 2048 2049 /* ixl_rxfill() needs lock held */ 2050 mutex_enter(&rxr->rxr_lock); 2051 ixl_rxfill(sc, rxr); 2052 mutex_exit(&rxr->rxr_lock); 2053 2054 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2055 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2056 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2057 if (ixl_rxr_enabled(sc, rxr) != 0) 2058 goto stop; 2059 2060 ixl_txr_qdis(sc, txr, 1); 2061 2062 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2063 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2064 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2065 2066 if (ixl_txr_enabled(sc, txr) != 0) 2067 goto stop; 2068 } 2069 2070 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2071 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2072 2073 return 0; 2074 2075 stop: 2076 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2077 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2078 2079 return ETIMEDOUT; 2080 } 2081 2082 static int 2083 ixl_init_locked(struct ixl_softc *sc) 2084 { 2085 struct ifnet *ifp = &sc->sc_ec.ec_if; 2086 unsigned int i; 2087 int error, eccap_change; 2088 2089 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2090 2091 if (ISSET(ifp->if_flags, IFF_RUNNING)) 2092 ixl_stop_locked(sc); 2093 2094 if (sc->sc_dead) { 2095 return ENXIO; 2096 } 2097 2098 eccap_change = sc->sc_ec.ec_capenable ^ 
sc->sc_cur_ec_capenable; 2099 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING)) 2100 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 2101 2102 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) { 2103 if (ixl_update_macvlan(sc) == 0) { 2104 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 2105 } else { 2106 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 2107 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 2108 } 2109 } 2110 2111 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX) 2112 sc->sc_nqueue_pairs = 1; 2113 else 2114 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 2115 2116 error = ixl_reinit(sc); 2117 if (error) { 2118 ixl_stop_locked(sc); 2119 return error; 2120 } 2121 2122 SET(ifp->if_flags, IFF_RUNNING); 2123 CLR(ifp->if_flags, IFF_OACTIVE); 2124 2125 ixl_config_rss(sc); 2126 ixl_config_queue_intr(sc); 2127 2128 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2129 ixl_enable_queue_intr(sc, &sc->sc_qps[i]); 2130 } 2131 2132 error = ixl_iff(sc); 2133 if (error) { 2134 ixl_stop_locked(sc); 2135 return error; 2136 } 2137 2138 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 2139 2140 return 0; 2141 } 2142 2143 static int 2144 ixl_init(struct ifnet *ifp) 2145 { 2146 struct ixl_softc *sc = ifp->if_softc; 2147 int error; 2148 2149 mutex_enter(&sc->sc_cfg_lock); 2150 error = ixl_init_locked(sc); 2151 mutex_exit(&sc->sc_cfg_lock); 2152 2153 if (error == 0) 2154 (void)ixl_get_link_status(sc); 2155 2156 return error; 2157 } 2158 2159 static int 2160 ixl_iff(struct ixl_softc *sc) 2161 { 2162 struct ifnet *ifp = &sc->sc_ec.ec_if; 2163 struct ixl_atq iatq; 2164 struct ixl_aq_desc *iaq; 2165 struct ixl_aq_vsi_promisc_param *param; 2166 uint16_t flag_add, flag_del; 2167 int error; 2168 2169 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 2170 return 0; 2171 2172 memset(&iatq, 0, sizeof(iatq)); 2173 2174 iaq = &iatq.iatq_desc; 2175 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC); 2176 2177 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param; 2178 param->flags = htole16(0); 2179 2180 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER) 2181 || ISSET(ifp->if_flags, IFF_PROMISC)) { 2182 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2183 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2184 } 2185 2186 if (ISSET(ifp->if_flags, IFF_PROMISC)) { 2187 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2188 IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2189 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) { 2190 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2191 } 2192 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2193 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2194 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2195 param->seid = sc->sc_seid; 2196 2197 error = ixl_atq_exec(sc, &iatq); 2198 if (error) 2199 return error; 2200 2201 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) 2202 return EIO; 2203 2204 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) { 2205 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 2206 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH; 2207 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH; 2208 } else { 2209 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN; 2210 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN; 2211 } 2212 2213 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del); 2214 2215 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 2216 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add); 2217 } 2218 return 0; 2219 } 2220 2221 static void 2222 ixl_stop_rendezvous(struct ixl_softc *sc) 2223 { 
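	/*
	 * Descriptive note (added): take and release each ring lock so
	 * that any interrupt or softint handler still running inside the
	 * rings has drained, then wait for per-queue work that may still
	 * be pending on the tx/rx workqueue.
	 */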
2224 struct ixl_tx_ring *txr; 2225 struct ixl_rx_ring *rxr; 2226 unsigned int i; 2227 2228 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2229 txr = sc->sc_qps[i].qp_txr; 2230 rxr = sc->sc_qps[i].qp_rxr; 2231 2232 mutex_enter(&txr->txr_lock); 2233 mutex_exit(&txr->txr_lock); 2234 2235 mutex_enter(&rxr->rxr_lock); 2236 mutex_exit(&rxr->rxr_lock); 2237 2238 sc->sc_qps[i].qp_workqueue = false; 2239 workqueue_wait(sc->sc_workq_txrx, 2240 &sc->sc_qps[i].qp_work); 2241 } 2242 } 2243 2244 static void 2245 ixl_stop_locked(struct ixl_softc *sc) 2246 { 2247 struct ifnet *ifp = &sc->sc_ec.ec_if; 2248 struct ixl_rx_ring *rxr; 2249 struct ixl_tx_ring *txr; 2250 unsigned int i; 2251 uint32_t reg; 2252 2253 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2254 2255 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE); 2256 callout_stop(&sc->sc_stats_callout); 2257 2258 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2259 txr = sc->sc_qps[i].qp_txr; 2260 rxr = sc->sc_qps[i].qp_rxr; 2261 2262 ixl_disable_queue_intr(sc, &sc->sc_qps[i]); 2263 2264 mutex_enter(&txr->txr_lock); 2265 ixl_txr_qdis(sc, txr, 0); 2266 mutex_exit(&txr->txr_lock); 2267 } 2268 2269 /* XXX wait at least 400 usec for all tx queues in one go */ 2270 ixl_flush(sc); 2271 DELAY(500); 2272 2273 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2274 txr = sc->sc_qps[i].qp_txr; 2275 rxr = sc->sc_qps[i].qp_rxr; 2276 2277 mutex_enter(&txr->txr_lock); 2278 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2279 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2280 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2281 mutex_exit(&txr->txr_lock); 2282 2283 mutex_enter(&rxr->rxr_lock); 2284 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2285 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2286 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2287 mutex_exit(&rxr->rxr_lock); 2288 } 2289 2290 /* XXX short wait for all queue disables to settle */ 2291 ixl_flush(sc); 2292 DELAY(50); 2293 2294 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2295 txr = sc->sc_qps[i].qp_txr; 2296 rxr = sc->sc_qps[i].qp_rxr; 2297 2298 mutex_enter(&txr->txr_lock); 2299 if (ixl_txr_disabled(sc, txr) != 0) { 2300 mutex_exit(&txr->txr_lock); 2301 goto die; 2302 } 2303 mutex_exit(&txr->txr_lock); 2304 2305 mutex_enter(&rxr->rxr_lock); 2306 if (ixl_rxr_disabled(sc, rxr) != 0) { 2307 mutex_exit(&rxr->rxr_lock); 2308 goto die; 2309 } 2310 mutex_exit(&rxr->rxr_lock); 2311 } 2312 2313 ixl_stop_rendezvous(sc); 2314 2315 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2316 txr = sc->sc_qps[i].qp_txr; 2317 rxr = sc->sc_qps[i].qp_rxr; 2318 2319 mutex_enter(&txr->txr_lock); 2320 ixl_txr_unconfig(sc, txr); 2321 mutex_exit(&txr->txr_lock); 2322 2323 mutex_enter(&rxr->rxr_lock); 2324 ixl_rxr_unconfig(sc, rxr); 2325 mutex_exit(&rxr->rxr_lock); 2326 2327 ixl_txr_clean(sc, txr); 2328 ixl_rxr_clean(sc, rxr); 2329 } 2330 2331 return; 2332 die: 2333 sc->sc_dead = true; 2334 log(LOG_CRIT, "%s: failed to shut down rings", 2335 device_xname(sc->sc_dev)); 2336 return; 2337 } 2338 2339 static void 2340 ixl_stop(struct ifnet *ifp, int disable) 2341 { 2342 struct ixl_softc *sc = ifp->if_softc; 2343 2344 mutex_enter(&sc->sc_cfg_lock); 2345 ixl_stop_locked(sc); 2346 mutex_exit(&sc->sc_cfg_lock); 2347 } 2348 2349 static int 2350 ixl_queue_pairs_alloc(struct ixl_softc *sc) 2351 { 2352 struct ixl_queue_pair *qp; 2353 unsigned int i; 2354 size_t sz; 2355 2356 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2357 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP); 2358 2359 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2360 qp = &sc->sc_qps[i]; 2361 2362 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2363 
ixl_handle_queue, qp); 2364 if (qp->qp_si == NULL) 2365 goto free; 2366 2367 qp->qp_txr = ixl_txr_alloc(sc, i); 2368 if (qp->qp_txr == NULL) 2369 goto free; 2370 2371 qp->qp_rxr = ixl_rxr_alloc(sc, i); 2372 if (qp->qp_rxr == NULL) 2373 goto free; 2374 2375 qp->qp_sc = sc; 2376 snprintf(qp->qp_name, sizeof(qp->qp_name), 2377 "%s-TXRX%d", device_xname(sc->sc_dev), i); 2378 } 2379 2380 return 0; 2381 free: 2382 if (sc->sc_qps != NULL) { 2383 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2384 qp = &sc->sc_qps[i]; 2385 2386 if (qp->qp_txr != NULL) 2387 ixl_txr_free(sc, qp->qp_txr); 2388 if (qp->qp_rxr != NULL) 2389 ixl_rxr_free(sc, qp->qp_rxr); 2390 if (qp->qp_si != NULL) 2391 softint_disestablish(qp->qp_si); 2392 } 2393 2394 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2395 kmem_free(sc->sc_qps, sz); 2396 sc->sc_qps = NULL; 2397 } 2398 2399 return -1; 2400 } 2401 2402 static void 2403 ixl_queue_pairs_free(struct ixl_softc *sc) 2404 { 2405 struct ixl_queue_pair *qp; 2406 unsigned int i; 2407 size_t sz; 2408 2409 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2410 qp = &sc->sc_qps[i]; 2411 ixl_txr_free(sc, qp->qp_txr); 2412 ixl_rxr_free(sc, qp->qp_rxr); 2413 softint_disestablish(qp->qp_si); 2414 } 2415 2416 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2417 kmem_free(sc->sc_qps, sz); 2418 sc->sc_qps = NULL; 2419 } 2420 2421 static struct ixl_tx_ring * 2422 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid) 2423 { 2424 struct ixl_tx_ring *txr = NULL; 2425 struct ixl_tx_map *maps = NULL, *txm; 2426 unsigned int i; 2427 2428 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP); 2429 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs, 2430 KM_SLEEP); 2431 2432 if (ixl_dmamem_alloc(sc, &txr->txr_mem, 2433 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs, 2434 IXL_TX_QUEUE_ALIGN) != 0) 2435 goto free; 2436 2437 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2438 txm = &maps[i]; 2439 2440 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE, 2441 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0, 2442 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0) 2443 goto uncreate; 2444 2445 txm->txm_eop = -1; 2446 txm->txm_m = NULL; 2447 } 2448 2449 txr->txr_cons = txr->txr_prod = 0; 2450 txr->txr_maps = maps; 2451 2452 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP); 2453 if (txr->txr_intrq == NULL) 2454 goto uncreate; 2455 2456 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2457 ixl_deferred_transmit, txr); 2458 if (txr->txr_si == NULL) 2459 goto destroy_pcq; 2460 2461 txr->txr_tail = I40E_QTX_TAIL(qid); 2462 txr->txr_qid = qid; 2463 txr->txr_sc = sc; 2464 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET); 2465 2466 return txr; 2467 2468 destroy_pcq: 2469 pcq_destroy(txr->txr_intrq); 2470 uncreate: 2471 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2472 txm = &maps[i]; 2473 2474 if (txm->txm_map == NULL) 2475 continue; 2476 2477 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2478 } 2479 2480 ixl_dmamem_free(sc, &txr->txr_mem); 2481 free: 2482 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2483 kmem_free(txr, sizeof(*txr)); 2484 2485 return NULL; 2486 } 2487 2488 static void 2489 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable) 2490 { 2491 unsigned int qid; 2492 bus_size_t reg; 2493 uint32_t r; 2494 2495 qid = txr->txr_qid + sc->sc_base_queue; 2496 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128); 2497 qid %= 128; 2498 2499 r = ixl_rd(sc, reg); 2500 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK); 2501 SET(r, qid << 
I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 2502 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK : 2503 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK); 2504 ixl_wr(sc, reg, r); 2505 } 2506 2507 static void 2508 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2509 { 2510 struct ixl_hmc_txq txq; 2511 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch); 2512 void *hmc; 2513 2514 memset(&txq, 0, sizeof(txq)); 2515 txq.head = htole16(txr->txr_cons); 2516 txq.new_context = 1; 2517 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT); 2518 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB; 2519 txq.qlen = htole16(sc->sc_tx_ring_ndescs); 2520 txq.tphrdesc_ena = 0; 2521 txq.tphrpacket_ena = 0; 2522 txq.tphwdesc_ena = 0; 2523 txq.rdylist = data->qs_handle[0]; 2524 2525 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2526 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2527 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, 2528 __arraycount(ixl_hmc_pack_txq)); 2529 } 2530 2531 static void 2532 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2533 { 2534 void *hmc; 2535 2536 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2537 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2538 txr->txr_cons = txr->txr_prod = 0; 2539 } 2540 2541 static void 2542 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2543 { 2544 struct ixl_tx_map *maps, *txm; 2545 bus_dmamap_t map; 2546 unsigned int i; 2547 2548 maps = txr->txr_maps; 2549 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2550 txm = &maps[i]; 2551 2552 if (txm->txm_m == NULL) 2553 continue; 2554 2555 map = txm->txm_map; 2556 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2557 BUS_DMASYNC_POSTWRITE); 2558 bus_dmamap_unload(sc->sc_dmat, map); 2559 2560 m_freem(txm->txm_m); 2561 txm->txm_m = NULL; 2562 } 2563 } 2564 2565 static int 2566 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2567 { 2568 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2569 uint32_t reg; 2570 int i; 2571 2572 for (i = 0; i < 10; i++) { 2573 reg = ixl_rd(sc, ena); 2574 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK)) 2575 return 0; 2576 2577 delaymsec(10); 2578 } 2579 2580 return ETIMEDOUT; 2581 } 2582 2583 static int 2584 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2585 { 2586 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2587 uint32_t reg; 2588 int i; 2589 2590 KASSERT(mutex_owned(&txr->txr_lock)); 2591 2592 for (i = 0; i < 10; i++) { 2593 reg = ixl_rd(sc, ena); 2594 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0) 2595 return 0; 2596 2597 delaymsec(10); 2598 } 2599 2600 return ETIMEDOUT; 2601 } 2602 2603 static void 2604 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2605 { 2606 struct ixl_tx_map *maps, *txm; 2607 struct mbuf *m; 2608 unsigned int i; 2609 2610 softint_disestablish(txr->txr_si); 2611 while ((m = pcq_get(txr->txr_intrq)) != NULL) 2612 m_freem(m); 2613 pcq_destroy(txr->txr_intrq); 2614 2615 maps = txr->txr_maps; 2616 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2617 txm = &maps[i]; 2618 2619 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2620 } 2621 2622 ixl_dmamem_free(sc, &txr->txr_mem); 2623 mutex_destroy(&txr->txr_lock); 2624 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2625 kmem_free(txr, sizeof(*txr)); 2626 } 2627 2628 static inline int 2629 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0, 2630 struct ixl_tx_ring *txr) 2631 { 2632 struct mbuf *m; 2633 int error; 2634 2635 KASSERT(mutex_owned(&txr->txr_lock)); 2636 2637 m = *m0; 2638 2639 
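	/*
	 * Descriptive note (added): try to load the mbuf chain as-is
	 * first.  EFBIG means the chain has more DMA segments than the
	 * map allows, so defragment it and retry the load once.
	 */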
error = bus_dmamap_load_mbuf(dmat, map, m, 2640 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2641 if (error != EFBIG) 2642 return error; 2643 2644 m = m_defrag(m, M_DONTWAIT); 2645 if (m != NULL) { 2646 *m0 = m; 2647 txr->txr_defragged.ev_count++; 2648 2649 error = bus_dmamap_load_mbuf(dmat, map, m, 2650 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2651 } else { 2652 txr->txr_defrag_failed.ev_count++; 2653 error = ENOBUFS; 2654 } 2655 2656 return error; 2657 } 2658 2659 static inline int 2660 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd) 2661 { 2662 struct ether_header *eh; 2663 size_t len; 2664 uint64_t cmd; 2665 2666 cmd = 0; 2667 2668 eh = mtod(m, struct ether_header *); 2669 switch (htons(eh->ether_type)) { 2670 case ETHERTYPE_IP: 2671 case ETHERTYPE_IPV6: 2672 len = ETHER_HDR_LEN; 2673 break; 2674 case ETHERTYPE_VLAN: 2675 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2676 break; 2677 default: 2678 len = 0; 2679 } 2680 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT); 2681 2682 if (m->m_pkthdr.csum_flags & 2683 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 2684 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4; 2685 } 2686 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) { 2687 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM; 2688 } 2689 2690 if (m->m_pkthdr.csum_flags & 2691 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 2692 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6; 2693 } 2694 2695 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) { 2696 case IXL_TX_DESC_CMD_IIPT_IPV4: 2697 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM: 2698 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2699 break; 2700 case IXL_TX_DESC_CMD_IIPT_IPV6: 2701 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2702 break; 2703 default: 2704 len = 0; 2705 } 2706 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT); 2707 2708 if (m->m_pkthdr.csum_flags & 2709 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) { 2710 len = sizeof(struct tcphdr); 2711 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP; 2712 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) { 2713 len = sizeof(struct udphdr); 2714 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP; 2715 } else { 2716 len = 0; 2717 } 2718 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT); 2719 2720 *cmd_txd |= cmd; 2721 return 0; 2722 } 2723 2724 static void 2725 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr, 2726 bool is_transmit) 2727 { 2728 struct ixl_softc *sc = ifp->if_softc; 2729 struct ixl_tx_desc *ring, *txd; 2730 struct ixl_tx_map *txm; 2731 bus_dmamap_t map; 2732 struct mbuf *m; 2733 uint64_t cmd, cmd_txd; 2734 unsigned int prod, free, last, i; 2735 unsigned int mask; 2736 int post = 0; 2737 2738 KASSERT(mutex_owned(&txr->txr_lock)); 2739 2740 if (!ISSET(ifp->if_flags, IFF_RUNNING) 2741 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) { 2742 if (!is_transmit) 2743 IFQ_PURGE(&ifp->if_snd); 2744 return; 2745 } 2746 2747 prod = txr->txr_prod; 2748 free = txr->txr_cons; 2749 if (free <= prod) 2750 free += sc->sc_tx_ring_ndescs; 2751 free -= prod; 2752 2753 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2754 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE); 2755 2756 ring = IXL_DMA_KVA(&txr->txr_mem); 2757 mask = sc->sc_tx_ring_ndescs - 1; 2758 last = prod; 2759 cmd = 0; 2760 txd = NULL; 2761 2762 for (;;) { 2763 if (free <= IXL_TX_PKT_DESCS) { 2764 if (!is_transmit) 2765 SET(ifp->if_flags, IFF_OACTIVE); 2766 break; 2767 } 2768 2769 if (is_transmit) 2770 m = pcq_get(txr->txr_intrq); 2771 else 2772 IFQ_DEQUEUE(&ifp->if_snd, m); 2773 2774 if (m == NULL) 2775 break; 2776 
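		/*
		 * Descriptive note (added): map the packet and post one
		 * data descriptor per DMA segment; EOP|RS is OR'd into the
		 * last descriptor below so the hardware reports completion
		 * for the whole packet.
		 */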
2777 txm = &txr->txr_maps[prod]; 2778 map = txm->txm_map; 2779 2780 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) { 2781 if_statinc(ifp, if_oerrors); 2782 m_freem(m); 2783 continue; 2784 } 2785 2786 cmd_txd = 0; 2787 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) { 2788 ixl_tx_setup_offloads(m, &cmd_txd); 2789 } 2790 2791 if (vlan_has_tag(m)) { 2792 cmd_txd |= (uint64_t)vlan_get_tag(m) << 2793 IXL_TX_DESC_L2TAG1_SHIFT; 2794 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1; 2795 } 2796 2797 bus_dmamap_sync(sc->sc_dmat, map, 0, 2798 map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2799 2800 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) { 2801 txd = &ring[prod]; 2802 2803 cmd = (uint64_t)map->dm_segs[i].ds_len << 2804 IXL_TX_DESC_BSIZE_SHIFT; 2805 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC; 2806 cmd |= cmd_txd; 2807 2808 txd->addr = htole64(map->dm_segs[i].ds_addr); 2809 txd->cmd = htole64(cmd); 2810 2811 last = prod; 2812 2813 prod++; 2814 prod &= mask; 2815 } 2816 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS; 2817 txd->cmd = htole64(cmd); 2818 2819 txm->txm_m = m; 2820 txm->txm_eop = last; 2821 2822 bpf_mtap(ifp, m, BPF_D_OUT); 2823 2824 free -= i; 2825 post = 1; 2826 } 2827 2828 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2829 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE); 2830 2831 if (post) { 2832 txr->txr_prod = prod; 2833 ixl_wr(sc, txr->txr_tail, prod); 2834 } 2835 } 2836 2837 static int 2838 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit) 2839 { 2840 struct ifnet *ifp = &sc->sc_ec.ec_if; 2841 struct ixl_tx_desc *ring, *txd; 2842 struct ixl_tx_map *txm; 2843 struct mbuf *m; 2844 bus_dmamap_t map; 2845 unsigned int cons, prod, last; 2846 unsigned int mask; 2847 uint64_t dtype; 2848 int done = 0, more = 0; 2849 2850 KASSERT(mutex_owned(&txr->txr_lock)); 2851 2852 prod = txr->txr_prod; 2853 cons = txr->txr_cons; 2854 2855 if (cons == prod) 2856 return 0; 2857 2858 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2859 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD); 2860 2861 ring = IXL_DMA_KVA(&txr->txr_mem); 2862 mask = sc->sc_tx_ring_ndescs - 1; 2863 2864 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2865 2866 do { 2867 if (txlimit-- <= 0) { 2868 more = 1; 2869 break; 2870 } 2871 2872 txm = &txr->txr_maps[cons]; 2873 last = txm->txm_eop; 2874 txd = &ring[last]; 2875 2876 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK); 2877 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE)) 2878 break; 2879 2880 map = txm->txm_map; 2881 2882 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2883 BUS_DMASYNC_POSTWRITE); 2884 bus_dmamap_unload(sc->sc_dmat, map); 2885 2886 m = txm->txm_m; 2887 if (m != NULL) { 2888 if_statinc_ref(nsr, if_opackets); 2889 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len); 2890 if (ISSET(m->m_flags, M_MCAST)) 2891 if_statinc_ref(nsr, if_omcasts); 2892 m_freem(m); 2893 } 2894 2895 txm->txm_m = NULL; 2896 txm->txm_eop = -1; 2897 2898 cons = last + 1; 2899 cons &= mask; 2900 done = 1; 2901 } while (cons != prod); 2902 2903 IF_STAT_PUTREF(ifp); 2904 2905 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem), 2906 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD); 2907 2908 txr->txr_cons = cons; 2909 2910 if (done) { 2911 softint_schedule(txr->txr_si); 2912 if (txr->txr_qid == 0) { 2913 CLR(ifp->if_flags, IFF_OACTIVE); 2914 if_schedule_deferred_start(ifp); 2915 } 2916 } 2917 2918 return more; 2919 } 2920 2921 static void 2922 ixl_start(struct ifnet *ifp) 2923 { 2924 struct ixl_softc *sc; 2925 struct ixl_tx_ring *txr; 
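	/*
	 * Descriptive note (added): the if_start path services only the
	 * first queue pair; multiqueue transmit goes through
	 * ixl_transmit(), which selects a queue per CPU.
	 */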
2926 2927 sc = ifp->if_softc; 2928 txr = sc->sc_qps[0].qp_txr; 2929 2930 mutex_enter(&txr->txr_lock); 2931 ixl_tx_common_locked(ifp, txr, false); 2932 mutex_exit(&txr->txr_lock); 2933 } 2934 2935 static inline unsigned int 2936 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m) 2937 { 2938 u_int cpuid; 2939 2940 cpuid = cpu_index(curcpu()); 2941 2942 return (unsigned int)(cpuid % sc->sc_nqueue_pairs); 2943 } 2944 2945 static int 2946 ixl_transmit(struct ifnet *ifp, struct mbuf *m) 2947 { 2948 struct ixl_softc *sc; 2949 struct ixl_tx_ring *txr; 2950 unsigned int qid; 2951 2952 sc = ifp->if_softc; 2953 qid = ixl_select_txqueue(sc, m); 2954 2955 txr = sc->sc_qps[qid].qp_txr; 2956 2957 if (__predict_false(!pcq_put(txr->txr_intrq, m))) { 2958 mutex_enter(&txr->txr_lock); 2959 txr->txr_pcqdrop.ev_count++; 2960 mutex_exit(&txr->txr_lock); 2961 2962 m_freem(m); 2963 return ENOBUFS; 2964 } 2965 2966 if (mutex_tryenter(&txr->txr_lock)) { 2967 ixl_tx_common_locked(ifp, txr, true); 2968 mutex_exit(&txr->txr_lock); 2969 } else { 2970 kpreempt_disable(); 2971 softint_schedule(txr->txr_si); 2972 kpreempt_enable(); 2973 } 2974 2975 return 0; 2976 } 2977 2978 static void 2979 ixl_deferred_transmit(void *xtxr) 2980 { 2981 struct ixl_tx_ring *txr = xtxr; 2982 struct ixl_softc *sc = txr->txr_sc; 2983 struct ifnet *ifp = &sc->sc_ec.ec_if; 2984 2985 mutex_enter(&txr->txr_lock); 2986 txr->txr_transmitdef.ev_count++; 2987 if (pcq_peek(txr->txr_intrq) != NULL) 2988 ixl_tx_common_locked(ifp, txr, true); 2989 mutex_exit(&txr->txr_lock); 2990 } 2991 2992 static struct ixl_rx_ring * 2993 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid) 2994 { 2995 struct ixl_rx_ring *rxr = NULL; 2996 struct ixl_rx_map *maps = NULL, *rxm; 2997 unsigned int i; 2998 2999 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP); 3000 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs, 3001 KM_SLEEP); 3002 3003 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem, 3004 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs, 3005 IXL_RX_QUEUE_ALIGN) != 0) 3006 goto free; 3007 3008 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3009 rxm = &maps[i]; 3010 3011 if (bus_dmamap_create(sc->sc_dmat, 3012 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0, 3013 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0) 3014 goto uncreate; 3015 3016 rxm->rxm_m = NULL; 3017 } 3018 3019 rxr->rxr_cons = rxr->rxr_prod = 0; 3020 rxr->rxr_m_head = NULL; 3021 rxr->rxr_m_tail = &rxr->rxr_m_head; 3022 rxr->rxr_maps = maps; 3023 3024 rxr->rxr_tail = I40E_QRX_TAIL(qid); 3025 rxr->rxr_qid = qid; 3026 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET); 3027 3028 return rxr; 3029 3030 uncreate: 3031 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3032 rxm = &maps[i]; 3033 3034 if (rxm->rxm_map == NULL) 3035 continue; 3036 3037 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3038 } 3039 3040 ixl_dmamem_free(sc, &rxr->rxr_mem); 3041 free: 3042 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3043 kmem_free(rxr, sizeof(*rxr)); 3044 3045 return NULL; 3046 } 3047 3048 static void 3049 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3050 { 3051 struct ixl_rx_map *maps, *rxm; 3052 bus_dmamap_t map; 3053 unsigned int i; 3054 3055 maps = rxr->rxr_maps; 3056 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3057 rxm = &maps[i]; 3058 3059 if (rxm->rxm_m == NULL) 3060 continue; 3061 3062 map = rxm->rxm_map; 3063 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3064 BUS_DMASYNC_POSTWRITE); 3065 bus_dmamap_unload(sc->sc_dmat, map); 3066 3067 m_freem(rxm->rxm_m); 3068 rxm->rxm_m = NULL; 
3069 } 3070 3071 m_freem(rxr->rxr_m_head); 3072 rxr->rxr_m_head = NULL; 3073 rxr->rxr_m_tail = &rxr->rxr_m_head; 3074 3075 rxr->rxr_prod = rxr->rxr_cons = 0; 3076 } 3077 3078 static int 3079 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3080 { 3081 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3082 uint32_t reg; 3083 int i; 3084 3085 for (i = 0; i < 10; i++) { 3086 reg = ixl_rd(sc, ena); 3087 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK)) 3088 return 0; 3089 3090 delaymsec(10); 3091 } 3092 3093 return ETIMEDOUT; 3094 } 3095 3096 static int 3097 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3098 { 3099 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid); 3100 uint32_t reg; 3101 int i; 3102 3103 KASSERT(mutex_owned(&rxr->rxr_lock)); 3104 3105 for (i = 0; i < 10; i++) { 3106 reg = ixl_rd(sc, ena); 3107 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0) 3108 return 0; 3109 3110 delaymsec(10); 3111 } 3112 3113 return ETIMEDOUT; 3114 } 3115 3116 static void 3117 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3118 { 3119 struct ixl_hmc_rxq rxq; 3120 struct ifnet *ifp = &sc->sc_ec.ec_if; 3121 uint16_t rxmax; 3122 void *hmc; 3123 3124 memset(&rxq, 0, sizeof(rxq)); 3125 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN; 3126 3127 rxq.head = htole16(rxr->rxr_cons); 3128 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT); 3129 rxq.qlen = htole16(sc->sc_rx_ring_ndescs); 3130 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT); 3131 rxq.hbuff = 0; 3132 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT; 3133 rxq.dsize = IXL_HMC_RXQ_DSIZE_32; 3134 rxq.crcstrip = 1; 3135 rxq.l2sel = 1; 3136 rxq.showiv = 1; 3137 rxq.rxmax = htole16(rxmax); 3138 rxq.tphrdesc_ena = 0; 3139 rxq.tphwdesc_ena = 0; 3140 rxq.tphdata_ena = 0; 3141 rxq.tphhead_ena = 0; 3142 rxq.lrxqthresh = 0; 3143 rxq.prefena = 1; 3144 3145 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3146 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3147 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, 3148 __arraycount(ixl_hmc_pack_rxq)); 3149 } 3150 3151 static void 3152 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3153 { 3154 void *hmc; 3155 3156 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid); 3157 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX)); 3158 rxr->rxr_cons = rxr->rxr_prod = 0; 3159 } 3160 3161 static void 3162 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr) 3163 { 3164 struct ixl_rx_map *maps, *rxm; 3165 unsigned int i; 3166 3167 maps = rxr->rxr_maps; 3168 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { 3169 rxm = &maps[i]; 3170 3171 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map); 3172 } 3173 3174 ixl_dmamem_free(sc, &rxr->rxr_mem); 3175 mutex_destroy(&rxr->rxr_lock); 3176 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs); 3177 kmem_free(rxr, sizeof(*rxr)); 3178 } 3179 3180 static inline void 3181 ixl_rx_csum(struct mbuf *m, uint64_t qword) 3182 { 3183 int flags_mask; 3184 3185 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) { 3186 /* No L3 or L4 checksum was calculated */ 3187 return; 3188 } 3189 3190 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) { 3191 case IXL_RX_DESC_PTYPE_IPV4FRAG: 3192 case IXL_RX_DESC_PTYPE_IPV4: 3193 case IXL_RX_DESC_PTYPE_SCTPV4: 3194 case IXL_RX_DESC_PTYPE_ICMPV4: 3195 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3196 break; 3197 case IXL_RX_DESC_PTYPE_TCPV4: 3198 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 3199 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD; 3200 break; 3201 case IXL_RX_DESC_PTYPE_UDPV4: 3202 flags_mask = M_CSUM_IPv4 | 
M_CSUM_IPv4_BAD; 3203 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD; 3204 break; 3205 case IXL_RX_DESC_PTYPE_TCPV6: 3206 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD; 3207 break; 3208 case IXL_RX_DESC_PTYPE_UDPV6: 3209 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD; 3210 break; 3211 default: 3212 flags_mask = 0; 3213 } 3214 3215 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 | 3216 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)); 3217 3218 if (ISSET(qword, IXL_RX_DESC_IPE)) { 3219 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD); 3220 } 3221 3222 if (ISSET(qword, IXL_RX_DESC_L4E)) { 3223 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD); 3224 } 3225 } 3226 3227 static int 3228 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit) 3229 { 3230 struct ifnet *ifp = &sc->sc_ec.ec_if; 3231 struct ixl_rx_wb_desc_32 *ring, *rxd; 3232 struct ixl_rx_map *rxm; 3233 bus_dmamap_t map; 3234 unsigned int cons, prod; 3235 struct mbuf *m; 3236 uint64_t word, word0; 3237 unsigned int len; 3238 unsigned int mask; 3239 int done = 0, more = 0; 3240 3241 KASSERT(mutex_owned(&rxr->rxr_lock)); 3242 3243 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 3244 return 0; 3245 3246 prod = rxr->rxr_prod; 3247 cons = rxr->rxr_cons; 3248 3249 if (cons == prod) 3250 return 0; 3251 3252 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3253 0, IXL_DMA_LEN(&rxr->rxr_mem), 3254 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3255 3256 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3257 mask = sc->sc_rx_ring_ndescs - 1; 3258 3259 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 3260 3261 do { 3262 if (rxlimit-- <= 0) { 3263 more = 1; 3264 break; 3265 } 3266 3267 rxd = &ring[cons]; 3268 3269 word = le64toh(rxd->qword1); 3270 3271 if (!ISSET(word, IXL_RX_DESC_DD)) 3272 break; 3273 3274 rxm = &rxr->rxr_maps[cons]; 3275 3276 map = rxm->rxm_map; 3277 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3278 BUS_DMASYNC_POSTREAD); 3279 bus_dmamap_unload(sc->sc_dmat, map); 3280 3281 m = rxm->rxm_m; 3282 rxm->rxm_m = NULL; 3283 3284 KASSERT(m != NULL); 3285 3286 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT; 3287 m->m_len = len; 3288 m->m_pkthdr.len = 0; 3289 3290 m->m_next = NULL; 3291 *rxr->rxr_m_tail = m; 3292 rxr->rxr_m_tail = &m->m_next; 3293 3294 m = rxr->rxr_m_head; 3295 m->m_pkthdr.len += len; 3296 3297 if (ISSET(word, IXL_RX_DESC_EOP)) { 3298 word0 = le64toh(rxd->qword0); 3299 3300 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) { 3301 vlan_set_tag(m, 3302 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK)); 3303 } 3304 3305 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0) 3306 ixl_rx_csum(m, word); 3307 3308 if (!ISSET(word, 3309 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) { 3310 m_set_rcvif(m, ifp); 3311 if_statinc_ref(nsr, if_ipackets); 3312 if_statadd_ref(nsr, if_ibytes, 3313 m->m_pkthdr.len); 3314 if_percpuq_enqueue(ifp->if_percpuq, m); 3315 } else { 3316 if_statinc_ref(nsr, if_ierrors); 3317 m_freem(m); 3318 } 3319 3320 rxr->rxr_m_head = NULL; 3321 rxr->rxr_m_tail = &rxr->rxr_m_head; 3322 } 3323 3324 cons++; 3325 cons &= mask; 3326 3327 done = 1; 3328 } while (cons != prod); 3329 3330 if (done) { 3331 rxr->rxr_cons = cons; 3332 if (ixl_rxfill(sc, rxr) == -1) 3333 if_statinc_ref(nsr, if_iqdrops); 3334 } 3335 3336 IF_STAT_PUTREF(ifp); 3337 3338 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem), 3339 0, IXL_DMA_LEN(&rxr->rxr_mem), 3340 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3341 3342 return more; 3343 } 3344 3345 static int 3346 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring 
*rxr) 3347 { 3348 struct ixl_rx_rd_desc_32 *ring, *rxd; 3349 struct ixl_rx_map *rxm; 3350 bus_dmamap_t map; 3351 struct mbuf *m; 3352 unsigned int prod; 3353 unsigned int slots; 3354 unsigned int mask; 3355 int post = 0, error = 0; 3356 3357 KASSERT(mutex_owned(&rxr->rxr_lock)); 3358 3359 prod = rxr->rxr_prod; 3360 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons, 3361 sc->sc_rx_ring_ndescs); 3362 3363 ring = IXL_DMA_KVA(&rxr->rxr_mem); 3364 mask = sc->sc_rx_ring_ndescs - 1; 3365 3366 if (__predict_false(slots <= 0)) 3367 return -1; 3368 3369 do { 3370 rxm = &rxr->rxr_maps[prod]; 3371 3372 MGETHDR(m, M_DONTWAIT, MT_DATA); 3373 if (m == NULL) { 3374 rxr->rxr_mgethdr_failed.ev_count++; 3375 error = -1; 3376 break; 3377 } 3378 3379 MCLGET(m, M_DONTWAIT); 3380 if (!ISSET(m->m_flags, M_EXT)) { 3381 rxr->rxr_mgetcl_failed.ev_count++; 3382 error = -1; 3383 m_freem(m); 3384 break; 3385 } 3386 3387 m->m_len = m->m_pkthdr.len = MCLBYTES; 3388 m_adj(m, ETHER_ALIGN); 3389 3390 map = rxm->rxm_map; 3391 3392 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 3393 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) { 3394 rxr->rxr_mbuf_load_failed.ev_count++; 3395 error = -1; 3396 m_freem(m); 3397 break; 3398 } 3399 3400 rxm->rxm_m = m; 3401 3402 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3403 BUS_DMASYNC_PREREAD); 3404 3405 rxd = &ring[prod]; 3406 3407 rxd->paddr = htole64(map->dm_segs[0].ds_addr); 3408 rxd->haddr = htole64(0); 3409 3410 prod++; 3411 prod &= mask; 3412 3413 post = 1; 3414 3415 } while (--slots); 3416 3417 if (post) { 3418 rxr->rxr_prod = prod; 3419 ixl_wr(sc, rxr->rxr_tail, prod); 3420 } 3421 3422 return error; 3423 } 3424 3425 static inline int 3426 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp, 3427 u_int txlimit, struct evcnt *txevcnt, 3428 u_int rxlimit, struct evcnt *rxevcnt) 3429 { 3430 struct ixl_tx_ring *txr = qp->qp_txr; 3431 struct ixl_rx_ring *rxr = qp->qp_rxr; 3432 int txmore, rxmore; 3433 int rv; 3434 3435 mutex_enter(&txr->txr_lock); 3436 txevcnt->ev_count++; 3437 txmore = ixl_txeof(sc, txr, txlimit); 3438 mutex_exit(&txr->txr_lock); 3439 3440 mutex_enter(&rxr->rxr_lock); 3441 rxevcnt->ev_count++; 3442 rxmore = ixl_rxeof(sc, rxr, rxlimit); 3443 mutex_exit(&rxr->rxr_lock); 3444 3445 rv = txmore | (rxmore << 1); 3446 3447 return rv; 3448 } 3449 3450 static void 3451 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp) 3452 { 3453 3454 if (qp->qp_workqueue) 3455 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL); 3456 else 3457 softint_schedule(qp->qp_si); 3458 } 3459 3460 static int 3461 ixl_intr(void *xsc) 3462 { 3463 struct ixl_softc *sc = xsc; 3464 struct ixl_tx_ring *txr; 3465 struct ixl_rx_ring *rxr; 3466 uint32_t icr, rxintr, txintr; 3467 int rv = 0; 3468 unsigned int i; 3469 3470 KASSERT(sc != NULL); 3471 3472 ixl_enable_other_intr(sc); 3473 icr = ixl_rd(sc, I40E_PFINT_ICR0); 3474 3475 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) { 3476 atomic_inc_64(&sc->sc_event_atq.ev_count); 3477 ixl_atq_done(sc); 3478 ixl_work_add(sc->sc_workq, &sc->sc_arq_task); 3479 rv = 1; 3480 } 3481 3482 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) { 3483 atomic_inc_64(&sc->sc_event_link.ev_count); 3484 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task); 3485 rv = 1; 3486 } 3487 3488 rxintr = icr & I40E_INTR_NOTX_RX_MASK; 3489 txintr = icr & I40E_INTR_NOTX_TX_MASK; 3490 3491 if (txintr || rxintr) { 3492 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 3493 txr = sc->sc_qps[i].qp_txr; 3494 rxr = sc->sc_qps[i].qp_rxr; 3495 3496 
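			/*
			 * Descriptive note (added): INTx/MSI shares one
			 * handler for all queue pairs, so drain every ring
			 * here without a processing limit.
			 */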
ixl_handle_queue_common(sc, &sc->sc_qps[i], 3497 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr, 3498 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr); 3499 } 3500 rv = 1; 3501 } 3502 3503 return rv; 3504 } 3505 3506 static int 3507 ixl_queue_intr(void *xqp) 3508 { 3509 struct ixl_queue_pair *qp = xqp; 3510 struct ixl_tx_ring *txr = qp->qp_txr; 3511 struct ixl_rx_ring *rxr = qp->qp_rxr; 3512 struct ixl_softc *sc = qp->qp_sc; 3513 u_int txlimit, rxlimit; 3514 int more; 3515 3516 txlimit = sc->sc_tx_intr_process_limit; 3517 rxlimit = sc->sc_rx_intr_process_limit; 3518 qp->qp_workqueue = sc->sc_txrx_workqueue; 3519 3520 more = ixl_handle_queue_common(sc, qp, 3521 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr); 3522 3523 if (more != 0) { 3524 ixl_sched_handle_queue(sc, qp); 3525 } else { 3526 /* for ALTQ */ 3527 if (txr->txr_qid == 0) 3528 if_schedule_deferred_start(&sc->sc_ec.ec_if); 3529 softint_schedule(txr->txr_si); 3530 3531 ixl_enable_queue_intr(sc, qp); 3532 } 3533 3534 return 1; 3535 } 3536 3537 static void 3538 ixl_handle_queue_wk(struct work *wk, void *xsc) 3539 { 3540 struct ixl_queue_pair *qp; 3541 3542 qp = container_of(wk, struct ixl_queue_pair, qp_work); 3543 ixl_handle_queue(qp); 3544 } 3545 3546 static void 3547 ixl_handle_queue(void *xqp) 3548 { 3549 struct ixl_queue_pair *qp = xqp; 3550 struct ixl_softc *sc = qp->qp_sc; 3551 struct ixl_tx_ring *txr = qp->qp_txr; 3552 struct ixl_rx_ring *rxr = qp->qp_rxr; 3553 u_int txlimit, rxlimit; 3554 int more; 3555 3556 txlimit = sc->sc_tx_process_limit; 3557 rxlimit = sc->sc_rx_process_limit; 3558 3559 more = ixl_handle_queue_common(sc, qp, 3560 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer); 3561 3562 if (more != 0) 3563 ixl_sched_handle_queue(sc, qp); 3564 else 3565 ixl_enable_queue_intr(sc, qp); 3566 } 3567 3568 static inline void 3569 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg) 3570 { 3571 uint32_t hmc_idx, hmc_isvf; 3572 uint32_t hmc_errtype, hmc_objtype, hmc_data; 3573 3574 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK; 3575 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT; 3576 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK; 3577 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT; 3578 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK; 3579 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT; 3580 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK; 3581 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT; 3582 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA); 3583 3584 device_printf(sc->sc_dev, 3585 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n", 3586 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data); 3587 } 3588 3589 static int 3590 ixl_other_intr(void *xsc) 3591 { 3592 struct ixl_softc *sc = xsc; 3593 uint32_t icr, mask, reg; 3594 int rv; 3595 3596 icr = ixl_rd(sc, I40E_PFINT_ICR0); 3597 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA); 3598 3599 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) { 3600 atomic_inc_64(&sc->sc_event_atq.ev_count); 3601 ixl_atq_done(sc); 3602 ixl_work_add(sc->sc_workq, &sc->sc_arq_task); 3603 rv = 1; 3604 } 3605 3606 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) { 3607 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3608 device_printf(sc->sc_dev, "link stat changed\n"); 3609 3610 atomic_inc_64(&sc->sc_event_link.ev_count); 3611 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task); 3612 rv = 1; 3613 } 3614 3615 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) { 3616 CLR(mask, 
I40E_PFINT_ICR0_ENA_GRST_MASK); 3617 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 3618 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK; 3619 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3620 3621 device_printf(sc->sc_dev, "GRST: %s\n", 3622 reg == I40E_RESET_CORER ? "CORER" : 3623 reg == I40E_RESET_GLOBR ? "GLOBR" : 3624 reg == I40E_RESET_EMPR ? "EMPR" : 3625 "POR"); 3626 } 3627 3628 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK)) 3629 atomic_inc_64(&sc->sc_event_ecc_err.ev_count); 3630 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)) 3631 atomic_inc_64(&sc->sc_event_pci_exception.ev_count); 3632 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK)) 3633 atomic_inc_64(&sc->sc_event_crit_err.ev_count); 3634 3635 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) { 3636 CLR(mask, IXL_ICR0_CRIT_ERR_MASK); 3637 device_printf(sc->sc_dev, "critical error\n"); 3638 } 3639 3640 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) { 3641 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO); 3642 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK)) 3643 ixl_print_hmc_error(sc, reg); 3644 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0); 3645 } 3646 3647 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask); 3648 ixl_flush(sc); 3649 ixl_enable_other_intr(sc); 3650 return rv; 3651 } 3652 3653 static void 3654 ixl_get_link_status_done(struct ixl_softc *sc, 3655 const struct ixl_aq_desc *iaq) 3656 { 3657 struct ixl_aq_desc iaq_buf; 3658 3659 memcpy(&iaq_buf, iaq, sizeof(iaq_buf)); 3660 3661 /* 3662 * The lock can be released here 3663 * because there is no post processing about ATQ 3664 */ 3665 mutex_exit(&sc->sc_atq_lock); 3666 ixl_link_state_update(sc, &iaq_buf); 3667 mutex_enter(&sc->sc_atq_lock); 3668 } 3669 3670 static void 3671 ixl_get_link_status(void *xsc) 3672 { 3673 struct ixl_softc *sc = xsc; 3674 struct ixl_aq_desc *iaq; 3675 struct ixl_aq_link_param *param; 3676 int error; 3677 3678 mutex_enter(&sc->sc_atq_lock); 3679 3680 iaq = &sc->sc_link_state_atq.iatq_desc; 3681 memset(iaq, 0, sizeof(*iaq)); 3682 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 3683 param = (struct ixl_aq_link_param *)iaq->iaq_param; 3684 param->notify = IXL_AQ_LINK_NOTIFY; 3685 3686 error = ixl_atq_exec_locked(sc, &sc->sc_link_state_atq); 3687 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 3688 3689 if (error == 0) { 3690 ixl_get_link_status_done(sc, iaq); 3691 } 3692 3693 mutex_exit(&sc->sc_atq_lock); 3694 } 3695 3696 static void 3697 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3698 { 3699 struct ifnet *ifp = &sc->sc_ec.ec_if; 3700 int link_state; 3701 3702 mutex_enter(&sc->sc_cfg_lock); 3703 link_state = ixl_set_link_status_locked(sc, iaq); 3704 mutex_exit(&sc->sc_cfg_lock); 3705 3706 if (ifp->if_link_state != link_state) 3707 if_link_state_change(ifp, link_state); 3708 3709 if (link_state != LINK_STATE_DOWN) { 3710 kpreempt_disable(); 3711 if_schedule_deferred_start(ifp); 3712 kpreempt_enable(); 3713 } 3714 } 3715 3716 static void 3717 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq, 3718 const char *msg) 3719 { 3720 char buf[512]; 3721 size_t len; 3722 3723 len = sizeof(buf); 3724 buf[--len] = '\0'; 3725 3726 device_printf(sc->sc_dev, "%s\n", msg); 3727 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags)); 3728 device_printf(sc->sc_dev, "flags %s opcode %04x\n", 3729 buf, le16toh(iaq->iaq_opcode)); 3730 device_printf(sc->sc_dev, "datalen %u retval %u\n", 3731 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval)); 3732 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie); 3733 
device_printf(sc->sc_dev, "%08x %08x %08x %08x\n", 3734 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]), 3735 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3])); 3736 } 3737 3738 static void 3739 ixl_arq(void *xsc) 3740 { 3741 struct ixl_softc *sc = xsc; 3742 struct ixl_aq_desc *arq, *iaq; 3743 struct ixl_aq_buf *aqb; 3744 unsigned int cons = sc->sc_arq_cons; 3745 unsigned int prod; 3746 int done = 0; 3747 3748 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) & 3749 sc->sc_aq_regs->arq_head_mask; 3750 3751 if (cons == prod) 3752 goto done; 3753 3754 arq = IXL_DMA_KVA(&sc->sc_arq); 3755 3756 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3757 0, IXL_DMA_LEN(&sc->sc_arq), 3758 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3759 3760 do { 3761 iaq = &arq[cons]; 3762 aqb = sc->sc_arq_live[cons]; 3763 3764 KASSERT(aqb != NULL); 3765 3766 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN, 3767 BUS_DMASYNC_POSTREAD); 3768 3769 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3770 ixl_aq_dump(sc, iaq, "arq event"); 3771 3772 switch (iaq->iaq_opcode) { 3773 case htole16(IXL_AQ_OP_PHY_LINK_STATUS): 3774 ixl_link_state_update(sc, iaq); 3775 break; 3776 } 3777 3778 memset(iaq, 0, sizeof(*iaq)); 3779 sc->sc_arq_live[cons] = NULL; 3780 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry); 3781 3782 cons++; 3783 cons &= IXL_AQ_MASK; 3784 3785 done = 1; 3786 } while (cons != prod); 3787 3788 if (done) { 3789 sc->sc_arq_cons = cons; 3790 ixl_arq_fill(sc); 3791 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 3792 0, IXL_DMA_LEN(&sc->sc_arq), 3793 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3794 } 3795 3796 done: 3797 ixl_enable_other_intr(sc); 3798 } 3799 3800 static void 3801 ixl_atq_set(struct ixl_atq *iatq, 3802 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *)) 3803 { 3804 3805 iatq->iatq_fn = fn; 3806 } 3807 3808 static int 3809 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3810 { 3811 struct ixl_aq_desc *atq, *slot; 3812 unsigned int prod, cons, prod_next; 3813 3814 /* assert locked */ 3815 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3816 3817 atq = IXL_DMA_KVA(&sc->sc_atq); 3818 prod = sc->sc_atq_prod; 3819 cons = sc->sc_atq_cons; 3820 prod_next = (prod +1) & IXL_AQ_MASK; 3821 3822 if (cons == prod_next) 3823 return ENOMEM; 3824 3825 slot = &atq[prod]; 3826 3827 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3828 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3829 3830 KASSERT(iatq->iatq_fn != NULL); 3831 *slot = iatq->iatq_desc; 3832 slot->iaq_cookie = (uint64_t)((intptr_t)iatq); 3833 3834 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3835 ixl_aq_dump(sc, slot, "atq command"); 3836 3837 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3838 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3839 3840 sc->sc_atq_prod = prod_next; 3841 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod); 3842 3843 return 0; 3844 } 3845 3846 static void 3847 ixl_atq_done_locked(struct ixl_softc *sc) 3848 { 3849 struct ixl_aq_desc *atq, *slot; 3850 struct ixl_atq *iatq; 3851 unsigned int cons; 3852 unsigned int prod; 3853 3854 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3855 3856 prod = sc->sc_atq_prod; 3857 cons = sc->sc_atq_cons; 3858 3859 if (prod == cons) 3860 return; 3861 3862 atq = IXL_DMA_KVA(&sc->sc_atq); 3863 3864 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3865 0, IXL_DMA_LEN(&sc->sc_atq), 3866 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3867 3868 do { 3869 slot = &atq[cons]; 3870 if (!ISSET(slot->iaq_flags, 
htole16(IXL_AQ_DD))) 3871 break; 3872 3873 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie); 3874 iatq->iatq_desc = *slot; 3875 3876 memset(slot, 0, sizeof(*slot)); 3877 3878 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG)) 3879 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response"); 3880 3881 (*iatq->iatq_fn)(sc, &iatq->iatq_desc); 3882 3883 cons++; 3884 cons &= IXL_AQ_MASK; 3885 } while (cons != prod); 3886 3887 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3888 0, IXL_DMA_LEN(&sc->sc_atq), 3889 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3890 3891 sc->sc_atq_cons = cons; 3892 } 3893 3894 static void 3895 ixl_atq_done(struct ixl_softc *sc) 3896 { 3897 3898 mutex_enter(&sc->sc_atq_lock); 3899 ixl_atq_done_locked(sc); 3900 mutex_exit(&sc->sc_atq_lock); 3901 } 3902 3903 static void 3904 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 3905 { 3906 3907 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3908 3909 cv_signal(&sc->sc_atq_cv); 3910 } 3911 3912 static int 3913 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq) 3914 { 3915 int error; 3916 3917 mutex_enter(&sc->sc_atq_lock); 3918 error = ixl_atq_exec_locked(sc, iatq); 3919 mutex_exit(&sc->sc_atq_lock); 3920 3921 return error; 3922 } 3923 3924 static int 3925 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq) 3926 { 3927 int error; 3928 3929 KASSERT(mutex_owned(&sc->sc_atq_lock)); 3930 KASSERT(iatq->iatq_desc.iaq_cookie == 0); 3931 3932 ixl_atq_set(iatq, ixl_wakeup); 3933 3934 error = ixl_atq_post_locked(sc, iatq); 3935 if (error) 3936 return error; 3937 3938 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock, 3939 IXL_ATQ_EXEC_TIMEOUT); 3940 3941 return error; 3942 } 3943 3944 static int 3945 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm) 3946 { 3947 struct ixl_aq_desc *atq, *slot; 3948 unsigned int prod; 3949 unsigned int t = 0; 3950 3951 mutex_enter(&sc->sc_atq_lock); 3952 3953 atq = IXL_DMA_KVA(&sc->sc_atq); 3954 prod = sc->sc_atq_prod; 3955 slot = atq + prod; 3956 3957 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3958 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE); 3959 3960 *slot = *iaq; 3961 slot->iaq_flags |= htole16(IXL_AQ_SI); 3962 3963 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3964 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE); 3965 3966 prod++; 3967 prod &= IXL_AQ_MASK; 3968 sc->sc_atq_prod = prod; 3969 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod); 3970 3971 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) { 3972 delaymsec(1); 3973 3974 if (t++ > tm) { 3975 mutex_exit(&sc->sc_atq_lock); 3976 return ETIMEDOUT; 3977 } 3978 } 3979 3980 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3981 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD); 3982 *iaq = *slot; 3983 memset(slot, 0, sizeof(*slot)); 3984 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 3985 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD); 3986 3987 sc->sc_atq_cons = prod; 3988 3989 mutex_exit(&sc->sc_atq_lock); 3990 3991 return 0; 3992 } 3993 3994 static int 3995 ixl_get_version(struct ixl_softc *sc) 3996 { 3997 struct ixl_aq_desc iaq; 3998 uint32_t fwbuild, fwver, apiver; 3999 uint16_t api_maj_ver, api_min_ver; 4000 4001 memset(&iaq, 0, sizeof(iaq)); 4002 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION); 4003 4004 iaq.iaq_retval = le16toh(23); 4005 4006 if (ixl_atq_poll(sc, &iaq, 2000) != 0) 4007 return ETIMEDOUT; 4008 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) 4009 return EIO; 4010 4011 fwbuild = le32toh(iaq.iaq_param[1]); 4012 fwver = 
le32toh(iaq.iaq_param[2]); 4013 apiver = le32toh(iaq.iaq_param[3]); 4014 4015 api_maj_ver = (uint16_t)apiver; 4016 api_min_ver = (uint16_t)(apiver >> 16); 4017 4018 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver, 4019 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver); 4020 4021 if (sc->sc_mac_type == I40E_MAC_X722) { 4022 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK | 4023 IXL_SC_AQ_FLAG_NVMREAD); 4024 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL); 4025 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS); 4026 } 4027 4028 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min)) 4029 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) { 4030 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL); 4031 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK); 4032 } 4033 #undef IXL_API_VER 4034 4035 return 0; 4036 } 4037 4038 static int 4039 ixl_get_nvm_version(struct ixl_softc *sc) 4040 { 4041 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo; 4042 uint32_t eetrack, oem; 4043 uint16_t nvm_maj_ver, nvm_min_ver, oem_build; 4044 uint8_t oem_ver, oem_patch; 4045 4046 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0; 4047 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver); 4048 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); 4049 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); 4050 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); 4051 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi); 4052 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo); 4053 4054 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK); 4055 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK); 4056 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo; 4057 oem = ((uint32_t)oem_hi << 16) | oem_lo; 4058 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK); 4059 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK); 4060 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK); 4061 4062 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d", 4063 nvm_maj_ver, nvm_min_ver, eetrack, 4064 oem_ver, oem_build, oem_patch); 4065 4066 return 0; 4067 } 4068 4069 static int 4070 ixl_pxe_clear(struct ixl_softc *sc) 4071 { 4072 struct ixl_aq_desc iaq; 4073 int rv; 4074 4075 memset(&iaq, 0, sizeof(iaq)); 4076 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE); 4077 iaq.iaq_param[0] = htole32(0x2); 4078 4079 rv = ixl_atq_poll(sc, &iaq, 250); 4080 4081 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1); 4082 4083 if (rv != 0) 4084 return ETIMEDOUT; 4085 4086 switch (iaq.iaq_retval) { 4087 case htole16(IXL_AQ_RC_OK): 4088 case htole16(IXL_AQ_RC_EEXIST): 4089 break; 4090 default: 4091 return EIO; 4092 } 4093 4094 return 0; 4095 } 4096 4097 static int 4098 ixl_lldp_shut(struct ixl_softc *sc) 4099 { 4100 struct ixl_aq_desc iaq; 4101 4102 memset(&iaq, 0, sizeof(iaq)); 4103 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT); 4104 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN); 4105 4106 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4107 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n"); 4108 return -1; 4109 } 4110 4111 switch (iaq.iaq_retval) { 4112 case htole16(IXL_AQ_RC_EMODE): 4113 case htole16(IXL_AQ_RC_EPERM): 4114 /* ignore silently */ 4115 default: 4116 break; 4117 } 4118 4119 return 0; 4120 } 4121 4122 static void 4123 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap) 4124 { 4125 uint16_t id; 4126 uint32_t number, logical_id; 4127 4128 id = le16toh(cap->cap_id); 4129 number = le32toh(cap->number); 4130 logical_id = 
le32toh(cap->logical_id); 4131 4132 switch (id) { 4133 case IXL_AQ_CAP_RSS: 4134 sc->sc_rss_table_size = number; 4135 sc->sc_rss_table_entry_width = logical_id; 4136 break; 4137 case IXL_AQ_CAP_RXQ: 4138 case IXL_AQ_CAP_TXQ: 4139 sc->sc_nqueue_pairs_device = MIN(number, 4140 sc->sc_nqueue_pairs_device); 4141 break; 4142 } 4143 } 4144 4145 static int 4146 ixl_get_hw_capabilities(struct ixl_softc *sc) 4147 { 4148 struct ixl_dmamem idm; 4149 struct ixl_aq_desc iaq; 4150 struct ixl_aq_capability *caps; 4151 size_t i, ncaps; 4152 bus_size_t caps_size; 4153 uint16_t status; 4154 int rv; 4155 4156 caps_size = sizeof(caps[0]) * 40; 4157 memset(&iaq, 0, sizeof(iaq)); 4158 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP); 4159 4160 do { 4161 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) { 4162 return -1; 4163 } 4164 4165 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4166 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4167 iaq.iaq_datalen = htole16(caps_size); 4168 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4169 4170 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4171 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD); 4172 4173 rv = ixl_atq_poll(sc, &iaq, 250); 4174 4175 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, 4176 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD); 4177 4178 if (rv != 0) { 4179 aprint_error(", HW capabilities timeout\n"); 4180 goto done; 4181 } 4182 4183 status = le16toh(iaq.iaq_retval); 4184 4185 if (status == IXL_AQ_RC_ENOMEM) { 4186 caps_size = le16toh(iaq.iaq_datalen); 4187 ixl_dmamem_free(sc, &idm); 4188 } 4189 } while (status == IXL_AQ_RC_ENOMEM); 4190 4191 if (status != IXL_AQ_RC_OK) { 4192 aprint_error(", HW capabilities error\n"); 4193 goto done; 4194 } 4195 4196 caps = IXL_DMA_KVA(&idm); 4197 ncaps = le16toh(iaq.iaq_param[1]); 4198 4199 for (i = 0; i < ncaps; i++) { 4200 ixl_parse_hw_capability(sc, &caps[i]); 4201 } 4202 4203 done: 4204 ixl_dmamem_free(sc, &idm); 4205 return rv; 4206 } 4207 4208 static int 4209 ixl_get_mac(struct ixl_softc *sc) 4210 { 4211 struct ixl_dmamem idm; 4212 struct ixl_aq_desc iaq; 4213 struct ixl_aq_mac_addresses *addrs; 4214 int rv; 4215 4216 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) { 4217 aprint_error(", unable to allocate mac addresses\n"); 4218 return -1; 4219 } 4220 4221 memset(&iaq, 0, sizeof(iaq)); 4222 iaq.iaq_flags = htole16(IXL_AQ_BUF); 4223 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ); 4224 iaq.iaq_datalen = htole16(sizeof(*addrs)); 4225 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4226 4227 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4228 BUS_DMASYNC_PREREAD); 4229 4230 rv = ixl_atq_poll(sc, &iaq, 250); 4231 4232 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4233 BUS_DMASYNC_POSTREAD); 4234 4235 if (rv != 0) { 4236 aprint_error(", MAC ADDRESS READ timeout\n"); 4237 rv = -1; 4238 goto done; 4239 } 4240 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4241 aprint_error(", MAC ADDRESS READ error\n"); 4242 rv = -1; 4243 goto done; 4244 } 4245 4246 addrs = IXL_DMA_KVA(&idm); 4247 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) { 4248 printf(", port address is not valid\n"); 4249 goto done; 4250 } 4251 4252 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN); 4253 rv = 0; 4254 4255 done: 4256 ixl_dmamem_free(sc, &idm); 4257 return rv; 4258 } 4259 4260 static int 4261 ixl_get_switch_config(struct ixl_softc *sc) 4262 { 4263 struct ixl_dmamem idm; 4264 struct ixl_aq_desc iaq; 4265 struct ixl_aq_switch_config *hdr; 4266 struct ixl_aq_switch_config_element *elms, *elm; 4267 
unsigned int nelm, i; 4268 int rv; 4269 4270 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4271 aprint_error_dev(sc->sc_dev, 4272 "unable to allocate switch config buffer\n"); 4273 return -1; 4274 } 4275 4276 memset(&iaq, 0, sizeof(iaq)); 4277 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4278 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4279 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG); 4280 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN); 4281 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)); 4282 4283 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4284 BUS_DMASYNC_PREREAD); 4285 4286 rv = ixl_atq_poll(sc, &iaq, 250); 4287 4288 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm), 4289 BUS_DMASYNC_POSTREAD); 4290 4291 if (rv != 0) { 4292 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n"); 4293 rv = -1; 4294 goto done; 4295 } 4296 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4297 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n"); 4298 rv = -1; 4299 goto done; 4300 } 4301 4302 hdr = IXL_DMA_KVA(&idm); 4303 elms = (struct ixl_aq_switch_config_element *)(hdr + 1); 4304 4305 nelm = le16toh(hdr->num_reported); 4306 if (nelm < 1) { 4307 aprint_error_dev(sc->sc_dev, "no switch config available\n"); 4308 rv = -1; 4309 goto done; 4310 } 4311 4312 for (i = 0; i < nelm; i++) { 4313 elm = &elms[i]; 4314 4315 aprint_debug_dev(sc->sc_dev, 4316 "type %x revision %u seid %04x\n", 4317 elm->type, elm->revision, le16toh(elm->seid)); 4318 aprint_debug_dev(sc->sc_dev, 4319 "uplink %04x downlink %04x\n", 4320 le16toh(elm->uplink_seid), 4321 le16toh(elm->downlink_seid)); 4322 aprint_debug_dev(sc->sc_dev, 4323 "conntype %x scheduler %04x extra %04x\n", 4324 elm->connection_type, 4325 le16toh(elm->scheduler_id), 4326 le16toh(elm->element_info)); 4327 } 4328 4329 elm = &elms[0]; 4330 4331 sc->sc_uplink_seid = elm->uplink_seid; 4332 sc->sc_downlink_seid = elm->downlink_seid; 4333 sc->sc_seid = elm->seid; 4334 4335 if ((sc->sc_uplink_seid == htole16(0)) != 4336 (sc->sc_downlink_seid == htole16(0))) { 4337 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n"); 4338 rv = -1; 4339 goto done; 4340 } 4341 4342 done: 4343 ixl_dmamem_free(sc, &idm); 4344 return rv; 4345 } 4346 4347 static int 4348 ixl_phy_mask_ints(struct ixl_softc *sc) 4349 { 4350 struct ixl_aq_desc iaq; 4351 4352 memset(&iaq, 0, sizeof(iaq)); 4353 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK); 4354 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK & 4355 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL | 4356 IXL_AQ_PHY_EV_MEDIA_NA)); 4357 4358 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4359 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n"); 4360 return -1; 4361 } 4362 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4363 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n"); 4364 return -1; 4365 } 4366 4367 return 0; 4368 } 4369 4370 static int 4371 ixl_get_phy_abilities(struct ixl_softc *sc,struct ixl_dmamem *idm) 4372 { 4373 struct ixl_aq_desc iaq; 4374 int rv; 4375 4376 memset(&iaq, 0, sizeof(iaq)); 4377 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4378 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? 
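/* indirect buffers larger than I40E_AQ_LARGE_BUF bytes need the LB flag */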
IXL_AQ_LB : 0)); 4379 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES); 4380 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm)); 4381 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT); 4382 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 4383 4384 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4385 BUS_DMASYNC_PREREAD); 4386 4387 rv = ixl_atq_poll(sc, &iaq, 250); 4388 4389 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 4390 BUS_DMASYNC_POSTREAD); 4391 4392 if (rv != 0) 4393 return -1; 4394 4395 return le16toh(iaq.iaq_retval); 4396 } 4397 4398 static int 4399 ixl_get_phy_info(struct ixl_softc *sc) 4400 { 4401 struct ixl_dmamem idm; 4402 struct ixl_aq_phy_abilities *phy; 4403 int rv; 4404 4405 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) { 4406 aprint_error_dev(sc->sc_dev, 4407 "unable to allocate phy abilities buffer\n"); 4408 return -1; 4409 } 4410 4411 rv = ixl_get_phy_abilities(sc, &idm); 4412 switch (rv) { 4413 case -1: 4414 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n"); 4415 goto done; 4416 case IXL_AQ_RC_OK: 4417 break; 4418 case IXL_AQ_RC_EIO: 4419 aprint_error_dev(sc->sc_dev,"unable to query phy types\n"); 4420 goto done; 4421 default: 4422 aprint_error_dev(sc->sc_dev, 4423 "GET PHY ABILITIIES error %u\n", rv); 4424 goto done; 4425 } 4426 4427 phy = IXL_DMA_KVA(&idm); 4428 4429 sc->sc_phy_types = le32toh(phy->phy_type); 4430 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32; 4431 4432 sc->sc_phy_abilities = phy->abilities; 4433 sc->sc_phy_linkspeed = phy->link_speed; 4434 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info & 4435 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS | 4436 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS); 4437 sc->sc_eee_cap = phy->eee_capability; 4438 sc->sc_eeer_val = phy->eeer_val; 4439 sc->sc_d3_lpan = phy->d3_lpan; 4440 4441 rv = 0; 4442 4443 done: 4444 ixl_dmamem_free(sc, &idm); 4445 return rv; 4446 } 4447 4448 static int 4449 ixl_set_phy_config(struct ixl_softc *sc, 4450 uint8_t link_speed, uint8_t abilities, bool polling) 4451 { 4452 struct ixl_aq_phy_param *param; 4453 struct ixl_atq iatq; 4454 struct ixl_aq_desc *iaq; 4455 int error; 4456 4457 memset(&iatq, 0, sizeof(iatq)); 4458 4459 iaq = &iatq.iatq_desc; 4460 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG); 4461 param = (struct ixl_aq_phy_param *)&iaq->iaq_param; 4462 param->phy_types = htole32((uint32_t)sc->sc_phy_types); 4463 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32); 4464 param->link_speed = link_speed; 4465 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK; 4466 param->fec_cfg = sc->sc_phy_fec_cfg; 4467 param->eee_capability = sc->sc_eee_cap; 4468 param->eeer_val = sc->sc_eeer_val; 4469 param->d3_lpan = sc->sc_d3_lpan; 4470 4471 if (polling) 4472 error = ixl_atq_poll(sc, iaq, 250); 4473 else 4474 error = ixl_atq_exec(sc, &iatq); 4475 4476 if (error != 0) 4477 return error; 4478 4479 switch (le16toh(iaq->iaq_retval)) { 4480 case IXL_AQ_RC_OK: 4481 break; 4482 case IXL_AQ_RC_EPERM: 4483 return EPERM; 4484 default: 4485 return EIO; 4486 } 4487 4488 return 0; 4489 } 4490 4491 static int 4492 ixl_set_phy_autoselect(struct ixl_softc *sc) 4493 { 4494 uint8_t link_speed, abilities; 4495 4496 link_speed = sc->sc_phy_linkspeed; 4497 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO; 4498 4499 return ixl_set_phy_config(sc, link_speed, abilities, true); 4500 } 4501 4502 static int 4503 ixl_get_link_status_poll(struct ixl_softc *sc, int *l) 4504 { 4505 struct ixl_aq_desc iaq; 4506 struct 
ixl_aq_link_param *param; 4507 int link; 4508 4509 memset(&iaq, 0, sizeof(iaq)); 4510 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS); 4511 param = (struct ixl_aq_link_param *)iaq.iaq_param; 4512 param->notify = IXL_AQ_LINK_NOTIFY; 4513 4514 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4515 return ETIMEDOUT; 4516 } 4517 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4518 return EIO; 4519 } 4520 4521 /* It is unneccessary to hold lock */ 4522 link = ixl_set_link_status_locked(sc, &iaq); 4523 4524 if (l != NULL) 4525 *l = link; 4526 4527 return 0; 4528 } 4529 4530 static int 4531 ixl_get_vsi(struct ixl_softc *sc) 4532 { 4533 struct ixl_dmamem *vsi = &sc->sc_scratch; 4534 struct ixl_aq_desc iaq; 4535 struct ixl_aq_vsi_param *param; 4536 struct ixl_aq_vsi_reply *reply; 4537 struct ixl_aq_vsi_data *data; 4538 int rv; 4539 4540 /* grumble, vsi info isn't "known" at compile time */ 4541 4542 memset(&iaq, 0, sizeof(iaq)); 4543 iaq.iaq_flags = htole16(IXL_AQ_BUF | 4544 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4545 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS); 4546 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4547 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4548 4549 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4550 param->uplink_seid = sc->sc_seid; 4551 4552 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4553 BUS_DMASYNC_PREREAD); 4554 4555 rv = ixl_atq_poll(sc, &iaq, 250); 4556 4557 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4558 BUS_DMASYNC_POSTREAD); 4559 4560 if (rv != 0) { 4561 return ETIMEDOUT; 4562 } 4563 4564 switch (le16toh(iaq.iaq_retval)) { 4565 case IXL_AQ_RC_OK: 4566 break; 4567 case IXL_AQ_RC_ENOENT: 4568 return ENOENT; 4569 case IXL_AQ_RC_EACCES: 4570 return EACCES; 4571 default: 4572 return EIO; 4573 } 4574 4575 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param; 4576 sc->sc_vsi_number = le16toh(reply->vsi_number); 4577 data = IXL_DMA_KVA(vsi); 4578 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx); 4579 4580 return 0; 4581 } 4582 4583 static int 4584 ixl_set_vsi(struct ixl_softc *sc) 4585 { 4586 struct ixl_dmamem *vsi = &sc->sc_scratch; 4587 struct ixl_aq_desc iaq; 4588 struct ixl_aq_vsi_param *param; 4589 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi); 4590 unsigned int qnum; 4591 uint16_t val; 4592 int rv; 4593 4594 qnum = sc->sc_nqueue_pairs - 1; 4595 4596 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP | 4597 IXL_AQ_VSI_VALID_VLAN); 4598 4599 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK)); 4600 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG)); 4601 data->queue_mapping[0] = htole16(0); 4602 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) | 4603 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)); 4604 4605 val = le16toh(data->port_vlan_flags); 4606 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK); 4607 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL); 4608 4609 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) { 4610 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH); 4611 } else { 4612 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING); 4613 } 4614 4615 data->port_vlan_flags = htole16(val); 4616 4617 /* grumble, vsi info isn't "known" at compile time */ 4618 4619 memset(&iaq, 0, sizeof(iaq)); 4620 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4621 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? 
IXL_AQ_LB : 0)); 4622 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS); 4623 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi)); 4624 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)); 4625 4626 param = (struct ixl_aq_vsi_param *)iaq.iaq_param; 4627 param->uplink_seid = sc->sc_seid; 4628 4629 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4630 BUS_DMASYNC_PREWRITE); 4631 4632 rv = ixl_atq_poll(sc, &iaq, 250); 4633 4634 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi), 4635 BUS_DMASYNC_POSTWRITE); 4636 4637 if (rv != 0) { 4638 return ETIMEDOUT; 4639 } 4640 4641 switch (le16toh(iaq.iaq_retval)) { 4642 case IXL_AQ_RC_OK: 4643 break; 4644 case IXL_AQ_RC_ENOENT: 4645 return ENOENT; 4646 case IXL_AQ_RC_EACCES: 4647 return EACCES; 4648 default: 4649 return EIO; 4650 } 4651 4652 return 0; 4653 } 4654 4655 static void 4656 ixl_set_filter_control(struct ixl_softc *sc) 4657 { 4658 uint32_t reg; 4659 4660 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0); 4661 4662 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK); 4663 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT); 4664 4665 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK); 4666 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK); 4667 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK); 4668 4669 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg); 4670 } 4671 4672 static inline void 4673 ixl_get_default_rss_key(uint32_t *buf, size_t len) 4674 { 4675 size_t cplen; 4676 uint8_t rss_seed[RSS_KEYSIZE]; 4677 4678 rss_getkey(rss_seed); 4679 memset(buf, 0, len); 4680 4681 cplen = MIN(len, sizeof(rss_seed)); 4682 memcpy(buf, rss_seed, cplen); 4683 } 4684 4685 static int 4686 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen) 4687 { 4688 struct ixl_dmamem *idm; 4689 struct ixl_atq iatq; 4690 struct ixl_aq_desc *iaq; 4691 struct ixl_aq_rss_key_param *param; 4692 struct ixl_aq_rss_key_data *data; 4693 size_t len, datalen, stdlen, extlen; 4694 uint16_t vsi_id; 4695 int rv; 4696 4697 memset(&iatq, 0, sizeof(iatq)); 4698 iaq = &iatq.iatq_desc; 4699 idm = &sc->sc_aqbuf; 4700 4701 datalen = sizeof(*data); 4702 4703 /*XXX The buf size has to be less than the size of the register */ 4704 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen); 4705 4706 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4707 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4708 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY); 4709 iaq->iaq_datalen = htole16(datalen); 4710 4711 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param; 4712 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) | 4713 IXL_AQ_RSSKEY_VSI_VALID; 4714 param->vsi_id = htole16(vsi_id); 4715 4716 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 4717 data = IXL_DMA_KVA(idm); 4718 4719 len = MIN(keylen, datalen); 4720 stdlen = MIN(sizeof(data->standard_rss_key), len); 4721 memcpy(data->standard_rss_key, key, stdlen); 4722 len = (len > stdlen) ? (len - stdlen) : 0; 4723 4724 extlen = MIN(sizeof(data->extended_hash_key), len); 4725 extlen = (stdlen < keylen) ? 
MIN(extlen, keylen - stdlen) : 0; 4726 memcpy(data->extended_hash_key, key + stdlen, extlen); 4727 4728 ixl_aq_dva(iaq, IXL_DMA_DVA(idm)); 4729 4730 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4731 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE); 4732 4733 rv = ixl_atq_exec(sc, &iatq); 4734 4735 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4736 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE); 4737 4738 if (rv != 0) { 4739 return ETIMEDOUT; 4740 } 4741 4742 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) { 4743 return EIO; 4744 } 4745 4746 return 0; 4747 } 4748 4749 static int 4750 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen) 4751 { 4752 struct ixl_dmamem *idm; 4753 struct ixl_atq iatq; 4754 struct ixl_aq_desc *iaq; 4755 struct ixl_aq_rss_lut_param *param; 4756 uint16_t vsi_id; 4757 uint8_t *data; 4758 size_t dmalen; 4759 int rv; 4760 4761 memset(&iatq, 0, sizeof(iatq)); 4762 iaq = &iatq.iatq_desc; 4763 idm = &sc->sc_aqbuf; 4764 4765 dmalen = MIN(lutlen, IXL_DMA_LEN(idm)); 4766 4767 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD | 4768 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0)); 4769 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT); 4770 iaq->iaq_datalen = htole16(dmalen); 4771 4772 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 4773 data = IXL_DMA_KVA(idm); 4774 memcpy(data, lut, dmalen); 4775 ixl_aq_dva(iaq, IXL_DMA_DVA(idm)); 4776 4777 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param; 4778 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) | 4779 IXL_AQ_RSSLUT_VSI_VALID; 4780 param->vsi_id = htole16(vsi_id); 4781 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF << 4782 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT); 4783 4784 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4785 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE); 4786 4787 rv = ixl_atq_exec(sc, &iatq); 4788 4789 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, 4790 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE); 4791 4792 if (rv != 0) { 4793 return ETIMEDOUT; 4794 } 4795 4796 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) { 4797 return EIO; 4798 } 4799 4800 return 0; 4801 } 4802 4803 static int 4804 ixl_register_rss_key(struct ixl_softc *sc) 4805 { 4806 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG]; 4807 int rv; 4808 size_t i; 4809 4810 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed)); 4811 4812 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) { 4813 rv = ixl_set_rss_key(sc, (uint8_t *)rss_seed, 4814 sizeof(rss_seed)); 4815 } else { 4816 rv = 0; 4817 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 4818 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]); 4819 } 4820 } 4821 4822 return rv; 4823 } 4824 4825 static void 4826 ixl_register_rss_pctype(struct ixl_softc *sc) 4827 { 4828 uint64_t set_hena = 0; 4829 uint32_t hena0, hena1; 4830 4831 /* 4832 * We use TCP/UDP with IPv4/IPv6 by default. 4833 * Note: the device cannot use just the IP header of 4834 * TCP/UDP packets for the RSS hash calculation. 
4835 */ 4836 if (sc->sc_mac_type == I40E_MAC_X722) 4837 set_hena = IXL_RSS_HENA_DEFAULT_X722; 4838 else 4839 set_hena = IXL_RSS_HENA_DEFAULT_XL710; 4840 4841 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0)); 4842 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1)); 4843 4844 SET(hena0, set_hena); 4845 SET(hena1, set_hena >> 32); 4846 4847 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0); 4848 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1); 4849 } 4850 4851 static int 4852 ixl_register_rss_hlut(struct ixl_softc *sc) 4853 { 4854 unsigned int qid; 4855 uint8_t hlut_buf[512], lut_mask; 4856 uint32_t *hluts; 4857 size_t i, hluts_num; 4858 int rv; 4859 4860 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1; 4861 4862 for (i = 0; i < sc->sc_rss_table_size; i++) { 4863 qid = i % sc->sc_nqueue_pairs; 4864 hlut_buf[i] = qid & lut_mask; 4865 } 4866 4867 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) { 4868 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf)); 4869 } else { 4870 rv = 0; 4871 hluts = (uint32_t *)hlut_buf; 4872 hluts_num = sc->sc_rss_table_size >> 2; 4873 for (i = 0; i < hluts_num; i++) { 4874 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]); 4875 } 4876 ixl_flush(sc); 4877 } 4878 4879 return rv; 4880 } 4881 4882 static void 4883 ixl_config_rss(struct ixl_softc *sc) 4884 { 4885 4886 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 4887 4888 ixl_register_rss_key(sc); 4889 ixl_register_rss_pctype(sc); 4890 ixl_register_rss_hlut(sc); 4891 } 4892 4893 static const struct ixl_phy_type * 4894 ixl_search_phy_type(uint8_t phy_type) 4895 { 4896 const struct ixl_phy_type *itype; 4897 uint64_t mask; 4898 unsigned int i; 4899 4900 if (phy_type >= 64) 4901 return NULL; 4902 4903 mask = 1ULL << phy_type; 4904 4905 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 4906 itype = &ixl_phy_type_map[i]; 4907 4908 if (ISSET(itype->phy_type, mask)) 4909 return itype; 4910 } 4911 4912 return NULL; 4913 } 4914 4915 static uint64_t 4916 ixl_search_link_speed(uint8_t link_speed) 4917 { 4918 const struct ixl_speed_type *type; 4919 unsigned int i; 4920 4921 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4922 type = &ixl_speed_type_map[i]; 4923 4924 if (ISSET(type->dev_speed, link_speed)) 4925 return type->net_speed; 4926 } 4927 4928 return 0; 4929 } 4930 4931 static uint8_t 4932 ixl_search_baudrate(uint64_t baudrate) 4933 { 4934 const struct ixl_speed_type *type; 4935 unsigned int i; 4936 4937 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) { 4938 type = &ixl_speed_type_map[i]; 4939 4940 if (type->net_speed == baudrate) { 4941 return type->dev_speed; 4942 } 4943 } 4944 4945 return 0; 4946 } 4947 4948 static int 4949 ixl_restart_an(struct ixl_softc *sc) 4950 { 4951 struct ixl_aq_desc iaq; 4952 4953 memset(&iaq, 0, sizeof(iaq)); 4954 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN); 4955 iaq.iaq_param[0] = 4956 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE); 4957 4958 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4959 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n"); 4960 return -1; 4961 } 4962 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) { 4963 aprint_error_dev(sc->sc_dev, "RESTART AN error\n"); 4964 return -1; 4965 } 4966 4967 return 0; 4968 } 4969 4970 static int 4971 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 4972 uint16_t vlan, uint16_t flags) 4973 { 4974 struct ixl_aq_desc iaq; 4975 struct ixl_aq_add_macvlan *param; 4976 struct ixl_aq_add_macvlan_elem *elem; 4977 4978 memset(&iaq, 0, sizeof(iaq)); 4979 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 4980 iaq.iaq_opcode = 
htole16(IXL_AQ_OP_ADD_MACVLAN); 4981 iaq.iaq_datalen = htole16(sizeof(*elem)); 4982 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 4983 4984 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param; 4985 param->num_addrs = htole16(1); 4986 param->seid0 = htole16(0x8000) | sc->sc_seid; 4987 param->seid1 = 0; 4988 param->seid2 = 0; 4989 4990 elem = IXL_DMA_KVA(&sc->sc_scratch); 4991 memset(elem, 0, sizeof(*elem)); 4992 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 4993 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags); 4994 elem->vlan = htole16(vlan); 4995 4996 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 4997 return IXL_AQ_RC_EINVAL; 4998 } 4999 5000 switch (le16toh(iaq.iaq_retval)) { 5001 case IXL_AQ_RC_OK: 5002 break; 5003 case IXL_AQ_RC_ENOSPC: 5004 return ENOSPC; 5005 case IXL_AQ_RC_ENOENT: 5006 return ENOENT; 5007 case IXL_AQ_RC_EACCES: 5008 return EACCES; 5009 case IXL_AQ_RC_EEXIST: 5010 return EEXIST; 5011 case IXL_AQ_RC_EINVAL: 5012 return EINVAL; 5013 default: 5014 return EIO; 5015 } 5016 5017 return 0; 5018 } 5019 5020 static int 5021 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr, 5022 uint16_t vlan, uint16_t flags) 5023 { 5024 struct ixl_aq_desc iaq; 5025 struct ixl_aq_remove_macvlan *param; 5026 struct ixl_aq_remove_macvlan_elem *elem; 5027 5028 memset(&iaq, 0, sizeof(iaq)); 5029 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD); 5030 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN); 5031 iaq.iaq_datalen = htole16(sizeof(*elem)); 5032 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)); 5033 5034 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param; 5035 param->num_addrs = htole16(1); 5036 param->seid0 = htole16(0x8000) | sc->sc_seid; 5037 param->seid1 = 0; 5038 param->seid2 = 0; 5039 5040 elem = IXL_DMA_KVA(&sc->sc_scratch); 5041 memset(elem, 0, sizeof(*elem)); 5042 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN); 5043 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags); 5044 elem->vlan = htole16(vlan); 5045 5046 if (ixl_atq_poll(sc, &iaq, 250) != 0) { 5047 return EINVAL; 5048 } 5049 5050 switch (le16toh(iaq.iaq_retval)) { 5051 case IXL_AQ_RC_OK: 5052 break; 5053 case IXL_AQ_RC_ENOENT: 5054 return ENOENT; 5055 case IXL_AQ_RC_EACCES: 5056 return EACCES; 5057 case IXL_AQ_RC_EINVAL: 5058 return EINVAL; 5059 default: 5060 return EIO; 5061 } 5062 5063 return 0; 5064 } 5065 5066 static int 5067 ixl_hmc(struct ixl_softc *sc) 5068 { 5069 struct { 5070 uint32_t count; 5071 uint32_t minsize; 5072 bus_size_t objsiz; 5073 bus_size_t setoff; 5074 bus_size_t setcnt; 5075 } regs[] = { 5076 { 5077 0, 5078 IXL_HMC_TXQ_MINSIZE, 5079 I40E_GLHMC_LANTXOBJSZ, 5080 I40E_GLHMC_LANTXBASE(sc->sc_pf_id), 5081 I40E_GLHMC_LANTXCNT(sc->sc_pf_id), 5082 }, 5083 { 5084 0, 5085 IXL_HMC_RXQ_MINSIZE, 5086 I40E_GLHMC_LANRXOBJSZ, 5087 I40E_GLHMC_LANRXBASE(sc->sc_pf_id), 5088 I40E_GLHMC_LANRXCNT(sc->sc_pf_id), 5089 }, 5090 { 5091 0, 5092 0, 5093 I40E_GLHMC_FCOEDDPOBJSZ, 5094 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id), 5095 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id), 5096 }, 5097 { 5098 0, 5099 0, 5100 I40E_GLHMC_FCOEFOBJSZ, 5101 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id), 5102 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id), 5103 }, 5104 }; 5105 struct ixl_hmc_entry *e; 5106 uint64_t size, dva; 5107 uint8_t *kva; 5108 uint64_t *sdpage; 5109 unsigned int i; 5110 int npages, tables; 5111 uint32_t reg; 5112 5113 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries)); 5114 5115 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count = 5116 ixl_rd(sc, I40E_GLHMC_LANQMAX); 5117 5118 size = 0; 5119 for (i 
= 0; i < __arraycount(regs); i++) { 5120 e = &sc->sc_hmc_entries[i]; 5121 5122 e->hmc_count = regs[i].count; 5123 reg = ixl_rd(sc, regs[i].objsiz); 5124 e->hmc_size = BIT_ULL(0x3F & reg); 5125 e->hmc_base = size; 5126 5127 if ((e->hmc_size * 8) < regs[i].minsize) { 5128 aprint_error_dev(sc->sc_dev, 5129 "kernel hmc entry is too big\n"); 5130 return -1; 5131 } 5132 5133 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP); 5134 } 5135 size = roundup(size, IXL_HMC_PGSIZE); 5136 npages = size / IXL_HMC_PGSIZE; 5137 5138 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ; 5139 5140 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) { 5141 aprint_error_dev(sc->sc_dev, 5142 "unable to allocate hmc pd memory\n"); 5143 return -1; 5144 } 5145 5146 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE, 5147 IXL_HMC_PGSIZE) != 0) { 5148 aprint_error_dev(sc->sc_dev, 5149 "unable to allocate hmc sd memory\n"); 5150 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5151 return -1; 5152 } 5153 5154 kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 5155 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd)); 5156 5157 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 5158 0, IXL_DMA_LEN(&sc->sc_hmc_pd), 5159 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5160 5161 dva = IXL_DMA_DVA(&sc->sc_hmc_pd); 5162 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd); 5163 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd)); 5164 5165 for (i = 0; (int)i < npages; i++) { 5166 *sdpage = htole64(dva | IXL_HMC_PDVALID); 5167 sdpage++; 5168 5169 dva += IXL_HMC_PGSIZE; 5170 } 5171 5172 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd), 5173 0, IXL_DMA_LEN(&sc->sc_hmc_sd), 5174 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5175 5176 dva = IXL_DMA_DVA(&sc->sc_hmc_sd); 5177 for (i = 0; (int)i < tables; i++) { 5178 uint32_t count; 5179 5180 KASSERT(npages >= 0); 5181 5182 count = ((unsigned int)npages > IXL_HMC_PGS) ? 
5183 IXL_HMC_PGS : (unsigned int)npages; 5184 5185 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32); 5186 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva | 5187 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | 5188 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)); 5189 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 5190 ixl_wr(sc, I40E_PFHMC_SDCMD, 5191 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i); 5192 5193 npages -= IXL_HMC_PGS; 5194 dva += IXL_HMC_PGSIZE; 5195 } 5196 5197 for (i = 0; i < __arraycount(regs); i++) { 5198 e = &sc->sc_hmc_entries[i]; 5199 5200 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP); 5201 ixl_wr(sc, regs[i].setcnt, e->hmc_count); 5202 } 5203 5204 return 0; 5205 } 5206 5207 static void 5208 ixl_hmc_free(struct ixl_softc *sc) 5209 { 5210 ixl_dmamem_free(sc, &sc->sc_hmc_sd); 5211 ixl_dmamem_free(sc, &sc->sc_hmc_pd); 5212 } 5213 5214 static void 5215 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing, 5216 unsigned int npacking) 5217 { 5218 uint8_t *dst = d; 5219 const uint8_t *src = s; 5220 unsigned int i; 5221 5222 for (i = 0; i < npacking; i++) { 5223 const struct ixl_hmc_pack *pack = &packing[i]; 5224 unsigned int offset = pack->lsb / 8; 5225 unsigned int align = pack->lsb % 8; 5226 const uint8_t *in = src + pack->offset; 5227 uint8_t *out = dst + offset; 5228 int width = pack->width; 5229 unsigned int inbits = 0; 5230 5231 if (align) { 5232 inbits = (*in++) << align; 5233 *out++ |= (inbits & 0xff); 5234 inbits >>= 8; 5235 5236 width -= 8 - align; 5237 } 5238 5239 while (width >= 8) { 5240 inbits |= (*in++) << align; 5241 *out++ = (inbits & 0xff); 5242 inbits >>= 8; 5243 5244 width -= 8; 5245 } 5246 5247 if (width > 0) { 5248 inbits |= (*in) << align; 5249 *out |= (inbits & ((1 << width) - 1)); 5250 } 5251 } 5252 } 5253 5254 static struct ixl_aq_buf * 5255 ixl_aqb_alloc(struct ixl_softc *sc) 5256 { 5257 struct ixl_aq_buf *aqb; 5258 5259 aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP); 5260 5261 aqb->aqb_size = IXL_AQ_BUFLEN; 5262 5263 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1, 5264 aqb->aqb_size, 0, 5265 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0) 5266 goto free; 5267 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size, 5268 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs, 5269 BUS_DMA_WAITOK) != 0) 5270 goto destroy; 5271 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs, 5272 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0) 5273 goto dma_free; 5274 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data, 5275 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0) 5276 goto unmap; 5277 5278 return aqb; 5279 unmap: 5280 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5281 dma_free: 5282 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5283 destroy: 5284 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5285 free: 5286 kmem_free(aqb, sizeof(*aqb)); 5287 5288 return NULL; 5289 } 5290 5291 static void 5292 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb) 5293 { 5294 5295 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map); 5296 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size); 5297 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1); 5298 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map); 5299 kmem_free(aqb, sizeof(*aqb)); 5300 } 5301 5302 static int 5303 ixl_arq_fill(struct ixl_softc *sc) 5304 { 5305 struct ixl_aq_buf *aqb; 5306 struct ixl_aq_desc *arq, *iaq; 5307 unsigned int prod = sc->sc_arq_prod; 5308 unsigned int n; 5309 int post = 0; 5310 5311 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons, 
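/* count of admin receive queue slots that still need a buffer posted */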
5312 IXL_AQ_NUM); 5313 arq = IXL_DMA_KVA(&sc->sc_arq); 5314 5315 if (__predict_false(n <= 0)) 5316 return 0; 5317 5318 do { 5319 aqb = sc->sc_arq_live[prod]; 5320 iaq = &arq[prod]; 5321 5322 if (aqb == NULL) { 5323 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle); 5324 if (aqb != NULL) { 5325 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5326 ixl_aq_buf, aqb_entry); 5327 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) { 5328 break; 5329 } 5330 5331 sc->sc_arq_live[prod] = aqb; 5332 memset(aqb->aqb_data, 0, aqb->aqb_size); 5333 5334 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, 5335 aqb->aqb_size, BUS_DMASYNC_PREREAD); 5336 5337 iaq->iaq_flags = htole16(IXL_AQ_BUF | 5338 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? 5339 IXL_AQ_LB : 0)); 5340 iaq->iaq_opcode = 0; 5341 iaq->iaq_datalen = htole16(aqb->aqb_size); 5342 iaq->iaq_retval = 0; 5343 iaq->iaq_cookie = 0; 5344 iaq->iaq_param[0] = 0; 5345 iaq->iaq_param[1] = 0; 5346 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr); 5347 } 5348 5349 prod++; 5350 prod &= IXL_AQ_MASK; 5351 5352 post = 1; 5353 5354 } while (--n); 5355 5356 if (post) { 5357 sc->sc_arq_prod = prod; 5358 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 5359 } 5360 5361 return post; 5362 } 5363 5364 static void 5365 ixl_arq_unfill(struct ixl_softc *sc) 5366 { 5367 struct ixl_aq_buf *aqb; 5368 unsigned int i; 5369 5370 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) { 5371 aqb = sc->sc_arq_live[i]; 5372 if (aqb == NULL) 5373 continue; 5374 5375 sc->sc_arq_live[i] = NULL; 5376 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size, 5377 BUS_DMASYNC_POSTREAD); 5378 ixl_aqb_free(sc, aqb); 5379 } 5380 5381 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) { 5382 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb, 5383 ixl_aq_buf, aqb_entry); 5384 ixl_aqb_free(sc, aqb); 5385 } 5386 } 5387 5388 static void 5389 ixl_clear_hw(struct ixl_softc *sc) 5390 { 5391 uint32_t num_queues, base_queue; 5392 uint32_t num_pf_int; 5393 uint32_t num_vf_int; 5394 uint32_t num_vfs; 5395 uint32_t i, j; 5396 uint32_t val; 5397 uint32_t eol = 0x7ff; 5398 5399 /* get number of interrupts, queues, and vfs */ 5400 val = ixl_rd(sc, I40E_GLPCI_CNF2); 5401 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 5402 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 5403 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> 5404 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; 5405 5406 val = ixl_rd(sc, I40E_PFLAN_QALLOC); 5407 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> 5408 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 5409 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> 5410 I40E_PFLAN_QALLOC_LASTQ_SHIFT; 5411 if (val & I40E_PFLAN_QALLOC_VALID_MASK) 5412 num_queues = (j - base_queue) + 1; 5413 else 5414 num_queues = 0; 5415 5416 val = ixl_rd(sc, I40E_PF_VT_PFALLOC); 5417 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> 5418 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; 5419 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> 5420 I40E_PF_VT_PFALLOC_LASTVF_SHIFT; 5421 if (val & I40E_PF_VT_PFALLOC_VALID_MASK) 5422 num_vfs = (j - i) + 1; 5423 else 5424 num_vfs = 0; 5425 5426 /* stop all the interrupts */ 5427 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5428 ixl_flush(sc); 5429 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 5430 for (i = 0; i < num_pf_int - 2; i++) 5431 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val); 5432 ixl_flush(sc); 5433 5434 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ 5435 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5436 ixl_wr(sc, I40E_PFINT_LNKLST0, val); 5437 for (i = 0; i < num_pf_int - 2; i++) 5438 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val); 5439 val = eol << 
I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; 5440 for (i = 0; i < num_vfs; i++) 5441 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val); 5442 for (i = 0; i < num_vf_int - 2; i++) 5443 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val); 5444 5445 /* warn the HW of the coming Tx disables */ 5446 for (i = 0; i < num_queues; i++) { 5447 uint32_t abs_queue_idx = base_queue + i; 5448 uint32_t reg_block = 0; 5449 5450 if (abs_queue_idx >= 128) { 5451 reg_block = abs_queue_idx / 128; 5452 abs_queue_idx %= 128; 5453 } 5454 5455 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block)); 5456 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 5457 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 5458 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 5459 5460 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val); 5461 } 5462 delaymsec(400); 5463 5464 /* stop all the queues */ 5465 for (i = 0; i < num_queues; i++) { 5466 ixl_wr(sc, I40E_QINT_TQCTL(i), 0); 5467 ixl_wr(sc, I40E_QTX_ENA(i), 0); 5468 ixl_wr(sc, I40E_QINT_RQCTL(i), 0); 5469 ixl_wr(sc, I40E_QRX_ENA(i), 0); 5470 } 5471 5472 /* short wait for all queue disables to settle */ 5473 delaymsec(50); 5474 } 5475 5476 static int 5477 ixl_pf_reset(struct ixl_softc *sc) 5478 { 5479 uint32_t cnt = 0; 5480 uint32_t cnt1 = 0; 5481 uint32_t reg = 0, reg0 = 0; 5482 uint32_t grst_del; 5483 5484 /* 5485 * Poll for Global Reset steady state in case of recent GRST. 5486 * The grst delay value is in 100ms units, and we'll wait a 5487 * couple counts longer to be sure we don't just miss the end. 5488 */ 5489 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL); 5490 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK; 5491 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 5492 5493 grst_del = grst_del * 20; 5494 5495 for (cnt = 0; cnt < grst_del; cnt++) { 5496 reg = ixl_rd(sc, I40E_GLGEN_RSTAT); 5497 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 5498 break; 5499 delaymsec(100); 5500 } 5501 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5502 aprint_error(", Global reset polling failed to complete\n"); 5503 return -1; 5504 } 5505 5506 /* Now Wait for the FW to be ready */ 5507 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { 5508 reg = ixl_rd(sc, I40E_GLNVM_ULD); 5509 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5510 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); 5511 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5512 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) 5513 break; 5514 5515 delaymsec(10); 5516 } 5517 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 5518 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { 5519 aprint_error(", wait for FW Reset complete timed out " 5520 "(I40E_GLNVM_ULD = 0x%x)\n", reg); 5521 return -1; 5522 } 5523 5524 /* 5525 * If there was a Global Reset in progress when we got here, 5526 * we don't need to do the PF Reset 5527 */ 5528 if (cnt == 0) { 5529 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5530 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK); 5531 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { 5532 reg = ixl_rd(sc, I40E_PFGEN_CTRL); 5533 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) 5534 break; 5535 delaymsec(1); 5536 5537 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT); 5538 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 5539 aprint_error(", Core reset upcoming." 
5540 " Skipping PF reset reset request\n"); 5541 return -1; 5542 } 5543 } 5544 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { 5545 aprint_error(", PF reset polling failed to complete" 5546 "(I40E_PFGEN_CTRL= 0x%x)\n", reg); 5547 return -1; 5548 } 5549 } 5550 5551 return 0; 5552 } 5553 5554 static int 5555 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm, 5556 bus_size_t size, bus_size_t align) 5557 { 5558 ixm->ixm_size = size; 5559 5560 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1, 5561 ixm->ixm_size, 0, 5562 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 5563 &ixm->ixm_map) != 0) 5564 return 1; 5565 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size, 5566 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs, 5567 BUS_DMA_WAITOK) != 0) 5568 goto destroy; 5569 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs, 5570 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0) 5571 goto free; 5572 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva, 5573 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0) 5574 goto unmap; 5575 5576 memset(ixm->ixm_kva, 0, ixm->ixm_size); 5577 5578 return 0; 5579 unmap: 5580 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5581 free: 5582 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5583 destroy: 5584 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5585 return 1; 5586 } 5587 5588 static void 5589 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm) 5590 { 5591 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map); 5592 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size); 5593 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1); 5594 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map); 5595 } 5596 5597 static int 5598 ixl_setup_vlan_hwfilter(struct ixl_softc *sc) 5599 { 5600 struct ethercom *ec = &sc->sc_ec; 5601 struct vlanid_list *vlanidp; 5602 int rv; 5603 5604 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5605 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5606 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5607 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 5608 5609 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5610 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5611 if (rv != 0) 5612 return rv; 5613 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5614 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5615 if (rv != 0) 5616 return rv; 5617 5618 ETHER_LOCK(ec); 5619 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5620 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 5621 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5622 if (rv != 0) 5623 break; 5624 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 5625 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 5626 if (rv != 0) 5627 break; 5628 } 5629 ETHER_UNLOCK(ec); 5630 5631 return rv; 5632 } 5633 5634 static void 5635 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc) 5636 { 5637 struct vlanid_list *vlanidp; 5638 struct ethercom *ec = &sc->sc_ec; 5639 5640 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 5641 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5642 ixl_remove_macvlan(sc, etherbroadcastaddr, 0, 5643 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5644 5645 ETHER_LOCK(ec); 5646 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 5647 ixl_remove_macvlan(sc, sc->sc_enaddr, 5648 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5649 ixl_remove_macvlan(sc, etherbroadcastaddr, 5650 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 5651 } 5652 ETHER_UNLOCK(ec); 5653 5654 ixl_add_macvlan(sc, sc->sc_enaddr, 0, 5655 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5656 ixl_add_macvlan(sc, etherbroadcastaddr, 0, 5657 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 5658 } 5659 5660 static int 
5661 ixl_update_macvlan(struct ixl_softc *sc) 5662 { 5663 int rv = 0; 5664 int next_ec_capenable = sc->sc_ec.ec_capenable; 5665 5666 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 5667 rv = ixl_setup_vlan_hwfilter(sc); 5668 if (rv != 0) 5669 ixl_teardown_vlan_hwfilter(sc); 5670 } else { 5671 ixl_teardown_vlan_hwfilter(sc); 5672 } 5673 5674 return rv; 5675 } 5676 5677 static int 5678 ixl_ifflags_cb(struct ethercom *ec) 5679 { 5680 struct ifnet *ifp = &ec->ec_if; 5681 struct ixl_softc *sc = ifp->if_softc; 5682 int rv, change; 5683 5684 mutex_enter(&sc->sc_cfg_lock); 5685 5686 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable; 5687 5688 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) { 5689 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 5690 rv = ENETRESET; 5691 goto out; 5692 } 5693 5694 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) { 5695 rv = ixl_update_macvlan(sc); 5696 if (rv == 0) { 5697 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 5698 } else { 5699 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER); 5700 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 5701 } 5702 } 5703 5704 rv = ixl_iff(sc); 5705 out: 5706 mutex_exit(&sc->sc_cfg_lock); 5707 5708 return rv; 5709 } 5710 5711 static int 5712 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq) 5713 { 5714 const struct ixl_aq_link_status *status; 5715 const struct ixl_phy_type *itype; 5716 5717 uint64_t ifm_active = IFM_ETHER; 5718 uint64_t ifm_status = IFM_AVALID; 5719 int link_state = LINK_STATE_DOWN; 5720 uint64_t baudrate = 0; 5721 5722 status = (const struct ixl_aq_link_status *)iaq->iaq_param; 5723 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) { 5724 ifm_active |= IFM_NONE; 5725 goto done; 5726 } 5727 5728 ifm_active |= IFM_FDX; 5729 ifm_status |= IFM_ACTIVE; 5730 link_state = LINK_STATE_UP; 5731 5732 itype = ixl_search_phy_type(status->phy_type); 5733 if (itype != NULL) 5734 ifm_active |= itype->ifm_type; 5735 5736 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX)) 5737 ifm_active |= IFM_ETH_TXPAUSE; 5738 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX)) 5739 ifm_active |= IFM_ETH_RXPAUSE; 5740 5741 baudrate = ixl_search_link_speed(status->link_speed); 5742 5743 done: 5744 /* sc->sc_cfg_lock held expect during attach */ 5745 sc->sc_media_active = ifm_active; 5746 sc->sc_media_status = ifm_status; 5747 5748 sc->sc_ec.ec_if.if_baudrate = baudrate; 5749 5750 return link_state; 5751 } 5752 5753 static int 5754 ixl_establish_intx(struct ixl_softc *sc) 5755 { 5756 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5757 pci_intr_handle_t *intr; 5758 char xnamebuf[32]; 5759 char intrbuf[PCI_INTRSTR_LEN]; 5760 char const *intrstr; 5761 5762 KASSERT(sc->sc_nintrs == 1); 5763 5764 intr = &sc->sc_ihp[0]; 5765 5766 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 5767 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy", 5768 device_xname(sc->sc_dev)); 5769 5770 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr, 5771 sc, xnamebuf); 5772 5773 if (sc->sc_ihs[0] == NULL) { 5774 aprint_error_dev(sc->sc_dev, 5775 "unable to establish interrupt at %s\n", intrstr); 5776 return -1; 5777 } 5778 5779 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 5780 return 0; 5781 } 5782 5783 static int 5784 ixl_establish_msix(struct ixl_softc *sc) 5785 { 5786 pci_chipset_tag_t pc = sc->sc_pa.pa_pc; 5787 kcpuset_t *affinity; 5788 unsigned int vector = 0; 5789 unsigned int i; 5790 int affinity_to, r; 5791 char xnamebuf[32]; 5792 char intrbuf[PCI_INTRSTR_LEN]; 5793 char const 
*intrstr; 5794 5795 kcpuset_create(&affinity, false); 5796 5797 /* the "other" intr is mapped to vector 0 */ 5798 vector = 0; 5799 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5800 intrbuf, sizeof(intrbuf)); 5801 snprintf(xnamebuf, sizeof(xnamebuf), "%s others", 5802 device_xname(sc->sc_dev)); 5803 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5804 sc->sc_ihp[vector], IPL_NET, ixl_other_intr, 5805 sc, xnamebuf); 5806 if (sc->sc_ihs[vector] == NULL) { 5807 aprint_error_dev(sc->sc_dev, 5808 "unable to establish interrupt at %s\n", intrstr); 5809 goto fail; 5810 } 5811 5812 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr); 5813 5814 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5815 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu; 5816 5817 kcpuset_zero(affinity); 5818 kcpuset_set(affinity, affinity_to); 5819 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5820 if (r == 0) { 5821 aprint_normal(", affinity to %u", affinity_to); 5822 } 5823 aprint_normal("\n"); 5824 vector++; 5825 5826 sc->sc_msix_vector_queue = vector; 5827 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0; 5828 5829 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 5830 intrstr = pci_intr_string(pc, sc->sc_ihp[vector], 5831 intrbuf, sizeof(intrbuf)); 5832 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d", 5833 device_xname(sc->sc_dev), i); 5834 5835 sc->sc_ihs[vector] = pci_intr_establish_xname(pc, 5836 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr, 5837 (void *)&sc->sc_qps[i], xnamebuf); 5838 5839 if (sc->sc_ihs[vector] == NULL) { 5840 aprint_error_dev(sc->sc_dev, 5841 "unable to establish interrupt at %s\n", intrstr); 5842 goto fail; 5843 } 5844 5845 aprint_normal_dev(sc->sc_dev, 5846 "for TXRX%d interrupt at %s", i, intrstr); 5847 5848 kcpuset_zero(affinity); 5849 kcpuset_set(affinity, affinity_to); 5850 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL); 5851 if (r == 0) { 5852 aprint_normal(", affinity to %u", affinity_to); 5853 affinity_to = (affinity_to + 1) % ncpu; 5854 } 5855 aprint_normal("\n"); 5856 vector++; 5857 } 5858 5859 kcpuset_destroy(affinity); 5860 5861 return 0; 5862 fail: 5863 for (i = 0; i < vector; i++) { 5864 pci_intr_disestablish(pc, sc->sc_ihs[i]); 5865 } 5866 5867 sc->sc_msix_vector_queue = 0; 5869 kcpuset_destroy(affinity); 5870 5871 return -1; 5872 } 5873 5874 static void 5875 ixl_config_queue_intr(struct ixl_softc *sc) 5876 { 5877 unsigned int i, vector; 5878 5879 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5880 vector = sc->sc_msix_vector_queue; 5881 } else { 5882 vector = I40E_INTR_NOTX_INTR; 5883 5884 ixl_wr(sc, I40E_PFINT_LNKLST0, 5885 (I40E_INTR_NOTX_QUEUE << 5886 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) | 5887 (I40E_QUEUE_TYPE_RX << 5888 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5889 } 5890 5891 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 5892 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0); 5893 ixl_flush(sc); 5894 5895 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), 5896 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | 5897 (I40E_QUEUE_TYPE_RX << 5898 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); 5899 5900 ixl_wr(sc, I40E_QINT_RQCTL(i), 5901 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 5902 (I40E_ITR_INDEX_RX << 5903 I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 5904 (I40E_INTR_NOTX_RX_QUEUE << 5905 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) | 5906 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 5907 (I40E_QUEUE_TYPE_TX << 5908 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 5909 I40E_QINT_RQCTL_CAUSE_ENA_MASK); 5910 5911 ixl_wr(sc, 
I40E_QINT_TQCTL(i), 5912 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 5913 (I40E_ITR_INDEX_TX << 5914 I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 5915 (I40E_INTR_NOTX_TX_QUEUE << 5916 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) | 5917 (I40E_QUEUE_TYPE_EOL << 5918 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | 5919 (I40E_QUEUE_TYPE_RX << 5920 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) | 5921 I40E_QINT_TQCTL_CAUSE_ENA_MASK); 5922 5923 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) { 5924 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i), 5925 sc->sc_itr_rx); 5926 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i), 5927 sc->sc_itr_tx); 5928 vector++; 5929 } 5930 } 5931 ixl_flush(sc); 5932 5933 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx); 5934 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx); 5935 ixl_flush(sc); 5936 } 5937 5938 static void 5939 ixl_config_other_intr(struct ixl_softc *sc) 5940 { 5941 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0); 5942 (void)ixl_rd(sc, I40E_PFINT_ICR0); 5943 5944 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 5945 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 5946 I40E_PFINT_ICR0_ENA_GRST_MASK | 5947 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 5948 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 5949 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 5950 I40E_PFINT_ICR0_ENA_VFLR_MASK | 5951 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 5952 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 5953 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK); 5954 5955 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF); 5956 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0); 5957 ixl_wr(sc, I40E_PFINT_STAT_CTL0, 5958 (I40E_ITR_INDEX_OTHER << 5959 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)); 5960 ixl_flush(sc); 5961 } 5962 5963 static int 5964 ixl_setup_interrupts(struct ixl_softc *sc) 5965 { 5966 struct pci_attach_args *pa = &sc->sc_pa; 5967 pci_intr_type_t max_type, intr_type; 5968 int counts[PCI_INTR_TYPE_SIZE]; 5969 int error; 5970 unsigned int i; 5971 bool retry; 5972 5973 memset(counts, 0, sizeof(counts)); 5974 max_type = PCI_INTR_TYPE_MSIX; 5975 /* QPs + other interrupt */ 5976 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1; 5977 counts[PCI_INTR_TYPE_INTX] = 1; 5978 5979 if (ixl_param_nomsix) 5980 counts[PCI_INTR_TYPE_MSIX] = 0; 5981 5982 do { 5983 retry = false; 5984 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type); 5985 if (error != 0) { 5986 aprint_error_dev(sc->sc_dev, 5987 "couldn't map interrupt\n"); 5988 break; 5989 } 5990 5991 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]); 5992 sc->sc_nintrs = counts[intr_type]; 5993 KASSERT(sc->sc_nintrs > 0); 5994 5995 for (i = 0; i < sc->sc_nintrs; i++) { 5996 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i], 5997 PCI_INTR_MPSAFE, true); 5998 } 5999 6000 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs, 6001 KM_SLEEP); 6002 6003 if (intr_type == PCI_INTR_TYPE_MSIX) { 6004 error = ixl_establish_msix(sc); 6005 if (error) { 6006 counts[PCI_INTR_TYPE_MSIX] = 0; 6007 retry = true; 6008 } 6009 } else if (intr_type == PCI_INTR_TYPE_INTX) { 6010 error = ixl_establish_intx(sc); 6011 } else { 6012 error = -1; 6013 } 6014 6015 if (error) { 6016 kmem_free(sc->sc_ihs, 6017 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 6018 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 6019 } else { 6020 sc->sc_intrtype = intr_type; 6021 } 6022 } while (retry); 6023 6024 return error; 6025 } 6026 6027 static void 6028 ixl_teardown_interrupts(struct ixl_softc *sc) 6029 { 6030 struct pci_attach_args *pa = &sc->sc_pa; 6031 unsigned int i; 6032 6033 for (i = 0; i < sc->sc_nintrs; i++) { 6034 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]); 
6035 } 6036 6037 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs); 6038 6039 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs); 6040 sc->sc_ihs = NULL; 6041 sc->sc_nintrs = 0; 6042 } 6043 6044 static int 6045 ixl_setup_stats(struct ixl_softc *sc) 6046 { 6047 struct ixl_queue_pair *qp; 6048 struct ixl_tx_ring *txr; 6049 struct ixl_rx_ring *rxr; 6050 struct ixl_stats_counters *isc; 6051 unsigned int i; 6052 6053 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 6054 qp = &sc->sc_qps[i]; 6055 txr = qp->qp_txr; 6056 rxr = qp->qp_rxr; 6057 6058 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC, 6059 NULL, qp->qp_name, "m_defrag successed"); 6060 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC, 6061 NULL, qp->qp_name, "m_defrag_failed"); 6062 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC, 6063 NULL, qp->qp_name, "Dropped in pcq"); 6064 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC, 6065 NULL, qp->qp_name, "Deferred transmit"); 6066 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, 6067 NULL, qp->qp_name, "Interrupt on queue"); 6068 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC, 6069 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 6070 6071 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC, 6072 NULL, qp->qp_name, "MGETHDR failed"); 6073 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC, 6074 NULL, qp->qp_name, "MCLGET failed"); 6075 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed, 6076 EVCNT_TYPE_MISC, NULL, qp->qp_name, 6077 "bus_dmamap_load_mbuf failed"); 6078 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, 6079 NULL, qp->qp_name, "Interrupt on queue"); 6080 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC, 6081 NULL, qp->qp_name, "Handled queue in softint/workqueue"); 6082 } 6083 6084 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR, 6085 NULL, device_xname(sc->sc_dev), "Interrupt for other events"); 6086 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC, 6087 NULL, device_xname(sc->sc_dev), "Link status event"); 6088 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC, 6089 NULL, device_xname(sc->sc_dev), "ECC error"); 6090 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC, 6091 NULL, device_xname(sc->sc_dev), "PCI exception"); 6092 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC, 6093 NULL, device_xname(sc->sc_dev), "Critical error"); 6094 6095 isc = &sc->sc_stats_counters; 6096 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC, 6097 NULL, device_xname(sc->sc_dev), "CRC errors"); 6098 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC, 6099 NULL, device_xname(sc->sc_dev), "Illegal bytes"); 6100 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC, 6101 NULL, device_xname(sc->sc_dev), "Mac local faults"); 6102 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC, 6103 NULL, device_xname(sc->sc_dev), "Mac remote faults"); 6104 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC, 6105 NULL, device_xname(sc->sc_dev), "Rx xon"); 6106 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC, 6107 NULL, device_xname(sc->sc_dev), "Tx xon"); 6108 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC, 6109 NULL, device_xname(sc->sc_dev), "Rx xoff"); 6110 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC, 6111 NULL, device_xname(sc->sc_dev), "Tx xoff"); 6112 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC, 6113 NULL, device_xname(sc->sc_dev), "Rx 
fragments"); 6114 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC, 6115 NULL, device_xname(sc->sc_dev), "Rx jabber"); 6116 6117 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC, 6118 NULL, device_xname(sc->sc_dev), "Rx size 64"); 6119 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC, 6120 NULL, device_xname(sc->sc_dev), "Rx size 127"); 6121 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC, 6122 NULL, device_xname(sc->sc_dev), "Rx size 255"); 6123 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC, 6124 NULL, device_xname(sc->sc_dev), "Rx size 511"); 6125 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC, 6126 NULL, device_xname(sc->sc_dev), "Rx size 1023"); 6127 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC, 6128 NULL, device_xname(sc->sc_dev), "Rx size 1522"); 6129 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC, 6130 NULL, device_xname(sc->sc_dev), "Rx jumbo packets"); 6131 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC, 6132 NULL, device_xname(sc->sc_dev), "Rx under size"); 6133 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC, 6134 NULL, device_xname(sc->sc_dev), "Rx over size"); 6135 6136 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC, 6137 NULL, device_xname(sc->sc_dev), "Rx bytes / port"); 6138 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC, 6139 NULL, device_xname(sc->sc_dev), "Rx discards / port"); 6140 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC, 6141 NULL, device_xname(sc->sc_dev), "Rx unicast / port"); 6142 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC, 6143 NULL, device_xname(sc->sc_dev), "Rx multicast / port"); 6144 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC, 6145 NULL, device_xname(sc->sc_dev), "Rx broadcast / port"); 6146 6147 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC, 6148 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi"); 6149 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC, 6150 NULL, device_xname(sc->sc_dev), "Rx discard / vsi"); 6151 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC, 6152 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi"); 6153 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC, 6154 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi"); 6155 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC, 6156 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi"); 6157 6158 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC, 6159 NULL, device_xname(sc->sc_dev), "Tx size 64"); 6160 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC, 6161 NULL, device_xname(sc->sc_dev), "Tx size 127"); 6162 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC, 6163 NULL, device_xname(sc->sc_dev), "Tx size 255"); 6164 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC, 6165 NULL, device_xname(sc->sc_dev), "Tx size 511"); 6166 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC, 6167 NULL, device_xname(sc->sc_dev), "Tx size 1023"); 6168 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC, 6169 NULL, device_xname(sc->sc_dev), "Tx size 1522"); 6170 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC, 6171 NULL, device_xname(sc->sc_dev), "Tx jumbo packets"); 6172 6173 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC, 6174 NULL, device_xname(sc->sc_dev), "Tx bytes / port"); 6175 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC, 6176 NULL, 
device_xname(sc->sc_dev), 6177 "Tx dropped due to link down / port"); 6178 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC, 6179 NULL, device_xname(sc->sc_dev), "Tx unicast / port"); 6180 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC, 6181 NULL, device_xname(sc->sc_dev), "Tx multicast / port"); 6182 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC, 6183 NULL, device_xname(sc->sc_dev), "Tx broadcast / port"); 6184 6185 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC, 6186 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi"); 6187 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC, 6188 NULL, device_xname(sc->sc_dev), "Tx errors / vsi"); 6189 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC, 6190 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi"); 6191 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC, 6192 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi"); 6193 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC, 6194 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi"); 6195 6196 sc->sc_stats_intval = ixl_param_stats_interval; 6197 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE); 6198 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc); 6199 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc); 6200 6201 return 0; 6202 } 6203 6204 static void 6205 ixl_teardown_stats(struct ixl_softc *sc) 6206 { 6207 struct ixl_tx_ring *txr; 6208 struct ixl_rx_ring *rxr; 6209 struct ixl_stats_counters *isc; 6210 unsigned int i; 6211 6212 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 6213 txr = sc->sc_qps[i].qp_txr; 6214 rxr = sc->sc_qps[i].qp_rxr; 6215 6216 evcnt_detach(&txr->txr_defragged); 6217 evcnt_detach(&txr->txr_defrag_failed); 6218 evcnt_detach(&txr->txr_pcqdrop); 6219 evcnt_detach(&txr->txr_transmitdef); 6220 evcnt_detach(&txr->txr_intr); 6221 evcnt_detach(&txr->txr_defer); 6222 6223 evcnt_detach(&rxr->rxr_mgethdr_failed); 6224 evcnt_detach(&rxr->rxr_mgetcl_failed); 6225 evcnt_detach(&rxr->rxr_mbuf_load_failed); 6226 evcnt_detach(&rxr->rxr_intr); 6227 evcnt_detach(&rxr->rxr_defer); 6228 } 6229 6230 isc = &sc->sc_stats_counters; 6231 evcnt_detach(&isc->isc_crc_errors); 6232 evcnt_detach(&isc->isc_illegal_bytes); 6233 evcnt_detach(&isc->isc_mac_local_faults); 6234 evcnt_detach(&isc->isc_mac_remote_faults); 6235 evcnt_detach(&isc->isc_link_xon_rx); 6236 evcnt_detach(&isc->isc_link_xon_tx); 6237 evcnt_detach(&isc->isc_link_xoff_rx); 6238 evcnt_detach(&isc->isc_link_xoff_tx); 6239 evcnt_detach(&isc->isc_rx_fragments); 6240 evcnt_detach(&isc->isc_rx_jabber); 6241 evcnt_detach(&isc->isc_rx_bytes); 6242 evcnt_detach(&isc->isc_rx_discards); 6243 evcnt_detach(&isc->isc_rx_unicast); 6244 evcnt_detach(&isc->isc_rx_multicast); 6245 evcnt_detach(&isc->isc_rx_broadcast); 6246 evcnt_detach(&isc->isc_rx_size_64); 6247 evcnt_detach(&isc->isc_rx_size_127); 6248 evcnt_detach(&isc->isc_rx_size_255); 6249 evcnt_detach(&isc->isc_rx_size_511); 6250 evcnt_detach(&isc->isc_rx_size_1023); 6251 evcnt_detach(&isc->isc_rx_size_1522); 6252 evcnt_detach(&isc->isc_rx_size_big); 6253 evcnt_detach(&isc->isc_rx_undersize); 6254 evcnt_detach(&isc->isc_rx_oversize); 6255 evcnt_detach(&isc->isc_tx_bytes); 6256 evcnt_detach(&isc->isc_tx_dropped_link_down); 6257 evcnt_detach(&isc->isc_tx_unicast); 6258 evcnt_detach(&isc->isc_tx_multicast); 6259 evcnt_detach(&isc->isc_tx_broadcast); 6260 evcnt_detach(&isc->isc_tx_size_64); 6261 evcnt_detach(&isc->isc_tx_size_127); 6262 evcnt_detach(&isc->isc_tx_size_255); 6263 
evcnt_detach(&isc->isc_tx_size_511); 6264 evcnt_detach(&isc->isc_tx_size_1023); 6265 evcnt_detach(&isc->isc_tx_size_1522); 6266 evcnt_detach(&isc->isc_tx_size_big); 6267 evcnt_detach(&isc->isc_vsi_rx_discards); 6268 evcnt_detach(&isc->isc_vsi_rx_bytes); 6269 evcnt_detach(&isc->isc_vsi_rx_unicast); 6270 evcnt_detach(&isc->isc_vsi_rx_multicast); 6271 evcnt_detach(&isc->isc_vsi_rx_broadcast); 6272 evcnt_detach(&isc->isc_vsi_tx_errors); 6273 evcnt_detach(&isc->isc_vsi_tx_bytes); 6274 evcnt_detach(&isc->isc_vsi_tx_unicast); 6275 evcnt_detach(&isc->isc_vsi_tx_multicast); 6276 evcnt_detach(&isc->isc_vsi_tx_broadcast); 6277 6278 evcnt_detach(&sc->sc_event_atq); 6279 evcnt_detach(&sc->sc_event_link); 6280 evcnt_detach(&sc->sc_event_ecc_err); 6281 evcnt_detach(&sc->sc_event_pci_exception); 6282 evcnt_detach(&sc->sc_event_crit_err); 6283 6284 callout_destroy(&sc->sc_stats_callout); 6285 } 6286 6287 static void 6288 ixl_stats_callout(void *xsc) 6289 { 6290 struct ixl_softc *sc = xsc; 6291 6292 ixl_work_add(sc->sc_workq, &sc->sc_stats_task); 6293 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 6294 } 6295 6296 static uint64_t 6297 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo, 6298 uint64_t *offset, bool has_offset) 6299 { 6300 uint64_t value, delta; 6301 int bitwidth; 6302 6303 bitwidth = reg_hi == 0 ? 32 : 48; 6304 6305 value = ixl_rd(sc, reg_lo); 6306 6307 if (bitwidth > 32) { 6308 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32); 6309 } 6310 6311 if (__predict_true(has_offset)) { 6312 delta = value; 6313 if (value < *offset) 6314 delta += ((uint64_t)1 << bitwidth); 6315 delta -= *offset; 6316 } else { 6317 delta = 0; 6318 } 6319 atomic_swap_64(offset, value); 6320 6321 return delta; 6322 } 6323 6324 static void 6325 ixl_stats_update(void *xsc) 6326 { 6327 struct ixl_softc *sc = xsc; 6328 struct ixl_stats_counters *isc; 6329 uint64_t delta; 6330 6331 isc = &sc->sc_stats_counters; 6332 6333 /* errors */ 6334 delta = ixl_stat_delta(sc, 6335 0, I40E_GLPRT_CRCERRS(sc->sc_port), 6336 &isc->isc_crc_errors_offset, isc->isc_has_offset); 6337 atomic_add_64(&isc->isc_crc_errors.ev_count, delta); 6338 6339 delta = ixl_stat_delta(sc, 6340 0, I40E_GLPRT_ILLERRC(sc->sc_port), 6341 &isc->isc_illegal_bytes_offset, isc->isc_has_offset); 6342 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta); 6343 6344 /* rx */ 6345 delta = ixl_stat_delta(sc, 6346 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port), 6347 &isc->isc_rx_bytes_offset, isc->isc_has_offset); 6348 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta); 6349 6350 delta = ixl_stat_delta(sc, 6351 0, I40E_GLPRT_RDPC(sc->sc_port), 6352 &isc->isc_rx_discards_offset, isc->isc_has_offset); 6353 atomic_add_64(&isc->isc_rx_discards.ev_count, delta); 6354 6355 delta = ixl_stat_delta(sc, 6356 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port), 6357 &isc->isc_rx_unicast_offset, isc->isc_has_offset); 6358 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta); 6359 6360 delta = ixl_stat_delta(sc, 6361 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port), 6362 &isc->isc_rx_multicast_offset, isc->isc_has_offset); 6363 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta); 6364 6365 delta = ixl_stat_delta(sc, 6366 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port), 6367 &isc->isc_rx_broadcast_offset, isc->isc_has_offset); 6368 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta); 6369 6370 /* Packet size stats rx */ 6371 delta = ixl_stat_delta(sc, 6372 I40E_GLPRT_PRC64H(sc->sc_port), 
I40E_GLPRT_PRC64L(sc->sc_port), 6373 &isc->isc_rx_size_64_offset, isc->isc_has_offset); 6374 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta); 6375 6376 delta = ixl_stat_delta(sc, 6377 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port), 6378 &isc->isc_rx_size_127_offset, isc->isc_has_offset); 6379 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta); 6380 6381 delta = ixl_stat_delta(sc, 6382 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port), 6383 &isc->isc_rx_size_255_offset, isc->isc_has_offset); 6384 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta); 6385 6386 delta = ixl_stat_delta(sc, 6387 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port), 6388 &isc->isc_rx_size_511_offset, isc->isc_has_offset); 6389 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta); 6390 6391 delta = ixl_stat_delta(sc, 6392 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port), 6393 &isc->isc_rx_size_1023_offset, isc->isc_has_offset); 6394 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta); 6395 6396 delta = ixl_stat_delta(sc, 6397 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port), 6398 &isc->isc_rx_size_1522_offset, isc->isc_has_offset); 6399 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta); 6400 6401 delta = ixl_stat_delta(sc, 6402 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port), 6403 &isc->isc_rx_size_big_offset, isc->isc_has_offset); 6404 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta); 6405 6406 delta = ixl_stat_delta(sc, 6407 0, I40E_GLPRT_RUC(sc->sc_port), 6408 &isc->isc_rx_undersize_offset, isc->isc_has_offset); 6409 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta); 6410 6411 delta = ixl_stat_delta(sc, 6412 0, I40E_GLPRT_ROC(sc->sc_port), 6413 &isc->isc_rx_oversize_offset, isc->isc_has_offset); 6414 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta); 6415 6416 /* tx */ 6417 delta = ixl_stat_delta(sc, 6418 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port), 6419 &isc->isc_tx_bytes_offset, isc->isc_has_offset); 6420 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta); 6421 6422 delta = ixl_stat_delta(sc, 6423 0, I40E_GLPRT_TDOLD(sc->sc_port), 6424 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset); 6425 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta); 6426 6427 delta = ixl_stat_delta(sc, 6428 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port), 6429 &isc->isc_tx_unicast_offset, isc->isc_has_offset); 6430 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta); 6431 6432 delta = ixl_stat_delta(sc, 6433 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port), 6434 &isc->isc_tx_multicast_offset, isc->isc_has_offset); 6435 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta); 6436 6437 delta = ixl_stat_delta(sc, 6438 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port), 6439 &isc->isc_tx_broadcast_offset, isc->isc_has_offset); 6440 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta); 6441 6442 /* Packet size stats tx */ 6443 delta = ixl_stat_delta(sc, 6444 I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port), 6445 &isc->isc_tx_size_64_offset, isc->isc_has_offset); 6446 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta); 6447 6448 delta = ixl_stat_delta(sc, 6449 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port), 6450 &isc->isc_tx_size_127_offset, isc->isc_has_offset); 6451 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta); 6452 6453 delta = ixl_stat_delta(sc, 6454 I40E_GLPRT_PTC255H(sc->sc_port),
I40E_GLPRT_PTC255L(sc->sc_port), 6455 &isc->isc_tx_size_255_offset, isc->isc_has_offset); 6456 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta); 6457 6458 delta = ixl_stat_delta(sc, 6459 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port), 6460 &isc->isc_tx_size_511_offset, isc->isc_has_offset); 6461 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta); 6462 6463 delta = ixl_stat_delta(sc, 6464 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port), 6465 &isc->isc_tx_size_1023_offset, isc->isc_has_offset); 6466 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta); 6467 6468 delta = ixl_stat_delta(sc, 6469 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port), 6470 &isc->isc_tx_size_1522_offset, isc->isc_has_offset); 6471 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta); 6472 6473 delta = ixl_stat_delta(sc, 6474 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port), 6475 &isc->isc_tx_size_big_offset, isc->isc_has_offset); 6476 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta); 6477 6478 /* mac faults */ 6479 delta = ixl_stat_delta(sc, 6480 0, I40E_GLPRT_MLFC(sc->sc_port), 6481 &isc->isc_mac_local_faults_offset, isc->isc_has_offset); 6482 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta); 6483 6484 delta = ixl_stat_delta(sc, 6485 0, I40E_GLPRT_MRFC(sc->sc_port), 6486 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset); 6487 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta); 6488 6489 /* Flow control (LFC) stats */ 6490 delta = ixl_stat_delta(sc, 6491 0, I40E_GLPRT_LXONRXC(sc->sc_port), 6492 &isc->isc_link_xon_rx_offset, isc->isc_has_offset); 6493 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta); 6494 6495 delta = ixl_stat_delta(sc, 6496 0, I40E_GLPRT_LXONTXC(sc->sc_port), 6497 &isc->isc_link_xon_tx_offset, isc->isc_has_offset); 6498 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta); 6499 6500 delta = ixl_stat_delta(sc, 6501 0, I40E_GLPRT_LXOFFRXC(sc->sc_port), 6502 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset); 6503 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta); 6504 6505 delta = ixl_stat_delta(sc, 6506 0, I40E_GLPRT_LXOFFTXC(sc->sc_port), 6507 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset); 6508 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta); 6509 6510 /* fragments */ 6511 delta = ixl_stat_delta(sc, 6512 0, I40E_GLPRT_RFC(sc->sc_port), 6513 &isc->isc_rx_fragments_offset, isc->isc_has_offset); 6514 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta); 6515 6516 delta = ixl_stat_delta(sc, 6517 0, I40E_GLPRT_RJC(sc->sc_port), 6518 &isc->isc_rx_jabber_offset, isc->isc_has_offset); 6519 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta); 6520 6521 /* VSI rx counters */ 6522 delta = ixl_stat_delta(sc, 6523 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx), 6524 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset); 6525 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta); 6526 6527 delta = ixl_stat_delta(sc, 6528 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx), 6529 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx), 6530 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset); 6531 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta); 6532 6533 delta = ixl_stat_delta(sc, 6534 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx), 6535 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx), 6536 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset); 6537 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta); 6538 6539 delta = ixl_stat_delta(sc, 6540 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx), 6541 
I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx), 6542 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset); 6543 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta); 6544 6545 delta = ixl_stat_delta(sc, 6546 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx), 6547 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx), 6548 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset); 6549 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta); 6550 6551 /* VSI tx counters */ 6552 delta = ixl_stat_delta(sc, 6553 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx), 6554 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset); 6555 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta); 6556 6557 delta = ixl_stat_delta(sc, 6558 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx), 6559 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx), 6560 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset); 6561 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta); 6562 6563 delta = ixl_stat_delta(sc, 6564 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx), 6565 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx), 6566 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset); 6567 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta); 6568 6569 delta = ixl_stat_delta(sc, 6570 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx), 6571 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx), 6572 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset); 6573 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta); 6574 6575 delta = ixl_stat_delta(sc, 6576 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx), 6577 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx), 6578 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset); 6579 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta); 6580 } 6581 6582 static int 6583 ixl_setup_sysctls(struct ixl_softc *sc) 6584 { 6585 const char *devname; 6586 struct sysctllog **log; 6587 const struct sysctlnode *rnode, *rxnode, *txnode; 6588 int error; 6589 6590 log = &sc->sc_sysctllog; 6591 devname = device_xname(sc->sc_dev); 6592 6593 error = sysctl_createv(log, 0, NULL, &rnode, 6594 0, CTLTYPE_NODE, devname, 6595 SYSCTL_DESCR("ixl information and settings"), 6596 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 6597 if (error) 6598 goto out; 6599 6600 error = sysctl_createv(log, 0, &rnode, NULL, 6601 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue", 6602 SYSCTL_DESCR("Use workqueue for packet processing"), 6603 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL); 6604 if (error) 6605 goto out; 6606 6607 error = sysctl_createv(log, 0, &rnode, NULL, 6608 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval", 6609 SYSCTL_DESCR("Statistics collection interval in milliseconds"), 6610 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL); 6611 6612 error = sysctl_createv(log, 0, &rnode, &rxnode, 6613 0, CTLTYPE_NODE, "rx", 6614 SYSCTL_DESCR("ixl information and settings for Rx"), 6615 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 6616 if (error) 6617 goto out; 6618 6619 error = sysctl_createv(log, 0, &rxnode, NULL, 6620 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 6621 SYSCTL_DESCR("max number of Rx packets" 6622 " to process for interrupt processing"), 6623 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 6624 if (error) 6625 goto out; 6626 6627 error = sysctl_createv(log, 0, &rxnode, NULL, 6628 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 6629 SYSCTL_DESCR("max number of Rx packets" 6630 " to process for deferred processing"), 6631 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL); 6632 if (error) 6633 goto out; 6634 6635 error 
= sysctl_createv(log, 0, &rnode, &txnode, 6636 0, CTLTYPE_NODE, "tx", 6637 SYSCTL_DESCR("ixl information and settings for Tx"), 6638 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 6639 if (error) 6640 goto out; 6641 6642 error = sysctl_createv(log, 0, &txnode, NULL, 6643 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 6644 SYSCTL_DESCR("max number of Tx packets" 6645 " to process for interrupt processing"), 6646 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 6647 if (error) 6648 goto out; 6649 6650 error = sysctl_createv(log, 0, &txnode, NULL, 6651 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 6652 SYSCTL_DESCR("max number of Tx packets" 6653 " to process for deferred processing"), 6654 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL); 6655 if (error) 6656 goto out; 6657 6658 out: 6659 if (error) { 6660 aprint_error_dev(sc->sc_dev, 6661 "unable to create sysctl node\n"); 6662 sysctl_teardown(log); 6663 } 6664 6665 return error; 6666 } 6667 6668 static void 6669 ixl_teardown_sysctls(struct ixl_softc *sc) 6670 { 6671 6672 sysctl_teardown(&sc->sc_sysctllog); 6673 } 6674 6675 static struct workqueue * 6676 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags) 6677 { 6678 struct workqueue *wq; 6679 int error; 6680 6681 error = workqueue_create(&wq, name, ixl_workq_work, NULL, 6682 prio, ipl, flags); 6683 6684 if (error) 6685 return NULL; 6686 6687 return wq; 6688 } 6689 6690 static void 6691 ixl_workq_destroy(struct workqueue *wq) 6692 { 6693 6694 workqueue_destroy(wq); 6695 } 6696 6697 static void 6698 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg) 6699 { 6700 6701 memset(work, 0, sizeof(*work)); 6702 work->ixw_func = func; 6703 work->ixw_arg = arg; 6704 } 6705 6706 static void 6707 ixl_work_add(struct workqueue *wq, struct ixl_work *work) 6708 { 6709 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0) 6710 return; 6711 6712 kpreempt_disable(); 6713 workqueue_enqueue(wq, &work->ixw_cookie, NULL); 6714 kpreempt_enable(); 6715 } 6716 6717 static void 6718 ixl_work_wait(struct workqueue *wq, struct ixl_work *work) 6719 { 6720 6721 workqueue_wait(wq, &work->ixw_cookie); 6722 } 6723 6724 static void 6725 ixl_workq_work(struct work *wk, void *context) 6726 { 6727 struct ixl_work *work; 6728 6729 work = container_of(wk, struct ixl_work, ixw_cookie); 6730 6731 atomic_swap_uint(&work->ixw_added, 0); 6732 work->ixw_func(work->ixw_arg); 6733 } 6734 6735 static int 6736 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv) 6737 { 6738 struct ixl_aq_desc iaq; 6739 6740 memset(&iaq, 0, sizeof(iaq)); 6741 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ); 6742 iaq.iaq_param[1] = htole32(reg); 6743 6744 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6745 return ETIMEDOUT; 6746 6747 switch (htole16(iaq.iaq_retval)) { 6748 case IXL_AQ_RC_OK: 6749 /* success */ 6750 break; 6751 case IXL_AQ_RC_EACCES: 6752 return EPERM; 6753 case IXL_AQ_RC_EAGAIN: 6754 return EAGAIN; 6755 default: 6756 return EIO; 6757 } 6758 6759 *rv = htole32(iaq.iaq_param[3]); 6760 return 0; 6761 } 6762 6763 static uint32_t 6764 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg) 6765 { 6766 uint32_t val; 6767 int rv, retry, retry_limit; 6768 6769 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6770 retry_limit = 5; 6771 } else { 6772 retry_limit = 0; 6773 } 6774 6775 for (retry = 0; retry < retry_limit; retry++) { 6776 rv = ixl_rx_ctl_read(sc, reg, &val); 6777 if (rv == 0) 6778 return val; 6779 else if (rv == EAGAIN) 6780 delaymsec(1); 6781 else 6782 break; 6783 } 6784 6785 
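	/*
	 * The RX_CTL admin queue read is either unsupported
	 * (IXL_SC_AQ_FLAG_RXCTL not set) or kept failing;
	 * fall back to reading the register directly.
	 */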
val = ixl_rd(sc, reg); 6786 6787 return val; 6788 } 6789 6790 static int 6791 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6792 { 6793 struct ixl_aq_desc iaq; 6794 6795 memset(&iaq, 0, sizeof(iaq)); 6796 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE); 6797 iaq.iaq_param[1] = htole32(reg); 6798 iaq.iaq_param[3] = htole32(value); 6799 6800 if (ixl_atq_poll(sc, &iaq, 250) != 0) 6801 return ETIMEDOUT; 6802 6803 switch (htole16(iaq.iaq_retval)) { 6804 case IXL_AQ_RC_OK: 6805 /* success */ 6806 break; 6807 case IXL_AQ_RC_EACCES: 6808 return EPERM; 6809 case IXL_AQ_RC_EAGAIN: 6810 return EAGAIN; 6811 default: 6812 return EIO; 6813 } 6814 6815 return 0; 6816 } 6817 6818 static void 6819 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value) 6820 { 6821 int rv, retry, retry_limit; 6822 6823 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) { 6824 retry_limit = 5; 6825 } else { 6826 retry_limit = 0; 6827 } 6828 6829 for (retry = 0; retry < retry_limit; retry++) { 6830 rv = ixl_rx_ctl_write(sc, reg, value); 6831 if (rv == 0) 6832 return; 6833 else if (rv == EAGAIN) 6834 delaymsec(1); 6835 else 6836 break; 6837 } 6838 6839 ixl_wr(sc, reg, value); 6840 } 6841 6842 static int 6843 ixl_nvm_lock(struct ixl_softc *sc, char rw) 6844 { 6845 struct ixl_aq_desc iaq; 6846 struct ixl_aq_req_resource_param *param; 6847 int rv; 6848 6849 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6850 return 0; 6851 6852 memset(&iaq, 0, sizeof(iaq)); 6853 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE); 6854 6855 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param; 6856 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 6857 if (rw == 'R') { 6858 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ); 6859 } else { 6860 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE); 6861 } 6862 6863 rv = ixl_atq_poll(sc, &iaq, 250); 6864 6865 if (rv != 0) 6866 return ETIMEDOUT; 6867 6868 switch (le16toh(iaq.iaq_retval)) { 6869 case IXL_AQ_RC_OK: 6870 break; 6871 case IXL_AQ_RC_EACCES: 6872 return EACCES; 6873 case IXL_AQ_RC_EBUSY: 6874 return EBUSY; 6875 case IXL_AQ_RC_EPERM: 6876 return EPERM; 6877 } 6878 6879 return 0; 6880 } 6881 6882 static int 6883 ixl_nvm_unlock(struct ixl_softc *sc) 6884 { 6885 struct ixl_aq_desc iaq; 6886 struct ixl_aq_rel_resource_param *param; 6887 int rv; 6888 6889 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK)) 6890 return 0; 6891 6892 memset(&iaq, 0, sizeof(iaq)); 6893 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE); 6894 6895 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param; 6896 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM); 6897 6898 rv = ixl_atq_poll(sc, &iaq, 250); 6899 6900 if (rv != 0) 6901 return ETIMEDOUT; 6902 6903 switch (le16toh(iaq.iaq_retval)) { 6904 case IXL_AQ_RC_OK: 6905 break; 6906 default: 6907 return EIO; 6908 } 6909 return 0; 6910 } 6911 6912 static int 6913 ixl_srdone_poll(struct ixl_softc *sc) 6914 { 6915 int wait_count; 6916 uint32_t reg; 6917 6918 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS; 6919 wait_count++) { 6920 reg = ixl_rd(sc, I40E_GLNVM_SRCTL); 6921 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK)) 6922 break; 6923 6924 delaymsec(5); 6925 } 6926 6927 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS) 6928 return -1; 6929 6930 return 0; 6931 } 6932 6933 static int 6934 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data) 6935 { 6936 uint32_t reg; 6937 6938 if (ixl_srdone_poll(sc) != 0) 6939 return ETIMEDOUT; 6940 6941 reg = ((uint32_t)offset << 
I40E_GLNVM_SRCTL_ADDR_SHIFT) | 6942 __BIT(I40E_GLNVM_SRCTL_START_SHIFT); 6943 ixl_wr(sc, I40E_GLNVM_SRCTL, reg); 6944 6945 if (ixl_srdone_poll(sc) != 0) { 6946 aprint_debug("NVM read error: couldn't access " 6947 "Shadow RAM address: 0x%x\n", offset); 6948 return ETIMEDOUT; 6949 } 6950 6951 reg = ixl_rd(sc, I40E_GLNVM_SRDATA); 6952 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK); 6953 6954 return 0; 6955 } 6956 6957 static int 6958 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word, 6959 void *data, size_t len) 6960 { 6961 struct ixl_dmamem *idm; 6962 struct ixl_aq_desc iaq; 6963 struct ixl_aq_nvm_param *param; 6964 uint32_t offset_bytes; 6965 int rv; 6966 6967 idm = &sc->sc_aqbuf; 6968 if (len > IXL_DMA_LEN(idm)) 6969 return ENOMEM; 6970 6971 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm)); 6972 memset(&iaq, 0, sizeof(iaq)); 6973 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ); 6974 iaq.iaq_flags = htole16(IXL_AQ_BUF | 6975 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0)); 6976 iaq.iaq_datalen = htole16(len); 6977 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)); 6978 6979 param = (struct ixl_aq_nvm_param *)iaq.iaq_param; 6980 param->command_flags = IXL_AQ_NVM_LAST_CMD; 6981 param->module_pointer = 0; 6982 param->length = htole16(len); 6983 offset_bytes = (uint32_t)offset_word * 2; 6984 offset_bytes &= 0x00FFFFFF; 6985 param->offset = htole32(offset_bytes); 6986 6987 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 6988 BUS_DMASYNC_PREREAD); 6989 6990 rv = ixl_atq_poll(sc, &iaq, 250); 6991 6992 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm), 6993 BUS_DMASYNC_POSTREAD); 6994 6995 if (rv != 0) { 6996 return ETIMEDOUT; 6997 } 6998 6999 switch (le16toh(iaq.iaq_retval)) { 7000 case IXL_AQ_RC_OK: 7001 break; 7002 case IXL_AQ_RC_EPERM: 7003 return EPERM; 7004 case IXL_AQ_RC_EINVAL: 7005 return EINVAL; 7006 case IXL_AQ_RC_EBUSY: 7007 return EBUSY; 7008 case IXL_AQ_RC_EIO: 7009 default: 7010 return EIO; 7011 } 7012 7013 memcpy(data, IXL_DMA_KVA(idm), len); 7014 7015 return 0; 7016 } 7017 7018 static int 7019 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data) 7020 { 7021 int error; 7022 uint16_t buf; 7023 7024 error = ixl_nvm_lock(sc, 'R'); 7025 if (error) 7026 return error; 7027 7028 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) { 7029 error = ixl_nvm_read_aq(sc, offset, 7030 &buf, sizeof(buf)); 7031 if (error == 0) 7032 *data = le16toh(buf); 7033 } else { 7034 error = ixl_nvm_read_srctl(sc, offset, &buf); 7035 if (error == 0) 7036 *data = buf; 7037 } 7038 7039 ixl_nvm_unlock(sc); 7040 7041 return error; 7042 } 7043 7044 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci"); 7045 7046 #ifdef _MODULE 7047 #include "ioconf.c" 7048 #endif 7049 7050 #ifdef _MODULE 7051 static void 7052 ixl_parse_modprop(prop_dictionary_t dict) 7053 { 7054 prop_object_t obj; 7055 int64_t val; 7056 uint64_t uval; 7057 7058 if (dict == NULL) 7059 return; 7060 7061 obj = prop_dictionary_get(dict, "nomsix"); 7062 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) { 7063 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj); 7064 } 7065 7066 obj = prop_dictionary_get(dict, "stats_interval"); 7067 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7068 val = prop_number_integer_value((prop_number_t)obj); 7069 7070 /* the exact bounds of this range are arbitrary */ 7071 if (100 < val && val < 180000) { 7072 ixl_param_stats_interval = val; 7073 } 7074 } 7075 7076 obj = prop_dictionary_get(dict, "nqps_limit"); 7077 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER)
{ 7078 val = prop_number_integer_value((prop_number_t)obj); 7079 7080 if (val <= INT32_MAX) 7081 ixl_param_nqps_limit = val; 7082 } 7083 7084 obj = prop_dictionary_get(dict, "rx_ndescs"); 7085 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7086 uval = prop_number_unsigned_integer_value((prop_number_t)obj); 7087 7088 if (uval > 8) 7089 ixl_param_rx_ndescs = uval; 7090 } 7091 7092 obj = prop_dictionary_get(dict, "tx_ndescs"); 7093 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) { 7094 uval = prop_number_unsigned_integer_value((prop_number_t)obj); 7095 7096 if (uval > IXL_TX_PKT_DESCS) 7097 ixl_param_tx_ndescs = uval; 7098 } 7099 7100 } 7101 #endif 7102 7103 static int 7104 if_ixl_modcmd(modcmd_t cmd, void *opaque) 7105 { 7106 int error = 0; 7107 7108 #ifdef _MODULE 7109 switch (cmd) { 7110 case MODULE_CMD_INIT: 7111 ixl_parse_modprop((prop_dictionary_t)opaque); 7112 error = config_init_component(cfdriver_ioconf_if_ixl, 7113 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl); 7114 break; 7115 case MODULE_CMD_FINI: 7116 error = config_fini_component(cfdriver_ioconf_if_ixl, 7117 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl); 7118 break; 7119 default: 7120 error = ENOTTY; 7121 break; 7122 } 7123 #endif 7124 7125 return error; 7126 } 7127