/*	$OpenBSD: if_ixl.c,v 1.19 2019/02/01 06:11:16 jmatthew Exp $ */

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define I40E_MASK(mask, shift)		((mask) << (shift))
#define I40E_PF_RESET_WAIT_COUNT	200
#define I40E_AQ_LARGE_BUF		512

/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE		0x0
#define I40E_QTX_CTL_VM_QUEUE		0x1
#define I40E_QTX_CTL_PF_QUEUE		0x2

#define I40E_QUEUE_TYPE_EOL		0x7ff
#define I40E_INTR_NOTX_QUEUE		0

#define I40E_QUEUE_TYPE_RX		0x0
#define I40E_QUEUE_TYPE_TX		0x1
#define I40E_QUEUE_TYPE_PE_CEQ		0x2
#define I40E_QUEUE_TYPE_UNKNOWN		0x3

#define I40E_ITR_INDEX_RX		0x0
#define I40E_ITR_INDEX_TX		0x1
#define I40E_ITR_INDEX_OTHER		0x2
#define I40E_ITR_INDEX_NONE		0x3

#include <dev/pci/if_ixlreg.h>

#define I40E_INTR_NOTX_QUEUE		0
#define I40E_INTR_NOTX_INTR		0
#define I40E_INTR_NOTX_RX_QUEUE		0
#define I40E_INTR_NOTX_TX_QUEUE		1
#define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
#define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK

struct ixl_aq_desc {
	uint16_t	iaq_flags;
#define IXL_AQ_DD		(1U << 0)
#define IXL_AQ_CMP		(1U << 1)
#define IXL_AQ_ERR		(1U << 2)
#define IXL_AQ_VFE		(1U << 3)
#define IXL_AQ_LB		(1U << 9)
#define IXL_AQ_RD		(1U << 10)
#define IXL_AQ_VFC		(1U << 11)
#define IXL_AQ_BUF		(1U << 12)
#define IXL_AQ_SI		(1U << 13)
#define IXL_AQ_EI		(1U << 14)
#define IXL_AQ_FE		(1U << 15)

#define IXL_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
				    "\003ERR" "\002CMP" "\001DD"

	uint16_t	iaq_opcode;

	uint16_t	iaq_datalen;
	uint16_t	iaq_retval;

	uint64_t	iaq_cookie;

	uint32_t	iaq_param[4];
/*	iaq_data_hi	iaq_param[2] */
/*	iaq_data_lo	iaq_param[3] */
} __packed __aligned(8);
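
/*
 * A note on how these descriptors are driven (see ixl_atq_poll() and
 * ixl_aq_dva() below): the driver fills in iaq_opcode and iaq_param,
 * setting IXL_AQ_BUF (plus IXL_AQ_RD when the buffer carries command
 * data) and pointing iaq_param[2]/[3] at a DMA buffer if the command
 * payload does not fit in the descriptor, then bumps the ATQ tail
 * register.  Firmware signals completion by setting IXL_AQ_DD in
 * iaq_flags and leaving a status code in iaq_retval.
 */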
/* aq commands */
#define IXL_AQ_OP_GET_VERSION		0x0001
#define IXL_AQ_OP_DRIVER_VERSION	0x0002
#define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
#define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
#define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
#define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
#define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
#define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
#define IXL_AQ_OP_LIST_DEV_CAP		0x000b
#define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
#define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
#define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
#define IXL_AQ_OP_ADD_VSI		0x0210
#define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
#define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
#define IXL_AQ_OP_ADD_VEB		0x0230
#define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
#define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
#define IXL_AQ_OP_ADD_MACVLAN		0x0250
#define IXL_AQ_OP_REMOVE_MACVLAN	0x0251
#define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
#define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
#define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
#define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
#define IXL_AQ_OP_PHY_RESTART_AN	0x0605
#define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
#define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
#define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
#define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
#define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
#define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
#define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
#define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
#define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
#define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
#define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09

struct ixl_aq_mac_addresses {
	uint8_t		pf_lan[ETHER_ADDR_LEN];
	uint8_t		pf_san[ETHER_ADDR_LEN];
	uint8_t		port[ETHER_ADDR_LEN];
	uint8_t		pf_wol[ETHER_ADDR_LEN];
} __packed;

#define IXL_AQ_MAC_PF_LAN_VALID		(1U << 4)
#define IXL_AQ_MAC_PF_SAN_VALID		(1U << 5)
#define IXL_AQ_MAC_PORT_VALID		(1U << 6)
#define IXL_AQ_MAC_PF_WOL_VALID		(1U << 7)

struct ixl_aq_capability {
	uint16_t	cap_id;
#define IXL_AQ_CAP_SWITCH_MODE		0x0001
#define IXL_AQ_CAP_MNG_MODE		0x0002
#define IXL_AQ_CAP_NPAR_ACTIVE		0x0003
#define IXL_AQ_CAP_OS2BMC_CAP		0x0004
#define IXL_AQ_CAP_FUNCTIONS_VALID	0x0005
#define IXL_AQ_CAP_ALTERNATE_RAM	0x0006
#define IXL_AQ_CAP_WOL_AND_PROXY	0x0008
#define IXL_AQ_CAP_SRIOV		0x0012
#define IXL_AQ_CAP_VF			0x0013
#define IXL_AQ_CAP_VMDQ			0x0014
#define IXL_AQ_CAP_8021QBG		0x0015
#define IXL_AQ_CAP_8021QBR		0x0016
#define IXL_AQ_CAP_VSI			0x0017
#define IXL_AQ_CAP_DCB			0x0018
#define IXL_AQ_CAP_FCOE			0x0021
#define IXL_AQ_CAP_ISCSI		0x0022
#define IXL_AQ_CAP_RSS			0x0040
#define IXL_AQ_CAP_RXQ			0x0041
#define IXL_AQ_CAP_TXQ			0x0042
#define IXL_AQ_CAP_MSIX			0x0043
#define IXL_AQ_CAP_VF_MSIX		0x0044
#define IXL_AQ_CAP_FLOW_DIRECTOR	0x0045
#define IXL_AQ_CAP_1588			0x0046
#define IXL_AQ_CAP_IWARP		0x0051
#define IXL_AQ_CAP_LED			0x0061
#define IXL_AQ_CAP_SDP			0x0062
#define IXL_AQ_CAP_MDIO			0x0063
#define IXL_AQ_CAP_WSR_PROT		0x0064
#define IXL_AQ_CAP_NVM_MGMT		0x0080
#define IXL_AQ_CAP_FLEX10		0x00F1
#define IXL_AQ_CAP_CEM			0x00F2
	uint8_t		major_rev;
	uint8_t		minor_rev;
	uint32_t	number;
	uint32_t	logical_id;
	uint32_t	phys_id;
	uint8_t		_reserved[16];
} __packed __aligned(4);

#define IXL_LLDP_SHUTDOWN		0x1

struct ixl_aq_switch_config {
	uint16_t	num_reported;
	uint16_t	num_total;
	uint8_t		_reserved[12];
} __packed __aligned(4);

struct ixl_aq_switch_config_element {
	uint8_t		type;
#define IXL_AQ_SW_ELEM_TYPE_MAC		1
#define IXL_AQ_SW_ELEM_TYPE_PF		2
#define IXL_AQ_SW_ELEM_TYPE_VF		3
#define IXL_AQ_SW_ELEM_TYPE_EMP		4
#define IXL_AQ_SW_ELEM_TYPE_BMC		5
#define IXL_AQ_SW_ELEM_TYPE_PV		16
#define IXL_AQ_SW_ELEM_TYPE_VEB		17
#define IXL_AQ_SW_ELEM_TYPE_PA		18
#define IXL_AQ_SW_ELEM_TYPE_VSI		19
	uint8_t		revision;
#define IXL_AQ_SW_ELEM_REV_1		1
	uint16_t	seid;

	uint16_t	uplink_seid;
	uint16_t	downlink_seid;

	uint8_t		_reserved[3];
	uint8_t		connection_type;
#define IXL_AQ_CONN_TYPE_REGULAR	0x1
#define IXL_AQ_CONN_TYPE_DEFAULT	0x2
#define IXL_AQ_CONN_TYPE_CASCADED	0x3

	uint16_t	scheduler_id;
	uint16_t	element_info;
} __packed __aligned(4);
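
/*
 * ixl_get_switch_config() retrieves an array of these elements; the
 * seid/uplink_seid/downlink_seid of the PF's element are what end up in
 * the softc (sc_seid, sc_uplink_seid, sc_downlink_seid) for use in later
 * VSI and VEB commands.
 */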
#define IXL_PHY_TYPE_SGMII		0x00
#define IXL_PHY_TYPE_1000BASE_KX	0x01
#define IXL_PHY_TYPE_10GBASE_KX4	0x02
#define IXL_PHY_TYPE_10GBASE_KR		0x03
#define IXL_PHY_TYPE_40GBASE_KR4	0x04
#define IXL_PHY_TYPE_XAUI		0x05
#define IXL_PHY_TYPE_XFI		0x06
#define IXL_PHY_TYPE_SFI		0x07
#define IXL_PHY_TYPE_XLAUI		0x08
#define IXL_PHY_TYPE_XLPPI		0x09
#define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
#define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
#define IXL_PHY_TYPE_10GBASE_AOC	0x0c
#define IXL_PHY_TYPE_40GBASE_AOC	0x0d
#define IXL_PHY_TYPE_100BASE_TX		0x11
#define IXL_PHY_TYPE_1000BASE_T		0x12
#define IXL_PHY_TYPE_10GBASE_T		0x13
#define IXL_PHY_TYPE_10GBASE_SR		0x14
#define IXL_PHY_TYPE_10GBASE_LR		0x15
#define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
#define IXL_PHY_TYPE_10GBASE_CR1	0x17
#define IXL_PHY_TYPE_40GBASE_CR4	0x18
#define IXL_PHY_TYPE_40GBASE_SR4	0x19
#define IXL_PHY_TYPE_40GBASE_LR4	0x1a
#define IXL_PHY_TYPE_1000BASE_SX	0x1b
#define IXL_PHY_TYPE_1000BASE_LX	0x1c
#define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
#define IXL_PHY_TYPE_20GBASE_KR2	0x1e

#define IXL_PHY_TYPE_25GBASE_KR		0x1f
#define IXL_PHY_TYPE_25GBASE_CR		0x20
#define IXL_PHY_TYPE_25GBASE_SR		0x21
#define IXL_PHY_TYPE_25GBASE_LR		0x22
#define IXL_PHY_TYPE_25GBASE_AOC	0x23
#define IXL_PHY_TYPE_25GBASE_ACC	0x24

struct ixl_aq_module_desc {
	uint8_t		oui[3];
	uint8_t		_reserved1;
	uint8_t		part_number[16];
	uint8_t		revision[4];
	uint8_t		_reserved2[8];
} __packed __aligned(4);

struct ixl_aq_phy_abilities {
	uint32_t	phy_type;

	uint8_t		link_speed;
#define IXL_AQ_PHY_LINK_SPEED_100MB	0x1
#define IXL_AQ_PHY_LINK_SPEED_1000MB	0x2
#define IXL_AQ_PHY_LINK_SPEED_10GB	0x3
#define IXL_AQ_PHY_LINK_SPEED_40GB	0x4
#define IXL_AQ_PHY_LINK_SPEED_20GB	0x5
#define IXL_AQ_PHY_LINK_SPEED_25GB	0x6
	uint8_t		abilities;
	uint16_t	eee_capability;

	uint32_t	eeer_val;

	uint8_t		d3_lpan;
	uint8_t		phy_type_ext;
#define IXL_AQ_PHY_TYPE_EXT_25G_KR	0x01
#define IXL_AQ_PHY_TYPE_EXT_25G_CR	0x02
#define IXL_AQ_PHY_TYPE_EXT_25G_SR	0x04
#define IXL_AQ_PHY_TYPE_EXT_25G_LR	0x08
	uint8_t		fec_cfg_curr_mod_ext_info;
#define IXL_AQ_ENABLE_FEC_KR		0x01
#define IXL_AQ_ENABLE_FEC_RS		0x02
#define IXL_AQ_REQUEST_FEC_KR		0x04
#define IXL_AQ_REQUEST_FEC_RS		0x08
#define IXL_AQ_ENABLE_FEC_AUTO		0x10
#define IXL_AQ_MODULE_TYPE_EXT_MASK	0xe0
#define IXL_AQ_MODULE_TYPE_EXT_SHIFT	5
	uint8_t		ext_comp_code;

	uint8_t		phy_id[4];

	uint8_t		module_type[3];
	uint8_t		qualified_module_count;
#define IXL_AQ_PHY_MAX_QMS		16
	struct ixl_aq_module_desc
			qualified_module[IXL_AQ_PHY_MAX_QMS];
} __packed __aligned(4);

struct ixl_aq_link_param {
	uint8_t		notify;
#define IXL_AQ_LINK_NOTIFY	0x03
	uint8_t		_reserved1;
	uint8_t		phy;
	uint8_t		speed;
	uint8_t		status;
	uint8_t		_reserved2[11];
} __packed __aligned(4);

struct ixl_aq_vsi_param {
	uint16_t	uplink_seid;
	uint8_t		connect_type;
#define IXL_AQ_VSI_CONN_TYPE_NORMAL	(0x1)
#define IXL_AQ_VSI_CONN_TYPE_DEFAULT	(0x2)
#define IXL_AQ_VSI_CONN_TYPE_CASCADED	(0x3)
	uint8_t		_reserved1;

	uint8_t		vf_id;
	uint8_t		_reserved2;
	uint16_t	vsi_flags;
#define IXL_AQ_VSI_TYPE_SHIFT		0x0
#define IXL_AQ_VSI_TYPE_MASK		(0x3 << IXL_AQ_VSI_TYPE_SHIFT)
#define IXL_AQ_VSI_TYPE_VF		0x0
#define IXL_AQ_VSI_TYPE_VMDQ2		0x1
#define IXL_AQ_VSI_TYPE_PF		0x2
#define IXL_AQ_VSI_TYPE_EMP_MNG		0x3
#define IXL_AQ_VSI_FLAG_CASCADED_PV	0x4

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);
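
/*
 * The ADD/REMOVE_MACVLAN commands below are indirect: the descriptor
 * carries the element count and the target VSI seid, while addr_hi/addr_lo
 * (presumably filled in via ixl_aq_dva()) point at a DMA buffer holding an
 * array of the corresponding *_elem structures.
 */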
struct ixl_aq_add_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint16_t	flags;
#define IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN	0x0004
	uint16_t	queue;
	uint32_t	_reserved;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint8_t		flags;
#define IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN	0x0008
	uint8_t		_reserved[7];
} __packed __aligned(16);

struct ixl_aq_vsi_reply {
	uint16_t	seid;
	uint16_t	vsi_number;

	uint16_t	vsis_used;
	uint16_t	vsis_free;

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);
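
/*
 * GET/UPD_VSI_PARAMS move the 128-byte ixl_aq_vsi_data structure below
 * through a DMA buffer; sc_scratch is allocated at exactly this size for
 * that purpose.  The descriptor itself only carries the seid/vsi_number
 * bookkeeping above.
 */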
struct ixl_aq_vsi_data {
	/* first 96 bytes are written by SW */
	uint16_t	valid_sections;
#define IXL_AQ_VSI_VALID_SWITCH		(1 << 0)
#define IXL_AQ_VSI_VALID_SECURITY	(1 << 1)
#define IXL_AQ_VSI_VALID_VLAN		(1 << 2)
#define IXL_AQ_VSI_VALID_CAS_PV		(1 << 3)
#define IXL_AQ_VSI_VALID_INGRESS_UP	(1 << 4)
#define IXL_AQ_VSI_VALID_EGRESS_UP	(1 << 5)
#define IXL_AQ_VSI_VALID_QUEUE_MAP	(1 << 6)
#define IXL_AQ_VSI_VALID_QUEUE_OPT	(1 << 7)
#define IXL_AQ_VSI_VALID_OUTER_UP	(1 << 8)
#define IXL_AQ_VSI_VALID_SCHED		(1 << 9)
	/* switch section */
	uint16_t	switch_id;
#define IXL_AQ_VSI_SWITCH_ID_SHIFT	0
#define IXL_AQ_VSI_SWITCH_ID_MASK	(0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
#define IXL_AQ_VSI_SWITCH_NOT_STAG	(1 << 12)
#define IXL_AQ_VSI_SWITCH_LOCAL_LB	(1 << 14)

	uint8_t		_reserved1[2];
	/* security section */
	uint8_t		sec_flags;
#define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD	(1 << 0)
#define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK	(1 << 1)
#define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK	(1 << 2)
	uint8_t		_reserved2;

	/* vlan section */
	uint16_t	pvid;
	uint16_t	fcoe_pvid;

	uint8_t		port_vlan_flags;
#define IXL_AQ_VSI_PVLAN_MODE_SHIFT	0
#define IXL_AQ_VSI_PVLAN_MODE_MASK	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_TAGGED	(0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED	(0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_ALL	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_INSERT_PVID	(0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_SHIFT	0x3
#define IXL_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR	(0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
	uint8_t		_reserved3[3];

	/* ingress egress up section */
	uint32_t	ingress_table;
#define IXL_AQ_VSI_UP_SHIFT(_up)	((_up) * 3)
#define IXL_AQ_VSI_UP_MASK(_up)		(0x7 << IXL_AQ_VSI_UP_SHIFT(_up))
	uint32_t	egress_table;

	/* cascaded pv section */
	uint16_t	cas_pv_tag;
	uint8_t		cas_pv_flags;
#define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT	0
#define IXL_AQ_VSI_CAS_PV_TAGX_MASK	(0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE	(0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE	(0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_COPY	(0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_INSERT_TAG	(1 << 4)
#define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE	(1 << 5)
#define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
					(1 << 6)
	uint8_t		_reserved4;

	/* queue mapping section */
	uint16_t	mapping_flags;
#define IXL_AQ_VSI_QUE_MAP_MASK		0x1
#define IXL_AQ_VSI_QUE_MAP_CONTIG	0x0
#define IXL_AQ_VSI_QUE_MAP_NONCONTIG	0x1
	uint16_t	queue_mapping[16];
#define IXL_AQ_VSI_QUEUE_SHIFT		0x0
#define IXL_AQ_VSI_QUEUE_MASK		(0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
	uint16_t	tc_mapping[8];
#define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT	0
#define IXL_AQ_VSI_TC_Q_OFFSET_MASK	(0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
#define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT	9
#define IXL_AQ_VSI_TC_Q_NUMBER_MASK	(0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)

	/* queueing option section */
	uint8_t		queueing_opt_flags;
#define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN	(1 << 2)
#define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN	(1 << 3)
#define IXL_AQ_VSI_QUE_OPT_TCP_EN	(1 << 4)
#define IXL_AQ_VSI_QUE_OPT_FCOE_EN	(1 << 5)
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF	0
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI	(1 << 6)
	uint8_t		_reserved5[3];

	/* scheduler section */
	uint8_t		up_enable_bits;
	uint8_t		_reserved6;

	/* outer up section */
	uint32_t	outer_up_table; /* same as ingress/egress tables */
	uint8_t		_reserved7[8];

	/* last 32 bytes are written by FW */
	uint16_t	qs_handle[8];
#define IXL_AQ_VSI_QS_HANDLE_INVALID	0xffff
	uint16_t	stat_counter_idx;
	uint16_t	sched_id;

	uint8_t		_reserved8[12];
} __packed __aligned(8);

CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);

struct ixl_aq_vsi_promisc_param {
	uint16_t	flags;
	uint16_t	valid_flags;
#define IXL_AQ_VSI_PROMISC_FLAG_UCAST	(1 << 0)
#define IXL_AQ_VSI_PROMISC_FLAG_MCAST	(1 << 1)
#define IXL_AQ_VSI_PROMISC_FLAG_BCAST	(1 << 2)
#define IXL_AQ_VSI_PROMISC_FLAG_DFLT	(1 << 3)
#define IXL_AQ_VSI_PROMISC_FLAG_VLAN	(1 << 4)
#define IXL_AQ_VSI_PROMISC_FLAG_RXONLY	(1 << 15)

	uint16_t	seid;
#define IXL_AQ_VSI_PROMISC_SEID_VALID	(1 << 15)
	uint16_t	vlan;
#define IXL_AQ_VSI_PROMISC_VLAN_VALID	(1 << 15)
	uint32_t	reserved[2];
} __packed __aligned(8);

struct ixl_aq_veb_param {
	uint16_t	uplink_seid;
	uint16_t	downlink_seid;
	uint16_t	veb_flags;
#define IXL_AQ_ADD_VEB_FLOATING		(1 << 0)
#define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT	1
#define IXL_AQ_ADD_VEB_PORT_TYPE_MASK	(0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
					(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DATA	(0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER	(1 << 3) /* deprecated */
#define IXL_AQ_ADD_VEB_DISABLE_STATS	(1 << 4)
	uint8_t		enable_tcs;
	uint8_t		_reserved[9];
} __packed __aligned(16);
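
/*
 * ADD_VEB creates the internal switch element that connects the PF's VSI
 * to its uplink; the reply below hands back the new element's seid, which
 * is kept as sc_veb_seid.
 */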
struct ixl_aq_veb_reply {
	uint16_t	_reserved1;
	uint16_t	_reserved2;
	uint16_t	_reserved3;
	uint16_t	switch_seid;
	uint16_t	veb_seid;
#define IXL_AQ_VEB_ERR_FLAG_NO_VEB	(1 << 0)
#define IXL_AQ_VEB_ERR_FLAG_NO_SCHED	(1 << 1)
#define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER	(1 << 2)
#define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY	(1 << 3)
	uint16_t	statistic_index;
	uint16_t	vebs_used;
	uint16_t	vebs_free;
} __packed __aligned(16);

/* GET PHY ABILITIES param[0] */
#define IXL_AQ_PHY_REPORT_QUAL		(1 << 0)
#define IXL_AQ_PHY_REPORT_INIT		(1 << 1)

/* RESTART_AN param[0] */
#define IXL_AQ_PHY_RESTART_AN		(1 << 1)
#define IXL_AQ_PHY_LINK_ENABLE		(1 << 2)

struct ixl_aq_link_status { /* this occupies the iaq_param space */
	uint16_t	command_flags; /* only field set on command */
#define IXL_AQ_LSE_MASK			0x3
#define IXL_AQ_LSE_NOP			0x0
#define IXL_AQ_LSE_DISABLE		0x2
#define IXL_AQ_LSE_ENABLE		0x3
#define IXL_AQ_LSE_IS_ENABLED		0x1 /* only set in response */
	uint8_t		phy_type;
	uint8_t		link_speed;
	uint8_t		link_info;
#define IXL_AQ_LINK_UP_FUNCTION		0x01
#define IXL_AQ_LINK_FAULT		0x02
#define IXL_AQ_LINK_FAULT_TX		0x04
#define IXL_AQ_LINK_FAULT_RX		0x08
#define IXL_AQ_LINK_FAULT_REMOTE	0x10
#define IXL_AQ_LINK_UP_PORT		0x20
#define IXL_AQ_MEDIA_AVAILABLE		0x40
#define IXL_AQ_SIGNAL_DETECT		0x80
	uint8_t		an_info;
#define IXL_AQ_AN_COMPLETED		0x01
#define IXL_AQ_LP_AN_ABILITY		0x02
#define IXL_AQ_PD_FAULT			0x04
#define IXL_AQ_FEC_EN			0x08
#define IXL_AQ_PHY_LOW_POWER		0x10
#define IXL_AQ_LINK_PAUSE_TX		0x20
#define IXL_AQ_LINK_PAUSE_RX		0x40
#define IXL_AQ_QUALIFIED_MODULE		0x80

	uint8_t		ext_info;
#define IXL_AQ_LINK_PHY_TEMP_ALARM	0x01
#define IXL_AQ_LINK_XCESSIVE_ERRORS	0x02
#define IXL_AQ_LINK_TX_SHIFT		0x02
#define IXL_AQ_LINK_TX_MASK		(0x03 << IXL_AQ_LINK_TX_SHIFT)
#define IXL_AQ_LINK_TX_ACTIVE		0x00
#define IXL_AQ_LINK_TX_DRAINED		0x01
#define IXL_AQ_LINK_TX_FLUSHED		0x03
#define IXL_AQ_LINK_FORCED_40G		0x10
/* 25G Error Codes */
#define IXL_AQ_25G_NO_ERR		0x00
#define IXL_AQ_25G_NOT_PRESENT		0x01
#define IXL_AQ_25G_NVM_CRC_ERR		0x02
#define IXL_AQ_25G_SBUS_UCODE_ERR	0x03
#define IXL_AQ_25G_SERDES_UCODE_ERR	0x04
#define IXL_AQ_25G_NIMB_UCODE_ERR	0x05
	uint8_t		loopback;
	uint16_t	max_frame_size;

	uint8_t		config;
#define IXL_AQ_CONFIG_FEC_KR_ENA	0x01
#define IXL_AQ_CONFIG_FEC_RS_ENA	0x02
#define IXL_AQ_CONFIG_CRC_ENA		0x04
#define IXL_AQ_CONFIG_PACING_MASK	0x78
	uint8_t		power_desc;
#define IXL_AQ_LINK_POWER_CLASS_1	0x00
#define IXL_AQ_LINK_POWER_CLASS_2	0x01
#define IXL_AQ_LINK_POWER_CLASS_3	0x02
#define IXL_AQ_LINK_POWER_CLASS_4	0x03
#define IXL_AQ_PWR_CLASS_MASK		0x03

	uint8_t		reserved[4];
} __packed __aligned(4);

/* event mask command flags for param[2] */
#define IXL_AQ_PHY_EV_MASK		0x3ff
#define IXL_AQ_PHY_EV_LINK_UPDOWN	(1 << 1)
#define IXL_AQ_PHY_EV_MEDIA_NA		(1 << 2)
#define IXL_AQ_PHY_EV_LINK_FAULT	(1 << 3)
#define IXL_AQ_PHY_EV_PHY_TEMP_ALARM	(1 << 4)
#define IXL_AQ_PHY_EV_EXCESS_ERRORS	(1 << 5)
#define IXL_AQ_PHY_EV_SIGNAL_DETECT	(1 << 6)
#define IXL_AQ_PHY_EV_AN_COMPLETED	(1 << 7)
#define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL	(1 << 8)
#define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED	(1 << 9)
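
/*
 * ixl_phy_mask_ints() programs this event mask (via
 * IXL_AQ_OP_PHY_SET_EVENT_MASK) to limit which PHY events the firmware
 * reports through the admin receive queue.
 */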
/* aq response codes */
#define IXL_AQ_RC_OK		0  /* success */
#define IXL_AQ_RC_EPERM		1  /* Operation not permitted */
#define IXL_AQ_RC_ENOENT	2  /* No such element */
#define IXL_AQ_RC_ESRCH		3  /* Bad opcode */
#define IXL_AQ_RC_EINTR		4  /* operation interrupted */
#define IXL_AQ_RC_EIO		5  /* I/O error */
#define IXL_AQ_RC_ENXIO		6  /* No such resource */
#define IXL_AQ_RC_E2BIG		7  /* Arg too long */
#define IXL_AQ_RC_EAGAIN	8  /* Try again */
#define IXL_AQ_RC_ENOMEM	9  /* Out of memory */
#define IXL_AQ_RC_EACCES	10 /* Permission denied */
#define IXL_AQ_RC_EFAULT	11 /* Bad address */
#define IXL_AQ_RC_EBUSY		12 /* Device or resource busy */
#define IXL_AQ_RC_EEXIST	13 /* object already exists */
#define IXL_AQ_RC_EINVAL	14 /* invalid argument */
#define IXL_AQ_RC_ENOTTY	15 /* not a typewriter */
#define IXL_AQ_RC_ENOSPC	16 /* No space or alloc failure */
#define IXL_AQ_RC_ENOSYS	17 /* function not implemented */
#define IXL_AQ_RC_ERANGE	18 /* parameter out of range */
#define IXL_AQ_RC_EFLUSHED	19 /* cmd flushed due to prev error */
#define IXL_AQ_RC_BAD_ADDR	20 /* contains a bad pointer */
#define IXL_AQ_RC_EMODE		21 /* not allowed in current mode */
#define IXL_AQ_RC_EFBIG		22 /* file too large */

struct ixl_tx_desc {
	uint64_t	addr;
	uint64_t	cmd;
#define IXL_TX_DESC_DTYPE_SHIFT		0
#define IXL_TX_DESC_DTYPE_MASK		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DATA		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_NOP		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FD		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DONE		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)

#define IXL_TX_DESC_CMD_SHIFT		4
#define IXL_TX_DESC_CMD_MASK		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_EOP		(0x001 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_RS		(0x002 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_ICRC		(0x004 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IL2TAG1		(0x008 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_DUMMY		(0x010 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_MASK	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_NONIP	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV6	(0x020 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4	(0x040 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_FCOET		(0x080 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IXL_TX_DESC_CMD_SHIFT)

#define IXL_TX_DESC_MACLEN_SHIFT	16
#define IXL_TX_DESC_MACLEN_MASK		(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
#define IXL_TX_DESC_IPLEN_SHIFT		23
#define IXL_TX_DESC_IPLEN_MASK		(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
#define IXL_TX_DESC_L4LEN_SHIFT		30
#define IXL_TX_DESC_L4LEN_MASK		(0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
#define IXL_TX_DESC_FCLEN_SHIFT		30
#define IXL_TX_DESC_FCLEN_MASK		(0xfULL << IXL_TX_DESC_FCLEN_SHIFT)

#define IXL_TX_DESC_BSIZE_SHIFT		34
#define IXL_TX_DESC_BSIZE_MAX		0x3fffULL
#define IXL_TX_DESC_BSIZE_MASK	\
	(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)
} __packed __aligned(16);
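
/*
 * For reference, a minimal single-segment data descriptor is built by
 * or'ing the dtype, the command bits and the buffer length into "cmd",
 * along the lines of:
 *
 *	txd->cmd = htole64(IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_EOP |
 *	    IXL_TX_DESC_CMD_RS | IXL_TX_DESC_CMD_ICRC |
 *	    ((uint64_t)len << IXL_TX_DESC_BSIZE_SHIFT));
 *
 * (a sketch only; len must not exceed IXL_TX_DESC_BSIZE_MAX, and the
 * real encoding lives in ixl_start(), which is beyond this excerpt)
 */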
struct ixl_rx_rd_desc_16 {
	uint64_t	paddr; /* packet addr */
	uint64_t	haddr; /* header addr */
} __packed __aligned(16);

struct ixl_rx_rd_desc_32 {
	uint64_t	paddr; /* packet addr */
	uint64_t	haddr; /* header addr */
	uint64_t	_reserved1;
	uint64_t	_reserved2;
} __packed __aligned(16);

struct ixl_rx_wb_desc_16 {
	uint64_t	qword0;
	uint64_t	qword1;
#define IXL_RX_DESC_DD			(1 << 0)
#define IXL_RX_DESC_EOP			(1 << 1)
#define IXL_RX_DESC_L2TAG1P		(1 << 2)
#define IXL_RX_DESC_L3L4P		(1 << 3)
#define IXL_RX_DESC_CRCP		(1 << 4)
#define IXL_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
#define IXL_RX_DESC_TSYNINDX_MASK	(7 << IXL_RX_DESC_TSYNINDX_SHIFT)
#define IXL_RX_DESC_UMB_SHIFT		9
#define IXL_RX_DESC_UMB_MASK		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_UCAST		(0x0 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MCAST		(0x1 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_BCAST		(0x2 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MIRROR		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_FLM			(1 << 11)
#define IXL_RX_DESC_FLTSTAT_SHIFT	12
#define IXL_RX_DESC_FLTSTAT_MASK	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_NODATA	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_RSS		(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_LPBK		(1 << 14)
#define IXL_RX_DESC_IPV6EXTADD		(1 << 15)
#define IXL_RX_DESC_INT_UDP_0		(1 << 18)

#define IXL_RX_DESC_RXE			(1 << 19)
#define IXL_RX_DESC_HBO			(1 << 21)
#define IXL_RX_DESC_IPE			(1 << 22)
#define IXL_RX_DESC_L4E			(1 << 23)
#define IXL_RX_DESC_EIPE		(1 << 24)
#define IXL_RX_DESC_OVERSIZE		(1 << 25)

#define IXL_RX_DESC_PTYPE_SHIFT		30
#define IXL_RX_DESC_PTYPE_MASK		(0xffULL << IXL_RX_DESC_PTYPE_SHIFT)

#define IXL_RX_DESC_PLEN_SHIFT		38
#define IXL_RX_DESC_PLEN_MASK		(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
#define IXL_RX_DESC_HLEN_SHIFT		42
#define IXL_RX_DESC_HLEN_MASK		(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
} __packed __aligned(16);

struct ixl_rx_wb_desc_32 {
	uint64_t	qword0;
	uint64_t	qword1;
	uint64_t	qword2;
	uint64_t	qword3;
} __packed __aligned(16);

#define IXL_TX_PKT_DESCS	8
#define IXL_TX_QUEUE_ALIGN	128
#define IXL_RX_QUEUE_ALIGN	128

#define IXL_HARDMTU		9712 /* 9726 - ETHER_HDR_LEN */

#define IXL_PCIREG		PCI_MAPREG_START

#define IXL_ITR0		0x0
#define IXL_ITR1		0x1
#define IXL_ITR2		0x2
#define IXL_NOITR		0x2

#define IXL_AQ_NUM		256
#define IXL_AQ_MASK		(IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN		64 /* lol */
#define IXL_AQ_BUFLEN		4096

#define IXL_HMC_ROUNDUP		512
#define IXL_HMC_PGSIZE		4096
#define IXL_HMC_DVASZ		sizeof(uint64_t)
#define IXL_HMC_PGS		(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ		(IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID		1ULL

struct ixl_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;
	bus_size_t		atq_bah;

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;
	bus_size_t		arq_bah;

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};
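
/*
 * The admin queue registers sit at different offsets on PF and VF
 * functions, so they are reached through this table.  Only the PF set
 * (ixl_pf_aq_regs below) is used so far; the VF set is still under
 * "#ifdef notyet".
 */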
struct ixl_phy_type {
	uint64_t	phy_type;
	uint64_t	ifm_type;
};

struct ixl_speed_type {
	uint8_t		dev_speed;
	uint64_t	net_speed;
};

struct ixl_aq_buf {
	SIMPLEQ_ENTRY(ixl_aq_buf)
				 aqb_entry;
	void			*aqb_data;
	bus_dmamap_t		 aqb_map;
};
SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);

struct ixl_dmamem {
	bus_dmamap_t		ixm_map;
	bus_dma_segment_t	ixm_seg;
	int			ixm_nsegs;
	size_t			ixm_size;
	caddr_t			ixm_kva;
};
#define IXL_DMA_MAP(_ixm)	((_ixm)->ixm_map)
#define IXL_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IXL_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
#define IXL_DMA_LEN(_ixm)	((_ixm)->ixm_size)

struct ixl_hmc_entry {
	uint64_t		 hmc_base;
	uint32_t		 hmc_count;
	uint32_t		 hmc_size;
};

#define IXL_HMC_LAN_TX		0
#define IXL_HMC_LAN_RX		1
#define IXL_HMC_FCOE_CTX	2
#define IXL_HMC_FCOE_FILTER	3
#define IXL_HMC_COUNT		4

struct ixl_hmc_pack {
	uint16_t		offset;
	uint16_t		width;
	uint16_t		lsb;
};

/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for c to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure.
 */

struct ixl_hmc_rxq {
	uint16_t		 head;
	uint8_t			 cpuid;
	uint64_t		 base;
#define IXL_HMC_RXQ_BASE_UNIT		128
	uint16_t		 qlen;
	uint16_t		 dbuff;
#define IXL_HMC_RXQ_DBUFF_UNIT		128
	uint8_t			 hbuff;
#define IXL_HMC_RXQ_HBUFF_UNIT		64
	uint8_t			 dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
	uint8_t			 dsize;
#define IXL_HMC_RXQ_DSIZE_16		0
#define IXL_HMC_RXQ_DSIZE_32		1
	uint8_t			 crcstrip;
	uint8_t			 fc_ena;
	uint8_t			 l2sel;
	uint8_t			 hsplit_0;
	uint8_t			 hsplit_1;
	uint8_t			 showiv;
	uint16_t		 rxmax;
	uint8_t			 tphrdesc_ena;
	uint8_t			 tphwdesc_ena;
	uint8_t			 tphdata_ena;
	uint8_t			 tphhead_ena;
	uint8_t			 lrxqthresh;
	uint8_t			 prefena;
};

static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
	{ offsetof(struct ixl_hmc_rxq, l2sel),		1,	119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
};
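
/*
 * Reading the pack table above as an example: the "base" field is 57 bits
 * wide starting at bit 32 of the raw context object, so ixl_hmc_pack()
 * copies the low 57 bits of ixl_hmc_rxq's base (the ring address expressed
 * in IXL_HMC_RXQ_BASE_UNIT units) into bits 32-88 of the object.
 */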
#define IXL_HMC_RXQ_MINSIZE (201 + 1)

struct ixl_hmc_txq {
	uint16_t		head;
	uint8_t			new_context;
	uint64_t		base;
#define IXL_HMC_TXQ_BASE_UNIT		128
	uint8_t			fc_ena;
	uint8_t			timesync_ena;
	uint8_t			fd_ena;
	uint8_t			alt_vlan_ena;
	uint16_t		thead_wb;
	uint8_t			cpuid;
	uint8_t			head_wb_ena;
#define IXL_HMC_TXQ_DESC_WB		0
#define IXL_HMC_TXQ_HEAD_WB		1
	uint16_t		qlen;
	uint8_t			tphrdesc_ena;
	uint8_t			tphrpacket_ena;
	uint8_t			tphwdesc_ena;
	uint64_t		head_wb_addr;
	uint32_t		crc;
	uint16_t		rdylist;
	uint8_t			rdylist_act;
};

static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
	/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
	/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
};

#define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)

struct ixl_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};

struct ixl_tx_ring {
	unsigned int		 txr_prod;
	unsigned int		 txr_cons;

	struct ixl_tx_map	*txr_maps;
	struct ixl_dmamem	 txr_mem;

	bus_size_t		 txr_tail;
	unsigned int		 txr_qid;
};

struct ixl_rx_map {
	struct mbuf		*rxm_m;
	bus_dmamap_t		 rxm_map;
};

struct ixl_rx_ring {
	struct ixl_softc	*rxr_sc;

	struct if_rxring	 rxr_acct;
	struct timeout		 rxr_refill;

	unsigned int		 rxr_prod;
	unsigned int		 rxr_cons;

	struct ixl_rx_map	*rxr_maps;
	struct ixl_dmamem	 rxr_mem;

	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;
	unsigned int		 rxr_qid;
};

struct ixl_atq {
	SIMPLEQ_ENTRY(ixl_atq)	 iatq_entry;
	struct ixl_aq_desc	 iatq_desc;
	void			*iatq_arg;
	void			(*iatq_fn)(struct ixl_softc *, void *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
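
/*
 * struct ixl_atq pairs an admin queue command with a completion callback:
 * ixl_atq_set() records the function and argument, ixl_atq_post() queues
 * the descriptor, and ixl_atq_done() runs the callback once the firmware
 * has completed the command.
 */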
struct ixl_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;
	struct ifmedia		sc_media;
	uint64_t		sc_media_status;
	uint64_t		sc_media_active;

	pci_chipset_tag_t	sc_pc;
	pci_intr_handle_t	sc_ih;
	void			*sc_ihc;
	pcitag_t		sc_tag;

	bus_dma_tag_t		sc_dmat;
	bus_space_tag_t		sc_memt;
	bus_space_handle_t	sc_memh;
	bus_size_t		sc_mems;

	uint8_t			sc_pf_id;
	uint16_t		sc_uplink_seid;	/* le */
	uint16_t		sc_downlink_seid; /* le */
	uint16_t		sc_veb_seid;	/* le */
	uint16_t		sc_vsi_number;	/* le */
	uint16_t		sc_seid;
	unsigned int		sc_base_queue;

	struct ixl_dmamem	sc_scratch;

	const struct ixl_aq_regs *
				sc_aq_regs;

	struct mutex		sc_atq_mtx;
	struct ixl_dmamem	sc_atq;
	unsigned int		sc_atq_prod;
	unsigned int		sc_atq_cons;

	struct ixl_dmamem	sc_arq;
	struct task		sc_arq_task;
	struct ixl_aq_bufs	sc_arq_idle;
	struct ixl_aq_bufs	sc_arq_live;
	struct if_rxring	sc_arq_ring;
	unsigned int		sc_arq_prod;
	unsigned int		sc_arq_cons;

	struct task		sc_link_state_task;
	struct ixl_atq		sc_link_state_atq;

	struct ixl_dmamem	sc_hmc_sd;
	struct ixl_dmamem	sc_hmc_pd;
	struct ixl_hmc_entry	sc_hmc_entries[IXL_HMC_COUNT];

	unsigned int		sc_nrings;

	unsigned int		sc_tx_ring_ndescs;
	unsigned int		sc_rx_ring_ndescs;
	unsigned int		sc_nqueues;	/* 1 << sc_nqueues */
};
#define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)

#define delaymsec(_ms)	delay(1000 * (_ms))

static void	ixl_clear_hw(struct ixl_softc *);
static int	ixl_pf_reset(struct ixl_softc *);

static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
		    bus_size_t, u_int);
static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);

static int	ixl_arq_fill(struct ixl_softc *);
static void	ixl_arq_unfill(struct ixl_softc *);

static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
		    unsigned int);
static void	ixl_atq_set(struct ixl_atq *,
		    void (*)(struct ixl_softc *, void *), void *);
static void	ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
static void	ixl_atq_done(struct ixl_softc *);
static void	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
		    const char *);
static int	ixl_get_version(struct ixl_softc *);
static int	ixl_pxe_clear(struct ixl_softc *);
static int	ixl_lldp_shut(struct ixl_softc *);
static int	ixl_get_mac(struct ixl_softc *);
static int	ixl_get_switch_config(struct ixl_softc *);
static int	ixl_phy_mask_ints(struct ixl_softc *);
static int	ixl_get_phy_abilities(struct ixl_softc *, uint64_t *);
static int	ixl_restart_an(struct ixl_softc *);
static int	ixl_hmc(struct ixl_softc *);
static void	ixl_hmc_free(struct ixl_softc *);
static int	ixl_get_vsi(struct ixl_softc *);
static int	ixl_set_vsi(struct ixl_softc *);
static int	ixl_get_link_status(struct ixl_softc *);
static int	ixl_set_link_status(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static int	ixl_add_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static int	ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static void	ixl_link_state_update(void *);
static void	ixl_arq(void *);
static void	ixl_hmc_pack(void *, const void *,
		    const struct ixl_hmc_pack *, unsigned int);

static int	ixl_match(struct device *, void *, void *);
static void	ixl_attach(struct device *, struct device *, void *);

static void	ixl_media_add(struct ixl_softc *, uint64_t);
static int	ixl_media_change(struct ifnet *);
static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
static void	ixl_watchdog(struct ifnet *);
static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_start(struct ifqueue *);
static int	ixl_intr(void *);
static int	ixl_up(struct ixl_softc *);
static int	ixl_down(struct ixl_softc *);
static int	ixl_iff(struct ixl_softc *);

static struct ixl_tx_ring *
		ixl_txr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txeof(struct ixl_softc *, struct ifqueue *);

static struct ixl_rx_ring *
		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxeof(struct ixl_softc *, struct ifiqueue *);
static void	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxrefill(void *);
static int	ixl_rxrinfo(struct ixl_softc *, struct if_rxrinfo *);

struct cfdriver ixl_cd = {
	NULL,
	"ixl",
	DV_IFNET,
};

struct cfattach ixl_ca = {
	sizeof(struct ixl_softc),
	ixl_match,
	ixl_attach,
};

static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_SFP_CU },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_CR },
};
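
/*
 * Several entries above fold more than one firmware PHY type into a
 * single ifmedia word (e.g. XAUI and XFI both show up as IFM_10G_CX4),
 * presumably because ifmedia has no closer match for them.
 */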
static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_PHY_LINK_SPEED_40GB,	IF_Gbps(40) },
	{ IXL_AQ_PHY_LINK_SPEED_25GB,	IF_Gbps(25) },
	{ IXL_AQ_PHY_LINK_SPEED_20GB,	IF_Gbps(20) },
	{ IXL_AQ_PHY_LINK_SPEED_10GB,	IF_Gbps(10) },
	{ IXL_AQ_PHY_LINK_SPEED_1000MB,	IF_Mbps(1000) },
	{ IXL_AQ_PHY_LINK_SPEED_100MB,	IF_Mbps(100) },
};

static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail	= I40E_PF_ATQT,
	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#ifdef notyet
static const struct ixl_aq_regs ixl_vf_aq_regs = {
	.atq_tail	= I40E_VF_ATQT1,
	.atq_tail_mask	= I40E_VF_ATQT1_ATQT_MASK,
	.atq_head	= I40E_VF_ATQH1,
	.atq_head_mask	= I40E_VF_ATQH1_ATQH_MASK,
	.atq_len	= I40E_VF_ATQLEN1,
	.atq_bal	= I40E_VF_ATQBAL1,
	.atq_bah	= I40E_VF_ATQBAH1,
	.atq_len_enable	= I40E_VF_ATQLEN1_ATQENABLE_MASK,

	.arq_tail	= I40E_VF_ARQT1,
	.arq_tail_mask	= I40E_VF_ARQT1_ARQT_MASK,
	.arq_head	= I40E_VF_ARQH1,
	.arq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
	.arq_len	= I40E_VF_ARQLEN1,
	.arq_bal	= I40E_VF_ARQBAL1,
	.arq_bah	= I40E_VF_ARQBAH1,
	.arq_len_enable	= I40E_VF_ARQLEN1_ARQENABLE_MASK,
};
#endif

#define ixl_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_intr_enable(_s) \
	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))

#define ixl_nqueues(_sc)	(1 << (_sc)->sc_nqueues)

#ifdef __LP64__
#define ixl_dmamem_hi(_ixm)	(uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
#else
#define ixl_dmamem_hi(_ixm)	0
#endif

#define ixl_dmamem_lo(_ixm)	(uint32_t)IXL_DMA_DVA(_ixm)

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
#ifdef __LP64__
	htolem32(&iaq->iaq_param[2], addr >> 32);
#else
	iaq->iaq_param[2] = htole32(0);
#endif
	htolem32(&iaq->iaq_param[3], addr);
}
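
/*
 * ixl_aq_dva() above stores the 64-bit DMA address of an indirect command
 * buffer into iaq_param[2]/[3] (the data_hi/data_lo words of the
 * descriptor); on 32-bit platforms the high word is simply zero.
 */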

#if _BYTE_ORDER == _BIG_ENDIAN
#define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
#else
#define HTOLE16(_x)	(_x)
#endif

static const struct pci_matchid ixl_devices[] = {
#ifdef notyet
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF_HV },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_40G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_QSFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_T4_10G },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
};

static int
ixl_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, ixl_devices, nitems(ixl_devices)));
}

void
ixl_attach(struct device *parent, struct device *self, void *aux)
{
	struct ixl_softc *sc = (struct ixl_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct pci_attach_args *pa = aux;
	pcireg_t memtype;
	uint32_t port, ari, func;
	uint64_t phy_types = 0;
	int tries;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_aq_regs = &ixl_pf_aq_regs; /* VF? */

	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
	sc->sc_tx_ring_ndescs = 1024;
	sc->sc_rx_ring_ndescs = 1024;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map registers\n");
		return;
	}

	sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
	    I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;

	ixl_clear_hw(sc);

	if (ixl_pf_reset(sc) == -1) {
		/* error printed by ixl_pf_reset */
		goto unmap;
	}

	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	printf(": port %u", port);

	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;

	func = ixl_rd(sc, I40E_PF_FUNC_RID);
	sc->sc_pf_id = func & (ari ? 0xff : 0x7);
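
	/*
	 * With PCIe ARI the routing ID has no device field, so the
	 * function number (and therefore the PF id) is 8 bits wide
	 * instead of 3; that is what the 0xff/0x7 masks above encode.
	 */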

	/* initialise the adminq */

	mtx_init(&sc->sc_atq_mtx, IPL_NET);

	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
		goto unmap;
	}

	SIMPLEQ_INIT(&sc->sc_arq_idle);
	SIMPLEQ_INIT(&sc->sc_arq_live);
	if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
	task_set(&sc->sc_arq_task, ixl_arq, sc);
	sc->sc_arq_cons = 0;
	sc->sc_arq_prod = 0;

	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
		goto free_atq;
	}

	if (!ixl_arq_fill(sc)) {
		printf("\n" "%s: unable to fill arq descriptors\n",
		    DEVNAME(sc));
		goto free_arq;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (tries = 0; tries < 10; tries++) {
		int rv;

		sc->sc_atq_cons = 0;
		sc->sc_atq_prod = 0;

		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);

		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);

		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
		    ixl_dmamem_lo(&sc->sc_atq));
		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
		    ixl_dmamem_hi(&sc->sc_atq));
		ixl_wr(sc, sc->sc_aq_regs->atq_len,
		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);

		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
		    ixl_dmamem_lo(&sc->sc_arq));
		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
		    ixl_dmamem_hi(&sc->sc_arq));
		ixl_wr(sc, sc->sc_aq_regs->arq_len,
		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);

		rv = ixl_get_version(sc);
		if (rv == 0)
			break;
		if (rv != ETIMEDOUT) {
			printf(", unable to get firmware version\n");
			goto shutdown;
		}

		delaymsec(100);
	}

	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);

	if (ixl_pxe_clear(sc) != 0) {
		/* error printed by ixl_pxe_clear */
		goto shutdown;
	}

	if (ixl_get_mac(sc) != 0) {
		/* error printed by ixl_get_mac */
		goto shutdown;
	}

	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
	    pci_intr_map(pa, &sc->sc_ih) != 0) {
		printf(", unable to map interrupt\n");
		goto shutdown;
	}

	printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih),
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	if (ixl_hmc(sc) != 0) {
		/* error printed by ixl_hmc */
		goto shutdown;
	}

	if (ixl_lldp_shut(sc) != 0) {
		/* error printed by ixl_lldp_shut */
		goto free_hmc;
	}

	if (ixl_phy_mask_ints(sc) != 0) {
		/* error printed by ixl_phy_mask_ints */
		goto free_hmc;
	}

	if (ixl_restart_an(sc) != 0) {
		/* error printed by ixl_restart_an */
		goto free_hmc;
	}

	if (ixl_get_switch_config(sc) != 0) {
		/* error printed by ixl_get_switch_config */
		goto free_hmc;
	}

	if (ixl_get_phy_abilities(sc, &phy_types) != 0) {
		/* error printed by ixl_get_phy_abilities */
		goto free_hmc;
	}

	if (ixl_get_link_status(sc) != 0) {
		/* error printed by ixl_get_link_status */
		goto free_hmc;
	}
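
	/*
	 * sc_scratch presumably doubles as the DMA buffer for the VSI
	 * get/update commands issued below.
	 */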
	if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
		printf("%s: unable to allocate scratch buffer\n", DEVNAME(sc));
		goto free_hmc;
	}

	if (ixl_get_vsi(sc) != 0) {
		/* error printed by ixl_get_vsi */
		goto free_scratch;
	}

	if (ixl_set_vsi(sc) != 0) {
		/* error printed by ixl_set_vsi */
		goto free_scratch;
	}

	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, ixl_intr, sc, DEVNAME(sc));
	if (sc->sc_ihc == NULL) {
		printf("%s: unable to establish interrupt handler\n",
		    DEVNAME(sc));
		goto free_scratch;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = ixl_ioctl;
	ifp->if_qstart = ixl_start;
	ifp->if_watchdog = ixl_watchdog;
	ifp->if_hardmtu = IXL_HARDMTU;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);

	ixl_media_add(sc, phy_types);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	if_attach_queues(ifp, ixl_nqueues(sc));
	if_attach_iqueues(ifp, ixl_nqueues(sc));

	task_set(&sc->sc_link_state_task, ixl_link_state_update, sc);
	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
	    IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* remove default mac filter and replace it so we can see vlans */
	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, 0);
	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
	ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
	ixl_add_macvlan(sc, etherbroadcastaddr, 0,
	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);

	ixl_intr_enable(sc);

	return;
free_scratch:
	ixl_dmamem_free(sc, &sc->sc_scratch);
free_hmc:
	ixl_hmc_free(sc);
shutdown:
	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);

	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);

	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	ixl_arq_unfill(sc);
free_arq:
	ixl_dmamem_free(sc, &sc->sc_arq);
free_atq:
	ixl_dmamem_free(sc, &sc->sc_atq);
unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
static void
ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
{
	struct ifmedia *ifm = &sc->sc_media;
	const struct ixl_phy_type *itype;
	unsigned int i;

	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
		itype = &ixl_phy_type_map[i];

		if (ISSET(phy_types, itype->phy_type))
			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
	}
}

static int
ixl_media_change(struct ifnet *ifp)
{
	/* ignore? */
	return (EOPNOTSUPP);
}

static void
ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
{
	struct ixl_softc *sc = ifp->if_softc;

	NET_ASSERT_LOCKED();

	ifm->ifm_status = sc->sc_media_status;
	ifm->ifm_active = sc->sc_media_active;
}

static void
ixl_watchdog(struct ifnet *ifp)
{

}

int
ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
	int aqerror, error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				error = ixl_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ixl_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCADDMULTI:
		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0)
				return (error);

			aqerror = ixl_add_macvlan(sc, addrlo, 0,
			    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
			if (aqerror == IXL_AQ_RC_ENOSPC) {
				ether_delmulti(ifr, &sc->sc_ac);
				error = ENOSPC;
			}

			if (sc->sc_ac.ac_multirangecnt > 0) {
				SET(ifp->if_flags, IFF_ALLMULTI);
				error = ENETRESET;
			}
		}
		break;

	case SIOCDELMULTI:
		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0)
				return (error);

			ixl_remove_macvlan(sc, addrlo, 0,
			    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);

			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
			    sc->sc_ac.ac_multirangecnt == 0) {
				CLR(ifp->if_flags, IFF_ALLMULTI);
				error = ENETRESET;
			}
		}
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
		break;
	}

	if (error == ENETRESET)
		error = ixl_iff(sc);

	return (error);
}

static inline void *
ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
{
	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];

	if (i >= e->hmc_count)
		return (NULL);

	kva += e->hmc_base;
	kva += i * e->hmc_size;

	return (kva);
}

static inline size_t
ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
{
	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];

	return (e->hmc_size);
}
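/*
 * interface bring-up. ring allocation is done first as it is the
 * only step that can fail. after that the queue contexts are
 * written into the HMC, the rings are filled and enabled, and the
 * rx/tx interrupt causes are chained onto the "other" vector set
 * up at attach.
 */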
static int
ixl_up(struct ixl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ixl_rx_ring *rxr;
	struct ixl_tx_ring *txr;
	unsigned int nqueues, i;
	uint32_t reg;
	int rv = ENOMEM;

	nqueues = ixl_nqueues(sc);
	KASSERT(nqueues == 1); /* XXX */

	/* allocation is the only thing that can fail, so do it up front */
	for (i = 0; i < nqueues; i++) {
		rxr = ixl_rxr_alloc(sc, i);
		if (rxr == NULL)
			goto free;

		txr = ixl_txr_alloc(sc, i);
		if (txr == NULL) {
			ixl_rxr_free(sc, rxr);
			goto free;
		}

		ifp->if_iqs[i]->ifiq_softc = rxr;
		ifp->if_ifqs[i]->ifq_softc = txr;
	}

	/* XXX wait 50ms from completion of last RX queue disable */

	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		ixl_txr_qdis(sc, txr, 1);

		ixl_rxr_config(sc, rxr);
		ixl_txr_config(sc, txr);

		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));

		ixl_wr(sc, rxr->rxr_tail, 0);
		ixl_rxfill(sc, rxr);

		reg = ixl_rd(sc, I40E_QRX_ENA(i));
		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
		ixl_wr(sc, I40E_QRX_ENA(i), reg);

		reg = ixl_rd(sc, I40E_QTX_ENA(i));
		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
		ixl_wr(sc, I40E_QTX_ENA(i), reg);
	}

	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		if (ixl_rxr_enabled(sc, rxr) != 0)
			goto down;

		if (ixl_txr_enabled(sc, txr) != 0)
			goto down;
	}

	SET(ifp->if_flags, IFF_RUNNING);

	ixl_wr(sc, I40E_PFINT_LNKLST0,
	    (I40E_INTR_NOTX_QUEUE << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
	    (I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
	    (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	    (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	    (I40E_INTR_NOTX_RX_QUEUE << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
	    (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	    I40E_QINT_RQCTL_CAUSE_ENA_MASK);

	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
	    (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
	    (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	    (I40E_INTR_NOTX_TX_QUEUE << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
	    (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
	    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
	    I40E_QINT_TQCTL_CAUSE_ENA_MASK);

	ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
	ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
	ixl_wr(sc, I40E_PFINT_ITR0(2), 0);

	return (ENETRESET);

free:
	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		if (rxr == NULL) {
			/*
			 * tx and rx get set at the same time, so if one
			 * is NULL, the other is too.
			 */
			continue;
		}

		ixl_txr_free(sc, txr);
		ixl_rxr_free(sc, rxr);
	}
	return (rv);
down:
	ixl_down(sc);
	return (ETIMEDOUT);
}
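/*
 * ixl_iff pushes the promisc/allmulti state to the firmware with a
 * SET VSI PROMISC command. broadcast and vlan promisc are always
 * requested; unicast and multicast promisc follow IFF_PROMISC and
 * IFF_ALLMULTI respectively.
 */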
static int
ixl_iff(struct ixl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	struct ixl_aq_vsi_promisc_param *param;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (0);

	memset(&iatq, 0, sizeof(iatq));

	iaq = &iatq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);

	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
	}
	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
	param->seid = sc->sc_seid;

	ixl_atq_exec(sc, &iatq, "ixliff");

	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
		return (EIO);

	return (0);
}

static int
ixl_down(struct ixl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ixl_rx_ring *rxr;
	struct ixl_tx_ring *txr;
	unsigned int nqueues, i;
	uint32_t reg;
	int error = 0;

	nqueues = ixl_nqueues(sc);

	CLR(ifp->if_flags, IFF_RUNNING);

	/* mask interrupts */
	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
	CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);

	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
	CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);

	ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);

	/* make sure no hw generated work is still in flight */
	intr_barrier(sc->sc_ihc);
	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		ixl_txr_qdis(sc, txr, 0);

		ifiq_barrier(ifp->if_iqs[i]);
		ifq_barrier(ifp->if_ifqs[i]);

		if (!timeout_del(&rxr->rxr_refill))
			timeout_barrier(&rxr->rxr_refill);
	}

	/* XXX wait at least 400 usec for all tx queues in one go */
	delay(500);

	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		reg = ixl_rd(sc, I40E_QTX_ENA(i));
		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
		ixl_wr(sc, I40E_QTX_ENA(i), reg);

		reg = ixl_rd(sc, I40E_QRX_ENA(i));
		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
		ixl_wr(sc, I40E_QRX_ENA(i), reg);
	}

	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		if (ixl_txr_disabled(sc, txr) != 0)
			error = ETIMEDOUT;

		if (ixl_rxr_disabled(sc, rxr) != 0)
			error = ETIMEDOUT;
	}

	if (error) {
		printf("%s: failed to shut down rings\n", DEVNAME(sc));
		return (error);
	}

	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		ixl_txr_unconfig(sc, txr);
		ixl_rxr_unconfig(sc, rxr);

		ixl_txr_clean(sc, txr);
		ixl_rxr_clean(sc, rxr);

		ixl_txr_free(sc, txr);
		ixl_rxr_free(sc, rxr);

		ifp->if_iqs[i]->ifiq_softc = NULL;
		ifp->if_ifqs[i]->ifq_softc = NULL;
	}

	return (0);
}
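/*
 * tx ring handling. each descriptor slot gets an ixl_tx_map with a
 * preallocated dma map, plus the ring index of the packet's EOP
 * descriptor so completion processing can free whole packets.
 */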
static struct ixl_tx_ring *
ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
{
	struct ixl_tx_ring *txr;
	struct ixl_tx_map *maps, *txm;
	unsigned int i;

	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
	if (txr == NULL)
		return (NULL);

	maps = mallocarray(sizeof(*maps),
	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
	if (maps == NULL)
		goto free;

	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
	    IXL_TX_QUEUE_ALIGN) != 0)
		goto freemap;

	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (bus_dmamap_create(sc->sc_dmat,
		    IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &txm->txm_map) != 0)
			goto uncreate;

		txm->txm_eop = -1;
		txm->txm_m = NULL;
	}

	txr->txr_cons = txr->txr_prod = 0;
	txr->txr_maps = maps;

	txr->txr_tail = I40E_QTX_TAIL(qid);
	txr->txr_qid = qid;

	return (txr);

uncreate:
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (txm->txm_map == NULL)
			continue;

		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
	}

	ixl_dmamem_free(sc, &txr->txr_mem);
freemap:
	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
free:
	free(txr, M_DEVBUF, sizeof(*txr));
	return (NULL);
}

static void
ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
{
	unsigned int qid;
	bus_size_t reg;
	uint32_t r;

	qid = txr->txr_qid + sc->sc_base_queue;
	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
	qid %= 128;

	r = ixl_rd(sc, reg);
	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
	ixl_wr(sc, reg, r);
}
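/*
 * the tx queue context lives in the HMC and is packed into the
 * hardware layout by ixl_hmc_pack. head_wb_ena is set to
 * IXL_HMC_TXQ_DESC_WB, ie, completions appear to be reported by
 * rewriting the descriptor dtype (which is what ixl_txeof polls
 * for) rather than by a separate head writeback.
 */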
static void
ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
	struct ixl_hmc_txq txq;
	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
	void *hmc;

	memset(&txq, 0, sizeof(txq));
	txq.head = htole16(0);
	txq.new_context = 1;
	htolem64(&txq.base,
	    IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
	htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
	txq.tphrdesc_ena = 0;
	txq.tphrpacket_ena = 0;
	txq.tphwdesc_ena = 0;
	txq.rdylist = data->qs_handle[0];

	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
}

static void
ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
	void *hmc;

	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
}

static void
ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
	struct ixl_tx_map *maps, *txm;
	bus_dmamap_t map;
	unsigned int i;

	maps = txr->txr_maps;
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (txm->txm_m == NULL)
			continue;

		map = txm->txm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m_freem(txm->txm_m);
		txm->txm_m = NULL;
	}
}

static int
ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
	uint32_t reg;
	int i;

	for (i = 0; i < 10; i++) {
		reg = ixl_rd(sc, ena);
		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
			return (0);

		delaymsec(10);
	}

	return (ETIMEDOUT);
}

static int
ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
	uint32_t reg;
	int i;

	for (i = 0; i < 20; i++) {
		reg = ixl_rd(sc, ena);
		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
			return (0);

		delaymsec(10);
	}

	return (ETIMEDOUT);
}

static void
ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
	struct ixl_tx_map *maps, *txm;
	unsigned int i;

	maps = txr->txr_maps;
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
	}

	ixl_dmamem_free(sc, &txr->txr_mem);
	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
	free(txr, M_DEVBUF, sizeof(*txr));
}

static inline int
ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
	/*
	 * EFBIG means the chain has more segments than the map can
	 * take; defrag into a single cluster and try once more.
	 */
	if (error != EFBIG || m_defrag(m, M_DONTWAIT) != 0)
		return (error);

	return (bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
}
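/*
 * tx start. descriptors are claimed by advancing the local producer
 * index; the tail register is only written once per call, after the
 * ring memory has been synced. (eg, with 1024 descriptors, prod 1000
 * and cons 10 leave free = 10 + 1024 - 1000 = 34 slots.)
 */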
static void
ixl_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct ixl_softc *sc = ifp->if_softc;
	struct ixl_tx_ring *txr = ifq->ifq_softc;
	struct ixl_tx_desc *ring, *txd;
	struct ixl_tx_map *txm;
	bus_dmamap_t map;
	struct mbuf *m;
	uint64_t cmd;
	unsigned int prod, free, last, i;
	unsigned int mask;
	int post = 0;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	prod = txr->txr_prod;
	free = txr->txr_cons;
	if (free <= prod)
		free += sc->sc_tx_ring_ndescs;
	free -= prod;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;

	for (;;) {
		if (free <= IXL_TX_PKT_DESCS) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		txm = &txr->txr_maps[prod];
		map = txm->txm_map;

		if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
			m_freem(m);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		for (i = 0; i < map->dm_nsegs; i++) {
			txd = &ring[prod];

			cmd = (uint64_t)map->dm_segs[i].ds_len <<
			    IXL_TX_DESC_BSIZE_SHIFT;
			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;

			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
			htolem64(&txd->cmd, cmd);

			last = prod;

			prod++;
			prod &= mask;
		}
		/*
		 * rewrite the last descriptor with EOP and RS so the
		 * hardware reports completion of the whole packet.
		 */
		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
		htolem64(&txd->cmd, cmd);

		txm->txm_m = m;
		txm->txm_eop = last;

#if NBPFILTER > 0
		if_bpf = ifp->if_bpf;
		if (if_bpf)
			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
#endif

		free -= i;
		post = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);

	if (post) {
		txr->txr_prod = prod;
		ixl_wr(sc, txr->txr_tail, prod);
	}
}

static int
ixl_txeof(struct ixl_softc *sc, struct ifqueue *ifq)
{
	struct ixl_tx_ring *txr = ifq->ifq_softc;
	struct ixl_tx_desc *ring, *txd;
	struct ixl_tx_map *txm;
	bus_dmamap_t map;
	unsigned int cons, prod, last;
	unsigned int mask;
	uint64_t dtype;
	int done = 0;

	prod = txr->txr_prod;
	cons = txr->txr_cons;

	if (cons == prod)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;

	do {
		txm = &txr->txr_maps[cons];
		last = txm->txm_eop;
		txd = &ring[last];

		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
			break;

		map = txm->txm_map;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(txm->txm_m);

		txm->txm_m = NULL;
		txm->txm_eop = -1;

		cons = last + 1;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);

	txr->txr_cons = cons;

	/* ixl_enable(sc, txr->txr_msix); */

	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);

	return (done);
}
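/*
 * rx ring handling. like tx, except buffer accounting goes through
 * if_rxr so the stack can see ring usage, and a timeout retries the
 * refill if the ring ever runs completely out of mbufs.
 */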
static struct ixl_rx_ring *
ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
{
	struct ixl_rx_ring *rxr;
	struct ixl_rx_map *maps, *rxm;
	unsigned int i;

	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
	if (rxr == NULL)
		return (NULL);

	maps = mallocarray(sizeof(*maps),
	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
	if (maps == NULL)
		goto free;

	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
	    sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
	    IXL_RX_QUEUE_ALIGN) != 0)
		goto freemap;

	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		if (bus_dmamap_create(sc->sc_dmat,
		    IXL_HARDMTU, 1, IXL_HARDMTU, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &rxm->rxm_map) != 0)
			goto uncreate;

		rxm->rxm_m = NULL;
	}

	rxr->rxr_sc = sc;
	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
	timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
	rxr->rxr_cons = rxr->rxr_prod = 0;
	rxr->rxr_m_head = NULL;
	rxr->rxr_m_tail = &rxr->rxr_m_head;
	rxr->rxr_maps = maps;

	rxr->rxr_tail = I40E_QRX_TAIL(qid);
	rxr->rxr_qid = qid;

	return (rxr);

uncreate:
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		if (rxm->rxm_map == NULL)
			continue;

		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
	}

	ixl_dmamem_free(sc, &rxr->rxr_mem);
freemap:
	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
free:
	free(rxr, M_DEVBUF, sizeof(*rxr));
	return (NULL);
}

static void
ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	struct ixl_rx_map *maps, *rxm;
	bus_dmamap_t map;
	unsigned int i;

	if (!timeout_del(&rxr->rxr_refill))
		timeout_barrier(&rxr->rxr_refill);

	maps = rxr->rxr_maps;
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		if (rxm->rxm_m == NULL)
			continue;

		map = rxm->rxm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m_freem(rxm->rxm_m);
		rxm->rxm_m = NULL;
	}

	m_freem(rxr->rxr_m_head);
	rxr->rxr_m_head = NULL;
	rxr->rxr_m_tail = &rxr->rxr_m_head;

	rxr->rxr_prod = rxr->rxr_cons = 0;
}

static int
ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
	uint32_t reg;
	int i;

	for (i = 0; i < 10; i++) {
		reg = ixl_rd(sc, ena);
		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
			return (0);

		delaymsec(10);
	}

	return (ETIMEDOUT);
}

static int
ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
	uint32_t reg;
	int i;

	for (i = 0; i < 20; i++) {
		reg = ixl_rd(sc, ena);
		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
			return (0);

		delaymsec(10);
	}

	return (ETIMEDOUT);
}
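/*
 * the rx queue context selects 16 byte (no header split) rx
 * descriptors and cluster sized buffers, with crc stripping done
 * by the hardware.
 */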
static void
ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	struct ixl_hmc_rxq rxq;
	void *hmc;

	memset(&rxq, 0, sizeof(rxq));

	rxq.head = htole16(0);
	htolem64(&rxq.base,
	    IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
	htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
	rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
	rxq.hbuff = 0;
	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
	rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
	rxq.crcstrip = 1;
	rxq.l2sel = 0;
	rxq.showiv = 0;
	rxq.rxmax = htole16(IXL_HARDMTU);
	rxq.tphrdesc_ena = 0;
	rxq.tphwdesc_ena = 0;
	rxq.tphdata_ena = 0;
	rxq.tphhead_ena = 0;
	rxq.lrxqthresh = 0;
	rxq.prefena = 1;

	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
}

static void
ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	void *hmc;

	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
}

static void
ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	struct ixl_rx_map *maps, *rxm;
	unsigned int i;

	maps = rxr->rxr_maps;
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
	}

	ixl_dmamem_free(sc, &rxr->rxr_mem);
	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
	free(rxr, M_DEVBUF, sizeof(*rxr));
}

static int
ixl_rxeof(struct ixl_softc *sc, struct ifiqueue *ifiq)
{
	struct ixl_rx_ring *rxr = ifiq->ifiq_softc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ixl_rx_wb_desc_16 *ring, *rxd;
	struct ixl_rx_map *rxm;
	bus_dmamap_t map;
	unsigned int cons, prod;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word;
	unsigned int len;
	unsigned int mask;
	int done = 0;

	prod = rxr->rxr_prod;
	cons = rxr->rxr_cons;

	if (cons == prod)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
	    0, IXL_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	ring = IXL_DMA_KVA(&rxr->rxr_mem);
	mask = sc->sc_rx_ring_ndescs - 1;

	do {
		rxd = &ring[cons];

		word = lemtoh64(&rxd->qword1);
		if (!ISSET(word, IXL_RX_DESC_DD))
			break;

		if_rxr_put(&rxr->rxr_acct, 1);

		rxm = &rxr->rxr_maps[cons];

		map = rxm->rxm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);

		m = rxm->rxm_m;
		rxm->rxm_m = NULL;

		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
		m->m_len = len;
		m->m_pkthdr.len = 0;

		m->m_next = NULL;
		*rxr->rxr_m_tail = m;
		rxr->rxr_m_tail = &m->m_next;

		m = rxr->rxr_m_head;
		m->m_pkthdr.len += len;

		if (ISSET(word, IXL_RX_DESC_EOP)) {
			if (!ISSET(word,
			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
				ml_enqueue(&ml, m);
			} else {
				ifp->if_ierrors++; /* XXX */
				m_freem(m);
			}

			rxr->rxr_m_head = NULL;
			rxr->rxr_m_tail = &rxr->rxr_m_head;
		}

		cons++;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	if (done) {
		rxr->rxr_cons = cons;
		ixl_rxfill(sc, rxr);
		if_input(ifp, &ml);
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
	    0, IXL_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (done);
}
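/*
 * rx refill. buffers are clusters with the payload pushed up
 * ETHER_ALIGN bytes so the ip header lands on a natural boundary;
 * the tail register is only poked if at least one slot was filled.
 */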
static void
ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	struct ixl_rx_rd_desc_16 *ring, *rxd;
	struct ixl_rx_map *rxm;
	bus_dmamap_t map;
	struct mbuf *m;
	unsigned int prod;
	unsigned int slots;
	unsigned int mask;
	int post = 0;

	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
	if (slots == 0)
		return;

	prod = rxr->rxr_prod;

	ring = IXL_DMA_KVA(&rxr->rxr_mem);
	mask = sc->sc_rx_ring_ndescs - 1;

	do {
		rxm = &rxr->rxr_maps[prod];

		m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES + ETHER_ALIGN);
		if (m == NULL)
			break;
		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
		m_adj(m, ETHER_ALIGN);

		map = rxm->rxm_map;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}

		rxm->rxm_m = m;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		rxd = &ring[prod];

		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
		rxd->haddr = htole64(0);

		prod++;
		prod &= mask;

		post = 1;
	} while (--slots);

	if_rxr_put(&rxr->rxr_acct, slots);

	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
		timeout_add(&rxr->rxr_refill, 1);
	else if (post) {
		rxr->rxr_prod = prod;
		ixl_wr(sc, rxr->rxr_tail, prod);
	}
}

void
ixl_rxrefill(void *arg)
{
	struct ixl_rx_ring *rxr = arg;
	struct ixl_softc *sc = rxr->rxr_sc;

	ixl_rxfill(sc, rxr);
}

static int
ixl_rxrinfo(struct ixl_softc *sc, struct if_rxrinfo *ifri)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct if_rxring_info *ifr;
	struct ixl_rx_ring *ring;
	int i, rv;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (ENOTTY);

	ifr = mallocarray(sizeof(*ifr), ixl_nqueues(sc), M_TEMP,
	    M_WAITOK|M_CANFAIL|M_ZERO);
	if (ifr == NULL)
		return (ENOMEM);

	for (i = 0; i < ixl_nqueues(sc); i++) {
		ring = ifp->if_iqs[i]->ifiq_softc;
		ifr[i].ifr_size = MCLBYTES;
		ifr[i].ifr_info = ring->rxr_acct;
	}

	rv = if_rxr_info_ioctl(ifri, ixl_nqueues(sc), ifr);
	free(ifr, M_TEMP, ixl_nqueues(sc) * sizeof(*ifr));

	return (rv);
}
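/*
 * interrupt handler for the single "other" vector. ICR0 aggregates
 * the admin queue and link state causes with the two queue causes
 * wired up in ixl_up; rx/tx completions and admin command
 * completions are handled inline, while the admin receive queue and
 * link state updates are deferred to the system taskq.
 */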
static int
ixl_intr(void *xsc)
{
	struct ixl_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t icr;
	int rv = 0;

	ixl_intr_enable(sc);
	icr = ixl_rd(sc, I40E_PFINT_ICR0);

	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
		ixl_atq_done(sc);
		task_add(systq, &sc->sc_arq_task);
		rv = 1;
	}

	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
		task_add(systq, &sc->sc_link_state_task);
		rv = 1;
	}

	if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
		rv |= ixl_rxeof(sc, ifp->if_iqs[0]);
	if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
		rv |= ixl_txeof(sc, ifp->if_ifqs[0]);

	return (rv);
}

static void
ixl_link_state_update_done(struct ixl_softc *sc, void *arg)
{
	/* IXL_AQ_OP_PHY_LINK_STATUS already posted to admin reply queue */
}

static void
ixl_link_state_update(void *xsc)
{
	struct ixl_softc *sc = xsc;
	struct ixl_aq_desc *iaq;
	struct ixl_aq_link_param *param;

	memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
	iaq = &sc->sc_link_state_atq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
	param = (struct ixl_aq_link_param *)iaq->iaq_param;
	param->notify = IXL_AQ_LINK_NOTIFY;

	ixl_atq_set(&sc->sc_link_state_atq, ixl_link_state_update_done, NULL);
	ixl_atq_post(sc, &sc->sc_link_state_atq);
}

static void
ixl_arq_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int link_state;

	NET_LOCK();
	link_state = ixl_set_link_status(sc, iaq);
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
	NET_UNLOCK();
}

#if 0
static void
ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
{
	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
	    lemtoh16(&iaq->iaq_opcode));
	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
}
#endif

static void
ixl_arq(void *xsc)
{
	struct ixl_softc *sc = xsc;
	struct ixl_aq_desc *arq, *iaq;
	struct ixl_aq_buf *aqb;
	unsigned int cons = sc->sc_arq_cons;
	unsigned int prod;
	int done = 0;

	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
	    sc->sc_aq_regs->arq_head_mask;

	if (cons == prod)
		goto done;

	arq = IXL_DMA_KVA(&sc->sc_arq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	do {
		iaq = &arq[cons];

		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
		    BUS_DMASYNC_POSTREAD);

		switch (iaq->iaq_opcode) {
		case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
			ixl_arq_link_status(sc, iaq);
			break;
		}

		memset(iaq, 0, sizeof(*iaq));
		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
		if_rxr_put(&sc->sc_arq_ring, 1);

		cons++;
		cons &= IXL_AQ_MASK;

		done = 1;
	} while (cons != prod);

	if (done && ixl_arq_fill(sc))
		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_arq_cons = cons;

done:
	ixl_intr_enable(sc);
}

static void
ixl_atq_set(struct ixl_atq *iatq,
    void (*fn)(struct ixl_softc *, void *), void *arg)
{
	iatq->iatq_fn = fn;
	iatq->iatq_arg = arg;
}
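/*
 * admin transmit queue. each posted descriptor carries a pointer
 * to its ixl_atq in the cookie field so ixl_atq_done can find the
 * command again and run its completion callback.
 */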
static void
ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
{
	struct ixl_aq_desc *atq, *slot;
	unsigned int prod;

	/* assert locked */

	atq = IXL_DMA_KVA(&sc->sc_atq);
	prod = sc->sc_atq_prod;
	slot = atq + prod;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);

	*slot = iatq->iatq_desc;
	slot->iaq_cookie = (uint64_t)iatq;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);

	prod++;
	prod &= IXL_AQ_MASK;
	sc->sc_atq_prod = prod;
	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
}

static void
ixl_atq_done(struct ixl_softc *sc)
{
	struct ixl_atq_list cmds = SIMPLEQ_HEAD_INITIALIZER(cmds);
	struct ixl_aq_desc *atq, *slot;
	struct ixl_atq *iatq;
	unsigned int cons;
	unsigned int prod;

	prod = sc->sc_atq_prod;
	cons = sc->sc_atq_cons;

	if (prod == cons)
		return;

	atq = IXL_DMA_KVA(&sc->sc_atq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	do {
		slot = &atq[cons];

		iatq = (struct ixl_atq *)slot->iaq_cookie;
		iatq->iatq_desc = *slot;
		SIMPLEQ_INSERT_TAIL(&cmds, iatq, iatq_entry);

		memset(slot, 0, sizeof(*slot));

		cons++;
		cons &= IXL_AQ_MASK;
	} while (cons != prod);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_atq_cons = cons;

	while ((iatq = SIMPLEQ_FIRST(&cmds)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&cmds, iatq_entry);

		(*iatq->iatq_fn)(sc, iatq->iatq_arg);
	}
}

struct ixl_wakeup {
	struct mutex mtx;
	int notdone;
};

static void
ixl_wakeup(struct ixl_softc *sc, void *arg)
{
	struct ixl_wakeup *wake = arg;

	mtx_enter(&wake->mtx);
	wake->notdone = 0;
	mtx_leave(&wake->mtx);

	wakeup(wake);
}

static void
ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
{
	struct ixl_wakeup wake = { MUTEX_INITIALIZER(IPL_NET), 1 };

	KASSERT(iatq->iatq_desc.iaq_cookie == 0);

	ixl_atq_set(iatq, ixl_wakeup, &wake);
	ixl_atq_post(sc, iatq);

	mtx_enter(&wake.mtx);
	while (wake.notdone) {
		mtx_leave(&wake.mtx);
		ixl_atq_done(sc);
		mtx_enter(&wake.mtx);
		msleep(&wake, &wake.mtx, 0, wmesg, 1);
	}
	mtx_leave(&wake.mtx);
}

static int
ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
{
	struct ixl_aq_desc *atq, *slot;
	unsigned int prod;
	unsigned int t = 0;

	atq = IXL_DMA_KVA(&sc->sc_atq);
	prod = sc->sc_atq_prod;
	slot = atq + prod;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);

	*slot = *iaq;
	slot->iaq_flags |= htole16(IXL_AQ_SI);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);

	prod++;
	prod &= IXL_AQ_MASK;
	sc->sc_atq_prod = prod;
	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);

	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
		delaymsec(1);

		if (t++ > tm)
			return (ETIMEDOUT);
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
	*iaq = *slot;
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);

	sc->sc_atq_cons = prod;

	return (0);
}
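/*
 * the admin commands below run at attach time, before the interrupt
 * handler is established, so they use ixl_atq_poll to spin on the
 * atq head register rather than sleeping on a completion.
 */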
static int
ixl_get_version(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;
	uint32_t fwbuild, fwver, apiver;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);

	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
		return (ETIMEDOUT);
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
		return (EIO);

	fwbuild = lemtoh32(&iaq.iaq_param[1]);
	fwver = lemtoh32(&iaq.iaq_param[2]);
	apiver = lemtoh32(&iaq.iaq_param[3]);

	printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
	    (uint16_t)(fwver >> 16), fwbuild, (uint16_t)apiver,
	    (uint16_t)(apiver >> 16));

	return (0);
}

static int
ixl_pxe_clear(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
	iaq.iaq_param[0] = htole32(0x2);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf(", CLEAR PXE MODE timeout\n");
		return (-1);
	}

	switch (iaq.iaq_retval) {
	case HTOLE16(IXL_AQ_RC_OK):
	case HTOLE16(IXL_AQ_RC_EEXIST):
		break;
	default:
		printf(", CLEAR PXE MODE error\n");
		return (-1);
	}

	return (0);
}

static int
ixl_lldp_shut(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf(", STOP LLDP AGENT timeout\n");
		return (-1);
	}

	switch (iaq.iaq_retval) {
	case HTOLE16(IXL_AQ_RC_EMODE):
	case HTOLE16(IXL_AQ_RC_EPERM):
		/* ignore silently */
	default:
		break;
	}

	return (0);
}

static int
ixl_get_mac(struct ixl_softc *sc)
{
	struct ixl_dmamem idm;
	struct ixl_aq_desc iaq;
	struct ixl_aq_mac_addresses *addrs;
	int rv;

	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
		printf(", unable to allocate mac addresses\n");
		return (-1);
	}

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF);
	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
	iaq.iaq_datalen = htole16(sizeof(*addrs));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
	    BUS_DMASYNC_PREREAD);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
	    BUS_DMASYNC_POSTREAD);

	if (rv != 0) {
		printf(", MAC ADDRESS READ timeout\n");
		rv = -1;
		goto done;
	}
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf(", MAC ADDRESS READ error\n");
		rv = -1;
		goto done;
	}

	addrs = IXL_DMA_KVA(&idm);
	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
		printf(", port address is not valid\n");
		goto done;
	}

	memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
	rv = 0;

done:
	ixl_dmamem_free(sc, &idm);
	return (rv);
}
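/*
 * fetch the switch configuration. only the first reported element
 * is used; its seid (and uplink/downlink seids) identify where this
 * PF sits in the internal switch for the later VSI and filter
 * commands.
 */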
static int
ixl_get_switch_config(struct ixl_softc *sc)
{
	struct ixl_dmamem idm;
	struct ixl_aq_desc iaq;
	struct ixl_aq_switch_config *hdr;
	struct ixl_aq_switch_config_element *elms, *elm;
	unsigned int nelm;
	int rv;

	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
		printf("%s: unable to allocate switch config buffer\n",
		    DEVNAME(sc));
		return (-1);
	}

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF |
	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
	    BUS_DMASYNC_PREREAD);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
	    BUS_DMASYNC_POSTREAD);

	if (rv != 0) {
		printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}

	hdr = IXL_DMA_KVA(&idm);
	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);

	nelm = lemtoh16(&hdr->num_reported);
	if (nelm < 1) {
		printf("%s: no switch config available\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}

#if 0
	for (i = 0; i < nelm; i++) {
		elm = &elms[i];

		printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
		    elm->type, elm->revision, lemtoh16(&elm->seid));
		printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
		    lemtoh16(&elm->uplink_seid),
		    lemtoh16(&elm->downlink_seid));
		printf("%s: conntype %x scheduler %04x extra %04x\n",
		    DEVNAME(sc), elm->connection_type,
		    lemtoh16(&elm->scheduler_id),
		    lemtoh16(&elm->element_info));
	}
#endif

	elm = &elms[0];

	sc->sc_uplink_seid = elm->uplink_seid;
	sc->sc_downlink_seid = elm->downlink_seid;
	sc->sc_seid = elm->seid;

	if ((sc->sc_uplink_seid == htole16(0)) !=
	    (sc->sc_downlink_seid == htole16(0))) {
		printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}

done:
	ixl_dmamem_free(sc, &idm);
	return (rv);
}

static int
ixl_phy_mask_ints(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
	      IXL_AQ_PHY_EV_MEDIA_NA));

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
		return (-1);
	}

	return (0);
}
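/*
 * ask the firmware which phy types this port can use; the result
 * feeds the ifmedia list built by ixl_media_add.
 */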
static int
ixl_get_phy_abilities(struct ixl_softc *sc, uint64_t *phy_types_ptr)
{
	struct ixl_dmamem idm;
	struct ixl_aq_desc iaq;
	struct ixl_aq_phy_abilities *phy;
	uint64_t phy_types;
	int rv;

	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
		printf("%s: unable to allocate phy abilities buffer\n",
		    DEVNAME(sc));
		return (-1);
	}

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF |
	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
	    BUS_DMASYNC_PREREAD);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
	    BUS_DMASYNC_POSTREAD);

	if (rv != 0) {
		printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}
	switch (iaq.iaq_retval) {
	case HTOLE16(IXL_AQ_RC_OK):
		break;
	case HTOLE16(IXL_AQ_RC_EIO):
		printf("%s: unable to query phy types\n", DEVNAME(sc));
		rv = 0;
		goto done;
	default:
		printf("%s: GET PHY ABILITIES error\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}

	phy = IXL_DMA_KVA(&idm);

	phy_types = lemtoh32(&phy->phy_type);
	phy_types |= (uint64_t)phy->phy_type_ext << 32;

	*phy_types_ptr = phy_types;

	rv = 0;

done:
	ixl_dmamem_free(sc, &idm);
	return (rv);
}

static int
ixl_get_link_status(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_link_param *param;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
	param = (struct ixl_aq_link_param *)iaq.iaq_param;
	param->notify = IXL_AQ_LINK_NOTIFY;

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
		return (0);
	}

	sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);

	return (0);
}
static int
ixl_get_vsi(struct ixl_softc *sc)
{
	struct ixl_dmamem *vsi = &sc->sc_scratch;
	struct ixl_aq_desc iaq;
	struct ixl_aq_vsi_param *param;
	struct ixl_aq_vsi_reply *reply;
	int rv;

	/* grumble, vsi info isn't "known" at compile time */

	memset(&iaq, 0, sizeof(iaq));
	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));

	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
	param->uplink_seid = sc->sc_seid;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
	    BUS_DMASYNC_PREREAD);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
	    BUS_DMASYNC_POSTREAD);

	if (rv != 0) {
		printf("%s: GET VSI timeout\n", DEVNAME(sc));
		return (-1);
	}

	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: GET VSI error %u\n", DEVNAME(sc),
		    lemtoh16(&iaq.iaq_retval));
		return (-1);
	}

	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
	sc->sc_vsi_number = reply->vsi_number;

	return (0);
}
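/*
 * update the VSI fetched by ixl_get_vsi: map a contiguous range of
 * queues starting at queue 0, and set the port vlan mode so tagged
 * packets are accepted and, going by the flag names, passed up with
 * their tags left in place.
 */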
static int
ixl_set_vsi(struct ixl_softc *sc)
{
	struct ixl_dmamem *vsi = &sc->sc_scratch;
	struct ixl_aq_desc iaq;
	struct ixl_aq_vsi_param *param;
	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
	int rv;

	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
	    IXL_AQ_VSI_VALID_VLAN);

	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
	data->queue_mapping[0] = htole16(0);
	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));

	CLR(data->port_vlan_flags,
	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
	SET(data->port_vlan_flags,
	    htole16(IXL_AQ_VSI_PVLAN_MODE_ALL | IXL_AQ_VSI_PVLAN_EMOD_NOTHING));

	/* grumble, vsi info isn't "known" at compile time */

	memset(&iaq, 0, sizeof(iaq));
	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));

	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
	param->uplink_seid = sc->sc_seid;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
	    BUS_DMASYNC_PREWRITE);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
	    BUS_DMASYNC_POSTWRITE);

	if (rv != 0) {
		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
		return (-1);
	}

	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
		    lemtoh16(&iaq.iaq_retval));
		return (-1);
	}

	return (0);
}

static const struct ixl_phy_type *
ixl_search_phy_type(uint8_t phy_type)
{
	const struct ixl_phy_type *itype;
	uint64_t mask;
	unsigned int i;

	if (phy_type >= 64)
		return (NULL);

	mask = 1ULL << phy_type;

	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
		itype = &ixl_phy_type_map[i];

		if (ISSET(itype->phy_type, mask))
			return (itype);
	}

	return (NULL);
}

static uint64_t
ixl_search_link_speed(uint8_t link_speed)
{
	const struct ixl_speed_type *type;
	unsigned int i;

	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
		type = &ixl_speed_type_map[i];

		if (ISSET(type->dev_speed, link_speed))
			return (type->net_speed);
	}

	return (0);
}

static int
ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
{
	const struct ixl_aq_link_status *status;
	const struct ixl_phy_type *itype;

	uint64_t ifm_active = IFM_ETHER;
	uint64_t ifm_status = IFM_AVALID;
	int link_state = LINK_STATE_DOWN;
	uint64_t baudrate = 0;

	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
		goto done;

	ifm_active |= IFM_FDX;
	ifm_status |= IFM_ACTIVE;
	link_state = LINK_STATE_FULL_DUPLEX;

	itype = ixl_search_phy_type(status->phy_type);
	if (itype != NULL)
		ifm_active |= itype->ifm_type;

	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
		ifm_active |= IFM_ETH_TXPAUSE;
	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
		ifm_active |= IFM_ETH_RXPAUSE;

	baudrate = ixl_search_link_speed(status->link_speed);

done:
	/* NET_ASSERT_LOCKED() except during attach */
	sc->sc_media_active = ifm_active;
	sc->sc_media_status = ifm_status;
	sc->sc_ac.ac_if.if_baudrate = baudrate;

	return (link_state);
}

static int
ixl_restart_an(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
	iaq.iaq_param[0] =
	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf("%s: RESTART AN timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: RESTART AN error\n", DEVNAME(sc));
		return (-1);
	}

	return (0);
}
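/*
 * mac/vlan filter programming. both commands pass a single element
 * via the scratch dma buffer; the 0x8000 or'ed into seid0 looks to
 * be the "seid valid" flag from the admin queue spec.
 */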
static int
ixl_add_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan,
    uint16_t flags)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_add_macvlan *param;
	struct ixl_aq_add_macvlan_elem *elem;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
	iaq.iaq_datalen = htole16(sizeof(*elem));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));

	param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
	param->num_addrs = htole16(1);
	param->seid0 = htole16(0x8000) | sc->sc_seid;
	param->seid1 = 0;
	param->seid2 = 0;

	elem = IXL_DMA_KVA(&sc->sc_scratch);
	memset(elem, 0, sizeof(*elem));
	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
	elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
	elem->vlan = htole16(vlan);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf("%s: ADD_MACVLAN timeout\n", DEVNAME(sc));
		return (IXL_AQ_RC_EINVAL);
	}

	return (letoh16(iaq.iaq_retval));
}

static int
ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan,
    uint16_t flags)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_remove_macvlan *param;
	struct ixl_aq_remove_macvlan_elem *elem;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
	iaq.iaq_datalen = htole16(sizeof(*elem));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));

	param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
	param->num_addrs = htole16(1);
	param->seid0 = htole16(0x8000) | sc->sc_seid;
	param->seid1 = 0;
	param->seid2 = 0;

	elem = IXL_DMA_KVA(&sc->sc_scratch);
	memset(elem, 0, sizeof(*elem));
	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
	elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
	elem->vlan = htole16(vlan);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf("%s: REMOVE_MACVLAN timeout\n", DEVNAME(sc));
		return (IXL_AQ_RC_EINVAL);
	}

	return (letoh16(iaq.iaq_retval));
}
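/*
 * host memory cache (HMC) setup. the chip keeps its queue contexts
 * in host memory behind a two level table: each sd entry points at
 * IXL_HMC_PGS pd backing pages, while the lan tx/rx object sizes
 * and counts are read from (and the resulting layout programmed
 * back into) the global HMC registers.
 */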
static int
ixl_hmc(struct ixl_softc *sc)
{
	struct {
		uint32_t	count;
		uint32_t	minsize;
		bus_size_t	maxcnt;
		bus_size_t	setoff;
		bus_size_t	setcnt;
	} regs[] = {
		{
			0,
			IXL_HMC_TXQ_MINSIZE,
			I40E_GLHMC_LANTXOBJSZ,
			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
		},
		{
			0,
			IXL_HMC_RXQ_MINSIZE,
			I40E_GLHMC_LANRXOBJSZ,
			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
		},
		{
			0,
			0,
			I40E_GLHMC_FCOEMAX,
			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
		},
		{
			0,
			0,
			I40E_GLHMC_FCOEFMAX,
			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
		},
	};
	struct ixl_hmc_entry *e;
	uint64_t size, dva;
	uint8_t *kva;
	uint64_t *sdpage;
	unsigned int i;
	int npages, tables;

	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));

	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
	    ixl_rd(sc, I40E_GLHMC_LANQMAX);

	size = 0;
	for (i = 0; i < nitems(regs); i++) {
		e = &sc->sc_hmc_entries[i];

		e->hmc_count = regs[i].count;
		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
		e->hmc_base = size;

		if ((e->hmc_size * 8) < regs[i].minsize) {
			printf("%s: kernel hmc entry is too big\n",
			    DEVNAME(sc));
			return (-1);
		}

		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
	}
	size = roundup(size, IXL_HMC_PGSIZE);
	npages = size / IXL_HMC_PGSIZE;

	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;

	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
		return (-1);
	}

	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
	    IXL_HMC_PGSIZE) != 0) {
		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
		return (-1);
	}

	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
	for (i = 0; i < npages; i++) {
		htolem64(sdpage++, dva | IXL_HMC_PDVALID);

		dva += IXL_HMC_PGSIZE;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
	for (i = 0; i < tables; i++) {
		uint32_t count;

		KASSERT(npages >= 0);

		count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;

		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
		ixl_wr(sc, I40E_PFHMC_SDCMD,
		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);

		npages -= IXL_HMC_PGS;
		dva += IXL_HMC_PGSIZE;
	}

	for (i = 0; i < nitems(regs); i++) {
		e = &sc->sc_hmc_entries[i];

		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
	}

	return (0);
}

static void
ixl_hmc_free(struct ixl_softc *sc)
{
	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
}

/*
 * pack a host-order context structure into the hardware's packed
 * little endian bit layout, one field at a time. the offset, width,
 * and lsb of each field come from the ixl_hmc_pack_{tx,rx}q tables.
 */
static void
ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
    unsigned int npacking)
{
	uint8_t *dst = d;
	const uint8_t *src = s;
	unsigned int i;

	for (i = 0; i < npacking; i++) {
		const struct ixl_hmc_pack *pack = &packing[i];
		unsigned int offset = pack->lsb / 8;
		unsigned int align = pack->lsb % 8;
		const uint8_t *in = src + pack->offset;
		uint8_t *out = dst + offset;
		int width = pack->width;
		unsigned int inbits = 0;

		if (align) {
			inbits = (*in++) << align;
			*out++ |= (inbits & 0xff);
			inbits >>= 8;

			width -= 8 - align;
		}

		while (width >= 8) {
			inbits |= (*in++) << align;
			*out++ = (inbits & 0xff);
			inbits >>= 8;

			width -= 8;
		}

		if (width > 0) {
			inbits |= (*in) << align;
			*out |= (inbits & ((1 << width) - 1));
		}
	}
}
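/*
 * admin receive queue buffers. each slot gets an IXL_AQ_BUFLEN dma
 * buffer; buffers move between the live list (posted to hardware)
 * and the idle list (waiting to be reposted by ixl_arq_fill).
 */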
static struct ixl_aq_buf *
ixl_aqb_alloc(struct ixl_softc *sc)
{
	struct ixl_aq_buf *aqb;

	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
	if (aqb == NULL)
		return (NULL);

	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
	if (aqb->aqb_data == NULL)
		goto free;

	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
	    IXL_AQ_BUFLEN, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &aqb->aqb_map) != 0)
		goto dma_free;

	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
		goto destroy;

	return (aqb);

destroy:
	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
dma_free:
	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
free:
	free(aqb, M_DEVBUF, sizeof(*aqb));

	return (NULL);
}

static void
ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
{
	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
	free(aqb, M_DEVBUF, sizeof(*aqb));
}

static int
ixl_arq_fill(struct ixl_softc *sc)
{
	struct ixl_aq_buf *aqb;
	struct ixl_aq_desc *arq, *iaq;
	unsigned int prod = sc->sc_arq_prod;
	unsigned int n;
	int post = 0;

	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
	arq = IXL_DMA_KVA(&sc->sc_arq);

	while (n > 0) {
		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
		if (aqb != NULL)
			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
		else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
			break;

		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
		    BUS_DMASYNC_PREREAD);

		iaq = &arq[prod];
		iaq->iaq_flags = htole16(IXL_AQ_BUF |
		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
		iaq->iaq_opcode = 0;
		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
		iaq->iaq_retval = 0;
		iaq->iaq_cookie = 0;
		iaq->iaq_param[0] = 0;
		iaq->iaq_param[1] = 0;
		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);

		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);

		prod++;
		prod &= IXL_AQ_MASK;

		post = 1;

		n--;
	}

	if_rxr_put(&sc->sc_arq_ring, n);
	sc->sc_arq_prod = prod;

	return (post);
}

static void
ixl_arq_unfill(struct ixl_softc *sc)
{
	struct ixl_aq_buf *aqb;

	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
		    BUS_DMASYNC_POSTREAD);
		ixl_aqb_free(sc, aqb);
	}
}
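
/*
 * Quiesce the function before a PF reset: discover how many
 * interrupts, LAN queues, and VFs were assigned to it, point every
 * interrupt linked list at the end-of-list index (0x7ff), warn the
 * hardware of the coming TX queue disables through GLLAN_TXPRE_QDIS,
 * and then clear the queue and interrupt enables, with settling
 * delays in between.
 */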
static void
ixl_clear_hw(struct ixl_softc *sc)
{
	uint32_t num_queues, base_queue;
	uint32_t num_pf_int;
	uint32_t num_vf_int;
	uint32_t num_vfs;
	uint32_t i, j;
	uint32_t val;
	uint32_t eol = 0x7ff;

	/* get number of interrupts, queues, and vfs */
	val = ixl_rd(sc, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		uint32_t abs_queue_idx = base_queue + i;
		uint32_t reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	delaymsec(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
		ixl_wr(sc, I40E_QTX_ENA(i), 0);
		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
		ixl_wr(sc, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	delaymsec(50);
}
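
/*
 * Reset the PF.  If a global reset was recently issued, wait for the
 * device to reach steady state and for the firmware to report both
 * CORE and GLOBAL configuration done; an explicit PF software reset
 * via PFGEN_CTRL is only triggered when no global reset was in
 * flight, since the global reset already covers it.
 */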
static int
ixl_pf_reset(struct ixl_softc *sc)
{
	uint32_t cnt = 0;
	uint32_t cnt1 = 0;
	uint32_t reg = 0;
	uint32_t grst_del;

	/*
	 * Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
	grst_del += 10;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		delaymsec(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		printf(", Global reset polling failed to complete\n");
		return (-1);
	}

	/* Now wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = ixl_rd(sc, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
			break;

		delaymsec(10);
	}
	if (reg != (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
		printf(", wait for FW reset complete timed out "
		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
		return (-1);
	}

	/*
	 * If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (cnt == 0) {
		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			delaymsec(1);
		}
		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			printf(", PF reset polling failed to complete "
			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
			return (-1);
		}
	}

	return (0);
}

static int
ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
    bus_size_t size, u_int align)
{
	ixm->ixm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
	    ixm->ixm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &ixm->ixm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
	return (1);
}

static void
ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
{
	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
}
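
/*
 * Usage sketch for the ixl_dmamem helpers above (illustrative only,
 * not a real caller).  Users embed a struct ixl_dmamem, allocate it
 * with the alignment the hardware wants, touch it through the
 * IXL_DMA_KVA()/IXL_DMA_DVA()/IXL_DMA_MAP()/IXL_DMA_LEN() accessors,
 * and sync before handing the device address to the chip:
 *
 *	struct ixl_dmamem idm;
 *
 *	if (ixl_dmamem_alloc(sc, &idm, len, IXL_HMC_PGSIZE) != 0)
 *		return (-1);
 *	memset(IXL_DMA_KVA(&idm), 0, IXL_DMA_LEN(&idm));
 *	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
 *	    IXL_DMA_LEN(&idm), BUS_DMASYNC_PREWRITE);
 *	... program IXL_DMA_DVA(&idm) into the relevant registers ...
 *	ixl_dmamem_free(sc, &idm);
 */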