/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>
#include <rte_alarm.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

static int atl_dev_configure(struct rte_eth_dev *dev);
static int atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int atl_dev_set_link_up(struct rte_eth_dev *dev);
static int atl_dev_set_link_down(struct rte_eth_dev *dev);
static void atl_dev_close(struct rte_eth_dev *dev);
static int atl_dev_reset(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *stats, unsigned int n);

static int atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		size_t fw_size);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
		uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
		struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);


static int atl_add_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);


static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

static int atl_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};

#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)
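/*
 * Note: ATL_RX_OFFLOADS/ATL_TX_OFFLOADS are the fixed, port-wide
 * capability sets that atl_dev_info_get() reports to applications.
 */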

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};

enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};
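/*
 * Illustrative expansion: ATL_XSTATS_FIELD(uprc) yields the initializer
 *	{ "uprc", offsetof(struct aq_stats_s, uprc), XSTATS_TYPE_MSM }
 * i.e. each table entry binds an xstat name to the byte offset of the
 * matching counter inside the structure selected by its type.
 */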

static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure = atl_dev_configure,
	.dev_start = atl_dev_start,
	.dev_stop = atl_dev_stop,
	.dev_set_link_up = atl_dev_set_link_up,
	.dev_set_link_down = atl_dev_set_link_down,
	.dev_close = atl_dev_close,
	.dev_reset = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable = atl_dev_promiscuous_enable,
	.promiscuous_disable = atl_dev_promiscuous_disable,
	.allmulticast_enable = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update = atl_dev_link_update,

	.get_reg = atl_dev_get_regs,

	/* Stats */
	.stats_get = atl_dev_stats_get,
	.xstats_get = atl_dev_xstats_get,
	.xstats_get_names = atl_dev_xstats_get_names,
	.stats_reset = atl_dev_stats_reset,
	.xstats_reset = atl_dev_stats_reset,

	.fw_version_get = atl_fw_version_get,
	.dev_infos_get = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set = atl_vlan_filter_set,
	.vlan_offload_set = atl_vlan_offload_set,
	.vlan_tpid_set = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start = atl_rx_queue_start,
	.rx_queue_stop = atl_rx_queue_stop,
	.rx_queue_setup = atl_rx_queue_setup,
	.rx_queue_release = atl_rx_queue_release,

	.tx_queue_start = atl_tx_queue_start,
	.tx_queue_stop = atl_tx_queue_stop,
	.tx_queue_setup = atl_tx_queue_setup,
	.tx_queue_release = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length = atl_dev_get_eeprom_length,
	.get_eeprom = atl_dev_get_eeprom,
	.set_eeprom = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get = atl_flow_ctrl_get,
	.flow_ctrl_set = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add = atl_add_mac_addr,
	.mac_addr_remove = atl_remove_mac_addr,
	.mac_addr_set = atl_set_default_mac_addr,
	.set_mc_addr_list = atl_dev_set_mc_addr_list,
	.rxq_info_get = atl_rxq_info_get,
	.txq_info_get = atl_txq_info_get,

	.reta_update = atl_reta_update,
	.reta_query = atl_reta_query,
	.rss_hash_update = atl_rss_hash_update,
	.rss_hash_conf_get = atl_rss_hash_conf_get,
};

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}

static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;
	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
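	/*
	 * Note: the defaults below are fixed at init time rather than
	 * taken from dev_conf. Every supported link rate is enabled in
	 * link_speed_msk; atl_dev_set_link_up() later narrows the mask
	 * according to dev_conf.link_speeds.
	 */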
	/* Hardware configuration - hardcode */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			AQ_NIC_RATE_5G |
			AQ_NIC_RATE_2G5 |
			AQ_NIC_RATE_1G |
			AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	pthread_mutex_init(&hw->mbox_mutex, NULL);

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	atl_enable_intr(eth_dev);

	return err;
}

static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	pthread_mutex_destroy(&hw->mbox_mutex);

	return 0;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}

static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
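/*
 * Note: ATL_FLAG_NEED_LINK_UPDATE set in atl_dev_configure() is consumed
 * by atl_dev_interrupt_action(), which performs the deferred link update
 * once interrupts are running.
 */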
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u: fixed speed is not supported",
		dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
				     ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
				dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		     hw->fw_ver_actual >> 24,
		     (hw->fw_ver_actual >> 16) & 0xFF,
		     hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);

	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
			    "LSC interrupt won't be enabled: no interrupt multiplexing");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

/*
 * Set device link up: enable tx.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}

/*
 * Set device link down: disable tx.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}

/*
 * Reset and stop device.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_atl_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}

static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Creating set of sc/sa structures from parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* only a single TX SC is used, so index 0 */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */
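	/*
	 * Informative sketch of the SCI built below: per 802.1AE the
	 * 64-bit SCI is the 48-bit source MAC followed by a 16-bit port
	 * identifier, so e.g. MAC 00:17:b6:01:02:03 with port id 1 gives
	 * sci[1] = 0x0017b601 and sci[0] = 0x02030001.
	 */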
	/*
	 * Creating SCI (Secure Channel Identifier).
	 * SCI constructed from Source MAC and Port identifier
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0; /* 0x3f */

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}

int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
	       RTE_ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
	       RTE_ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}

static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static int
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

	return 0;
}

static int
atl_dev_xstats_get_count(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;

	struct aq_hw_s *hw = &adapter->hw;
	unsigned int i, count = 0;

	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
		    ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
			continue;

		count++;
	}

	return count;
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (xstats_names) {
		for (i = 0; i < size && i < count; i++) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 atl_xstats_tbl[i].name);
		}
	}

	return count;
}
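/*
 * Note: per the ethdev xstats convention, a NULL stats array in
 * atl_dev_xstats_get() means "return the number of available counters".
 * MSM counters are read from the mirrored hw->curr_stats, while MACsec
 * counters are fetched from firmware through a mailbox request.
 */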
static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (!stats)
		return count;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < count; i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (!err) {
				stats[i].value =
					*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);
			}
			break;
		}
	}

	return i;
}

static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* account for the string null-terminator */

	if (fw_size < ret)
		return ret;

	return 0;
}

static int
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;

	return 0;
}

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}
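/*
 * Note: atl_dev_delayed_handler() runs from an EAL alarm armed in
 * atl_dev_link_update(), so MACsec (re)configuration is applied shortly
 * after a link change rather than inline in the link update itself.
 */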
static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}


/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}

static int
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);

	return 0;
}

static int
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);

	return 0;
}

static int
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);

	return 0;
}

static int
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */

static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}


static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			    (int)(dev->data->port_id),
			    (unsigned int)link.link_speed,
			    link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			    "full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
			    (int)(dev->data->port_id));
	}

#ifdef DEBUG
	{
		struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			    pci_dev->addr.domain,
			    pci_dev->addr.bus,
			    pci_dev->addr.devid,
			    pci_dev->addr.function);
	}
#endif

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
}

/*
 * It executes link_update after an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			 struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check macsec Keys expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			_rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}
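/*
 * Note on the interrupt discipline: atl_dev_interrupt_get_status() masks
 * all causes via atl_disable_intr(); atl_dev_interrupt_action() above
 * re-enables them and acks the handle once the event has been processed.
 */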

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}


static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}

static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)
		return -ENOTSUP;

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;

	return 0;
}
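/*
 * Note: AQ_NIC_FC_RX/AQ_NIC_FC_TX map onto the ethdev modes as shown in
 * atl_flow_ctrl_get() above (neither bit = RTE_FC_NONE, a single bit =
 * RTE_FC_RX_PAUSE or RTE_FC_TX_PAUSE, both = RTE_FC_FULL);
 * atl_flow_ctrl_set() below applies the same mapping in reverse.
 */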
static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}

static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		 uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
	return 0;
}

static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	int ret;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = atl_dev_info_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}
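/*
 * Note on VLAN filter bookkeeping: cfg->vlan_filter[] is a linear table
 * of HW_ATL_B0_MAX_VLAN_IDS slots where 0 marks a free slot; hardware
 * VLAN promiscuous mode is enabled whenever the table is empty.
 */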
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter slot to add the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter slot for the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}

static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}

static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (queue_id > dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}
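/*
 * Note: the multicast list is programmed into the L2 unicast filter
 * slots starting at HW_ATL_B0_MAC_MIN, so the usable list length is
 * bounded by the remaining unicast filter space.
 */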
static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}

static int
atl_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}

static int
atl_reta_query(struct rte_eth_dev *dev,
	       struct rte_eth_rss_reta_entry64 *reta_conf,
	       uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}

static int
atl_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}

RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);