/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static void axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_close = axgbe_dev_close,
	.dev_infos_get = axgbe_dev_info_get,
	.rx_queue_setup = axgbe_dev_rx_queue_setup,
	.rx_queue_release = axgbe_dev_rx_queue_release,
	.tx_queue_setup = axgbe_dev_tx_queue_setup,
	.tx_queue_release = axgbe_dev_tx_queue_release,
};

/*
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;

	pdata->phy_if.an_isr(pdata);

	/* Enable interrupts since disabled after generation */
	rte_intr_enable(&pdata->pci_dev->intr_handle);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static void
axgbe_dev_info_get(struct rte_eth_dev *dev,
		   struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	pdata->xgmac_regs =
		(uint64_t)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = pdata->xgmac_regs + AXGBE_MAC_PROP_OFFSET;
	pdata->xi2c_regs = pdata->xgmac_regs + AXGBE_I2C_CTRL_OFFSET;
	pdata->xpcs_regs = (uint64_t)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* Version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
					       ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to store MAC addr tbl",
			     ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
		eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);

	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	/* Free the MAC address table */
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	eth_dev->dev_ops = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log);
static void
axgbe_init_log(void)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}