/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>

/* Globals */
static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;

/* VXLAN tunnel classification mapping */
const struct _qede_vxlan_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{
		ETH_TUNNEL_FILTER_OMAC,
		ECORE_FILTER_MAC,
		ECORE_TUNN_CLSS_MAC_VLAN,
		"outer-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_VNI,
		ECORE_TUNN_CLSS_MAC_VNI,
		"vni"
	},
	{
		ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_VLAN,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_MAC_VNI,
		"outer-mac and vni"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VNI,
		"vni and inner-mac",
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"vni and inner-vlan",
	},
	{
		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac and inner-vlan",
	},
	{
		ETH_TUNNEL_FILTER_OIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-IP"
	},
	{
		ETH_TUNNEL_FILTER_IIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"inner-IP"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN_TENID"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_TENID"
	},
	{
		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"OMAC_TENID_IMAC"
	},
};

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
	{"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats, rx_bcast_pkts)},

	{"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
	{"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats,
			 rx_9217_to_16383_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats,
			 tx_9217_to_16383_byte_packets)},

	{"rx_mac_crtl_frames",
		offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
	{"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
	{"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats, tx_pfc_frames)},

	{"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
	{"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats, rx_oversize_packets)},
	{"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats, packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats, brb_discards)},
	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, tx_total_collisions)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(struct rte_intr_handle *handle, void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->num_tc = qdev->dev_info.num_tc;
	qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version:%s\n", rte_version());
	DP_INFO(edev, " Chip details : %s%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		CHIP_REV_IS_A0(edev) ? 0 : 1);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		 (info->mfw_rev >> 24) & 0xff,
		 (info->mfw_rev >> 16) & 0xff,
		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", fw_file);
	DP_INFO(edev, "*********************************\n");
}

static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
	ucast->is_rx_filter = true;
	ucast->is_tx_filter = true;
	/* ucast->assert_on_error = true; - For debug */
}

static void qede_set_cmn_tunn_param(struct ecore_tunn_update_params *params,
				    uint8_t clss, uint64_t mode, uint64_t mask)
{
	memset(params, 0, sizeof(struct ecore_tunn_update_params));
	params->tunn_mode = mode;
	params->tunn_mode_update_mask = mask;
	params->update_tx_pf_clss = 1;
	params->update_rx_pf_clss = 1;
	params->tunn_clss_vxlan = clss;
}

static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct ether_addr *mac_addr;

	mac_addr = (struct ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan) {
				DP_ERR(edev, "Unicast MAC is already added"
				       " with vlan = %u, vni = %u\n",
				       ucast->vlan, ucast->vni);
				return -EEXIST;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

static int
qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ether_addr *mac_addr;
	struct qede_mcast_entry *tmp = NULL;
	struct qede_mcast_entry *m;

	mac_addr = (struct ether_addr *)mcast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
				DP_ERR(edev,
				       "Multicast MAC is already added\n");
				return -EEXIST;
			}
		}
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!m) {
			DP_ERR(edev,
			       "Did not allocate memory for mcast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
		qdev->num_mc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Multicast mac is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->mc_list_head, tmp,
			     qede_mcast_entry, list);
		qdev->num_mc_addr--;
	}

	return 0;
}

static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc;
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *tmp;
	uint16_t j = 0;

	/* Multicast */
	if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
		if (add) {
			if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
				DP_ERR(edev,
				       "Mcast filter table limit exceeded, "
				       "Please enable mcast promisc mode\n");
				return -ECORE_INVAL;
			}
		}
		rc = qede_mcast_filter(eth_dev, ucast, add);
		if (rc == 0) {
			DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
			memset(&mcast, 0, sizeof(mcast));
			mcast.num_mc_addrs = qdev->num_mc_addr;
			mcast.opcode = ECORE_FILTER_ADD;
			SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
				ether_addr_copy(&tmp->mac,
					(struct ether_addr *)&mcast.mac[j]);
				j++;
			}
			rc = ecore_filter_mcast_cmd(edev, &mcast,
						    ECORE_SPQ_MODE_CB, NULL);
		}
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to add multicast filter"
			       " rc = %d, op = %d\n", rc, add);
		}
	} else { /* Unicast */
		if (add) {
			if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
				DP_ERR(edev,
				       "Ucast filter table limit exceeded,"
				       " Please enable promisc mode\n");
				return -ECORE_INVAL;
			}
		}
		rc = qede_ucast_filter(eth_dev, ucast, add);
		if (rc == 0)
			rc = ecore_filter_ucast_cmd(edev, ucast,
						    ECORE_SPQ_MODE_CB, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
			       rc, add);
		}
	}

	return rc;
}

static void
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;

	qede_set_ucast_cmn_params(&ucast);
	ucast.type = ECORE_FILTER_MAC;
	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
	(void)qede_mac_int_ops(eth_dev, &ucast, 1);
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ether_addr mac_addr;
	struct ecore_filter_ucast ucast;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_addrs) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_addrs);
		return;
	}

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	ether_addr_copy(&eth_dev->data->mac_addrs[index],
			(struct ether_addr *)&ucast.mac);

	ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_ucast ucast;
	int rc;

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	/* First remove the primary mac */
	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;
	ether_addr_copy(&qdev->primary_mac,
			(struct ether_addr *)&ucast.mac);
	rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != 0) {
		DP_ERR(edev, "Unable to remove current macaddr"
		       " Reverting to previous default mac\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	/* Add new MAC */
	ucast.opcode = ECORE_FILTER_ADD;
	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
	rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != 0)
		DP_ERR(edev, "Unable to add new default mac\n");
	else
		ether_addr_copy(mac_addr, &qdev->primary_mac);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_update_vport_params params = {
		.vport_id = 0,
		.accept_any_vlan = action,
		.update_accept_any_vlan_flg = 1,
	};
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (qdev->accept_any_vlan == action)
		return;

	rc = qdev->ops->vport_update(edev, &params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		qdev->accept_any_vlan = action;
	}
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_inner_vlan_removal_flg = 1;
	vport_update_params.inner_vlan_removal_flg = set_stripping;
	rc = qdev->ops->vport_update(edev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}

static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->hw_vlan_strip)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rxmode->hw_vlan_filter) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_NOTICE(edev, false,
					  " Please remove existing VLAN filters"
					  " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				rxmode->hw_vlan_filter = true;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
			" and classification is based on outer tag only\n");

	DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
		mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_INFO(edev, "Reached max VLAN filter limit"
				" enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_ERR(edev, "VLAN %u already configured\n",
				       vlan_id);
				return -EEXIST;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (!tmp) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}

static int qede_init_vport(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_start_vport_params start = {0};
	int rc;

	start.remove_inner_vlan = 1;
	start.gro_enable = 0;
	start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
	start.vport_id = 0;
	start.drop_ttl0 = false;
	start.clear_stats = 1;
	start.handle_ptp_pkts = 0;

	rc = qdev->ops->vport_start(edev, &start);
	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	DP_INFO(edev,
		"Start vport ramrod passed, vport_id = %d, MTU = %u\n",
		start.vport_id, ETHER_MTU);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}

static int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(qdev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int rc, i, j;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (edev->num_hwfns > 1) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_NOTICE(edev, false,
				  "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_NOTICE(edev, false,
				  "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	/* Sanity checks and throw warnings */
	if (rxmode->enable_scatter == 1)
		eth_dev->data->scattered_rx = 1;

	if (rxmode->enable_lro == 1) {
		DP_INFO(edev, "LRO is not supported\n");
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc)
		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

	if (!rxmode->hw_ip_checksum)
		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
			"in hw\n");

	/* Check for the port restart case */
	if (qdev->state != QEDE_DEV_INIT) {
		rc = qdev->ops->vport_stop(edev, 0);
		if (rc != 0)
			return rc;
		qede_dealloc_fp_resc(eth_dev);
	}

	qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
	qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
	qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;

	/* Fastpath status block should be initialized before sending
	 * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
	 */
	rc = qede_alloc_fp_resc(qdev);
	if (rc != 0)
		return rc;

	/* Issue VPORT-START with default config values to allow
	 * other port configurations early on.
	 */
	rc = qede_init_vport(qdev);
	if (rc != 0)
		return rc;

	/* Do RSS configuration after vport-start */
	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_RSS:
		rc = qede_config_rss(eth_dev);
		if (rc != 0) {
			qdev->ops->vport_stop(edev, 0);
			qede_dealloc_fp_resc(eth_dev);
			return -EINVAL;
		}
		break;
	case ETH_MQ_RX_NONE:
		DP_INFO(edev, "RSS is disabled\n");
		break;
	default:
		DP_ERR(edev, "Unsupported RSS mode\n");
		qdev->ops->vport_stop(edev, 0);
		qede_dealloc_fp_resc(eth_dev);
		return -EINVAL;
	}

	SLIST_INIT(&qdev->vlan_list_head);

	/* Add primary mac for PF */
	if (IS_PF(edev))
		qede_mac_addr_set(eth_dev, &qdev->primary_mac);

	/* Enable VLAN offloads by default */
	qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
				       ETH_VLAN_FILTER_MASK |
				       ETH_VLAN_EXTEND_MASK);

	qdev->state = QEDE_DEV_CONFIG;

	DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
		(int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
		qdev->num_tc);

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = NUM_RX_BDS_MAX,
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = NUM_TX_BDS_MAX,
	.nb_min = 256,
	.nb_align = 256
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
					      QEDE_ETH_OVERHEAD);
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;
	dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
	dev_info->max_tx_queues = dev_info->max_rx_queues;
	dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = QEDE_TXQ_FLAGS,
	};

	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM);
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;
}

/* return 0 means link status changed, -1 means not changed */
static int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t link_duplex;
	struct qed_link_output link;
	struct rte_eth_link *curr = &eth_dev->data->dev_link;

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);

	/* Link Speed */
	curr->link_speed = link.speed;

	/* Link Mode */
	switch (link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	curr->link_duplex = link_duplex;

	/* Link Status */
	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		curr->link_speed, curr->link_duplex,
		curr->link_autoneg, curr->link_status);

	/* return 0 means link status changed, -1 means not changed */
	return ((curr->link_status == link.link_up) ? -1 : 0);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(timer_period * US_PER_S,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and device can be brought up newly
	 */
	if (qdev->state != QEDE_DEV_STOP)
		qede_dev_stop(eth_dev);
	else
		DP_INFO(edev, "Device is already stopped\n");

	rc = qdev->ops->vport_stop(edev, 0);
	if (rc != 0)
		DP_ERR(edev, "Failed to stop VPORT\n");

	qede_dealloc_fp_resc(eth_dev);

	qdev->ops->common->slowpath_stop(edev);

	qdev->ops->common->remove(edev);

	rte_intr_disable(&pci_dev->intr_handle);

	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);

	if (edev->num_hwfns > 1)
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);

	qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}

static void
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid;
	struct qede_tx_queue *txq;

	qdev->ops->get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.rx_ucast_pkts +
	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;

	eth_stats->ibytes = stats.rx_ucast_bytes +
	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;

	eth_stats->ierrors = stats.rx_crc_errors +
	    stats.rx_align_errors +
	    stats.rx_carrier_errors +
	    stats.rx_oversize_packets +
	    stats.rx_jabbers + stats.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.no_buff_discards;

	eth_stats->imissed = stats.mftag_filter_discards +
	    stats.mac_filter_discards +
	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.tx_ucast_pkts +
	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;

	eth_stats->obytes = stats.tx_ucast_bytes +
	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;

	eth_stats->oerrors = stats.tx_err_drop_pkts;

	/* Queue stats */
	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
			eth_stats->q_ipackets[i] =
				*(uint64_t *)(
					((char *)(qdev->fp_array[(qid)].rxq)) +
					offsetof(struct qede_rx_queue,
						 rcv_pkts));
			eth_stats->q_errors[i] =
				*(uint64_t *)(
					((char *)(qdev->fp_array[(qid)].rxq)) +
					offsetof(struct qede_rx_queue,
						 rx_hw_errors)) +
				*(uint64_t *)(
					((char *)(qdev->fp_array[(qid)].rxq)) +
					offsetof(struct qede_rx_queue,
						 rx_alloc_errors));
			i++;
		}

		if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
			txq = qdev->fp_array[(qid)].txqs[0];
			eth_stats->q_opackets[j] =
				*((uint64_t *)(uintptr_t)
					(((uint64_t)(uintptr_t)(txq)) +
					 offsetof(struct qede_tx_queue,
						  xmit_pkts)));
			j++;
		}
	}
}

static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
	return RTE_DIM(qede_xstats_strings) +
	       (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
}

static int
qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names, unsigned limit)
{
	struct qede_dev *qdev = dev->data->dev_private;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;

	if (xstats_names != NULL) {
		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
			snprintf(xstats_names[stat_idx].name,
				sizeof(xstats_names[stat_idx].name),
				"%s",
				qede_xstats_strings[i].name);
			stat_idx++;
		}

		for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					sizeof(xstats_names[stat_idx].name),
					"%.4s%d%s",
					qede_rxq_xstats_strings[i].name, qid,
					qede_rxq_xstats_strings[i].name + 4);
				stat_idx++;
			}
		}
	}

	return stat_cnt;
}

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;

	if (n < num)
		return num;

	qdev->ops->get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
						       qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				xstats[stat_idx].value = *(uint64_t *)(
					((char *)(qdev->fp_array[(qid)].rxq)) +
					 qede_rxq_xstats_strings[i].offset);
				xstats[stat_idx].id = stat_idx;
				stat_idx++;
			}
		}
	}

	return stat_idx;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
}

static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
				struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_rss_params params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t i;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;
	vport_update_params.rss_params = &rss_params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);
	return 0;
}

static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
				struct rte_eth_rss_reta_entry64 *reta_conf,
				uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params params;
	struct ecore_hwfn *p_hwfn;
	uint16_t i, idx, shift;
	uint8_t entry;
	int rc;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&params, 0, sizeof(params));

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = reta_conf[idx].reta[shift];
			params.rss_ind_table[i] = entry;
		}
	}

	/* Fix up RETA for CMT mode device */
	if (edev->num_hwfns > 1)
		qdev->rss_enable = qed_update_rss_parm_cmt(edev,
					&params.rss_ind_table[0]);
	params.update_rss_ind_table = 1;
	params.rss_table_size_log = 7;
	params.update_rss_config = 1;
	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params.rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = &params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}

	/* Update the local copy for RETA query command */
	memcpy(qdev->rss_ind_table, params.rss_ind_table,
	       sizeof(params.rss_ind_table));

	return 0;
}

static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}

int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t frame_size;
	struct qede_dev *qdev = dev->data->dev_private;
	struct rte_eth_dev_info dev_info = {0};

	qede_dev_info_get(dev, &dev_info);

	/* VLAN_TAG = 4 */
	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;

	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	qdev->mtu = mtu;
	qede_dev_stop(dev);
	qede_dev_start(dev);

	return 0;
}

static int
qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *tunnel_udp,
		       bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunn_update_params params;
	struct ecore_hwfn *p_hwfn;
	int rc, i;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&params, 0, sizeof(params));
	if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
		params.update_vxlan_udp_port = 1;
		params.vxlan_udp_port = (add) ? tunnel_udp->udp_port :
					QEDE_VXLAN_DEF_PORT;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &params,
						ECORE_SPQ_MODE_CB, NULL);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Unable to config UDP port %u\n",
				       params.vxlan_udp_port);
				return rc;
			}
		}
	}

	return 0;
}

int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
}

int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
}

static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
				       uint32_t *clss, char *str)
{
	uint16_t j;
	*clss = MAX_ECORE_TUNN_CLSS;

	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
		if (filter == qede_tunn_types[j].rte_filter_type) {
			*type = qede_tunn_types[j].qede_type;
			*clss = qede_tunn_types[j].qede_tunn_clss;
			strcpy(str, qede_tunn_types[j].string);
			return;
		}
	}
}

static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
			      const struct rte_eth_tunnel_filter_conf *conf,
			      uint32_t type)
{
	/* Init common ucast params first */
	qede_set_ucast_cmn_params(ucast);

	/* Copy out the required fields based on classification type */
	ucast->type = type;

	switch (type) {
	case ECORE_FILTER_VNI:
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_VLAN:
		ucast->vlan = conf->inner_vlan;
		break;
	case ECORE_FILTER_MAC:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_INNER_MAC:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vlan = conf->inner_vlan;
		break;
	default:
		return -EINVAL;
	}

	return ECORE_SUCCESS;
}

static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
				  enum rte_filter_op filter_op,
				  const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunn_update_params params;
	struct ecore_hwfn *p_hwfn;
	enum ecore_filter_ucast_type type;
	enum ecore_tunn_clss clss;
	struct ecore_filter_ucast ucast;
	char str[80];
	uint16_t filter_type;
	int rc, i;

	filter_type = conf->filter_type | qdev->vxlan_filter_type;
	/* First determine if the given filter classification is supported */
	qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
	if (clss == MAX_ECORE_TUNN_CLSS) {
		DP_ERR(edev, "Wrong filter type\n");
type\n"); 1802 return -EINVAL; 1803 } 1804 /* Init tunnel ucast params */ 1805 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type); 1806 if (rc != ECORE_SUCCESS) { 1807 DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n", 1808 conf->filter_type); 1809 return rc; 1810 } 1811 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n", 1812 str, filter_op, ucast.type); 1813 switch (filter_op) { 1814 case RTE_ETH_FILTER_ADD: 1815 ucast.opcode = ECORE_FILTER_ADD; 1816 1817 /* Skip MAC/VLAN if filter is based on VNI */ 1818 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { 1819 rc = qede_mac_int_ops(eth_dev, &ucast, 1); 1820 if (rc == 0) { 1821 /* Enable accept anyvlan */ 1822 qede_config_accept_any_vlan(qdev, true); 1823 } 1824 } else { 1825 rc = qede_ucast_filter(eth_dev, &ucast, 1); 1826 if (rc == 0) 1827 rc = ecore_filter_ucast_cmd(edev, &ucast, 1828 ECORE_SPQ_MODE_CB, NULL); 1829 } 1830 1831 if (rc != ECORE_SUCCESS) 1832 return rc; 1833 1834 qdev->vxlan_filter_type = filter_type; 1835 1836 DP_INFO(edev, "Enabling VXLAN tunneling\n"); 1837 qede_set_cmn_tunn_param(¶ms, clss, 1838 (1 << ECORE_MODE_VXLAN_TUNN), 1839 (1 << ECORE_MODE_VXLAN_TUNN)); 1840 for_each_hwfn(edev, i) { 1841 p_hwfn = &edev->hwfns[i]; 1842 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, 1843 ¶ms, ECORE_SPQ_MODE_CB, NULL); 1844 if (rc != ECORE_SUCCESS) { 1845 DP_ERR(edev, "Failed to update tunn_clss %u\n", 1846 params.tunn_clss_vxlan); 1847 } 1848 } 1849 qdev->num_tunn_filters++; /* Filter added successfully */ 1850 break; 1851 case RTE_ETH_FILTER_DELETE: 1852 ucast.opcode = ECORE_FILTER_REMOVE; 1853 1854 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { 1855 rc = qede_mac_int_ops(eth_dev, &ucast, 0); 1856 } else { 1857 rc = qede_ucast_filter(eth_dev, &ucast, 0); 1858 if (rc == 0) 1859 rc = ecore_filter_ucast_cmd(edev, &ucast, 1860 ECORE_SPQ_MODE_CB, NULL); 1861 } 1862 if (rc != ECORE_SUCCESS) 1863 return rc; 1864 1865 qdev->vxlan_filter_type = filter_type; 1866 qdev->num_tunn_filters--; 1867 1868 /* Disable VXLAN if VXLAN filters become 0 */ 1869 if (qdev->num_tunn_filters == 0) { 1870 DP_INFO(edev, "Disabling VXLAN tunneling\n"); 1871 1872 /* Use 0 as tunnel mode */ 1873 qede_set_cmn_tunn_param(¶ms, clss, 0, 1874 (1 << ECORE_MODE_VXLAN_TUNN)); 1875 for_each_hwfn(edev, i) { 1876 p_hwfn = &edev->hwfns[i]; 1877 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, 1878 ¶ms, ECORE_SPQ_MODE_CB, NULL); 1879 if (rc != ECORE_SUCCESS) { 1880 DP_ERR(edev, 1881 "Failed to update tunn_clss %u\n", 1882 params.tunn_clss_vxlan); 1883 break; 1884 } 1885 } 1886 } 1887 break; 1888 default: 1889 DP_ERR(edev, "Unsupported operation %d\n", filter_op); 1890 return -EINVAL; 1891 } 1892 DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters); 1893 1894 return 0; 1895 } 1896 1897 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, 1898 enum rte_filter_type filter_type, 1899 enum rte_filter_op filter_op, 1900 void *arg) 1901 { 1902 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1903 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1904 struct rte_eth_tunnel_filter_conf *filter_conf = 1905 (struct rte_eth_tunnel_filter_conf *)arg; 1906 1907 switch (filter_type) { 1908 case RTE_ETH_FILTER_TUNNEL: 1909 switch (filter_conf->tunnel_type) { 1910 case RTE_TUNNEL_TYPE_VXLAN: 1911 DP_INFO(edev, 1912 "Packet steering to the specified Rx queue" 1913 " is not supported with VXLAN tunneling"); 1914 return(qede_vxlan_tunn_config(eth_dev, filter_op, 1915 filter_conf)); 1916 /* Place holders for future tunneling support */ 1917 case RTE_TUNNEL_TYPE_GENEVE: 1918 case 
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_tunnel_filter_conf *filter_conf =
			(struct rte_eth_tunnel_filter_conf *)arg;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		switch (filter_conf->tunnel_type) {
		case RTE_TUNNEL_TYPE_VXLAN:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with VXLAN tunneling");
			return qede_vxlan_tunn_config(eth_dev, filter_op,
						      filter_conf);
		/* Placeholders for future tunneling support */
		case RTE_TUNNEL_TYPE_GENEVE:
		case RTE_TUNNEL_TYPE_TEREDO:
		case RTE_TUNNEL_TYPE_NVGRE:
		case RTE_TUNNEL_TYPE_IP_IN_GRE:
		case RTE_L2_TUNNEL_TYPE_E_TAG:
			DP_ERR(edev, "Unsupported tunnel type %d\n",
			       filter_conf->tunnel_type);
			return -EINVAL;
		case RTE_TUNNEL_TYPE_NONE:
		default:
			return 0;
		}
		break;
	case RTE_ETH_FILTER_FDIR:
	case RTE_ETH_FILTER_MACVLAN:
	case RTE_ETH_FILTER_ETHERTYPE:
	case RTE_ETH_FILTER_FLEXIBLE:
	case RTE_ETH_FILTER_SYN:
	case RTE_ETH_FILTER_NTUPLE:
	case RTE_ETH_FILTER_HASH:
	case RTE_ETH_FILTER_L2_TUNNEL:
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",
		       filter_type);
		return -EINVAL;
	}

	return 0;
}

static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
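/*
 * VF ops table: compared to qede_eth_dev_ops above, it omits the PF-only
 * callbacks (MAC address add/remove/set, flow control, filter_ctrl and the
 * UDP tunnel port add/del ops); the remaining callbacks are shared with
 * the PF path.
 */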
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	/* 32 rx + 32 tx */
	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = 64;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
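/*
 * Common PF/VF init path, in order: probe the device through the ecore
 * common ops, register and enable the PCI interrupt handler, start the
 * slowpath (arming a periodic alarm for CMT devices), query device info,
 * allocate the ethdev MAC address array, latch the primary MAC (from HW
 * for PFs, from the PF bulletin for VFs) and finally install the PF or VF
 * ops table.
 */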
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t max_mac_addrs;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	edev = &adapter->edev;
	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_NOTICE(edev, false,
			  "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");

	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
				    dp_module, dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}

	qede_update_pf_params(edev);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (edev->num_hwfns > 1 && IS_PF(edev)) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);

	if (!is_vf)
		adapter->dev_info.num_mac_addrs =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
					     &adapter->dev_info.num_mac_addrs);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					 adapter->dev_info.num_mac_addrs),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_NOTICE(edev, false,
					  "No VF macaddr assigned\n");
			}
		}
	}
	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	adapter->state = QEDE_DEV_INIT;

	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		  adapter->primary_mac.addr_bytes[0],
		  adapter->primary_mac.addr_bytes[1],
		  adapter->primary_mac.addr_bytes[2],
		  adapter->primary_mac.addr_bytes[3],
		  adapter->primary_mac.addr_bytes[4],
		  adapter->primary_mac.addr_bytes[5]);

	return rc;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
	},
	{.vendor_id = 0,}
};

static struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
	},
	{.vendor_id = 0,}
};

static struct eth_driver rte_qedevf_pmd = {
	.pci_drv = {
		.id_table = pci_id_qedevf_map,
		.drv_flags =
			RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = qedevf_eth_dev_init,
	.eth_dev_uninit = qedevf_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

static struct eth_driver rte_qede_pmd = {
	.pci_drv = {
		.id_table = pci_id_qede_map,
		.drv_flags =
			RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = qede_eth_dev_init,
	.eth_dev_uninit = qede_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};
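/*
 * Register the PF (net_qede) and VF (net_qede_vf) PMDs with the PCI bus.
 * Per the kernel-module dependency strings below, the PF can be bound via
 * igb_uio, uio_pci_generic or vfio, while the VF is expected under igb_uio
 * or vfio.
 */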
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");