/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>

/* Globals */
static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;

/* VXLAN tunnel classification mapping */
const struct _qede_vxlan_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{ ETH_TUNNEL_FILTER_OMAC, ECORE_FILTER_MAC,
	  ECORE_TUNN_CLSS_MAC_VLAN, "outer-mac" },
	{ ETH_TUNNEL_FILTER_TENID, ECORE_FILTER_VNI,
	  ECORE_TUNN_CLSS_MAC_VNI, "vni" },
	{ ETH_TUNNEL_FILTER_IMAC, ECORE_FILTER_INNER_MAC,
	  ECORE_TUNN_CLSS_INNER_MAC_VLAN, "inner-mac" },
	{ ETH_TUNNEL_FILTER_IVLAN, ECORE_FILTER_INNER_VLAN,
	  ECORE_TUNN_CLSS_INNER_MAC_VLAN, "inner-vlan" },
	{ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
	  ECORE_FILTER_MAC_VNI_PAIR, ECORE_TUNN_CLSS_MAC_VNI,
	  "outer-mac and vni" },
	{ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
	  ECORE_FILTER_UNUSED, MAX_ECORE_TUNN_CLSS,
	  "outer-mac and inner-mac" },
	{ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
	  ECORE_FILTER_UNUSED, MAX_ECORE_TUNN_CLSS,
	  "outer-mac and inner-vlan" },
	{ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
	  ECORE_FILTER_INNER_MAC_VNI_PAIR, ECORE_TUNN_CLSS_INNER_MAC_VNI,
	  "vni and inner-mac" },
	{ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
	  ECORE_FILTER_UNUSED, MAX_ECORE_TUNN_CLSS,
	  "vni and inner-vlan" },
	{ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	  ECORE_FILTER_INNER_PAIR, ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	  "inner-mac and inner-vlan" },
	{ ETH_TUNNEL_FILTER_OIP, ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS, "outer-IP" },
	{ ETH_TUNNEL_FILTER_IIP, ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS, "inner-IP" },
	{ RTE_TUNNEL_FILTER_IMAC_IVLAN, ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS, "IMAC_IVLAN" },
	{ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID, ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS, "IMAC_IVLAN_TENID" },
	{ RTE_TUNNEL_FILTER_IMAC_TENID, ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS, "IMAC_TENID" },
	{ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC, ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS, "OMAC_TENID_IMAC" },
};

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};
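/*
 * Each entry in the tables below pairs an xstat display name with the byte
 * offset of the corresponding 64-bit counter inside struct ecore_eth_stats
 * (or, for the per-queue table, struct qede_rx_queue).  qede_get_xstats()
 * later walks these tables and reads every counter generically, roughly as:
 *
 *	value = *(uint64_t *)((char *)&stats + qede_xstats_strings[i].offset);
 *
 * so adding a new statistic only needs a new {name, offsetof()} entry here.
 */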
static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
	{"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats, rx_bcast_pkts)},

	{"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
	{"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, rx_9217_to_16383_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, tx_9217_to_16383_byte_packets)},

	{"rx_mac_crtl_frames",
		offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
	{"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
	{"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats, tx_pfc_frames)},

	{"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
	{"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats, rx_oversize_packets)},
	{"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats, no_buff_discards)},
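	/*
	 * The remaining entries expose device-level discard/error counters,
	 * MAC-layer counters and LRO (TPA) aggregation counters; the names on
	 * the left are what applications see via rte_eth_xstats_get_names().
	 */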
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats, packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats, brb_discards)},
	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, tx_total_collisions)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(struct rte_intr_handle *handle, void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->num_tc = qdev->dev_info.num_tc;
	qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version:%s\n",
rte_version()); 310 DP_INFO(edev, " Chip details : %s%d\n", 311 ECORE_IS_BB(edev) ? "BB" : "AH", 312 CHIP_REV_IS_A0(edev) ? 0 : 1); 313 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", 314 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng); 315 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s", 316 ver_str, QEDE_PMD_VERSION); 317 DP_INFO(edev, " Driver version : %s\n", drv_ver); 318 DP_INFO(edev, " Firmware version : %s\n", ver_str); 319 320 snprintf(ver_str, MCP_DRV_VER_STR_SIZE, 321 "%d.%d.%d.%d", 322 (info->mfw_rev >> 24) & 0xff, 323 (info->mfw_rev >> 16) & 0xff, 324 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff); 325 DP_INFO(edev, " Management Firmware version : %s\n", ver_str); 326 DP_INFO(edev, " Firmware file : %s\n", fw_file); 327 DP_INFO(edev, "*********************************\n"); 328 } 329 330 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast) 331 { 332 memset(ucast, 0, sizeof(struct ecore_filter_ucast)); 333 ucast->is_rx_filter = true; 334 ucast->is_tx_filter = true; 335 /* ucast->assert_on_error = true; - For debug */ 336 } 337 338 static void qede_set_cmn_tunn_param(struct ecore_tunn_update_params *params, 339 uint8_t clss, uint64_t mode, uint64_t mask) 340 { 341 memset(params, 0, sizeof(struct ecore_tunn_update_params)); 342 params->tunn_mode = mode; 343 params->tunn_mode_update_mask = mask; 344 params->update_tx_pf_clss = 1; 345 params->update_rx_pf_clss = 1; 346 params->tunn_clss_vxlan = clss; 347 } 348 349 static int 350 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 351 bool add) 352 { 353 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 354 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 355 struct qede_ucast_entry *tmp = NULL; 356 struct qede_ucast_entry *u; 357 struct ether_addr *mac_addr; 358 359 mac_addr = (struct ether_addr *)ucast->mac; 360 if (add) { 361 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 362 if ((memcmp(mac_addr, &tmp->mac, 363 ETHER_ADDR_LEN) == 0) && 364 ucast->vlan == tmp->vlan) { 365 DP_ERR(edev, "Unicast MAC is already added" 366 " with vlan = %u, vni = %u\n", 367 ucast->vlan, ucast->vni); 368 return -EEXIST; 369 } 370 } 371 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 372 RTE_CACHE_LINE_SIZE); 373 if (!u) { 374 DP_ERR(edev, "Did not allocate memory for ucast\n"); 375 return -ENOMEM; 376 } 377 ether_addr_copy(mac_addr, &u->mac); 378 u->vlan = ucast->vlan; 379 u->vni = ucast->vni; 380 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 381 qdev->num_uc_addr++; 382 } else { 383 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 384 if ((memcmp(mac_addr, &tmp->mac, 385 ETHER_ADDR_LEN) == 0) && 386 ucast->vlan == tmp->vlan && 387 ucast->vni == tmp->vni) 388 break; 389 } 390 if (tmp == NULL) { 391 DP_INFO(edev, "Unicast MAC is not found\n"); 392 return -EINVAL; 393 } 394 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 395 qdev->num_uc_addr--; 396 } 397 398 return 0; 399 } 400 401 static int 402 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast, 403 bool add) 404 { 405 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 406 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 407 struct ether_addr *mac_addr; 408 struct qede_mcast_entry *tmp = NULL; 409 struct qede_mcast_entry *m; 410 411 mac_addr = (struct ether_addr *)mcast->mac; 412 if (add) { 413 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 414 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) { 415 DP_ERR(edev, 416 "Multicast MAC is already added\n"); 417 return 
-EEXIST; 418 } 419 } 420 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 421 RTE_CACHE_LINE_SIZE); 422 if (!m) { 423 DP_ERR(edev, 424 "Did not allocate memory for mcast\n"); 425 return -ENOMEM; 426 } 427 ether_addr_copy(mac_addr, &m->mac); 428 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); 429 qdev->num_mc_addr++; 430 } else { 431 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 432 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) 433 break; 434 } 435 if (tmp == NULL) { 436 DP_INFO(edev, "Multicast mac is not found\n"); 437 return -EINVAL; 438 } 439 SLIST_REMOVE(&qdev->mc_list_head, tmp, 440 qede_mcast_entry, list); 441 qdev->num_mc_addr--; 442 } 443 444 return 0; 445 } 446 447 static enum _ecore_status_t 448 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 449 bool add) 450 { 451 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 452 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 453 enum _ecore_status_t rc; 454 struct ecore_filter_mcast mcast; 455 struct qede_mcast_entry *tmp; 456 uint16_t j = 0; 457 458 /* Multicast */ 459 if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) { 460 if (add) { 461 if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) { 462 DP_ERR(edev, 463 "Mcast filter table limit exceeded, " 464 "Please enable mcast promisc mode\n"); 465 return -ECORE_INVAL; 466 } 467 } 468 rc = qede_mcast_filter(eth_dev, ucast, add); 469 if (rc == 0) { 470 DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr); 471 memset(&mcast, 0, sizeof(mcast)); 472 mcast.num_mc_addrs = qdev->num_mc_addr; 473 mcast.opcode = ECORE_FILTER_ADD; 474 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 475 ether_addr_copy(&tmp->mac, 476 (struct ether_addr *)&mcast.mac[j]); 477 j++; 478 } 479 rc = ecore_filter_mcast_cmd(edev, &mcast, 480 ECORE_SPQ_MODE_CB, NULL); 481 } 482 if (rc != ECORE_SUCCESS) { 483 DP_ERR(edev, "Failed to add multicast filter" 484 " rc = %d, op = %d\n", rc, add); 485 } 486 } else { /* Unicast */ 487 if (add) { 488 if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) { 489 DP_ERR(edev, 490 "Ucast filter table limit exceeded," 491 " Please enable promisc mode\n"); 492 return -ECORE_INVAL; 493 } 494 } 495 rc = qede_ucast_filter(eth_dev, ucast, add); 496 if (rc == 0) 497 rc = ecore_filter_ucast_cmd(edev, ucast, 498 ECORE_SPQ_MODE_CB, NULL); 499 if (rc != ECORE_SUCCESS) { 500 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", 501 rc, add); 502 } 503 } 504 505 return rc; 506 } 507 508 static void 509 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr, 510 uint32_t index, __rte_unused uint32_t pool) 511 { 512 struct ecore_filter_ucast ucast; 513 514 qede_set_ucast_cmn_params(&ucast); 515 ucast.type = ECORE_FILTER_MAC; 516 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); 517 (void)qede_mac_int_ops(eth_dev, &ucast, 1); 518 } 519 520 static void 521 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) 522 { 523 struct qede_dev *qdev = eth_dev->data->dev_private; 524 struct ecore_dev *edev = &qdev->edev; 525 struct ether_addr mac_addr; 526 struct ecore_filter_ucast ucast; 527 int rc; 528 529 PMD_INIT_FUNC_TRACE(edev); 530 531 if (index >= qdev->dev_info.num_mac_addrs) { 532 DP_ERR(edev, "Index %u is above MAC filter limit %u\n", 533 index, qdev->dev_info.num_mac_addrs); 534 return; 535 } 536 537 qede_set_ucast_cmn_params(&ucast); 538 ucast.opcode = ECORE_FILTER_REMOVE; 539 ucast.type = ECORE_FILTER_MAC; 540 541 /* Use the index maintained by rte */ 542 ether_addr_copy(ð_dev->data->mac_addrs[index], 543 
(struct ether_addr *)&ucast.mac); 544 545 ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); 546 } 547 548 static void 549 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) 550 { 551 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 552 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 553 struct ecore_filter_ucast ucast; 554 int rc; 555 556 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), 557 mac_addr->addr_bytes)) { 558 DP_ERR(edev, "Setting MAC address is not allowed\n"); 559 ether_addr_copy(&qdev->primary_mac, 560 ð_dev->data->mac_addrs[0]); 561 return; 562 } 563 564 /* First remove the primary mac */ 565 qede_set_ucast_cmn_params(&ucast); 566 ucast.opcode = ECORE_FILTER_REMOVE; 567 ucast.type = ECORE_FILTER_MAC; 568 ether_addr_copy(&qdev->primary_mac, 569 (struct ether_addr *)&ucast.mac); 570 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); 571 if (rc != 0) { 572 DP_ERR(edev, "Unable to remove current macaddr" 573 " Reverting to previous default mac\n"); 574 ether_addr_copy(&qdev->primary_mac, 575 ð_dev->data->mac_addrs[0]); 576 return; 577 } 578 579 /* Add new MAC */ 580 ucast.opcode = ECORE_FILTER_ADD; 581 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); 582 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); 583 if (rc != 0) 584 DP_ERR(edev, "Unable to add new default mac\n"); 585 else 586 ether_addr_copy(mac_addr, &qdev->primary_mac); 587 } 588 589 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action) 590 { 591 struct ecore_dev *edev = &qdev->edev; 592 struct qed_update_vport_params params = { 593 .vport_id = 0, 594 .accept_any_vlan = action, 595 .update_accept_any_vlan_flg = 1, 596 }; 597 int rc; 598 599 /* Proceed only if action actually needs to be performed */ 600 if (qdev->accept_any_vlan == action) 601 return; 602 603 rc = qdev->ops->vport_update(edev, ¶ms); 604 if (rc) { 605 DP_ERR(edev, "Failed to %s accept-any-vlan\n", 606 action ? "enable" : "disable"); 607 } else { 608 DP_INFO(edev, "%s accept-any-vlan\n", 609 action ? 
"enabled" : "disabled"); 610 qdev->accept_any_vlan = action; 611 } 612 } 613 614 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping) 615 { 616 struct qed_update_vport_params vport_update_params; 617 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 618 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 619 int rc; 620 621 memset(&vport_update_params, 0, sizeof(vport_update_params)); 622 vport_update_params.vport_id = 0; 623 vport_update_params.update_inner_vlan_removal_flg = 1; 624 vport_update_params.inner_vlan_removal_flg = set_stripping; 625 rc = qdev->ops->vport_update(edev, &vport_update_params); 626 if (rc) { 627 DP_ERR(edev, "Update V-PORT failed %d\n", rc); 628 return rc; 629 } 630 631 return 0; 632 } 633 634 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 635 { 636 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 637 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 638 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 639 640 if (mask & ETH_VLAN_STRIP_MASK) { 641 if (rxmode->hw_vlan_strip) 642 (void)qede_vlan_stripping(eth_dev, 1); 643 else 644 (void)qede_vlan_stripping(eth_dev, 0); 645 } 646 647 if (mask & ETH_VLAN_FILTER_MASK) { 648 /* VLAN filtering kicks in when a VLAN is added */ 649 if (rxmode->hw_vlan_filter) { 650 qede_vlan_filter_set(eth_dev, 0, 1); 651 } else { 652 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ 653 DP_ERR(edev, 654 " Please remove existing VLAN filters" 655 " before disabling VLAN filtering\n"); 656 /* Signal app that VLAN filtering is still 657 * enabled 658 */ 659 rxmode->hw_vlan_filter = true; 660 } else { 661 qede_vlan_filter_set(eth_dev, 0, 0); 662 } 663 } 664 } 665 666 if (mask & ETH_VLAN_EXTEND_MASK) 667 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q" 668 " and classification is based on outer tag only\n"); 669 670 DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n", 671 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter); 672 } 673 674 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, 675 uint16_t vlan_id, int on) 676 { 677 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 678 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 679 struct qed_dev_eth_info *dev_info = &qdev->dev_info; 680 struct qede_vlan_entry *tmp = NULL; 681 struct qede_vlan_entry *vlan; 682 struct ecore_filter_ucast ucast; 683 int rc; 684 685 if (on) { 686 if (qdev->configured_vlans == dev_info->num_vlan_filters) { 687 DP_ERR(edev, "Reached max VLAN filter limit" 688 " enabling accept_any_vlan\n"); 689 qede_config_accept_any_vlan(qdev, true); 690 return 0; 691 } 692 693 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 694 if (tmp->vid == vlan_id) { 695 DP_ERR(edev, "VLAN %u already configured\n", 696 vlan_id); 697 return -EEXIST; 698 } 699 } 700 701 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), 702 RTE_CACHE_LINE_SIZE); 703 704 if (!vlan) { 705 DP_ERR(edev, "Did not allocate memory for VLAN\n"); 706 return -ENOMEM; 707 } 708 709 qede_set_ucast_cmn_params(&ucast); 710 ucast.opcode = ECORE_FILTER_ADD; 711 ucast.type = ECORE_FILTER_VLAN; 712 ucast.vlan = vlan_id; 713 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 714 NULL); 715 if (rc != 0) { 716 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, 717 rc); 718 rte_free(vlan); 719 } else { 720 vlan->vid = vlan_id; 721 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); 722 qdev->configured_vlans++; 723 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", 724 vlan_id, qdev->configured_vlans); 725 } 726 
} else { 727 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 728 if (tmp->vid == vlan_id) 729 break; 730 } 731 732 if (!tmp) { 733 if (qdev->configured_vlans == 0) { 734 DP_INFO(edev, 735 "No VLAN filters configured yet\n"); 736 return 0; 737 } 738 739 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 740 return -EINVAL; 741 } 742 743 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 744 745 qede_set_ucast_cmn_params(&ucast); 746 ucast.opcode = ECORE_FILTER_REMOVE; 747 ucast.type = ECORE_FILTER_VLAN; 748 ucast.vlan = vlan_id; 749 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 750 NULL); 751 if (rc != 0) { 752 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 753 vlan_id, rc); 754 } else { 755 qdev->configured_vlans--; 756 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 757 vlan_id, qdev->configured_vlans); 758 } 759 } 760 761 return rc; 762 } 763 764 static int qede_init_vport(struct qede_dev *qdev) 765 { 766 struct ecore_dev *edev = &qdev->edev; 767 struct qed_start_vport_params start = {0}; 768 int rc; 769 770 start.remove_inner_vlan = 1; 771 start.gro_enable = 0; 772 start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD; 773 start.vport_id = 0; 774 start.drop_ttl0 = false; 775 start.clear_stats = 1; 776 start.handle_ptp_pkts = 0; 777 778 rc = qdev->ops->vport_start(edev, &start); 779 if (rc) { 780 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 781 return rc; 782 } 783 784 DP_INFO(edev, 785 "Start vport ramrod passed, vport_id = %d, MTU = %u\n", 786 start.vport_id, ETHER_MTU); 787 788 return 0; 789 } 790 791 static void qede_prandom_bytes(uint32_t *buff) 792 { 793 uint8_t i; 794 795 srand((unsigned int)time(NULL)); 796 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 797 buff[i] = rand(); 798 } 799 800 static int qede_config_rss(struct rte_eth_dev *eth_dev) 801 { 802 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 803 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 804 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; 805 struct rte_eth_rss_reta_entry64 reta_conf[2]; 806 struct rte_eth_rss_conf rss_conf; 807 uint32_t i, id, pos, q; 808 809 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 810 if (!rss_conf.rss_key) { 811 DP_INFO(edev, "Applying driver default key\n"); 812 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 813 qede_prandom_bytes(&def_rss_key[0]); 814 rss_conf.rss_key = (uint8_t *)&def_rss_key[0]; 815 } 816 817 /* Configure RSS hash */ 818 if (qede_rss_hash_update(eth_dev, &rss_conf)) 819 return -EINVAL; 820 821 /* Configure default RETA */ 822 memset(reta_conf, 0, sizeof(reta_conf)); 823 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) 824 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; 825 826 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 827 id = i / RTE_RETA_GROUP_SIZE; 828 pos = i % RTE_RETA_GROUP_SIZE; 829 q = i % QEDE_RSS_COUNT(qdev); 830 reta_conf[id].reta[pos] = q; 831 } 832 if (qede_rss_reta_update(eth_dev, &reta_conf[0], 833 ECORE_RSS_IND_TABLE_SIZE)) 834 return -EINVAL; 835 836 return 0; 837 } 838 839 static int qede_dev_configure(struct rte_eth_dev *eth_dev) 840 { 841 struct qede_dev *qdev = eth_dev->data->dev_private; 842 struct ecore_dev *edev = &qdev->edev; 843 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 844 int rc, i, j; 845 846 PMD_INIT_FUNC_TRACE(edev); 847 848 /* Check requirements for 100G mode */ 849 if (edev->num_hwfns > 1) { 850 if (eth_dev->data->nb_rx_queues < 2 || 851 eth_dev->data->nb_tx_queues < 2) { 852 DP_ERR(edev, "100G mode needs min. 
2 RX/TX queues\n"); 853 return -EINVAL; 854 } 855 856 if ((eth_dev->data->nb_rx_queues % 2 != 0) || 857 (eth_dev->data->nb_tx_queues % 2 != 0)) { 858 DP_ERR(edev, 859 "100G mode needs even no. of RX/TX queues\n"); 860 return -EINVAL; 861 } 862 } 863 864 /* Sanity checks and throw warnings */ 865 if (rxmode->enable_scatter == 1) 866 eth_dev->data->scattered_rx = 1; 867 868 if (rxmode->enable_lro == 1) { 869 DP_ERR(edev, "LRO is not supported\n"); 870 return -EINVAL; 871 } 872 873 if (!rxmode->hw_strip_crc) 874 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n"); 875 876 if (!rxmode->hw_ip_checksum) 877 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled " 878 "in hw\n"); 879 880 /* Check for the port restart case */ 881 if (qdev->state != QEDE_DEV_INIT) { 882 rc = qdev->ops->vport_stop(edev, 0); 883 if (rc != 0) 884 return rc; 885 qede_dealloc_fp_resc(eth_dev); 886 } 887 888 qdev->fp_num_tx = eth_dev->data->nb_tx_queues; 889 qdev->fp_num_rx = eth_dev->data->nb_rx_queues; 890 qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx; 891 892 /* Fastpath status block should be initialized before sending 893 * VPORT-START in the case of VF. Anyway, do it for both VF/PF. 894 */ 895 rc = qede_alloc_fp_resc(qdev); 896 if (rc != 0) 897 return rc; 898 899 /* Issue VPORT-START with default config values to allow 900 * other port configurations early on. 901 */ 902 rc = qede_init_vport(qdev); 903 if (rc != 0) 904 return rc; 905 906 /* Do RSS configuration after vport-start */ 907 switch (rxmode->mq_mode) { 908 case ETH_MQ_RX_RSS: 909 rc = qede_config_rss(eth_dev); 910 if (rc != 0) { 911 qdev->ops->vport_stop(edev, 0); 912 qede_dealloc_fp_resc(eth_dev); 913 return -EINVAL; 914 } 915 break; 916 case ETH_MQ_RX_NONE: 917 DP_INFO(edev, "RSS is disabled\n"); 918 break; 919 default: 920 DP_ERR(edev, "Unsupported RSS mode\n"); 921 qdev->ops->vport_stop(edev, 0); 922 qede_dealloc_fp_resc(eth_dev); 923 return -EINVAL; 924 } 925 926 SLIST_INIT(&qdev->vlan_list_head); 927 928 /* Add primary mac for PF */ 929 if (IS_PF(edev)) 930 qede_mac_addr_set(eth_dev, &qdev->primary_mac); 931 932 /* Enable VLAN offloads by default */ 933 qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | 934 ETH_VLAN_FILTER_MASK | 935 ETH_VLAN_EXTEND_MASK); 936 937 qdev->state = QEDE_DEV_CONFIG; 938 939 DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n", 940 (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev), 941 qdev->num_tc); 942 943 return 0; 944 } 945 946 /* Info about HW descriptor ring limitations */ 947 static const struct rte_eth_desc_lim qede_rx_desc_lim = { 948 .nb_max = NUM_RX_BDS_MAX, 949 .nb_min = 128, 950 .nb_align = 128 /* lowest common multiple */ 951 }; 952 953 static const struct rte_eth_desc_lim qede_tx_desc_lim = { 954 .nb_max = NUM_TX_BDS_MAX, 955 .nb_min = 256, 956 .nb_align = 256 957 }; 958 959 static void 960 qede_dev_info_get(struct rte_eth_dev *eth_dev, 961 struct rte_eth_dev_info *dev_info) 962 { 963 struct qede_dev *qdev = eth_dev->data->dev_private; 964 struct ecore_dev *edev = &qdev->edev; 965 struct qed_link_output link; 966 uint32_t speed_cap = 0; 967 968 PMD_INIT_FUNC_TRACE(edev); 969 970 dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 971 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; 972 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; 973 dev_info->rx_desc_lim = qede_rx_desc_lim; 974 dev_info->tx_desc_lim = qede_tx_desc_lim; 975 976 if (IS_PF(edev)) 977 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 978 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); 979 
else 980 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 981 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); 982 dev_info->max_tx_queues = dev_info->max_rx_queues; 983 984 dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs; 985 dev_info->max_vfs = 0; 986 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; 987 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 988 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; 989 990 dev_info->default_txconf = (struct rte_eth_txconf) { 991 .txq_flags = QEDE_TXQ_FLAGS, 992 }; 993 994 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP | 995 DEV_RX_OFFLOAD_IPV4_CKSUM | 996 DEV_RX_OFFLOAD_UDP_CKSUM | 997 DEV_RX_OFFLOAD_TCP_CKSUM | 998 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM); 999 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | 1000 DEV_TX_OFFLOAD_IPV4_CKSUM | 1001 DEV_TX_OFFLOAD_UDP_CKSUM | 1002 DEV_TX_OFFLOAD_TCP_CKSUM | 1003 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM); 1004 1005 memset(&link, 0, sizeof(struct qed_link_output)); 1006 qdev->ops->common->get_link(edev, &link); 1007 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1008 speed_cap |= ETH_LINK_SPEED_1G; 1009 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1010 speed_cap |= ETH_LINK_SPEED_10G; 1011 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1012 speed_cap |= ETH_LINK_SPEED_25G; 1013 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1014 speed_cap |= ETH_LINK_SPEED_40G; 1015 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1016 speed_cap |= ETH_LINK_SPEED_50G; 1017 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1018 speed_cap |= ETH_LINK_SPEED_100G; 1019 dev_info->speed_capa = speed_cap; 1020 } 1021 1022 /* return 0 means link status changed, -1 means not changed */ 1023 static int 1024 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) 1025 { 1026 struct qede_dev *qdev = eth_dev->data->dev_private; 1027 struct ecore_dev *edev = &qdev->edev; 1028 uint16_t link_duplex; 1029 struct qed_link_output link; 1030 struct rte_eth_link *curr = ð_dev->data->dev_link; 1031 1032 memset(&link, 0, sizeof(struct qed_link_output)); 1033 qdev->ops->common->get_link(edev, &link); 1034 1035 /* Link Speed */ 1036 curr->link_speed = link.speed; 1037 1038 /* Link Mode */ 1039 switch (link.duplex) { 1040 case QEDE_DUPLEX_HALF: 1041 link_duplex = ETH_LINK_HALF_DUPLEX; 1042 break; 1043 case QEDE_DUPLEX_FULL: 1044 link_duplex = ETH_LINK_FULL_DUPLEX; 1045 break; 1046 case QEDE_DUPLEX_UNKNOWN: 1047 default: 1048 link_duplex = -1; 1049 } 1050 curr->link_duplex = link_duplex; 1051 1052 /* Link Status */ 1053 curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN; 1054 1055 /* AN */ 1056 curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 1057 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 1058 1059 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", 1060 curr->link_speed, curr->link_duplex, 1061 curr->link_autoneg, curr->link_status); 1062 1063 /* return 0 means link status changed, -1 means not changed */ 1064 return ((curr->link_status == link.link_up) ? 
-1 : 0); 1065 } 1066 1067 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) 1068 { 1069 struct qede_dev *qdev = eth_dev->data->dev_private; 1070 struct ecore_dev *edev = &qdev->edev; 1071 1072 PMD_INIT_FUNC_TRACE(edev); 1073 1074 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1075 1076 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1077 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1078 1079 qed_configure_filter_rx_mode(eth_dev, type); 1080 } 1081 1082 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev) 1083 { 1084 struct qede_dev *qdev = eth_dev->data->dev_private; 1085 struct ecore_dev *edev = &qdev->edev; 1086 1087 PMD_INIT_FUNC_TRACE(edev); 1088 1089 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1090 qed_configure_filter_rx_mode(eth_dev, 1091 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); 1092 else 1093 qed_configure_filter_rx_mode(eth_dev, 1094 QED_FILTER_RX_MODE_TYPE_REGULAR); 1095 } 1096 1097 static void qede_poll_sp_sb_cb(void *param) 1098 { 1099 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 1100 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1101 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1102 int rc; 1103 1104 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 1105 qede_interrupt_action(&edev->hwfns[1]); 1106 1107 rc = rte_eal_alarm_set(timer_period * US_PER_S, 1108 qede_poll_sp_sb_cb, 1109 (void *)eth_dev); 1110 if (rc != 0) { 1111 DP_ERR(edev, "Unable to start periodic" 1112 " timer rc %d\n", rc); 1113 assert(false && "Unable to start periodic timer"); 1114 } 1115 } 1116 1117 static void qede_dev_close(struct rte_eth_dev *eth_dev) 1118 { 1119 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 1120 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1121 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1122 int rc; 1123 1124 PMD_INIT_FUNC_TRACE(edev); 1125 1126 /* dev_stop() shall cleanup fp resources in hw but without releasing 1127 * dma memories and sw structures so that dev_start() can be called 1128 * by the app without reconfiguration. 
However, in dev_close() we 1129 * can release all the resources and device can be brought up newly 1130 */ 1131 if (qdev->state != QEDE_DEV_STOP) 1132 qede_dev_stop(eth_dev); 1133 else 1134 DP_INFO(edev, "Device is already stopped\n"); 1135 1136 rc = qdev->ops->vport_stop(edev, 0); 1137 if (rc != 0) 1138 DP_ERR(edev, "Failed to stop VPORT\n"); 1139 1140 qede_dealloc_fp_resc(eth_dev); 1141 1142 qdev->ops->common->slowpath_stop(edev); 1143 1144 qdev->ops->common->remove(edev); 1145 1146 rte_intr_disable(&pci_dev->intr_handle); 1147 1148 rte_intr_callback_unregister(&pci_dev->intr_handle, 1149 qede_interrupt_handler, (void *)eth_dev); 1150 1151 if (edev->num_hwfns > 1) 1152 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); 1153 1154 qdev->state = QEDE_DEV_INIT; /* Go back to init state */ 1155 } 1156 1157 static void 1158 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) 1159 { 1160 struct qede_dev *qdev = eth_dev->data->dev_private; 1161 struct ecore_dev *edev = &qdev->edev; 1162 struct ecore_eth_stats stats; 1163 unsigned int i = 0, j = 0, qid; 1164 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 1165 struct qede_tx_queue *txq; 1166 1167 qdev->ops->get_vport_stats(edev, &stats); 1168 1169 /* RX Stats */ 1170 eth_stats->ipackets = stats.rx_ucast_pkts + 1171 stats.rx_mcast_pkts + stats.rx_bcast_pkts; 1172 1173 eth_stats->ibytes = stats.rx_ucast_bytes + 1174 stats.rx_mcast_bytes + stats.rx_bcast_bytes; 1175 1176 eth_stats->ierrors = stats.rx_crc_errors + 1177 stats.rx_align_errors + 1178 stats.rx_carrier_errors + 1179 stats.rx_oversize_packets + 1180 stats.rx_jabbers + stats.rx_undersize_packets; 1181 1182 eth_stats->rx_nombuf = stats.no_buff_discards; 1183 1184 eth_stats->imissed = stats.mftag_filter_discards + 1185 stats.mac_filter_discards + 1186 stats.no_buff_discards + stats.brb_truncates + stats.brb_discards; 1187 1188 /* TX stats */ 1189 eth_stats->opackets = stats.tx_ucast_pkts + 1190 stats.tx_mcast_pkts + stats.tx_bcast_pkts; 1191 1192 eth_stats->obytes = stats.tx_ucast_bytes + 1193 stats.tx_mcast_bytes + stats.tx_bcast_bytes; 1194 1195 eth_stats->oerrors = stats.tx_err_drop_pkts; 1196 1197 /* Queue stats */ 1198 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1199 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1200 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev), 1201 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1202 if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) || 1203 (txq_stat_cntrs != QEDE_TSS_COUNT(qdev))) 1204 DP_VERBOSE(edev, ECORE_MSG_DEBUG, 1205 "Not all the queue stats will be displayed. 
Set" 1206 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1207 " appropriately and retry.\n"); 1208 1209 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { 1210 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { 1211 eth_stats->q_ipackets[i] = 1212 *(uint64_t *)( 1213 ((char *)(qdev->fp_array[(qid)].rxq)) + 1214 offsetof(struct qede_rx_queue, 1215 rcv_pkts)); 1216 eth_stats->q_errors[i] = 1217 *(uint64_t *)( 1218 ((char *)(qdev->fp_array[(qid)].rxq)) + 1219 offsetof(struct qede_rx_queue, 1220 rx_hw_errors)) + 1221 *(uint64_t *)( 1222 ((char *)(qdev->fp_array[(qid)].rxq)) + 1223 offsetof(struct qede_rx_queue, 1224 rx_alloc_errors)); 1225 i++; 1226 } 1227 if (i == rxq_stat_cntrs) 1228 break; 1229 } 1230 1231 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { 1232 if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) { 1233 txq = qdev->fp_array[(qid)].txqs[0]; 1234 eth_stats->q_opackets[j] = 1235 *((uint64_t *)(uintptr_t) 1236 (((uint64_t)(uintptr_t)(txq)) + 1237 offsetof(struct qede_tx_queue, 1238 xmit_pkts))); 1239 j++; 1240 } 1241 if (j == txq_stat_cntrs) 1242 break; 1243 } 1244 } 1245 1246 static unsigned 1247 qede_get_xstats_count(struct qede_dev *qdev) { 1248 return RTE_DIM(qede_xstats_strings) + 1249 (RTE_DIM(qede_rxq_xstats_strings) * 1250 RTE_MIN(QEDE_RSS_COUNT(qdev), 1251 RTE_ETHDEV_QUEUE_STAT_CNTRS)); 1252 } 1253 1254 static int 1255 qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev, 1256 struct rte_eth_xstat_name *xstats_names, unsigned limit) 1257 { 1258 struct qede_dev *qdev = dev->data->dev_private; 1259 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1260 unsigned int i, qid, stat_idx = 0; 1261 unsigned int rxq_stat_cntrs; 1262 1263 if (xstats_names != NULL) { 1264 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1265 snprintf(xstats_names[stat_idx].name, 1266 sizeof(xstats_names[stat_idx].name), 1267 "%s", 1268 qede_xstats_strings[i].name); 1269 stat_idx++; 1270 } 1271 1272 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1273 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1274 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1275 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1276 snprintf(xstats_names[stat_idx].name, 1277 sizeof(xstats_names[stat_idx].name), 1278 "%.4s%d%s", 1279 qede_rxq_xstats_strings[i].name, qid, 1280 qede_rxq_xstats_strings[i].name + 4); 1281 stat_idx++; 1282 } 1283 } 1284 } 1285 1286 return stat_cnt; 1287 } 1288 1289 static int 1290 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1291 unsigned int n) 1292 { 1293 struct qede_dev *qdev = dev->data->dev_private; 1294 struct ecore_dev *edev = &qdev->edev; 1295 struct ecore_eth_stats stats; 1296 const unsigned int num = qede_get_xstats_count(qdev); 1297 unsigned int i, qid, stat_idx = 0; 1298 unsigned int rxq_stat_cntrs; 1299 1300 if (n < num) 1301 return num; 1302 1303 qdev->ops->get_vport_stats(edev, &stats); 1304 1305 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1306 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + 1307 qede_xstats_strings[i].offset); 1308 xstats[stat_idx].id = stat_idx; 1309 stat_idx++; 1310 } 1311 1312 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1313 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1314 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1315 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { 1316 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1317 xstats[stat_idx].value = *(uint64_t *)( 1318 ((char *)(qdev->fp_array[(qid)].rxq)) + 1319 qede_rxq_xstats_strings[i].offset); 1320 xstats[stat_idx].id = stat_idx; 1321 stat_idx++; 1322 } 1323 } 
1324 } 1325 1326 return stat_idx; 1327 } 1328 1329 static void 1330 qede_reset_xstats(struct rte_eth_dev *dev) 1331 { 1332 struct qede_dev *qdev = dev->data->dev_private; 1333 struct ecore_dev *edev = &qdev->edev; 1334 1335 ecore_reset_vport_stats(edev); 1336 } 1337 1338 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) 1339 { 1340 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1341 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1342 struct qed_link_params link_params; 1343 int rc; 1344 1345 DP_INFO(edev, "setting link state %d\n", link_up); 1346 memset(&link_params, 0, sizeof(link_params)); 1347 link_params.link_up = link_up; 1348 rc = qdev->ops->common->set_link(edev, &link_params); 1349 if (rc != ECORE_SUCCESS) 1350 DP_ERR(edev, "Unable to set link state %d\n", link_up); 1351 1352 return rc; 1353 } 1354 1355 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) 1356 { 1357 return qede_dev_set_link_state(eth_dev, true); 1358 } 1359 1360 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) 1361 { 1362 return qede_dev_set_link_state(eth_dev, false); 1363 } 1364 1365 static void qede_reset_stats(struct rte_eth_dev *eth_dev) 1366 { 1367 struct qede_dev *qdev = eth_dev->data->dev_private; 1368 struct ecore_dev *edev = &qdev->edev; 1369 1370 ecore_reset_vport_stats(edev); 1371 } 1372 1373 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev) 1374 { 1375 enum qed_filter_rx_mode_type type = 1376 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1377 1378 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1379 type |= QED_FILTER_RX_MODE_TYPE_PROMISC; 1380 1381 qed_configure_filter_rx_mode(eth_dev, type); 1382 } 1383 1384 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev) 1385 { 1386 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1387 qed_configure_filter_rx_mode(eth_dev, 1388 QED_FILTER_RX_MODE_TYPE_PROMISC); 1389 else 1390 qed_configure_filter_rx_mode(eth_dev, 1391 QED_FILTER_RX_MODE_TYPE_REGULAR); 1392 } 1393 1394 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, 1395 struct rte_eth_fc_conf *fc_conf) 1396 { 1397 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1398 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1399 struct qed_link_output current_link; 1400 struct qed_link_params params; 1401 1402 memset(¤t_link, 0, sizeof(current_link)); 1403 qdev->ops->common->get_link(edev, ¤t_link); 1404 1405 memset(¶ms, 0, sizeof(params)); 1406 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; 1407 if (fc_conf->autoneg) { 1408 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { 1409 DP_ERR(edev, "Autoneg not supported\n"); 1410 return -EINVAL; 1411 } 1412 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1413 } 1414 1415 /* Pause is assumed to be supported (SUPPORTED_Pause) */ 1416 if (fc_conf->mode == RTE_FC_FULL) 1417 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | 1418 QED_LINK_PAUSE_RX_ENABLE); 1419 if (fc_conf->mode == RTE_FC_TX_PAUSE) 1420 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1421 if (fc_conf->mode == RTE_FC_RX_PAUSE) 1422 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1423 1424 params.link_up = true; 1425 (void)qdev->ops->common->set_link(edev, ¶ms); 1426 1427 return 0; 1428 } 1429 1430 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, 1431 struct rte_eth_fc_conf *fc_conf) 1432 { 1433 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1434 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1435 struct qed_link_output current_link; 1436 1437 memset(¤t_link, 0, 
sizeof(current_link)); 1438 qdev->ops->common->get_link(edev, ¤t_link); 1439 1440 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1441 fc_conf->autoneg = true; 1442 1443 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | 1444 QED_LINK_PAUSE_TX_ENABLE)) 1445 fc_conf->mode = RTE_FC_FULL; 1446 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) 1447 fc_conf->mode = RTE_FC_RX_PAUSE; 1448 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) 1449 fc_conf->mode = RTE_FC_TX_PAUSE; 1450 else 1451 fc_conf->mode = RTE_FC_NONE; 1452 1453 return 0; 1454 } 1455 1456 static const uint32_t * 1457 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) 1458 { 1459 static const uint32_t ptypes[] = { 1460 RTE_PTYPE_L3_IPV4, 1461 RTE_PTYPE_L3_IPV6, 1462 RTE_PTYPE_UNKNOWN 1463 }; 1464 1465 if (eth_dev->rx_pkt_burst == qede_recv_pkts) 1466 return ptypes; 1467 1468 return NULL; 1469 } 1470 1471 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) 1472 { 1473 *rss_caps = 0; 1474 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; 1475 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; 1476 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; 1477 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; 1478 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; 1479 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; 1480 } 1481 1482 static int qede_rss_hash_update(struct rte_eth_dev *eth_dev, 1483 struct rte_eth_rss_conf *rss_conf) 1484 { 1485 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1486 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1487 struct ecore_sp_vport_update_params vport_update_params; 1488 struct ecore_rss_params rss_params; 1489 struct ecore_rss_params params; 1490 struct ecore_hwfn *p_hwfn; 1491 uint32_t *key = (uint32_t *)rss_conf->rss_key; 1492 uint64_t hf = rss_conf->rss_hf; 1493 uint8_t len = rss_conf->rss_key_len; 1494 uint8_t i; 1495 int rc; 1496 1497 memset(&vport_update_params, 0, sizeof(vport_update_params)); 1498 memset(&rss_params, 0, sizeof(rss_params)); 1499 1500 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n", 1501 (unsigned long)hf, len, key); 1502 1503 if (hf != 0) { 1504 /* Enabling RSS */ 1505 DP_INFO(edev, "Enabling rss\n"); 1506 1507 /* RSS caps */ 1508 qede_init_rss_caps(&rss_params.rss_caps, hf); 1509 rss_params.update_rss_capabilities = 1; 1510 1511 /* RSS hash key */ 1512 if (key) { 1513 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) { 1514 DP_ERR(edev, "RSS key length exceeds limit\n"); 1515 return -EINVAL; 1516 } 1517 DP_INFO(edev, "Applying user supplied hash key\n"); 1518 rss_params.update_rss_key = 1; 1519 memcpy(&rss_params.rss_key, key, len); 1520 } 1521 rss_params.rss_enable = 1; 1522 } 1523 1524 rss_params.update_rss_config = 1; 1525 /* tbl_size has to be set with capabilities */ 1526 rss_params.rss_table_size_log = 7; 1527 vport_update_params.vport_id = 0; 1528 vport_update_params.rss_params = &rss_params; 1529 1530 for_each_hwfn(edev, i) { 1531 p_hwfn = &edev->hwfns[i]; 1532 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1533 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 1534 ECORE_SPQ_MODE_EBLOCK, NULL); 1535 if (rc) { 1536 DP_ERR(edev, "vport-update for RSS failed\n"); 1537 return rc; 1538 } 1539 } 1540 qdev->rss_enable = rss_params.rss_enable; 1541 1542 /* Update local structure for hash query */ 1543 qdev->rss_conf.rss_hf = hf; 1544 qdev->rss_conf.rss_key_len = len; 1545 if (qdev->rss_enable) { 1546 if 
(qdev->rss_conf.rss_key == NULL) { 1547 qdev->rss_conf.rss_key = (uint8_t *)malloc(len); 1548 if (qdev->rss_conf.rss_key == NULL) { 1549 DP_ERR(edev, "No memory to store RSS key\n"); 1550 return -ENOMEM; 1551 } 1552 } 1553 if (key && len) { 1554 DP_INFO(edev, "Storing RSS key\n"); 1555 memcpy(qdev->rss_conf.rss_key, key, len); 1556 } 1557 } else if (!qdev->rss_enable && len == 0) { 1558 if (qdev->rss_conf.rss_key) { 1559 free(qdev->rss_conf.rss_key); 1560 qdev->rss_conf.rss_key = NULL; 1561 DP_INFO(edev, "Free RSS key\n"); 1562 } 1563 } 1564 1565 return 0; 1566 } 1567 1568 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 1569 struct rte_eth_rss_conf *rss_conf) 1570 { 1571 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1572 1573 rss_conf->rss_hf = qdev->rss_conf.rss_hf; 1574 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len; 1575 1576 if (rss_conf->rss_key && qdev->rss_conf.rss_key) 1577 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key, 1578 rss_conf->rss_key_len); 1579 return 0; 1580 } 1581 1582 static int qede_rss_reta_update(struct rte_eth_dev *eth_dev, 1583 struct rte_eth_rss_reta_entry64 *reta_conf, 1584 uint16_t reta_size) 1585 { 1586 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1587 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1588 struct ecore_sp_vport_update_params vport_update_params; 1589 struct ecore_rss_params params; 1590 struct ecore_hwfn *p_hwfn; 1591 uint16_t i, idx, shift; 1592 uint8_t entry; 1593 int rc; 1594 1595 if (reta_size > ETH_RSS_RETA_SIZE_128) { 1596 DP_ERR(edev, "reta_size %d is not supported by hardware\n", 1597 reta_size); 1598 return -EINVAL; 1599 } 1600 1601 memset(&vport_update_params, 0, sizeof(vport_update_params)); 1602 memset(¶ms, 0, sizeof(params)); 1603 1604 for (i = 0; i < reta_size; i++) { 1605 idx = i / RTE_RETA_GROUP_SIZE; 1606 shift = i % RTE_RETA_GROUP_SIZE; 1607 if (reta_conf[idx].mask & (1ULL << shift)) { 1608 entry = reta_conf[idx].reta[shift]; 1609 params.rss_ind_table[i] = entry; 1610 } 1611 } 1612 1613 /* Fix up RETA for CMT mode device */ 1614 if (edev->num_hwfns > 1) 1615 qdev->rss_enable = qed_update_rss_parm_cmt(edev, 1616 ¶ms.rss_ind_table[0]); 1617 params.update_rss_ind_table = 1; 1618 params.rss_table_size_log = 7; 1619 params.update_rss_config = 1; 1620 vport_update_params.vport_id = 0; 1621 /* Use the current value of rss_enable */ 1622 params.rss_enable = qdev->rss_enable; 1623 vport_update_params.rss_params = ¶ms; 1624 1625 for_each_hwfn(edev, i) { 1626 p_hwfn = &edev->hwfns[i]; 1627 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1628 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 1629 ECORE_SPQ_MODE_EBLOCK, NULL); 1630 if (rc) { 1631 DP_ERR(edev, "vport-update for RSS failed\n"); 1632 return rc; 1633 } 1634 } 1635 1636 /* Update the local copy for RETA query command */ 1637 memcpy(qdev->rss_ind_table, params.rss_ind_table, 1638 sizeof(params.rss_ind_table)); 1639 1640 return 0; 1641 } 1642 1643 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, 1644 struct rte_eth_rss_reta_entry64 *reta_conf, 1645 uint16_t reta_size) 1646 { 1647 struct qede_dev *qdev = eth_dev->data->dev_private; 1648 struct ecore_dev *edev = &qdev->edev; 1649 uint16_t i, idx, shift; 1650 uint8_t entry; 1651 1652 if (reta_size > ETH_RSS_RETA_SIZE_128) { 1653 DP_ERR(edev, "reta_size %d is not supported\n", 1654 reta_size); 1655 return -EINVAL; 1656 } 1657 1658 for (i = 0; i < reta_size; i++) { 1659 idx = i / RTE_RETA_GROUP_SIZE; 1660 shift = i % RTE_RETA_GROUP_SIZE; 1661 if (reta_conf[idx].mask & (1ULL << 
shift)) { 1662 entry = qdev->rss_ind_table[i]; 1663 reta_conf[idx].reta[shift] = entry; 1664 } 1665 } 1666 1667 return 0; 1668 } 1669 1670 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 1671 { 1672 uint32_t frame_size; 1673 struct qede_dev *qdev = dev->data->dev_private; 1674 struct rte_eth_dev_info dev_info = {0}; 1675 1676 qede_dev_info_get(dev, &dev_info); 1677 1678 /* VLAN_TAG = 4 */ 1679 frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4; 1680 1681 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) 1682 return -EINVAL; 1683 1684 if (!dev->data->scattered_rx && 1685 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) 1686 return -EINVAL; 1687 1688 if (frame_size > ETHER_MAX_LEN) 1689 dev->data->dev_conf.rxmode.jumbo_frame = 1; 1690 else 1691 dev->data->dev_conf.rxmode.jumbo_frame = 0; 1692 1693 /* update max frame size */ 1694 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 1695 qdev->mtu = mtu; 1696 qede_dev_stop(dev); 1697 qede_dev_start(dev); 1698 1699 return 0; 1700 } 1701 1702 static int 1703 qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev, 1704 struct rte_eth_udp_tunnel *tunnel_udp, 1705 bool add) 1706 { 1707 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1708 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1709 struct ecore_tunn_update_params params; 1710 struct ecore_hwfn *p_hwfn; 1711 int rc, i; 1712 1713 PMD_INIT_FUNC_TRACE(edev); 1714 1715 memset(¶ms, 0, sizeof(params)); 1716 if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) { 1717 params.update_vxlan_udp_port = 1; 1718 params.vxlan_udp_port = (add) ? tunnel_udp->udp_port : 1719 QEDE_VXLAN_DEF_PORT; 1720 for_each_hwfn(edev, i) { 1721 p_hwfn = &edev->hwfns[i]; 1722 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, ¶ms, 1723 ECORE_SPQ_MODE_CB, NULL); 1724 if (rc != ECORE_SUCCESS) { 1725 DP_ERR(edev, "Unable to config UDP port %u\n", 1726 params.vxlan_udp_port); 1727 return rc; 1728 } 1729 } 1730 } 1731 1732 return 0; 1733 } 1734 1735 int 1736 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev, 1737 struct rte_eth_udp_tunnel *tunnel_udp) 1738 { 1739 return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false); 1740 } 1741 1742 int 1743 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, 1744 struct rte_eth_udp_tunnel *tunnel_udp) 1745 { 1746 return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true); 1747 } 1748 1749 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type, 1750 uint32_t *clss, char *str) 1751 { 1752 uint16_t j; 1753 *clss = MAX_ECORE_TUNN_CLSS; 1754 1755 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) { 1756 if (filter == qede_tunn_types[j].rte_filter_type) { 1757 *type = qede_tunn_types[j].qede_type; 1758 *clss = qede_tunn_types[j].qede_tunn_clss; 1759 strcpy(str, qede_tunn_types[j].string); 1760 return; 1761 } 1762 } 1763 } 1764 1765 static int 1766 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast, 1767 const struct rte_eth_tunnel_filter_conf *conf, 1768 uint32_t type) 1769 { 1770 /* Init commmon ucast params first */ 1771 qede_set_ucast_cmn_params(ucast); 1772 1773 /* Copy out the required fields based on classification type */ 1774 ucast->type = type; 1775 1776 switch (type) { 1777 case ECORE_FILTER_VNI: 1778 ucast->vni = conf->tenant_id; 1779 break; 1780 case ECORE_FILTER_INNER_VLAN: 1781 ucast->vlan = conf->inner_vlan; 1782 break; 1783 case ECORE_FILTER_MAC: 1784 memcpy(ucast->mac, conf->outer_mac.addr_bytes, 1785 ETHER_ADDR_LEN); 1786 break; 1787 case ECORE_FILTER_INNER_MAC: 1788 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 1789 
ETHER_ADDR_LEN); 1790 break; 1791 case ECORE_FILTER_MAC_VNI_PAIR: 1792 memcpy(ucast->mac, conf->outer_mac.addr_bytes, 1793 ETHER_ADDR_LEN); 1794 ucast->vni = conf->tenant_id; 1795 break; 1796 case ECORE_FILTER_INNER_MAC_VNI_PAIR: 1797 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 1798 ETHER_ADDR_LEN); 1799 ucast->vni = conf->tenant_id; 1800 break; 1801 case ECORE_FILTER_INNER_PAIR: 1802 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 1803 ETHER_ADDR_LEN); 1804 ucast->vlan = conf->inner_vlan; 1805 break; 1806 default: 1807 return -EINVAL; 1808 } 1809 1810 return ECORE_SUCCESS; 1811 } 1812 1813 static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev, 1814 enum rte_filter_op filter_op, 1815 const struct rte_eth_tunnel_filter_conf *conf) 1816 { 1817 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1818 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1819 struct ecore_tunn_update_params params; 1820 struct ecore_hwfn *p_hwfn; 1821 enum ecore_filter_ucast_type type; 1822 enum ecore_tunn_clss clss; 1823 struct ecore_filter_ucast ucast; 1824 char str[80]; 1825 uint16_t filter_type; 1826 int rc, i; 1827 1828 filter_type = conf->filter_type | qdev->vxlan_filter_type; 1829 /* First determine if the given filter classification is supported */ 1830 qede_get_ecore_tunn_params(filter_type, &type, &clss, str); 1831 if (clss == MAX_ECORE_TUNN_CLSS) { 1832 DP_ERR(edev, "Wrong filter type\n"); 1833 return -EINVAL; 1834 } 1835 /* Init tunnel ucast params */ 1836 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type); 1837 if (rc != ECORE_SUCCESS) { 1838 DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n", 1839 conf->filter_type); 1840 return rc; 1841 } 1842 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n", 1843 str, filter_op, ucast.type); 1844 switch (filter_op) { 1845 case RTE_ETH_FILTER_ADD: 1846 ucast.opcode = ECORE_FILTER_ADD; 1847 1848 /* Skip MAC/VLAN if filter is based on VNI */ 1849 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { 1850 rc = qede_mac_int_ops(eth_dev, &ucast, 1); 1851 if (rc == 0) { 1852 /* Enable accept anyvlan */ 1853 qede_config_accept_any_vlan(qdev, true); 1854 } 1855 } else { 1856 rc = qede_ucast_filter(eth_dev, &ucast, 1); 1857 if (rc == 0) 1858 rc = ecore_filter_ucast_cmd(edev, &ucast, 1859 ECORE_SPQ_MODE_CB, NULL); 1860 } 1861 1862 if (rc != ECORE_SUCCESS) 1863 return rc; 1864 1865 qdev->vxlan_filter_type = filter_type; 1866 1867 DP_INFO(edev, "Enabling VXLAN tunneling\n"); 1868 qede_set_cmn_tunn_param(¶ms, clss, 1869 (1 << ECORE_MODE_VXLAN_TUNN), 1870 (1 << ECORE_MODE_VXLAN_TUNN)); 1871 for_each_hwfn(edev, i) { 1872 p_hwfn = &edev->hwfns[i]; 1873 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, 1874 ¶ms, ECORE_SPQ_MODE_CB, NULL); 1875 if (rc != ECORE_SUCCESS) { 1876 DP_ERR(edev, "Failed to update tunn_clss %u\n", 1877 params.tunn_clss_vxlan); 1878 } 1879 } 1880 qdev->num_tunn_filters++; /* Filter added successfully */ 1881 break; 1882 case RTE_ETH_FILTER_DELETE: 1883 ucast.opcode = ECORE_FILTER_REMOVE; 1884 1885 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { 1886 rc = qede_mac_int_ops(eth_dev, &ucast, 0); 1887 } else { 1888 rc = qede_ucast_filter(eth_dev, &ucast, 0); 1889 if (rc == 0) 1890 rc = ecore_filter_ucast_cmd(edev, &ucast, 1891 ECORE_SPQ_MODE_CB, NULL); 1892 } 1893 if (rc != ECORE_SUCCESS) 1894 return rc; 1895 1896 qdev->vxlan_filter_type = filter_type; 1897 qdev->num_tunn_filters--; 1898 1899 /* Disable VXLAN if VXLAN filters become 0 */ 1900 if (qdev->num_tunn_filters == 0) { 1901 DP_INFO(edev, "Disabling VXLAN tunneling\n"); 1902 1903 /* Use 0 as tunnel mode */ 1904 
static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
				  enum rte_filter_op filter_op,
				  const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunn_update_params params;
	struct ecore_hwfn *p_hwfn;
	enum ecore_filter_ucast_type type;
	enum ecore_tunn_clss clss;
	struct ecore_filter_ucast ucast;
	char str[80];
	uint16_t filter_type;
	int rc, i;

	filter_type = conf->filter_type | qdev->vxlan_filter_type;
	/* First determine if the given filter classification is supported */
	qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
	if (clss == MAX_ECORE_TUNN_CLSS) {
		DP_ERR(edev, "Wrong filter type\n");
		return -EINVAL;
	}
	/* Init tunnel ucast params */
	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
		       conf->filter_type);
		return rc;
	}
	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
		str, filter_op, ucast.type);
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ucast.opcode = ECORE_FILTER_ADD;

		/* Skip MAC/VLAN if filter is based on VNI */
		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
			rc = qede_mac_int_ops(eth_dev, &ucast, 1);
			if (rc == 0) {
				/* Enable accept anyvlan */
				qede_config_accept_any_vlan(qdev, true);
			}
		} else {
			rc = qede_ucast_filter(eth_dev, &ucast, 1);
			if (rc == 0)
				rc = ecore_filter_ucast_cmd(edev, &ucast,
						ECORE_SPQ_MODE_CB, NULL);
		}

		if (rc != ECORE_SUCCESS)
			return rc;

		qdev->vxlan_filter_type = filter_type;

		DP_INFO(edev, "Enabling VXLAN tunneling\n");
		qede_set_cmn_tunn_param(&params, clss,
					(1 << ECORE_MODE_VXLAN_TUNN),
					(1 << ECORE_MODE_VXLAN_TUNN));
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
					&params, ECORE_SPQ_MODE_CB, NULL);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to update tunn_clss %u\n",
				       params.tunn_clss_vxlan);
			}
		}
		qdev->num_tunn_filters++; /* Filter added successfully */
		break;
	case RTE_ETH_FILTER_DELETE:
		ucast.opcode = ECORE_FILTER_REMOVE;

		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
			rc = qede_mac_int_ops(eth_dev, &ucast, 0);
		} else {
			rc = qede_ucast_filter(eth_dev, &ucast, 0);
			if (rc == 0)
				rc = ecore_filter_ucast_cmd(edev, &ucast,
						ECORE_SPQ_MODE_CB, NULL);
		}
		if (rc != ECORE_SUCCESS)
			return rc;

		qdev->vxlan_filter_type = filter_type;
		qdev->num_tunn_filters--;

		/* Disable VXLAN if VXLAN filters become 0 */
		if (qdev->num_tunn_filters == 0) {
			DP_INFO(edev, "Disabling VXLAN tunneling\n");

			/* Use 0 as tunnel mode */
			qede_set_cmn_tunn_param(&params, clss, 0,
					(1 << ECORE_MODE_VXLAN_TUNN));
			for_each_hwfn(edev, i) {
				p_hwfn = &edev->hwfns[i];
				rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
					&params, ECORE_SPQ_MODE_CB, NULL);
				if (rc != ECORE_SUCCESS) {
					DP_ERR(edev,
					       "Failed to update tunn_clss %u\n",
					       params.tunn_clss_vxlan);
					break;
				}
			}
		}
		break;
	default:
		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
		return -EINVAL;
	}
	DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);

	return 0;
}

int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_tunnel_filter_conf *filter_conf =
			(struct rte_eth_tunnel_filter_conf *)arg;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		switch (filter_conf->tunnel_type) {
		case RTE_TUNNEL_TYPE_VXLAN:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with VXLAN tunneling");
			return qede_vxlan_tunn_config(eth_dev, filter_op,
						      filter_conf);
		/* Placeholders for future tunneling support */
		case RTE_TUNNEL_TYPE_GENEVE:
		case RTE_TUNNEL_TYPE_TEREDO:
		case RTE_TUNNEL_TYPE_NVGRE:
		case RTE_TUNNEL_TYPE_IP_IN_GRE:
		case RTE_L2_TUNNEL_TYPE_E_TAG:
			DP_ERR(edev, "Unsupported tunnel type %d\n",
			       filter_conf->tunnel_type);
			return -EINVAL;
		case RTE_TUNNEL_TYPE_NONE:
		default:
			return 0;
		}
		break;
	case RTE_ETH_FILTER_FDIR:
	case RTE_ETH_FILTER_MACVLAN:
	case RTE_ETH_FILTER_ETHERTYPE:
	case RTE_ETH_FILTER_FLEXIBLE:
	case RTE_ETH_FILTER_SYN:
	case RTE_ETH_FILTER_NTUPLE:
	case RTE_ETH_FILTER_HASH:
	case RTE_ETH_FILTER_L2_TUNNEL:
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",
		       filter_type);
		return -EINVAL;
	}

	return 0;
}
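
/*
 * Illustrative usage, not part of the driver: the VXLAN classification
 * above is reached through the generic filter-control API.  port_id
 * and the VNI value 100 are application-chosen examples.
 *
 *	struct rte_eth_tunnel_filter_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *	conf.filter_type = ETH_TUNNEL_FILTER_TENID;
 *	conf.tenant_id = 100;
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *				RTE_ETH_FILTER_ADD, &conf);
 */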
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};

static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
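
/*
 * Descriptive note only: qede_common_dev_init() below is shared by the
 * PF and VF init paths.  It probes the device through the ecore layer,
 * registers the slowpath interrupt handler, starts the slowpath,
 * queries device information, allocates the MAC address table and
 * finally selects the PF or VF eth_dev_ops table declared above.
 */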
"Starting qede probe\n"); 2113 2114 rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH, 2115 dp_module, dp_level, is_vf); 2116 2117 if (rc != 0) { 2118 DP_ERR(edev, "qede probe failed rc %d\n", rc); 2119 return -ENODEV; 2120 } 2121 2122 qede_update_pf_params(edev); 2123 2124 rte_intr_callback_register(&pci_dev->intr_handle, 2125 qede_interrupt_handler, (void *)eth_dev); 2126 2127 if (rte_intr_enable(&pci_dev->intr_handle)) { 2128 DP_ERR(edev, "rte_intr_enable() failed\n"); 2129 return -ENODEV; 2130 } 2131 2132 /* Start the Slowpath-process */ 2133 memset(¶ms, 0, sizeof(struct qed_slowpath_params)); 2134 params.int_mode = ECORE_INT_MODE_MSIX; 2135 params.drv_major = QEDE_PMD_VERSION_MAJOR; 2136 params.drv_minor = QEDE_PMD_VERSION_MINOR; 2137 params.drv_rev = QEDE_PMD_VERSION_REVISION; 2138 params.drv_eng = QEDE_PMD_VERSION_PATCH; 2139 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, 2140 QEDE_PMD_DRV_VER_STR_SIZE); 2141 2142 /* For CMT mode device do periodic polling for slowpath events. 2143 * This is required since uio device uses only one MSI-x 2144 * interrupt vector but we need one for each engine. 2145 */ 2146 if (edev->num_hwfns > 1 && IS_PF(edev)) { 2147 rc = rte_eal_alarm_set(timer_period * US_PER_S, 2148 qede_poll_sp_sb_cb, 2149 (void *)eth_dev); 2150 if (rc != 0) { 2151 DP_ERR(edev, "Unable to start periodic" 2152 " timer rc %d\n", rc); 2153 return -EINVAL; 2154 } 2155 } 2156 2157 rc = qed_ops->common->slowpath_start(edev, ¶ms); 2158 if (rc) { 2159 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc); 2160 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2161 (void *)eth_dev); 2162 return -ENODEV; 2163 } 2164 2165 rc = qed_ops->fill_dev_info(edev, &dev_info); 2166 if (rc) { 2167 DP_ERR(edev, "Cannot get device_info rc %d\n", rc); 2168 qed_ops->common->slowpath_stop(edev); 2169 qed_ops->common->remove(edev); 2170 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2171 (void *)eth_dev); 2172 return -ENODEV; 2173 } 2174 2175 qede_alloc_etherdev(adapter, &dev_info); 2176 2177 adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION); 2178 2179 if (!is_vf) 2180 adapter->dev_info.num_mac_addrs = 2181 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev), 2182 ECORE_MAC); 2183 else 2184 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev), 2185 &adapter->dev_info.num_mac_addrs); 2186 2187 /* Allocate memory for storing MAC addr */ 2188 eth_dev->data->mac_addrs = rte_zmalloc(edev->name, 2189 (ETHER_ADDR_LEN * 2190 adapter->dev_info.num_mac_addrs), 2191 RTE_CACHE_LINE_SIZE); 2192 2193 if (eth_dev->data->mac_addrs == NULL) { 2194 DP_ERR(edev, "Failed to allocate MAC address\n"); 2195 qed_ops->common->slowpath_stop(edev); 2196 qed_ops->common->remove(edev); 2197 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2198 (void *)eth_dev); 2199 return -ENOMEM; 2200 } 2201 2202 if (!is_vf) { 2203 ether_addr_copy((struct ether_addr *)edev->hwfns[0]. 
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};
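
/*
 * Descriptive note only: the two ID tables above are what the PCI
 * drivers declared below match against during EAL probing.  Probing
 * can be restricted to specific matching ports at runtime, for example
 * with the EAL PCI whitelist option (-w <PCI address>); the address
 * itself is deployment specific.
 */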
static struct eth_driver rte_qedevf_pmd = {
	.pci_drv = {
		.id_table = pci_id_qedevf_map,
		.drv_flags =
			RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = qedevf_eth_dev_init,
	.eth_dev_uninit = qedevf_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

static struct eth_driver rte_qede_pmd = {
	.pci_drv = {
		.id_table = pci_id_qede_map,
		.drv_flags =
			RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = qede_eth_dev_init,
	.eth_dev_uninit = qede_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");
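
/*
 * Descriptive note only: the KMOD_DEP strings above advertise the
 * kernel modules the PF and VF PMDs can work with; a device is
 * typically bound to one of those modules (for example with the
 * dpdk-devbind script) before rte_eal_init() probes it.
 */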