1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2016 - 2018 Cavium Inc. 3 * All rights reserved. 4 * www.cavium.com 5 */ 6 7 #include "bcm_osal.h" 8 9 #include "ecore.h" 10 #include "ecore_status.h" 11 #include "ecore_hsi_eth.h" 12 #include "ecore_chain.h" 13 #include "ecore_spq.h" 14 #include "ecore_init_fw_funcs.h" 15 #include "ecore_cxt.h" 16 #include "ecore_l2.h" 17 #include "ecore_sp_commands.h" 18 #include "ecore_gtt_reg_addr.h" 19 #include "ecore_iro.h" 20 #include "reg_addr.h" 21 #include "ecore_int.h" 22 #include "ecore_hw.h" 23 #include "ecore_vf.h" 24 #include "ecore_sriov.h" 25 #include "ecore_mcp.h" 26 27 #define ECORE_MAX_SGES_NUM 16 28 #define CRC32_POLY 0x1edc6f41 29 30 struct ecore_l2_info { 31 u32 queues; 32 u32 **pp_qid_usage; 33 34 /* The lock is meant to synchronize access to the qid usage */ 35 osal_mutex_t lock; 36 }; 37 38 enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn) 39 { 40 struct ecore_l2_info *p_l2_info; 41 u32 **pp_qids; 42 u32 i; 43 44 if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 45 return ECORE_SUCCESS; 46 47 p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info)); 48 if (!p_l2_info) 49 return ECORE_NOMEM; 50 p_hwfn->p_l2_info = p_l2_info; 51 52 if (IS_PF(p_hwfn->p_dev)) { 53 p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 54 } else { 55 u8 rx = 0, tx = 0; 56 57 ecore_vf_get_num_rxqs(p_hwfn, &rx); 58 ecore_vf_get_num_txqs(p_hwfn, &tx); 59 60 p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx); 61 } 62 63 pp_qids = OSAL_VZALLOC(p_hwfn->p_dev, 64 sizeof(unsigned long *) * 65 p_l2_info->queues); 66 if (pp_qids == OSAL_NULL) 67 return ECORE_NOMEM; 68 p_l2_info->pp_qid_usage = pp_qids; 69 70 for (i = 0; i < p_l2_info->queues; i++) { 71 pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev, 72 MAX_QUEUES_PER_QZONE / 8); 73 if (pp_qids[i] == OSAL_NULL) 74 return ECORE_NOMEM; 75 } 76 77 #ifdef CONFIG_ECORE_LOCK_ALLOC 78 if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock)) 79 return ECORE_NOMEM; 80 #endif 81 82 return ECORE_SUCCESS; 83 } 84 85 void ecore_l2_setup(struct ecore_hwfn *p_hwfn) 86 { 87 if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 88 return; 89 90 OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock); 91 } 92 93 void ecore_l2_free(struct ecore_hwfn *p_hwfn) 94 { 95 u32 i; 96 97 if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 98 return; 99 100 if (p_hwfn->p_l2_info == OSAL_NULL) 101 return; 102 103 if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL) 104 goto out_l2_info; 105 106 /* Free until hit first uninitialized entry */ 107 for (i = 0; i < p_hwfn->p_l2_info->queues; i++) { 108 if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL) 109 break; 110 OSAL_VFREE(p_hwfn->p_dev, 111 p_hwfn->p_l2_info->pp_qid_usage[i]); 112 p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL; 113 } 114 115 #ifdef CONFIG_ECORE_LOCK_ALLOC 116 /* Lock is last to initialize, if everything else was */ 117 if (i == p_hwfn->p_l2_info->queues) 118 OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock); 119 #endif 120 121 OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage); 122 p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL; 123 124 out_l2_info: 125 OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info); 126 p_hwfn->p_l2_info = OSAL_NULL; 127 } 128 129 /* TODO - we'll need locking around these... */ 130 static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn, 131 struct ecore_queue_cid *p_cid) 132 { 133 struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info; 134 u16 queue_id = p_cid->rel.queue_id; 135 bool b_rc = true; 136 u8 first; 137 138 OSAL_MUTEX_ACQUIRE(&p_l2_info->lock); 139 140 if (queue_id > p_l2_info->queues) { 141 DP_NOTICE(p_hwfn, true, 142 "Requested to increase usage for qzone %04x out of %08x\n", 143 queue_id, p_l2_info->queues); 144 b_rc = false; 145 goto out; 146 } 147 148 first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id], 149 MAX_QUEUES_PER_QZONE); 150 if (first >= MAX_QUEUES_PER_QZONE) { 151 b_rc = false; 152 goto out; 153 } 154 155 OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]); 156 p_cid->qid_usage_idx = first; 157 158 out: 159 OSAL_MUTEX_RELEASE(&p_l2_info->lock); 160 return b_rc; 161 } 162 163 static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn, 164 struct ecore_queue_cid *p_cid) 165 { 166 OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock); 167 168 OSAL_CLEAR_BIT(p_cid->qid_usage_idx, 169 p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]); 170 171 OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock); 172 } 173 174 void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn, 175 struct ecore_queue_cid *p_cid) 176 { 177 bool b_legacy_vf = !!(p_cid->vf_legacy & 178 ECORE_QCID_LEGACY_VF_CID); 179 180 /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF. 181 * For legacy vf-queues, the CID doesn't go through here. 182 */ 183 if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) 184 _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); 185 186 /* VFs maintain the index inside queue-zone on their own */ 187 if (p_cid->vfid == ECORE_QUEUE_CID_PF) 188 ecore_eth_queue_qid_usage_del(p_hwfn, p_cid); 189 190 OSAL_VFREE(p_hwfn->p_dev, p_cid); 191 } 192 193 /* The internal is only meant to be directly called by PFs initializeing CIDs 194 * for their VFs. 195 */ 196 static struct ecore_queue_cid * 197 _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, 198 u16 opaque_fid, u32 cid, 199 struct ecore_queue_start_common_params *p_params, 200 bool b_is_rx, 201 struct ecore_queue_cid_vf_params *p_vf_params) 202 { 203 struct ecore_queue_cid *p_cid; 204 enum _ecore_status_t rc; 205 206 p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid)); 207 if (p_cid == OSAL_NULL) 208 return OSAL_NULL; 209 210 p_cid->opaque_fid = opaque_fid; 211 p_cid->cid = cid; 212 p_cid->p_owner = p_hwfn; 213 214 /* Fill in parameters */ 215 p_cid->rel.vport_id = p_params->vport_id; 216 p_cid->rel.queue_id = p_params->queue_id; 217 p_cid->rel.stats_id = p_params->stats_id; 218 p_cid->sb_igu_id = p_params->p_sb->igu_sb_id; 219 p_cid->b_is_rx = b_is_rx; 220 p_cid->sb_idx = p_params->sb_idx; 221 222 /* Fill-in bits related to VFs' queues if information was provided */ 223 if (p_vf_params != OSAL_NULL) { 224 p_cid->vfid = p_vf_params->vfid; 225 p_cid->vf_qid = p_vf_params->vf_qid; 226 p_cid->vf_legacy = p_vf_params->vf_legacy; 227 } else { 228 p_cid->vfid = ECORE_QUEUE_CID_PF; 229 } 230 231 /* Don't try calculating the absolute indices for VFs */ 232 if (IS_VF(p_hwfn->p_dev)) { 233 p_cid->abs = p_cid->rel; 234 235 goto out; 236 } 237 238 /* Calculate the engine-absolute indices of the resources. 239 * This would guarantee they're valid later on. 240 * In some cases [SBs] we already have the right values. 241 */ 242 rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); 243 if (rc != ECORE_SUCCESS) 244 goto fail; 245 246 rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, 247 &p_cid->abs.queue_id); 248 if (rc != ECORE_SUCCESS) 249 goto fail; 250 251 /* In case of a PF configuring its VF's queues, the stats-id is already 252 * absolute [since there's a single index that's suitable per-VF]. 253 */ 254 if (p_cid->vfid == ECORE_QUEUE_CID_PF) { 255 rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id, 256 &p_cid->abs.stats_id); 257 if (rc != ECORE_SUCCESS) 258 goto fail; 259 } else { 260 p_cid->abs.stats_id = p_cid->rel.stats_id; 261 } 262 263 out: 264 /* VF-images have provided the qid_usage_idx on their own. 265 * Otherwise, we need to allocate a unique one. 266 */ 267 if (!p_vf_params) { 268 if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid)) 269 goto fail; 270 } else { 271 p_cid->qid_usage_idx = p_vf_params->qid_usage_idx; 272 } 273 274 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 275 "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n", 276 p_cid->opaque_fid, p_cid->cid, 277 p_cid->rel.vport_id, p_cid->abs.vport_id, 278 p_cid->rel.queue_id, p_cid->qid_usage_idx, 279 p_cid->abs.queue_id, 280 p_cid->rel.stats_id, p_cid->abs.stats_id, 281 p_cid->sb_igu_id, p_cid->sb_idx); 282 283 return p_cid; 284 285 fail: 286 OSAL_VFREE(p_hwfn->p_dev, p_cid); 287 return OSAL_NULL; 288 } 289 290 struct ecore_queue_cid * 291 ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 292 struct ecore_queue_start_common_params *p_params, 293 bool b_is_rx, 294 struct ecore_queue_cid_vf_params *p_vf_params) 295 { 296 struct ecore_queue_cid *p_cid; 297 u8 vfid = ECORE_CXT_PF_CID; 298 bool b_legacy_vf = false; 299 u32 cid = 0; 300 301 /* In case of legacy VFs, The CID can be derived from the additional 302 * VF parameters - the VF assumes queue X uses CID X, so we can simply 303 * use the vf_qid for this purpose as well. 304 */ 305 if (p_vf_params) { 306 vfid = p_vf_params->vfid; 307 308 if (p_vf_params->vf_legacy & 309 ECORE_QCID_LEGACY_VF_CID) { 310 b_legacy_vf = true; 311 cid = p_vf_params->vf_qid; 312 } 313 } 314 315 /* Get a unique firmware CID for this queue, in case it's a PF. 316 * VF's don't need a CID as the queue configuration will be done 317 * by PF. 318 */ 319 if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) { 320 if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, 321 &cid, vfid) != ECORE_SUCCESS) { 322 DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n"); 323 return OSAL_NULL; 324 } 325 } 326 327 p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 328 p_params, b_is_rx, p_vf_params); 329 if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf) 330 _ecore_cxt_release_cid(p_hwfn, cid, vfid); 331 332 return p_cid; 333 } 334 335 static struct ecore_queue_cid * 336 ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 337 bool b_is_rx, 338 struct ecore_queue_start_common_params *p_params) 339 { 340 return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx, 341 OSAL_NULL); 342 } 343 344 enum _ecore_status_t 345 ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn, 346 struct ecore_sp_vport_start_params *p_params) 347 { 348 struct vport_start_ramrod_data *p_ramrod = OSAL_NULL; 349 struct ecore_spq_entry *p_ent = OSAL_NULL; 350 struct ecore_sp_init_data init_data; 351 struct eth_vport_tpa_param *p_tpa; 352 u16 rx_mode = 0, tx_err = 0; 353 u8 abs_vport_id = 0; 354 enum _ecore_status_t rc = ECORE_NOTIMPL; 355 356 rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 357 if (rc != ECORE_SUCCESS) 358 return rc; 359 360 /* Get SPQ entry */ 361 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 362 init_data.cid = ecore_spq_get_cid(p_hwfn); 363 init_data.opaque_fid = p_params->opaque_fid; 364 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 365 366 rc = ecore_sp_init_request(p_hwfn, &p_ent, 367 ETH_RAMROD_VPORT_START, 368 PROTOCOLID_ETH, &init_data); 369 if (rc != ECORE_SUCCESS) 370 return rc; 371 372 p_ramrod = &p_ent->ramrod.vport_start; 373 p_ramrod->vport_id = abs_vport_id; 374 375 p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu); 376 p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; 377 p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; 378 p_ramrod->drop_ttl0_en = p_params->drop_ttl0; 379 p_ramrod->untagged = p_params->only_untagged; 380 p_ramrod->zero_placement_offset = p_params->zero_placement_offset; 381 382 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); 383 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); 384 385 p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode); 386 387 /* Handle requests for strict behavior on transmission errors */ 388 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE, 389 p_params->b_err_illegal_vlan_mode ? 390 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 391 SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL, 392 p_params->b_err_small_pkt ? 393 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 394 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR, 395 p_params->b_err_anti_spoof ? 396 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 397 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS, 398 p_params->b_err_illegal_inband_mode ? 399 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 400 SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG, 401 p_params->b_err_vlan_insert_with_inband ? 402 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 403 SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION, 404 p_params->b_err_big_pkt ? 405 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 406 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME, 407 p_params->b_err_ctrl_frame ? 408 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 409 p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err); 410 411 /* TPA related fields */ 412 p_tpa = &p_ramrod->tpa_param; 413 OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param)); 414 p_tpa->max_buff_num = p_params->max_buffers_per_cqe; 415 416 switch (p_params->tpa_mode) { 417 case ECORE_TPA_MODE_GRO: 418 p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 419 p_tpa->tpa_max_size = (u16)-1; 420 p_tpa->tpa_min_size_to_cont = p_params->mtu / 2; 421 p_tpa->tpa_min_size_to_start = p_params->mtu / 2; 422 p_tpa->tpa_ipv4_en_flg = 1; 423 p_tpa->tpa_ipv6_en_flg = 1; 424 p_tpa->tpa_ipv4_tunn_en_flg = 1; 425 p_tpa->tpa_ipv6_tunn_en_flg = 1; 426 p_tpa->tpa_pkt_split_flg = 1; 427 p_tpa->tpa_gro_consistent_flg = 1; 428 break; 429 default: 430 break; 431 } 432 433 p_ramrod->tx_switching_en = p_params->tx_switching; 434 #ifndef ASIC_ONLY 435 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) 436 p_ramrod->tx_switching_en = 0; 437 #endif 438 439 p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; 440 p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; 441 442 /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ 443 p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid); 444 445 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 446 } 447 448 enum _ecore_status_t 449 ecore_sp_vport_start(struct ecore_hwfn *p_hwfn, 450 struct ecore_sp_vport_start_params *p_params) 451 { 452 if (IS_VF(p_hwfn->p_dev)) 453 return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id, 454 p_params->mtu, 455 p_params->remove_inner_vlan, 456 p_params->tpa_mode, 457 p_params->max_buffers_per_cqe, 458 p_params->only_untagged); 459 460 return ecore_sp_eth_vport_start(p_hwfn, p_params); 461 } 462 463 static enum _ecore_status_t 464 ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn, 465 struct vport_update_ramrod_data *p_ramrod, 466 struct ecore_rss_params *p_rss) 467 { 468 struct eth_vport_rss_config *p_config; 469 u16 capabilities = 0; 470 int i, table_size; 471 enum _ecore_status_t rc = ECORE_SUCCESS; 472 473 if (!p_rss) { 474 p_ramrod->common.update_rss_flg = 0; 475 return rc; 476 } 477 p_config = &p_ramrod->rss_config; 478 479 OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE != 480 ETH_RSS_IND_TABLE_ENTRIES_NUM); 481 482 rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id); 483 if (rc != ECORE_SUCCESS) 484 return rc; 485 486 p_ramrod->common.update_rss_flg = p_rss->update_rss_config; 487 p_config->update_rss_capabilities = p_rss->update_rss_capabilities; 488 p_config->update_rss_ind_table = p_rss->update_rss_ind_table; 489 p_config->update_rss_key = p_rss->update_rss_key; 490 491 p_config->rss_mode = p_rss->rss_enable ? 492 ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED; 493 494 p_config->capabilities = 0; 495 496 SET_FIELD(capabilities, 497 ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, 498 !!(p_rss->rss_caps & ECORE_RSS_IPV4)); 499 SET_FIELD(capabilities, 500 ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, 501 !!(p_rss->rss_caps & ECORE_RSS_IPV6)); 502 SET_FIELD(capabilities, 503 ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 504 !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP)); 505 SET_FIELD(capabilities, 506 ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, 507 !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP)); 508 SET_FIELD(capabilities, 509 ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, 510 !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP)); 511 SET_FIELD(capabilities, 512 ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, 513 !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP)); 514 p_config->tbl_size = p_rss->rss_table_size_log; 515 p_config->capabilities = OSAL_CPU_TO_LE16(capabilities); 516 517 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 518 "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", 519 p_ramrod->common.update_rss_flg, 520 p_config->rss_mode, 521 p_config->update_rss_capabilities, 522 p_config->capabilities, 523 p_config->update_rss_ind_table, p_config->update_rss_key); 524 525 table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE, 526 1 << p_config->tbl_size); 527 for (i = 0; i < table_size; i++) { 528 struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i]; 529 530 if (!p_queue) 531 return ECORE_INVAL; 532 533 p_config->indirection_table[i] = 534 OSAL_CPU_TO_LE16(p_queue->abs.queue_id); 535 } 536 537 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 538 "Configured RSS indirection table [%d entries]:\n", 539 table_size); 540 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) { 541 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 542 "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", 543 OSAL_LE16_TO_CPU(p_config->indirection_table[i]), 544 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]), 545 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]), 546 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]), 547 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]), 548 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]), 549 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]), 550 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]), 551 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]), 552 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]), 553 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]), 554 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]), 555 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]), 556 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]), 557 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]), 558 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15])); 559 } 560 561 for (i = 0; i < 10; i++) 562 p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]); 563 564 return rc; 565 } 566 567 static void 568 ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn, 569 struct vport_update_ramrod_data *p_ramrod, 570 struct ecore_filter_accept_flags accept_flags) 571 { 572 p_ramrod->common.update_rx_mode_flg = 573 accept_flags.update_rx_mode_config; 574 p_ramrod->common.update_tx_mode_flg = 575 accept_flags.update_tx_mode_config; 576 577 #ifndef ASIC_ONLY 578 /* On B0 emulation we cannot enable Tx, since this would cause writes 579 * to PVFC HW block which isn't implemented in emulation. 580 */ 581 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 582 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 583 "Non-Asic - prevent Tx mode in vport update\n"); 584 p_ramrod->common.update_tx_mode_flg = 0; 585 } 586 #endif 587 588 /* Set Rx mode accept flags */ 589 if (p_ramrod->common.update_rx_mode_flg) { 590 u8 accept_filter = accept_flags.rx_accept_filter; 591 u16 state = 0; 592 593 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 594 !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) || 595 !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED))); 596 597 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, 598 !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)); 599 600 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 601 !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) || 602 !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 603 604 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, 605 (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 606 !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 607 608 SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, 609 !!(accept_filter & ECORE_ACCEPT_BCAST)); 610 611 SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, 612 !!(accept_filter & ECORE_ACCEPT_ANY_VNI)); 613 614 p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state); 615 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 616 "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n", 617 p_ramrod->common.vport_id, state); 618 } 619 620 /* Set Tx mode accept flags */ 621 if (p_ramrod->common.update_tx_mode_flg) { 622 u8 accept_filter = accept_flags.tx_accept_filter; 623 u16 state = 0; 624 625 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, 626 !!(accept_filter & ECORE_ACCEPT_NONE)); 627 628 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, 629 !!(accept_filter & ECORE_ACCEPT_NONE)); 630 631 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, 632 (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 633 !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 634 635 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, 636 !!(accept_filter & ECORE_ACCEPT_BCAST)); 637 638 p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state); 639 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 640 "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n", 641 p_ramrod->common.vport_id, state); 642 } 643 } 644 645 static void 646 ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod, 647 struct ecore_sge_tpa_params *p_params) 648 { 649 struct eth_vport_tpa_param *p_tpa; 650 u16 val; 651 652 if (!p_params) { 653 p_ramrod->common.update_tpa_param_flg = 0; 654 p_ramrod->common.update_tpa_en_flg = 0; 655 p_ramrod->common.update_tpa_param_flg = 0; 656 return; 657 } 658 659 p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; 660 p_tpa = &p_ramrod->tpa_param; 661 p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; 662 p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; 663 p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; 664 p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; 665 666 p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; 667 p_tpa->max_buff_num = p_params->max_buffers_per_cqe; 668 p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; 669 p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg; 670 p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; 671 p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; 672 val = p_params->tpa_max_size; 673 p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val); 674 val = p_params->tpa_min_size_to_start; 675 p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val); 676 val = p_params->tpa_min_size_to_cont; 677 p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val); 678 } 679 680 static void 681 ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod, 682 struct ecore_sp_vport_update_params *p_params) 683 { 684 int i; 685 686 OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0, 687 sizeof(p_ramrod->approx_mcast.bins)); 688 689 if (!p_params->update_approx_mcast_flg) 690 return; 691 692 p_ramrod->common.update_approx_mcast_flg = 1; 693 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 694 u32 *p_bins = p_params->bins; 695 696 p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); 697 } 698 } 699 700 enum _ecore_status_t 701 ecore_sp_vport_update(struct ecore_hwfn *p_hwfn, 702 struct ecore_sp_vport_update_params *p_params, 703 enum spq_mode comp_mode, 704 struct ecore_spq_comp_cb *p_comp_data) 705 { 706 struct ecore_rss_params *p_rss_params = p_params->rss_params; 707 struct vport_update_ramrod_data_cmn *p_cmn; 708 struct ecore_sp_init_data init_data; 709 struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; 710 struct ecore_spq_entry *p_ent = OSAL_NULL; 711 u8 abs_vport_id = 0, val; 712 enum _ecore_status_t rc = ECORE_NOTIMPL; 713 714 if (IS_VF(p_hwfn->p_dev)) { 715 rc = ecore_vf_pf_vport_update(p_hwfn, p_params); 716 return rc; 717 } 718 719 rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 720 if (rc != ECORE_SUCCESS) 721 return rc; 722 723 /* Get SPQ entry */ 724 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 725 init_data.cid = ecore_spq_get_cid(p_hwfn); 726 init_data.opaque_fid = p_params->opaque_fid; 727 init_data.comp_mode = comp_mode; 728 init_data.p_comp_data = p_comp_data; 729 730 rc = ecore_sp_init_request(p_hwfn, &p_ent, 731 ETH_RAMROD_VPORT_UPDATE, 732 PROTOCOLID_ETH, &init_data); 733 if (rc != ECORE_SUCCESS) 734 return rc; 735 736 /* Copy input params to ramrod according to FW struct */ 737 p_ramrod = &p_ent->ramrod.vport_update; 738 p_cmn = &p_ramrod->common; 739 740 p_cmn->vport_id = abs_vport_id; 741 742 p_cmn->rx_active_flg = p_params->vport_active_rx_flg; 743 p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; 744 p_cmn->tx_active_flg = p_params->vport_active_tx_flg; 745 p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; 746 747 p_cmn->accept_any_vlan = p_params->accept_any_vlan; 748 val = p_params->update_accept_any_vlan_flg; 749 p_cmn->update_accept_any_vlan_flg = val; 750 751 p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; 752 val = p_params->update_inner_vlan_removal_flg; 753 p_cmn->update_inner_vlan_removal_en_flg = val; 754 755 p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; 756 val = p_params->update_default_vlan_enable_flg; 757 p_cmn->update_default_vlan_en_flg = val; 758 759 p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan); 760 p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; 761 762 p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; 763 764 p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; 765 766 #ifndef ASIC_ONLY 767 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 768 if (p_ramrod->common.tx_switching_en || 769 p_ramrod->common.update_tx_switching_en_flg) { 770 DP_NOTICE(p_hwfn, false, 771 "FPGA - why are we seeing tx-switching? Overriding it\n"); 772 p_ramrod->common.tx_switching_en = 0; 773 p_ramrod->common.update_tx_switching_en_flg = 1; 774 } 775 #endif 776 p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; 777 778 p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; 779 val = p_params->update_anti_spoofing_en_flg; 780 p_ramrod->common.update_anti_spoofing_en_flg = val; 781 782 rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); 783 if (rc != ECORE_SUCCESS) { 784 /* Return spq entry which is taken in ecore_sp_init_request()*/ 785 ecore_spq_return_entry(p_hwfn, p_ent); 786 return rc; 787 } 788 789 if (p_params->update_ctl_frame_check) { 790 p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; 791 p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; 792 } 793 794 /* Update mcast bins for VFs, PF doesn't use this functionality */ 795 ecore_sp_update_mcast_bin(p_ramrod, p_params); 796 797 ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); 798 ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params); 799 if (p_params->mtu) { 800 p_ramrod->common.update_mtu_flg = 1; 801 p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu); 802 } 803 804 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 805 } 806 807 enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn, 808 u16 opaque_fid, u8 vport_id) 809 { 810 struct vport_stop_ramrod_data *p_ramrod; 811 struct ecore_sp_init_data init_data; 812 struct ecore_spq_entry *p_ent; 813 u8 abs_vport_id = 0; 814 enum _ecore_status_t rc; 815 816 if (IS_VF(p_hwfn->p_dev)) 817 return ecore_vf_pf_vport_stop(p_hwfn); 818 819 rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); 820 if (rc != ECORE_SUCCESS) 821 return rc; 822 823 /* Get SPQ entry */ 824 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 825 init_data.cid = ecore_spq_get_cid(p_hwfn); 826 init_data.opaque_fid = opaque_fid; 827 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 828 829 rc = ecore_sp_init_request(p_hwfn, &p_ent, 830 ETH_RAMROD_VPORT_STOP, 831 PROTOCOLID_ETH, &init_data); 832 if (rc != ECORE_SUCCESS) 833 return rc; 834 835 p_ramrod = &p_ent->ramrod.vport_stop; 836 p_ramrod->vport_id = abs_vport_id; 837 838 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 839 } 840 841 static enum _ecore_status_t 842 ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn, 843 struct ecore_filter_accept_flags *p_accept_flags) 844 { 845 struct ecore_sp_vport_update_params s_params; 846 847 OSAL_MEMSET(&s_params, 0, sizeof(s_params)); 848 OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags, 849 sizeof(struct ecore_filter_accept_flags)); 850 851 return ecore_vf_pf_vport_update(p_hwfn, &s_params); 852 } 853 854 enum _ecore_status_t 855 ecore_filter_accept_cmd(struct ecore_dev *p_dev, 856 u8 vport, 857 struct ecore_filter_accept_flags accept_flags, 858 u8 update_accept_any_vlan, 859 u8 accept_any_vlan, 860 enum spq_mode comp_mode, 861 struct ecore_spq_comp_cb *p_comp_data) 862 { 863 struct ecore_sp_vport_update_params vport_update_params; 864 int i, rc; 865 866 /* Prepare and send the vport rx_mode change */ 867 OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params)); 868 vport_update_params.vport_id = vport; 869 vport_update_params.accept_flags = accept_flags; 870 vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; 871 vport_update_params.accept_any_vlan = accept_any_vlan; 872 873 for_each_hwfn(p_dev, i) { 874 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 875 876 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 877 878 if (IS_VF(p_dev)) { 879 rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags); 880 if (rc != ECORE_SUCCESS) 881 return rc; 882 continue; 883 } 884 885 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 886 comp_mode, p_comp_data); 887 if (rc != ECORE_SUCCESS) { 888 DP_ERR(p_dev, "Update rx_mode failed %d\n", rc); 889 return rc; 890 } 891 892 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 893 "Accept filter configured, flags = [Rx]%x [Tx]%x\n", 894 accept_flags.rx_accept_filter, 895 accept_flags.tx_accept_filter); 896 897 if (update_accept_any_vlan) 898 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 899 "accept_any_vlan=%d configured\n", 900 accept_any_vlan); 901 } 902 903 return 0; 904 } 905 906 enum _ecore_status_t 907 ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn, 908 struct ecore_queue_cid *p_cid, 909 u16 bd_max_bytes, 910 dma_addr_t bd_chain_phys_addr, 911 dma_addr_t cqe_pbl_addr, 912 u16 cqe_pbl_size) 913 { 914 struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; 915 struct ecore_spq_entry *p_ent = OSAL_NULL; 916 struct ecore_sp_init_data init_data; 917 enum _ecore_status_t rc = ECORE_NOTIMPL; 918 919 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 920 "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", 921 p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id, 922 p_cid->abs.vport_id, p_cid->sb_igu_id); 923 924 /* Get SPQ entry */ 925 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 926 init_data.cid = p_cid->cid; 927 init_data.opaque_fid = p_cid->opaque_fid; 928 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 929 930 rc = ecore_sp_init_request(p_hwfn, &p_ent, 931 ETH_RAMROD_RX_QUEUE_START, 932 PROTOCOLID_ETH, &init_data); 933 if (rc != ECORE_SUCCESS) 934 return rc; 935 936 p_ramrod = &p_ent->ramrod.rx_queue_start; 937 938 p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id); 939 p_ramrod->sb_index = p_cid->sb_idx; 940 p_ramrod->vport_id = p_cid->abs.vport_id; 941 p_ramrod->stats_counter_id = p_cid->abs.stats_id; 942 p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 943 p_ramrod->complete_cqe_flg = 0; 944 p_ramrod->complete_event_flg = 1; 945 946 p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes); 947 DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); 948 949 p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size); 950 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); 951 952 if (p_cid->vfid != ECORE_QUEUE_CID_PF) { 953 bool b_legacy_vf = !!(p_cid->vf_legacy & 954 ECORE_QCID_LEGACY_VF_RX_PROD); 955 956 p_ramrod->vf_rx_prod_index = p_cid->vf_qid; 957 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 958 "Queue%s is meant for VF rxq[%02x]\n", 959 b_legacy_vf ? " [legacy]" : "", 960 p_cid->vf_qid); 961 p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf; 962 } 963 964 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 965 } 966 967 static enum _ecore_status_t 968 ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn, 969 struct ecore_queue_cid *p_cid, 970 u16 bd_max_bytes, 971 dma_addr_t bd_chain_phys_addr, 972 dma_addr_t cqe_pbl_addr, 973 u16 cqe_pbl_size, 974 void OSAL_IOMEM * *pp_prod) 975 { 976 u32 init_prod_val = 0; 977 978 *pp_prod = (u8 OSAL_IOMEM *) 979 p_hwfn->regview + 980 GTT_BAR0_MAP_REG_MSDM_RAM + 981 MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); 982 983 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ 984 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 985 (u32 *)(&init_prod_val)); 986 987 return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid, 988 bd_max_bytes, 989 bd_chain_phys_addr, 990 cqe_pbl_addr, cqe_pbl_size); 991 } 992 993 enum _ecore_status_t 994 ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn, 995 u16 opaque_fid, 996 struct ecore_queue_start_common_params *p_params, 997 u16 bd_max_bytes, 998 dma_addr_t bd_chain_phys_addr, 999 dma_addr_t cqe_pbl_addr, 1000 u16 cqe_pbl_size, 1001 struct ecore_rxq_start_ret_params *p_ret_params) 1002 { 1003 struct ecore_queue_cid *p_cid; 1004 enum _ecore_status_t rc; 1005 1006 /* Allocate a CID for the queue */ 1007 p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params); 1008 if (p_cid == OSAL_NULL) 1009 return ECORE_NOMEM; 1010 1011 if (IS_PF(p_hwfn->p_dev)) 1012 rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid, 1013 bd_max_bytes, 1014 bd_chain_phys_addr, 1015 cqe_pbl_addr, cqe_pbl_size, 1016 &p_ret_params->p_prod); 1017 else 1018 rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid, 1019 bd_max_bytes, 1020 bd_chain_phys_addr, 1021 cqe_pbl_addr, 1022 cqe_pbl_size, 1023 &p_ret_params->p_prod); 1024 1025 /* Provide the caller with a reference to as handler */ 1026 if (rc != ECORE_SUCCESS) 1027 ecore_eth_queue_cid_release(p_hwfn, p_cid); 1028 else 1029 p_ret_params->p_handle = (void *)p_cid; 1030 1031 return rc; 1032 } 1033 1034 enum _ecore_status_t 1035 ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn, 1036 void **pp_rxq_handles, 1037 u8 num_rxqs, 1038 u8 complete_cqe_flg, 1039 u8 complete_event_flg, 1040 enum spq_mode comp_mode, 1041 struct ecore_spq_comp_cb *p_comp_data) 1042 { 1043 struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL; 1044 struct ecore_spq_entry *p_ent = OSAL_NULL; 1045 struct ecore_sp_init_data init_data; 1046 struct ecore_queue_cid *p_cid; 1047 enum _ecore_status_t rc = ECORE_NOTIMPL; 1048 u8 i; 1049 1050 if (IS_VF(p_hwfn->p_dev)) 1051 return ecore_vf_pf_rxqs_update(p_hwfn, 1052 (struct ecore_queue_cid **) 1053 pp_rxq_handles, 1054 num_rxqs, 1055 complete_cqe_flg, 1056 complete_event_flg); 1057 1058 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1059 init_data.comp_mode = comp_mode; 1060 init_data.p_comp_data = p_comp_data; 1061 1062 for (i = 0; i < num_rxqs; i++) { 1063 p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i]; 1064 1065 /* Get SPQ entry */ 1066 init_data.cid = p_cid->cid; 1067 init_data.opaque_fid = p_cid->opaque_fid; 1068 1069 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1070 ETH_RAMROD_RX_QUEUE_UPDATE, 1071 PROTOCOLID_ETH, &init_data); 1072 if (rc != ECORE_SUCCESS) 1073 return rc; 1074 1075 p_ramrod = &p_ent->ramrod.rx_queue_update; 1076 p_ramrod->vport_id = p_cid->abs.vport_id; 1077 1078 p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1079 p_ramrod->complete_cqe_flg = complete_cqe_flg; 1080 p_ramrod->complete_event_flg = complete_event_flg; 1081 1082 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1083 if (rc != ECORE_SUCCESS) 1084 return rc; 1085 } 1086 1087 return rc; 1088 } 1089 1090 static enum _ecore_status_t 1091 ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn, 1092 struct ecore_queue_cid *p_cid, 1093 bool b_eq_completion_only, 1094 bool b_cqe_completion) 1095 { 1096 struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL; 1097 struct ecore_spq_entry *p_ent = OSAL_NULL; 1098 struct ecore_sp_init_data init_data; 1099 enum _ecore_status_t rc; 1100 1101 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1102 init_data.cid = p_cid->cid; 1103 init_data.opaque_fid = p_cid->opaque_fid; 1104 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1105 1106 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1107 ETH_RAMROD_RX_QUEUE_STOP, 1108 PROTOCOLID_ETH, &init_data); 1109 if (rc != ECORE_SUCCESS) 1110 return rc; 1111 1112 p_ramrod = &p_ent->ramrod.rx_queue_stop; 1113 p_ramrod->vport_id = p_cid->abs.vport_id; 1114 p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1115 1116 /* Cleaning the queue requires the completion to arrive there. 1117 * In addition, VFs require the answer to come as eqe to PF. 1118 */ 1119 p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) && 1120 !b_eq_completion_only) || 1121 b_cqe_completion; 1122 p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) || 1123 b_eq_completion_only; 1124 1125 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1126 } 1127 1128 enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn, 1129 void *p_rxq, 1130 bool eq_completion_only, 1131 bool cqe_completion) 1132 { 1133 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq; 1134 enum _ecore_status_t rc = ECORE_NOTIMPL; 1135 1136 if (IS_PF(p_hwfn->p_dev)) 1137 rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid, 1138 eq_completion_only, 1139 cqe_completion); 1140 else 1141 rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); 1142 1143 if (rc == ECORE_SUCCESS) 1144 ecore_eth_queue_cid_release(p_hwfn, p_cid); 1145 return rc; 1146 } 1147 1148 enum _ecore_status_t 1149 ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn, 1150 struct ecore_queue_cid *p_cid, 1151 dma_addr_t pbl_addr, u16 pbl_size, 1152 u16 pq_id) 1153 { 1154 struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; 1155 struct ecore_spq_entry *p_ent = OSAL_NULL; 1156 struct ecore_sp_init_data init_data; 1157 enum _ecore_status_t rc = ECORE_NOTIMPL; 1158 1159 /* Get SPQ entry */ 1160 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1161 init_data.cid = p_cid->cid; 1162 init_data.opaque_fid = p_cid->opaque_fid; 1163 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1164 1165 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1166 ETH_RAMROD_TX_QUEUE_START, 1167 PROTOCOLID_ETH, &init_data); 1168 if (rc != ECORE_SUCCESS) 1169 return rc; 1170 1171 p_ramrod = &p_ent->ramrod.tx_queue_start; 1172 p_ramrod->vport_id = p_cid->abs.vport_id; 1173 1174 p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id); 1175 p_ramrod->sb_index = p_cid->sb_idx; 1176 p_ramrod->stats_counter_id = p_cid->abs.stats_id; 1177 1178 p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1179 p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1180 1181 p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size); 1182 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); 1183 1184 p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id); 1185 1186 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1187 } 1188 1189 static enum _ecore_status_t 1190 ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn, 1191 struct ecore_queue_cid *p_cid, 1192 u8 tc, 1193 dma_addr_t pbl_addr, u16 pbl_size, 1194 void OSAL_IOMEM * *pp_doorbell) 1195 { 1196 enum _ecore_status_t rc; 1197 u16 pq_id; 1198 1199 /* TODO - set tc in the pq_params for multi-cos. 1200 * If pacing is enabled then select queue according to 1201 * rate limiter availability otherwise select queue based 1202 * on multi cos. 1203 */ 1204 if (IS_ECORE_PACING(p_hwfn)) 1205 pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id); 1206 else 1207 pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc); 1208 1209 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr, 1210 pbl_size, pq_id); 1211 if (rc != ECORE_SUCCESS) 1212 return rc; 1213 1214 /* Provide the caller with the necessary return values */ 1215 *pp_doorbell = (u8 OSAL_IOMEM *) 1216 p_hwfn->doorbells + 1217 DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY); 1218 1219 return ECORE_SUCCESS; 1220 } 1221 1222 enum _ecore_status_t 1223 ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 1224 struct ecore_queue_start_common_params *p_params, 1225 u8 tc, 1226 dma_addr_t pbl_addr, u16 pbl_size, 1227 struct ecore_txq_start_ret_params *p_ret_params) 1228 { 1229 struct ecore_queue_cid *p_cid; 1230 enum _ecore_status_t rc; 1231 1232 p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params); 1233 if (p_cid == OSAL_NULL) 1234 return ECORE_INVAL; 1235 1236 if (IS_PF(p_hwfn->p_dev)) 1237 rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, 1238 pbl_addr, pbl_size, 1239 &p_ret_params->p_doorbell); 1240 else 1241 rc = ecore_vf_pf_txq_start(p_hwfn, p_cid, 1242 pbl_addr, pbl_size, 1243 &p_ret_params->p_doorbell); 1244 1245 if (rc != ECORE_SUCCESS) 1246 ecore_eth_queue_cid_release(p_hwfn, p_cid); 1247 else 1248 p_ret_params->p_handle = (void *)p_cid; 1249 1250 return rc; 1251 } 1252 1253 static enum _ecore_status_t 1254 ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn, 1255 struct ecore_queue_cid *p_cid) 1256 { 1257 struct ecore_spq_entry *p_ent = OSAL_NULL; 1258 struct ecore_sp_init_data init_data; 1259 enum _ecore_status_t rc; 1260 1261 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1262 init_data.cid = p_cid->cid; 1263 init_data.opaque_fid = p_cid->opaque_fid; 1264 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1265 1266 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1267 ETH_RAMROD_TX_QUEUE_STOP, 1268 PROTOCOLID_ETH, &init_data); 1269 if (rc != ECORE_SUCCESS) 1270 return rc; 1271 1272 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1273 } 1274 1275 enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn, 1276 void *p_handle) 1277 { 1278 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 1279 enum _ecore_status_t rc; 1280 1281 if (IS_PF(p_hwfn->p_dev)) 1282 rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid); 1283 else 1284 rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid); 1285 1286 if (rc == ECORE_SUCCESS) 1287 ecore_eth_queue_cid_release(p_hwfn, p_cid); 1288 return rc; 1289 } 1290 1291 static enum eth_filter_action 1292 ecore_filter_action(enum ecore_filter_opcode opcode) 1293 { 1294 enum eth_filter_action action = MAX_ETH_FILTER_ACTION; 1295 1296 switch (opcode) { 1297 case ECORE_FILTER_ADD: 1298 action = ETH_FILTER_ACTION_ADD; 1299 break; 1300 case ECORE_FILTER_REMOVE: 1301 action = ETH_FILTER_ACTION_REMOVE; 1302 break; 1303 case ECORE_FILTER_FLUSH: 1304 action = ETH_FILTER_ACTION_REMOVE_ALL; 1305 break; 1306 default: 1307 action = MAX_ETH_FILTER_ACTION; 1308 } 1309 1310 return action; 1311 } 1312 1313 static enum _ecore_status_t 1314 ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn, 1315 u16 opaque_fid, 1316 struct ecore_filter_ucast *p_filter_cmd, 1317 struct vport_filter_update_ramrod_data **pp_ramrod, 1318 struct ecore_spq_entry **pp_ent, 1319 enum spq_mode comp_mode, 1320 struct ecore_spq_comp_cb *p_comp_data) 1321 { 1322 u8 vport_to_add_to = 0, vport_to_remove_from = 0; 1323 struct vport_filter_update_ramrod_data *p_ramrod; 1324 struct eth_filter_cmd *p_first_filter; 1325 struct eth_filter_cmd *p_second_filter; 1326 struct ecore_sp_init_data init_data; 1327 enum eth_filter_action action; 1328 enum _ecore_status_t rc; 1329 1330 rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1331 &vport_to_remove_from); 1332 if (rc != ECORE_SUCCESS) 1333 return rc; 1334 1335 rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1336 &vport_to_add_to); 1337 if (rc != ECORE_SUCCESS) 1338 return rc; 1339 1340 /* Get SPQ entry */ 1341 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1342 init_data.cid = ecore_spq_get_cid(p_hwfn); 1343 init_data.opaque_fid = opaque_fid; 1344 init_data.comp_mode = comp_mode; 1345 init_data.p_comp_data = p_comp_data; 1346 1347 rc = ecore_sp_init_request(p_hwfn, pp_ent, 1348 ETH_RAMROD_FILTERS_UPDATE, 1349 PROTOCOLID_ETH, &init_data); 1350 if (rc != ECORE_SUCCESS) 1351 return rc; 1352 1353 *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; 1354 p_ramrod = *pp_ramrod; 1355 p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; 1356 p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0; 1357 1358 #ifndef ASIC_ONLY 1359 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1360 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1361 "Non-Asic - prevent Tx filters\n"); 1362 p_ramrod->filter_cmd_hdr.tx = 0; 1363 } 1364 #endif 1365 1366 switch (p_filter_cmd->opcode) { 1367 case ECORE_FILTER_REPLACE: 1368 case ECORE_FILTER_MOVE: 1369 p_ramrod->filter_cmd_hdr.cmd_cnt = 2; 1370 break; 1371 default: 1372 p_ramrod->filter_cmd_hdr.cmd_cnt = 1; 1373 break; 1374 } 1375 1376 p_first_filter = &p_ramrod->filter_cmds[0]; 1377 p_second_filter = &p_ramrod->filter_cmds[1]; 1378 1379 switch (p_filter_cmd->type) { 1380 case ECORE_FILTER_MAC: 1381 p_first_filter->type = ETH_FILTER_TYPE_MAC; 1382 break; 1383 case ECORE_FILTER_VLAN: 1384 p_first_filter->type = ETH_FILTER_TYPE_VLAN; 1385 break; 1386 case ECORE_FILTER_MAC_VLAN: 1387 p_first_filter->type = ETH_FILTER_TYPE_PAIR; 1388 break; 1389 case ECORE_FILTER_INNER_MAC: 1390 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; 1391 break; 1392 case ECORE_FILTER_INNER_VLAN: 1393 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; 1394 break; 1395 case ECORE_FILTER_INNER_PAIR: 1396 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; 1397 break; 1398 case ECORE_FILTER_INNER_MAC_VNI_PAIR: 1399 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; 1400 break; 1401 case ECORE_FILTER_MAC_VNI_PAIR: 1402 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; 1403 break; 1404 case ECORE_FILTER_VNI: 1405 p_first_filter->type = ETH_FILTER_TYPE_VNI; 1406 break; 1407 case ECORE_FILTER_UNUSED: /* @DPDK */ 1408 p_first_filter->type = MAX_ETH_FILTER_TYPE; 1409 break; 1410 } 1411 1412 if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || 1413 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1414 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || 1415 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || 1416 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1417 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) 1418 ecore_set_fw_mac_addr(&p_first_filter->mac_msb, 1419 &p_first_filter->mac_mid, 1420 &p_first_filter->mac_lsb, 1421 (u8 *)p_filter_cmd->mac); 1422 1423 if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || 1424 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1425 (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || 1426 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) 1427 p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan); 1428 1429 if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1430 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || 1431 (p_first_filter->type == ETH_FILTER_TYPE_VNI)) 1432 p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni); 1433 1434 if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) { 1435 p_second_filter->type = p_first_filter->type; 1436 p_second_filter->mac_msb = p_first_filter->mac_msb; 1437 p_second_filter->mac_mid = p_first_filter->mac_mid; 1438 p_second_filter->mac_lsb = p_first_filter->mac_lsb; 1439 p_second_filter->vlan_id = p_first_filter->vlan_id; 1440 p_second_filter->vni = p_first_filter->vni; 1441 1442 p_first_filter->action = ETH_FILTER_ACTION_REMOVE; 1443 1444 p_first_filter->vport_id = vport_to_remove_from; 1445 1446 p_second_filter->action = ETH_FILTER_ACTION_ADD; 1447 p_second_filter->vport_id = vport_to_add_to; 1448 } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) { 1449 p_first_filter->vport_id = vport_to_add_to; 1450 OSAL_MEMCPY(p_second_filter, p_first_filter, 1451 sizeof(*p_second_filter)); 1452 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; 1453 p_second_filter->action = ETH_FILTER_ACTION_ADD; 1454 } else { 1455 action = ecore_filter_action(p_filter_cmd->opcode); 1456 1457 if (action == MAX_ETH_FILTER_ACTION) { 1458 DP_NOTICE(p_hwfn, true, 1459 "%d is not supported yet\n", 1460 p_filter_cmd->opcode); 1461 return ECORE_NOTIMPL; 1462 } 1463 1464 p_first_filter->action = action; 1465 p_first_filter->vport_id = 1466 (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? 1467 vport_to_remove_from : vport_to_add_to; 1468 } 1469 1470 return ECORE_SUCCESS; 1471 } 1472 1473 enum _ecore_status_t 1474 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn, 1475 u16 opaque_fid, 1476 struct ecore_filter_ucast *p_filter_cmd, 1477 enum spq_mode comp_mode, 1478 struct ecore_spq_comp_cb *p_comp_data) 1479 { 1480 struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL; 1481 struct ecore_spq_entry *p_ent = OSAL_NULL; 1482 struct eth_filter_cmd_header *p_header; 1483 enum _ecore_status_t rc; 1484 1485 rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, 1486 &p_ramrod, &p_ent, 1487 comp_mode, p_comp_data); 1488 if (rc != ECORE_SUCCESS) { 1489 DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); 1490 return rc; 1491 } 1492 p_header = &p_ramrod->filter_cmd_hdr; 1493 p_header->assert_on_error = p_filter_cmd->assert_on_error; 1494 1495 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1496 if (rc != ECORE_SUCCESS) { 1497 DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc); 1498 return rc; 1499 } 1500 1501 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1502 "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", 1503 (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" : 1504 ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? 1505 "REMOVE" : 1506 ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ? 1507 "MOVE" : "REPLACE")), 1508 (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" : 1509 ((p_filter_cmd->type == ECORE_FILTER_VLAN) ? 1510 "VLAN" : "MAC & VLAN"), 1511 p_ramrod->filter_cmd_hdr.cmd_cnt, 1512 p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter); 1513 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1514 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", 1515 p_filter_cmd->vport_to_add_to, 1516 p_filter_cmd->vport_to_remove_from, 1517 p_filter_cmd->mac[0], p_filter_cmd->mac[1], 1518 p_filter_cmd->mac[2], p_filter_cmd->mac[3], 1519 p_filter_cmd->mac[4], p_filter_cmd->mac[5], 1520 p_filter_cmd->vlan); 1521 1522 return ECORE_SUCCESS; 1523 } 1524 1525 /******************************************************************************* 1526 * Description: 1527 * Calculates crc 32 on a buffer 1528 * Note: crc32_length MUST be aligned to 8 1529 * Return: 1530 ******************************************************************************/ 1531 static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed) 1532 { 1533 u32 byte = 0, bit = 0, crc32_result = crc32_seed; 1534 u8 msb = 0, current_byte = 0; 1535 1536 if ((crc32_packet == OSAL_NULL) || 1537 (crc32_length == 0) || ((crc32_length % 8) != 0)) { 1538 return crc32_result; 1539 } 1540 1541 for (byte = 0; byte < crc32_length; byte++) { 1542 current_byte = crc32_packet[byte]; 1543 for (bit = 0; bit < 8; bit++) { 1544 msb = (u8)(crc32_result >> 31); 1545 crc32_result = crc32_result << 1; 1546 if (msb != (0x1 & (current_byte >> bit))) { 1547 crc32_result = crc32_result ^ CRC32_POLY; 1548 crc32_result |= 1; 1549 } 1550 } 1551 } 1552 1553 return crc32_result; 1554 } 1555 1556 static u32 ecore_crc32c_le(u32 seed, u8 *mac) 1557 { 1558 u32 packet_buf[2] = { 0 }; 1559 1560 OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6); 1561 return ecore_calc_crc32c((u8 *)packet_buf, 8, seed); 1562 } 1563 1564 u8 ecore_mcast_bin_from_mac(u8 *mac) 1565 { 1566 u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac); 1567 1568 return crc & 0xff; 1569 } 1570 1571 static enum _ecore_status_t 1572 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, 1573 struct ecore_filter_mcast *p_filter_cmd, 1574 enum spq_mode comp_mode, 1575 struct ecore_spq_comp_cb *p_comp_data) 1576 { 1577 struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; 1578 u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1579 struct ecore_spq_entry *p_ent = OSAL_NULL; 1580 struct ecore_sp_init_data init_data; 1581 u8 abs_vport_id = 0; 1582 enum _ecore_status_t rc; 1583 int i; 1584 1585 if (p_filter_cmd->opcode == ECORE_FILTER_ADD) 1586 rc = ecore_fw_vport(p_hwfn, 1587 p_filter_cmd->vport_to_add_to, 1588 &abs_vport_id); 1589 else 1590 rc = ecore_fw_vport(p_hwfn, 1591 p_filter_cmd->vport_to_remove_from, 1592 &abs_vport_id); 1593 if (rc != ECORE_SUCCESS) 1594 return rc; 1595 1596 /* Get SPQ entry */ 1597 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1598 init_data.cid = ecore_spq_get_cid(p_hwfn); 1599 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1600 init_data.comp_mode = comp_mode; 1601 init_data.p_comp_data = p_comp_data; 1602 1603 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1604 ETH_RAMROD_VPORT_UPDATE, 1605 PROTOCOLID_ETH, &init_data); 1606 if (rc != ECORE_SUCCESS) { 1607 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); 1608 return rc; 1609 } 1610 1611 p_ramrod = &p_ent->ramrod.vport_update; 1612 p_ramrod->common.update_approx_mcast_flg = 1; 1613 1614 /* explicitly clear out the entire vector */ 1615 OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 1616 0, sizeof(p_ramrod->approx_mcast.bins)); 1617 OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1618 /* filter ADD op is explicit set op and it removes 1619 * any existing filters for the vport. 1620 */ 1621 if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { 1622 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1623 u32 bit; 1624 1625 bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1626 bins[bit / 32] |= 1 << (bit % 32); 1627 } 1628 1629 /* Convert to correct endianity */ 1630 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 1631 struct vport_update_ramrod_mcast *p_ramrod_bins; 1632 1633 p_ramrod_bins = &p_ramrod->approx_mcast; 1634 p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]); 1635 } 1636 } 1637 1638 p_ramrod->common.vport_id = abs_vport_id; 1639 1640 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1641 if (rc != ECORE_SUCCESS) 1642 DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc); 1643 1644 return rc; 1645 } 1646 1647 enum _ecore_status_t 1648 ecore_filter_mcast_cmd(struct ecore_dev *p_dev, 1649 struct ecore_filter_mcast *p_filter_cmd, 1650 enum spq_mode comp_mode, 1651 struct ecore_spq_comp_cb *p_comp_data) 1652 { 1653 enum _ecore_status_t rc = ECORE_SUCCESS; 1654 int i; 1655 1656 /* only ADD and REMOVE operations are supported for multi-cast */ 1657 if ((p_filter_cmd->opcode != ECORE_FILTER_ADD && 1658 (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) || 1659 (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) { 1660 return ECORE_INVAL; 1661 } 1662 1663 for_each_hwfn(p_dev, i) { 1664 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1665 1666 if (IS_VF(p_dev)) { 1667 ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); 1668 continue; 1669 } 1670 1671 rc = ecore_sp_eth_filter_mcast(p_hwfn, 1672 p_filter_cmd, 1673 comp_mode, p_comp_data); 1674 if (rc != ECORE_SUCCESS) 1675 break; 1676 } 1677 1678 return rc; 1679 } 1680 1681 enum _ecore_status_t 1682 ecore_filter_ucast_cmd(struct ecore_dev *p_dev, 1683 struct ecore_filter_ucast *p_filter_cmd, 1684 enum spq_mode comp_mode, 1685 struct ecore_spq_comp_cb *p_comp_data) 1686 { 1687 enum _ecore_status_t rc = ECORE_SUCCESS; 1688 int i; 1689 1690 for_each_hwfn(p_dev, i) { 1691 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1692 u16 opaque_fid; 1693 1694 if (IS_VF(p_dev)) { 1695 rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); 1696 continue; 1697 } 1698 1699 opaque_fid = p_hwfn->hw_info.opaque_fid; 1700 rc = ecore_sp_eth_filter_ucast(p_hwfn, 1701 opaque_fid, 1702 p_filter_cmd, 1703 comp_mode, p_comp_data); 1704 if (rc != ECORE_SUCCESS) 1705 break; 1706 } 1707 1708 return rc; 1709 } 1710 1711 /* Statistics related code */ 1712 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn, 1713 u32 *p_addr, u32 *p_len, 1714 u16 statistics_bin) 1715 { 1716 if (IS_PF(p_hwfn->p_dev)) { 1717 *p_addr = BAR0_MAP_REG_PSDM_RAM + 1718 PSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1719 *p_len = sizeof(struct eth_pstorm_per_queue_stat); 1720 } else { 1721 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1722 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1723 1724 *p_addr = p_resp->pfdev_info.stats_info.pstats.address; 1725 *p_len = p_resp->pfdev_info.stats_info.pstats.len; 1726 } 1727 } 1728 1729 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn, 1730 struct ecore_ptt *p_ptt, 1731 struct ecore_eth_stats *p_stats, 1732 u16 statistics_bin) 1733 { 1734 struct eth_pstorm_per_queue_stat pstats; 1735 u32 pstats_addr = 0, pstats_len = 0; 1736 1737 __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, 1738 statistics_bin); 1739 1740 OSAL_MEMSET(&pstats, 0, sizeof(pstats)); 1741 ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); 1742 1743 p_stats->common.tx_ucast_bytes += 1744 HILO_64_REGPAIR(pstats.sent_ucast_bytes); 1745 p_stats->common.tx_mcast_bytes += 1746 HILO_64_REGPAIR(pstats.sent_mcast_bytes); 1747 p_stats->common.tx_bcast_bytes += 1748 HILO_64_REGPAIR(pstats.sent_bcast_bytes); 1749 p_stats->common.tx_ucast_pkts += 1750 HILO_64_REGPAIR(pstats.sent_ucast_pkts); 1751 p_stats->common.tx_mcast_pkts += 1752 HILO_64_REGPAIR(pstats.sent_mcast_pkts); 1753 p_stats->common.tx_bcast_pkts += 1754 HILO_64_REGPAIR(pstats.sent_bcast_pkts); 1755 p_stats->common.tx_err_drop_pkts += 1756 HILO_64_REGPAIR(pstats.error_drop_pkts); 1757 } 1758 1759 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn, 1760 struct ecore_ptt *p_ptt, 1761 struct ecore_eth_stats *p_stats) 1762 { 1763 struct tstorm_per_port_stat tstats; 1764 u32 tstats_addr, tstats_len; 1765 1766 if (IS_PF(p_hwfn->p_dev)) { 1767 tstats_addr = BAR0_MAP_REG_TSDM_RAM + 1768 TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); 1769 tstats_len = sizeof(struct tstorm_per_port_stat); 1770 } else { 1771 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1772 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1773 1774 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; 1775 tstats_len = p_resp->pfdev_info.stats_info.tstats.len; 1776 } 1777 1778 OSAL_MEMSET(&tstats, 0, sizeof(tstats)); 1779 ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); 1780 1781 p_stats->common.mftag_filter_discards += 1782 HILO_64_REGPAIR(tstats.mftag_filter_discard); 1783 p_stats->common.mac_filter_discards += 1784 HILO_64_REGPAIR(tstats.eth_mac_filter_discard); 1785 p_stats->common.gft_filter_drop += 1786 HILO_64_REGPAIR(tstats.eth_gft_drop_pkt); 1787 } 1788 1789 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn, 1790 u32 *p_addr, u32 *p_len, 1791 u16 statistics_bin) 1792 { 1793 if (IS_PF(p_hwfn->p_dev)) { 1794 *p_addr = BAR0_MAP_REG_USDM_RAM + 1795 USTORM_QUEUE_STAT_OFFSET(statistics_bin); 1796 *p_len = sizeof(struct eth_ustorm_per_queue_stat); 1797 } else { 1798 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1799 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1800 1801 *p_addr = p_resp->pfdev_info.stats_info.ustats.address; 1802 *p_len = p_resp->pfdev_info.stats_info.ustats.len; 1803 } 1804 } 1805 1806 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn, 1807 struct ecore_ptt *p_ptt, 1808 struct ecore_eth_stats *p_stats, 1809 u16 statistics_bin) 1810 { 1811 struct eth_ustorm_per_queue_stat ustats; 1812 u32 ustats_addr = 0, ustats_len = 0; 1813 1814 __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, 1815 statistics_bin); 1816 1817 OSAL_MEMSET(&ustats, 0, sizeof(ustats)); 1818 ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); 1819 1820 p_stats->common.rx_ucast_bytes += 1821 HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 1822 p_stats->common.rx_mcast_bytes += 1823 HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 1824 p_stats->common.rx_bcast_bytes += 1825 HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 1826 p_stats->common.rx_ucast_pkts += 1827 HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 1828 p_stats->common.rx_mcast_pkts += 1829 HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 1830 p_stats->common.rx_bcast_pkts += 1831 HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 1832 } 1833 1834 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn, 1835 u32 *p_addr, u32 *p_len, 1836 u16 statistics_bin) 1837 { 1838 if (IS_PF(p_hwfn->p_dev)) { 1839 *p_addr = BAR0_MAP_REG_MSDM_RAM + 1840 MSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1841 *p_len = sizeof(struct eth_mstorm_per_queue_stat); 1842 } else { 1843 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1844 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1845 1846 *p_addr = p_resp->pfdev_info.stats_info.mstats.address; 1847 *p_len = p_resp->pfdev_info.stats_info.mstats.len; 1848 } 1849 } 1850 1851 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn, 1852 struct ecore_ptt *p_ptt, 1853 struct ecore_eth_stats *p_stats, 1854 u16 statistics_bin) 1855 { 1856 struct eth_mstorm_per_queue_stat mstats; 1857 u32 mstats_addr = 0, mstats_len = 0; 1858 1859 __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, 1860 statistics_bin); 1861 1862 OSAL_MEMSET(&mstats, 0, sizeof(mstats)); 1863 ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); 1864 1865 p_stats->common.no_buff_discards += 1866 HILO_64_REGPAIR(mstats.no_buff_discard); 1867 p_stats->common.packet_too_big_discard += 1868 HILO_64_REGPAIR(mstats.packet_too_big_discard); 1869 p_stats->common.ttl0_discard += 1870 HILO_64_REGPAIR(mstats.ttl0_discard); 1871 p_stats->common.tpa_coalesced_pkts += 1872 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); 1873 p_stats->common.tpa_coalesced_events += 1874 HILO_64_REGPAIR(mstats.tpa_coalesced_events); 1875 p_stats->common.tpa_aborts_num += 1876 HILO_64_REGPAIR(mstats.tpa_aborts_num); 1877 p_stats->common.tpa_coalesced_bytes += 1878 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); 1879 } 1880 1881 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn, 1882 struct ecore_ptt *p_ptt, 1883 struct ecore_eth_stats *p_stats) 1884 { 1885 struct ecore_eth_stats_common *p_common = &p_stats->common; 1886 struct port_stats port_stats; 1887 int j; 1888 1889 OSAL_MEMSET(&port_stats, 0, sizeof(port_stats)); 1890 1891 ecore_memcpy_from(p_hwfn, p_ptt, &port_stats, 1892 p_hwfn->mcp_info->port_addr + 1893 OFFSETOF(struct public_port, stats), 1894 sizeof(port_stats)); 1895 1896 p_common->rx_64_byte_packets += port_stats.eth.r64; 1897 p_common->rx_65_to_127_byte_packets += port_stats.eth.r127; 1898 p_common->rx_128_to_255_byte_packets += port_stats.eth.r255; 1899 p_common->rx_256_to_511_byte_packets += port_stats.eth.r511; 1900 p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023; 1901 p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; 1902 p_common->rx_crc_errors += port_stats.eth.rfcs; 1903 p_common->rx_mac_crtl_frames += port_stats.eth.rxcf; 1904 p_common->rx_pause_frames += port_stats.eth.rxpf; 1905 p_common->rx_pfc_frames += port_stats.eth.rxpp; 1906 p_common->rx_align_errors += port_stats.eth.raln; 1907 p_common->rx_carrier_errors += port_stats.eth.rfcr; 1908 p_common->rx_oversize_packets += port_stats.eth.rovr; 1909 p_common->rx_jabbers += port_stats.eth.rjbr; 1910 p_common->rx_undersize_packets += port_stats.eth.rund; 1911 p_common->rx_fragments += port_stats.eth.rfrg; 1912 p_common->tx_64_byte_packets += port_stats.eth.t64; 1913 p_common->tx_65_to_127_byte_packets += port_stats.eth.t127; 1914 p_common->tx_128_to_255_byte_packets += port_stats.eth.t255; 1915 p_common->tx_256_to_511_byte_packets += port_stats.eth.t511; 1916 p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023; 1917 p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; 1918 p_common->tx_pause_frames += port_stats.eth.txpf; 1919 p_common->tx_pfc_frames += port_stats.eth.txpp; 1920 p_common->rx_mac_bytes += port_stats.eth.rbyte; 1921 p_common->rx_mac_uc_packets += port_stats.eth.rxuca; 1922 p_common->rx_mac_mc_packets += port_stats.eth.rxmca; 1923 p_common->rx_mac_bc_packets += port_stats.eth.rxbca; 1924 p_common->rx_mac_frames_ok += port_stats.eth.rxpok; 1925 p_common->tx_mac_bytes += port_stats.eth.tbyte; 1926 p_common->tx_mac_uc_packets += port_stats.eth.txuca; 1927 p_common->tx_mac_mc_packets += port_stats.eth.txmca; 1928 p_common->tx_mac_bc_packets += port_stats.eth.txbca; 1929 p_common->tx_mac_ctrl_frames += port_stats.eth.txcf; 1930 for (j = 0; j < 8; j++) { 1931 p_common->brb_truncates += port_stats.brb.brb_truncate[j]; 1932 p_common->brb_discards += port_stats.brb.brb_discard[j]; 1933 } 1934 1935 if (ECORE_IS_BB(p_hwfn->p_dev)) { 1936 struct ecore_eth_stats_bb *p_bb = &p_stats->bb; 1937 1938 p_bb->rx_1519_to_1522_byte_packets += 1939 port_stats.eth.u0.bb0.r1522; 1940 p_bb->rx_1519_to_2047_byte_packets += 1941 port_stats.eth.u0.bb0.r2047; 1942 p_bb->rx_2048_to_4095_byte_packets += 1943 port_stats.eth.u0.bb0.r4095; 1944 p_bb->rx_4096_to_9216_byte_packets += 1945 port_stats.eth.u0.bb0.r9216; 1946 p_bb->rx_9217_to_16383_byte_packets += 1947 port_stats.eth.u0.bb0.r16383; 1948 p_bb->tx_1519_to_2047_byte_packets += 1949 port_stats.eth.u1.bb1.t2047; 1950 p_bb->tx_2048_to_4095_byte_packets += 1951 port_stats.eth.u1.bb1.t4095; 1952 p_bb->tx_4096_to_9216_byte_packets += 1953 port_stats.eth.u1.bb1.t9216; 1954 p_bb->tx_9217_to_16383_byte_packets += 1955 port_stats.eth.u1.bb1.t16383; 1956 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec; 1957 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl; 1958 } else { 1959 struct ecore_eth_stats_ah *p_ah = &p_stats->ah; 1960 1961 p_ah->rx_1519_to_max_byte_packets += 1962 port_stats.eth.u0.ah0.r1519_to_max; 1963 p_ah->tx_1519_to_max_byte_packets = 1964 port_stats.eth.u1.ah1.t1519_to_max; 1965 } 1966 1967 p_common->link_change_count = ecore_rd(p_hwfn, p_ptt, 1968 p_hwfn->mcp_info->port_addr + 1969 OFFSETOF(struct public_port, 1970 link_change_count)); 1971 } 1972 1973 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, 1974 struct ecore_ptt *p_ptt, 1975 struct ecore_eth_stats *stats, 1976 u16 statistics_bin, bool b_get_port_stats) 1977 { 1978 __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); 1979 __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); 1980 __ecore_get_vport_tstats(p_hwfn, p_ptt, stats); 1981 __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); 1982 1983 #ifndef ASIC_ONLY 1984 /* Avoid getting PORT stats for emulation. */ 1985 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) 1986 return; 1987 #endif 1988 1989 if (b_get_port_stats && p_hwfn->mcp_info) 1990 __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats); 1991 } 1992 1993 static void _ecore_get_vport_stats(struct ecore_dev *p_dev, 1994 struct ecore_eth_stats *stats) 1995 { 1996 u8 fw_vport = 0; 1997 int i; 1998 1999 OSAL_MEMSET(stats, 0, sizeof(*stats)); 2000 2001 for_each_hwfn(p_dev, i) { 2002 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2003 struct ecore_ptt *p_ptt = IS_PF(p_dev) ? 2004 ecore_ptt_acquire(p_hwfn) : OSAL_NULL; 2005 bool b_get_port_stats; 2006 2007 if (IS_PF(p_dev)) { 2008 /* The main vport index is relative first */ 2009 if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) { 2010 DP_ERR(p_hwfn, "No vport available!\n"); 2011 goto out; 2012 } 2013 } 2014 2015 if (IS_PF(p_dev) && !p_ptt) { 2016 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 2017 continue; 2018 } 2019 2020 b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn); 2021 __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, 2022 b_get_port_stats); 2023 2024 out: 2025 if (IS_PF(p_dev) && p_ptt) 2026 ecore_ptt_release(p_hwfn, p_ptt); 2027 } 2028 } 2029 2030 void ecore_get_vport_stats(struct ecore_dev *p_dev, 2031 struct ecore_eth_stats *stats) 2032 { 2033 u32 i; 2034 2035 if (!p_dev) { 2036 OSAL_MEMSET(stats, 0, sizeof(*stats)); 2037 return; 2038 } 2039 2040 _ecore_get_vport_stats(p_dev, stats); 2041 2042 if (!p_dev->reset_stats) 2043 return; 2044 2045 /* Reduce the statistics baseline */ 2046 for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++) 2047 ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i]; 2048 } 2049 2050 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ 2051 void ecore_reset_vport_stats(struct ecore_dev *p_dev) 2052 { 2053 int i; 2054 2055 for_each_hwfn(p_dev, i) { 2056 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2057 struct eth_mstorm_per_queue_stat mstats; 2058 struct eth_ustorm_per_queue_stat ustats; 2059 struct eth_pstorm_per_queue_stat pstats; 2060 struct ecore_ptt *p_ptt = IS_PF(p_dev) ? 2061 ecore_ptt_acquire(p_hwfn) : OSAL_NULL; 2062 u32 addr = 0, len = 0; 2063 2064 if (IS_PF(p_dev) && !p_ptt) { 2065 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 2066 continue; 2067 } 2068 2069 OSAL_MEMSET(&mstats, 0, sizeof(mstats)); 2070 __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); 2071 ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); 2072 2073 OSAL_MEMSET(&ustats, 0, sizeof(ustats)); 2074 __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); 2075 ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); 2076 2077 OSAL_MEMSET(&pstats, 0, sizeof(pstats)); 2078 __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); 2079 ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); 2080 2081 if (IS_PF(p_dev)) 2082 ecore_ptt_release(p_hwfn, p_ptt); 2083 } 2084 2085 /* PORT statistics are not necessarily reset, so we need to 2086 * read and create a baseline for future statistics. 2087 * Link change stat is maintained by MFW, return its value as is. 2088 */ 2089 if (!p_dev->reset_stats) 2090 DP_INFO(p_dev, "Reset stats not allocated\n"); 2091 else { 2092 _ecore_get_vport_stats(p_dev, p_dev->reset_stats); 2093 p_dev->reset_stats->common.link_change_count = 0; 2094 } 2095 } 2096 2097 static enum gft_profile_type 2098 ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode) 2099 { 2100 if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE) 2101 return GFT_PROFILE_TYPE_4_TUPLE; 2102 2103 if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST) 2104 return GFT_PROFILE_TYPE_IP_DST_ADDR; 2105 2106 if (mode == ECORE_FILTER_CONFIG_MODE_TUNN_TYPE) 2107 return GFT_PROFILE_TYPE_TUNNEL_TYPE; 2108 2109 if (mode == ECORE_FILTER_CONFIG_MODE_IP_SRC) 2110 return GFT_PROFILE_TYPE_IP_SRC_ADDR; 2111 2112 return GFT_PROFILE_TYPE_L4_DST_PORT; 2113 } 2114 2115 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn, 2116 struct ecore_ptt *p_ptt, 2117 struct ecore_arfs_config_params *p_cfg_params) 2118 { 2119 if (OSAL_GET_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits)) 2120 return; 2121 2122 if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) { 2123 ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 2124 p_cfg_params->tcp, 2125 p_cfg_params->udp, 2126 p_cfg_params->ipv4, 2127 p_cfg_params->ipv6, 2128 ecore_arfs_mode_to_hsi(p_cfg_params->mode)); 2129 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2130 "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n", 2131 p_cfg_params->tcp ? "Enable" : "Disable", 2132 p_cfg_params->udp ? "Enable" : "Disable", 2133 p_cfg_params->ipv4 ? "Enable" : "Disable", 2134 p_cfg_params->ipv6 ? "Enable" : "Disable"); 2135 } else { 2136 ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2137 } 2138 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %d\n", 2139 (int)p_cfg_params->mode); 2140 } 2141 2142 enum _ecore_status_t 2143 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, 2144 struct ecore_spq_comp_cb *p_cb, 2145 struct ecore_ntuple_filter_params *p_params) 2146 { 2147 struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL; 2148 struct ecore_spq_entry *p_ent = OSAL_NULL; 2149 struct ecore_sp_init_data init_data; 2150 u16 abs_rx_q_id = 0; 2151 u8 abs_vport_id = 0; 2152 enum _ecore_status_t rc = ECORE_NOTIMPL; 2153 2154 /* Get SPQ entry */ 2155 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 2156 init_data.cid = ecore_spq_get_cid(p_hwfn); 2157 2158 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 2159 2160 if (p_cb) { 2161 init_data.comp_mode = ECORE_SPQ_MODE_CB; 2162 init_data.p_comp_data = p_cb; 2163 } else { 2164 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 2165 } 2166 2167 rc = ecore_sp_init_request(p_hwfn, &p_ent, 2168 ETH_RAMROD_GFT_UPDATE_FILTER, 2169 PROTOCOLID_ETH, &init_data); 2170 if (rc != ECORE_SUCCESS) 2171 return rc; 2172 2173 p_ramrod = &p_ent->ramrod.rx_update_gft; 2174 2175 DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); 2176 p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(p_params->length); 2177 2178 if (p_params->b_is_drop) { 2179 p_ramrod->vport_id = OSAL_CPU_TO_LE16(ETH_GFT_TRASHCAN_VPORT); 2180 } else { 2181 rc = ecore_fw_vport(p_hwfn, p_params->vport_id, 2182 &abs_vport_id); 2183 if (rc) 2184 return rc; 2185 2186 if (p_params->qid != ECORE_RFS_NTUPLE_QID_RSS) { 2187 rc = ecore_fw_l2_queue(p_hwfn, p_params->qid, 2188 &abs_rx_q_id); 2189 if (rc) 2190 return rc; 2191 2192 p_ramrod->rx_qid_valid = 1; 2193 p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id); 2194 } 2195 2196 p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id); 2197 } 2198 2199 p_ramrod->flow_id_valid = 0; 2200 p_ramrod->flow_id = 0; 2201 2202 p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER 2203 : GFT_DELETE_FILTER; 2204 2205 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2206 "V[%0x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n", 2207 abs_vport_id, abs_rx_q_id, 2208 p_params->b_is_add ? "Adding" : "Removing", 2209 (unsigned long)p_params->addr, p_params->length); 2210 2211 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 2212 } 2213 2214 enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn, 2215 struct ecore_ptt *p_ptt, 2216 struct ecore_queue_cid *p_cid, 2217 u16 *p_rx_coal) 2218 { 2219 u32 coalesce, address, is_valid; 2220 struct cau_sb_entry sb_entry; 2221 u8 timer_res; 2222 enum _ecore_status_t rc; 2223 2224 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2225 p_cid->sb_igu_id * sizeof(u64), 2226 (u64)(osal_uintptr_t)&sb_entry, 2, 2227 OSAL_NULL /* default parameters */); 2228 if (rc != ECORE_SUCCESS) { 2229 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2230 return rc; 2231 } 2232 2233 timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); 2234 2235 address = BAR0_MAP_REG_USDM_RAM + 2236 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 2237 coalesce = ecore_rd(p_hwfn, p_ptt, address); 2238 2239 is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); 2240 if (!is_valid) 2241 return ECORE_INVAL; 2242 2243 coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); 2244 *p_rx_coal = (u16)(coalesce << timer_res); 2245 2246 return ECORE_SUCCESS; 2247 } 2248 2249 enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn, 2250 struct ecore_ptt *p_ptt, 2251 struct ecore_queue_cid *p_cid, 2252 u16 *p_tx_coal) 2253 { 2254 u32 coalesce, address, is_valid; 2255 struct cau_sb_entry sb_entry; 2256 u8 timer_res; 2257 enum _ecore_status_t rc; 2258 2259 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2260 p_cid->sb_igu_id * sizeof(u64), 2261 (u64)(osal_uintptr_t)&sb_entry, 2, 2262 OSAL_NULL /* default parameters */); 2263 if (rc != ECORE_SUCCESS) { 2264 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2265 return rc; 2266 } 2267 2268 timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); 2269 2270 address = BAR0_MAP_REG_XSDM_RAM + 2271 XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 2272 coalesce = ecore_rd(p_hwfn, p_ptt, address); 2273 2274 is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); 2275 if (!is_valid) 2276 return ECORE_INVAL; 2277 2278 coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); 2279 *p_tx_coal = (u16)(coalesce << timer_res); 2280 2281 return ECORE_SUCCESS; 2282 } 2283 2284 enum _ecore_status_t 2285 ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal, 2286 void *handle) 2287 { 2288 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle; 2289 enum _ecore_status_t rc = ECORE_SUCCESS; 2290 struct ecore_ptt *p_ptt; 2291 2292 if (IS_VF(p_hwfn->p_dev)) { 2293 rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); 2294 if (rc != ECORE_SUCCESS) 2295 DP_NOTICE(p_hwfn, false, 2296 "Unable to read queue calescing\n"); 2297 2298 return rc; 2299 } 2300 2301 p_ptt = ecore_ptt_acquire(p_hwfn); 2302 if (!p_ptt) 2303 return ECORE_AGAIN; 2304 2305 if (p_cid->b_is_rx) { 2306 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); 2307 if (rc != ECORE_SUCCESS) 2308 goto out; 2309 } else { 2310 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); 2311 if (rc != ECORE_SUCCESS) 2312 goto out; 2313 } 2314 2315 out: 2316 ecore_ptt_release(p_hwfn, p_ptt); 2317 2318 return rc; 2319 } 2320 2321 enum _ecore_status_t 2322 ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn, 2323 struct ecore_ptt *p_ptt, 2324 struct ecore_queue_cid *p_cid, u32 rate) 2325 { 2326 u16 rl_id; 2327 u8 vport; 2328 2329 vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id); 2330 2331 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 2332 "About to rate limit qm vport %d for queue %d with rate %d\n", 2333 vport, p_cid->rel.queue_id, rate); 2334 2335 rl_id = vport; /* The "rl_id" is set as the "vport_id" */ 2336 return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, rate); 2337 } 2338 2339 #define RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT 100 2340 #define RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US 1 2341 2342 enum _ecore_status_t 2343 ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn, 2344 u8 vport_id, 2345 u8 ind_table_index, 2346 u16 ind_table_value) 2347 { 2348 struct eth_tstorm_rss_update_data update_data = { 0 }; 2349 void OSAL_IOMEM *addr = OSAL_NULL; 2350 enum _ecore_status_t rc; 2351 u8 abs_vport_id; 2352 u32 cnt = 0; 2353 2354 OSAL_BUILD_BUG_ON(sizeof(update_data) != sizeof(u64)); 2355 2356 rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); 2357 if (rc != ECORE_SUCCESS) 2358 return rc; 2359 2360 addr = (u8 *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + 2361 TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id); 2362 2363 *(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr); 2364 2365 for (cnt = 0; update_data.valid && 2366 cnt < RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT; cnt++) { 2367 OSAL_UDELAY(RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US); 2368 *(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr); 2369 } 2370 2371 if (update_data.valid) { 2372 DP_NOTICE(p_hwfn, true, 2373 "rss update valid status is not clear! valid=0x%x vport id=%d ind_Table_idx=%d ind_table_value=%d.\n", 2374 update_data.valid, vport_id, ind_table_index, 2375 ind_table_value); 2376 2377 return ECORE_AGAIN; 2378 } 2379 2380 update_data.valid = 1; 2381 update_data.ind_table_index = ind_table_index; 2382 update_data.ind_table_value = ind_table_value; 2383 update_data.vport_id = abs_vport_id; 2384 2385 DIRECT_REG_WR64(p_hwfn, addr, *(u64 *)(&update_data)); 2386 2387 return ECORE_SUCCESS; 2388 } 2389