/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dcbx.h"
#include "ecore_l2.h"

/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
 * registers involved are not split and thus configuration is a race where
 * some of the PFs' configuration might be lost.
 * Eventually, this needs to move into a MFW-covered HW-lock as arbitration
 * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where
 * there's more than a single compiled ecore component in system].
 */
static osal_spinlock_t qm_lock;
static u32 qm_lock_ref_cnt;

/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which
 * represent doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath
 * spq, etc). Each entity needs to register with the mechanism and provide
 * the parameters describing its doorbell, including a location where the
 * last used doorbell data can be found. The doorbell execute function will
 * traverse the list and doorbell all of the registered entries.
 */
struct ecore_db_recovery_entry {
	osal_list_entry_t list_entry;
	void OSAL_IOMEM *db_addr;
	void *db_data;
	enum ecore_db_rec_width db_width;
	enum ecore_db_rec_space db_space;
	u8 hwfn_idx;
};

/* display a single doorbell recovery entry */
void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
				struct ecore_db_recovery_entry *db_entry,
				const char *action)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
		   action, db_entry, db_entry->db_addr, db_entry->db_data,
		   db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
		   db_entry->db_space == DB_REC_USER ? "user" : "kernel",
		   db_entry->hwfn_idx);
}
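/* Usage sketch (illustrative only, not an actual call site in this file;
 * the queue pointer and field names below are hypothetical): a doorbelling
 * entity registers its doorbell address and the location of its last-used
 * doorbell data once, and removes the entry on teardown:
 *
 *	rc = ecore_db_recovery_add(p_hwfn->p_dev, p_queue->db_addr,
 *				   &p_queue->db_data, DB_REC_WIDTH_32B,
 *				   DB_REC_KERNEL);
 *	...
 *	ecore_db_recovery_del(p_hwfn->p_dev, p_queue->db_addr,
 *			      &p_queue->db_data);
 *
 * On a doorbell drop event, ecore_db_recovery_execute() walks the list and
 * re-issues every registered doorbell from its saved db_data.
 */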
/* doorbell address sanity (address within doorbell bar range) */
bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
			 void *db_data)
{
	/* make sure doorbell address is within the doorbell bar */
	if (db_addr < p_dev->doorbells || (u8 *)db_addr >
	    (u8 *)p_dev->doorbells + p_dev->db_size) {
		OSAL_WARN(true,
			  "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
			  db_addr, p_dev->doorbells,
			  (u8 *)p_dev->doorbells + p_dev->db_size);
		return false;
	}

	/* make sure doorbell data pointer is not null */
	if (!db_data) {
		OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data);
		return false;
	}

	return true;
}

/* find hwfn according to the doorbell address */
struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
					  void OSAL_IOMEM *db_addr)
{
	struct ecore_hwfn *p_hwfn;

	/* In CMT the doorbell bar is split down the middle between engine 0
	 * and engine 1
	 */
	if (ECORE_IS_CMT(p_dev))
		p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
			 &p_dev->hwfns[0] : &p_dev->hwfns[1];
	else
		p_hwfn = ECORE_LEADING_HWFN(p_dev);

	return p_hwfn;
}

/* add a new entry to the doorbell recovery mechanism */
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data,
					   enum ecore_db_rec_width db_width,
					   enum ecore_db_rec_space db_space)
{
	struct ecore_db_recovery_entry *db_entry;
	struct ecore_hwfn *p_hwfn;

	/* short-circuit VFs, for now */
	if (IS_VF(p_dev)) {
		DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
		return ECORE_SUCCESS;
	}

	/* sanitize doorbell address */
	if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
		return ECORE_INVAL;

	/* obtain hwfn from doorbell address */
	p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);

	/* create entry */
	db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry));
	if (!db_entry) {
		DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n");
		return ECORE_NOMEM;
	}

	/* populate entry */
	db_entry->db_addr = db_addr;
	db_entry->db_data = db_data;
	db_entry->db_width = db_width;
	db_entry->db_space = db_space;
	db_entry->hwfn_idx = p_hwfn->my_id;

	/* display */
	ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_PUSH_TAIL(&db_entry->list_entry,
			    &p_hwfn->db_recovery_info.list);
	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);

	return ECORE_SUCCESS;
}

/* remove an entry from the doorbell recovery mechanism */
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;

	/* short-circuit VFs, for now */
	if (IS_VF(p_dev)) {
		DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
		return ECORE_SUCCESS;
	}

	/* sanitize doorbell address */
	if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
		return ECORE_INVAL;

	/* obtain hwfn from doorbell address */
	p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
				 &p_hwfn->db_recovery_info.list,
				 list_entry,
				 struct ecore_db_recovery_entry) {
		/* search according to db_data addr since db_addr is not unique
		 * (roce)
		 */
		if (db_entry->db_data == db_data) {
			ecore_db_recovery_dp_entry(p_hwfn, db_entry,
						   "Deleting");
			OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
					       &p_hwfn->db_recovery_info.list);
			rc = ECORE_SUCCESS;
			break;
		}
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);

	if (rc == ECORE_INVAL)
		/*OSAL_WARN(true,*/
		DP_NOTICE(p_hwfn, false,
			  "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
			  db_data, db_addr);
	else
		OSAL_FREE(p_dev, db_entry);

	return rc;
}

/* initialize the doorbell recovery mechanism */
enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");

	/* make sure db_size was set in p_dev */
	if (!p_hwfn->p_dev->db_size) {
		DP_ERR(p_hwfn->p_dev, "db_size not set\n");
		return ECORE_INVAL;
	}

	OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
		return ECORE_NOMEM;
#endif
	OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
	p_hwfn->db_recovery_info.db_recovery_counter = 0;

	return ECORE_SUCCESS;
}

/* destroy the doorbell recovery mechanism */
void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
		DP_VERBOSE(p_hwfn, false, "Doorbell recovery teardown found the doorbell recovery list not empty (expected in a disorderly driver unload, e.g. recovery; otherwise some flow probably forgot to call db_recovery_del). Preparing to purge the doorbell recovery list...\n");
		while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
			db_entry = OSAL_LIST_FIRST_ENTRY(
					&p_hwfn->db_recovery_info.list,
					struct ecore_db_recovery_entry,
					list_entry);
			ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
			OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
					       &p_hwfn->db_recovery_info.list);
			OSAL_FREE(p_hwfn->p_dev, db_entry);
		}
	}
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
#endif
	p_hwfn->db_recovery_info.db_recovery_counter = 0;
}

/* print the content of the doorbell recovery mechanism */
void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;

	DP_NOTICE(p_hwfn, false,
		  "Displaying doorbell recovery database. Counter was %d\n",
		  p_hwfn->db_recovery_info.db_recovery_counter);

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
				 &p_hwfn->db_recovery_info.list,
				 list_entry,
				 struct ecore_db_recovery_entry) {
		ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
}

/* ring the doorbell of a single doorbell recovery entry */
void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
			    struct ecore_db_recovery_entry *db_entry,
			    enum ecore_db_rec_exec db_exec)
{
	/* Print according to width */
	if (db_entry->db_width == DB_REC_WIDTH_32B)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n",
			   db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
			   db_entry->db_addr, *(u32 *)db_entry->db_data);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %lx\n",
			   db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
			   db_entry->db_addr,
			   *(unsigned long *)(db_entry->db_data));

	/* Sanity */
	if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
				 db_entry->db_data))
		return;

	/* Flush the write-combining buffer. Since there are multiple
	 * doorbelling entities using the same address, if we don't flush, a
	 * transaction could be lost.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	/* Ring the doorbell */
	if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
		if (db_entry->db_width == DB_REC_WIDTH_32B)
			DIRECT_REG_WR(p_hwfn, db_entry->db_addr,
				      *(u32 *)(db_entry->db_data));
		else
			DIRECT_REG_WR64(p_hwfn, db_entry->db_addr,
					*(u64 *)(db_entry->db_data));
	}

	/* Flush the write-combining buffer. The next doorbell may come from a
	 * different entity to the same address...
	 */
	OSAL_WMB(p_hwfn->p_dev);
}

/* traverse the doorbell recovery entry list and ring all the doorbells */
void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
			       enum ecore_db_rec_exec db_exec)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;

	if (db_exec != DB_REC_ONCE) {
		DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
			  p_hwfn->db_recovery_info.db_recovery_counter);

		/* track amount of times recovery was executed */
		p_hwfn->db_recovery_info.db_recovery_counter++;
	}

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
				 &p_hwfn->db_recovery_info.list,
				 list_entry,
				 struct ecore_db_recovery_entry) {
		ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
		if (db_exec == DB_REC_ONCE)
			break;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
}
/******************** Doorbell Recovery end ****************/

/* Configurable */
#define ECORE_MIN_DPIS		(4)	/* The minimal number of DPIs required
					 * to load the driver. The number was
					 * arbitrarily set.
					 */

/* Derived */
#define ECORE_MIN_PWM_REGION	(ECORE_WID_SIZE * ECORE_MIN_DPIS)

static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_hw_bar_size(p_hwfn, bar_id);

	val = ecore_rd(p_hwfn, p_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* The above registers were updated in the past only in CMT mode. Since
	 * they were found to be useful, the MFW started updating them from
	 * 8.7.7.0. In older MFW versions they are set to 0 which means
	 * disabled.
	 */
	if (ECORE_IS_CMT(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		val = (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		val = 512 * 1024;
	}

	return val;
}

void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module, u8 dp_level, void *dp_ctx)
{
	u32 i;

	p_dev->dp_level = dp_level;
	p_dev->dp_module = dp_module;
	p_dev->dp_ctx = dp_ctx;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
		p_hwfn->dp_ctx = dp_ctx;
	}
}

enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->p_dev = p_dev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

#ifdef CONFIG_ECORE_LOCK_ALLOC
		if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
			goto handle_err;
#endif
		OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
	}

	/* hwfn 0 is always active */
	p_dev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 (may be overridden later) */
	p_dev->cache_shift = 7;
	return ECORE_SUCCESS;
#ifdef CONFIG_ECORE_LOCK_ALLOC
handle_err:
	while (--i) {
		struct ecore_hwfn *p_hwfn = OSAL_NULL;

		p_hwfn = &p_dev->hwfns[i];
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
	}
	return ECORE_NOMEM;
#endif
}

static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
	OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
}

void ecore_resc_free(struct ecore_dev *p_dev)
{
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i)
			ecore_l2_free(&p_dev->hwfns[i]);
		return;
	}

	OSAL_FREE(p_dev, p_dev->fw_data);

	OSAL_FREE(p_dev, p_dev->reset_stats);

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		ecore_cxt_mngr_free(p_hwfn);
		ecore_qm_info_free(p_hwfn);
		ecore_spq_free(p_hwfn);
		ecore_eq_free(p_hwfn);
		ecore_consq_free(p_hwfn);
		ecore_int_free(p_hwfn);
		ecore_iov_free(p_hwfn);
		ecore_l2_free(p_hwfn);
		ecore_dmae_info_free(p_hwfn);
		ecore_dcbx_info_free(p_hwfn);
		/* @@@TBD Flush work-queue ? */

		/* destroy doorbell recovery mechanism */
		ecore_db_recovery_teardown(p_hwfn);
	}
}

/******************** QM initialization *******************/

/* bitmaps for indicating active traffic classes.
 * Special case for Arrowhead 4 port
 */
/* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
#define ACTIVE_TCS_BMAP 0x9f
/* 0..3 actually used, OOO and high priority stuff all use 3 */
#define ACTIVE_TCS_BMAP_4PORT_K2 0xf

/* determines the physical queue flags for a given PF.
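 * For example (informal, derived from the switch below): an ETH PF with
 * SR-IOV enabled and no pacing ends up with
 * PQ_FLAGS_LB | PQ_FLAGS_MCOS | PQ_FLAGS_VFS, while an iSCSI PF adds
 * PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD on top of PQ_FLAGS_LB.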
*/ 504 static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) 505 { 506 u32 flags; 507 508 /* common flags */ 509 flags = PQ_FLAGS_LB; 510 511 /* feature flags */ 512 if (IS_ECORE_SRIOV(p_hwfn->p_dev)) 513 flags |= PQ_FLAGS_VFS; 514 if (IS_ECORE_PACING(p_hwfn)) 515 flags |= PQ_FLAGS_RLS; 516 517 /* protocol flags */ 518 switch (p_hwfn->hw_info.personality) { 519 case ECORE_PCI_ETH: 520 if (!IS_ECORE_PACING(p_hwfn)) 521 flags |= PQ_FLAGS_MCOS; 522 break; 523 case ECORE_PCI_FCOE: 524 flags |= PQ_FLAGS_OFLD; 525 break; 526 case ECORE_PCI_ISCSI: 527 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 528 break; 529 case ECORE_PCI_ETH_ROCE: 530 flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 531 if (!IS_ECORE_PACING(p_hwfn)) 532 flags |= PQ_FLAGS_MCOS; 533 break; 534 case ECORE_PCI_ETH_IWARP: 535 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 536 if (!IS_ECORE_PACING(p_hwfn)) 537 flags |= PQ_FLAGS_MCOS; 538 break; 539 default: 540 DP_ERR(p_hwfn, "unknown personality %d\n", 541 p_hwfn->hw_info.personality); 542 return 0; 543 } 544 return flags; 545 } 546 547 /* Getters for resource amounts necessary for qm initialization */ 548 u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn) 549 { 550 return p_hwfn->hw_info.num_hw_tc; 551 } 552 553 u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn) 554 { 555 return IS_ECORE_SRIOV(p_hwfn->p_dev) ? 556 p_hwfn->p_dev->p_iov_info->total_vfs : 0; 557 } 558 559 #define NUM_DEFAULT_RLS 1 560 561 u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn) 562 { 563 u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 564 565 /* @DPDK */ 566 /* num RLs can't exceed resource amount of rls or vports or the 567 * dcqcn qps 568 */ 569 num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), 570 (u16)RESC_NUM(p_hwfn, ECORE_VPORT)); 571 572 /* make sure after we reserve the default and VF rls we'll have 573 * something left 574 */ 575 if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) { 576 DP_NOTICE(p_hwfn, false, 577 "no rate limiters left for PF rate limiting" 578 " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs); 579 return 0; 580 } 581 582 /* subtract rls necessary for VFs and one default one for the PF */ 583 num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 584 585 return num_pf_rls; 586 } 587 588 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn) 589 { 590 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 591 592 /* all pqs share the same vport (hence the 1 below), except for vfs 593 * and pf_rl pqs 594 */ 595 return (!!(PQ_FLAGS_RLS & pq_flags)) * 596 ecore_init_qm_get_num_pf_rls(p_hwfn) + 597 (!!(PQ_FLAGS_VFS & pq_flags)) * 598 ecore_init_qm_get_num_vfs(p_hwfn) + 1; 599 } 600 601 /* calc amount of PQs according to the requested flags */ 602 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn) 603 { 604 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 605 606 return (!!(PQ_FLAGS_RLS & pq_flags)) * 607 ecore_init_qm_get_num_pf_rls(p_hwfn) + 608 (!!(PQ_FLAGS_MCOS & pq_flags)) * 609 ecore_init_qm_get_num_tcs(p_hwfn) + 610 (!!(PQ_FLAGS_LB & pq_flags)) + 611 (!!(PQ_FLAGS_OOO & pq_flags)) + 612 (!!(PQ_FLAGS_ACK & pq_flags)) + 613 (!!(PQ_FLAGS_OFLD & pq_flags)) + 614 (!!(PQ_FLAGS_VFS & pq_flags)) * 615 ecore_init_qm_get_num_vfs(p_hwfn); 616 } 617 618 /* initialize the top level QM params */ 619 static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn) 620 { 621 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 622 bool four_port; 623 624 /* pq and vport bases for this PF */ 625 qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); 626 qm_info->start_vport 
= (u8)RESC_START(p_hwfn, ECORE_VPORT); 627 628 /* rate limiting and weighted fair queueing are always enabled */ 629 qm_info->vport_rl_en = 1; 630 qm_info->vport_wfq_en = 1; 631 632 /* TC config is different for AH 4 port */ 633 four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2; 634 635 /* in AH 4 port we have fewer TCs per port */ 636 qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : 637 NUM_OF_PHYS_TCS; 638 639 /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 640 * 4 otherwise 641 */ 642 if (!qm_info->ooo_tc) 643 qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : 644 DCBX_TCP_OOO_TC; 645 } 646 647 /* initialize qm vport params */ 648 static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn) 649 { 650 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 651 u8 i; 652 653 /* all vports participate in weighted fair queueing */ 654 for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++) 655 qm_info->qm_vport_params[i].vport_wfq = 1; 656 } 657 658 /* initialize qm port params */ 659 static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn) 660 { 661 /* Initialize qm port parameters */ 662 u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine; 663 664 /* indicate how ooo and high pri traffic is dealt with */ 665 active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 666 ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; 667 668 for (i = 0; i < num_ports; i++) { 669 struct init_qm_port_params *p_qm_port = 670 &p_hwfn->qm_info.qm_port_params[i]; 671 672 p_qm_port->active = 1; 673 p_qm_port->active_phys_tcs = active_phys_tcs; 674 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports; 675 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 676 } 677 } 678 679 /* Reset the params which must be reset for qm init. QM init may be called as 680 * a result of flows other than driver load (e.g. dcbx renegotiation). Other 681 * params may be affected by the init but would simply recalculate to the same 682 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 683 * affected as these amounts stay the same. 684 */ 685 static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn) 686 { 687 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 688 689 qm_info->num_pqs = 0; 690 qm_info->num_vports = 0; 691 qm_info->num_pf_rls = 0; 692 qm_info->num_vf_pqs = 0; 693 qm_info->first_vf_pq = 0; 694 qm_info->first_mcos_pq = 0; 695 qm_info->first_rl_pq = 0; 696 } 697 698 static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn) 699 { 700 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 701 702 qm_info->num_vports++; 703 704 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 705 DP_ERR(p_hwfn, 706 "vport overflow! qm_info->num_vports %d," 707 " qm_init_get_num_vports() %d\n", 708 qm_info->num_vports, 709 ecore_init_qm_get_num_vports(p_hwfn)); 710 } 711 712 /* initialize a single pq and manage qm_info resources accounting. 713 * The pq_init_flags param determines whether the PQ is rate limited 714 * (for VF or PF) 715 * and whether a new vport is allocated to the pq or not (i.e. 
vport will be 716 * shared) 717 */ 718 719 /* flags for pq init */ 720 #define PQ_INIT_SHARE_VPORT (1 << 0) 721 #define PQ_INIT_PF_RL (1 << 1) 722 #define PQ_INIT_VF_RL (1 << 2) 723 724 /* defines for pq init */ 725 #define PQ_INIT_DEFAULT_WRR_GROUP 1 726 #define PQ_INIT_DEFAULT_TC 0 727 #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 728 729 static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, 730 struct ecore_qm_info *qm_info, 731 u8 tc, u32 pq_init_flags) 732 { 733 u16 pq_idx = qm_info->num_pqs, max_pq = 734 ecore_init_qm_get_num_pqs(p_hwfn); 735 736 if (pq_idx > max_pq) 737 DP_ERR(p_hwfn, 738 "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); 739 740 /* init pq params */ 741 qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id; 742 qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + 743 qm_info->num_vports; 744 qm_info->qm_pq_params[pq_idx].tc_id = tc; 745 qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 746 qm_info->qm_pq_params[pq_idx].rl_valid = 747 (pq_init_flags & PQ_INIT_PF_RL || 748 pq_init_flags & PQ_INIT_VF_RL); 749 750 /* qm params accounting */ 751 qm_info->num_pqs++; 752 if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 753 qm_info->num_vports++; 754 755 if (pq_init_flags & PQ_INIT_PF_RL) 756 qm_info->num_pf_rls++; 757 758 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 759 DP_ERR(p_hwfn, 760 "vport overflow! qm_info->num_vports %d," 761 " qm_init_get_num_vports() %d\n", 762 qm_info->num_vports, 763 ecore_init_qm_get_num_vports(p_hwfn)); 764 765 if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) 766 DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d," 767 " qm_init_get_num_pf_rls() %d\n", 768 qm_info->num_pf_rls, 769 ecore_init_qm_get_num_pf_rls(p_hwfn)); 770 } 771 772 /* get pq index according to PQ_FLAGS */ 773 static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn, 774 u32 pq_flags) 775 { 776 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 777 778 /* Can't have multiple flags set here */ 779 if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, 780 sizeof(pq_flags)) > 1) 781 goto err; 782 783 switch (pq_flags) { 784 case PQ_FLAGS_RLS: 785 return &qm_info->first_rl_pq; 786 case PQ_FLAGS_MCOS: 787 return &qm_info->first_mcos_pq; 788 case PQ_FLAGS_LB: 789 return &qm_info->pure_lb_pq; 790 case PQ_FLAGS_OOO: 791 return &qm_info->ooo_pq; 792 case PQ_FLAGS_ACK: 793 return &qm_info->pure_ack_pq; 794 case PQ_FLAGS_OFLD: 795 return &qm_info->offload_pq; 796 case PQ_FLAGS_VFS: 797 return &qm_info->first_vf_pq; 798 default: 799 goto err; 800 } 801 802 err: 803 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 804 return OSAL_NULL; 805 } 806 807 /* save pq index in qm info */ 808 static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn, 809 u32 pq_flags, u16 pq_val) 810 { 811 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 812 813 *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 814 } 815 816 /* get tx pq index, with the PQ TX base already set (ready for context init) */ 817 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags) 818 { 819 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 820 821 return *base_pq_idx + CM_TX_PQ_BASE; 822 } 823 824 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc) 825 { 826 u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn); 827 828 if (tc > max_tc) 829 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 830 831 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 832 } 833 834 u16 
ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) 835 { 836 u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn); 837 838 if (vf > max_vf) 839 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 840 841 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 842 } 843 844 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) 845 { 846 u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); 847 848 if (rl > max_rl) 849 DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); 850 851 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 852 } 853 854 u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) 855 { 856 u16 start_pq, pq, qm_pq_idx; 857 858 pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl); 859 start_pq = p_hwfn->qm_info.start_pq; 860 qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE; 861 862 if (qm_pq_idx > p_hwfn->qm_info.num_pqs) { 863 DP_ERR(p_hwfn, 864 "qm_pq_idx %d must be smaller than %d\n", 865 qm_pq_idx, p_hwfn->qm_info.num_pqs); 866 } 867 868 return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id; 869 } 870 871 /* Functions for creating specific types of pqs */ 872 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) 873 { 874 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 875 876 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 877 return; 878 879 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 880 ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 881 } 882 883 static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn) 884 { 885 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 886 887 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 888 return; 889 890 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 891 ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 892 } 893 894 static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn) 895 { 896 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 897 898 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 899 return; 900 901 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 902 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 903 } 904 905 static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn) 906 { 907 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 908 909 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 910 return; 911 912 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 913 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 914 } 915 916 static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn) 917 { 918 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 919 u8 tc_idx; 920 921 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 922 return; 923 924 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 925 for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++) 926 ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 927 } 928 929 static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn) 930 { 931 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 932 u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 933 934 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 935 return; 936 937 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 938 939 qm_info->num_vf_pqs = num_vfs; 940 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 941 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, 942 PQ_INIT_VF_RL); 943 } 944 945 static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn) 946 { 947 u16 pf_rls_idx, 
num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn); 948 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 949 950 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 951 return; 952 953 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 954 for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 955 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, 956 PQ_INIT_PF_RL); 957 } 958 959 static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn) 960 { 961 /* rate limited pqs, must come first (FW assumption) */ 962 ecore_init_qm_rl_pqs(p_hwfn); 963 964 /* pqs for multi cos */ 965 ecore_init_qm_mcos_pqs(p_hwfn); 966 967 /* pure loopback pq */ 968 ecore_init_qm_lb_pq(p_hwfn); 969 970 /* out of order pq */ 971 ecore_init_qm_ooo_pq(p_hwfn); 972 973 /* pure ack pq */ 974 ecore_init_qm_pure_ack_pq(p_hwfn); 975 976 /* pq for offloaded protocol */ 977 ecore_init_qm_offload_pq(p_hwfn); 978 979 /* done sharing vports */ 980 ecore_init_qm_advance_vport(p_hwfn); 981 982 /* pqs for vfs */ 983 ecore_init_qm_vf_pqs(p_hwfn); 984 } 985 986 /* compare values of getters against resources amounts */ 987 static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn) 988 { 989 if (ecore_init_qm_get_num_vports(p_hwfn) > 990 RESC_NUM(p_hwfn, ECORE_VPORT)) { 991 DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 992 return ECORE_INVAL; 993 } 994 995 if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) { 996 DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 997 return ECORE_INVAL; 998 } 999 1000 return ECORE_SUCCESS; 1001 } 1002 1003 /* 1004 * Function for verbose printing of the qm initialization results 1005 */ 1006 static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) 1007 { 1008 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1009 struct init_qm_vport_params *vport; 1010 struct init_qm_port_params *port; 1011 struct init_qm_pq_params *pq; 1012 int i, tc; 1013 1014 /* top level params */ 1015 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 1016 "qm init top level params: start_pq %d, start_vport %d," 1017 " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 1018 qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, 1019 qm_info->offload_pq, qm_info->pure_ack_pq); 1020 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 1021 "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d," 1022 " num_vports %d, max_phys_tcs_per_port %d\n", 1023 qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, 1024 qm_info->num_vf_pqs, qm_info->num_vports, 1025 qm_info->max_phys_tcs_per_port); 1026 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 1027 "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d," 1028 " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 1029 qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, 1030 qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, 1031 qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn)); 1032 1033 /* port table */ 1034 for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) { 1035 port = &qm_info->qm_port_params[i]; 1036 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 1037 "port idx %d, active %d, active_phys_tcs %d," 1038 " num_pbf_cmd_lines %d, num_btb_blocks %d," 1039 " reserved %d\n", 1040 i, port->active, port->active_phys_tcs, 1041 port->num_pbf_cmd_lines, port->num_btb_blocks, 1042 port->reserved); 1043 } 1044 1045 /* vport table */ 1046 for (i = 0; i < qm_info->num_vports; i++) { 1047 vport = &qm_info->qm_vport_params[i]; 1048 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 1049 "vport idx %d, vport_rl %d, wfq %d," 1050 " first_tx_pq_id [ ", 1051 
qm_info->start_vport + i, vport->vport_rl, 1052 vport->vport_wfq); 1053 for (tc = 0; tc < NUM_OF_TCS; tc++) 1054 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", 1055 vport->first_tx_pq_id[tc]); 1056 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n"); 1057 } 1058 1059 /* pq table */ 1060 for (i = 0; i < qm_info->num_pqs; i++) { 1061 pq = &qm_info->qm_pq_params[i]; 1062 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 1063 "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 1064 qm_info->start_pq + i, pq->port_id, pq->vport_id, 1065 pq->tc_id, pq->wrr_group, pq->rl_valid); 1066 } 1067 } 1068 1069 static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn) 1070 { 1071 /* reset params required for init run */ 1072 ecore_init_qm_reset_params(p_hwfn); 1073 1074 /* init QM top level params */ 1075 ecore_init_qm_params(p_hwfn); 1076 1077 /* init QM port params */ 1078 ecore_init_qm_port_params(p_hwfn); 1079 1080 /* init QM vport params */ 1081 ecore_init_qm_vport_params(p_hwfn); 1082 1083 /* init QM physical queue params */ 1084 ecore_init_qm_pq_params(p_hwfn); 1085 1086 /* display all that init */ 1087 ecore_dp_init_qm_params(p_hwfn); 1088 } 1089 1090 /* This function reconfigures the QM pf on the fly. 1091 * For this purpose we: 1092 * 1. reconfigure the QM database 1093 * 2. set new values to runtime array 1094 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 1095 * 4. activate init tool in QM_PF stage 1096 * 5. send an sdm_qm_cmd through rbc interface to release the QM 1097 */ 1098 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, 1099 struct ecore_ptt *p_ptt) 1100 { 1101 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1102 bool b_rc; 1103 enum _ecore_status_t rc; 1104 1105 /* initialize ecore's qm data structure */ 1106 ecore_init_qm_info(p_hwfn); 1107 1108 /* stop PF's qm queues */ 1109 OSAL_SPIN_LOCK(&qm_lock); 1110 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 1111 qm_info->start_pq, qm_info->num_pqs); 1112 OSAL_SPIN_UNLOCK(&qm_lock); 1113 if (!b_rc) 1114 return ECORE_INVAL; 1115 1116 /* clear the QM_PF runtime phase leftovers from previous init */ 1117 ecore_init_clear_rt_data(p_hwfn); 1118 1119 /* prepare QM portion of runtime array */ 1120 ecore_qm_init_pf(p_hwfn, p_ptt, false); 1121 1122 /* activate init tool on runtime array */ 1123 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 1124 p_hwfn->hw_info.hw_mode); 1125 if (rc != ECORE_SUCCESS) 1126 return rc; 1127 1128 /* start PF's qm queues */ 1129 OSAL_SPIN_LOCK(&qm_lock); 1130 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 1131 qm_info->start_pq, qm_info->num_pqs); 1132 OSAL_SPIN_UNLOCK(&qm_lock); 1133 if (!b_rc) 1134 return ECORE_INVAL; 1135 1136 return ECORE_SUCCESS; 1137 } 1138 1139 static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn) 1140 { 1141 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1142 enum _ecore_status_t rc; 1143 1144 rc = ecore_init_qm_sanity(p_hwfn); 1145 if (rc != ECORE_SUCCESS) 1146 goto alloc_err; 1147 1148 qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1149 sizeof(struct init_qm_pq_params) * 1150 ecore_init_qm_get_num_pqs(p_hwfn)); 1151 if (!qm_info->qm_pq_params) 1152 goto alloc_err; 1153 1154 qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1155 sizeof(struct init_qm_vport_params) * 1156 ecore_init_qm_get_num_vports(p_hwfn)); 1157 if (!qm_info->qm_vport_params) 1158 goto alloc_err; 1159 1160 qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1161 sizeof(struct init_qm_port_params) * 1162 
					       p_hwfn->p_dev->num_ports_in_engine);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(struct ecore_wfq_data) *
					ecore_init_qm_get_num_vports(p_hwfn));
	if (!qm_info->wfq_data)
		goto alloc_err;

	return ECORE_SUCCESS;

alloc_err:
	DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
	ecore_qm_info_free(p_hwfn);
	return ECORE_NOMEM;
}
/******************** End QM initialization ***************/

enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i) {
			rc = ecore_l2_alloc(&p_dev->hwfns[i]);
			if (rc != ECORE_SUCCESS)
				return rc;
		}
		return rc;
	}

	p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
				     sizeof(*p_dev->fw_data));
	if (!p_dev->fw_data)
		return ECORE_NOMEM;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		u32 n_eqes, num_cons;

		/* initialize the doorbell recovery mechanism */
		rc = ecore_db_recovery_setup(p_hwfn);
		if (rc)
			goto alloc_err;

		/* First allocate the context manager structure */
		rc = ecore_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = ecore_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = ecore_alloc_qm_data(p_hwfn);
		if (rc)
			goto alloc_err;

		/* init qm info */
		ecore_init_qm_info(p_hwfn);

		/* Compute the ILT client partition */
		rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = ecore_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because it initializes the SPQ context */
		rc = ecore_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
							   RESERVED_PTT_DPC);

		rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = ecore_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
			/* Calculate the EQ size
			 * ---------------------
			 * Each ICID may generate up to one event at a time i.e.
			 * the event must be handled/cleared before a new one
			 * can be generated. We calculate the sum of events per
			 * protocol and create an EQ deep enough to handle the
			 * worst case:
			 * - Core - according to SPQ.
			 * - RoCE - per QP there are a couple of ICIDs, one
			 *	    responder and one requester, each can
			 *	    generate an EQE => n_eqes_qp = 2 * n_qp.
			 *	    Each CQ can generate an EQE. There are 2 CQs
			 *	    per QP => n_eqes_cq = 2 * n_qp.
			 *	    Hence the RoCE total is 4 * n_qp or
			 *	    2 * num_cons.
			 * - ENet - There can be up to two events per VF. One
			 *	    for VF-PF channel and another for VF FLR
			 *	    initial cleanup. The number of VFs is
			 *	    bounded by MAX_NUM_VFS_BB, and is much
			 *	    smaller than RoCE's so we avoid exact
			 *	    calculation.
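			 *
			 * Illustrative worked example (hypothetical numbers,
			 * not taken from this file): with 1,024 RoCE QPs the
			 * RoCE ICID count is about 2,048, which the code below
			 * doubles to 4,096 EQEs; adding a 256-entry SPQ chain
			 * and 2 * MAX_NUM_VFS_BB ENet events (480 if
			 * MAX_NUM_VFS_BB is 240) gives roughly 4,832 EQ
			 * entries, well below the 0xFFFF cap checked below.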
			 */
			if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
				num_cons =
				    ecore_cxt_get_proto_cid_count(
						p_hwfn,
						PROTOCOLID_ROCE,
						OSAL_NULL);
				num_cons *= 2;
			} else {
				num_cons = ecore_cxt_get_proto_cid_count(
						p_hwfn,
						PROTOCOLID_IWARP,
						OSAL_NULL);
			}
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
		} else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			num_cons =
			    ecore_cxt_get_proto_cid_count(p_hwfn,
							  PROTOCOLID_ISCSI,
							  OSAL_NULL);
			n_eqes += 2 * num_cons;
		}

		if (n_eqes > 0xFFFF) {
			DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements. "
			       "The maximum of a u16 chain is 0x%x\n",
			       n_eqes, 0xFFFF);
			goto alloc_no_mem;
		}

		rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
		if (rc)
			goto alloc_err;

		rc = ecore_consq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = ecore_l2_alloc(p_hwfn);
		if (rc != ECORE_SUCCESS)
			goto alloc_err;

		/* DMA info initialization */
		rc = ecore_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}

		/* DCBX initialization */
		rc = ecore_dcbx_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to allocate memory for dcbx structure\n");
			goto alloc_err;
		}
	}

	p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					 sizeof(*p_dev->reset_stats));
	if (!p_dev->reset_stats) {
		DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n");
		goto alloc_no_mem;
	}

	return ECORE_SUCCESS;

alloc_no_mem:
	rc = ECORE_NOMEM;
alloc_err:
	ecore_resc_free(p_dev);
	return rc;
}

void ecore_resc_setup(struct ecore_dev *p_dev)
{
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i)
			ecore_l2_setup(&p_dev->hwfns[i]);
		return;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		ecore_cxt_mngr_setup(p_hwfn);
		ecore_spq_setup(p_hwfn);
		ecore_eq_setup(p_hwfn);
		ecore_consq_setup(p_hwfn);

		/* Read shadow of current MFW mailbox */
		ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
			    p_hwfn->mcp_info->mfw_mb_cur,
			    p_hwfn->mcp_info->mfw_mb_length);

		ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		ecore_l2_setup(p_hwfn);
		ecore_iov_setup(p_hwfn);
	}
}

#define FINAL_CLEANUP_POLL_CNT	(100)
#define FINAL_CLEANUP_POLL_TIME	(10)
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	enum _ecore_status_t rc = ECORE_TIMEOUT;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
	    CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
		return ECORE_SUCCESS;
	}
#endif

	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
		   SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
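	/* Informal reading of the command built above: it requests an
	 * aggregated-interrupt completion (SDM_COMP_TYPE_AGG_INT) on index
	 * X_FINAL_CLEANUP_AGG_INT, with the vector bit selected by 'id'
	 * (e.g. bit 2 for PF 2, or bit 0x12 for VF 2 after the 0x10 offset
	 * above). The exact field layout comes from the SDM_* shift defines.
	 */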
1414 1415 /* Make sure notification is not set before initiating final cleanup */ 1416 1417 if (REG_RD(p_hwfn, addr)) { 1418 DP_NOTICE(p_hwfn, false, 1419 "Unexpected; Found final cleanup notification"); 1420 DP_NOTICE(p_hwfn, false, 1421 " before initiating final cleanup\n"); 1422 REG_WR(p_hwfn, addr, 0); 1423 } 1424 1425 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1426 "Sending final cleanup for PFVF[%d] [Command %08x]\n", 1427 id, command); 1428 1429 ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1430 1431 /* Poll until completion */ 1432 while (!REG_RD(p_hwfn, addr) && count--) 1433 OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); 1434 1435 if (REG_RD(p_hwfn, addr)) 1436 rc = ECORE_SUCCESS; 1437 else 1438 DP_NOTICE(p_hwfn, true, 1439 "Failed to receive FW final cleanup notification\n"); 1440 1441 /* Cleanup afterwards */ 1442 REG_WR(p_hwfn, addr, 0); 1443 1444 return rc; 1445 } 1446 1447 static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) 1448 { 1449 int hw_mode = 0; 1450 1451 if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { 1452 hw_mode |= 1 << MODE_BB; 1453 } else if (ECORE_IS_AH(p_hwfn->p_dev)) { 1454 hw_mode |= 1 << MODE_K2; 1455 } else { 1456 DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n", 1457 p_hwfn->p_dev->type); 1458 return ECORE_INVAL; 1459 } 1460 1461 /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */ 1462 switch (p_hwfn->p_dev->num_ports_in_engine) { 1463 case 1: 1464 hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1465 break; 1466 case 2: 1467 hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1468 break; 1469 case 4: 1470 hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1471 break; 1472 default: 1473 DP_NOTICE(p_hwfn, true, 1474 "num_ports_in_engine = %d not supported\n", 1475 p_hwfn->p_dev->num_ports_in_engine); 1476 return ECORE_INVAL; 1477 } 1478 1479 if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, 1480 &p_hwfn->p_dev->mf_bits)) 1481 hw_mode |= 1 << MODE_MF_SD; 1482 else 1483 hw_mode |= 1 << MODE_MF_SI; 1484 1485 #ifndef ASIC_ONLY 1486 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1487 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1488 hw_mode |= 1 << MODE_FPGA; 1489 } else { 1490 if (p_hwfn->p_dev->b_is_emul_full) 1491 hw_mode |= 1 << MODE_EMUL_FULL; 1492 else 1493 hw_mode |= 1 << MODE_EMUL_REDUCED; 1494 } 1495 } else 1496 #endif 1497 hw_mode |= 1 << MODE_ASIC; 1498 1499 if (ECORE_IS_CMT(p_hwfn->p_dev)) 1500 hw_mode |= 1 << MODE_100G; 1501 1502 p_hwfn->hw_info.hw_mode = hw_mode; 1503 1504 DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), 1505 "Configuring function for hw_mode: 0x%08x\n", 1506 p_hwfn->hw_info.hw_mode); 1507 1508 return ECORE_SUCCESS; 1509 } 1510 1511 #ifndef ASIC_ONLY 1512 /* MFW-replacement initializations for non-ASIC */ 1513 static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn, 1514 struct ecore_ptt *p_ptt) 1515 { 1516 struct ecore_dev *p_dev = p_hwfn->p_dev; 1517 u32 pl_hv = 1; 1518 int i; 1519 1520 if (CHIP_REV_IS_EMUL(p_dev)) { 1521 if (ECORE_IS_AH(p_dev)) 1522 pl_hv |= 0x600; 1523 } 1524 1525 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv); 1526 1527 if (CHIP_REV_IS_EMUL(p_dev) && 1528 (ECORE_IS_AH(p_dev))) 1529 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5, 1530 0x3ffffff); 1531 1532 /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */ 1533 /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */ 1534 if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev)) 1535 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4); 1536 1537 if (CHIP_REV_IS_EMUL(p_dev)) { 1538 if (ECORE_IS_AH(p_dev)) { 1539 /* 2 for 4-port, 1 for 2-port, 0 for 1-port */ 
1540 ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE, 1541 (p_dev->num_ports_in_engine >> 1)); 1542 1543 ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN, 1544 p_dev->num_ports_in_engine == 4 ? 0 : 3); 1545 } 1546 } 1547 1548 /* Poll on RBC */ 1549 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1); 1550 for (i = 0; i < 100; i++) { 1551 OSAL_UDELAY(50); 1552 if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1) 1553 break; 1554 } 1555 if (i == 100) 1556 DP_NOTICE(p_hwfn, true, 1557 "RBC done failed to complete in PSWRQ2\n"); 1558 1559 return ECORE_SUCCESS; 1560 } 1561 #endif 1562 1563 /* Init run time data for all PFs and their VFs on an engine. 1564 * TBD - for VFs - Once we have parent PF info for each VF in 1565 * shmem available as CAU requires knowledge of parent PF for each VF. 1566 */ 1567 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev) 1568 { 1569 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 1570 int i, igu_sb_id; 1571 1572 for_each_hwfn(p_dev, i) { 1573 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1574 struct ecore_igu_info *p_igu_info; 1575 struct ecore_igu_block *p_block; 1576 struct cau_sb_entry sb_entry; 1577 1578 p_igu_info = p_hwfn->hw_info.p_igu_info; 1579 1580 for (igu_sb_id = 0; 1581 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev); 1582 igu_sb_id++) { 1583 p_block = &p_igu_info->entry[igu_sb_id]; 1584 1585 if (!p_block->is_pf) 1586 continue; 1587 1588 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, 1589 p_block->function_id, 0, 0); 1590 STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, 1591 sb_entry); 1592 } 1593 } 1594 } 1595 1596 static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn, 1597 struct ecore_ptt *p_ptt) 1598 { 1599 u32 val, wr_mbs, cache_line_size; 1600 1601 val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 1602 switch (val) { 1603 case 0: 1604 wr_mbs = 128; 1605 break; 1606 case 1: 1607 wr_mbs = 256; 1608 break; 1609 case 2: 1610 wr_mbs = 512; 1611 break; 1612 default: 1613 DP_INFO(p_hwfn, 1614 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1615 val); 1616 return; 1617 } 1618 1619 cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs); 1620 switch (cache_line_size) { 1621 case 32: 1622 val = 0; 1623 break; 1624 case 64: 1625 val = 1; 1626 break; 1627 case 128: 1628 val = 2; 1629 break; 1630 case 256: 1631 val = 3; 1632 break; 1633 default: 1634 DP_INFO(p_hwfn, 1635 "Unexpected value of cache line size [0x%x]. 
Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1636 cache_line_size); 1637 } 1638 1639 if (wr_mbs < OSAL_CACHE_LINE_SIZE) 1640 DP_INFO(p_hwfn, 1641 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 1642 OSAL_CACHE_LINE_SIZE, wr_mbs); 1643 1644 STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 1645 if (val > 0) { 1646 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val); 1647 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val); 1648 } 1649 } 1650 1651 static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, 1652 struct ecore_ptt *p_ptt, 1653 int hw_mode) 1654 { 1655 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1656 struct ecore_dev *p_dev = p_hwfn->p_dev; 1657 u8 vf_id, max_num_vfs; 1658 u16 num_pfs, pf_id; 1659 u32 concrete_fid; 1660 enum _ecore_status_t rc = ECORE_SUCCESS; 1661 1662 ecore_init_cau_rt_data(p_dev); 1663 1664 /* Program GTT windows */ 1665 ecore_gtt_init(p_hwfn, p_ptt); 1666 1667 #ifndef ASIC_ONLY 1668 if (CHIP_REV_IS_EMUL(p_dev)) { 1669 rc = ecore_hw_init_chip(p_hwfn, p_ptt); 1670 if (rc != ECORE_SUCCESS) 1671 return rc; 1672 } 1673 #endif 1674 1675 if (p_hwfn->mcp_info) { 1676 if (p_hwfn->mcp_info->func_info.bandwidth_max) 1677 qm_info->pf_rl_en = 1; 1678 if (p_hwfn->mcp_info->func_info.bandwidth_min) 1679 qm_info->pf_wfq_en = 1; 1680 } 1681 1682 ecore_qm_common_rt_init(p_hwfn, 1683 p_dev->num_ports_in_engine, 1684 qm_info->max_phys_tcs_per_port, 1685 qm_info->pf_rl_en, qm_info->pf_wfq_en, 1686 qm_info->vport_rl_en, qm_info->vport_wfq_en, 1687 qm_info->qm_port_params); 1688 1689 ecore_cxt_hw_init_common(p_hwfn); 1690 1691 ecore_init_cache_line_size(p_hwfn, p_ptt); 1692 1693 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn), 1694 hw_mode); 1695 if (rc != ECORE_SUCCESS) 1696 return rc; 1697 1698 /* @@TBD MichalK - should add VALIDATE_VFID to init tool... 1699 * need to decide with which value, maybe runtime 1700 */ 1701 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 1702 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 1703 1704 if (ECORE_IS_BB(p_dev)) { 1705 /* Workaround clears ROCE search for all functions to prevent 1706 * involving non initialized function in processing ROCE packet. 1707 */ 1708 num_pfs = NUM_OF_ENG_PFS(p_dev); 1709 for (pf_id = 0; pf_id < num_pfs; pf_id++) { 1710 ecore_fid_pretend(p_hwfn, p_ptt, pf_id); 1711 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1712 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1713 } 1714 /* pretend to original PF */ 1715 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1716 } 1717 1718 /* Workaround for avoiding CCFC execution error when getting packets 1719 * with CRC errors, and allowing instead the invoking of the FW error 1720 * handler. 1721 * This is not done inside the init tool since it currently can't 1722 * perform a pretending to VFs. 1723 */ 1724 max_num_vfs = ECORE_IS_AH(p_dev) ? 
				    MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
		concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
		ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
		ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
		ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
		ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
		ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
	}
	/* pretend to original PF */
	ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}

#ifndef ASIC_ONLY
#define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4)
#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5)

#define PMEG_IF_BYTE_COUNT	8

static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     u32 addr, u64 data, u8 reg_type, u8 port)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
		   ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
		   (8 << PMEG_IF_BYTE_COUNT),
		   (reg_type << 25) | (addr << 8) | port,
		   (u32)((data >> 32) & 0xffffffff),
		   (u32)(data & 0xffffffff));

	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
		 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
		  0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
		 (reg_type << 25) | (addr << 8) | port);
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
		 (data >> 32) & 0xffffffff);
}

#define XLPORT_MODE_REG (0x20a)
#define XLPORT_MAC_CONTROL (0x210)
#define XLPORT_FLOW_CONTROL_CONFIG (0x207)
#define XLPORT_ENABLE_REG (0x20b)

#define XLMAC_CTRL (0x600)
#define XLMAC_MODE (0x601)
#define XLMAC_RX_MAX_SIZE (0x608)
#define XLMAC_TX_CTRL (0x604)
#define XLMAC_PAUSE_CTRL (0x60d)
#define XLMAC_PFC_CTRL (0x60e)

static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u8 loopback = 0, port = p_hwfn->port_id * 2;

	DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
	/* XLPORT MAC MODE */ /* 0 Quad, 4 Single... */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
			 port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
	/* XLMAC: SOFT RESET */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
	/* XLMAC: Port Speed >= 10Gbps */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
	/* XLMAC: Max Size */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
			 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
			 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
			 0x30ffffc000ULL, 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
			 port); /* XLMAC: TX_EN, RX_EN */
	/* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
			 0x1003 | (loopback << 2), 0, port);
	/* Enabled Parallel PFC interface */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);

	/* XLPORT port enable */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
}

static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u8 port = p_hwfn->port_id;
	u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;

	DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);

	ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
		 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
		 (port <<
		  CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
		 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
		 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
		 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
		 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
		 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
		 (0xA <<
		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
		 (8 <<
		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
		 0xa853);
}

static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	if (ECORE_IS_AH(p_hwfn->p_dev))
		ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
	else /* BB */
		ecore_emul_link_init_bb(p_hwfn, p_ptt);
}

static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u8 port)
{
0x800 : 0; 1863 u32 xmac_rxctrl = 0; 1864 1865 /* Reset of XMAC */ 1866 /* FIXME: move to common start */ 1867 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), 1868 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */ 1869 OSAL_MSLEEP(1); 1870 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1871 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */ 1872 1873 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1); 1874 1875 /* Set the number of ports on the Warp Core to 10G */ 1876 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3); 1877 1878 /* Soft reset of XMAC */ 1879 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), 1880 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1881 OSAL_MSLEEP(1); 1882 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1883 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1884 1885 /* FIXME: move to common end */ 1886 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 1887 ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20); 1888 1889 /* Set Max packet size: initialize XMAC block register for port 0 */ 1890 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710); 1891 1892 /* CRC append for Tx packets: init XMAC block register for port 1 */ 1893 ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800); 1894 1895 /* Enable TX and RX: initialize XMAC block register for port 1 */ 1896 ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset, 1897 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB); 1898 xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, 1899 XMAC_REG_RX_CTRL_BB + port_offset); 1900 xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB; 1901 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl); 1902 } 1903 #endif 1904 1905 static enum _ecore_status_t 1906 ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn, 1907 struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) 1908 { 1909 u32 dpi_bit_shift, dpi_count, dpi_page_size; 1910 u32 min_dpis; 1911 u32 n_wids; 1912 1913 /* Calculate DPI size 1914 * ------------------ 1915 * The PWM region contains Doorbell Pages. The first is reserverd for 1916 * the kernel for, e.g, L2. The others are free to be used by non- 1917 * trusted applications, typically from user space. Each page, called a 1918 * doorbell page is sectioned into windows that allow doorbells to be 1919 * issued in parallel by the kernel/application. The size of such a 1920 * window (a.k.a. WID) is 1kB. 1921 * Summary: 1922 * 1kB WID x N WIDS = DPI page size 1923 * DPI page size x N DPIs = PWM region size 1924 * Notes: 1925 * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE 1926 * in order to ensure that two applications won't share the same page. 1927 * It also must contain at least one WID per CPU to allow parallelism. 1928 * It also must be a power of 2, since it is stored as a bit shift. 1929 * 1930 * The DPI page size is stored in a register as 'dpi_bit_shift' so that 1931 * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096 1932 * containing 4 WIDs. 
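 *
 * For example (illustrative numbers only, assuming the 1kB WID above, a
 * 4kB OSAL_PAGE_SIZE and an ECORE_MIN_WIDS of at most 8): with
 * n_cpus = 8, n_wids = 8 and dpi_page_size = 8 x 1kB = 8kB, which is
 * already page aligned and a power of 2, so dpi_bit_shift =
 * log2(8192 / 4096) = 1. A 64kB PWM region would then provide
 * dpi_count = 65536 / 8192 = 8 DPIs.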
1933 */ 1934 n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus); 1935 dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids); 1936 dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) & 1937 ~(OSAL_PAGE_SIZE - 1); 1938 dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096); 1939 dpi_count = pwm_region_size / dpi_page_size; 1940 1941 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 1942 min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis); 1943 1944 /* Update hwfn */ 1945 p_hwfn->dpi_size = dpi_page_size; 1946 p_hwfn->dpi_count = dpi_count; 1947 1948 /* Update registers */ 1949 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 1950 1951 if (dpi_count < min_dpis) 1952 return ECORE_NORESOURCES; 1953 1954 return ECORE_SUCCESS; 1955 } 1956 1957 enum ECORE_ROCE_EDPM_MODE { 1958 ECORE_ROCE_EDPM_MODE_ENABLE = 0, 1959 ECORE_ROCE_EDPM_MODE_FORCE_ON = 1, 1960 ECORE_ROCE_EDPM_MODE_DISABLE = 2, 1961 }; 1962 1963 static enum _ecore_status_t 1964 ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn, 1965 struct ecore_ptt *p_ptt) 1966 { 1967 u32 pwm_regsize, norm_regsize; 1968 u32 non_pwm_conn, min_addr_reg1; 1969 u32 db_bar_size, n_cpus; 1970 u32 roce_edpm_mode; 1971 u32 pf_dems_shift; 1972 enum _ecore_status_t rc = ECORE_SUCCESS; 1973 u8 cond; 1974 1975 db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); 1976 if (ECORE_IS_CMT(p_hwfn->p_dev)) 1977 db_bar_size /= 2; 1978 1979 /* Calculate doorbell regions 1980 * ----------------------------------- 1981 * The doorbell BAR is made of two regions. The first is called normal 1982 * region and the second is called PWM region. In the normal region 1983 * each ICID has its own set of addresses so that writing to that 1984 * specific address identifies the ICID. In the Process Window Mode 1985 * region the ICID is given in the data written to the doorbell. The 1986 * above per PF register denotes the offset in the doorbell BAR in which 1987 * the PWM region begins. 1988 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per 1989 * non-PWM connection. The calculation below computes the total non-PWM 1990 * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is 1991 * in units of 4,096 bytes. 1992 */ 1993 non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 1994 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 1995 OSAL_NULL) + 1996 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL); 1997 norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 1998 OSAL_PAGE_SIZE); 1999 min_addr_reg1 = norm_regsize / 4096; 2000 pwm_regsize = db_bar_size - norm_regsize; 2001 2002 /* Check that the normal and PWM sizes are valid */ 2003 if (db_bar_size < norm_regsize) { 2004 DP_ERR(p_hwfn->p_dev, 2005 "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", 2006 db_bar_size, norm_regsize); 2007 return ECORE_NORESOURCES; 2008 } 2009 if (pwm_regsize < ECORE_MIN_PWM_REGION) { 2010 DP_ERR(p_hwfn->p_dev, 2011 "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", 2012 pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, 2013 norm_regsize); 2014 return ECORE_NORESOURCES; 2015 } 2016 2017 /* Calculate number of DPIs */ 2018 roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 2019 if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) || 2020 ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) { 2021 /* Either EDPM is mandatory, or we are attempting to allocate a 2022 * WID per CPU. 
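 * The ECORE_ROCE_EDPM_MODE values above map onto this as follows:
 * ENABLE tries a WID per CPU and, if that sizing fails, falls back to a
 * single WID; FORCE_ON also tries a WID per CPU but has no single-WID
 * fallback of its own, so a sizing failure is caught by the return-code
 * check further down; DISABLE (like the DCBx no-EDPM case) goes
 * straight to the single-WID sizing.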
2023 */ 2024 n_cpus = OSAL_NUM_CPUS(); 2025 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 2026 } 2027 2028 cond = ((rc != ECORE_SUCCESS) && 2029 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) || 2030 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE); 2031 if (cond || p_hwfn->dcbx_no_edpm) { 2032 /* Either EDPM is disabled from user configuration, or it is 2033 * disabled via DCBx, or it is not mandatory and we failed to 2034 * allocated a WID per CPU. 2035 */ 2036 n_cpus = 1; 2037 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 2038 2039 /* If we entered this flow due to DCBX then the DPM register is 2040 * already configured. 2041 */ 2042 } 2043 2044 DP_INFO(p_hwfn, 2045 "doorbell bar: normal_region_size=%d, pwm_region_size=%d", 2046 norm_regsize, pwm_regsize); 2047 DP_INFO(p_hwfn, 2048 " dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", 2049 p_hwfn->dpi_size, p_hwfn->dpi_count, 2050 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 2051 "disabled" : "enabled"); 2052 2053 /* Check return codes from above calls */ 2054 if (rc != ECORE_SUCCESS) { 2055 DP_ERR(p_hwfn, 2056 "Failed to allocate enough DPIs\n"); 2057 return ECORE_NORESOURCES; 2058 } 2059 2060 /* Update hwfn */ 2061 p_hwfn->dpi_start_offset = norm_regsize; 2062 2063 /* Update registers */ 2064 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 2065 pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4); 2066 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 2067 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 2068 2069 return ECORE_SUCCESS; 2070 } 2071 2072 static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn, 2073 struct ecore_ptt *p_ptt, 2074 int hw_mode) 2075 { 2076 u32 ppf_to_eng_sel[NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE]; 2077 u32 val; 2078 enum _ecore_status_t rc = ECORE_SUCCESS; 2079 u8 i; 2080 2081 /* In CMT for non-RoCE packets - use connection based classification */ 2082 val = ECORE_IS_CMT(p_hwfn->p_dev) ? 
0x8 : 0x0; 2083 for (i = 0; i < NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE; i++) 2084 ppf_to_eng_sel[i] = val; 2085 STORE_RT_REG_AGG(p_hwfn, NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET, 2086 ppf_to_eng_sel); 2087 2088 /* In CMT the gate should be cleared by the 2nd hwfn */ 2089 if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn)) 2090 STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0); 2091 2092 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, 2093 hw_mode); 2094 if (rc != ECORE_SUCCESS) 2095 return rc; 2096 2097 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0); 2098 2099 #ifndef ASIC_ONLY 2100 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) 2101 return ECORE_SUCCESS; 2102 2103 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 2104 if (ECORE_IS_AH(p_hwfn->p_dev)) 2105 return ECORE_SUCCESS; 2106 else if (ECORE_IS_BB(p_hwfn->p_dev)) 2107 ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id); 2108 } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 2109 if (ECORE_IS_CMT(p_hwfn->p_dev)) { 2110 /* Activate OPTE in CMT */ 2111 u32 val; 2112 2113 val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV); 2114 val |= 0x10; 2115 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val); 2116 ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1); 2117 ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1); 2118 ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1); 2119 ecore_wr(p_hwfn, p_ptt, 2120 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1); 2121 ecore_wr(p_hwfn, p_ptt, 2122 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555); 2123 ecore_wr(p_hwfn, p_ptt, 2124 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4, 2125 0x55555555); 2126 } 2127 2128 ecore_emul_link_init(p_hwfn, p_ptt); 2129 } else { 2130 DP_INFO(p_hwfn->p_dev, "link is not being configured\n"); 2131 } 2132 #endif 2133 2134 return rc; 2135 } 2136 2137 static enum _ecore_status_t 2138 ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, 2139 struct ecore_ptt *p_ptt, 2140 struct ecore_tunnel_info *p_tunn, 2141 int hw_mode, 2142 bool b_hw_start, 2143 enum ecore_int_mode int_mode, bool allow_npar_tx_switch) 2144 { 2145 u8 rel_pf_id = p_hwfn->rel_pf_id; 2146 u32 prs_reg; 2147 enum _ecore_status_t rc = ECORE_SUCCESS; 2148 u16 ctrl; 2149 int pos; 2150 2151 if (p_hwfn->mcp_info) { 2152 struct ecore_mcp_function_info *p_info; 2153 2154 p_info = &p_hwfn->mcp_info->func_info; 2155 if (p_info->bandwidth_min) 2156 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 2157 2158 /* Update rate limit once we'll actually have a link */ 2159 p_hwfn->qm_info.pf_rl = 100000; 2160 } 2161 ecore_cxt_hw_init_pf(p_hwfn, p_ptt); 2162 2163 ecore_int_igu_init_rt(p_hwfn); 2164 2165 /* Set VLAN in NIG if needed */ 2166 if (hw_mode & (1 << MODE_MF_SD)) { 2167 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 2168 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 2169 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 2170 p_hwfn->hw_info.ovlan); 2171 2172 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 2173 "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); 2174 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, 2175 1); 2176 } 2177 2178 /* Enable classification by MAC if needed */ 2179 if (hw_mode & (1 << MODE_MF_SI)) { 2180 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 2181 "Configuring TAGMAC_CLS_TYPE\n"); 2182 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 2183 1); 2184 } 2185 2186 /* Protocl Configuration - @@@TBD - should we set 0 otherwise? */ 2187 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 2188 (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 
1 : 0); 2189 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 2190 (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0); 2191 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 2192 2193 /* perform debug configuration when chip is out of reset */ 2194 OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); 2195 2196 /* Sanity check before the PF init sequence that uses DMAE */ 2197 rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); 2198 if (rc) 2199 return rc; 2200 2201 /* PF Init sequence */ 2202 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 2203 if (rc) 2204 return rc; 2205 2206 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 2207 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 2208 if (rc) 2209 return rc; 2210 2211 /* Pure runtime initializations - directly to the HW */ 2212 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 2213 2214 /* PCI relaxed ordering causes a decrease in the performance on some 2215 * systems. Till a root cause is found, disable this attribute in the 2216 * PCI config space. 2217 */ 2218 /* Not in use @DPDK 2219 * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP); 2220 * if (!pos) { 2221 * DP_NOTICE(p_hwfn, true, 2222 * "Failed to find the PCIe Cap\n"); 2223 * return ECORE_IO; 2224 * } 2225 * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl); 2226 * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; 2227 * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl); 2228 */ 2229 2230 rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 2231 if (rc) 2232 return rc; 2233 if (b_hw_start) { 2234 /* enable interrupts */ 2235 rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode); 2236 if (rc != ECORE_SUCCESS) 2237 return rc; 2238 2239 /* send function start command */ 2240 rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn, 2241 allow_npar_tx_switch); 2242 if (rc) { 2243 DP_NOTICE(p_hwfn, true, 2244 "Function start ramrod failed\n"); 2245 } else { 2246 return rc; 2247 } 2248 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2249 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2250 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2251 2252 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { 2253 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, 2254 (1 << 2)); 2255 ecore_wr(p_hwfn, p_ptt, 2256 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 2257 0x100); 2258 } 2259 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2260 "PRS_REG_SEARCH registers after start PFn\n"); 2261 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); 2262 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2263 "PRS_REG_SEARCH_TCP: %x\n", prs_reg); 2264 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); 2265 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2266 "PRS_REG_SEARCH_UDP: %x\n", prs_reg); 2267 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); 2268 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2269 "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); 2270 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); 2271 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2272 "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); 2273 prs_reg = ecore_rd(p_hwfn, p_ptt, 2274 PRS_REG_SEARCH_TCP_FIRST_FRAG); 2275 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2276 "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", 2277 prs_reg); 2278 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2279 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2280 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2281 } 2282 return ECORE_SUCCESS; 2283 } 2284 2285 enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn, 2286 struct ecore_ptt 
*p_ptt, 2287 bool b_enable) 2288 { 2289 u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0; 2290 2291 /* Configure the PF's internal FID_enable for master transactions */ 2292 ecore_wr(p_hwfn, p_ptt, 2293 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 2294 2295 /* Wait until value is set - try for 1 second every 50us */ 2296 for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 2297 val = ecore_rd(p_hwfn, p_ptt, 2298 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 2299 if (val == set_val) 2300 break; 2301 2302 OSAL_UDELAY(50); 2303 } 2304 2305 if (val != set_val) { 2306 DP_NOTICE(p_hwfn, true, 2307 "PFID_ENABLE_MASTER wasn't changed after a second\n"); 2308 return ECORE_UNKNOWN_ERROR; 2309 } 2310 2311 return ECORE_SUCCESS; 2312 } 2313 2314 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, 2315 struct ecore_ptt *p_main_ptt) 2316 { 2317 /* Read shadow of current MFW mailbox */ 2318 ecore_mcp_read_mb(p_hwfn, p_main_ptt); 2319 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 2320 p_hwfn->mcp_info->mfw_mb_cur, 2321 p_hwfn->mcp_info->mfw_mb_length); 2322 } 2323 2324 static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn, 2325 struct ecore_ptt *p_ptt) 2326 { 2327 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 2328 1 << p_hwfn->abs_pf_id); 2329 } 2330 2331 static enum _ecore_status_t 2332 ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn, 2333 struct ecore_load_req_params *p_load_req, 2334 struct ecore_drv_load_params *p_drv_load) 2335 { 2336 /* Make sure that if ecore-client didn't provide inputs, all the 2337 * expected defaults are indeed zero. 2338 */ 2339 OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0); 2340 OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0); 2341 OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0); 2342 2343 OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); 2344 2345 if (p_drv_load == OSAL_NULL) 2346 goto out; 2347 2348 p_load_req->drv_role = p_drv_load->is_crash_kernel ? 2349 ECORE_DRV_ROLE_KDUMP : 2350 ECORE_DRV_ROLE_OS; 2351 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 2352 p_load_req->override_force_load = p_drv_load->override_force_load; 2353 2354 /* Old MFW versions don't support timeout values other than default and 2355 * none, so these values are replaced according to the fall-back action. 2356 */ 2357 2358 if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT || 2359 p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE || 2360 (p_hwfn->mcp_info->capabilities & 2361 FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) { 2362 p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 2363 goto out; 2364 } 2365 2366 switch (p_drv_load->mfw_timeout_fallback) { 2367 case ECORE_TO_FALLBACK_TO_NONE: 2368 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE; 2369 break; 2370 case ECORE_TO_FALLBACK_TO_DEFAULT: 2371 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; 2372 break; 2373 case ECORE_TO_FALLBACK_FAIL_LOAD: 2374 DP_NOTICE(p_hwfn, false, 2375 "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n", 2376 p_drv_load->mfw_timeout_val, 2377 ECORE_LOAD_REQ_LOCK_TO_DEFAULT, 2378 ECORE_LOAD_REQ_LOCK_TO_NONE); 2379 return ECORE_ABORTED; 2380 } 2381 2382 DP_INFO(p_hwfn, 2383 "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n", 2384 p_drv_load->mfw_timeout_val, 2385 (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ? 
2386 "default" : "none", 2387 p_load_req->timeout_val); 2388 out: 2389 return ECORE_SUCCESS; 2390 } 2391 2392 enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, 2393 struct ecore_hw_init_params *p_params) 2394 { 2395 if (p_params->p_tunn) { 2396 ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 2397 ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 2398 } 2399 2400 p_hwfn->b_int_enabled = 1; 2401 2402 return ECORE_SUCCESS; 2403 } 2404 2405 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 2406 struct ecore_hw_init_params *p_params) 2407 { 2408 struct ecore_load_req_params load_req_params; 2409 u32 load_code, resp, param, drv_mb_param; 2410 bool b_default_mtu = true; 2411 struct ecore_hwfn *p_hwfn; 2412 enum _ecore_status_t rc = ECORE_SUCCESS; 2413 int i; 2414 2415 if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) { 2416 DP_NOTICE(p_dev, false, 2417 "MSI mode is not supported for CMT devices\n"); 2418 return ECORE_INVAL; 2419 } 2420 2421 if (IS_PF(p_dev)) { 2422 rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data); 2423 if (rc != ECORE_SUCCESS) 2424 return rc; 2425 } 2426 2427 for_each_hwfn(p_dev, i) { 2428 p_hwfn = &p_dev->hwfns[i]; 2429 2430 /* If management didn't provide a default, set one of our own */ 2431 if (!p_hwfn->hw_info.mtu) { 2432 p_hwfn->hw_info.mtu = 1500; 2433 b_default_mtu = false; 2434 } 2435 2436 if (IS_VF(p_dev)) { 2437 ecore_vf_start(p_hwfn, p_params); 2438 continue; 2439 } 2440 2441 rc = ecore_calc_hw_mode(p_hwfn); 2442 if (rc != ECORE_SUCCESS) 2443 return rc; 2444 2445 ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms); 2446 2447 rc = ecore_fill_load_req_params(p_hwfn, &load_req_params, 2448 p_params->p_drv_load_params); 2449 if (rc != ECORE_SUCCESS) 2450 return rc; 2451 2452 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 2453 &load_req_params); 2454 if (rc != ECORE_SUCCESS) { 2455 DP_NOTICE(p_hwfn, false, 2456 "Failed sending a LOAD_REQ command\n"); 2457 return rc; 2458 } 2459 2460 load_code = load_req_params.load_code; 2461 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2462 "Load request was sent. Load code: 0x%x\n", 2463 load_code); 2464 2465 ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); 2466 2467 /* CQ75580: 2468 * When coming back from hiberbate state, the registers from 2469 * which shadow is read initially are not initialized. It turns 2470 * out that these registers get initialized during the call to 2471 * ecore_mcp_load_req request. So we need to reread them here 2472 * to get the proper shadow register value. 2473 * Note: This is a workaround for the missing MFW 2474 * initialization. It may be removed once the implementation 2475 * is done. 2476 */ 2477 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 2478 2479 /* Only relevant for recovery: 2480 * Clear the indication after the LOAD_REQ command is responded 2481 * by the MFW. 2482 */ 2483 p_dev->recov_in_prog = false; 2484 2485 p_hwfn->first_on_engine = (load_code == 2486 FW_MSG_CODE_DRV_LOAD_ENGINE); 2487 2488 if (!qm_lock_ref_cnt) { 2489 #ifdef CONFIG_ECORE_LOCK_ALLOC 2490 rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock); 2491 if (rc) { 2492 DP_ERR(p_hwfn, "qm_lock allocation failed\n"); 2493 goto qm_lock_fail; 2494 } 2495 #endif 2496 OSAL_SPIN_LOCK_INIT(&qm_lock); 2497 } 2498 ++qm_lock_ref_cnt; 2499 2500 /* Clean up chip from previous driver if such remains exist. 2501 * This is not needed when the PF is the first one on the 2502 * engine, since afterwards we are going to init the FW. 
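 * FW_MSG_CODE_DRV_LOAD_ENGINE is the load code handed to the first PF
 * on the engine (it is also what set first_on_engine above), so the
 * final cleanup ramrod is only sent for the PORT/FUNCTION load codes.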
2503 */ 2504 if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { 2505 rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, 2506 p_hwfn->rel_pf_id, false); 2507 if (rc != ECORE_SUCCESS) { 2508 ecore_hw_err_notify(p_hwfn, 2509 ECORE_HW_ERR_RAMROD_FAIL); 2510 goto load_err; 2511 } 2512 } 2513 2514 /* Log and clear previous pglue_b errors if such exist */ 2515 ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); 2516 2517 /* Enable the PF's internal FID_enable in the PXP */ 2518 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, 2519 true); 2520 if (rc != ECORE_SUCCESS) 2521 goto load_err; 2522 2523 /* Clear the pglue_b was_error indication. 2524 * In E4 it must be done after the BME and the internal 2525 * FID_enable for the PF are set, since VDMs may cause the 2526 * indication to be set again. 2527 */ 2528 ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); 2529 2530 switch (load_code) { 2531 case FW_MSG_CODE_DRV_LOAD_ENGINE: 2532 rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 2533 p_hwfn->hw_info.hw_mode); 2534 if (rc != ECORE_SUCCESS) 2535 break; 2536 /* Fall into */ 2537 case FW_MSG_CODE_DRV_LOAD_PORT: 2538 rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 2539 p_hwfn->hw_info.hw_mode); 2540 if (rc != ECORE_SUCCESS) 2541 break; 2542 /* Fall into */ 2543 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 2544 rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 2545 p_params->p_tunn, 2546 p_hwfn->hw_info.hw_mode, 2547 p_params->b_hw_start, 2548 p_params->int_mode, 2549 p_params->allow_npar_tx_switch); 2550 break; 2551 default: 2552 DP_NOTICE(p_hwfn, false, 2553 "Unexpected load code [0x%08x]", load_code); 2554 rc = ECORE_NOTIMPL; 2555 break; 2556 } 2557 2558 if (rc != ECORE_SUCCESS) { 2559 DP_NOTICE(p_hwfn, false, 2560 "init phase failed for loadcode 0x%x (rc %d)\n", 2561 load_code, rc); 2562 goto load_err; 2563 } 2564 2565 rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); 2566 if (rc != ECORE_SUCCESS) { 2567 DP_NOTICE(p_hwfn, false, 2568 "Sending load done failed, rc = %d\n", rc); 2569 if (rc == ECORE_NOMEM) { 2570 DP_NOTICE(p_hwfn, false, 2571 "Sending load done was failed due to memory allocation failure\n"); 2572 goto load_err; 2573 } 2574 return rc; 2575 } 2576 2577 /* send DCBX attention request command */ 2578 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 2579 "sending phony dcbx set command to trigger DCBx attention handling\n"); 2580 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2581 DRV_MSG_CODE_SET_DCBX, 2582 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp, 2583 ¶m); 2584 if (rc != ECORE_SUCCESS) { 2585 DP_NOTICE(p_hwfn, false, 2586 "Failed to send DCBX attention request\n"); 2587 return rc; 2588 } 2589 2590 p_hwfn->hw_init_done = true; 2591 } 2592 2593 if (IS_PF(p_dev)) { 2594 /* Get pre-negotiated values for stag, bandwidth etc. 
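 * Like the phony DCBX set command above, GET_OEM_UPDATES with the dummy
 * parameter is only meant to make the MFW raise the corresponding
 * attentions, so the stag/bandwidth values are then picked up through
 * the regular attention handling path.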
*/ 2595 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2596 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, 2597 "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); 2598 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2599 DRV_MSG_CODE_GET_OEM_UPDATES, 2600 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET, 2601 &resp, ¶m); 2602 if (rc != ECORE_SUCCESS) 2603 DP_NOTICE(p_hwfn, false, 2604 "Failed to send GET_OEM_UPDATES attention request\n"); 2605 } 2606 2607 if (IS_PF(p_dev)) { 2608 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2609 drv_mb_param = STORM_FW_VERSION; 2610 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2611 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 2612 drv_mb_param, &resp, ¶m); 2613 if (rc != ECORE_SUCCESS) 2614 DP_INFO(p_hwfn, "Failed to update firmware version\n"); 2615 2616 if (!b_default_mtu) { 2617 rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 2618 p_hwfn->hw_info.mtu); 2619 if (rc != ECORE_SUCCESS) 2620 DP_INFO(p_hwfn, "Failed to update default mtu\n"); 2621 } 2622 2623 rc = ecore_mcp_ov_update_driver_state(p_hwfn, 2624 p_hwfn->p_main_ptt, 2625 ECORE_OV_DRIVER_STATE_DISABLED); 2626 if (rc != ECORE_SUCCESS) 2627 DP_INFO(p_hwfn, "Failed to update driver state\n"); 2628 2629 rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 2630 ECORE_OV_ESWITCH_NONE); 2631 if (rc != ECORE_SUCCESS) 2632 DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 2633 } 2634 2635 return rc; 2636 2637 load_err: 2638 --qm_lock_ref_cnt; 2639 #ifdef CONFIG_ECORE_LOCK_ALLOC 2640 if (!qm_lock_ref_cnt) 2641 OSAL_SPIN_LOCK_DEALLOC(&qm_lock); 2642 qm_lock_fail: 2643 #endif 2644 /* The MFW load lock should be released regardless of success or failure 2645 * of initialization. 2646 * TODO: replace this with an attempt to send cancel_load. 2647 */ 2648 ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); 2649 return rc; 2650 } 2651 2652 #define ECORE_HW_STOP_RETRY_LIMIT (10) 2653 static void ecore_hw_timers_stop(struct ecore_dev *p_dev, 2654 struct ecore_hwfn *p_hwfn, 2655 struct ecore_ptt *p_ptt) 2656 { 2657 int i; 2658 2659 /* close timers */ 2660 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 2661 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 2662 for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog; 2663 i++) { 2664 if ((!ecore_rd(p_hwfn, p_ptt, 2665 TM_REG_PF_SCAN_ACTIVE_CONN)) && 2666 (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) 2667 break; 2668 2669 /* Dependent on number of connection/tasks, possibly 2670 * 1ms sleep is required between polls 2671 */ 2672 OSAL_MSLEEP(1); 2673 } 2674 2675 if (i < ECORE_HW_STOP_RETRY_LIMIT) 2676 return; 2677 2678 DP_NOTICE(p_hwfn, false, 2679 "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 2680 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 2681 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 2682 } 2683 2684 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) 2685 { 2686 int j; 2687 2688 for_each_hwfn(p_dev, j) { 2689 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2690 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2691 2692 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2693 } 2694 } 2695 2696 static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn, 2697 struct ecore_ptt *p_ptt, 2698 u32 addr, u32 expected_val) 2699 { 2700 u32 val = ecore_rd(p_hwfn, p_ptt, addr); 2701 2702 if (val != expected_val) { 2703 DP_NOTICE(p_hwfn, true, 2704 "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n", 2705 addr, val, expected_val); 2706 return ECORE_UNKNOWN_ERROR; 2707 } 
2708 2709 return ECORE_SUCCESS; 2710 } 2711 2712 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) 2713 { 2714 struct ecore_hwfn *p_hwfn; 2715 struct ecore_ptt *p_ptt; 2716 enum _ecore_status_t rc, rc2 = ECORE_SUCCESS; 2717 int j; 2718 2719 for_each_hwfn(p_dev, j) { 2720 p_hwfn = &p_dev->hwfns[j]; 2721 p_ptt = p_hwfn->p_main_ptt; 2722 2723 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n"); 2724 2725 if (IS_VF(p_dev)) { 2726 ecore_vf_pf_int_cleanup(p_hwfn); 2727 rc = ecore_vf_pf_reset(p_hwfn); 2728 if (rc != ECORE_SUCCESS) { 2729 DP_NOTICE(p_hwfn, true, 2730 "ecore_vf_pf_reset failed. rc = %d.\n", 2731 rc); 2732 rc2 = ECORE_UNKNOWN_ERROR; 2733 } 2734 continue; 2735 } 2736 2737 /* mark the hw as uninitialized... */ 2738 p_hwfn->hw_init_done = false; 2739 2740 /* Send unload command to MCP */ 2741 if (!p_dev->recov_in_prog) { 2742 rc = ecore_mcp_unload_req(p_hwfn, p_ptt); 2743 if (rc != ECORE_SUCCESS) { 2744 DP_NOTICE(p_hwfn, false, 2745 "Failed sending a UNLOAD_REQ command. rc = %d.\n", 2746 rc); 2747 rc2 = ECORE_UNKNOWN_ERROR; 2748 } 2749 } 2750 2751 OSAL_DPC_SYNC(p_hwfn); 2752 2753 /* After this point no MFW attentions are expected, e.g. prevent 2754 * race between pf stop and dcbx pf update. 2755 */ 2756 2757 rc = ecore_sp_pf_stop(p_hwfn); 2758 if (rc != ECORE_SUCCESS) { 2759 DP_NOTICE(p_hwfn, false, 2760 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n", 2761 rc); 2762 rc2 = ECORE_UNKNOWN_ERROR; 2763 } 2764 2765 /* perform debug action after PF stop was sent */ 2766 OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id); 2767 2768 /* close NIG to BRB gate */ 2769 ecore_wr(p_hwfn, p_ptt, 2770 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2771 2772 /* close parser */ 2773 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2774 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2775 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2776 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2777 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2778 2779 /* @@@TBD - clean transmission queues (5.b) */ 2780 /* @@@TBD - clean BTB (5.c) */ 2781 2782 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2783 2784 /* @@@TBD - verify DMAE requests are done (8) */ 2785 2786 /* Disable Attention Generation */ 2787 ecore_int_igu_disable_int(p_hwfn, p_ptt); 2788 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 2789 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 2790 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 2791 rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt); 2792 if (rc != ECORE_SUCCESS) { 2793 DP_NOTICE(p_hwfn, true, 2794 "Failed to return IGU CAM to default\n"); 2795 rc2 = ECORE_UNKNOWN_ERROR; 2796 } 2797 2798 /* Need to wait 1ms to guarantee SBs are cleared */ 2799 OSAL_MSLEEP(1); 2800 2801 if (!p_dev->recov_in_prog) { 2802 ecore_verify_reg_val(p_hwfn, p_ptt, 2803 QM_REG_USG_CNT_PF_TX, 0); 2804 ecore_verify_reg_val(p_hwfn, p_ptt, 2805 QM_REG_USG_CNT_PF_OTHER, 0); 2806 /* @@@TBD - assert on incorrect xCFC values (10.b) */ 2807 } 2808 2809 /* Disable PF in HW blocks */ 2810 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 2811 ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 2812 2813 --qm_lock_ref_cnt; 2814 #ifdef CONFIG_ECORE_LOCK_ALLOC 2815 if (!qm_lock_ref_cnt) 2816 OSAL_SPIN_LOCK_DEALLOC(&qm_lock); 2817 #endif 2818 2819 if (!p_dev->recov_in_prog) { 2820 rc = ecore_mcp_unload_done(p_hwfn, p_ptt); 2821 if (rc == ECORE_NOMEM) { 2822 DP_NOTICE(p_hwfn, false, 2823 "Failed sending an UNLOAD_DONE command due to 
a memory allocation failure. Resending.\n"); 2824 rc = ecore_mcp_unload_done(p_hwfn, p_ptt); 2825 } 2826 if (rc != ECORE_SUCCESS) { 2827 DP_NOTICE(p_hwfn, false, 2828 "Failed sending a UNLOAD_DONE command. rc = %d.\n", 2829 rc); 2830 rc2 = ECORE_UNKNOWN_ERROR; 2831 } 2832 } 2833 } /* hwfn loop */ 2834 2835 if (IS_PF(p_dev) && !p_dev->recov_in_prog) { 2836 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2837 p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt; 2838 2839 /* Clear the PF's internal FID_enable in the PXP. 2840 * In CMT this should only be done for first hw-function, and 2841 * only after all transactions have stopped for all active 2842 * hw-functions. 2843 */ 2844 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, 2845 false); 2846 if (rc != ECORE_SUCCESS) { 2847 DP_NOTICE(p_hwfn, true, 2848 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n", 2849 rc); 2850 rc2 = ECORE_UNKNOWN_ERROR; 2851 } 2852 } 2853 2854 return rc2; 2855 } 2856 2857 enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev) 2858 { 2859 int j; 2860 2861 for_each_hwfn(p_dev, j) { 2862 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2863 struct ecore_ptt *p_ptt; 2864 2865 if (IS_VF(p_dev)) { 2866 ecore_vf_pf_int_cleanup(p_hwfn); 2867 continue; 2868 } 2869 p_ptt = ecore_ptt_acquire(p_hwfn); 2870 if (!p_ptt) 2871 return ECORE_AGAIN; 2872 2873 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, 2874 "Shutting down the fastpath\n"); 2875 2876 ecore_wr(p_hwfn, p_ptt, 2877 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2878 2879 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2880 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2881 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2882 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2883 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2884 2885 /* @@@TBD - clean transmission queues (5.b) */ 2886 /* @@@TBD - clean BTB (5.c) */ 2887 2888 /* @@@TBD - verify DMAE requests are done (8) */ 2889 2890 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 2891 /* Need to wait 1ms to guarantee SBs are cleared */ 2892 OSAL_MSLEEP(1); 2893 ecore_ptt_release(p_hwfn, p_ptt); 2894 } 2895 2896 return ECORE_SUCCESS; 2897 } 2898 2899 enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) 2900 { 2901 struct ecore_ptt *p_ptt; 2902 2903 if (IS_VF(p_hwfn->p_dev)) 2904 return ECORE_SUCCESS; 2905 2906 p_ptt = ecore_ptt_acquire(p_hwfn); 2907 if (!p_ptt) 2908 return ECORE_AGAIN; 2909 2910 /* If roce info is allocated it means roce is initialized and should 2911 * be enabled in searcher. 
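 * The writes below re-enable RDMA search in the parser (via
 * rdma_prs_search_reg) and re-arm the connection timers before the
 * NIG-to-BRB gate is re-opened for incoming traffic.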
2912 */ 2913 if (p_hwfn->p_rdma_info) { 2914 if (p_hwfn->b_rdma_enabled_in_prs) 2915 ecore_wr(p_hwfn, p_ptt, 2916 p_hwfn->rdma_prs_search_reg, 0x1); 2917 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1); 2918 } 2919 2920 /* Re-open incoming traffic */ 2921 ecore_wr(p_hwfn, p_ptt, 2922 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 2923 ecore_ptt_release(p_hwfn, p_ptt); 2924 2925 return ECORE_SUCCESS; 2926 } 2927 2928 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 2929 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) 2930 { 2931 ecore_ptt_pool_free(p_hwfn); 2932 OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info); 2933 } 2934 2935 /* Setup bar access */ 2936 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn) 2937 { 2938 /* clear indirect access */ 2939 if (ECORE_IS_AH(p_hwfn->p_dev)) { 2940 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2941 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0); 2942 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2943 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0); 2944 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2945 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0); 2946 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2947 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0); 2948 } else { 2949 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2950 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); 2951 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2952 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); 2953 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2954 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); 2955 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2956 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); 2957 } 2958 2959 /* Clean previous pglue_b errors if such exist */ 2960 ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); 2961 2962 /* enable internal target-read */ 2963 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2964 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 2965 } 2966 2967 static void get_function_id(struct ecore_hwfn *p_hwfn) 2968 { 2969 /* ME Register */ 2970 p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, 2971 PXP_PF_ME_OPAQUE_ADDR); 2972 2973 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 2974 2975 /* Bits 16-19 from the ME registers are the pf_num */ 2976 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 2977 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2978 PXP_CONCRETE_FID_PFID); 2979 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2980 PXP_CONCRETE_FID_PORT); 2981 2982 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2983 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 2984 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 2985 } 2986 2987 static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) 2988 { 2989 u32 *feat_num = p_hwfn->hw_info.feat_num; 2990 struct ecore_sb_cnt_info sb_cnt; 2991 u32 non_l2_sbs = 0; 2992 2993 OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt)); 2994 ecore_int_get_num_sbs(p_hwfn, &sb_cnt); 2995 2996 /* L2 Queues require each: 1 status block. 
1 L2 queue */ 2997 if (ECORE_IS_L2_PERSONALITY(p_hwfn)) { 2998 /* Start by allocating VF queues, then PF's */ 2999 feat_num[ECORE_VF_L2_QUE] = 3000 OSAL_MIN_T(u32, 3001 RESC_NUM(p_hwfn, ECORE_L2_QUEUE), 3002 sb_cnt.iov_cnt); 3003 feat_num[ECORE_PF_L2_QUE] = 3004 OSAL_MIN_T(u32, 3005 sb_cnt.cnt - non_l2_sbs, 3006 RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 3007 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE)); 3008 } 3009 3010 if (ECORE_IS_FCOE_PERSONALITY(p_hwfn)) 3011 feat_num[ECORE_FCOE_CQ] = 3012 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 3013 ECORE_CMDQS_CQS)); 3014 3015 if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) 3016 feat_num[ECORE_ISCSI_CQ] = 3017 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 3018 ECORE_CMDQS_CQS)); 3019 3020 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3021 "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n", 3022 (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), 3023 (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), 3024 (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), 3025 (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ), 3026 (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ), 3027 (int)sb_cnt.cnt); 3028 } 3029 3030 const char *ecore_hw_get_resc_name(enum ecore_resources res_id) 3031 { 3032 switch (res_id) { 3033 case ECORE_L2_QUEUE: 3034 return "L2_QUEUE"; 3035 case ECORE_VPORT: 3036 return "VPORT"; 3037 case ECORE_RSS_ENG: 3038 return "RSS_ENG"; 3039 case ECORE_PQ: 3040 return "PQ"; 3041 case ECORE_RL: 3042 return "RL"; 3043 case ECORE_MAC: 3044 return "MAC"; 3045 case ECORE_VLAN: 3046 return "VLAN"; 3047 case ECORE_RDMA_CNQ_RAM: 3048 return "RDMA_CNQ_RAM"; 3049 case ECORE_ILT: 3050 return "ILT"; 3051 case ECORE_LL2_QUEUE: 3052 return "LL2_QUEUE"; 3053 case ECORE_CMDQS_CQS: 3054 return "CMDQS_CQS"; 3055 case ECORE_RDMA_STATS_QUEUE: 3056 return "RDMA_STATS_QUEUE"; 3057 case ECORE_BDQ: 3058 return "BDQ"; 3059 case ECORE_SB: 3060 return "SB"; 3061 default: 3062 return "UNKNOWN_RESOURCE"; 3063 } 3064 } 3065 3066 static enum _ecore_status_t 3067 __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 3068 struct ecore_ptt *p_ptt, 3069 enum ecore_resources res_id, 3070 u32 resc_max_val, 3071 u32 *p_mcp_resp) 3072 { 3073 enum _ecore_status_t rc; 3074 3075 rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, 3076 resc_max_val, p_mcp_resp); 3077 if (rc != ECORE_SUCCESS) { 3078 DP_NOTICE(p_hwfn, false, 3079 "MFW response failure for a max value setting of resource %d [%s]\n", 3080 res_id, ecore_hw_get_resc_name(res_id)); 3081 return rc; 3082 } 3083 3084 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 3085 DP_INFO(p_hwfn, 3086 "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n", 3087 res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp); 3088 3089 return ECORE_SUCCESS; 3090 } 3091 3092 static enum _ecore_status_t 3093 ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 3094 struct ecore_ptt *p_ptt) 3095 { 3096 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3097 u32 resc_max_val, mcp_resp; 3098 u8 res_id; 3099 enum _ecore_status_t rc; 3100 3101 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 3102 /* @DPDK */ 3103 switch (res_id) { 3104 case ECORE_LL2_QUEUE: 3105 case ECORE_RDMA_CNQ_RAM: 3106 case ECORE_RDMA_STATS_QUEUE: 3107 case ECORE_BDQ: 3108 resc_max_val = 0; 3109 break; 3110 default: 3111 continue; 3112 } 3113 3114 rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, 3115 resc_max_val, &mcp_resp); 3116 if (rc != ECORE_SUCCESS) 3117 return rc; 3118 3119 /* There's no point to continue to the next resource if the 3120 * command is not supported by the MFW. 
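 * (In that case ECORE_NOTIMPL is returned below and the caller skips
 * the max-value setting altogether.)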
3121 * We do continue if the command is supported but the resource 3122 * is unknown to the MFW. Such a resource will be later 3123 * configured with the default allocation values. 3124 */ 3125 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 3126 return ECORE_NOTIMPL; 3127 } 3128 3129 return ECORE_SUCCESS; 3130 } 3131 3132 static 3133 enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn, 3134 enum ecore_resources res_id, 3135 u32 *p_resc_num, u32 *p_resc_start) 3136 { 3137 u8 num_funcs = p_hwfn->num_funcs_on_engine; 3138 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3139 3140 switch (res_id) { 3141 case ECORE_L2_QUEUE: 3142 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : 3143 MAX_NUM_L2_QUEUES_BB) / num_funcs; 3144 break; 3145 case ECORE_VPORT: 3146 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 3147 MAX_NUM_VPORTS_BB) / num_funcs; 3148 break; 3149 case ECORE_RSS_ENG: 3150 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 3151 ETH_RSS_ENGINE_NUM_BB) / num_funcs; 3152 break; 3153 case ECORE_PQ: 3154 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : 3155 MAX_QM_TX_QUEUES_BB) / num_funcs; 3156 break; 3157 case ECORE_RL: 3158 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 3159 break; 3160 case ECORE_MAC: 3161 case ECORE_VLAN: 3162 /* Each VFC resource can accommodate both a MAC and a VLAN */ 3163 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 3164 break; 3165 case ECORE_ILT: 3166 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 3167 PXP_NUM_ILT_RECORDS_BB) / num_funcs; 3168 break; 3169 case ECORE_LL2_QUEUE: 3170 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 3171 break; 3172 case ECORE_RDMA_CNQ_RAM: 3173 case ECORE_CMDQS_CQS: 3174 /* CNQ/CMDQS are the same resource */ 3175 /* @DPDK */ 3176 *p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs; 3177 break; 3178 case ECORE_RDMA_STATS_QUEUE: 3179 /* @DPDK */ 3180 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 3181 MAX_NUM_VPORTS_BB) / num_funcs; 3182 break; 3183 case ECORE_BDQ: 3184 /* @DPDK */ 3185 *p_resc_num = 0; 3186 break; 3187 default: 3188 break; 3189 } 3190 3191 3192 switch (res_id) { 3193 case ECORE_BDQ: 3194 if (!*p_resc_num) 3195 *p_resc_start = 0; 3196 break; 3197 case ECORE_SB: 3198 /* Since we want its value to reflect whether MFW supports 3199 * the new scheme, have a default of 0. 
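 * A non-zero SB count is therefore only applied when the MFW reports
 * one through the resource allocation query in __ecore_hw_set_resc_info().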
3200 */ 3201 *p_resc_num = 0; 3202 break; 3203 default: 3204 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 3205 break; 3206 } 3207 3208 return ECORE_SUCCESS; 3209 } 3210 3211 static enum _ecore_status_t 3212 __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, 3213 bool drv_resc_alloc) 3214 { 3215 u32 dflt_resc_num = 0, dflt_resc_start = 0; 3216 u32 mcp_resp, *p_resc_num, *p_resc_start; 3217 enum _ecore_status_t rc; 3218 3219 p_resc_num = &RESC_NUM(p_hwfn, res_id); 3220 p_resc_start = &RESC_START(p_hwfn, res_id); 3221 3222 rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 3223 &dflt_resc_start); 3224 if (rc != ECORE_SUCCESS) { 3225 DP_ERR(p_hwfn, 3226 "Failed to get default amount for resource %d [%s]\n", 3227 res_id, ecore_hw_get_resc_name(res_id)); 3228 return rc; 3229 } 3230 3231 #ifndef ASIC_ONLY 3232 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3233 *p_resc_num = dflt_resc_num; 3234 *p_resc_start = dflt_resc_start; 3235 goto out; 3236 } 3237 #endif 3238 3239 rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 3240 &mcp_resp, p_resc_num, p_resc_start); 3241 if (rc != ECORE_SUCCESS) { 3242 DP_NOTICE(p_hwfn, true, 3243 "MFW response failure for an allocation request for" 3244 " resource %d [%s]\n", 3245 res_id, ecore_hw_get_resc_name(res_id)); 3246 return rc; 3247 } 3248 3249 /* Default driver values are applied in the following cases: 3250 * - The resource allocation MB command is not supported by the MFW 3251 * - There is an internal error in the MFW while processing the request 3252 * - The resource ID is unknown to the MFW 3253 */ 3254 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3255 DP_INFO(p_hwfn, 3256 "Failed to receive allocation info for resource %d [%s]." 3257 " mcp_resp = 0x%x. Applying default values" 3258 " [%d,%d].\n", 3259 res_id, ecore_hw_get_resc_name(res_id), mcp_resp, 3260 dflt_resc_num, dflt_resc_start); 3261 3262 *p_resc_num = dflt_resc_num; 3263 *p_resc_start = dflt_resc_start; 3264 goto out; 3265 } 3266 3267 if ((*p_resc_num != dflt_resc_num || 3268 *p_resc_start != dflt_resc_start) && 3269 res_id != ECORE_SB) { 3270 DP_INFO(p_hwfn, 3271 "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n", 3272 res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, 3273 *p_resc_start, dflt_resc_num, dflt_resc_start, 3274 drv_resc_alloc ? " - Applying default values" : ""); 3275 if (drv_resc_alloc) { 3276 *p_resc_num = dflt_resc_num; 3277 *p_resc_start = dflt_resc_start; 3278 } 3279 } 3280 out: 3281 return ECORE_SUCCESS; 3282 } 3283 3284 static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, 3285 bool drv_resc_alloc) 3286 { 3287 enum _ecore_status_t rc; 3288 u8 res_id; 3289 3290 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 3291 rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc); 3292 if (rc != ECORE_SUCCESS) 3293 return rc; 3294 } 3295 3296 return ECORE_SUCCESS; 3297 } 3298 3299 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, 3300 struct ecore_ptt *p_ptt, 3301 bool drv_resc_alloc) 3302 { 3303 struct ecore_resc_unlock_params resc_unlock_params; 3304 struct ecore_resc_lock_params resc_lock_params; 3305 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3306 u8 res_id; 3307 enum _ecore_status_t rc; 3308 #ifndef ASIC_ONLY 3309 u32 *resc_start = p_hwfn->hw_info.resc_start; 3310 u32 *resc_num = p_hwfn->hw_info.resc_num; 3311 /* For AH, an equal share of the ILT lines between the maximal number of 3312 * PFs is not enough for RoCE. 
This would be solved by the future 3313 * resource allocation scheme, but isn't currently present for 3314 * FPGA/emulation. For now we keep a number that is sufficient for RoCE 3315 * to work - the BB number of ILT lines divided by its max PFs number. 3316 */ 3317 u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB; 3318 #endif 3319 3320 /* Setting the max values of the soft resources and the following 3321 * resources allocation queries should be atomic. Since several PFs can 3322 * run in parallel - a resource lock is needed. 3323 * If either the resource lock or resource set value commands are not 3324 * supported - skip the max values setting, release the lock if 3325 * needed, and proceed to the queries. Other failures, including a 3326 * failure to acquire the lock, will cause this function to fail. 3327 * Old drivers that don't acquire the lock can run in parallel, and 3328 * their allocation values won't be affected by the updated max values. 3329 */ 3330 ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, 3331 ECORE_RESC_LOCK_RESC_ALLOC, false); 3332 3333 rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); 3334 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3335 return rc; 3336 } else if (rc == ECORE_NOTIMPL) { 3337 DP_INFO(p_hwfn, 3338 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 3339 } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { 3340 DP_NOTICE(p_hwfn, false, 3341 "Failed to acquire the resource lock for the resource allocation commands\n"); 3342 rc = ECORE_BUSY; 3343 goto unlock_and_exit; 3344 } else { 3345 rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt); 3346 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3347 DP_NOTICE(p_hwfn, false, 3348 "Failed to set the max values of the soft resources\n"); 3349 goto unlock_and_exit; 3350 } else if (rc == ECORE_NOTIMPL) { 3351 DP_INFO(p_hwfn, 3352 "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 3353 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, 3354 &resc_unlock_params); 3355 if (rc != ECORE_SUCCESS) 3356 DP_INFO(p_hwfn, 3357 "Failed to release the resource lock for the resource allocation commands\n"); 3358 } 3359 } 3360 3361 rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc); 3362 if (rc != ECORE_SUCCESS) 3363 goto unlock_and_exit; 3364 3365 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 3366 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, 3367 &resc_unlock_params); 3368 if (rc != ECORE_SUCCESS) 3369 DP_INFO(p_hwfn, 3370 "Failed to release the resource lock for the resource allocation commands\n"); 3371 } 3372 3373 #ifndef ASIC_ONLY 3374 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3375 /* Reduced build contains less PQs */ 3376 if (!(p_hwfn->p_dev->b_is_emul_full)) { 3377 resc_num[ECORE_PQ] = 32; 3378 resc_start[ECORE_PQ] = resc_num[ECORE_PQ] * 3379 p_hwfn->enabled_func_idx; 3380 } 3381 3382 /* For AH emulation, since we have a possible maximal number of 3383 * 16 enabled PFs, in case there are not enough ILT lines - 3384 * allocate only first PF as RoCE and have all the other ETH 3385 * only with less ILT lines. 
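 * (The "Correct the common ILT calculation" block further down then
 * shifts the ILT start of the remaining PFs to account for PF0's larger
 * share.)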
3386 */ 3387 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full) 3388 resc_num[ECORE_ILT] = OSAL_MAX_T(u32, 3389 resc_num[ECORE_ILT], 3390 roce_min_ilt_lines); 3391 } 3392 3393 /* Correct the common ILT calculation if PF0 has more */ 3394 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && 3395 p_hwfn->p_dev->b_is_emul_full && 3396 p_hwfn->rel_pf_id && resc_num[ECORE_ILT] < roce_min_ilt_lines) 3397 resc_start[ECORE_ILT] += roce_min_ilt_lines - 3398 resc_num[ECORE_ILT]; 3399 #endif 3400 3401 /* Sanity for ILT */ 3402 if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 3403 (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 3404 DP_NOTICE(p_hwfn, true, 3405 "Can't assign ILT pages [%08x,...,%08x]\n", 3406 RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn, 3407 ECORE_ILT) - 3408 1); 3409 return ECORE_INVAL; 3410 } 3411 3412 /* This will also learn the number of SBs from MFW */ 3413 if (ecore_int_igu_reset_cam(p_hwfn, p_ptt)) 3414 return ECORE_INVAL; 3415 3416 ecore_hw_set_feat(p_hwfn); 3417 3418 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3419 "The numbers for each resource are:\n"); 3420 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) 3421 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n", 3422 ecore_hw_get_resc_name(res_id), 3423 RESC_NUM(p_hwfn, res_id), 3424 RESC_START(p_hwfn, res_id)); 3425 3426 return ECORE_SUCCESS; 3427 3428 unlock_and_exit: 3429 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 3430 ecore_mcp_resc_unlock(p_hwfn, p_ptt, 3431 &resc_unlock_params); 3432 return rc; 3433 } 3434 3435 static enum _ecore_status_t 3436 ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, 3437 struct ecore_ptt *p_ptt, 3438 struct ecore_hw_prepare_params *p_params) 3439 { 3440 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode; 3441 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 3442 struct ecore_mcp_link_capabilities *p_caps; 3443 struct ecore_mcp_link_params *link; 3444 enum _ecore_status_t rc; 3445 3446 /* Read global nvm_cfg address */ 3447 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 3448 3449 /* Verify MCP has initialized it */ 3450 if (!nvm_cfg_addr) { 3451 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); 3452 if (p_params->b_relaxed_probe) 3453 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM; 3454 return ECORE_INVAL; 3455 } 3456 3457 /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 3458 3459 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 3460 3461 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3462 OFFSETOF(struct nvm_cfg1, glob) + 3463 OFFSETOF(struct nvm_cfg1_glob, core_cfg); 3464 3465 core_cfg = ecore_rd(p_hwfn, p_ptt, addr); 3466 3467 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 3468 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 3469 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 3470 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G; 3471 break; 3472 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 3473 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G; 3474 break; 3475 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 3476 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G; 3477 break; 3478 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 3479 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F; 3480 break; 3481 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 3482 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E; 3483 break; 3484 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 3485 p_hwfn->hw_info.port_mode = 
ECORE_PORT_MODE_DE_4X20G; 3486 break; 3487 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 3488 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G; 3489 break; 3490 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 3491 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; 3492 break; 3493 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 3494 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G; 3495 break; 3496 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 3497 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; 3498 break; 3499 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 3500 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G; 3501 break; 3502 default: 3503 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n", 3504 core_cfg); 3505 break; 3506 } 3507 3508 /* Read DCBX configuration */ 3509 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3510 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3511 dcbx_mode = ecore_rd(p_hwfn, p_ptt, 3512 port_cfg_addr + 3513 OFFSETOF(struct nvm_cfg1_port, generic_cont0)); 3514 dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK) 3515 >> NVM_CFG1_PORT_DCBX_MODE_OFFSET; 3516 switch (dcbx_mode) { 3517 case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC: 3518 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC; 3519 break; 3520 case NVM_CFG1_PORT_DCBX_MODE_CEE: 3521 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE; 3522 break; 3523 case NVM_CFG1_PORT_DCBX_MODE_IEEE: 3524 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE; 3525 break; 3526 default: 3527 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED; 3528 } 3529 3530 /* Read default link configuration */ 3531 link = &p_hwfn->mcp_info->link_input; 3532 p_caps = &p_hwfn->mcp_info->link_capabilities; 3533 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3534 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3535 link_temp = ecore_rd(p_hwfn, p_ptt, 3536 port_cfg_addr + 3537 OFFSETOF(struct nvm_cfg1_port, speed_cap_mask)); 3538 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 3539 link->speed.advertised_speeds = link_temp; 3540 p_caps->speed_capabilities = link->speed.advertised_speeds; 3541 3542 link_temp = ecore_rd(p_hwfn, p_ptt, 3543 port_cfg_addr + 3544 OFFSETOF(struct nvm_cfg1_port, link_settings)); 3545 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 3546 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 3547 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 3548 link->speed.autoneg = true; 3549 break; 3550 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 3551 link->speed.forced_speed = 1000; 3552 break; 3553 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 3554 link->speed.forced_speed = 10000; 3555 break; 3556 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 3557 link->speed.forced_speed = 25000; 3558 break; 3559 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 3560 link->speed.forced_speed = 40000; 3561 break; 3562 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 3563 link->speed.forced_speed = 50000; 3564 break; 3565 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 3566 link->speed.forced_speed = 100000; 3567 break; 3568 default: 3569 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp); 3570 } 3571 3572 p_caps->default_speed = link->speed.forced_speed; 3573 p_caps->default_speed_autoneg = link->speed.autoneg; 3574 3575 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 3576 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 3577 link->pause.autoneg = !!(link_temp & 3578 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 3579 link->pause.forced_rx = !!(link_temp & 3580 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 3581 link->pause.forced_tx = !!(link_temp & 3582 
NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 3583 link->loopback_mode = 0; 3584 3585 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 3586 link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr + 3587 OFFSETOF(struct nvm_cfg1_port, ext_phy)); 3588 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; 3589 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; 3590 p_caps->default_eee = ECORE_MCP_EEE_ENABLED; 3591 link->eee.enable = true; 3592 switch (link_temp) { 3593 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: 3594 p_caps->default_eee = ECORE_MCP_EEE_DISABLED; 3595 link->eee.enable = false; 3596 break; 3597 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: 3598 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; 3599 break; 3600 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: 3601 p_caps->eee_lpi_timer = 3602 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; 3603 break; 3604 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: 3605 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; 3606 break; 3607 } 3608 3609 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; 3610 link->eee.tx_lpi_enable = link->eee.enable; 3611 link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV; 3612 } else { 3613 p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED; 3614 } 3615 3616 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 3617 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: %02x [%08x usec]\n", 3618 link->speed.forced_speed, link->speed.advertised_speeds, 3619 link->speed.autoneg, link->pause.autoneg, 3620 p_caps->default_eee, p_caps->eee_lpi_timer); 3621 3622 /* Read Multi-function information from shmem */ 3623 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3624 OFFSETOF(struct nvm_cfg1, glob) + 3625 OFFSETOF(struct nvm_cfg1_glob, generic_cont0); 3626 3627 generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr); 3628 3629 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 3630 NVM_CFG1_GLOB_MF_MODE_OFFSET; 3631 3632 switch (mf_mode) { 3633 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 3634 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS; 3635 break; 3636 case NVM_CFG1_GLOB_MF_MODE_UFP: 3637 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | 3638 1 << ECORE_MF_UFP_SPECIFIC | 3639 1 << ECORE_MF_8021Q_TAGGING; 3640 break; 3641 case NVM_CFG1_GLOB_MF_MODE_BD: 3642 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | 3643 1 << ECORE_MF_LLH_PROTO_CLSS | 3644 1 << ECORE_MF_8021AD_TAGGING; 3645 break; 3646 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 3647 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | 3648 1 << ECORE_MF_LLH_PROTO_CLSS | 3649 1 << ECORE_MF_LL2_NON_UNICAST | 3650 1 << ECORE_MF_INTER_PF_SWITCH | 3651 1 << ECORE_MF_DISABLE_ARFS; 3652 break; 3653 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 3654 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | 3655 1 << ECORE_MF_LLH_PROTO_CLSS | 3656 1 << ECORE_MF_LL2_NON_UNICAST; 3657 if (ECORE_IS_BB(p_hwfn->p_dev)) 3658 p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF; 3659 break; 3660 } 3661 DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", 3662 p_hwfn->p_dev->mf_bits); 3663 3664 if (ECORE_IS_CMT(p_hwfn->p_dev)) 3665 p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS); 3666 3667 /* This duplicates the switch above, but it makes it easier to 3668 * strip this code in Linux. Long term, it might be better to 3669 * have getters for the needed ECORE_MF_* fields, convert the 3670 * client code and eliminate this.
3671 */ 3672 switch (mf_mode) { 3673 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 3674 case NVM_CFG1_GLOB_MF_MODE_BD: 3675 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; 3676 break; 3677 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 3678 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR; 3679 break; 3680 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 3681 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT; 3682 break; 3683 case NVM_CFG1_GLOB_MF_MODE_UFP: 3684 p_hwfn->p_dev->mf_mode = ECORE_MF_UFP; 3685 break; 3686 } 3687 3688 /* Read Multi-function information from shmem */ 3689 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3690 OFFSETOF(struct nvm_cfg1, glob) + 3691 OFFSETOF(struct nvm_cfg1_glob, device_capabilities); 3692 3693 device_capabilities = ecore_rd(p_hwfn, p_ptt, addr); 3694 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 3695 OSAL_SET_BIT(ECORE_DEV_CAP_ETH, 3696 &p_hwfn->hw_info.device_capabilities); 3697 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 3698 OSAL_SET_BIT(ECORE_DEV_CAP_FCOE, 3699 &p_hwfn->hw_info.device_capabilities); 3700 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 3701 OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI, 3702 &p_hwfn->hw_info.device_capabilities); 3703 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 3704 OSAL_SET_BIT(ECORE_DEV_CAP_ROCE, 3705 &p_hwfn->hw_info.device_capabilities); 3706 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP) 3707 OSAL_SET_BIT(ECORE_DEV_CAP_IWARP, 3708 &p_hwfn->hw_info.device_capabilities); 3709 3710 rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 3711 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 3712 rc = ECORE_SUCCESS; 3713 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3714 } 3715 3716 return rc; 3717 } 3718 3719 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn, 3720 struct ecore_ptt *p_ptt) 3721 { 3722 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 3723 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 3724 struct ecore_dev *p_dev = p_hwfn->p_dev; 3725 3726 num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 3727 3728 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 3729 * in the other bits are selected. 3730 * Bits 1-15 are for functions 1-15, respectively, and their value is 3731 * '0' only for enabled functions (function 0 always exists and 3732 * enabled). 3733 * In case of CMT in BB, only the "even" functions are enabled, and thus 3734 * the number of functions for both hwfns is learnt from the same bits. 
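 * For example (illustrative value, not read from real HW): on an AH PF with
 * abs_pf_id 2, reg_function_hide = 0x0000fff1 means bit 0 is set (bypass
 * values are valid) and only functions 1-3 are enabled besides function 0.
 * With eng_mask 0xfffe this yields num_funcs = 1 + 3 = 4, and since none of
 * the functions below PF 2 are hidden, enabled_func_idx remains 2.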
3735 */ 3736 if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) { 3737 reg_function_hide = ecore_rd(p_hwfn, p_ptt, 3738 MISCS_REG_FUNCTION_HIDE_BB_K2); 3739 } else { /* E5 */ 3740 reg_function_hide = 0; 3741 } 3742 3743 if (reg_function_hide & 0x1) { 3744 if (ECORE_IS_BB(p_dev)) { 3745 if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) { 3746 num_funcs = 0; 3747 eng_mask = 0xaaaa; 3748 } else { 3749 num_funcs = 1; 3750 eng_mask = 0x5554; 3751 } 3752 } else { 3753 num_funcs = 1; 3754 eng_mask = 0xfffe; 3755 } 3756 3757 /* Get the number of the enabled functions on the engine */ 3758 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; 3759 while (tmp) { 3760 if (tmp & 0x1) 3761 num_funcs++; 3762 tmp >>= 0x1; 3763 } 3764 3765 /* Get the PF index within the enabled functions */ 3766 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; 3767 tmp = reg_function_hide & eng_mask & low_pfs_mask; 3768 while (tmp) { 3769 if (tmp & 0x1) 3770 enabled_func_idx--; 3771 tmp >>= 0x1; 3772 } 3773 } 3774 3775 p_hwfn->num_funcs_on_engine = num_funcs; 3776 p_hwfn->enabled_func_idx = enabled_func_idx; 3777 3778 #ifndef ASIC_ONLY 3779 if (CHIP_REV_IS_FPGA(p_dev)) { 3780 DP_NOTICE(p_hwfn, false, 3781 "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n"); 3782 p_hwfn->num_funcs_on_engine = 4; 3783 } 3784 #endif 3785 3786 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3787 "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 3788 p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, 3789 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 3790 } 3791 3792 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn, 3793 struct ecore_ptt *p_ptt) 3794 { 3795 struct ecore_dev *p_dev = p_hwfn->p_dev; 3796 u32 port_mode; 3797 3798 #ifndef ASIC_ONLY 3799 /* Read the port mode */ 3800 if (CHIP_REV_IS_FPGA(p_dev)) 3801 port_mode = 4; 3802 else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev)) 3803 /* In CMT on emulation, assume 1 port */ 3804 port_mode = 1; 3805 else 3806 #endif 3807 port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); 3808 3809 if (port_mode < 3) { 3810 p_dev->num_ports_in_engine = 1; 3811 } else if (port_mode <= 5) { 3812 p_dev->num_ports_in_engine = 2; 3813 } else { 3814 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n", 3815 p_dev->num_ports_in_engine); 3816 3817 /* Default num_ports_in_engine to something */ 3818 p_dev->num_ports_in_engine = 1; 3819 } 3820 } 3821 3822 static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn, 3823 struct ecore_ptt *p_ptt) 3824 { 3825 struct ecore_dev *p_dev = p_hwfn->p_dev; 3826 u32 port; 3827 int i; 3828 3829 p_dev->num_ports_in_engine = 0; 3830 3831 #ifndef ASIC_ONLY 3832 if (CHIP_REV_IS_EMUL(p_dev)) { 3833 port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); 3834 switch ((port & 0xf000) >> 12) { 3835 case 1: 3836 p_dev->num_ports_in_engine = 1; 3837 break; 3838 case 3: 3839 p_dev->num_ports_in_engine = 2; 3840 break; 3841 case 0xf: 3842 p_dev->num_ports_in_engine = 4; 3843 break; 3844 default: 3845 DP_NOTICE(p_hwfn, false, 3846 "Unknown port mode in ECO_RESERVED %08x\n", 3847 port); 3848 } 3849 } else 3850 #endif 3851 for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 3852 port = ecore_rd(p_hwfn, p_ptt, 3853 CNIG_REG_NIG_PORT0_CONF_K2_E5 + 3854 (i * 4)); 3855 if (port & 1) 3856 p_dev->num_ports_in_engine++; 3857 } 3858 3859 if (!p_dev->num_ports_in_engine) { 3860 DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n"); 3861 3862 /* Default num_ports_in_engine to something */ 3863 
p_dev->num_ports_in_engine = 1; 3864 } 3865 } 3866 3867 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn, 3868 struct ecore_ptt *p_ptt) 3869 { 3870 struct ecore_dev *p_dev = p_hwfn->p_dev; 3871 3872 /* Determine the number of ports per engine */ 3873 if (ECORE_IS_BB(p_dev)) 3874 ecore_hw_info_port_num_bb(p_hwfn, p_ptt); 3875 else 3876 ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt); 3877 3878 /* Get the total number of ports of the device */ 3879 if (ECORE_IS_CMT(p_dev)) { 3880 /* In CMT there is always only one port */ 3881 p_dev->num_ports = 1; 3882 #ifndef ASIC_ONLY 3883 } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) { 3884 p_dev->num_ports = p_dev->num_ports_in_engine * 3885 ecore_device_num_engines(p_dev); 3886 #endif 3887 } else { 3888 u32 addr, global_offsize, global_addr; 3889 3890 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 3891 PUBLIC_GLOBAL); 3892 global_offsize = ecore_rd(p_hwfn, p_ptt, addr); 3893 global_addr = SECTION_ADDR(global_offsize, 0); 3894 addr = global_addr + OFFSETOF(struct public_global, max_ports); 3895 p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr); 3896 } 3897 } 3898 3899 static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn, 3900 struct ecore_ptt *p_ptt) 3901 { 3902 struct ecore_mcp_link_capabilities *p_caps; 3903 u32 eee_status; 3904 3905 p_caps = &p_hwfn->mcp_info->link_capabilities; 3906 if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED) 3907 return; 3908 3909 p_caps->eee_speed_caps = 0; 3910 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 3911 OFFSETOF(struct public_port, eee_status)); 3912 eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> 3913 EEE_SUPPORTED_SPEED_OFFSET; 3914 if (eee_status & EEE_1G_SUPPORTED) 3915 p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV; 3916 if (eee_status & EEE_10G_ADV) 3917 p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV; 3918 } 3919 3920 static enum _ecore_status_t 3921 ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3922 enum ecore_pci_personality personality, 3923 struct ecore_hw_prepare_params *p_params) 3924 { 3925 bool drv_resc_alloc = p_params->drv_resc_alloc; 3926 enum _ecore_status_t rc; 3927 3928 if (IS_ECORE_PACING(p_hwfn)) { 3929 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV, 3930 "Skipping IOV as packet pacing is requested\n"); 3931 } 3932 3933 /* Since all information is common, only first hwfns should do this */ 3934 if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) { 3935 rc = ecore_iov_hw_info(p_hwfn); 3936 if (rc != ECORE_SUCCESS) { 3937 if (p_params->b_relaxed_probe) 3938 p_params->p_relaxed_res = 3939 ECORE_HW_PREPARE_BAD_IOV; 3940 else 3941 return rc; 3942 } 3943 } 3944 3945 if (IS_LEAD_HWFN(p_hwfn)) 3946 ecore_hw_info_port_num(p_hwfn, p_ptt); 3947 3948 ecore_mcp_get_capabilities(p_hwfn, p_ptt); 3949 3950 #ifndef ASIC_ONLY 3951 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) { 3952 #endif 3953 rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params); 3954 if (rc != ECORE_SUCCESS) 3955 return rc; 3956 #ifndef ASIC_ONLY 3957 } 3958 #endif 3959 3960 rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); 3961 if (rc != ECORE_SUCCESS) { 3962 if (p_params->b_relaxed_probe) 3963 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU; 3964 else 3965 return rc; 3966 } 3967 3968 #ifndef ASIC_ONLY 3969 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) { 3970 #endif 3971 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, 3972 p_hwfn->mcp_info->func_info.mac, ETH_ALEN); 3973 #ifndef ASIC_ONLY 3974 } else { 3975 static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 
5, 6 }; 3976 3977 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN); 3978 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id; 3979 } 3980 #endif 3981 3982 if (ecore_mcp_is_init(p_hwfn)) { 3983 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET) 3984 p_hwfn->hw_info.ovlan = 3985 p_hwfn->mcp_info->func_info.ovlan; 3986 3987 ecore_mcp_cmd_port_init(p_hwfn, p_ptt); 3988 3989 ecore_mcp_get_eee_caps(p_hwfn, p_ptt); 3990 3991 ecore_mcp_read_ufp_config(p_hwfn, p_ptt); 3992 } 3993 3994 if (personality != ECORE_PCI_DEFAULT) { 3995 p_hwfn->hw_info.personality = personality; 3996 } else if (ecore_mcp_is_init(p_hwfn)) { 3997 enum ecore_pci_personality protocol; 3998 3999 protocol = p_hwfn->mcp_info->func_info.protocol; 4000 p_hwfn->hw_info.personality = protocol; 4001 } 4002 4003 #ifndef ASIC_ONLY 4004 /* To overcome the lack of ILT lines on emulation, at least until we have 4005 * a definite answer from the system about it, allow only PF0 to be RoCE. 4006 */ 4007 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) { 4008 if (!p_hwfn->rel_pf_id) 4009 p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE; 4010 else 4011 p_hwfn->hw_info.personality = ECORE_PCI_ETH; 4012 } 4013 #endif 4014 4015 /* Although some constellations in BB may support more than 4 TCs, 4016 * that can result in a performance penalty in some cases. 4 4017 * represents a good tradeoff between performance and flexibility. 4018 */ 4019 if (IS_ECORE_PACING(p_hwfn)) 4020 p_hwfn->hw_info.num_hw_tc = 1; 4021 else 4022 p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 4023 4024 /* Start out with a single active TC. This can be increased either 4025 * by DCBx negotiation or by the upper layer driver. 4026 */ 4027 p_hwfn->hw_info.num_active_tc = 1; 4028 4029 ecore_get_num_funcs(p_hwfn, p_ptt); 4030 4031 if (ecore_mcp_is_init(p_hwfn)) 4032 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 4033 4034 /* In case of forcing the driver's default resource allocation, calling 4035 * ecore_hw_get_resc() should come after initializing the personality 4036 * and after getting the number of functions, since the calculation of 4037 * the resources/features depends on them. 4038 * This order is not harmful if not forcing.
4039 */ 4040 rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc); 4041 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 4042 rc = ECORE_SUCCESS; 4043 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 4044 } 4045 4046 return rc; 4047 } 4048 4049 static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn, 4050 struct ecore_ptt *p_ptt) 4051 { 4052 struct ecore_dev *p_dev = p_hwfn->p_dev; 4053 u16 device_id_mask; 4054 u32 tmp; 4055 4056 /* Read Vendor Id / Device Id */ 4057 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET, 4058 &p_dev->vendor_id); 4059 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET, 4060 &p_dev->device_id); 4061 4062 /* Determine type */ 4063 device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK; 4064 switch (device_id_mask) { 4065 case ECORE_DEV_ID_MASK_BB: 4066 p_dev->type = ECORE_DEV_TYPE_BB; 4067 break; 4068 case ECORE_DEV_ID_MASK_AH: 4069 p_dev->type = ECORE_DEV_TYPE_AH; 4070 break; 4071 default: 4072 DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n", 4073 p_dev->device_id); 4074 return ECORE_ABORTED; 4075 } 4076 4077 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); 4078 p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM); 4079 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); 4080 p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV); 4081 4082 /* Learn number of HW-functions */ 4083 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); 4084 4085 if (tmp & (1 << p_hwfn->rel_pf_id)) { 4086 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); 4087 p_dev->num_hwfns = 2; 4088 } else { 4089 p_dev->num_hwfns = 1; 4090 } 4091 4092 #ifndef ASIC_ONLY 4093 if (CHIP_REV_IS_EMUL(p_dev)) { 4094 /* For some reason we have problems with this register 4095 * in B0 emulation; Simply assume no CMT 4096 */ 4097 DP_NOTICE(p_dev->hwfns, false, 4098 "device on emul - assume no CMT\n"); 4099 p_dev->num_hwfns = 1; 4100 } 4101 #endif 4102 4103 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG); 4104 p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID); 4105 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); 4106 p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL); 4107 4108 DP_INFO(p_dev->hwfns, 4109 "Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n", 4110 ECORE_IS_BB(p_dev) ? 
"BB" : "AH", 4111 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, 4112 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, 4113 p_dev->chip_metal); 4114 4115 if (ECORE_IS_BB_A0(p_dev)) { 4116 DP_NOTICE(p_dev->hwfns, false, 4117 "The chip type/rev (BB A0) is not supported!\n"); 4118 return ECORE_ABORTED; 4119 } 4120 #ifndef ASIC_ONLY 4121 if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) 4122 ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1); 4123 4124 if (CHIP_REV_IS_EMUL(p_dev)) { 4125 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); 4126 if (tmp & (1 << 29)) { 4127 DP_NOTICE(p_hwfn, false, 4128 "Emulation: Running on a FULL build\n"); 4129 p_dev->b_is_emul_full = true; 4130 } else { 4131 DP_NOTICE(p_hwfn, false, 4132 "Emulation: Running on a REDUCED build\n"); 4133 } 4134 } 4135 #endif 4136 4137 return ECORE_SUCCESS; 4138 } 4139 4140 #ifndef LINUX_REMOVE 4141 void ecore_prepare_hibernate(struct ecore_dev *p_dev) 4142 { 4143 int j; 4144 4145 if (IS_VF(p_dev)) 4146 return; 4147 4148 for_each_hwfn(p_dev, j) { 4149 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 4150 4151 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, 4152 "Mark hw/fw uninitialized\n"); 4153 4154 p_hwfn->hw_init_done = false; 4155 4156 ecore_ptt_invalidate(p_hwfn); 4157 } 4158 } 4159 #endif 4160 4161 static enum _ecore_status_t 4162 ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, 4163 void OSAL_IOMEM * p_regview, 4164 void OSAL_IOMEM * p_doorbells, 4165 struct ecore_hw_prepare_params *p_params) 4166 { 4167 struct ecore_mdump_retain_data mdump_retain; 4168 struct ecore_dev *p_dev = p_hwfn->p_dev; 4169 struct ecore_mdump_info mdump_info; 4170 enum _ecore_status_t rc = ECORE_SUCCESS; 4171 4172 /* Split PCI bars evenly between hwfns */ 4173 p_hwfn->regview = p_regview; 4174 p_hwfn->doorbells = p_doorbells; 4175 4176 if (IS_VF(p_dev)) 4177 return ecore_vf_hw_prepare(p_hwfn); 4178 4179 /* Validate that chip access is feasible */ 4180 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 4181 DP_ERR(p_hwfn, 4182 "Reading the ME register returns all Fs; Preventing further chip access\n"); 4183 if (p_params->b_relaxed_probe) 4184 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME; 4185 return ECORE_INVAL; 4186 } 4187 4188 get_function_id(p_hwfn); 4189 4190 /* Allocate PTT pool */ 4191 rc = ecore_ptt_pool_alloc(p_hwfn); 4192 if (rc) { 4193 DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n"); 4194 if (p_params->b_relaxed_probe) 4195 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4196 goto err0; 4197 } 4198 4199 /* Allocate the main PTT */ 4200 p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 4201 4202 /* First hwfn learns basic information, e.g., number of hwfns */ 4203 if (!p_hwfn->my_id) { 4204 rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); 4205 if (rc != ECORE_SUCCESS) { 4206 if (p_params->b_relaxed_probe) 4207 p_params->p_relaxed_res = 4208 ECORE_HW_PREPARE_FAILED_DEV; 4209 goto err1; 4210 } 4211 } 4212 4213 ecore_hw_hwfn_prepare(p_hwfn); 4214 4215 /* Initialize MCP structure */ 4216 rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 4217 if (rc) { 4218 DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n"); 4219 if (p_params->b_relaxed_probe) 4220 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4221 goto err1; 4222 } 4223 4224 /* Read the device configuration information from the HW and SHMEM */ 4225 rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, 4226 p_params->personality, p_params); 4227 if (rc) { 4228 DP_NOTICE(p_hwfn, false, "Failed to get HW 
information\n"); 4229 goto err2; 4230 } 4231 4232 /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is 4233 * called, since among others it sets the ports number in an engine. 4234 */ 4235 if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) && 4236 !p_dev->recov_in_prog) { 4237 rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 4238 if (rc != ECORE_SUCCESS) 4239 DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); 4240 } 4241 4242 /* Check if mdump logs/data are present and update the epoch value */ 4243 if (IS_LEAD_HWFN(p_hwfn)) { 4244 #ifndef ASIC_ONLY 4245 if (!CHIP_REV_IS_EMUL(p_dev)) { 4246 #endif 4247 rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, 4248 &mdump_info); 4249 if (rc == ECORE_SUCCESS && mdump_info.num_of_logs) 4250 DP_NOTICE(p_hwfn, false, 4251 "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); 4252 4253 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt, 4254 &mdump_retain); 4255 if (rc == ECORE_SUCCESS && mdump_retain.valid) 4256 DP_NOTICE(p_hwfn, false, 4257 "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n", 4258 mdump_retain.epoch, mdump_retain.pf, 4259 mdump_retain.status); 4260 4261 ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, 4262 p_params->epoch); 4263 #ifndef ASIC_ONLY 4264 } 4265 #endif 4266 } 4267 4268 /* Allocate the init RT array and initialize the init-ops engine */ 4269 rc = ecore_init_alloc(p_hwfn); 4270 if (rc) { 4271 DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n"); 4272 if (p_params->b_relaxed_probe) 4273 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4274 goto err2; 4275 } 4276 #ifndef ASIC_ONLY 4277 if (CHIP_REV_IS_FPGA(p_dev)) { 4278 DP_NOTICE(p_hwfn, false, 4279 "FPGA: workaround; Prevent DMAE parities\n"); 4280 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5, 4281 7); 4282 4283 DP_NOTICE(p_hwfn, false, 4284 "FPGA: workaround: Set VF bar0 size\n"); 4285 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4286 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4); 4287 } 4288 #endif 4289 4290 return rc; 4291 err2: 4292 if (IS_LEAD_HWFN(p_hwfn)) 4293 ecore_iov_free_hw_info(p_dev); 4294 ecore_mcp_free(p_hwfn); 4295 err1: 4296 ecore_hw_hwfn_free(p_hwfn); 4297 err0: 4298 return rc; 4299 } 4300 4301 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, 4302 struct ecore_hw_prepare_params *p_params) 4303 { 4304 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4305 enum _ecore_status_t rc; 4306 4307 p_dev->chk_reg_fifo = p_params->chk_reg_fifo; 4308 p_dev->allow_mdump = p_params->allow_mdump; 4309 p_hwfn->b_en_pacing = p_params->b_en_pacing; 4310 4311 if (p_params->b_relaxed_probe) 4312 p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; 4313 4314 /* Store the precompiled init data ptrs */ 4315 if (IS_PF(p_dev)) 4316 ecore_init_iro_array(p_dev); 4317 4318 /* Initialize the first hwfn - will learn number of hwfns */ 4319 rc = ecore_hw_prepare_single(p_hwfn, 4320 p_dev->regview, 4321 p_dev->doorbells, p_params); 4322 if (rc != ECORE_SUCCESS) 4323 return rc; 4324 4325 p_params->personality = p_hwfn->hw_info.personality; 4326 4327 /* initilalize 2nd hwfn if necessary */ 4328 if (ECORE_IS_CMT(p_dev)) { 4329 void OSAL_IOMEM *p_regview, *p_doorbell; 4330 u8 OSAL_IOMEM *addr; 4331 4332 /* adjust bar offset for second engine */ 4333 addr = (u8 OSAL_IOMEM *)p_dev->regview + 4334 ecore_hw_bar_size(p_hwfn, 4335 p_hwfn->p_main_ptt, 4336 BAR_ID_0) / 2; 4337 p_regview = (void OSAL_IOMEM *)addr; 4338 4339 addr = (u8 OSAL_IOMEM *)p_dev->doorbells + 4340 
ecore_hw_bar_size(p_hwfn, 4341 p_hwfn->p_main_ptt, 4342 BAR_ID_1) / 2; 4343 p_doorbell = (void OSAL_IOMEM *)addr; 4344 4345 p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing; 4346 /* prepare second hw function */ 4347 rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, 4348 p_doorbell, p_params); 4349 4350 /* in case of error, need to free the previously 4351 * initiliazed hwfn 0. 4352 */ 4353 if (rc != ECORE_SUCCESS) { 4354 if (p_params->b_relaxed_probe) 4355 p_params->p_relaxed_res = 4356 ECORE_HW_PREPARE_FAILED_ENG2; 4357 4358 if (IS_PF(p_dev)) { 4359 ecore_init_free(p_hwfn); 4360 ecore_mcp_free(p_hwfn); 4361 ecore_hw_hwfn_free(p_hwfn); 4362 } else { 4363 DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n"); 4364 } 4365 return rc; 4366 } 4367 } 4368 4369 return rc; 4370 } 4371 4372 void ecore_hw_remove(struct ecore_dev *p_dev) 4373 { 4374 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4375 int i; 4376 4377 if (IS_PF(p_dev)) 4378 ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 4379 ECORE_OV_DRIVER_STATE_NOT_LOADED); 4380 4381 for_each_hwfn(p_dev, i) { 4382 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 4383 4384 if (IS_VF(p_dev)) { 4385 ecore_vf_pf_release(p_hwfn); 4386 continue; 4387 } 4388 4389 ecore_init_free(p_hwfn); 4390 ecore_hw_hwfn_free(p_hwfn); 4391 ecore_mcp_free(p_hwfn); 4392 4393 #ifdef CONFIG_ECORE_LOCK_ALLOC 4394 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); 4395 #endif 4396 } 4397 4398 ecore_iov_free_hw_info(p_dev); 4399 } 4400 4401 static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev, 4402 struct ecore_chain *p_chain) 4403 { 4404 void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL; 4405 dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 4406 struct ecore_chain_next *p_next; 4407 u32 size, i; 4408 4409 if (!p_virt) 4410 return; 4411 4412 size = p_chain->elem_size * p_chain->usable_per_page; 4413 4414 for (i = 0; i < p_chain->page_cnt; i++) { 4415 if (!p_virt) 4416 break; 4417 4418 p_next = (struct ecore_chain_next *)((u8 *)p_virt + size); 4419 p_virt_next = p_next->next_virt; 4420 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 4421 4422 OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys, 4423 ECORE_CHAIN_PAGE_SIZE); 4424 4425 p_virt = p_virt_next; 4426 p_phys = p_phys_next; 4427 } 4428 } 4429 4430 static void ecore_chain_free_single(struct ecore_dev *p_dev, 4431 struct ecore_chain *p_chain) 4432 { 4433 if (!p_chain->p_virt_addr) 4434 return; 4435 4436 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr, 4437 p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE); 4438 } 4439 4440 static void ecore_chain_free_pbl(struct ecore_dev *p_dev, 4441 struct ecore_chain *p_chain) 4442 { 4443 void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 4444 u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table; 4445 u32 page_cnt = p_chain->page_cnt, i, pbl_size; 4446 4447 if (!pp_virt_addr_tbl) 4448 return; 4449 4450 if (!p_pbl_virt) 4451 goto out; 4452 4453 for (i = 0; i < page_cnt; i++) { 4454 if (!pp_virt_addr_tbl[i]) 4455 break; 4456 4457 OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i], 4458 *(dma_addr_t *)p_pbl_virt, 4459 ECORE_CHAIN_PAGE_SIZE); 4460 4461 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4462 } 4463 4464 pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4465 4466 if (!p_chain->b_external_pbl) 4467 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table, 4468 p_chain->pbl_sp.p_phys_table, pbl_size); 4469 out: 4470 OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); 4471 } 4472 4473 void 
ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4474 { 4475 switch (p_chain->mode) { 4476 case ECORE_CHAIN_MODE_NEXT_PTR: 4477 ecore_chain_free_next_ptr(p_dev, p_chain); 4478 break; 4479 case ECORE_CHAIN_MODE_SINGLE: 4480 ecore_chain_free_single(p_dev, p_chain); 4481 break; 4482 case ECORE_CHAIN_MODE_PBL: 4483 ecore_chain_free_pbl(p_dev, p_chain); 4484 break; 4485 } 4486 } 4487 4488 static enum _ecore_status_t 4489 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev, 4490 enum ecore_chain_cnt_type cnt_type, 4491 osal_size_t elem_size, u32 page_cnt) 4492 { 4493 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 4494 4495 /* The actual chain size can be larger than the maximal possible value 4496 * after rounding up the requested number of elements to pages, and after 4497 * taking into account the unusable elements (next-ptr elements). 4498 * The size of a "u16" chain can be (U16_MAX + 1) since the chain 4499 * size/capacity fields are of a u32 type. 4500 */ 4501 if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 && 4502 chain_size > ((u32)ECORE_U16_MAX + 1)) || 4503 (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 && 4504 chain_size > ECORE_U32_MAX)) { 4505 DP_NOTICE(p_dev, true, 4506 "The actual chain size (0x%lx) is larger than the maximal possible value\n", 4507 (unsigned long)chain_size); 4508 return ECORE_INVAL; 4509 } 4510 4511 return ECORE_SUCCESS; 4512 } 4513 4514 static enum _ecore_status_t 4515 ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4516 { 4517 void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL; 4518 dma_addr_t p_phys = 0; 4519 u32 i; 4520 4521 for (i = 0; i < p_chain->page_cnt; i++) { 4522 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4523 ECORE_CHAIN_PAGE_SIZE); 4524 if (!p_virt) { 4525 DP_NOTICE(p_dev, false, 4526 "Failed to allocate chain memory\n"); 4527 return ECORE_NOMEM; 4528 } 4529 4530 if (i == 0) { 4531 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4532 ecore_chain_reset(p_chain); 4533 } else { 4534 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4535 p_virt, p_phys); 4536 } 4537 4538 p_virt_prev = p_virt; 4539 } 4540 /* Last page's next element should point to the beginning of the 4541 * chain.
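 * (i.e., the next-ptr chain is circular: advancing past the last usable
 * element of the final page wraps back to the first page.)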
4542 */ 4543 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4544 p_chain->p_virt_addr, 4545 p_chain->p_phys_addr); 4546 4547 return ECORE_SUCCESS; 4548 } 4549 4550 static enum _ecore_status_t 4551 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4552 { 4553 dma_addr_t p_phys = 0; 4554 void *p_virt = OSAL_NULL; 4555 4556 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); 4557 if (!p_virt) { 4558 DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); 4559 return ECORE_NOMEM; 4560 } 4561 4562 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4563 ecore_chain_reset(p_chain); 4564 4565 return ECORE_SUCCESS; 4566 } 4567 4568 static enum _ecore_status_t 4569 ecore_chain_alloc_pbl(struct ecore_dev *p_dev, 4570 struct ecore_chain *p_chain, 4571 struct ecore_chain_ext_pbl *ext_pbl) 4572 { 4573 u32 page_cnt = p_chain->page_cnt, size, i; 4574 dma_addr_t p_phys = 0, p_pbl_phys = 0; 4575 void **pp_virt_addr_tbl = OSAL_NULL; 4576 u8 *p_pbl_virt = OSAL_NULL; 4577 void *p_virt = OSAL_NULL; 4578 4579 size = page_cnt * sizeof(*pp_virt_addr_tbl); 4580 pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); 4581 if (!pp_virt_addr_tbl) { 4582 DP_NOTICE(p_dev, false, 4583 "Failed to allocate memory for the chain virtual addresses table\n"); 4584 return ECORE_NOMEM; 4585 } 4586 4587 /* The allocation of the PBL table is done with its full size, since it 4588 * is expected to be successive. 4589 * ecore_chain_init_pbl_mem() is called even in a case of an allocation 4590 * failure, since pp_virt_addr_tbl was previously allocated, and it 4591 * should be saved to allow its freeing during the error flow. 4592 */ 4593 size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4594 4595 if (ext_pbl == OSAL_NULL) { 4596 p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); 4597 } else { 4598 p_pbl_virt = ext_pbl->p_pbl_virt; 4599 p_pbl_phys = ext_pbl->p_pbl_phys; 4600 p_chain->b_external_pbl = true; 4601 } 4602 4603 ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 4604 pp_virt_addr_tbl); 4605 if (!p_pbl_virt) { 4606 DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n"); 4607 return ECORE_NOMEM; 4608 } 4609 4610 for (i = 0; i < page_cnt; i++) { 4611 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4612 ECORE_CHAIN_PAGE_SIZE); 4613 if (!p_virt) { 4614 DP_NOTICE(p_dev, false, 4615 "Failed to allocate chain memory\n"); 4616 return ECORE_NOMEM; 4617 } 4618 4619 if (i == 0) { 4620 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4621 ecore_chain_reset(p_chain); 4622 } 4623 4624 /* Fill the PBL table with the physical address of the page */ 4625 *(dma_addr_t *)p_pbl_virt = p_phys; 4626 /* Keep the virtual address of the page */ 4627 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 4628 4629 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4630 } 4631 4632 return ECORE_SUCCESS; 4633 } 4634 4635 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, 4636 enum ecore_chain_use_mode intended_use, 4637 enum ecore_chain_mode mode, 4638 enum ecore_chain_cnt_type cnt_type, 4639 u32 num_elems, osal_size_t elem_size, 4640 struct ecore_chain *p_chain, 4641 struct ecore_chain_ext_pbl *ext_pbl) 4642 { 4643 u32 page_cnt; 4644 enum _ecore_status_t rc = ECORE_SUCCESS; 4645 4646 if (mode == ECORE_CHAIN_MODE_SINGLE) 4647 page_cnt = 1; 4648 else 4649 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 4650 4651 rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, 4652 page_cnt); 4653 if (rc) { 4654 DP_NOTICE(p_dev, false, 4655 "Cannot allocate a chain with 
the given arguments:\n" 4656 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 4657 intended_use, mode, cnt_type, num_elems, elem_size); 4658 return rc; 4659 } 4660 4661 ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use, 4662 mode, cnt_type, p_dev->dp_ctx); 4663 4664 switch (mode) { 4665 case ECORE_CHAIN_MODE_NEXT_PTR: 4666 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain); 4667 break; 4668 case ECORE_CHAIN_MODE_SINGLE: 4669 rc = ecore_chain_alloc_single(p_dev, p_chain); 4670 break; 4671 case ECORE_CHAIN_MODE_PBL: 4672 rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl); 4673 break; 4674 } 4675 if (rc) 4676 goto nomem; 4677 4678 return ECORE_SUCCESS; 4679 4680 nomem: 4681 ecore_chain_free(p_dev, p_chain); 4682 return rc; 4683 } 4684 4685 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, 4686 u16 src_id, u16 *dst_id) 4687 { 4688 if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { 4689 u16 min, max; 4690 4691 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE); 4692 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 4693 DP_NOTICE(p_hwfn, true, 4694 "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 4695 src_id, min, max); 4696 4697 return ECORE_INVAL; 4698 } 4699 4700 *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id; 4701 4702 return ECORE_SUCCESS; 4703 } 4704 4705 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, 4706 u8 src_id, u8 *dst_id) 4707 { 4708 if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { 4709 u8 min, max; 4710 4711 min = (u8)RESC_START(p_hwfn, ECORE_VPORT); 4712 max = min + RESC_NUM(p_hwfn, ECORE_VPORT); 4713 DP_NOTICE(p_hwfn, true, 4714 "vport id [%d] is not valid, available indices [%d - %d]\n", 4715 src_id, min, max); 4716 4717 return ECORE_INVAL; 4718 } 4719 4720 *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id; 4721 4722 return ECORE_SUCCESS; 4723 } 4724 4725 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, 4726 u8 src_id, u8 *dst_id) 4727 { 4728 if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) { 4729 u8 min, max; 4730 4731 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG); 4732 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG); 4733 DP_NOTICE(p_hwfn, true, 4734 "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 4735 src_id, min, max); 4736 4737 return ECORE_INVAL; 4738 } 4739 4740 *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id; 4741 4742 return ECORE_SUCCESS; 4743 } 4744 4745 static enum _ecore_status_t 4746 ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4747 struct ecore_ptt *p_ptt, u32 high, u32 low, 4748 u32 *p_entry_num) 4749 { 4750 u32 en; 4751 int i; 4752 4753 /* Find a free entry and utilize it */ 4754 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4755 en = ecore_rd(p_hwfn, p_ptt, 4756 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4757 i * sizeof(u32)); 4758 if (en) 4759 continue; 4760 ecore_wr(p_hwfn, p_ptt, 4761 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4762 2 * i * sizeof(u32), low); 4763 ecore_wr(p_hwfn, p_ptt, 4764 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4765 (2 * i + 1) * sizeof(u32), high); 4766 ecore_wr(p_hwfn, p_ptt, 4767 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4768 i * sizeof(u32), 0); 4769 ecore_wr(p_hwfn, p_ptt, 4770 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4771 i * sizeof(u32), 0); 4772 ecore_wr(p_hwfn, p_ptt, 4773 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4774 i * sizeof(u32), 1); 4775 break; 4776 } 4777 4778 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4779 return ECORE_NORESOURCES; 4780 4781 *p_entry_num = i; 4782 4783 return ECORE_SUCCESS; 4784 } 4785 4786 
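/* Illustrative sketch only (not part of the driver flow, kept compiled out):
 * how a client that already holds a hwfn might pin a unicast MAC via the
 * LLH filter API below. The MAC value is an arbitrary example; it would be
 * packed by ecore_llh_add_mac_filter() as high = 0x0011, low = 0x22334455.
 */
#if 0
static enum _ecore_status_t example_llh_pin_mac(struct ecore_hwfn *p_hwfn)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	enum _ecore_status_t rc;

	if (!p_ptt)
		return ECORE_AGAIN;

	/* No-op unless ECORE_MF_LLH_MAC_CLSS is set in mf_bits */
	rc = ecore_llh_add_mac_filter(p_hwfn, p_ptt, mac);
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
#endif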
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn, 4787 struct ecore_ptt *p_ptt, u8 *p_filter) 4788 { 4789 u32 high, low, entry_num; 4790 enum _ecore_status_t rc = ECORE_SUCCESS; 4791 4792 if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, 4793 &p_hwfn->p_dev->mf_bits)) 4794 return ECORE_SUCCESS; 4795 4796 high = p_filter[1] | (p_filter[0] << 8); 4797 low = p_filter[5] | (p_filter[4] << 8) | 4798 (p_filter[3] << 16) | (p_filter[2] << 24); 4799 4800 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4801 rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low, 4802 &entry_num); 4803 if (rc != ECORE_SUCCESS) { 4804 DP_NOTICE(p_hwfn, false, 4805 "Failed to find an empty LLH filter to utilize\n"); 4806 return rc; 4807 } 4808 4809 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4810 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n", 4811 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4812 p_filter[4], p_filter[5], entry_num); 4813 4814 return rc; 4815 } 4816 4817 static enum _ecore_status_t 4818 ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4819 struct ecore_ptt *p_ptt, u32 high, u32 low, 4820 u32 *p_entry_num) 4821 { 4822 int i; 4823 4824 /* Find the entry and clean it */ 4825 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4826 if (ecore_rd(p_hwfn, p_ptt, 4827 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4828 2 * i * sizeof(u32)) != low) 4829 continue; 4830 if (ecore_rd(p_hwfn, p_ptt, 4831 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4832 (2 * i + 1) * sizeof(u32)) != high) 4833 continue; 4834 4835 ecore_wr(p_hwfn, p_ptt, 4836 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4837 ecore_wr(p_hwfn, p_ptt, 4838 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4839 2 * i * sizeof(u32), 0); 4840 ecore_wr(p_hwfn, p_ptt, 4841 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4842 (2 * i + 1) * sizeof(u32), 0); 4843 break; 4844 } 4845 4846 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4847 return ECORE_INVAL; 4848 4849 *p_entry_num = i; 4850 4851 return ECORE_SUCCESS; 4852 } 4853 4854 void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn, 4855 struct ecore_ptt *p_ptt, u8 *p_filter) 4856 { 4857 u32 high, low, entry_num; 4858 enum _ecore_status_t rc = ECORE_SUCCESS; 4859 4860 if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, 4861 &p_hwfn->p_dev->mf_bits)) 4862 return; 4863 4864 high = p_filter[1] | (p_filter[0] << 8); 4865 low = p_filter[5] | (p_filter[4] << 8) | 4866 (p_filter[3] << 16) | (p_filter[2] << 24); 4867 4868 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4869 rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high, 4870 low, &entry_num); 4871 if (rc != ECORE_SUCCESS) { 4872 DP_NOTICE(p_hwfn, false, 4873 "Tried to remove a non-configured filter\n"); 4874 return; 4875 } 4876 4877 4878 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4879 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n", 4880 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4881 p_filter[4], p_filter[5], entry_num); 4882 } 4883 4884 static enum _ecore_status_t 4885 ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4886 struct ecore_ptt *p_ptt, 4887 enum ecore_llh_port_filter_type_t type, 4888 u32 high, u32 low, u32 *p_entry_num) 4889 { 4890 u32 en; 4891 int i; 4892 4893 /* Find a free entry and utilize it */ 4894 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4895 en = ecore_rd(p_hwfn, p_ptt, 4896 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4897 i * sizeof(u32)); 4898 if (en) 4899 continue; 4900 ecore_wr(p_hwfn, p_ptt, 4901 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4902 
2 * i * sizeof(u32), low); 4903 ecore_wr(p_hwfn, p_ptt, 4904 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4905 (2 * i + 1) * sizeof(u32), high); 4906 ecore_wr(p_hwfn, p_ptt, 4907 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4908 i * sizeof(u32), 1); 4909 ecore_wr(p_hwfn, p_ptt, 4910 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4911 i * sizeof(u32), 1 << type); 4912 ecore_wr(p_hwfn, p_ptt, 4913 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1); 4914 break; 4915 } 4916 4917 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4918 return ECORE_NORESOURCES; 4919 4920 *p_entry_num = i; 4921 4922 return ECORE_SUCCESS; 4923 } 4924 4925 enum _ecore_status_t 4926 ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn, 4927 struct ecore_ptt *p_ptt, 4928 u16 source_port_or_eth_type, 4929 u16 dest_port, 4930 enum ecore_llh_port_filter_type_t type) 4931 { 4932 u32 high, low, entry_num; 4933 enum _ecore_status_t rc = ECORE_SUCCESS; 4934 4935 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, 4936 &p_hwfn->p_dev->mf_bits)) 4937 return rc; 4938 4939 high = 0; 4940 low = 0; 4941 4942 switch (type) { 4943 case ECORE_LLH_FILTER_ETHERTYPE: 4944 high = source_port_or_eth_type; 4945 break; 4946 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4947 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4948 low = source_port_or_eth_type << 16; 4949 break; 4950 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4951 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4952 low = dest_port; 4953 break; 4954 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4955 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4956 low = (source_port_or_eth_type << 16) | dest_port; 4957 break; 4958 default: 4959 DP_NOTICE(p_hwfn, true, 4960 "Non valid LLH protocol filter type %d\n", type); 4961 return ECORE_INVAL; 4962 } 4963 4964 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4965 rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 4966 high, low, &entry_num); 4967 if (rc != ECORE_SUCCESS) { 4968 DP_NOTICE(p_hwfn, false, 4969 "Failed to find an empty LLH filter to utilize\n"); 4970 return rc; 4971 } 4972 switch (type) { 4973 case ECORE_LLH_FILTER_ETHERTYPE: 4974 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4975 "ETH type %x is added at %d\n", 4976 source_port_or_eth_type, entry_num); 4977 break; 4978 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4979 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4980 "TCP src port %x is added at %d\n", 4981 source_port_or_eth_type, entry_num); 4982 break; 4983 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4984 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4985 "UDP src port %x is added at %d\n", 4986 source_port_or_eth_type, entry_num); 4987 break; 4988 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4989 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4990 "TCP dst port %x is added at %d\n", dest_port, 4991 entry_num); 4992 break; 4993 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4994 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4995 "UDP dst port %x is added at %d\n", dest_port, 4996 entry_num); 4997 break; 4998 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4999 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5000 "TCP src/dst ports %x/%x are added at %d\n", 5001 source_port_or_eth_type, dest_port, entry_num); 5002 break; 5003 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 5004 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5005 "UDP src/dst ports %x/%x are added at %d\n", 5006 source_port_or_eth_type, dest_port, entry_num); 5007 break; 5008 } 5009 5010 return rc; 5011 } 5012 5013 static enum _ecore_status_t 5014 ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 5015 struct ecore_ptt *p_ptt, 5016 enum ecore_llh_port_filter_type_t type, 5017 u32 high, u32 low, u32 *p_entry_num) 5018 
{ 5019 int i; 5020 5021 /* Find the entry and clean it */ 5022 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 5023 if (!ecore_rd(p_hwfn, p_ptt, 5024 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 5025 i * sizeof(u32))) 5026 continue; 5027 if (!ecore_rd(p_hwfn, p_ptt, 5028 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 5029 i * sizeof(u32))) 5030 continue; 5031 if (!(ecore_rd(p_hwfn, p_ptt, 5032 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 5033 i * sizeof(u32)) & (1 << type))) 5034 continue; 5035 if (ecore_rd(p_hwfn, p_ptt, 5036 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5037 2 * i * sizeof(u32)) != low) 5038 continue; 5039 if (ecore_rd(p_hwfn, p_ptt, 5040 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5041 (2 * i + 1) * sizeof(u32)) != high) 5042 continue; 5043 5044 ecore_wr(p_hwfn, p_ptt, 5045 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 5046 ecore_wr(p_hwfn, p_ptt, 5047 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 5048 i * sizeof(u32), 0); 5049 ecore_wr(p_hwfn, p_ptt, 5050 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 5051 i * sizeof(u32), 0); 5052 ecore_wr(p_hwfn, p_ptt, 5053 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5054 2 * i * sizeof(u32), 0); 5055 ecore_wr(p_hwfn, p_ptt, 5056 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5057 (2 * i + 1) * sizeof(u32), 0); 5058 break; 5059 } 5060 5061 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 5062 return ECORE_INVAL; 5063 5064 *p_entry_num = i; 5065 5066 return ECORE_SUCCESS; 5067 } 5068 5069 void 5070 ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn, 5071 struct ecore_ptt *p_ptt, 5072 u16 source_port_or_eth_type, 5073 u16 dest_port, 5074 enum ecore_llh_port_filter_type_t type) 5075 { 5076 u32 high, low, entry_num; 5077 enum _ecore_status_t rc = ECORE_SUCCESS; 5078 5079 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, 5080 &p_hwfn->p_dev->mf_bits)) 5081 return; 5082 5083 high = 0; 5084 low = 0; 5085 5086 switch (type) { 5087 case ECORE_LLH_FILTER_ETHERTYPE: 5088 high = source_port_or_eth_type; 5089 break; 5090 case ECORE_LLH_FILTER_TCP_SRC_PORT: 5091 case ECORE_LLH_FILTER_UDP_SRC_PORT: 5092 low = source_port_or_eth_type << 16; 5093 break; 5094 case ECORE_LLH_FILTER_TCP_DEST_PORT: 5095 case ECORE_LLH_FILTER_UDP_DEST_PORT: 5096 low = dest_port; 5097 break; 5098 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 5099 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 5100 low = (source_port_or_eth_type << 16) | dest_port; 5101 break; 5102 default: 5103 DP_NOTICE(p_hwfn, true, 5104 "Non valid LLH protocol filter type %d\n", type); 5105 return; 5106 } 5107 5108 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 5109 rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 5110 high, low, 5111 &entry_num); 5112 if (rc != ECORE_SUCCESS) { 5113 DP_NOTICE(p_hwfn, false, 5114 "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n", 5115 type, source_port_or_eth_type, dest_port); 5116 return; 5117 } 5118 5119 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5120 "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n", 5121 type, source_port_or_eth_type, dest_port, entry_num); 5122 } 5123 5124 static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn, 5125 struct ecore_ptt *p_ptt) 5126 { 5127 int i; 5128 5129 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 5130 return; 5131 5132 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 5133 ecore_wr(p_hwfn, p_ptt, 5134 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 5135 i * sizeof(u32), 0); 5136 ecore_wr(p_hwfn, p_ptt, 5137 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 
5138 2 * i * sizeof(u32), 0); 5139 ecore_wr(p_hwfn, p_ptt, 5140 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5141 (2 * i + 1) * sizeof(u32), 0); 5142 } 5143 } 5144 5145 void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn, 5146 struct ecore_ptt *p_ptt) 5147 { 5148 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, 5149 &p_hwfn->p_dev->mf_bits) && 5150 !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, 5151 &p_hwfn->p_dev->mf_bits)) 5152 return; 5153 5154 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 5155 ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt); 5156 } 5157 5158 enum _ecore_status_t 5159 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn, 5160 struct ecore_ptt *p_ptt) 5161 { 5162 if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) { 5163 ecore_wr(p_hwfn, p_ptt, 5164 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 5165 1 << p_hwfn->abs_pf_id / 2); 5166 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0); 5167 return ECORE_SUCCESS; 5168 } 5169 5170 DP_NOTICE(p_hwfn, false, 5171 "This function can't be set as default\n"); 5172 return ECORE_INVAL; 5173 } 5174 5175 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, 5176 struct ecore_ptt *p_ptt, 5177 u32 hw_addr, void *p_eth_qzone, 5178 osal_size_t eth_qzone_size, 5179 u8 timeset) 5180 { 5181 struct coalescing_timeset *p_coal_timeset; 5182 5183 if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) { 5184 DP_NOTICE(p_hwfn, true, 5185 "Coalescing configuration not enabled\n"); 5186 return ECORE_INVAL; 5187 } 5188 5189 p_coal_timeset = p_eth_qzone; 5190 OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); 5191 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 5192 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 5193 ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 5194 5195 return ECORE_SUCCESS; 5196 } 5197 5198 enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, 5199 u16 rx_coal, u16 tx_coal, 5200 void *p_handle) 5201 { 5202 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 5203 enum _ecore_status_t rc = ECORE_SUCCESS; 5204 struct ecore_ptt *p_ptt; 5205 5206 /* TODO - Configuring a single queue's coalescing but 5207 * claiming all queues are abiding same configuration 5208 * for PF and VF both. 
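 *
 * Usage note (illustrative values): ecore_set_queue_coalesce(p_hwfn, 24, 48,
 * p_cid) programs roughly 24 usec Rx and 48 usec Tx coalescing for the queue
 * behind p_cid; passing 0 for either direction leaves it untouched. The last
 * programmed values are cached in p_dev->rx/tx_coalesce_usecs.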
5209 */ 5210 5211 if (IS_VF(p_hwfn->p_dev)) 5212 return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal, 5213 tx_coal, p_cid); 5214 5215 p_ptt = ecore_ptt_acquire(p_hwfn); 5216 if (!p_ptt) 5217 return ECORE_AGAIN; 5218 5219 if (rx_coal) { 5220 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 5221 if (rc) 5222 goto out; 5223 p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; 5224 } 5225 5226 if (tx_coal) { 5227 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); 5228 if (rc) 5229 goto out; 5230 p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; 5231 } 5232 out: 5233 ecore_ptt_release(p_hwfn, p_ptt); 5234 5235 return rc; 5236 } 5237 5238 enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, 5239 struct ecore_ptt *p_ptt, 5240 u16 coalesce, 5241 struct ecore_queue_cid *p_cid) 5242 { 5243 struct ustorm_eth_queue_zone eth_qzone; 5244 u8 timeset, timer_res; 5245 u32 address; 5246 enum _ecore_status_t rc; 5247 5248 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5249 if (coalesce <= 0x7F) { 5250 timer_res = 0; 5251 } else if (coalesce <= 0xFF) { 5252 timer_res = 1; 5253 } else if (coalesce <= 0x1FF) { 5254 timer_res = 2; 5255 } else { 5256 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5257 return ECORE_INVAL; 5258 } 5259 timeset = (u8)(coalesce >> timer_res); 5260 5261 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5262 p_cid->sb_igu_id, false); 5263 if (rc != ECORE_SUCCESS) 5264 goto out; 5265 5266 address = BAR0_MAP_REG_USDM_RAM + 5267 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5268 5269 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 5270 sizeof(struct ustorm_eth_queue_zone), timeset); 5271 if (rc != ECORE_SUCCESS) 5272 goto out; 5273 5274 out: 5275 return rc; 5276 } 5277 5278 enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, 5279 struct ecore_ptt *p_ptt, 5280 u16 coalesce, 5281 struct ecore_queue_cid *p_cid) 5282 { 5283 struct xstorm_eth_queue_zone eth_qzone; 5284 u8 timeset, timer_res; 5285 u32 address; 5286 enum _ecore_status_t rc; 5287 5288 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5289 if (coalesce <= 0x7F) { 5290 timer_res = 0; 5291 } else if (coalesce <= 0xFF) { 5292 timer_res = 1; 5293 } else if (coalesce <= 0x1FF) { 5294 timer_res = 2; 5295 } else { 5296 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5297 return ECORE_INVAL; 5298 } 5299 5300 timeset = (u8)(coalesce >> timer_res); 5301 5302 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5303 p_cid->sb_igu_id, true); 5304 if (rc != ECORE_SUCCESS) 5305 goto out; 5306 5307 address = BAR0_MAP_REG_XSDM_RAM + 5308 XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5309 5310 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 5311 sizeof(struct xstorm_eth_queue_zone), timeset); 5312 out: 5313 return rc; 5314 } 5315 5316 /* Calculate final WFQ values for all vports and configure it. 
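 * The per-vport weight written below is
 * vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) / min_pf_rate (integer division).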
5317 * After this configuration each vport must have 5318 * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT 5319 */ 5320 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5321 struct ecore_ptt *p_ptt, 5322 u32 min_pf_rate) 5323 { 5324 struct init_qm_vport_params *vport_params; 5325 int i; 5326 5327 vport_params = p_hwfn->qm_info.qm_vport_params; 5328 5329 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5330 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5331 5332 vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) / 5333 min_pf_rate; 5334 ecore_init_vport_wfq(p_hwfn, p_ptt, 5335 vport_params[i].first_tx_pq_id, 5336 vport_params[i].vport_wfq); 5337 } 5338 } 5339 5340 static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn) 5341 { 5342 int i; 5343 5344 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 5345 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; 5346 } 5347 5348 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5349 struct ecore_ptt *p_ptt) 5350 { 5351 struct init_qm_vport_params *vport_params; 5352 int i; 5353 5354 vport_params = p_hwfn->qm_info.qm_vport_params; 5355 5356 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5357 ecore_init_wfq_default_param(p_hwfn); 5358 ecore_init_vport_wfq(p_hwfn, p_ptt, 5359 vport_params[i].first_tx_pq_id, 5360 vport_params[i].vport_wfq); 5361 } 5362 } 5363 5364 /* This function performs several validations for WFQ 5365 * configuration and required min rate for a given vport 5366 * 1. req_rate must be greater than one percent of min_pf_rate. 5367 * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 5368 * rates to get less than one percent of min_pf_rate. 5369 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 
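 * For example (illustrative numbers): with min_pf_rate = 10000 Mbps, every
 * vport - explicitly configured or not - must end up with at least 100 Mbps,
 * the sum of all explicit requests must not exceed 10000 Mbps, and whatever
 * rate is left after the explicit requests is split evenly among the
 * remaining vports.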
5370 */ 5371 static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn, 5372 u16 vport_id, u32 req_rate, 5373 u32 min_pf_rate) 5374 { 5375 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 5376 int non_requested_count = 0, req_count = 0, i, num_vports; 5377 5378 num_vports = p_hwfn->qm_info.num_vports; 5379 5380 /* Accounting for the vports which are configured for WFQ explicitly */ 5381 5382 for (i = 0; i < num_vports; i++) { 5383 u32 tmp_speed; 5384 5385 if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { 5386 req_count++; 5387 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5388 total_req_min_rate += tmp_speed; 5389 } 5390 } 5391 5392 /* Include current vport data as well */ 5393 req_count++; 5394 total_req_min_rate += req_rate; 5395 non_requested_count = num_vports - req_count; 5396 5397 /* validate possible error cases */ 5398 if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) { 5399 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5400 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5401 vport_id, req_rate, min_pf_rate); 5402 return ECORE_INVAL; 5403 } 5404 5405 /* TBD - for number of vports greater than 100 */ 5406 if (num_vports > ECORE_WFQ_UNIT) { 5407 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5408 "Number of vports is greater than %d\n", 5409 ECORE_WFQ_UNIT); 5410 return ECORE_INVAL; 5411 } 5412 5413 if (total_req_min_rate > min_pf_rate) { 5414 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5415 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5416 total_req_min_rate, min_pf_rate); 5417 return ECORE_INVAL; 5418 } 5419 5420 /* Data left for non requested vports */ 5421 total_left_rate = min_pf_rate - total_req_min_rate; 5422 left_rate_per_vp = total_left_rate / non_requested_count; 5423 5424 /* validate if non requested get < 1% of min bw */ 5425 if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) { 5426 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5427 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5428 left_rate_per_vp, min_pf_rate); 5429 return ECORE_INVAL; 5430 } 5431 5432 /* now req_rate for given vport passes all scenarios. 5433 * assign final wfq rates to all vports. 
         */
        p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
        p_hwfn->qm_info.wfq_data[vport_id].configured = true;

        for (i = 0; i < num_vports; i++) {
                if (p_hwfn->qm_info.wfq_data[i].configured)
                        continue;

                p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
        }

        return ECORE_SUCCESS;
}

static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       u16 vp_id, u32 rate)
{
        struct ecore_mcp_link_state *p_link;
        int rc = ECORE_SUCCESS;

        p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;

        if (!p_link->min_pf_rate) {
                p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
                p_hwfn->qm_info.wfq_data[vp_id].configured = true;
                return rc;
        }

        rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

        if (rc == ECORE_SUCCESS)
                ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
                                                   p_link->min_pf_rate);
        else
                DP_NOTICE(p_hwfn, false,
                          "Validation failed while configuring min rate\n");

        return rc;
}

static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt,
                                                   u32 min_pf_rate)
{
        bool use_wfq = false;
        int rc = ECORE_SUCCESS;
        u16 i;

        /* Validate all pre-configured vports for WFQ */
        for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
                u32 rate;

                if (!p_hwfn->qm_info.wfq_data[i].configured)
                        continue;

                rate = p_hwfn->qm_info.wfq_data[i].min_speed;
                use_wfq = true;

                rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
                if (rc != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, false,
                                  "WFQ validation failed while configuring min rate\n");
                        break;
                }
        }

        if (rc == ECORE_SUCCESS && use_wfq)
                ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
        else
                ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);

        return rc;
}

/* Main API for ecore clients to configure a vport's min rate.
 * vp_id - vport id within the PF, range [0..(total_num_vports_per_pf - 1)]
 * rate  - min Tx rate, in Mbps, to be assigned to the given vport.
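 *
 * Illustrative usage (hypothetical values): a call such as
 *   ecore_configure_vport_wfq(p_dev, 2, 500);
 * requests that vport 2 be guaranteed at least 500 Mbps; the request is
 * validated (and WFQ reprogrammed) for each hwfn of the device.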
 */
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
{
        int i, rc = ECORE_INVAL;

        /* TBD - for multiple hardware functions (CMT), i.e. 100 Gb devices */
        if (ECORE_IS_CMT(p_dev)) {
                DP_NOTICE(p_dev, false,
                          "WFQ configuration is not supported for this device\n");
                return rc;
        }

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
                struct ecore_ptt *p_ptt;

                p_ptt = ecore_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return ECORE_TIMEOUT;

                rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

                if (rc != ECORE_SUCCESS) {
                        ecore_ptt_release(p_hwfn, p_ptt);
                        return rc;
                }

                ecore_ptt_release(p_hwfn, p_ptt);
        }

        return rc;
}

/* API to configure WFQ upon an MCP link change */
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
                                           struct ecore_ptt *p_ptt,
                                           u32 min_pf_rate)
{
        int i;

        /* TBD - for multiple hardware functions (CMT), i.e. 100 Gb devices */
        if (ECORE_IS_CMT(p_dev)) {
                DP_VERBOSE(p_dev, ECORE_MSG_LINK,
                           "WFQ configuration is not supported for this device\n");
                return;
        }

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
                                                        min_pf_rate);
        }
}

int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_mcp_link_state *p_link,
                                       u8 max_bw)
{
        int rc = ECORE_SUCCESS;

        p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

        if (!p_link->line_speed && (max_bw != 100))
                return rc;

        p_link->speed = (p_link->line_speed * max_bw) / 100;
        p_hwfn->qm_info.pf_rl = p_link->speed;

        /* Since the limiter also affects Tx-switched traffic, we don't want it
         * to limit such traffic in case there's no actual limit.
         * In that case, set the limit to an imaginary high boundary.
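         * (The code below parks pf_rl at 100,000 Mbps.) For any other max_bw
         * the limiter simply tracks the scaled link speed, e.g.
         * (hypothetically) line_speed = 25000 Mbps and max_bw = 40 yield
         * p_link->speed = pf_rl = 10000 Mbps.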
         */
        if (max_bw == 100)
                p_hwfn->qm_info.pf_rl = 100000;

        rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
                              p_hwfn->qm_info.pf_rl);

        DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                   "Configured MAX bandwidth to be %08x Mb/sec\n",
                   p_link->speed);

        return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
{
        int i, rc = ECORE_INVAL;

        if (max_bw < 1 || max_bw > 100) {
                DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
                return rc;
        }

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
                struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
                struct ecore_mcp_link_state *p_link;
                struct ecore_ptt *p_ptt;

                p_link = &p_lead->mcp_info->link_output;

                p_ptt = ecore_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return ECORE_TIMEOUT;

                rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
                                                        p_link, max_bw);

                ecore_ptt_release(p_hwfn, p_ptt);

                if (rc != ECORE_SUCCESS)
                        break;
        }

        return rc;
}

int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_mcp_link_state *p_link,
                                       u8 min_bw)
{
        int rc = ECORE_SUCCESS;

        p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
        p_hwfn->qm_info.pf_wfq = min_bw;

        if (!p_link->line_speed)
                return rc;

        p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

        rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

        DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                   "Configured MIN bandwidth to be %d Mb/sec\n",
                   p_link->min_pf_rate);

        return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
{
        int i, rc = ECORE_INVAL;

        if (min_bw < 1 || min_bw > 100) {
                DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
                return rc;
        }

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
                struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
                struct ecore_mcp_link_state *p_link;
                struct ecore_ptt *p_ptt;

                p_link = &p_lead->mcp_info->link_output;

                p_ptt = ecore_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return ECORE_TIMEOUT;

                rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                                        p_link, min_bw);
                if (rc != ECORE_SUCCESS) {
                        ecore_ptt_release(p_hwfn, p_ptt);
                        return rc;
                }

                if (p_link->min_pf_rate) {
                        u32 min_rate = p_link->min_pf_rate;

                        rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
                                                                     p_ptt,
                                                                     min_rate);
                }

                ecore_ptt_release(p_hwfn, p_ptt);
        }

        return rc;
}

void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_link_state *p_link;

        p_link = &p_hwfn->mcp_info->link_output;

        if (p_link->min_pf_rate)
                ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);

        OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
                    sizeof(*p_hwfn->qm_info.wfq_data) *
                    p_hwfn->qm_info.num_vports);
}
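/* A BB-family device has two engines; the other families have a single
 * engine (this simply mirrors the ECORE_IS_BB() check below).
 */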
int ecore_device_num_engines(struct ecore_dev *p_dev)
{
        return ECORE_IS_BB(p_dev) ? 2 : 1;
}

int ecore_device_num_ports(struct ecore_dev *p_dev)
{
        return p_dev->num_ports;
}

void ecore_set_fw_mac_addr(__le16 *fw_msb,
                           __le16 *fw_mid,
                           __le16 *fw_lsb,
                           u8 *mac)
{
        ((u8 *)fw_msb)[0] = mac[1];
        ((u8 *)fw_msb)[1] = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lsb)[0] = mac[5];
        ((u8 *)fw_lsb)[1] = mac[4];
}
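/* Illustrative example of the byte swizzling done by ecore_set_fw_mac_addr()
 * (hypothetical MAC): for mac = 00:11:22:33:44:55 the three little-endian
 * words end up as fw_msb = 0x0011, fw_mid = 0x2233 and fw_lsb = 0x4455, i.e.
 * each pair of MAC bytes is stored byte-swapped so the firmware reads the
 * address in network order.
 */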