1 /* 2 * Copyright (c) 2016 QLogic Corporation. 3 * All rights reserved. 4 * www.qlogic.com 5 * 6 * See LICENSE.qede_pmd for copyright and licensing details. 7 */ 8 9 #include "bcm_osal.h" 10 #include "reg_addr.h" 11 #include "ecore_gtt_reg_addr.h" 12 #include "ecore.h" 13 #include "ecore_chain.h" 14 #include "ecore_status.h" 15 #include "ecore_hw.h" 16 #include "ecore_rt_defs.h" 17 #include "ecore_init_ops.h" 18 #include "ecore_int.h" 19 #include "ecore_cxt.h" 20 #include "ecore_spq.h" 21 #include "ecore_init_fw_funcs.h" 22 #include "ecore_sp_commands.h" 23 #include "ecore_dev_api.h" 24 #include "ecore_sriov.h" 25 #include "ecore_vf.h" 26 #include "ecore_mcp.h" 27 #include "ecore_hw_defs.h" 28 #include "mcp_public.h" 29 #include "ecore_iro.h" 30 #include "nvm_cfg.h" 31 #include "ecore_dcbx.h" 32 #include "ecore_l2.h" 33 34 /* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM 35 * registers involved are not split and thus configuration is a race where 36 * some of the PFs configuration might be lost. 37 * Eventually, this needs to move into a MFW-covered HW-lock as arbitration 38 * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where 39 * there's more than a single compiled ecore component in system]. 40 */ 41 static osal_spinlock_t qm_lock; 42 static bool qm_lock_init; 43 44 /******************** Doorbell Recovery *******************/ 45 /* The doorbell recovery mechanism consists of a list of entries which represent 46 * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each 47 * entity needs to register with the mechanism and provide the parameters 48 * describing it's doorbell, including a location where last used doorbell data 49 * can be found. The doorbell execute function will traverse the list and 50 * doorbell all of the registered entries. 51 */ 52 struct ecore_db_recovery_entry { 53 osal_list_entry_t list_entry; 54 void OSAL_IOMEM *db_addr; 55 void *db_data; 56 enum ecore_db_rec_width db_width; 57 enum ecore_db_rec_space db_space; 58 u8 hwfn_idx; 59 }; 60 61 /* display a single doorbell recovery entry */ 62 void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn, 63 struct ecore_db_recovery_entry *db_entry, 64 const char *action) 65 { 66 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n", 67 action, db_entry, db_entry->db_addr, db_entry->db_data, 68 db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b", 69 db_entry->db_space == DB_REC_USER ? "user" : "kernel", 70 db_entry->hwfn_idx); 71 } 72 73 /* doorbell address sanity (address within doorbell bar range) */ 74 bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr, 75 void *db_data) 76 { 77 /* make sure doorbell address is within the doorbell bar */ 78 if (db_addr < p_dev->doorbells || (u8 *)db_addr > 79 (u8 *)p_dev->doorbells + p_dev->db_size) { 80 OSAL_WARN(true, 81 "Illegal doorbell address: %p. 
Legal range for doorbell addresses is [%p..%p]\n", 82 db_addr, p_dev->doorbells, 83 (u8 *)p_dev->doorbells + p_dev->db_size); 84 return false; 85 } 86 87 /* make sure doorbell data pointer is not null */ 88 if (!db_data) { 89 OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data); 90 return false; 91 } 92 93 return true; 94 } 95 96 /* find hwfn according to the doorbell address */ 97 struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev, 98 void OSAL_IOMEM *db_addr) 99 { 100 struct ecore_hwfn *p_hwfn; 101 102 /* In CMT doorbell bar is split down the middle between engine 0 and 103 * enigne 1 104 */ 105 if (ECORE_IS_CMT(p_dev)) 106 p_hwfn = db_addr < p_dev->hwfns[1].doorbells ? 107 &p_dev->hwfns[0] : &p_dev->hwfns[1]; 108 else 109 p_hwfn = ECORE_LEADING_HWFN(p_dev); 110 111 return p_hwfn; 112 } 113 114 /* add a new entry to the doorbell recovery mechanism */ 115 enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev, 116 void OSAL_IOMEM *db_addr, 117 void *db_data, 118 enum ecore_db_rec_width db_width, 119 enum ecore_db_rec_space db_space) 120 { 121 struct ecore_db_recovery_entry *db_entry; 122 struct ecore_hwfn *p_hwfn; 123 124 /* shortcircuit VFs, for now */ 125 if (IS_VF(p_dev)) { 126 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n"); 127 return ECORE_SUCCESS; 128 } 129 130 /* sanitize doorbell address */ 131 if (!ecore_db_rec_sanity(p_dev, db_addr, db_data)) 132 return ECORE_INVAL; 133 134 /* obtain hwfn from doorbell address */ 135 p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr); 136 137 /* create entry */ 138 db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry)); 139 if (!db_entry) { 140 DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n"); 141 return ECORE_NOMEM; 142 } 143 144 /* populate entry */ 145 db_entry->db_addr = db_addr; 146 db_entry->db_data = db_data; 147 db_entry->db_width = db_width; 148 db_entry->db_space = db_space; 149 db_entry->hwfn_idx = p_hwfn->my_id; 150 151 /* display */ 152 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); 153 154 /* protect the list */ 155 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 156 OSAL_LIST_PUSH_TAIL(&db_entry->list_entry, 157 &p_hwfn->db_recovery_info.list); 158 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 159 160 return ECORE_SUCCESS; 161 } 162 163 /* remove an entry from the doorbell recovery mechanism */ 164 enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev, 165 void OSAL_IOMEM *db_addr, 166 void *db_data) 167 { 168 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 169 enum _ecore_status_t rc = ECORE_INVAL; 170 struct ecore_hwfn *p_hwfn; 171 172 /* shortcircuit VFs, for now */ 173 if (IS_VF(p_dev)) { 174 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n"); 175 return ECORE_SUCCESS; 176 } 177 178 /* sanitize doorbell address */ 179 if (!ecore_db_rec_sanity(p_dev, db_addr, db_data)) 180 return ECORE_INVAL; 181 182 /* obtain hwfn from doorbell address */ 183 p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr); 184 185 /* protect the list */ 186 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 187 OSAL_LIST_FOR_EACH_ENTRY(db_entry, 188 &p_hwfn->db_recovery_info.list, 189 list_entry, 190 struct ecore_db_recovery_entry) { 191 /* search according to db_data addr since db_addr is not unique 192 * (roce) 193 */ 194 if (db_entry->db_data == db_data) { 195 ecore_db_recovery_dp_entry(p_hwfn, db_entry, 196 "Deleting"); 197 OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry, 198 &p_hwfn->db_recovery_info.list); 199 rc = 
ECORE_SUCCESS; 200 break; 201 } 202 } 203 204 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 205 206 if (rc == ECORE_INVAL) 207 /*OSAL_WARN(true,*/ 208 DP_NOTICE(p_hwfn, false, 209 "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n", 210 db_data, db_addr); 211 else 212 OSAL_FREE(p_dev, db_entry); 213 214 return rc; 215 } 216 217 /* initialize the doorbell recovery mechanism */ 218 enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn) 219 { 220 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n"); 221 222 /* make sure db_size was set in p_dev */ 223 if (!p_hwfn->p_dev->db_size) { 224 DP_ERR(p_hwfn->p_dev, "db_size not set\n"); 225 return ECORE_INVAL; 226 } 227 228 OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list); 229 #ifdef CONFIG_ECORE_LOCK_ALLOC 230 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock); 231 #endif 232 OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock); 233 p_hwfn->db_recovery_info.db_recovery_counter = 0; 234 235 return ECORE_SUCCESS; 236 } 237 238 /* destroy the doorbell recovery mechanism */ 239 void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn) 240 { 241 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 242 243 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n"); 244 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) { 245 DP_VERBOSE(p_hwfn, false, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n"); 246 while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) { 247 db_entry = OSAL_LIST_FIRST_ENTRY( 248 &p_hwfn->db_recovery_info.list, 249 struct ecore_db_recovery_entry, 250 list_entry); 251 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging"); 252 OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry, 253 &p_hwfn->db_recovery_info.list); 254 OSAL_FREE(p_hwfn->p_dev, db_entry); 255 } 256 } 257 #ifdef CONFIG_ECORE_LOCK_ALLOC 258 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock); 259 #endif 260 p_hwfn->db_recovery_info.db_recovery_counter = 0; 261 } 262 263 /* print the content of the doorbell recovery mechanism */ 264 void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn) 265 { 266 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 267 268 DP_NOTICE(p_hwfn, false, 269 "Dispalying doorbell recovery database. Counter was %d\n", 270 p_hwfn->db_recovery_info.db_recovery_counter); 271 272 /* protect the list */ 273 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 274 OSAL_LIST_FOR_EACH_ENTRY(db_entry, 275 &p_hwfn->db_recovery_info.list, 276 list_entry, 277 struct ecore_db_recovery_entry) { 278 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); 279 } 280 281 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 282 } 283 284 /* ring the doorbell of a single doorbell recovery entry */ 285 void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn, 286 struct ecore_db_recovery_entry *db_entry, 287 enum ecore_db_rec_exec db_exec) 288 { 289 /* Print according to width */ 290 if (db_entry->db_width == DB_REC_WIDTH_32B) 291 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n", 292 db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing", 293 db_entry->db_addr, *(u32 *)db_entry->db_data); 294 else 295 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %lx\n", 296 db_exec == DB_REC_DRY_RUN ? 
"would have rung" : "ringing", 297 db_entry->db_addr, 298 *(unsigned long *)(db_entry->db_data)); 299 300 /* Sanity */ 301 if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr, 302 db_entry->db_data)) 303 return; 304 305 /* Flush the write combined buffer. Since there are multiple doorbelling 306 * entities using the same address, if we don't flush, a transaction 307 * could be lost. 308 */ 309 OSAL_WMB(p_hwfn->p_dev); 310 311 /* Ring the doorbell */ 312 if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { 313 if (db_entry->db_width == DB_REC_WIDTH_32B) 314 DIRECT_REG_WR(p_hwfn, db_entry->db_addr, 315 *(u32 *)(db_entry->db_data)); 316 else 317 DIRECT_REG_WR64(p_hwfn, db_entry->db_addr, 318 *(u64 *)(db_entry->db_data)); 319 } 320 321 /* Flush the write combined buffer. Next doorbell may come from a 322 * different entity to the same address... 323 */ 324 OSAL_WMB(p_hwfn->p_dev); 325 } 326 327 /* traverse the doorbell recovery entry list and ring all the doorbells */ 328 void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn, 329 enum ecore_db_rec_exec db_exec) 330 { 331 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 332 333 if (db_exec != DB_REC_ONCE) { 334 DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n", 335 p_hwfn->db_recovery_info.db_recovery_counter); 336 337 /* track amount of times recovery was executed */ 338 p_hwfn->db_recovery_info.db_recovery_counter++; 339 } 340 341 /* protect the list */ 342 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 343 OSAL_LIST_FOR_EACH_ENTRY(db_entry, 344 &p_hwfn->db_recovery_info.list, 345 list_entry, 346 struct ecore_db_recovery_entry) { 347 ecore_db_recovery_ring(p_hwfn, db_entry, db_exec); 348 if (db_exec == DB_REC_ONCE) 349 break; 350 } 351 352 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 353 } 354 /******************** Doorbell Recovery end ****************/ 355 356 /* Configurable */ 357 #define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to 358 * load the driver. The number was 359 * arbitrarily set. 360 */ 361 362 /* Derived */ 363 #define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS) 364 365 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, 366 struct ecore_ptt *p_ptt, 367 enum BAR_ID bar_id) 368 { 369 u32 bar_reg = (bar_id == BAR_ID_0 ? 370 PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 371 u32 val; 372 373 if (IS_VF(p_hwfn->p_dev)) 374 return ecore_vf_hw_bar_size(p_hwfn, bar_id); 375 376 val = ecore_rd(p_hwfn, p_ptt, bar_reg); 377 if (val) 378 return 1 << (val + 15); 379 380 /* The above registers were updated in the past only in CMT mode. Since 381 * they were found to be useful MFW started updating them from 8.7.7.0. 382 * In older MFW versions they are set to 0 which means disabled. 383 */ 384 if (ECORE_IS_CMT(p_hwfn->p_dev)) { 385 DP_INFO(p_hwfn, 386 "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 387 val = BAR_ID_0 ? 256 * 1024 : 512 * 1024; 388 } else { 389 DP_INFO(p_hwfn, 390 "BAR size not configured. 
Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 391 val = 512 * 1024; 392 } 393 394 return val; 395 } 396 397 void ecore_init_dp(struct ecore_dev *p_dev, 398 u32 dp_module, u8 dp_level, void *dp_ctx) 399 { 400 u32 i; 401 402 p_dev->dp_level = dp_level; 403 p_dev->dp_module = dp_module; 404 p_dev->dp_ctx = dp_ctx; 405 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 406 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 407 408 p_hwfn->dp_level = dp_level; 409 p_hwfn->dp_module = dp_module; 410 p_hwfn->dp_ctx = dp_ctx; 411 } 412 } 413 414 void ecore_init_struct(struct ecore_dev *p_dev) 415 { 416 u8 i; 417 418 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 419 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 420 421 p_hwfn->p_dev = p_dev; 422 p_hwfn->my_id = i; 423 p_hwfn->b_active = false; 424 425 #ifdef CONFIG_ECORE_LOCK_ALLOC 426 OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex); 427 #endif 428 OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex); 429 } 430 431 /* hwfn 0 is always active */ 432 p_dev->hwfns[0].b_active = true; 433 434 /* set the default cache alignment to 128 (may be overridden later) */ 435 p_dev->cache_shift = 7; 436 } 437 438 static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) 439 { 440 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 441 442 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params); 443 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params); 444 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params); 445 OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data); 446 } 447 448 void ecore_resc_free(struct ecore_dev *p_dev) 449 { 450 int i; 451 452 if (IS_VF(p_dev)) { 453 for_each_hwfn(p_dev, i) 454 ecore_l2_free(&p_dev->hwfns[i]); 455 return; 456 } 457 458 OSAL_FREE(p_dev, p_dev->fw_data); 459 460 OSAL_FREE(p_dev, p_dev->reset_stats); 461 462 for_each_hwfn(p_dev, i) { 463 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 464 465 ecore_cxt_mngr_free(p_hwfn); 466 ecore_qm_info_free(p_hwfn); 467 ecore_spq_free(p_hwfn); 468 ecore_eq_free(p_hwfn); 469 ecore_consq_free(p_hwfn); 470 ecore_int_free(p_hwfn); 471 ecore_iov_free(p_hwfn); 472 ecore_l2_free(p_hwfn); 473 ecore_dmae_info_free(p_hwfn); 474 ecore_dcbx_info_free(p_hwfn); 475 /* @@@TBD Flush work-queue ? */ 476 477 /* destroy doorbell recovery mechanism */ 478 ecore_db_recovery_teardown(p_hwfn); 479 } 480 } 481 482 /******************** QM initialization *******************/ 483 484 /* bitmaps for indicating active traffic classes. 485 * Special case for Arrowhead 4 port 486 */ 487 /* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */ 488 #define ACTIVE_TCS_BMAP 0x9f 489 /* 0..3 actually used, OOO and high priority stuff all use 3 */ 490 #define ACTIVE_TCS_BMAP_4PORT_K2 0xf 491 492 /* determines the physical queue flags for a given PF. 
*/ 493 static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) 494 { 495 u32 flags; 496 497 /* common flags */ 498 flags = PQ_FLAGS_LB; 499 500 /* feature flags */ 501 if (IS_ECORE_SRIOV(p_hwfn->p_dev)) 502 flags |= PQ_FLAGS_VFS; 503 504 /* protocol flags */ 505 switch (p_hwfn->hw_info.personality) { 506 case ECORE_PCI_ETH: 507 flags |= PQ_FLAGS_MCOS; 508 break; 509 case ECORE_PCI_FCOE: 510 flags |= PQ_FLAGS_OFLD; 511 break; 512 case ECORE_PCI_ISCSI: 513 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 514 break; 515 case ECORE_PCI_ETH_ROCE: 516 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD; 517 break; 518 case ECORE_PCI_ETH_IWARP: 519 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | 520 PQ_FLAGS_OFLD; 521 break; 522 default: 523 DP_ERR(p_hwfn, "unknown personality %d\n", 524 p_hwfn->hw_info.personality); 525 return 0; 526 } 527 return flags; 528 } 529 530 /* Getters for resource amounts necessary for qm initialization */ 531 u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn) 532 { 533 return p_hwfn->hw_info.num_hw_tc; 534 } 535 536 u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn) 537 { 538 return IS_ECORE_SRIOV(p_hwfn->p_dev) ? 539 p_hwfn->p_dev->p_iov_info->total_vfs : 0; 540 } 541 542 #define NUM_DEFAULT_RLS 1 543 544 u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn) 545 { 546 u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 547 548 /* @DPDK */ 549 /* num RLs can't exceed resource amount of rls or vports or the 550 * dcqcn qps 551 */ 552 num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), 553 (u16)RESC_NUM(p_hwfn, ECORE_VPORT)); 554 555 /* make sure after we reserve the default and VF rls we'll have 556 * something left 557 */ 558 if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) { 559 DP_NOTICE(p_hwfn, false, 560 "no rate limiters left for PF rate limiting" 561 " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs); 562 return 0; 563 } 564 565 /* subtract rls necessary for VFs and one default one for the PF */ 566 num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 567 568 return num_pf_rls; 569 } 570 571 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn) 572 { 573 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 574 575 /* all pqs share the same vport (hence the 1 below), except for vfs 576 * and pf_rl pqs 577 */ 578 return (!!(PQ_FLAGS_RLS & pq_flags)) * 579 ecore_init_qm_get_num_pf_rls(p_hwfn) + 580 (!!(PQ_FLAGS_VFS & pq_flags)) * 581 ecore_init_qm_get_num_vfs(p_hwfn) + 1; 582 } 583 584 /* calc amount of PQs according to the requested flags */ 585 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn) 586 { 587 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 588 589 return (!!(PQ_FLAGS_RLS & pq_flags)) * 590 ecore_init_qm_get_num_pf_rls(p_hwfn) + 591 (!!(PQ_FLAGS_MCOS & pq_flags)) * 592 ecore_init_qm_get_num_tcs(p_hwfn) + 593 (!!(PQ_FLAGS_LB & pq_flags)) + 594 (!!(PQ_FLAGS_OOO & pq_flags)) + 595 (!!(PQ_FLAGS_ACK & pq_flags)) + 596 (!!(PQ_FLAGS_OFLD & pq_flags)) + 597 (!!(PQ_FLAGS_VFS & pq_flags)) * 598 ecore_init_qm_get_num_vfs(p_hwfn); 599 } 600 601 /* initialize the top level QM params */ 602 static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn) 603 { 604 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 605 bool four_port; 606 607 /* pq and vport bases for this PF */ 608 qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); 609 qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT); 610 611 /* rate limiting and weighted fair queueing are always enabled */ 612 qm_info->vport_rl_en = 1; 613 qm_info->vport_wfq_en = 1; 614 615 /* TC config 
is different for AH 4 port */ 616 four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2; 617 618 /* in AH 4 port we have fewer TCs per port */ 619 qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : 620 NUM_OF_PHYS_TCS; 621 622 /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 623 * 4 otherwise 624 */ 625 if (!qm_info->ooo_tc) 626 qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : 627 DCBX_TCP_OOO_TC; 628 } 629 630 /* initialize qm vport params */ 631 static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn) 632 { 633 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 634 u8 i; 635 636 /* all vports participate in weighted fair queueing */ 637 for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++) 638 qm_info->qm_vport_params[i].vport_wfq = 1; 639 } 640 641 /* initialize qm port params */ 642 static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn) 643 { 644 /* Initialize qm port parameters */ 645 u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine; 646 647 /* indicate how ooo and high pri traffic is dealt with */ 648 active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 649 ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; 650 651 for (i = 0; i < num_ports; i++) { 652 struct init_qm_port_params *p_qm_port = 653 &p_hwfn->qm_info.qm_port_params[i]; 654 655 p_qm_port->active = 1; 656 p_qm_port->active_phys_tcs = active_phys_tcs; 657 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports; 658 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 659 } 660 } 661 662 /* Reset the params which must be reset for qm init. QM init may be called as 663 * a result of flows other than driver load (e.g. dcbx renegotiation). Other 664 * params may be affected by the init but would simply recalculate to the same 665 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 666 * affected as these amounts stay the same. 667 */ 668 static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn) 669 { 670 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 671 672 qm_info->num_pqs = 0; 673 qm_info->num_vports = 0; 674 qm_info->num_pf_rls = 0; 675 qm_info->num_vf_pqs = 0; 676 qm_info->first_vf_pq = 0; 677 qm_info->first_mcos_pq = 0; 678 qm_info->first_rl_pq = 0; 679 } 680 681 static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn) 682 { 683 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 684 685 qm_info->num_vports++; 686 687 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 688 DP_ERR(p_hwfn, 689 "vport overflow! qm_info->num_vports %d," 690 " qm_init_get_num_vports() %d\n", 691 qm_info->num_vports, 692 ecore_init_qm_get_num_vports(p_hwfn)); 693 } 694 695 /* initialize a single pq and manage qm_info resources accounting. 696 * The pq_init_flags param determines whether the PQ is rate limited 697 * (for VF or PF) 698 * and whether a new vport is allocated to the pq or not (i.e. 
vport will be 699 * shared) 700 */ 701 702 /* flags for pq init */ 703 #define PQ_INIT_SHARE_VPORT (1 << 0) 704 #define PQ_INIT_PF_RL (1 << 1) 705 #define PQ_INIT_VF_RL (1 << 2) 706 707 /* defines for pq init */ 708 #define PQ_INIT_DEFAULT_WRR_GROUP 1 709 #define PQ_INIT_DEFAULT_TC 0 710 #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 711 712 static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, 713 struct ecore_qm_info *qm_info, 714 u8 tc, u32 pq_init_flags) 715 { 716 u16 pq_idx = qm_info->num_pqs, max_pq = 717 ecore_init_qm_get_num_pqs(p_hwfn); 718 719 if (pq_idx > max_pq) 720 DP_ERR(p_hwfn, 721 "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); 722 723 /* init pq params */ 724 qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + 725 qm_info->num_vports; 726 qm_info->qm_pq_params[pq_idx].tc_id = tc; 727 qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 728 qm_info->qm_pq_params[pq_idx].rl_valid = 729 (pq_init_flags & PQ_INIT_PF_RL || 730 pq_init_flags & PQ_INIT_VF_RL); 731 732 /* qm params accounting */ 733 qm_info->num_pqs++; 734 if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 735 qm_info->num_vports++; 736 737 if (pq_init_flags & PQ_INIT_PF_RL) 738 qm_info->num_pf_rls++; 739 740 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 741 DP_ERR(p_hwfn, 742 "vport overflow! qm_info->num_vports %d," 743 " qm_init_get_num_vports() %d\n", 744 qm_info->num_vports, 745 ecore_init_qm_get_num_vports(p_hwfn)); 746 747 if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) 748 DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d," 749 " qm_init_get_num_pf_rls() %d\n", 750 qm_info->num_pf_rls, 751 ecore_init_qm_get_num_pf_rls(p_hwfn)); 752 } 753 754 /* get pq index according to PQ_FLAGS */ 755 static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn, 756 u32 pq_flags) 757 { 758 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 759 760 /* Can't have multiple flags set here */ 761 if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, 762 sizeof(pq_flags)) > 1) 763 goto err; 764 765 switch (pq_flags) { 766 case PQ_FLAGS_RLS: 767 return &qm_info->first_rl_pq; 768 case PQ_FLAGS_MCOS: 769 return &qm_info->first_mcos_pq; 770 case PQ_FLAGS_LB: 771 return &qm_info->pure_lb_pq; 772 case PQ_FLAGS_OOO: 773 return &qm_info->ooo_pq; 774 case PQ_FLAGS_ACK: 775 return &qm_info->pure_ack_pq; 776 case PQ_FLAGS_OFLD: 777 return &qm_info->offload_pq; 778 case PQ_FLAGS_VFS: 779 return &qm_info->first_vf_pq; 780 default: 781 goto err; 782 } 783 784 err: 785 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 786 return OSAL_NULL; 787 } 788 789 /* save pq index in qm info */ 790 static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn, 791 u32 pq_flags, u16 pq_val) 792 { 793 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 794 795 *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 796 } 797 798 /* get tx pq index, with the PQ TX base already set (ready for context init) */ 799 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags) 800 { 801 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 802 803 return *base_pq_idx + CM_TX_PQ_BASE; 804 } 805 806 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc) 807 { 808 u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn); 809 810 if (tc > max_tc) 811 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 812 813 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 814 } 815 816 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) 817 { 818 u16 
max_vf = ecore_init_qm_get_num_vfs(p_hwfn); 819 820 if (vf > max_vf) 821 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 822 823 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 824 } 825 826 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) 827 { 828 u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); 829 830 if (rl > max_rl) 831 DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); 832 833 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 834 } 835 836 /* Functions for creating specific types of pqs */ 837 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) 838 { 839 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 840 841 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 842 return; 843 844 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 845 ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 846 } 847 848 static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn) 849 { 850 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 851 852 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 853 return; 854 855 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 856 ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 857 } 858 859 static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn) 860 { 861 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 862 863 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 864 return; 865 866 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 867 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 868 } 869 870 static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn) 871 { 872 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 873 874 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 875 return; 876 877 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 878 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 879 } 880 881 static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn) 882 { 883 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 884 u8 tc_idx; 885 886 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 887 return; 888 889 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 890 for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++) 891 ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 892 } 893 894 static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn) 895 { 896 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 897 u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 898 899 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 900 return; 901 902 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 903 904 qm_info->num_vf_pqs = num_vfs; 905 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 906 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, 907 PQ_INIT_VF_RL); 908 } 909 910 static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn) 911 { 912 u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn); 913 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 914 915 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 916 return; 917 918 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 919 for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 920 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, 921 PQ_INIT_PF_RL); 922 } 923 924 static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn) 925 { 926 /* rate limited pqs, must come first (FW assumption) */ 927 
ecore_init_qm_rl_pqs(p_hwfn); 928 929 /* pqs for multi cos */ 930 ecore_init_qm_mcos_pqs(p_hwfn); 931 932 /* pure loopback pq */ 933 ecore_init_qm_lb_pq(p_hwfn); 934 935 /* out of order pq */ 936 ecore_init_qm_ooo_pq(p_hwfn); 937 938 /* pure ack pq */ 939 ecore_init_qm_pure_ack_pq(p_hwfn); 940 941 /* pq for offloaded protocol */ 942 ecore_init_qm_offload_pq(p_hwfn); 943 944 /* done sharing vports */ 945 ecore_init_qm_advance_vport(p_hwfn); 946 947 /* pqs for vfs */ 948 ecore_init_qm_vf_pqs(p_hwfn); 949 } 950 951 /* compare values of getters against resources amounts */ 952 static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn) 953 { 954 if (ecore_init_qm_get_num_vports(p_hwfn) > 955 RESC_NUM(p_hwfn, ECORE_VPORT)) { 956 DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 957 return ECORE_INVAL; 958 } 959 960 if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) { 961 DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 962 return ECORE_INVAL; 963 } 964 965 return ECORE_SUCCESS; 966 } 967 968 /* 969 * Function for verbose printing of the qm initialization results 970 */ 971 static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) 972 { 973 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 974 struct init_qm_vport_params *vport; 975 struct init_qm_port_params *port; 976 struct init_qm_pq_params *pq; 977 int i, tc; 978 979 /* top level params */ 980 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 981 "qm init top level params: start_pq %d, start_vport %d," 982 " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 983 qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, 984 qm_info->offload_pq, qm_info->pure_ack_pq); 985 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 986 "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d," 987 " num_vports %d, max_phys_tcs_per_port %d\n", 988 qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, 989 qm_info->num_vf_pqs, qm_info->num_vports, 990 qm_info->max_phys_tcs_per_port); 991 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 992 "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d," 993 " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 994 qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, 995 qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, 996 qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn)); 997 998 /* port table */ 999 for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) { 1000 port = &qm_info->qm_port_params[i]; 1001 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 1002 "port idx %d, active %d, active_phys_tcs %d," 1003 " num_pbf_cmd_lines %d, num_btb_blocks %d," 1004 " reserved %d\n", 1005 i, port->active, port->active_phys_tcs, 1006 port->num_pbf_cmd_lines, port->num_btb_blocks, 1007 port->reserved); 1008 } 1009 1010 /* vport table */ 1011 for (i = 0; i < qm_info->num_vports; i++) { 1012 vport = &qm_info->qm_vport_params[i]; 1013 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 1014 "vport idx %d, vport_rl %d, wfq %d," 1015 " first_tx_pq_id [ ", 1016 qm_info->start_vport + i, vport->vport_rl, 1017 vport->vport_wfq); 1018 for (tc = 0; tc < NUM_OF_TCS; tc++) 1019 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", 1020 vport->first_tx_pq_id[tc]); 1021 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n"); 1022 } 1023 1024 /* pq table */ 1025 for (i = 0; i < qm_info->num_pqs; i++) { 1026 pq = &qm_info->qm_pq_params[i]; 1027 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 1028 "pq idx %d, vport_id %d, tc %d, wrr_grp %d," 1029 " rl_valid %d\n", 1030 qm_info->start_pq + i, pq->vport_id, pq->tc_id, 1031 pq->wrr_group, pq->rl_valid); 1032 } 1033 } 1034 1035 static void 
ecore_init_qm_info(struct ecore_hwfn *p_hwfn) 1036 { 1037 /* reset params required for init run */ 1038 ecore_init_qm_reset_params(p_hwfn); 1039 1040 /* init QM top level params */ 1041 ecore_init_qm_params(p_hwfn); 1042 1043 /* init QM port params */ 1044 ecore_init_qm_port_params(p_hwfn); 1045 1046 /* init QM vport params */ 1047 ecore_init_qm_vport_params(p_hwfn); 1048 1049 /* init QM physical queue params */ 1050 ecore_init_qm_pq_params(p_hwfn); 1051 1052 /* display all that init */ 1053 ecore_dp_init_qm_params(p_hwfn); 1054 } 1055 1056 /* This function reconfigures the QM pf on the fly. 1057 * For this purpose we: 1058 * 1. reconfigure the QM database 1059 * 2. set new values to runtime array 1060 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 1061 * 4. activate init tool in QM_PF stage 1062 * 5. send an sdm_qm_cmd through rbc interface to release the QM 1063 */ 1064 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, 1065 struct ecore_ptt *p_ptt) 1066 { 1067 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1068 bool b_rc; 1069 enum _ecore_status_t rc; 1070 1071 /* initialize ecore's qm data structure */ 1072 ecore_init_qm_info(p_hwfn); 1073 1074 /* stop PF's qm queues */ 1075 OSAL_SPIN_LOCK(&qm_lock); 1076 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 1077 qm_info->start_pq, qm_info->num_pqs); 1078 OSAL_SPIN_UNLOCK(&qm_lock); 1079 if (!b_rc) 1080 return ECORE_INVAL; 1081 1082 /* clear the QM_PF runtime phase leftovers from previous init */ 1083 ecore_init_clear_rt_data(p_hwfn); 1084 1085 /* prepare QM portion of runtime array */ 1086 ecore_qm_init_pf(p_hwfn, p_ptt); 1087 1088 /* activate init tool on runtime array */ 1089 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 1090 p_hwfn->hw_info.hw_mode); 1091 if (rc != ECORE_SUCCESS) 1092 return rc; 1093 1094 /* start PF's qm queues */ 1095 OSAL_SPIN_LOCK(&qm_lock); 1096 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 1097 qm_info->start_pq, qm_info->num_pqs); 1098 OSAL_SPIN_UNLOCK(&qm_lock); 1099 if (!b_rc) 1100 return ECORE_INVAL; 1101 1102 return ECORE_SUCCESS; 1103 } 1104 1105 static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn) 1106 { 1107 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1108 enum _ecore_status_t rc; 1109 1110 rc = ecore_init_qm_sanity(p_hwfn); 1111 if (rc != ECORE_SUCCESS) 1112 goto alloc_err; 1113 1114 qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1115 sizeof(struct init_qm_pq_params) * 1116 ecore_init_qm_get_num_pqs(p_hwfn)); 1117 if (!qm_info->qm_pq_params) 1118 goto alloc_err; 1119 1120 qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1121 sizeof(struct init_qm_vport_params) * 1122 ecore_init_qm_get_num_vports(p_hwfn)); 1123 if (!qm_info->qm_vport_params) 1124 goto alloc_err; 1125 1126 qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1127 sizeof(struct init_qm_port_params) * 1128 p_hwfn->p_dev->num_ports_in_engine); 1129 if (!qm_info->qm_port_params) 1130 goto alloc_err; 1131 1132 qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1133 sizeof(struct ecore_wfq_data) * 1134 ecore_init_qm_get_num_vports(p_hwfn)); 1135 if (!qm_info->wfq_data) 1136 goto alloc_err; 1137 1138 return ECORE_SUCCESS; 1139 1140 alloc_err: 1141 DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n"); 1142 ecore_qm_info_free(p_hwfn); 1143 return ECORE_NOMEM; 1144 } 1145 /******************** End QM initialization ***************/ 1146 1147 enum _ecore_status_t 
ecore_resc_alloc(struct ecore_dev *p_dev) 1148 { 1149 enum _ecore_status_t rc = ECORE_SUCCESS; 1150 int i; 1151 1152 if (IS_VF(p_dev)) { 1153 for_each_hwfn(p_dev, i) { 1154 rc = ecore_l2_alloc(&p_dev->hwfns[i]); 1155 if (rc != ECORE_SUCCESS) 1156 return rc; 1157 } 1158 return rc; 1159 } 1160 1161 p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL, 1162 sizeof(*p_dev->fw_data)); 1163 if (!p_dev->fw_data) 1164 return ECORE_NOMEM; 1165 1166 for_each_hwfn(p_dev, i) { 1167 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1168 u32 n_eqes, num_cons; 1169 1170 /* initialize the doorbell recovery mechanism */ 1171 rc = ecore_db_recovery_setup(p_hwfn); 1172 if (rc) 1173 goto alloc_err; 1174 1175 /* First allocate the context manager structure */ 1176 rc = ecore_cxt_mngr_alloc(p_hwfn); 1177 if (rc) 1178 goto alloc_err; 1179 1180 /* Set the HW cid/tid numbers (in the context manager) 1181 * Must be done prior to any further computations. 1182 */ 1183 rc = ecore_cxt_set_pf_params(p_hwfn); 1184 if (rc) 1185 goto alloc_err; 1186 1187 rc = ecore_alloc_qm_data(p_hwfn); 1188 if (rc) 1189 goto alloc_err; 1190 1191 /* init qm info */ 1192 ecore_init_qm_info(p_hwfn); 1193 1194 /* Compute the ILT client partition */ 1195 rc = ecore_cxt_cfg_ilt_compute(p_hwfn); 1196 if (rc) 1197 goto alloc_err; 1198 1199 /* CID map / ILT shadow table / T2 1200 * The talbes sizes are determined by the computations above 1201 */ 1202 rc = ecore_cxt_tables_alloc(p_hwfn); 1203 if (rc) 1204 goto alloc_err; 1205 1206 /* SPQ, must follow ILT because initializes SPQ context */ 1207 rc = ecore_spq_alloc(p_hwfn); 1208 if (rc) 1209 goto alloc_err; 1210 1211 /* SP status block allocation */ 1212 p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, 1213 RESERVED_PTT_DPC); 1214 1215 rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 1216 if (rc) 1217 goto alloc_err; 1218 1219 rc = ecore_iov_alloc(p_hwfn); 1220 if (rc) 1221 goto alloc_err; 1222 1223 /* EQ */ 1224 n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain); 1225 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 1226 /* Calculate the EQ size 1227 * --------------------- 1228 * Each ICID may generate up to one event at a time i.e. 1229 * the event must be handled/cleared before a new one 1230 * can be generated. We calculate the sum of events per 1231 * protocol and create an EQ deep enough to handle the 1232 * worst case: 1233 * - Core - according to SPQ. 1234 * - RoCE - per QP there are a couple of ICIDs, one 1235 * responder and one requester, each can 1236 * generate an EQE => n_eqes_qp = 2 * n_qp. 1237 * Each CQ can generate an EQE. There are 2 CQs 1238 * per QP => n_eqes_cq = 2 * n_qp. 1239 * Hence the RoCE total is 4 * n_qp or 1240 * 2 * num_cons. 1241 * - ENet - There can be up to two events per VF. One 1242 * for VF-PF channel and another for VF FLR 1243 * initial cleanup. The number of VFs is 1244 * bounded by MAX_NUM_VFS_BB, and is much 1245 * smaller than RoCE's so we avoid exact 1246 * calculation. 
1247 */ 1248 if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) { 1249 num_cons = 1250 ecore_cxt_get_proto_cid_count( 1251 p_hwfn, 1252 PROTOCOLID_ROCE, 1253 OSAL_NULL); 1254 num_cons *= 2; 1255 } else { 1256 num_cons = ecore_cxt_get_proto_cid_count( 1257 p_hwfn, 1258 PROTOCOLID_IWARP, 1259 OSAL_NULL); 1260 } 1261 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 1262 } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1263 num_cons = 1264 ecore_cxt_get_proto_cid_count(p_hwfn, 1265 PROTOCOLID_ISCSI, 1266 OSAL_NULL); 1267 n_eqes += 2 * num_cons; 1268 } 1269 1270 if (n_eqes > 0xFFFF) { 1271 DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements." 1272 "The maximum of a u16 chain is 0x%x\n", 1273 n_eqes, 0xFFFF); 1274 goto alloc_no_mem; 1275 } 1276 1277 rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes); 1278 if (rc) 1279 goto alloc_err; 1280 1281 rc = ecore_consq_alloc(p_hwfn); 1282 if (rc) 1283 goto alloc_err; 1284 1285 rc = ecore_l2_alloc(p_hwfn); 1286 if (rc != ECORE_SUCCESS) 1287 goto alloc_err; 1288 1289 /* DMA info initialization */ 1290 rc = ecore_dmae_info_alloc(p_hwfn); 1291 if (rc) { 1292 DP_NOTICE(p_hwfn, true, 1293 "Failed to allocate memory for dmae_info" 1294 " structure\n"); 1295 goto alloc_err; 1296 } 1297 1298 /* DCBX initialization */ 1299 rc = ecore_dcbx_info_alloc(p_hwfn); 1300 if (rc) { 1301 DP_NOTICE(p_hwfn, true, 1302 "Failed to allocate memory for dcbx structure\n"); 1303 goto alloc_err; 1304 } 1305 } 1306 1307 p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, 1308 sizeof(*p_dev->reset_stats)); 1309 if (!p_dev->reset_stats) { 1310 DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n"); 1311 goto alloc_no_mem; 1312 } 1313 1314 return ECORE_SUCCESS; 1315 1316 alloc_no_mem: 1317 rc = ECORE_NOMEM; 1318 alloc_err: 1319 ecore_resc_free(p_dev); 1320 return rc; 1321 } 1322 1323 void ecore_resc_setup(struct ecore_dev *p_dev) 1324 { 1325 int i; 1326 1327 if (IS_VF(p_dev)) { 1328 for_each_hwfn(p_dev, i) 1329 ecore_l2_setup(&p_dev->hwfns[i]); 1330 return; 1331 } 1332 1333 for_each_hwfn(p_dev, i) { 1334 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1335 1336 ecore_cxt_mngr_setup(p_hwfn); 1337 ecore_spq_setup(p_hwfn); 1338 ecore_eq_setup(p_hwfn); 1339 ecore_consq_setup(p_hwfn); 1340 1341 /* Read shadow of current MFW mailbox */ 1342 ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 1343 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 1344 p_hwfn->mcp_info->mfw_mb_cur, 1345 p_hwfn->mcp_info->mfw_mb_length); 1346 1347 ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt); 1348 1349 ecore_l2_setup(p_hwfn); 1350 ecore_iov_setup(p_hwfn); 1351 } 1352 } 1353 1354 #define FINAL_CLEANUP_POLL_CNT (100) 1355 #define FINAL_CLEANUP_POLL_TIME (10) 1356 enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, 1357 struct ecore_ptt *p_ptt, 1358 u16 id, bool is_vf) 1359 { 1360 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 1361 enum _ecore_status_t rc = ECORE_TIMEOUT; 1362 1363 #ifndef ASIC_ONLY 1364 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) || 1365 CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1366 DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n"); 1367 return ECORE_SUCCESS; 1368 } 1369 #endif 1370 1371 addr = GTT_BAR0_MAP_REG_USDM_RAM + 1372 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 1373 1374 if (is_vf) 1375 id += 0x10; 1376 1377 command |= X_FINAL_CLEANUP_AGG_INT << 1378 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 1379 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 1380 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 1381 command |= SDM_COMP_TYPE_AGG_INT << 
SDM_OP_GEN_COMP_TYPE_SHIFT; 1382 1383 /* Make sure notification is not set before initiating final cleanup */ 1384 1385 if (REG_RD(p_hwfn, addr)) { 1386 DP_NOTICE(p_hwfn, false, 1387 "Unexpected; Found final cleanup notification"); 1388 DP_NOTICE(p_hwfn, false, 1389 " before initiating final cleanup\n"); 1390 REG_WR(p_hwfn, addr, 0); 1391 } 1392 1393 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1394 "Sending final cleanup for PFVF[%d] [Command %08x]\n", 1395 id, command); 1396 1397 ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1398 1399 /* Poll until completion */ 1400 while (!REG_RD(p_hwfn, addr) && count--) 1401 OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); 1402 1403 if (REG_RD(p_hwfn, addr)) 1404 rc = ECORE_SUCCESS; 1405 else 1406 DP_NOTICE(p_hwfn, true, 1407 "Failed to receive FW final cleanup notification\n"); 1408 1409 /* Cleanup afterwards */ 1410 REG_WR(p_hwfn, addr, 0); 1411 1412 return rc; 1413 } 1414 1415 static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) 1416 { 1417 int hw_mode = 0; 1418 1419 if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { 1420 hw_mode |= 1 << MODE_BB; 1421 } else if (ECORE_IS_AH(p_hwfn->p_dev)) { 1422 hw_mode |= 1 << MODE_K2; 1423 } else { 1424 DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n", 1425 p_hwfn->p_dev->type); 1426 return ECORE_INVAL; 1427 } 1428 1429 /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */ 1430 switch (p_hwfn->p_dev->num_ports_in_engine) { 1431 case 1: 1432 hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1433 break; 1434 case 2: 1435 hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1436 break; 1437 case 4: 1438 hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1439 break; 1440 default: 1441 DP_NOTICE(p_hwfn, true, 1442 "num_ports_in_engine = %d not supported\n", 1443 p_hwfn->p_dev->num_ports_in_engine); 1444 return ECORE_INVAL; 1445 } 1446 1447 if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, 1448 &p_hwfn->p_dev->mf_bits)) 1449 hw_mode |= 1 << MODE_MF_SD; 1450 else 1451 hw_mode |= 1 << MODE_MF_SI; 1452 1453 #ifndef ASIC_ONLY 1454 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1455 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1456 hw_mode |= 1 << MODE_FPGA; 1457 } else { 1458 if (p_hwfn->p_dev->b_is_emul_full) 1459 hw_mode |= 1 << MODE_EMUL_FULL; 1460 else 1461 hw_mode |= 1 << MODE_EMUL_REDUCED; 1462 } 1463 } else 1464 #endif 1465 hw_mode |= 1 << MODE_ASIC; 1466 1467 if (ECORE_IS_CMT(p_hwfn->p_dev)) 1468 hw_mode |= 1 << MODE_100G; 1469 1470 p_hwfn->hw_info.hw_mode = hw_mode; 1471 1472 DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), 1473 "Configuring function for hw_mode: 0x%08x\n", 1474 p_hwfn->hw_info.hw_mode); 1475 1476 return ECORE_SUCCESS; 1477 } 1478 1479 #ifndef ASIC_ONLY 1480 /* MFW-replacement initializations for non-ASIC */ 1481 static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn, 1482 struct ecore_ptt *p_ptt) 1483 { 1484 struct ecore_dev *p_dev = p_hwfn->p_dev; 1485 u32 pl_hv = 1; 1486 int i; 1487 1488 if (CHIP_REV_IS_EMUL(p_dev)) { 1489 if (ECORE_IS_AH(p_dev)) 1490 pl_hv |= 0x600; 1491 } 1492 1493 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv); 1494 1495 if (CHIP_REV_IS_EMUL(p_dev) && 1496 (ECORE_IS_AH(p_dev))) 1497 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5, 1498 0x3ffffff); 1499 1500 /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */ 1501 /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */ 1502 if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev)) 1503 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4); 1504 1505 if (CHIP_REV_IS_EMUL(p_dev)) { 1506 if (ECORE_IS_AH(p_dev)) { 1507 /* 2 for 4-port, 1 
for 2-port, 0 for 1-port */ 1508 ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE, 1509 (p_dev->num_ports_in_engine >> 1)); 1510 1511 ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN, 1512 p_dev->num_ports_in_engine == 4 ? 0 : 3); 1513 } 1514 } 1515 1516 /* Poll on RBC */ 1517 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1); 1518 for (i = 0; i < 100; i++) { 1519 OSAL_UDELAY(50); 1520 if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1) 1521 break; 1522 } 1523 if (i == 100) 1524 DP_NOTICE(p_hwfn, true, 1525 "RBC done failed to complete in PSWRQ2\n"); 1526 1527 return ECORE_SUCCESS; 1528 } 1529 #endif 1530 1531 /* Init run time data for all PFs and their VFs on an engine. 1532 * TBD - for VFs - Once we have parent PF info for each VF in 1533 * shmem available as CAU requires knowledge of parent PF for each VF. 1534 */ 1535 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev) 1536 { 1537 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 1538 int i, igu_sb_id; 1539 1540 for_each_hwfn(p_dev, i) { 1541 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1542 struct ecore_igu_info *p_igu_info; 1543 struct ecore_igu_block *p_block; 1544 struct cau_sb_entry sb_entry; 1545 1546 p_igu_info = p_hwfn->hw_info.p_igu_info; 1547 1548 for (igu_sb_id = 0; 1549 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev); 1550 igu_sb_id++) { 1551 p_block = &p_igu_info->entry[igu_sb_id]; 1552 1553 if (!p_block->is_pf) 1554 continue; 1555 1556 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, 1557 p_block->function_id, 0, 0); 1558 STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, 1559 sb_entry); 1560 } 1561 } 1562 } 1563 1564 static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn, 1565 struct ecore_ptt *p_ptt) 1566 { 1567 u32 val, wr_mbs, cache_line_size; 1568 1569 val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 1570 switch (val) { 1571 case 0: 1572 wr_mbs = 128; 1573 break; 1574 case 1: 1575 wr_mbs = 256; 1576 break; 1577 case 2: 1578 wr_mbs = 512; 1579 break; 1580 default: 1581 DP_INFO(p_hwfn, 1582 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1583 val); 1584 return; 1585 } 1586 1587 cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs); 1588 switch (cache_line_size) { 1589 case 32: 1590 val = 0; 1591 break; 1592 case 64: 1593 val = 1; 1594 break; 1595 case 128: 1596 val = 2; 1597 break; 1598 case 256: 1599 val = 3; 1600 break; 1601 default: 1602 DP_INFO(p_hwfn, 1603 "Unexpected value of cache line size [0x%x]. 
Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1604 cache_line_size); 1605 } 1606 1607 if (wr_mbs < OSAL_CACHE_LINE_SIZE) 1608 DP_INFO(p_hwfn, 1609 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 1610 OSAL_CACHE_LINE_SIZE, wr_mbs); 1611 1612 STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 1613 if (val > 0) { 1614 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val); 1615 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val); 1616 } 1617 } 1618 1619 static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, 1620 struct ecore_ptt *p_ptt, 1621 int hw_mode) 1622 { 1623 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1624 struct ecore_dev *p_dev = p_hwfn->p_dev; 1625 u8 vf_id, max_num_vfs; 1626 u16 num_pfs, pf_id; 1627 u32 concrete_fid; 1628 enum _ecore_status_t rc = ECORE_SUCCESS; 1629 1630 ecore_init_cau_rt_data(p_dev); 1631 1632 /* Program GTT windows */ 1633 ecore_gtt_init(p_hwfn, p_ptt); 1634 1635 #ifndef ASIC_ONLY 1636 if (CHIP_REV_IS_EMUL(p_dev)) { 1637 rc = ecore_hw_init_chip(p_hwfn, p_ptt); 1638 if (rc != ECORE_SUCCESS) 1639 return rc; 1640 } 1641 #endif 1642 1643 if (p_hwfn->mcp_info) { 1644 if (p_hwfn->mcp_info->func_info.bandwidth_max) 1645 qm_info->pf_rl_en = 1; 1646 if (p_hwfn->mcp_info->func_info.bandwidth_min) 1647 qm_info->pf_wfq_en = 1; 1648 } 1649 1650 ecore_qm_common_rt_init(p_hwfn, 1651 p_dev->num_ports_in_engine, 1652 qm_info->max_phys_tcs_per_port, 1653 qm_info->pf_rl_en, qm_info->pf_wfq_en, 1654 qm_info->vport_rl_en, qm_info->vport_wfq_en, 1655 qm_info->qm_port_params); 1656 1657 ecore_cxt_hw_init_common(p_hwfn); 1658 1659 ecore_init_cache_line_size(p_hwfn, p_ptt); 1660 1661 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 1662 if (rc != ECORE_SUCCESS) 1663 return rc; 1664 1665 /* @@TBD MichalK - should add VALIDATE_VFID to init tool... 1666 * need to decide with which value, maybe runtime 1667 */ 1668 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 1669 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 1670 1671 if (ECORE_IS_BB(p_dev)) { 1672 /* Workaround clears ROCE search for all functions to prevent 1673 * involving non initialized function in processing ROCE packet. 1674 */ 1675 num_pfs = NUM_OF_ENG_PFS(p_dev); 1676 for (pf_id = 0; pf_id < num_pfs; pf_id++) { 1677 ecore_fid_pretend(p_hwfn, p_ptt, pf_id); 1678 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1679 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1680 } 1681 /* pretend to original PF */ 1682 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1683 } 1684 1685 /* Workaround for avoiding CCFC execution error when getting packets 1686 * with CRC errors, and allowing instead the invoking of the FW error 1687 * handler. 1688 * This is not done inside the init tool since it currently can't 1689 * perform a pretending to VFs. 1690 */ 1691 max_num_vfs = ECORE_IS_AH(p_dev) ? 
MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 1692 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 1693 concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id); 1694 ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); 1695 ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 1696 ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 1697 ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); 1698 ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); 1699 } 1700 /* pretend to original PF */ 1701 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1702 1703 return rc; 1704 } 1705 1706 #ifndef ASIC_ONLY 1707 #define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4) 1708 #define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5) 1709 1710 #define PMEG_IF_BYTE_COUNT 8 1711 1712 static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn, 1713 struct ecore_ptt *p_ptt, 1714 u32 addr, u64 data, u8 reg_type, u8 port) 1715 { 1716 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 1717 "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n", 1718 ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) | 1719 (8 << PMEG_IF_BYTE_COUNT), 1720 (reg_type << 25) | (addr << 8) | port, 1721 (u32)((data >> 32) & 0xffffffff), 1722 (u32)(data & 0xffffffff)); 1723 1724 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB, 1725 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) & 1726 0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT)); 1727 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB, 1728 (reg_type << 25) | (addr << 8) | port); 1729 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff); 1730 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, 1731 (data >> 32) & 0xffffffff); 1732 } 1733 1734 #define XLPORT_MODE_REG (0x20a) 1735 #define XLPORT_MAC_CONTROL (0x210) 1736 #define XLPORT_FLOW_CONTROL_CONFIG (0x207) 1737 #define XLPORT_ENABLE_REG (0x20b) 1738 1739 #define XLMAC_CTRL (0x600) 1740 #define XLMAC_MODE (0x601) 1741 #define XLMAC_RX_MAX_SIZE (0x608) 1742 #define XLMAC_TX_CTRL (0x604) 1743 #define XLMAC_PAUSE_CTRL (0x60d) 1744 #define XLMAC_PFC_CTRL (0x60e) 1745 1746 static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn, 1747 struct ecore_ptt *p_ptt) 1748 { 1749 u8 loopback = 0, port = p_hwfn->port_id * 2; 1750 1751 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1752 1753 /* XLPORT MAC MODE *//* 0 Quad, 4 Single... 
*/ 1754 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1, 1755 port); 1756 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port); 1757 /* XLMAC: SOFT RESET */ 1758 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port); 1759 /* XLMAC: Port Speed >= 10Gbps */ 1760 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port); 1761 /* XLMAC: Max Size */ 1762 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port); 1763 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL, 1764 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38), 1765 0, port); 1766 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port); 1767 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL, 1768 0x30ffffc000ULL, 0, port); 1769 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0, 1770 port); /* XLMAC: TX_EN, RX_EN */ 1771 /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */ 1772 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 1773 0x1003 | (loopback << 2), 0, port); 1774 /* Enabled Parallel PFC interface */ 1775 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port); 1776 1777 /* XLPORT port enable */ 1778 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port); 1779 } 1780 1781 static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn, 1782 struct ecore_ptt *p_ptt) 1783 { 1784 u8 port = p_hwfn->port_id; 1785 u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE; 1786 1787 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1788 1789 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2), 1790 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) | 1791 (port << 1792 CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) | 1793 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT)); 1794 1795 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5, 1796 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT); 1797 1798 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5, 1799 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT); 1800 1801 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5, 1802 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT); 1803 1804 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5, 1805 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT); 1806 1807 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5, 1808 (0xA << 1809 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) | 1810 (8 << 1811 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT)); 1812 1813 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5, 1814 0xa853); 1815 } 1816 1817 static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn, 1818 struct ecore_ptt *p_ptt) 1819 { 1820 if (ECORE_IS_AH(p_hwfn->p_dev)) 1821 ecore_emul_link_init_ah_e5(p_hwfn, p_ptt); 1822 else /* BB */ 1823 ecore_emul_link_init_bb(p_hwfn, p_ptt); 1824 } 1825 1826 static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn, 1827 struct ecore_ptt *p_ptt, u8 port) 1828 { 1829 int port_offset = port ? 
					     0x800 : 0;
	u32 xmac_rxctrl = 0;

	/* Reset of XMAC */
	/* FIXME: move to common start */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_BIT);	/* Clear */
	OSAL_MSLEEP(1);
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_BIT);	/* Set */

	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);

	/* Set the number of ports on the Warp Core to 10G */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);

	/* Soft reset of XMAC */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
	OSAL_MSLEEP(1);
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);

	/* FIXME: move to common end */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);

	/* Set Max packet size: initialize XMAC block register for port 0 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);

	/* CRC append for Tx packets: init XMAC block register for port 1 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);

	/* Enable TX and RX: initialize XMAC block register for port 1 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
		 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
	xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
			       XMAC_REG_RX_CTRL_BB + port_offset);
	xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
}
#endif

static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
	u32 dpi_bit_shift, dpi_count, dpi_page_size;
	u32 min_dpis;
	u32 n_wids;

	/* Calculate DPI size
	 * ------------------
	 * The PWM region contains Doorbell Pages. The first is reserved for
	 * the kernel for, e.g., L2. The others are free to be used by non-
	 * trusted applications, typically from user space. Each page, called a
	 * doorbell page, is sectioned into windows that allow doorbells to be
	 * issued in parallel by the kernel/application. The size of such a
	 * window (a.k.a. WID) is 1kB.
	 * Summary:
	 * 1kB WID x N WIDS = DPI page size
	 * DPI page size x N DPIs = PWM region size
	 * Notes:
	 * The DPI page size must be a multiple of OSAL_PAGE_SIZE in order to
	 * ensure that two applications won't share the same page.
	 * It also must contain at least one WID per CPU to allow parallelism.
	 * It also must be a power of 2, since it is stored as a bit shift.
	 *
	 * The DPI page size is stored in a register as 'dpi_bit_shift' so that
	 * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096,
	 * containing 4 WIDs.
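	 *
	 * A worked example, for illustration only (assuming the 1kB WID size
	 * above and a 4kB OSAL_PAGE_SIZE): with 6 CPUs, n_wids is rounded up
	 * to the next power of two (8), so the DPI page size becomes 8kB and
	 * dpi_bit_shift is 1; a 512kB PWM region would then provide 64 DPIs.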
1900 */ 1901 n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus); 1902 dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids); 1903 dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) & 1904 ~(OSAL_PAGE_SIZE - 1); 1905 dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096); 1906 dpi_count = pwm_region_size / dpi_page_size; 1907 1908 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 1909 min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis); 1910 1911 /* Update hwfn */ 1912 p_hwfn->dpi_size = dpi_page_size; 1913 p_hwfn->dpi_count = dpi_count; 1914 1915 /* Update registers */ 1916 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 1917 1918 if (dpi_count < min_dpis) 1919 return ECORE_NORESOURCES; 1920 1921 return ECORE_SUCCESS; 1922 } 1923 1924 enum ECORE_ROCE_EDPM_MODE { 1925 ECORE_ROCE_EDPM_MODE_ENABLE = 0, 1926 ECORE_ROCE_EDPM_MODE_FORCE_ON = 1, 1927 ECORE_ROCE_EDPM_MODE_DISABLE = 2, 1928 }; 1929 1930 static enum _ecore_status_t 1931 ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn, 1932 struct ecore_ptt *p_ptt) 1933 { 1934 u32 pwm_regsize, norm_regsize; 1935 u32 non_pwm_conn, min_addr_reg1; 1936 u32 db_bar_size, n_cpus; 1937 u32 roce_edpm_mode; 1938 u32 pf_dems_shift; 1939 enum _ecore_status_t rc = ECORE_SUCCESS; 1940 u8 cond; 1941 1942 db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); 1943 if (ECORE_IS_CMT(p_hwfn->p_dev)) 1944 db_bar_size /= 2; 1945 1946 /* Calculate doorbell regions 1947 * ----------------------------------- 1948 * The doorbell BAR is made of two regions. The first is called normal 1949 * region and the second is called PWM region. In the normal region 1950 * each ICID has its own set of addresses so that writing to that 1951 * specific address identifies the ICID. In the Process Window Mode 1952 * region the ICID is given in the data written to the doorbell. The 1953 * above per PF register denotes the offset in the doorbell BAR in which 1954 * the PWM region begins. 1955 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per 1956 * non-PWM connection. The calculation below computes the total non-PWM 1957 * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is 1958 * in units of 4,096 bytes. 1959 */ 1960 non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 1961 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 1962 OSAL_NULL) + 1963 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL); 1964 norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 1965 OSAL_PAGE_SIZE); 1966 min_addr_reg1 = norm_regsize / 4096; 1967 pwm_regsize = db_bar_size - norm_regsize; 1968 1969 /* Check that the normal and PWM sizes are valid */ 1970 if (db_bar_size < norm_regsize) { 1971 DP_ERR(p_hwfn->p_dev, 1972 "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", 1973 db_bar_size, norm_regsize); 1974 return ECORE_NORESOURCES; 1975 } 1976 if (pwm_regsize < ECORE_MIN_PWM_REGION) { 1977 DP_ERR(p_hwfn->p_dev, 1978 "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", 1979 pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, 1980 norm_regsize); 1981 return ECORE_NORESOURCES; 1982 } 1983 1984 /* Calculate number of DPIs */ 1985 roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 1986 if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) || 1987 ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) { 1988 /* Either EDPM is mandatory, or we are attempting to allocate a 1989 * WID per CPU. 
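		 * To summarize how the three EDPM modes are handled here and
		 * in the fallback below:
		 * FORCE_ON - a WID per CPU must be allocated; a failure is
		 *            treated as fatal further down;
		 * ENABLE   - try a WID per CPU, fall back to a single WID;
		 * DISABLE  - (or EDPM disabled via DCBx) use a single WID.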
1990 */ 1991 n_cpus = OSAL_NUM_CPUS(); 1992 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1993 } 1994 1995 cond = ((rc != ECORE_SUCCESS) && 1996 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) || 1997 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE); 1998 if (cond || p_hwfn->dcbx_no_edpm) { 1999 /* Either EDPM is disabled from user configuration, or it is 2000 * disabled via DCBx, or it is not mandatory and we failed to 2001 * allocated a WID per CPU. 2002 */ 2003 n_cpus = 1; 2004 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 2005 2006 /* If we entered this flow due to DCBX then the DPM register is 2007 * already configured. 2008 */ 2009 } 2010 2011 DP_INFO(p_hwfn, 2012 "doorbell bar: normal_region_size=%d, pwm_region_size=%d", 2013 norm_regsize, pwm_regsize); 2014 DP_INFO(p_hwfn, 2015 " dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", 2016 p_hwfn->dpi_size, p_hwfn->dpi_count, 2017 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 2018 "disabled" : "enabled"); 2019 2020 /* Check return codes from above calls */ 2021 if (rc != ECORE_SUCCESS) { 2022 DP_ERR(p_hwfn, 2023 "Failed to allocate enough DPIs\n"); 2024 return ECORE_NORESOURCES; 2025 } 2026 2027 /* Update hwfn */ 2028 p_hwfn->dpi_start_offset = norm_regsize; 2029 2030 /* Update registers */ 2031 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 2032 pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4); 2033 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 2034 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 2035 2036 return ECORE_SUCCESS; 2037 } 2038 2039 static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn, 2040 struct ecore_ptt *p_ptt, 2041 int hw_mode) 2042 { 2043 u32 ppf_to_eng_sel[NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE]; 2044 u32 val; 2045 enum _ecore_status_t rc = ECORE_SUCCESS; 2046 u8 i; 2047 2048 /* In CMT for non-RoCE packets - use connection based classification */ 2049 val = ECORE_IS_CMT(p_hwfn->p_dev) ? 
0x8 : 0x0; 2050 for (i = 0; i < NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE; i++) 2051 ppf_to_eng_sel[i] = val; 2052 STORE_RT_REG_AGG(p_hwfn, NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET, 2053 ppf_to_eng_sel); 2054 2055 /* In CMT the gate should be cleared by the 2nd hwfn */ 2056 if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn)) 2057 STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0); 2058 2059 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, 2060 hw_mode); 2061 if (rc != ECORE_SUCCESS) 2062 return rc; 2063 2064 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0); 2065 2066 #ifndef ASIC_ONLY 2067 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) 2068 return ECORE_SUCCESS; 2069 2070 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 2071 if (ECORE_IS_AH(p_hwfn->p_dev)) 2072 return ECORE_SUCCESS; 2073 else if (ECORE_IS_BB(p_hwfn->p_dev)) 2074 ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id); 2075 } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 2076 if (ECORE_IS_CMT(p_hwfn->p_dev)) { 2077 /* Activate OPTE in CMT */ 2078 u32 val; 2079 2080 val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV); 2081 val |= 0x10; 2082 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val); 2083 ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1); 2084 ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1); 2085 ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1); 2086 ecore_wr(p_hwfn, p_ptt, 2087 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1); 2088 ecore_wr(p_hwfn, p_ptt, 2089 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555); 2090 ecore_wr(p_hwfn, p_ptt, 2091 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4, 2092 0x55555555); 2093 } 2094 2095 ecore_emul_link_init(p_hwfn, p_ptt); 2096 } else { 2097 DP_INFO(p_hwfn->p_dev, "link is not being configured\n"); 2098 } 2099 #endif 2100 2101 return rc; 2102 } 2103 2104 static enum _ecore_status_t 2105 ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, 2106 struct ecore_ptt *p_ptt, 2107 struct ecore_tunnel_info *p_tunn, 2108 int hw_mode, 2109 bool b_hw_start, 2110 enum ecore_int_mode int_mode, bool allow_npar_tx_switch) 2111 { 2112 u8 rel_pf_id = p_hwfn->rel_pf_id; 2113 u32 prs_reg; 2114 enum _ecore_status_t rc = ECORE_SUCCESS; 2115 u16 ctrl; 2116 int pos; 2117 2118 if (p_hwfn->mcp_info) { 2119 struct ecore_mcp_function_info *p_info; 2120 2121 p_info = &p_hwfn->mcp_info->func_info; 2122 if (p_info->bandwidth_min) 2123 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 2124 2125 /* Update rate limit once we'll actually have a link */ 2126 p_hwfn->qm_info.pf_rl = 100000; 2127 } 2128 ecore_cxt_hw_init_pf(p_hwfn, p_ptt); 2129 2130 ecore_int_igu_init_rt(p_hwfn); 2131 2132 /* Set VLAN in NIG if needed */ 2133 if (hw_mode & (1 << MODE_MF_SD)) { 2134 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 2135 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 2136 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 2137 p_hwfn->hw_info.ovlan); 2138 2139 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 2140 "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); 2141 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, 2142 1); 2143 } 2144 2145 /* Enable classification by MAC if needed */ 2146 if (hw_mode & (1 << MODE_MF_SI)) { 2147 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 2148 "Configuring TAGMAC_CLS_TYPE\n"); 2149 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 2150 1); 2151 } 2152 2153 /* Protocl Configuration - @@@TBD - should we set 0 otherwise? */ 2154 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 2155 (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 
1 : 0); 2156 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 2157 (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0); 2158 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 2159 2160 /* perform debug configuration when chip is out of reset */ 2161 OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); 2162 2163 /* PF Init sequence */ 2164 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 2165 if (rc) 2166 return rc; 2167 2168 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 2169 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 2170 if (rc) 2171 return rc; 2172 2173 /* Pure runtime initializations - directly to the HW */ 2174 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 2175 2176 /* PCI relaxed ordering causes a decrease in the performance on some 2177 * systems. Till a root cause is found, disable this attribute in the 2178 * PCI config space. 2179 */ 2180 /* Not in use @DPDK 2181 * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP); 2182 * if (!pos) { 2183 * DP_NOTICE(p_hwfn, true, 2184 * "Failed to find the PCIe Cap\n"); 2185 * return ECORE_IO; 2186 * } 2187 * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl); 2188 * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; 2189 * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl); 2190 */ 2191 2192 rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 2193 if (rc) 2194 return rc; 2195 if (b_hw_start) { 2196 /* enable interrupts */ 2197 rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode); 2198 if (rc != ECORE_SUCCESS) 2199 return rc; 2200 2201 /* send function start command */ 2202 rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn, 2203 allow_npar_tx_switch); 2204 if (rc) { 2205 DP_NOTICE(p_hwfn, true, 2206 "Function start ramrod failed\n"); 2207 } else { 2208 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2209 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2210 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2211 2212 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { 2213 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, 2214 (1 << 2)); 2215 ecore_wr(p_hwfn, p_ptt, 2216 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 2217 0x100); 2218 } 2219 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2220 "PRS_REG_SEARCH registers after start PFn\n"); 2221 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); 2222 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2223 "PRS_REG_SEARCH_TCP: %x\n", prs_reg); 2224 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); 2225 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2226 "PRS_REG_SEARCH_UDP: %x\n", prs_reg); 2227 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); 2228 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2229 "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); 2230 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); 2231 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2232 "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); 2233 prs_reg = ecore_rd(p_hwfn, p_ptt, 2234 PRS_REG_SEARCH_TCP_FIRST_FRAG); 2235 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2236 "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", 2237 prs_reg); 2238 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2239 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2240 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2241 } 2242 } 2243 return rc; 2244 } 2245 2246 enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn, 2247 struct ecore_ptt *p_ptt, 2248 bool b_enable) 2249 { 2250 u32 delay_idx = 0, val, set_val = b_enable ? 
1 : 0; 2251 2252 /* Configure the PF's internal FID_enable for master transactions */ 2253 ecore_wr(p_hwfn, p_ptt, 2254 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 2255 2256 /* Wait until value is set - try for 1 second every 50us */ 2257 for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 2258 val = ecore_rd(p_hwfn, p_ptt, 2259 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 2260 if (val == set_val) 2261 break; 2262 2263 OSAL_UDELAY(50); 2264 } 2265 2266 if (val != set_val) { 2267 DP_NOTICE(p_hwfn, true, 2268 "PFID_ENABLE_MASTER wasn't changed after a second\n"); 2269 return ECORE_UNKNOWN_ERROR; 2270 } 2271 2272 return ECORE_SUCCESS; 2273 } 2274 2275 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, 2276 struct ecore_ptt *p_main_ptt) 2277 { 2278 /* Read shadow of current MFW mailbox */ 2279 ecore_mcp_read_mb(p_hwfn, p_main_ptt); 2280 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 2281 p_hwfn->mcp_info->mfw_mb_cur, 2282 p_hwfn->mcp_info->mfw_mb_length); 2283 } 2284 2285 static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn, 2286 struct ecore_ptt *p_ptt) 2287 { 2288 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 2289 1 << p_hwfn->abs_pf_id); 2290 } 2291 2292 static void 2293 ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, 2294 struct ecore_drv_load_params *p_drv_load) 2295 { 2296 /* Make sure that if ecore-client didn't provide inputs, all the 2297 * expected defaults are indeed zero. 2298 */ 2299 OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0); 2300 OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0); 2301 OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0); 2302 2303 OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); 2304 2305 if (p_drv_load != OSAL_NULL) { 2306 p_load_req->drv_role = p_drv_load->is_crash_kernel ? 
2307 ECORE_DRV_ROLE_KDUMP : 2308 ECORE_DRV_ROLE_OS; 2309 p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 2310 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 2311 p_load_req->override_force_load = 2312 p_drv_load->override_force_load; 2313 } 2314 } 2315 2316 enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, 2317 struct ecore_hw_init_params *p_params) 2318 { 2319 if (p_params->p_tunn) { 2320 ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 2321 ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 2322 } 2323 2324 p_hwfn->b_int_enabled = 1; 2325 2326 return ECORE_SUCCESS; 2327 } 2328 2329 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 2330 struct ecore_hw_init_params *p_params) 2331 { 2332 struct ecore_load_req_params load_req_params; 2333 u32 load_code, resp, param, drv_mb_param; 2334 bool b_default_mtu = true; 2335 struct ecore_hwfn *p_hwfn; 2336 enum _ecore_status_t rc = ECORE_SUCCESS; 2337 int i; 2338 2339 if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) { 2340 DP_NOTICE(p_dev, false, 2341 "MSI mode is not supported for CMT devices\n"); 2342 return ECORE_INVAL; 2343 } 2344 2345 if (IS_PF(p_dev)) { 2346 rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data); 2347 if (rc != ECORE_SUCCESS) 2348 return rc; 2349 } 2350 2351 for_each_hwfn(p_dev, i) { 2352 p_hwfn = &p_dev->hwfns[i]; 2353 2354 /* If management didn't provide a default, set one of our own */ 2355 if (!p_hwfn->hw_info.mtu) { 2356 p_hwfn->hw_info.mtu = 1500; 2357 b_default_mtu = false; 2358 } 2359 2360 if (IS_VF(p_dev)) { 2361 ecore_vf_start(p_hwfn, p_params); 2362 continue; 2363 } 2364 2365 rc = ecore_calc_hw_mode(p_hwfn); 2366 if (rc != ECORE_SUCCESS) 2367 return rc; 2368 2369 ecore_fill_load_req_params(&load_req_params, 2370 p_params->p_drv_load_params); 2371 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 2372 &load_req_params); 2373 if (rc != ECORE_SUCCESS) { 2374 DP_NOTICE(p_hwfn, true, 2375 "Failed sending a LOAD_REQ command\n"); 2376 return rc; 2377 } 2378 2379 load_code = load_req_params.load_code; 2380 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2381 "Load request was sent. Load code: 0x%x\n", 2382 load_code); 2383 2384 ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); 2385 2386 /* CQ75580: 2387 * When coming back from hiberbate state, the registers from 2388 * which shadow is read initially are not initialized. It turns 2389 * out that these registers get initialized during the call to 2390 * ecore_mcp_load_req request. So we need to reread them here 2391 * to get the proper shadow register value. 2392 * Note: This is a workaround for the missing MFW 2393 * initialization. It may be removed once the implementation 2394 * is done. 2395 */ 2396 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 2397 2398 /* Only relevant for recovery: 2399 * Clear the indication after the LOAD_REQ command is responded 2400 * by the MFW. 2401 */ 2402 p_dev->recov_in_prog = false; 2403 2404 p_hwfn->first_on_engine = (load_code == 2405 FW_MSG_CODE_DRV_LOAD_ENGINE); 2406 2407 if (!qm_lock_init) { 2408 OSAL_SPIN_LOCK_INIT(&qm_lock); 2409 qm_lock_init = true; 2410 } 2411 2412 /* Clean up chip from previous driver if such remains exist. 2413 * This is not needed when the PF is the first one on the 2414 * engine, since afterwards we are going to init the FW. 
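		 * (The cleanup itself is the "final cleanup" ramrod issued by
		 * ecore_final_cleanup() below; a failure is reported via
		 * ecore_hw_err_notify() as ECORE_HW_ERR_RAMROD_FAIL.)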
2415 */ 2416 if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { 2417 rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, 2418 p_hwfn->rel_pf_id, false); 2419 if (rc != ECORE_SUCCESS) { 2420 ecore_hw_err_notify(p_hwfn, 2421 ECORE_HW_ERR_RAMROD_FAIL); 2422 goto load_err; 2423 } 2424 } 2425 2426 /* Log and clean previous pglue_b errors if such exist */ 2427 ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); 2428 ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); 2429 2430 /* Enable the PF's internal FID_enable in the PXP */ 2431 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, 2432 true); 2433 if (rc != ECORE_SUCCESS) 2434 goto load_err; 2435 2436 switch (load_code) { 2437 case FW_MSG_CODE_DRV_LOAD_ENGINE: 2438 rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 2439 p_hwfn->hw_info.hw_mode); 2440 if (rc != ECORE_SUCCESS) 2441 break; 2442 /* Fall into */ 2443 case FW_MSG_CODE_DRV_LOAD_PORT: 2444 rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 2445 p_hwfn->hw_info.hw_mode); 2446 if (rc != ECORE_SUCCESS) 2447 break; 2448 /* Fall into */ 2449 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 2450 rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 2451 p_params->p_tunn, 2452 p_hwfn->hw_info.hw_mode, 2453 p_params->b_hw_start, 2454 p_params->int_mode, 2455 p_params->allow_npar_tx_switch); 2456 break; 2457 default: 2458 DP_NOTICE(p_hwfn, false, 2459 "Unexpected load code [0x%08x]", load_code); 2460 rc = ECORE_NOTIMPL; 2461 break; 2462 } 2463 2464 if (rc != ECORE_SUCCESS) { 2465 DP_NOTICE(p_hwfn, true, 2466 "init phase failed for loadcode 0x%x (rc %d)\n", 2467 load_code, rc); 2468 goto load_err; 2469 } 2470 2471 rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); 2472 if (rc != ECORE_SUCCESS) 2473 return rc; 2474 2475 /* send DCBX attention request command */ 2476 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 2477 "sending phony dcbx set command to trigger DCBx attention handling\n"); 2478 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2479 DRV_MSG_CODE_SET_DCBX, 2480 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp, 2481 ¶m); 2482 if (rc != ECORE_SUCCESS) { 2483 DP_NOTICE(p_hwfn, true, 2484 "Failed to send DCBX attention request\n"); 2485 return rc; 2486 } 2487 2488 p_hwfn->hw_init_done = true; 2489 } 2490 2491 if (IS_PF(p_dev)) { 2492 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2493 drv_mb_param = STORM_FW_VERSION; 2494 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2495 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 2496 drv_mb_param, &resp, ¶m); 2497 if (rc != ECORE_SUCCESS) 2498 DP_INFO(p_hwfn, "Failed to update firmware version\n"); 2499 2500 if (!b_default_mtu) 2501 rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 2502 p_hwfn->hw_info.mtu); 2503 if (rc != ECORE_SUCCESS) 2504 DP_INFO(p_hwfn, "Failed to update default mtu\n"); 2505 2506 rc = ecore_mcp_ov_update_driver_state(p_hwfn, 2507 p_hwfn->p_main_ptt, 2508 ECORE_OV_DRIVER_STATE_DISABLED); 2509 if (rc != ECORE_SUCCESS) 2510 DP_INFO(p_hwfn, "Failed to update driver state\n"); 2511 } 2512 2513 return rc; 2514 2515 load_err: 2516 /* The MFW load lock should be released regardless of success or failure 2517 * of initialization. 2518 * TODO: replace this with an attempt to send cancel_load. 
2519 */ 2520 ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); 2521 return rc; 2522 } 2523 2524 #define ECORE_HW_STOP_RETRY_LIMIT (10) 2525 static void ecore_hw_timers_stop(struct ecore_dev *p_dev, 2526 struct ecore_hwfn *p_hwfn, 2527 struct ecore_ptt *p_ptt) 2528 { 2529 int i; 2530 2531 /* close timers */ 2532 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 2533 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 2534 for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog; 2535 i++) { 2536 if ((!ecore_rd(p_hwfn, p_ptt, 2537 TM_REG_PF_SCAN_ACTIVE_CONN)) && 2538 (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) 2539 break; 2540 2541 /* Dependent on number of connection/tasks, possibly 2542 * 1ms sleep is required between polls 2543 */ 2544 OSAL_MSLEEP(1); 2545 } 2546 2547 if (i < ECORE_HW_STOP_RETRY_LIMIT) 2548 return; 2549 2550 DP_NOTICE(p_hwfn, true, "Timers linear scans are not over" 2551 " [Connection %02x Tasks %02x]\n", 2552 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 2553 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 2554 } 2555 2556 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) 2557 { 2558 int j; 2559 2560 for_each_hwfn(p_dev, j) { 2561 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2562 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2563 2564 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2565 } 2566 } 2567 2568 static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn, 2569 struct ecore_ptt *p_ptt, 2570 u32 addr, u32 expected_val) 2571 { 2572 u32 val = ecore_rd(p_hwfn, p_ptt, addr); 2573 2574 if (val != expected_val) { 2575 DP_NOTICE(p_hwfn, true, 2576 "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n", 2577 addr, val, expected_val); 2578 return ECORE_UNKNOWN_ERROR; 2579 } 2580 2581 return ECORE_SUCCESS; 2582 } 2583 2584 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) 2585 { 2586 struct ecore_hwfn *p_hwfn; 2587 struct ecore_ptt *p_ptt; 2588 enum _ecore_status_t rc, rc2 = ECORE_SUCCESS; 2589 int j; 2590 2591 for_each_hwfn(p_dev, j) { 2592 p_hwfn = &p_dev->hwfns[j]; 2593 p_ptt = p_hwfn->p_main_ptt; 2594 2595 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n"); 2596 2597 if (IS_VF(p_dev)) { 2598 ecore_vf_pf_int_cleanup(p_hwfn); 2599 rc = ecore_vf_pf_reset(p_hwfn); 2600 if (rc != ECORE_SUCCESS) { 2601 DP_NOTICE(p_hwfn, true, 2602 "ecore_vf_pf_reset failed. rc = %d.\n", 2603 rc); 2604 rc2 = ECORE_UNKNOWN_ERROR; 2605 } 2606 continue; 2607 } 2608 2609 /* mark the hw as uninitialized... */ 2610 p_hwfn->hw_init_done = false; 2611 2612 /* Send unload command to MCP */ 2613 if (!p_dev->recov_in_prog) { 2614 rc = ecore_mcp_unload_req(p_hwfn, p_ptt); 2615 if (rc != ECORE_SUCCESS) { 2616 DP_NOTICE(p_hwfn, true, 2617 "Failed sending a UNLOAD_REQ command. rc = %d.\n", 2618 rc); 2619 rc2 = ECORE_UNKNOWN_ERROR; 2620 } 2621 } 2622 2623 OSAL_DPC_SYNC(p_hwfn); 2624 2625 /* After this point no MFW attentions are expected, e.g. prevent 2626 * race between pf stop and dcbx pf update. 2627 */ 2628 2629 rc = ecore_sp_pf_stop(p_hwfn); 2630 if (rc != ECORE_SUCCESS) { 2631 DP_NOTICE(p_hwfn, true, 2632 "Failed to close PF against FW [rc = %d]. 
Continue to stop HW to prevent illegal host access by the device.\n", 2633 rc); 2634 rc2 = ECORE_UNKNOWN_ERROR; 2635 } 2636 2637 /* perform debug action after PF stop was sent */ 2638 OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id); 2639 2640 /* close NIG to BRB gate */ 2641 ecore_wr(p_hwfn, p_ptt, 2642 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2643 2644 /* close parser */ 2645 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2646 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2647 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2648 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2649 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2650 2651 /* @@@TBD - clean transmission queues (5.b) */ 2652 /* @@@TBD - clean BTB (5.c) */ 2653 2654 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2655 2656 /* @@@TBD - verify DMAE requests are done (8) */ 2657 2658 /* Disable Attention Generation */ 2659 ecore_int_igu_disable_int(p_hwfn, p_ptt); 2660 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 2661 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 2662 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 2663 rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt); 2664 if (rc != ECORE_SUCCESS) { 2665 DP_NOTICE(p_hwfn, true, 2666 "Failed to return IGU CAM to default\n"); 2667 rc2 = ECORE_UNKNOWN_ERROR; 2668 } 2669 2670 /* Need to wait 1ms to guarantee SBs are cleared */ 2671 OSAL_MSLEEP(1); 2672 2673 if (!p_dev->recov_in_prog) { 2674 ecore_verify_reg_val(p_hwfn, p_ptt, 2675 QM_REG_USG_CNT_PF_TX, 0); 2676 ecore_verify_reg_val(p_hwfn, p_ptt, 2677 QM_REG_USG_CNT_PF_OTHER, 0); 2678 /* @@@TBD - assert on incorrect xCFC values (10.b) */ 2679 } 2680 2681 /* Disable PF in HW blocks */ 2682 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 2683 ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 2684 2685 if (!p_dev->recov_in_prog) { 2686 ecore_mcp_unload_done(p_hwfn, p_ptt); 2687 if (rc != ECORE_SUCCESS) { 2688 DP_NOTICE(p_hwfn, true, 2689 "Failed sending a UNLOAD_DONE command. rc = %d.\n", 2690 rc); 2691 rc2 = ECORE_UNKNOWN_ERROR; 2692 } 2693 } 2694 } /* hwfn loop */ 2695 2696 if (IS_PF(p_dev) && !p_dev->recov_in_prog) { 2697 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2698 p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt; 2699 2700 /* Clear the PF's internal FID_enable in the PXP. 2701 * In CMT this should only be done for first hw-function, and 2702 * only after all transactions have stopped for all active 2703 * hw-functions. 2704 */ 2705 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, 2706 false); 2707 if (rc != ECORE_SUCCESS) { 2708 DP_NOTICE(p_hwfn, true, 2709 "ecore_pglueb_set_pfid_enable() failed. 
rc = %d.\n", 2710 rc); 2711 rc2 = ECORE_UNKNOWN_ERROR; 2712 } 2713 } 2714 2715 return rc2; 2716 } 2717 2718 enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev) 2719 { 2720 int j; 2721 2722 for_each_hwfn(p_dev, j) { 2723 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2724 struct ecore_ptt *p_ptt; 2725 2726 if (IS_VF(p_dev)) { 2727 ecore_vf_pf_int_cleanup(p_hwfn); 2728 continue; 2729 } 2730 p_ptt = ecore_ptt_acquire(p_hwfn); 2731 if (!p_ptt) 2732 return ECORE_AGAIN; 2733 2734 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, 2735 "Shutting down the fastpath\n"); 2736 2737 ecore_wr(p_hwfn, p_ptt, 2738 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2739 2740 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2741 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2742 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2743 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2744 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2745 2746 /* @@@TBD - clean transmission queues (5.b) */ 2747 /* @@@TBD - clean BTB (5.c) */ 2748 2749 /* @@@TBD - verify DMAE requests are done (8) */ 2750 2751 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 2752 /* Need to wait 1ms to guarantee SBs are cleared */ 2753 OSAL_MSLEEP(1); 2754 ecore_ptt_release(p_hwfn, p_ptt); 2755 } 2756 2757 return ECORE_SUCCESS; 2758 } 2759 2760 enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) 2761 { 2762 struct ecore_ptt *p_ptt; 2763 2764 if (IS_VF(p_hwfn->p_dev)) 2765 return ECORE_SUCCESS; 2766 2767 p_ptt = ecore_ptt_acquire(p_hwfn); 2768 if (!p_ptt) 2769 return ECORE_AGAIN; 2770 2771 /* If roce info is allocated it means roce is initialized and should 2772 * be enabled in searcher. 2773 */ 2774 if (p_hwfn->p_rdma_info) { 2775 if (p_hwfn->b_rdma_enabled_in_prs) 2776 ecore_wr(p_hwfn, p_ptt, 2777 p_hwfn->rdma_prs_search_reg, 0x1); 2778 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1); 2779 } 2780 2781 /* Re-open incoming traffic */ 2782 ecore_wr(p_hwfn, p_ptt, 2783 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 2784 ecore_ptt_release(p_hwfn, p_ptt); 2785 2786 return ECORE_SUCCESS; 2787 } 2788 2789 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 2790 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) 2791 { 2792 ecore_ptt_pool_free(p_hwfn); 2793 OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info); 2794 } 2795 2796 /* Setup bar access */ 2797 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn) 2798 { 2799 /* clear indirect access */ 2800 if (ECORE_IS_AH(p_hwfn->p_dev)) { 2801 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2802 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0); 2803 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2804 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0); 2805 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2806 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0); 2807 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2808 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0); 2809 } else { 2810 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2811 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); 2812 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2813 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); 2814 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2815 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); 2816 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2817 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); 2818 } 2819 2820 /* Clean previous pglue_b errors if such exist */ 2821 ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); 2822 2823 /* enable internal target-read */ 2824 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2825 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 2826 } 2827 2828 static void get_function_id(struct ecore_hwfn *p_hwfn) 
2829 { 2830 /* ME Register */ 2831 p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, 2832 PXP_PF_ME_OPAQUE_ADDR); 2833 2834 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 2835 2836 /* Bits 16-19 from the ME registers are the pf_num */ 2837 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 2838 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2839 PXP_CONCRETE_FID_PFID); 2840 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2841 PXP_CONCRETE_FID_PORT); 2842 2843 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2844 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 2845 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 2846 } 2847 2848 static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) 2849 { 2850 u32 *feat_num = p_hwfn->hw_info.feat_num; 2851 struct ecore_sb_cnt_info sb_cnt; 2852 u32 non_l2_sbs = 0; 2853 2854 OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt)); 2855 ecore_int_get_num_sbs(p_hwfn, &sb_cnt); 2856 2857 /* L2 Queues require each: 1 status block. 1 L2 queue */ 2858 if (ECORE_IS_L2_PERSONALITY(p_hwfn)) { 2859 /* Start by allocating VF queues, then PF's */ 2860 feat_num[ECORE_VF_L2_QUE] = 2861 OSAL_MIN_T(u32, 2862 RESC_NUM(p_hwfn, ECORE_L2_QUEUE), 2863 sb_cnt.iov_cnt); 2864 feat_num[ECORE_PF_L2_QUE] = 2865 OSAL_MIN_T(u32, 2866 sb_cnt.cnt - non_l2_sbs, 2867 RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 2868 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE)); 2869 } 2870 2871 if (ECORE_IS_FCOE_PERSONALITY(p_hwfn)) 2872 feat_num[ECORE_FCOE_CQ] = 2873 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2874 ECORE_CMDQS_CQS)); 2875 2876 if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) 2877 feat_num[ECORE_ISCSI_CQ] = 2878 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2879 ECORE_CMDQS_CQS)); 2880 2881 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2882 "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n", 2883 (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), 2884 (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), 2885 (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), 2886 (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ), 2887 (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ), 2888 (int)sb_cnt.cnt); 2889 } 2890 2891 const char *ecore_hw_get_resc_name(enum ecore_resources res_id) 2892 { 2893 switch (res_id) { 2894 case ECORE_L2_QUEUE: 2895 return "L2_QUEUE"; 2896 case ECORE_VPORT: 2897 return "VPORT"; 2898 case ECORE_RSS_ENG: 2899 return "RSS_ENG"; 2900 case ECORE_PQ: 2901 return "PQ"; 2902 case ECORE_RL: 2903 return "RL"; 2904 case ECORE_MAC: 2905 return "MAC"; 2906 case ECORE_VLAN: 2907 return "VLAN"; 2908 case ECORE_RDMA_CNQ_RAM: 2909 return "RDMA_CNQ_RAM"; 2910 case ECORE_ILT: 2911 return "ILT"; 2912 case ECORE_LL2_QUEUE: 2913 return "LL2_QUEUE"; 2914 case ECORE_CMDQS_CQS: 2915 return "CMDQS_CQS"; 2916 case ECORE_RDMA_STATS_QUEUE: 2917 return "RDMA_STATS_QUEUE"; 2918 case ECORE_BDQ: 2919 return "BDQ"; 2920 case ECORE_SB: 2921 return "SB"; 2922 default: 2923 return "UNKNOWN_RESOURCE"; 2924 } 2925 } 2926 2927 static enum _ecore_status_t 2928 __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 2929 struct ecore_ptt *p_ptt, 2930 enum ecore_resources res_id, 2931 u32 resc_max_val, 2932 u32 *p_mcp_resp) 2933 { 2934 enum _ecore_status_t rc; 2935 2936 rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, 2937 resc_max_val, p_mcp_resp); 2938 if (rc != ECORE_SUCCESS) { 2939 DP_NOTICE(p_hwfn, true, 2940 "MFW response failure for a max value setting of resource %d [%s]\n", 2941 res_id, ecore_hw_get_resc_name(res_id)); 2942 return rc; 2943 } 2944 2945 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 2946 DP_INFO(p_hwfn, 
2947 "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n", 2948 res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp); 2949 2950 return ECORE_SUCCESS; 2951 } 2952 2953 static enum _ecore_status_t 2954 ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 2955 struct ecore_ptt *p_ptt) 2956 { 2957 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 2958 u32 resc_max_val, mcp_resp; 2959 u8 res_id; 2960 enum _ecore_status_t rc; 2961 2962 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 2963 /* @DPDK */ 2964 switch (res_id) { 2965 case ECORE_LL2_QUEUE: 2966 case ECORE_RDMA_CNQ_RAM: 2967 case ECORE_RDMA_STATS_QUEUE: 2968 case ECORE_BDQ: 2969 resc_max_val = 0; 2970 break; 2971 default: 2972 continue; 2973 } 2974 2975 rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, 2976 resc_max_val, &mcp_resp); 2977 if (rc != ECORE_SUCCESS) 2978 return rc; 2979 2980 /* There's no point to continue to the next resource if the 2981 * command is not supported by the MFW. 2982 * We do continue if the command is supported but the resource 2983 * is unknown to the MFW. Such a resource will be later 2984 * configured with the default allocation values. 2985 */ 2986 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 2987 return ECORE_NOTIMPL; 2988 } 2989 2990 return ECORE_SUCCESS; 2991 } 2992 2993 static 2994 enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn, 2995 enum ecore_resources res_id, 2996 u32 *p_resc_num, u32 *p_resc_start) 2997 { 2998 u8 num_funcs = p_hwfn->num_funcs_on_engine; 2999 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3000 3001 switch (res_id) { 3002 case ECORE_L2_QUEUE: 3003 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : 3004 MAX_NUM_L2_QUEUES_BB) / num_funcs; 3005 break; 3006 case ECORE_VPORT: 3007 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 3008 MAX_NUM_VPORTS_BB) / num_funcs; 3009 break; 3010 case ECORE_RSS_ENG: 3011 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 3012 ETH_RSS_ENGINE_NUM_BB) / num_funcs; 3013 break; 3014 case ECORE_PQ: 3015 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : 3016 MAX_QM_TX_QUEUES_BB) / num_funcs; 3017 break; 3018 case ECORE_RL: 3019 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 3020 break; 3021 case ECORE_MAC: 3022 case ECORE_VLAN: 3023 /* Each VFC resource can accommodate both a MAC and a VLAN */ 3024 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 3025 break; 3026 case ECORE_ILT: 3027 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 3028 PXP_NUM_ILT_RECORDS_BB) / num_funcs; 3029 break; 3030 case ECORE_LL2_QUEUE: 3031 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 3032 break; 3033 case ECORE_RDMA_CNQ_RAM: 3034 case ECORE_CMDQS_CQS: 3035 /* CNQ/CMDQS are the same resource */ 3036 /* @DPDK */ 3037 *p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs; 3038 break; 3039 case ECORE_RDMA_STATS_QUEUE: 3040 /* @DPDK */ 3041 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 3042 MAX_NUM_VPORTS_BB) / num_funcs; 3043 break; 3044 case ECORE_BDQ: 3045 /* @DPDK */ 3046 *p_resc_num = 0; 3047 break; 3048 default: 3049 break; 3050 } 3051 3052 3053 switch (res_id) { 3054 case ECORE_BDQ: 3055 if (!*p_resc_num) 3056 *p_resc_start = 0; 3057 break; 3058 case ECORE_SB: 3059 /* Since we want its value to reflect whether MFW supports 3060 * the new scheme, have a default of 0. 
3061 */ 3062 *p_resc_num = 0; 3063 break; 3064 default: 3065 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 3066 break; 3067 } 3068 3069 return ECORE_SUCCESS; 3070 } 3071 3072 static enum _ecore_status_t 3073 __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, 3074 bool drv_resc_alloc) 3075 { 3076 u32 dflt_resc_num = 0, dflt_resc_start = 0; 3077 u32 mcp_resp, *p_resc_num, *p_resc_start; 3078 enum _ecore_status_t rc; 3079 3080 p_resc_num = &RESC_NUM(p_hwfn, res_id); 3081 p_resc_start = &RESC_START(p_hwfn, res_id); 3082 3083 rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 3084 &dflt_resc_start); 3085 if (rc != ECORE_SUCCESS) { 3086 DP_ERR(p_hwfn, 3087 "Failed to get default amount for resource %d [%s]\n", 3088 res_id, ecore_hw_get_resc_name(res_id)); 3089 return rc; 3090 } 3091 3092 #ifndef ASIC_ONLY 3093 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3094 *p_resc_num = dflt_resc_num; 3095 *p_resc_start = dflt_resc_start; 3096 goto out; 3097 } 3098 #endif 3099 3100 rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 3101 &mcp_resp, p_resc_num, p_resc_start); 3102 if (rc != ECORE_SUCCESS) { 3103 DP_NOTICE(p_hwfn, true, 3104 "MFW response failure for an allocation request for" 3105 " resource %d [%s]\n", 3106 res_id, ecore_hw_get_resc_name(res_id)); 3107 return rc; 3108 } 3109 3110 /* Default driver values are applied in the following cases: 3111 * - The resource allocation MB command is not supported by the MFW 3112 * - There is an internal error in the MFW while processing the request 3113 * - The resource ID is unknown to the MFW 3114 */ 3115 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3116 DP_INFO(p_hwfn, 3117 "Failed to receive allocation info for resource %d [%s]." 3118 " mcp_resp = 0x%x. Applying default values" 3119 " [%d,%d].\n", 3120 res_id, ecore_hw_get_resc_name(res_id), mcp_resp, 3121 dflt_resc_num, dflt_resc_start); 3122 3123 *p_resc_num = dflt_resc_num; 3124 *p_resc_start = dflt_resc_start; 3125 goto out; 3126 } 3127 3128 if ((*p_resc_num != dflt_resc_num || 3129 *p_resc_start != dflt_resc_start) && 3130 res_id != ECORE_SB) { 3131 DP_INFO(p_hwfn, 3132 "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n", 3133 res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, 3134 *p_resc_start, dflt_resc_num, dflt_resc_start, 3135 drv_resc_alloc ? " - Applying default values" : ""); 3136 if (drv_resc_alloc) { 3137 *p_resc_num = dflt_resc_num; 3138 *p_resc_start = dflt_resc_start; 3139 } 3140 } 3141 out: 3142 return ECORE_SUCCESS; 3143 } 3144 3145 static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, 3146 bool drv_resc_alloc) 3147 { 3148 enum _ecore_status_t rc; 3149 u8 res_id; 3150 3151 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 3152 rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc); 3153 if (rc != ECORE_SUCCESS) 3154 return rc; 3155 } 3156 3157 return ECORE_SUCCESS; 3158 } 3159 3160 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, 3161 struct ecore_ptt *p_ptt, 3162 bool drv_resc_alloc) 3163 { 3164 struct ecore_resc_unlock_params resc_unlock_params; 3165 struct ecore_resc_lock_params resc_lock_params; 3166 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3167 u8 res_id; 3168 enum _ecore_status_t rc; 3169 #ifndef ASIC_ONLY 3170 u32 *resc_start = p_hwfn->hw_info.resc_start; 3171 u32 *resc_num = p_hwfn->hw_info.resc_num; 3172 /* For AH, an equal share of the ILT lines between the maximal number of 3173 * PFs is not enough for RoCE. 
This would be solved by the future 3174 * resource allocation scheme, but isn't currently present for 3175 * FPGA/emulation. For now we keep a number that is sufficient for RoCE 3176 * to work - the BB number of ILT lines divided by its max PFs number. 3177 */ 3178 u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB; 3179 #endif 3180 3181 /* Setting the max values of the soft resources and the following 3182 * resources allocation queries should be atomic. Since several PFs can 3183 * run in parallel - a resource lock is needed. 3184 * If either the resource lock or resource set value commands are not 3185 * supported - skip the max values setting, release the lock if 3186 * needed, and proceed to the queries. Other failures, including a 3187 * failure to acquire the lock, will cause this function to fail. 3188 * Old drivers that don't acquire the lock can run in parallel, and 3189 * their allocation values won't be affected by the updated max values. 3190 */ 3191 ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, 3192 ECORE_RESC_LOCK_RESC_ALLOC, false); 3193 3194 rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); 3195 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3196 return rc; 3197 } else if (rc == ECORE_NOTIMPL) { 3198 DP_INFO(p_hwfn, 3199 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 3200 } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { 3201 DP_NOTICE(p_hwfn, false, 3202 "Failed to acquire the resource lock for the resource allocation commands\n"); 3203 rc = ECORE_BUSY; 3204 goto unlock_and_exit; 3205 } else { 3206 rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt); 3207 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3208 DP_NOTICE(p_hwfn, false, 3209 "Failed to set the max values of the soft resources\n"); 3210 goto unlock_and_exit; 3211 } else if (rc == ECORE_NOTIMPL) { 3212 DP_INFO(p_hwfn, 3213 "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 3214 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, 3215 &resc_unlock_params); 3216 if (rc != ECORE_SUCCESS) 3217 DP_INFO(p_hwfn, 3218 "Failed to release the resource lock for the resource allocation commands\n"); 3219 } 3220 } 3221 3222 rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc); 3223 if (rc != ECORE_SUCCESS) 3224 goto unlock_and_exit; 3225 3226 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 3227 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, 3228 &resc_unlock_params); 3229 if (rc != ECORE_SUCCESS) 3230 DP_INFO(p_hwfn, 3231 "Failed to release the resource lock for the resource allocation commands\n"); 3232 } 3233 3234 #ifndef ASIC_ONLY 3235 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3236 /* Reduced build contains less PQs */ 3237 if (!(p_hwfn->p_dev->b_is_emul_full)) { 3238 resc_num[ECORE_PQ] = 32; 3239 resc_start[ECORE_PQ] = resc_num[ECORE_PQ] * 3240 p_hwfn->enabled_func_idx; 3241 } 3242 3243 /* For AH emulation, since we have a possible maximal number of 3244 * 16 enabled PFs, in case there are not enough ILT lines - 3245 * allocate only first PF as RoCE and have all the other ETH 3246 * only with less ILT lines. 
3247 */ 3248 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full) 3249 resc_num[ECORE_ILT] = OSAL_MAX_T(u32, 3250 resc_num[ECORE_ILT], 3251 roce_min_ilt_lines); 3252 } 3253 3254 /* Correct the common ILT calculation if PF0 has more */ 3255 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && 3256 p_hwfn->p_dev->b_is_emul_full && 3257 p_hwfn->rel_pf_id && resc_num[ECORE_ILT] < roce_min_ilt_lines) 3258 resc_start[ECORE_ILT] += roce_min_ilt_lines - 3259 resc_num[ECORE_ILT]; 3260 #endif 3261 3262 /* Sanity for ILT */ 3263 if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 3264 (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 3265 DP_NOTICE(p_hwfn, true, 3266 "Can't assign ILT pages [%08x,...,%08x]\n", 3267 RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn, 3268 ECORE_ILT) - 3269 1); 3270 return ECORE_INVAL; 3271 } 3272 3273 /* This will also learn the number of SBs from MFW */ 3274 if (ecore_int_igu_reset_cam(p_hwfn, p_ptt)) 3275 return ECORE_INVAL; 3276 3277 ecore_hw_set_feat(p_hwfn); 3278 3279 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3280 "The numbers for each resource are:\n"); 3281 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) 3282 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n", 3283 ecore_hw_get_resc_name(res_id), 3284 RESC_NUM(p_hwfn, res_id), 3285 RESC_START(p_hwfn, res_id)); 3286 3287 return ECORE_SUCCESS; 3288 3289 unlock_and_exit: 3290 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 3291 ecore_mcp_resc_unlock(p_hwfn, p_ptt, 3292 &resc_unlock_params); 3293 return rc; 3294 } 3295 3296 static enum _ecore_status_t 3297 ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, 3298 struct ecore_ptt *p_ptt, 3299 struct ecore_hw_prepare_params *p_params) 3300 { 3301 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode; 3302 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 3303 struct ecore_mcp_link_capabilities *p_caps; 3304 struct ecore_mcp_link_params *link; 3305 enum _ecore_status_t rc; 3306 3307 /* Read global nvm_cfg address */ 3308 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 3309 3310 /* Verify MCP has initialized it */ 3311 if (!nvm_cfg_addr) { 3312 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); 3313 if (p_params->b_relaxed_probe) 3314 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM; 3315 return ECORE_INVAL; 3316 } 3317 3318 /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 3319 3320 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 3321 3322 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3323 OFFSETOF(struct nvm_cfg1, glob) + 3324 OFFSETOF(struct nvm_cfg1_glob, core_cfg); 3325 3326 core_cfg = ecore_rd(p_hwfn, p_ptt, addr); 3327 3328 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 3329 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 3330 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 3331 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G; 3332 break; 3333 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 3334 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G; 3335 break; 3336 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 3337 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G; 3338 break; 3339 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 3340 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F; 3341 break; 3342 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 3343 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E; 3344 break; 3345 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 3346 p_hwfn->hw_info.port_mode = 
ECORE_PORT_MODE_DE_4X20G; 3347 break; 3348 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 3349 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G; 3350 break; 3351 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 3352 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; 3353 break; 3354 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 3355 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G; 3356 break; 3357 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 3358 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; 3359 break; 3360 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 3361 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G; 3362 break; 3363 default: 3364 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n", 3365 core_cfg); 3366 break; 3367 } 3368 3369 /* Read DCBX configuration */ 3370 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3371 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3372 dcbx_mode = ecore_rd(p_hwfn, p_ptt, 3373 port_cfg_addr + 3374 OFFSETOF(struct nvm_cfg1_port, generic_cont0)); 3375 dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK) 3376 >> NVM_CFG1_PORT_DCBX_MODE_OFFSET; 3377 switch (dcbx_mode) { 3378 case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC: 3379 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC; 3380 break; 3381 case NVM_CFG1_PORT_DCBX_MODE_CEE: 3382 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE; 3383 break; 3384 case NVM_CFG1_PORT_DCBX_MODE_IEEE: 3385 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE; 3386 break; 3387 default: 3388 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED; 3389 } 3390 3391 /* Read default link configuration */ 3392 link = &p_hwfn->mcp_info->link_input; 3393 p_caps = &p_hwfn->mcp_info->link_capabilities; 3394 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3395 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3396 link_temp = ecore_rd(p_hwfn, p_ptt, 3397 port_cfg_addr + 3398 OFFSETOF(struct nvm_cfg1_port, speed_cap_mask)); 3399 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 3400 link->speed.advertised_speeds = link_temp; 3401 p_caps->speed_capabilities = link->speed.advertised_speeds; 3402 3403 link_temp = ecore_rd(p_hwfn, p_ptt, 3404 port_cfg_addr + 3405 OFFSETOF(struct nvm_cfg1_port, link_settings)); 3406 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 3407 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 3408 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 3409 link->speed.autoneg = true; 3410 break; 3411 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 3412 link->speed.forced_speed = 1000; 3413 break; 3414 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 3415 link->speed.forced_speed = 10000; 3416 break; 3417 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 3418 link->speed.forced_speed = 25000; 3419 break; 3420 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 3421 link->speed.forced_speed = 40000; 3422 break; 3423 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 3424 link->speed.forced_speed = 50000; 3425 break; 3426 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 3427 link->speed.forced_speed = 100000; 3428 break; 3429 default: 3430 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp); 3431 } 3432 3433 p_caps->default_speed = link->speed.forced_speed; 3434 p_caps->default_speed_autoneg = link->speed.autoneg; 3435 3436 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 3437 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 3438 link->pause.autoneg = !!(link_temp & 3439 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 3440 link->pause.forced_rx = !!(link_temp & 3441 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 3442 link->pause.forced_tx = !!(link_temp & 3443 
NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 3444 link->loopback_mode = 0; 3445 3446 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 3447 link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr + 3448 OFFSETOF(struct nvm_cfg1_port, ext_phy)); 3449 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; 3450 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; 3451 p_caps->default_eee = ECORE_MCP_EEE_ENABLED; 3452 link->eee.enable = true; 3453 switch (link_temp) { 3454 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: 3455 p_caps->default_eee = ECORE_MCP_EEE_DISABLED; 3456 link->eee.enable = false; 3457 break; 3458 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: 3459 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; 3460 break; 3461 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: 3462 p_caps->eee_lpi_timer = 3463 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; 3464 break; 3465 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: 3466 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; 3467 break; 3468 } 3469 3470 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; 3471 link->eee.tx_lpi_enable = link->eee.enable; 3472 link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV; 3473 } else { 3474 p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED; 3475 } 3476 3477 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 3478 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n EEE: %02x [%08x usec]", 3479 link->speed.forced_speed, link->speed.advertised_speeds, 3480 link->speed.autoneg, link->pause.autoneg, 3481 p_caps->default_eee, p_caps->eee_lpi_timer); 3482 3483 /* Read Multi-function information from shmem */ 3484 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3485 OFFSETOF(struct nvm_cfg1, glob) + 3486 OFFSETOF(struct nvm_cfg1_glob, generic_cont0); 3487 3488 generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr); 3489 3490 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 3491 NVM_CFG1_GLOB_MF_MODE_OFFSET; 3492 3493 switch (mf_mode) { 3494 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 3495 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS; 3496 break; 3497 case NVM_CFG1_GLOB_MF_MODE_UFP: 3498 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | 3499 1 << ECORE_MF_UFP_SPECIFIC; 3500 break; 3501 3502 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 3503 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | 3504 1 << ECORE_MF_LLH_PROTO_CLSS | 3505 1 << ECORE_MF_LL2_NON_UNICAST | 3506 1 << ECORE_MF_INTER_PF_SWITCH | 3507 1 << ECORE_MF_DISABLE_ARFS; 3508 break; 3509 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 3510 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | 3511 1 << ECORE_MF_LLH_PROTO_CLSS | 3512 1 << ECORE_MF_LL2_NON_UNICAST; 3513 if (ECORE_IS_BB(p_hwfn->p_dev)) 3514 p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF; 3515 break; 3516 } 3517 DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", 3518 p_hwfn->p_dev->mf_bits); 3519 3520 if (ECORE_IS_CMT(p_hwfn->p_dev)) 3521 p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS); 3522 3523 /* It's funny since we have another switch, but it's easier 3524 * to throw this away in linux this way. Long term, it might be 3525 * better to have have getters for needed ECORE_MF_* fields, 3526 * convert client code and eliminate this. 
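	 * (Concretely: the switch above fills the new mf_bits bitmask, while
	 * the switch below only keeps the legacy mf_mode field in sync for
	 * older ecore clients.)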
3527 */ 3528 switch (mf_mode) { 3529 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 3530 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; 3531 break; 3532 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 3533 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR; 3534 break; 3535 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 3536 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT; 3537 break; 3538 case NVM_CFG1_GLOB_MF_MODE_UFP: 3539 p_hwfn->p_dev->mf_mode = ECORE_MF_UFP; 3540 break; 3541 } 3542 3543 /* Read Multi-function information from shmem */ 3544 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3545 OFFSETOF(struct nvm_cfg1, glob) + 3546 OFFSETOF(struct nvm_cfg1_glob, device_capabilities); 3547 3548 device_capabilities = ecore_rd(p_hwfn, p_ptt, addr); 3549 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 3550 OSAL_SET_BIT(ECORE_DEV_CAP_ETH, 3551 &p_hwfn->hw_info.device_capabilities); 3552 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 3553 OSAL_SET_BIT(ECORE_DEV_CAP_FCOE, 3554 &p_hwfn->hw_info.device_capabilities); 3555 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 3556 OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI, 3557 &p_hwfn->hw_info.device_capabilities); 3558 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 3559 OSAL_SET_BIT(ECORE_DEV_CAP_ROCE, 3560 &p_hwfn->hw_info.device_capabilities); 3561 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP) 3562 OSAL_SET_BIT(ECORE_DEV_CAP_IWARP, 3563 &p_hwfn->hw_info.device_capabilities); 3564 3565 rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 3566 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 3567 rc = ECORE_SUCCESS; 3568 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3569 } 3570 3571 return rc; 3572 } 3573 3574 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn, 3575 struct ecore_ptt *p_ptt) 3576 { 3577 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 3578 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 3579 struct ecore_dev *p_dev = p_hwfn->p_dev; 3580 3581 num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 3582 3583 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 3584 * in the other bits are selected. 3585 * Bits 1-15 are for functions 1-15, respectively, and their value is 3586 * '0' only for enabled functions (function 0 always exists and 3587 * enabled). 3588 * In case of CMT in BB, only the "even" functions are enabled, and thus 3589 * the number of functions for both hwfns is learnt from the same bits. 
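 *
 * Worked example (assumed register value, for illustration only): on a BB
 * device in CMT, an engine starts from num_funcs = 1 (PF0) with
 * eng_mask = 0x5554, which covers the even functions other than PF0. With
 * reg_function_hide = 0xfff1, bit 0 is set so the bypass values are valid,
 * and (~0xfff1 & 0x5554) = 0x0004, i.e. one additional even function (PF2)
 * is enabled, giving num_funcs_on_engine = 2.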
3590 */ 3591 if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) { 3592 reg_function_hide = ecore_rd(p_hwfn, p_ptt, 3593 MISCS_REG_FUNCTION_HIDE_BB_K2); 3594 } else { /* E5 */ 3595 reg_function_hide = 0; 3596 } 3597 3598 if (reg_function_hide & 0x1) { 3599 if (ECORE_IS_BB(p_dev)) { 3600 if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) { 3601 num_funcs = 0; 3602 eng_mask = 0xaaaa; 3603 } else { 3604 num_funcs = 1; 3605 eng_mask = 0x5554; 3606 } 3607 } else { 3608 num_funcs = 1; 3609 eng_mask = 0xfffe; 3610 } 3611 3612 /* Get the number of the enabled functions on the engine */ 3613 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; 3614 while (tmp) { 3615 if (tmp & 0x1) 3616 num_funcs++; 3617 tmp >>= 0x1; 3618 } 3619 3620 /* Get the PF index within the enabled functions */ 3621 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; 3622 tmp = reg_function_hide & eng_mask & low_pfs_mask; 3623 while (tmp) { 3624 if (tmp & 0x1) 3625 enabled_func_idx--; 3626 tmp >>= 0x1; 3627 } 3628 } 3629 3630 p_hwfn->num_funcs_on_engine = num_funcs; 3631 p_hwfn->enabled_func_idx = enabled_func_idx; 3632 3633 #ifndef ASIC_ONLY 3634 if (CHIP_REV_IS_FPGA(p_dev)) { 3635 DP_NOTICE(p_hwfn, false, 3636 "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n"); 3637 p_hwfn->num_funcs_on_engine = 4; 3638 } 3639 #endif 3640 3641 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3642 "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 3643 p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, 3644 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 3645 } 3646 3647 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn, 3648 struct ecore_ptt *p_ptt) 3649 { 3650 struct ecore_dev *p_dev = p_hwfn->p_dev; 3651 u32 port_mode; 3652 3653 #ifndef ASIC_ONLY 3654 /* Read the port mode */ 3655 if (CHIP_REV_IS_FPGA(p_dev)) 3656 port_mode = 4; 3657 else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev)) 3658 /* In CMT on emulation, assume 1 port */ 3659 port_mode = 1; 3660 else 3661 #endif 3662 port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); 3663 3664 if (port_mode < 3) { 3665 p_dev->num_ports_in_engine = 1; 3666 } else if (port_mode <= 5) { 3667 p_dev->num_ports_in_engine = 2; 3668 } else { 3669 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n", 3670 p_dev->num_ports_in_engine); 3671 3672 /* Default num_ports_in_engine to something */ 3673 p_dev->num_ports_in_engine = 1; 3674 } 3675 } 3676 3677 static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn, 3678 struct ecore_ptt *p_ptt) 3679 { 3680 struct ecore_dev *p_dev = p_hwfn->p_dev; 3681 u32 port; 3682 int i; 3683 3684 p_dev->num_ports_in_engine = 0; 3685 3686 #ifndef ASIC_ONLY 3687 if (CHIP_REV_IS_EMUL(p_dev)) { 3688 port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); 3689 switch ((port & 0xf000) >> 12) { 3690 case 1: 3691 p_dev->num_ports_in_engine = 1; 3692 break; 3693 case 3: 3694 p_dev->num_ports_in_engine = 2; 3695 break; 3696 case 0xf: 3697 p_dev->num_ports_in_engine = 4; 3698 break; 3699 default: 3700 DP_NOTICE(p_hwfn, false, 3701 "Unknown port mode in ECO_RESERVED %08x\n", 3702 port); 3703 } 3704 } else 3705 #endif 3706 for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 3707 port = ecore_rd(p_hwfn, p_ptt, 3708 CNIG_REG_NIG_PORT0_CONF_K2_E5 + 3709 (i * 4)); 3710 if (port & 1) 3711 p_dev->num_ports_in_engine++; 3712 } 3713 3714 if (!p_dev->num_ports_in_engine) { 3715 DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n"); 3716 3717 /* Default num_ports_in_engine to something */ 3718 
p_dev->num_ports_in_engine = 1; 3719 } 3720 } 3721 3722 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn, 3723 struct ecore_ptt *p_ptt) 3724 { 3725 struct ecore_dev *p_dev = p_hwfn->p_dev; 3726 3727 /* Determine the number of ports per engine */ 3728 if (ECORE_IS_BB(p_dev)) 3729 ecore_hw_info_port_num_bb(p_hwfn, p_ptt); 3730 else 3731 ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt); 3732 3733 /* Get the total number of ports of the device */ 3734 if (ECORE_IS_CMT(p_dev)) { 3735 /* In CMT there is always only one port */ 3736 p_dev->num_ports = 1; 3737 #ifndef ASIC_ONLY 3738 } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) { 3739 p_dev->num_ports = p_dev->num_ports_in_engine * 3740 ecore_device_num_engines(p_dev); 3741 #endif 3742 } else { 3743 u32 addr, global_offsize, global_addr; 3744 3745 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 3746 PUBLIC_GLOBAL); 3747 global_offsize = ecore_rd(p_hwfn, p_ptt, addr); 3748 global_addr = SECTION_ADDR(global_offsize, 0); 3749 addr = global_addr + OFFSETOF(struct public_global, max_ports); 3750 p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr); 3751 } 3752 } 3753 3754 static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn, 3755 struct ecore_ptt *p_ptt) 3756 { 3757 struct ecore_mcp_link_capabilities *p_caps; 3758 u32 eee_status; 3759 3760 p_caps = &p_hwfn->mcp_info->link_capabilities; 3761 if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED) 3762 return; 3763 3764 p_caps->eee_speed_caps = 0; 3765 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 3766 OFFSETOF(struct public_port, eee_status)); 3767 eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> 3768 EEE_SUPPORTED_SPEED_OFFSET; 3769 if (eee_status & EEE_1G_SUPPORTED) 3770 p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV; 3771 if (eee_status & EEE_10G_ADV) 3772 p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV; 3773 } 3774 3775 static enum _ecore_status_t 3776 ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3777 enum ecore_pci_personality personality, 3778 struct ecore_hw_prepare_params *p_params) 3779 { 3780 bool drv_resc_alloc = p_params->drv_resc_alloc; 3781 enum _ecore_status_t rc; 3782 3783 /* Since all information is common, only first hwfns should do this */ 3784 if (IS_LEAD_HWFN(p_hwfn)) { 3785 rc = ecore_iov_hw_info(p_hwfn); 3786 if (rc != ECORE_SUCCESS) { 3787 if (p_params->b_relaxed_probe) 3788 p_params->p_relaxed_res = 3789 ECORE_HW_PREPARE_BAD_IOV; 3790 else 3791 return rc; 3792 } 3793 } 3794 3795 if (IS_LEAD_HWFN(p_hwfn)) 3796 ecore_hw_info_port_num(p_hwfn, p_ptt); 3797 3798 ecore_mcp_get_capabilities(p_hwfn, p_ptt); 3799 3800 #ifndef ASIC_ONLY 3801 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) { 3802 #endif 3803 rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params); 3804 if (rc != ECORE_SUCCESS) 3805 return rc; 3806 #ifndef ASIC_ONLY 3807 } 3808 #endif 3809 3810 rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); 3811 if (rc != ECORE_SUCCESS) { 3812 if (p_params->b_relaxed_probe) 3813 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU; 3814 else 3815 return rc; 3816 } 3817 3818 #ifndef ASIC_ONLY 3819 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) { 3820 #endif 3821 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, 3822 p_hwfn->mcp_info->func_info.mac, ETH_ALEN); 3823 #ifndef ASIC_ONLY 3824 } else { 3825 static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 }; 3826 3827 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN); 3828 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id; 3829 } 3830 #endif 3831 3832 if 
(ecore_mcp_is_init(p_hwfn)) { 3833 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET) 3834 p_hwfn->hw_info.ovlan = 3835 p_hwfn->mcp_info->func_info.ovlan; 3836 3837 ecore_mcp_cmd_port_init(p_hwfn, p_ptt); 3838 3839 ecore_mcp_get_eee_caps(p_hwfn, p_ptt); 3840 3841 ecore_mcp_read_ufp_config(p_hwfn, p_ptt); 3842 } 3843 3844 if (personality != ECORE_PCI_DEFAULT) { 3845 p_hwfn->hw_info.personality = personality; 3846 } else if (ecore_mcp_is_init(p_hwfn)) { 3847 enum ecore_pci_personality protocol; 3848 3849 protocol = p_hwfn->mcp_info->func_info.protocol; 3850 p_hwfn->hw_info.personality = protocol; 3851 } 3852 3853 #ifndef ASIC_ONLY 3854 /* To overcome ILT lack for emulation, at least until we have a definite 3855 * answer from the system about it, allow only PF0 to be RoCE. 3856 */ 3857 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) { 3858 if (!p_hwfn->rel_pf_id) 3859 p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE; 3860 else 3861 p_hwfn->hw_info.personality = ECORE_PCI_ETH; 3862 } 3863 #endif 3864 3865 /* although in BB some constellations may support more than 4 tcs, 3866 * that can result in a performance penalty in some cases. 4 3867 * represents a good tradeoff between performance and flexibility. 3868 */ 3869 p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 3870 3871 /* start out with a single active tc. This can be increased either 3872 * by dcbx negotiation or by the upper layer driver. 3873 */ 3874 p_hwfn->hw_info.num_active_tc = 1; 3875 3876 ecore_get_num_funcs(p_hwfn, p_ptt); 3877 3878 if (ecore_mcp_is_init(p_hwfn)) 3879 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 3880 3881 /* In case of forcing the driver's default resource allocation, calling 3882 * ecore_hw_get_resc() should come after initializing the personality 3883 * and after getting the number of functions, since the calculation of 3884 * the resources/features depends on them. 3885 * This order is not harmful if not forcing.
3886 */ 3887 rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc); 3888 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 3889 rc = ECORE_SUCCESS; 3890 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3891 } 3892 3893 return rc; 3894 } 3895 3896 static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn, 3897 struct ecore_ptt *p_ptt) 3898 { 3899 struct ecore_dev *p_dev = p_hwfn->p_dev; 3900 u16 device_id_mask; 3901 u32 tmp; 3902 3903 /* Read Vendor Id / Device Id */ 3904 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET, 3905 &p_dev->vendor_id); 3906 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET, 3907 &p_dev->device_id); 3908 3909 /* Determine type */ 3910 device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK; 3911 switch (device_id_mask) { 3912 case ECORE_DEV_ID_MASK_BB: 3913 p_dev->type = ECORE_DEV_TYPE_BB; 3914 break; 3915 case ECORE_DEV_ID_MASK_AH: 3916 p_dev->type = ECORE_DEV_TYPE_AH; 3917 break; 3918 default: 3919 DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n", 3920 p_dev->device_id); 3921 return ECORE_ABORTED; 3922 } 3923 3924 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); 3925 p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM); 3926 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); 3927 p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV); 3928 3929 /* Learn number of HW-functions */ 3930 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); 3931 3932 if (tmp & (1 << p_hwfn->rel_pf_id)) { 3933 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); 3934 p_dev->num_hwfns = 2; 3935 } else { 3936 p_dev->num_hwfns = 1; 3937 } 3938 3939 #ifndef ASIC_ONLY 3940 if (CHIP_REV_IS_EMUL(p_dev)) { 3941 /* For some reason we have problems with this register 3942 * in B0 emulation; Simply assume no CMT 3943 */ 3944 DP_NOTICE(p_dev->hwfns, false, 3945 "device on emul - assume no CMT\n"); 3946 p_dev->num_hwfns = 1; 3947 } 3948 #endif 3949 3950 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG); 3951 p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID); 3952 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); 3953 p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL); 3954 3955 DP_INFO(p_dev->hwfns, 3956 "Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n", 3957 ECORE_IS_BB(p_dev) ? 
"BB" : "AH", 3958 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, 3959 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, 3960 p_dev->chip_metal); 3961 3962 if (ECORE_IS_BB_A0(p_dev)) { 3963 DP_NOTICE(p_dev->hwfns, false, 3964 "The chip type/rev (BB A0) is not supported!\n"); 3965 return ECORE_ABORTED; 3966 } 3967 #ifndef ASIC_ONLY 3968 if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) 3969 ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1); 3970 3971 if (CHIP_REV_IS_EMUL(p_dev)) { 3972 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); 3973 if (tmp & (1 << 29)) { 3974 DP_NOTICE(p_hwfn, false, 3975 "Emulation: Running on a FULL build\n"); 3976 p_dev->b_is_emul_full = true; 3977 } else { 3978 DP_NOTICE(p_hwfn, false, 3979 "Emulation: Running on a REDUCED build\n"); 3980 } 3981 } 3982 #endif 3983 3984 return ECORE_SUCCESS; 3985 } 3986 3987 #ifndef LINUX_REMOVE 3988 void ecore_prepare_hibernate(struct ecore_dev *p_dev) 3989 { 3990 int j; 3991 3992 if (IS_VF(p_dev)) 3993 return; 3994 3995 for_each_hwfn(p_dev, j) { 3996 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 3997 3998 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, 3999 "Mark hw/fw uninitialized\n"); 4000 4001 p_hwfn->hw_init_done = false; 4002 4003 ecore_ptt_invalidate(p_hwfn); 4004 } 4005 } 4006 #endif 4007 4008 static enum _ecore_status_t 4009 ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, 4010 void OSAL_IOMEM * p_regview, 4011 void OSAL_IOMEM * p_doorbells, 4012 struct ecore_hw_prepare_params *p_params) 4013 { 4014 struct ecore_mdump_retain_data mdump_retain; 4015 struct ecore_dev *p_dev = p_hwfn->p_dev; 4016 struct ecore_mdump_info mdump_info; 4017 enum _ecore_status_t rc = ECORE_SUCCESS; 4018 4019 /* Split PCI bars evenly between hwfns */ 4020 p_hwfn->regview = p_regview; 4021 p_hwfn->doorbells = p_doorbells; 4022 4023 if (IS_VF(p_dev)) 4024 return ecore_vf_hw_prepare(p_hwfn); 4025 4026 /* Validate that chip access is feasible */ 4027 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 4028 DP_ERR(p_hwfn, 4029 "Reading the ME register returns all Fs; Preventing further chip access\n"); 4030 if (p_params->b_relaxed_probe) 4031 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME; 4032 return ECORE_INVAL; 4033 } 4034 4035 get_function_id(p_hwfn); 4036 4037 /* Allocate PTT pool */ 4038 rc = ecore_ptt_pool_alloc(p_hwfn); 4039 if (rc) { 4040 DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); 4041 if (p_params->b_relaxed_probe) 4042 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4043 goto err0; 4044 } 4045 4046 /* Allocate the main PTT */ 4047 p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 4048 4049 /* First hwfn learns basic information, e.g., number of hwfns */ 4050 if (!p_hwfn->my_id) { 4051 rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); 4052 if (rc != ECORE_SUCCESS) { 4053 if (p_params->b_relaxed_probe) 4054 p_params->p_relaxed_res = 4055 ECORE_HW_PREPARE_FAILED_DEV; 4056 goto err1; 4057 } 4058 } 4059 4060 ecore_hw_hwfn_prepare(p_hwfn); 4061 4062 /* Initialize MCP structure */ 4063 rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 4064 if (rc) { 4065 DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); 4066 if (p_params->b_relaxed_probe) 4067 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4068 goto err1; 4069 } 4070 4071 /* Read the device configuration information from the HW and SHMEM */ 4072 rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, 4073 p_params->personality, p_params); 4074 if (rc) { 4075 DP_NOTICE(p_hwfn, true, "Failed to get HW 
information\n"); 4076 goto err2; 4077 } 4078 4079 /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is 4080 * called, since among others it sets the ports number in an engine. 4081 */ 4082 if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) && 4083 !p_dev->recov_in_prog) { 4084 rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 4085 if (rc != ECORE_SUCCESS) 4086 DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); 4087 } 4088 4089 /* Check if mdump logs/data are present and update the epoch value */ 4090 if (IS_LEAD_HWFN(p_hwfn)) { 4091 #ifndef ASIC_ONLY 4092 if (!CHIP_REV_IS_EMUL(p_dev)) { 4093 #endif 4094 rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, 4095 &mdump_info); 4096 if (rc == ECORE_SUCCESS && mdump_info.num_of_logs) 4097 DP_NOTICE(p_hwfn, false, 4098 "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); 4099 4100 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt, 4101 &mdump_retain); 4102 if (rc == ECORE_SUCCESS && mdump_retain.valid) 4103 DP_NOTICE(p_hwfn, false, 4104 "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n", 4105 mdump_retain.epoch, mdump_retain.pf, 4106 mdump_retain.status); 4107 4108 ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, 4109 p_params->epoch); 4110 #ifndef ASIC_ONLY 4111 } 4112 #endif 4113 } 4114 4115 /* Allocate the init RT array and initialize the init-ops engine */ 4116 rc = ecore_init_alloc(p_hwfn); 4117 if (rc) { 4118 DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); 4119 if (p_params->b_relaxed_probe) 4120 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4121 goto err2; 4122 } 4123 #ifndef ASIC_ONLY 4124 if (CHIP_REV_IS_FPGA(p_dev)) { 4125 DP_NOTICE(p_hwfn, false, 4126 "FPGA: workaround; Prevent DMAE parities\n"); 4127 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5, 4128 7); 4129 4130 DP_NOTICE(p_hwfn, false, 4131 "FPGA: workaround: Set VF bar0 size\n"); 4132 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4133 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4); 4134 } 4135 #endif 4136 4137 return rc; 4138 err2: 4139 if (IS_LEAD_HWFN(p_hwfn)) 4140 ecore_iov_free_hw_info(p_dev); 4141 ecore_mcp_free(p_hwfn); 4142 err1: 4143 ecore_hw_hwfn_free(p_hwfn); 4144 err0: 4145 return rc; 4146 } 4147 4148 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, 4149 struct ecore_hw_prepare_params *p_params) 4150 { 4151 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4152 enum _ecore_status_t rc; 4153 4154 p_dev->chk_reg_fifo = p_params->chk_reg_fifo; 4155 p_dev->allow_mdump = p_params->allow_mdump; 4156 4157 if (p_params->b_relaxed_probe) 4158 p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; 4159 4160 /* Store the precompiled init data ptrs */ 4161 if (IS_PF(p_dev)) 4162 ecore_init_iro_array(p_dev); 4163 4164 /* Initialize the first hwfn - will learn number of hwfns */ 4165 rc = ecore_hw_prepare_single(p_hwfn, 4166 p_dev->regview, 4167 p_dev->doorbells, p_params); 4168 if (rc != ECORE_SUCCESS) 4169 return rc; 4170 4171 p_params->personality = p_hwfn->hw_info.personality; 4172 4173 /* initilalize 2nd hwfn if necessary */ 4174 if (ECORE_IS_CMT(p_dev)) { 4175 void OSAL_IOMEM *p_regview, *p_doorbell; 4176 u8 OSAL_IOMEM *addr; 4177 4178 /* adjust bar offset for second engine */ 4179 addr = (u8 OSAL_IOMEM *)p_dev->regview + 4180 ecore_hw_bar_size(p_hwfn, 4181 p_hwfn->p_main_ptt, 4182 BAR_ID_0) / 2; 4183 p_regview = (void OSAL_IOMEM *)addr; 4184 4185 addr = (u8 OSAL_IOMEM *)p_dev->doorbells + 4186 ecore_hw_bar_size(p_hwfn, 4187 p_hwfn->p_main_ptt, 4188 
BAR_ID_1) / 2; 4189 p_doorbell = (void OSAL_IOMEM *)addr; 4190 4191 /* prepare second hw function */ 4192 rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, 4193 p_doorbell, p_params); 4194 4195 /* in case of error, need to free the previously 4196 * initiliazed hwfn 0. 4197 */ 4198 if (rc != ECORE_SUCCESS) { 4199 if (p_params->b_relaxed_probe) 4200 p_params->p_relaxed_res = 4201 ECORE_HW_PREPARE_FAILED_ENG2; 4202 4203 if (IS_PF(p_dev)) { 4204 ecore_init_free(p_hwfn); 4205 ecore_mcp_free(p_hwfn); 4206 ecore_hw_hwfn_free(p_hwfn); 4207 } else { 4208 DP_NOTICE(p_dev, true, 4209 "What do we need to free when VF hwfn1 init fails\n"); 4210 } 4211 return rc; 4212 } 4213 } 4214 4215 return rc; 4216 } 4217 4218 void ecore_hw_remove(struct ecore_dev *p_dev) 4219 { 4220 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4221 int i; 4222 4223 if (IS_PF(p_dev)) 4224 ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 4225 ECORE_OV_DRIVER_STATE_NOT_LOADED); 4226 4227 for_each_hwfn(p_dev, i) { 4228 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 4229 4230 if (IS_VF(p_dev)) { 4231 ecore_vf_pf_release(p_hwfn); 4232 continue; 4233 } 4234 4235 ecore_init_free(p_hwfn); 4236 ecore_hw_hwfn_free(p_hwfn); 4237 ecore_mcp_free(p_hwfn); 4238 4239 #ifdef CONFIG_ECORE_LOCK_ALLOC 4240 OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); 4241 #endif 4242 } 4243 4244 ecore_iov_free_hw_info(p_dev); 4245 } 4246 4247 static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev, 4248 struct ecore_chain *p_chain) 4249 { 4250 void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL; 4251 dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 4252 struct ecore_chain_next *p_next; 4253 u32 size, i; 4254 4255 if (!p_virt) 4256 return; 4257 4258 size = p_chain->elem_size * p_chain->usable_per_page; 4259 4260 for (i = 0; i < p_chain->page_cnt; i++) { 4261 if (!p_virt) 4262 break; 4263 4264 p_next = (struct ecore_chain_next *)((u8 *)p_virt + size); 4265 p_virt_next = p_next->next_virt; 4266 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 4267 4268 OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys, 4269 ECORE_CHAIN_PAGE_SIZE); 4270 4271 p_virt = p_virt_next; 4272 p_phys = p_phys_next; 4273 } 4274 } 4275 4276 static void ecore_chain_free_single(struct ecore_dev *p_dev, 4277 struct ecore_chain *p_chain) 4278 { 4279 if (!p_chain->p_virt_addr) 4280 return; 4281 4282 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr, 4283 p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE); 4284 } 4285 4286 static void ecore_chain_free_pbl(struct ecore_dev *p_dev, 4287 struct ecore_chain *p_chain) 4288 { 4289 void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 4290 u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table; 4291 u32 page_cnt = p_chain->page_cnt, i, pbl_size; 4292 4293 if (!pp_virt_addr_tbl) 4294 return; 4295 4296 if (!p_pbl_virt) 4297 goto out; 4298 4299 for (i = 0; i < page_cnt; i++) { 4300 if (!pp_virt_addr_tbl[i]) 4301 break; 4302 4303 OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i], 4304 *(dma_addr_t *)p_pbl_virt, 4305 ECORE_CHAIN_PAGE_SIZE); 4306 4307 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4308 } 4309 4310 pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4311 4312 if (!p_chain->b_external_pbl) 4313 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table, 4314 p_chain->pbl_sp.p_phys_table, pbl_size); 4315 out: 4316 OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); 4317 } 4318 4319 void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4320 { 4321 switch (p_chain->mode) { 4322 case 
ECORE_CHAIN_MODE_NEXT_PTR: 4323 ecore_chain_free_next_ptr(p_dev, p_chain); 4324 break; 4325 case ECORE_CHAIN_MODE_SINGLE: 4326 ecore_chain_free_single(p_dev, p_chain); 4327 break; 4328 case ECORE_CHAIN_MODE_PBL: 4329 ecore_chain_free_pbl(p_dev, p_chain); 4330 break; 4331 } 4332 } 4333 4334 static enum _ecore_status_t 4335 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev, 4336 enum ecore_chain_cnt_type cnt_type, 4337 osal_size_t elem_size, u32 page_cnt) 4338 { 4339 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 4340 4341 /* The actual chain size can be larger than the maximal possible value 4342 * after rounding up the requested number of elements to pages, and after 4343 * taking into account the unusable elements (next-ptr elements). 4344 * The size of a "u16" chain can be (U16_MAX + 1) since the chain 4345 * size/capacity fields are of a u32 type. 4346 */ 4347 if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 && 4348 chain_size > ((u32)ECORE_U16_MAX + 1)) || 4349 (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 && 4350 chain_size > ECORE_U32_MAX)) { 4351 DP_NOTICE(p_dev, true, 4352 "The actual chain size (0x%lx) is larger than the maximal possible value\n", 4353 (unsigned long)chain_size); 4354 return ECORE_INVAL; 4355 } 4356 4357 return ECORE_SUCCESS; 4358 } 4359 4360 static enum _ecore_status_t 4361 ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4362 { 4363 void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL; 4364 dma_addr_t p_phys = 0; 4365 u32 i; 4366 4367 for (i = 0; i < p_chain->page_cnt; i++) { 4368 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4369 ECORE_CHAIN_PAGE_SIZE); 4370 if (!p_virt) { 4371 DP_NOTICE(p_dev, true, 4372 "Failed to allocate chain memory\n"); 4373 return ECORE_NOMEM; 4374 } 4375 4376 if (i == 0) { 4377 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4378 ecore_chain_reset(p_chain); 4379 } else { 4380 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4381 p_virt, p_phys); 4382 } 4383 4384 p_virt_prev = p_virt; 4385 } 4386 /* Last page's next element should point to the beginning of the 4387 * chain. 4388 */ 4389 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4390 p_chain->p_virt_addr, 4391 p_chain->p_phys_addr); 4392 4393 return ECORE_SUCCESS; 4394 } 4395 4396 static enum _ecore_status_t 4397 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4398 { 4399 dma_addr_t p_phys = 0; 4400 void *p_virt = OSAL_NULL; 4401 4402 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); 4403 if (!p_virt) { 4404 DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); 4405 return ECORE_NOMEM; 4406 } 4407 4408 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4409 ecore_chain_reset(p_chain); 4410 4411 return ECORE_SUCCESS; 4412 } 4413 4414 static enum _ecore_status_t 4415 ecore_chain_alloc_pbl(struct ecore_dev *p_dev, 4416 struct ecore_chain *p_chain, 4417 struct ecore_chain_ext_pbl *ext_pbl) 4418 { 4419 u32 page_cnt = p_chain->page_cnt, size, i; 4420 dma_addr_t p_phys = 0, p_pbl_phys = 0; 4421 void **pp_virt_addr_tbl = OSAL_NULL; 4422 u8 *p_pbl_virt = OSAL_NULL; 4423 void *p_virt = OSAL_NULL; 4424 4425 size = page_cnt * sizeof(*pp_virt_addr_tbl); 4426 pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); 4427 if (!pp_virt_addr_tbl) { 4428 DP_NOTICE(p_dev, true, 4429 "Failed to allocate memory for the chain virtual addresses table\n"); 4430 return ECORE_NOMEM; 4431 } 4432 4433 /* The allocation of the PBL table is done with its full size, since it 4434 * is expected to be successive.
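 * (For reference: each PBL entry holds the DMA address of one chain page,
 * ECORE_CHAIN_PBL_ENTRY_SIZE bytes per entry, so a chain of page_cnt pages
 * needs page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE contiguous bytes; e.g.,
 * assuming 8-byte entries, a 16-page chain needs a 128-byte table.)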
4435 * ecore_chain_init_pbl_mem() is called even in a case of an allocation 4436 * failure, since pp_virt_addr_tbl was previously allocated, and it 4437 * should be saved to allow its freeing during the error flow. 4438 */ 4439 size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4440 4441 if (ext_pbl == OSAL_NULL) { 4442 p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); 4443 } else { 4444 p_pbl_virt = ext_pbl->p_pbl_virt; 4445 p_pbl_phys = ext_pbl->p_pbl_phys; 4446 p_chain->b_external_pbl = true; 4447 } 4448 4449 ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 4450 pp_virt_addr_tbl); 4451 if (!p_pbl_virt) { 4452 DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n"); 4453 return ECORE_NOMEM; 4454 } 4455 4456 for (i = 0; i < page_cnt; i++) { 4457 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4458 ECORE_CHAIN_PAGE_SIZE); 4459 if (!p_virt) { 4460 DP_NOTICE(p_dev, true, 4461 "Failed to allocate chain memory\n"); 4462 return ECORE_NOMEM; 4463 } 4464 4465 if (i == 0) { 4466 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4467 ecore_chain_reset(p_chain); 4468 } 4469 4470 /* Fill the PBL table with the physical address of the page */ 4471 *(dma_addr_t *)p_pbl_virt = p_phys; 4472 /* Keep the virtual address of the page */ 4473 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 4474 4475 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4476 } 4477 4478 return ECORE_SUCCESS; 4479 } 4480 4481 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, 4482 enum ecore_chain_use_mode intended_use, 4483 enum ecore_chain_mode mode, 4484 enum ecore_chain_cnt_type cnt_type, 4485 u32 num_elems, osal_size_t elem_size, 4486 struct ecore_chain *p_chain, 4487 struct ecore_chain_ext_pbl *ext_pbl) 4488 { 4489 u32 page_cnt; 4490 enum _ecore_status_t rc = ECORE_SUCCESS; 4491 4492 if (mode == ECORE_CHAIN_MODE_SINGLE) 4493 page_cnt = 1; 4494 else 4495 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 4496 4497 rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, 4498 page_cnt); 4499 if (rc) { 4500 DP_NOTICE(p_dev, true, 4501 "Cannot allocate a chain with the given arguments:\n" 4502 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 4503 intended_use, mode, cnt_type, num_elems, elem_size); 4504 return rc; 4505 } 4506 4507 ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use, 4508 mode, cnt_type, p_dev->dp_ctx); 4509 4510 switch (mode) { 4511 case ECORE_CHAIN_MODE_NEXT_PTR: 4512 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain); 4513 break; 4514 case ECORE_CHAIN_MODE_SINGLE: 4515 rc = ecore_chain_alloc_single(p_dev, p_chain); 4516 break; 4517 case ECORE_CHAIN_MODE_PBL: 4518 rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl); 4519 break; 4520 } 4521 if (rc) 4522 goto nomem; 4523 4524 return ECORE_SUCCESS; 4525 4526 nomem: 4527 ecore_chain_free(p_dev, p_chain); 4528 return rc; 4529 } 4530 4531 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, 4532 u16 src_id, u16 *dst_id) 4533 { 4534 if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { 4535 u16 min, max; 4536 4537 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE); 4538 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 4539 DP_NOTICE(p_hwfn, true, 4540 "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 4541 src_id, min, max); 4542 4543 return ECORE_INVAL; 4544 } 4545 4546 *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id; 4547 4548 return ECORE_SUCCESS; 4549 } 4550 4551 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, 4552 u8 src_id, u8 *dst_id) 4553 { 4554 if 
(src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { 4555 u8 min, max; 4556 4557 min = (u8)RESC_START(p_hwfn, ECORE_VPORT); 4558 max = min + RESC_NUM(p_hwfn, ECORE_VPORT); 4559 DP_NOTICE(p_hwfn, true, 4560 "vport id [%d] is not valid, available indices [%d - %d]\n", 4561 src_id, min, max); 4562 4563 return ECORE_INVAL; 4564 } 4565 4566 *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id; 4567 4568 return ECORE_SUCCESS; 4569 } 4570 4571 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, 4572 u8 src_id, u8 *dst_id) 4573 { 4574 if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) { 4575 u8 min, max; 4576 4577 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG); 4578 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG); 4579 DP_NOTICE(p_hwfn, true, 4580 "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 4581 src_id, min, max); 4582 4583 return ECORE_INVAL; 4584 } 4585 4586 *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id; 4587 4588 return ECORE_SUCCESS; 4589 } 4590 4591 static enum _ecore_status_t 4592 ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4593 struct ecore_ptt *p_ptt, u32 high, u32 low, 4594 u32 *p_entry_num) 4595 { 4596 u32 en; 4597 int i; 4598 4599 /* Find a free entry and utilize it */ 4600 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4601 en = ecore_rd(p_hwfn, p_ptt, 4602 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4603 i * sizeof(u32)); 4604 if (en) 4605 continue; 4606 ecore_wr(p_hwfn, p_ptt, 4607 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4608 2 * i * sizeof(u32), low); 4609 ecore_wr(p_hwfn, p_ptt, 4610 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4611 (2 * i + 1) * sizeof(u32), high); 4612 ecore_wr(p_hwfn, p_ptt, 4613 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4614 i * sizeof(u32), 0); 4615 ecore_wr(p_hwfn, p_ptt, 4616 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4617 i * sizeof(u32), 0); 4618 ecore_wr(p_hwfn, p_ptt, 4619 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4620 i * sizeof(u32), 1); 4621 break; 4622 } 4623 4624 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4625 return ECORE_NORESOURCES; 4626 4627 *p_entry_num = i; 4628 4629 return ECORE_SUCCESS; 4630 } 4631 4632 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn, 4633 struct ecore_ptt *p_ptt, u8 *p_filter) 4634 { 4635 u32 high, low, entry_num; 4636 enum _ecore_status_t rc = ECORE_SUCCESS; 4637 4638 if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, 4639 &p_hwfn->p_dev->mf_bits)) 4640 return ECORE_SUCCESS; 4641 4642 high = p_filter[1] | (p_filter[0] << 8); 4643 low = p_filter[5] | (p_filter[4] << 8) | 4644 (p_filter[3] << 16) | (p_filter[2] << 24); 4645 4646 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4647 rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low, 4648 &entry_num); 4649 if (rc != ECORE_SUCCESS) { 4650 DP_NOTICE(p_hwfn, false, 4651 "Failed to find an empty LLH filter to utilize\n"); 4652 return rc; 4653 } 4654 4655 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4656 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n", 4657 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4658 p_filter[4], p_filter[5], entry_num); 4659 4660 return rc; 4661 } 4662 4663 static enum _ecore_status_t 4664 ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4665 struct ecore_ptt *p_ptt, u32 high, u32 low, 4666 u32 *p_entry_num) 4667 { 4668 int i; 4669 4670 /* Find the entry and clean it */ 4671 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4672 if (ecore_rd(p_hwfn, p_ptt, 4673 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4674 2 * i * sizeof(u32)) != low) 4675 continue; 4676 if (ecore_rd(p_hwfn, 
p_ptt, 4677 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4678 (2 * i + 1) * sizeof(u32)) != high) 4679 continue; 4680 4681 ecore_wr(p_hwfn, p_ptt, 4682 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4683 ecore_wr(p_hwfn, p_ptt, 4684 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4685 2 * i * sizeof(u32), 0); 4686 ecore_wr(p_hwfn, p_ptt, 4687 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4688 (2 * i + 1) * sizeof(u32), 0); 4689 break; 4690 } 4691 4692 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4693 return ECORE_INVAL; 4694 4695 *p_entry_num = i; 4696 4697 return ECORE_SUCCESS; 4698 } 4699 4700 void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn, 4701 struct ecore_ptt *p_ptt, u8 *p_filter) 4702 { 4703 u32 high, low, entry_num; 4704 enum _ecore_status_t rc = ECORE_SUCCESS; 4705 4706 if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, 4707 &p_hwfn->p_dev->mf_bits)) 4708 return; 4709 4710 high = p_filter[1] | (p_filter[0] << 8); 4711 low = p_filter[5] | (p_filter[4] << 8) | 4712 (p_filter[3] << 16) | (p_filter[2] << 24); 4713 4714 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4715 rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high, 4716 low, &entry_num); 4717 if (rc != ECORE_SUCCESS) { 4718 DP_NOTICE(p_hwfn, false, 4719 "Tried to remove a non-configured filter\n"); 4720 return; 4721 } 4722 4723 4724 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4725 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n", 4726 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4727 p_filter[4], p_filter[5], entry_num); 4728 } 4729 4730 static enum _ecore_status_t 4731 ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4732 struct ecore_ptt *p_ptt, 4733 enum ecore_llh_port_filter_type_t type, 4734 u32 high, u32 low, u32 *p_entry_num) 4735 { 4736 u32 en; 4737 int i; 4738 4739 /* Find a free entry and utilize it */ 4740 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4741 en = ecore_rd(p_hwfn, p_ptt, 4742 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4743 i * sizeof(u32)); 4744 if (en) 4745 continue; 4746 ecore_wr(p_hwfn, p_ptt, 4747 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4748 2 * i * sizeof(u32), low); 4749 ecore_wr(p_hwfn, p_ptt, 4750 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4751 (2 * i + 1) * sizeof(u32), high); 4752 ecore_wr(p_hwfn, p_ptt, 4753 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4754 i * sizeof(u32), 1); 4755 ecore_wr(p_hwfn, p_ptt, 4756 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4757 i * sizeof(u32), 1 << type); 4758 ecore_wr(p_hwfn, p_ptt, 4759 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1); 4760 break; 4761 } 4762 4763 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4764 return ECORE_NORESOURCES; 4765 4766 *p_entry_num = i; 4767 4768 return ECORE_SUCCESS; 4769 } 4770 4771 enum _ecore_status_t 4772 ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn, 4773 struct ecore_ptt *p_ptt, 4774 u16 source_port_or_eth_type, 4775 u16 dest_port, 4776 enum ecore_llh_port_filter_type_t type) 4777 { 4778 u32 high, low, entry_num; 4779 enum _ecore_status_t rc = ECORE_SUCCESS; 4780 4781 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, 4782 &p_hwfn->p_dev->mf_bits)) 4783 return rc; 4784 4785 high = 0; 4786 low = 0; 4787 4788 switch (type) { 4789 case ECORE_LLH_FILTER_ETHERTYPE: 4790 high = source_port_or_eth_type; 4791 break; 4792 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4793 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4794 low = source_port_or_eth_type << 16; 4795 break; 4796 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4797 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4798 low = dest_port; 4799 break; 4800 case 
ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4801 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4802 low = (source_port_or_eth_type << 16) | dest_port; 4803 break; 4804 default: 4805 DP_NOTICE(p_hwfn, true, 4806 "Non valid LLH protocol filter type %d\n", type); 4807 return ECORE_INVAL; 4808 } 4809 4810 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4811 rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 4812 high, low, &entry_num); 4813 if (rc != ECORE_SUCCESS) { 4814 DP_NOTICE(p_hwfn, false, 4815 "Failed to find an empty LLH filter to utilize\n"); 4816 return rc; 4817 } 4818 switch (type) { 4819 case ECORE_LLH_FILTER_ETHERTYPE: 4820 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4821 "ETH type %x is added at %d\n", 4822 source_port_or_eth_type, entry_num); 4823 break; 4824 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4825 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4826 "TCP src port %x is added at %d\n", 4827 source_port_or_eth_type, entry_num); 4828 break; 4829 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4830 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4831 "UDP src port %x is added at %d\n", 4832 source_port_or_eth_type, entry_num); 4833 break; 4834 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4835 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4836 "TCP dst port %x is added at %d\n", dest_port, 4837 entry_num); 4838 break; 4839 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4840 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4841 "UDP dst port %x is added at %d\n", dest_port, 4842 entry_num); 4843 break; 4844 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4845 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4846 "TCP src/dst ports %x/%x are added at %d\n", 4847 source_port_or_eth_type, dest_port, entry_num); 4848 break; 4849 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4850 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4851 "UDP src/dst ports %x/%x are added at %d\n", 4852 source_port_or_eth_type, dest_port, entry_num); 4853 break; 4854 } 4855 4856 return rc; 4857 } 4858 4859 static enum _ecore_status_t 4860 ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4861 struct ecore_ptt *p_ptt, 4862 enum ecore_llh_port_filter_type_t type, 4863 u32 high, u32 low, u32 *p_entry_num) 4864 { 4865 int i; 4866 4867 /* Find the entry and clean it */ 4868 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4869 if (!ecore_rd(p_hwfn, p_ptt, 4870 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4871 i * sizeof(u32))) 4872 continue; 4873 if (!ecore_rd(p_hwfn, p_ptt, 4874 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4875 i * sizeof(u32))) 4876 continue; 4877 if (!(ecore_rd(p_hwfn, p_ptt, 4878 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4879 i * sizeof(u32)) & (1 << type))) 4880 continue; 4881 if (ecore_rd(p_hwfn, p_ptt, 4882 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4883 2 * i * sizeof(u32)) != low) 4884 continue; 4885 if (ecore_rd(p_hwfn, p_ptt, 4886 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4887 (2 * i + 1) * sizeof(u32)) != high) 4888 continue; 4889 4890 ecore_wr(p_hwfn, p_ptt, 4891 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4892 ecore_wr(p_hwfn, p_ptt, 4893 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4894 i * sizeof(u32), 0); 4895 ecore_wr(p_hwfn, p_ptt, 4896 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4897 i * sizeof(u32), 0); 4898 ecore_wr(p_hwfn, p_ptt, 4899 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4900 2 * i * sizeof(u32), 0); 4901 ecore_wr(p_hwfn, p_ptt, 4902 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4903 (2 * i + 1) * sizeof(u32), 0); 4904 break; 4905 } 4906 4907 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4908 return ECORE_INVAL; 4909 4910 *p_entry_num = i; 4911 4912 return ECORE_SUCCESS; 4913 } 4914 4915 void 
4916 ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn, 4917 struct ecore_ptt *p_ptt, 4918 u16 source_port_or_eth_type, 4919 u16 dest_port, 4920 enum ecore_llh_port_filter_type_t type) 4921 { 4922 u32 high, low, entry_num; 4923 enum _ecore_status_t rc = ECORE_SUCCESS; 4924 4925 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, 4926 &p_hwfn->p_dev->mf_bits)) 4927 return; 4928 4929 high = 0; 4930 low = 0; 4931 4932 switch (type) { 4933 case ECORE_LLH_FILTER_ETHERTYPE: 4934 high = source_port_or_eth_type; 4935 break; 4936 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4937 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4938 low = source_port_or_eth_type << 16; 4939 break; 4940 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4941 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4942 low = dest_port; 4943 break; 4944 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4945 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4946 low = (source_port_or_eth_type << 16) | dest_port; 4947 break; 4948 default: 4949 DP_NOTICE(p_hwfn, true, 4950 "Non valid LLH protocol filter type %d\n", type); 4951 return; 4952 } 4953 4954 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4955 rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 4956 high, low, 4957 &entry_num); 4958 if (rc != ECORE_SUCCESS) { 4959 DP_NOTICE(p_hwfn, false, 4960 "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n", 4961 type, source_port_or_eth_type, dest_port); 4962 return; 4963 } 4964 4965 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4966 "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n", 4967 type, source_port_or_eth_type, dest_port, entry_num); 4968 } 4969 4970 static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn, 4971 struct ecore_ptt *p_ptt) 4972 { 4973 int i; 4974 4975 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4976 return; 4977 4978 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4979 ecore_wr(p_hwfn, p_ptt, 4980 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4981 i * sizeof(u32), 0); 4982 ecore_wr(p_hwfn, p_ptt, 4983 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4984 2 * i * sizeof(u32), 0); 4985 ecore_wr(p_hwfn, p_ptt, 4986 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4987 (2 * i + 1) * sizeof(u32), 0); 4988 } 4989 } 4990 4991 void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn, 4992 struct ecore_ptt *p_ptt) 4993 { 4994 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, 4995 &p_hwfn->p_dev->mf_bits) && 4996 !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, 4997 &p_hwfn->p_dev->mf_bits)) 4998 return; 4999 5000 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 5001 ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt); 5002 } 5003 5004 enum _ecore_status_t 5005 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn, 5006 struct ecore_ptt *p_ptt) 5007 { 5008 if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) { 5009 ecore_wr(p_hwfn, p_ptt, 5010 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 5011 1 << p_hwfn->abs_pf_id / 2); 5012 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0); 5013 return ECORE_SUCCESS; 5014 } 5015 5016 DP_NOTICE(p_hwfn, false, 5017 "This function can't be set as default\n"); 5018 return ECORE_INVAL; 5019 } 5020 5021 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, 5022 struct ecore_ptt *p_ptt, 5023 u32 hw_addr, void *p_eth_qzone, 5024 osal_size_t eth_qzone_size, 5025 u8 timeset) 5026 { 5027 struct coalescing_timeset *p_coal_timeset; 5028 5029 if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) { 5030 DP_NOTICE(p_hwfn, 
true, 5031 "Coalescing configuration not enabled\n"); 5032 return ECORE_INVAL; 5033 } 5034 5035 p_coal_timeset = p_eth_qzone; 5036 OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); 5037 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 5038 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 5039 ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 5040 5041 return ECORE_SUCCESS; 5042 } 5043 5044 enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, 5045 u16 rx_coal, u16 tx_coal, 5046 void *p_handle) 5047 { 5048 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 5049 enum _ecore_status_t rc = ECORE_SUCCESS; 5050 struct ecore_ptt *p_ptt; 5051 5052 /* TODO - Configuring a single queue's coalescing but 5053 * claiming all queues are abiding same configuration 5054 * for PF and VF both. 5055 */ 5056 5057 if (IS_VF(p_hwfn->p_dev)) 5058 return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal, 5059 tx_coal, p_cid); 5060 5061 p_ptt = ecore_ptt_acquire(p_hwfn); 5062 if (!p_ptt) 5063 return ECORE_AGAIN; 5064 5065 if (rx_coal) { 5066 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 5067 if (rc) 5068 goto out; 5069 p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; 5070 } 5071 5072 if (tx_coal) { 5073 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); 5074 if (rc) 5075 goto out; 5076 p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; 5077 } 5078 out: 5079 ecore_ptt_release(p_hwfn, p_ptt); 5080 5081 return rc; 5082 } 5083 5084 enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, 5085 struct ecore_ptt *p_ptt, 5086 u16 coalesce, 5087 struct ecore_queue_cid *p_cid) 5088 { 5089 struct ustorm_eth_queue_zone eth_qzone; 5090 u8 timeset, timer_res; 5091 u32 address; 5092 enum _ecore_status_t rc; 5093 5094 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5095 if (coalesce <= 0x7F) { 5096 timer_res = 0; 5097 } else if (coalesce <= 0xFF) { 5098 timer_res = 1; 5099 } else if (coalesce <= 0x1FF) { 5100 timer_res = 2; 5101 } else { 5102 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5103 return ECORE_INVAL; 5104 } 5105 timeset = (u8)(coalesce >> timer_res); 5106 5107 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5108 p_cid->sb_igu_id, false); 5109 if (rc != ECORE_SUCCESS) 5110 goto out; 5111 5112 address = BAR0_MAP_REG_USDM_RAM + 5113 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5114 5115 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, 5116 sizeof(struct ustorm_eth_queue_zone), timeset); 5117 if (rc != ECORE_SUCCESS) 5118 goto out; 5119 5120 out: 5121 return rc; 5122 } 5123 5124 enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, 5125 struct ecore_ptt *p_ptt, 5126 u16 coalesce, 5127 struct ecore_queue_cid *p_cid) 5128 { 5129 struct xstorm_eth_queue_zone eth_qzone; 5130 u8 timeset, timer_res; 5131 u32 address; 5132 enum _ecore_status_t rc; 5133 5134 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5135 if (coalesce <= 0x7F) { 5136 timer_res = 0; 5137 } else if (coalesce <= 0xFF) { 5138 timer_res = 1; 5139 } else if (coalesce <= 0x1FF) { 5140 timer_res = 2; 5141 } else { 5142 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5143 return ECORE_INVAL; 5144 } 5145 5146 timeset = (u8)(coalesce >> timer_res); 5147 5148 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5149 p_cid->sb_igu_id, true); 5150 if (rc != ECORE_SUCCESS) 5151 goto out; 5152 5153 address = BAR0_MAP_REG_XSDM_RAM + 5154
XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5155 5156 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, 5157 sizeof(struct xstorm_eth_queue_zone), timeset); 5158 out: 5159 return rc; 5160 } 5161 5162 /* Calculate final WFQ values for all vports and configure them. 5163 * After this configuration each vport must have 5164 * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT 5165 */ 5166 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5167 struct ecore_ptt *p_ptt, 5168 u32 min_pf_rate) 5169 { 5170 struct init_qm_vport_params *vport_params; 5171 int i; 5172 5173 vport_params = p_hwfn->qm_info.qm_vport_params; 5174 5175 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5176 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5177 5178 vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) / 5179 min_pf_rate; 5180 ecore_init_vport_wfq(p_hwfn, p_ptt, 5181 vport_params[i].first_tx_pq_id, 5182 vport_params[i].vport_wfq); 5183 } 5184 } 5185 5186 static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn) 5187 { 5188 int i; 5189 5190 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 5191 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; 5192 } 5193 5194 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5195 struct ecore_ptt *p_ptt) 5196 { 5197 struct init_qm_vport_params *vport_params; 5198 int i; 5199 5200 vport_params = p_hwfn->qm_info.qm_vport_params; 5201 5202 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5203 ecore_init_wfq_default_param(p_hwfn); 5204 ecore_init_vport_wfq(p_hwfn, p_ptt, 5205 vport_params[i].first_tx_pq_id, 5206 vport_params[i].vport_wfq); 5207 } 5208 } 5209 5210 /* This function performs several validations for WFQ 5211 * configuration and required min rate for a given vport 5212 * 1. req_rate must be greater than one percent of min_pf_rate. 5213 * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 5214 * rates to get less than one percent of min_pf_rate. 5215 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
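 *
 * Numeric illustration (assumed values only): with min_pf_rate = 10000 Mbps
 * and ECORE_WFQ_UNIT taken as 100, one percent is 100 Mbps. A request of
 * 50 Mbps violates rule 1; requests summing to 10100 Mbps violate rule 3;
 * and if the leftover bandwidth split among the non-configured vports drops
 * below 100 Mbps per vport, rule 2 is violated.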
5216 */ 5217 static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn, 5218 u16 vport_id, u32 req_rate, 5219 u32 min_pf_rate) 5220 { 5221 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 5222 int non_requested_count = 0, req_count = 0, i, num_vports; 5223 5224 num_vports = p_hwfn->qm_info.num_vports; 5225 5226 /* Accounting for the vports which are configured for WFQ explicitly */ 5227 5228 for (i = 0; i < num_vports; i++) { 5229 u32 tmp_speed; 5230 5231 if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { 5232 req_count++; 5233 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5234 total_req_min_rate += tmp_speed; 5235 } 5236 } 5237 5238 /* Include current vport data as well */ 5239 req_count++; 5240 total_req_min_rate += req_rate; 5241 non_requested_count = num_vports - req_count; 5242 5243 /* validate possible error cases */ 5244 if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) { 5245 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5246 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5247 vport_id, req_rate, min_pf_rate); 5248 return ECORE_INVAL; 5249 } 5250 5251 /* TBD - for number of vports greater than 100 */ 5252 if (num_vports > ECORE_WFQ_UNIT) { 5253 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5254 "Number of vports is greater than %d\n", 5255 ECORE_WFQ_UNIT); 5256 return ECORE_INVAL; 5257 } 5258 5259 if (total_req_min_rate > min_pf_rate) { 5260 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5261 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5262 total_req_min_rate, min_pf_rate); 5263 return ECORE_INVAL; 5264 } 5265 5266 /* Data left for non requested vports */ 5267 total_left_rate = min_pf_rate - total_req_min_rate; 5268 left_rate_per_vp = total_left_rate / non_requested_count; 5269 5270 /* validate if non requested get < 1% of min bw */ 5271 if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) { 5272 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5273 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5274 left_rate_per_vp, min_pf_rate); 5275 return ECORE_INVAL; 5276 } 5277 5278 /* now req_rate for given vport passes all scenarios. 5279 * assign final wfq rates to all vports. 
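 *
 * Continuing the numeric illustration above (assumed values only): with
 * min_pf_rate = 10000 Mbps, one vport requesting 4000 Mbps and three
 * non-configured vports, left_rate_per_vp = (10000 - 4000) / 3 = 2000 Mbps
 * is assigned to each of the three, which (again taking ECORE_WFQ_UNIT as
 * 100) later translates to vport_wfq = (2000 * 100) / 10000 = 20 in
 * ecore_configure_wfq_for_all_vports().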
5280 */ 5281 p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 5282 p_hwfn->qm_info.wfq_data[vport_id].configured = true; 5283 5284 for (i = 0; i < num_vports; i++) { 5285 if (p_hwfn->qm_info.wfq_data[i].configured) 5286 continue; 5287 5288 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 5289 } 5290 5291 return ECORE_SUCCESS; 5292 } 5293 5294 static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn, 5295 struct ecore_ptt *p_ptt, 5296 u16 vp_id, u32 rate) 5297 { 5298 struct ecore_mcp_link_state *p_link; 5299 int rc = ECORE_SUCCESS; 5300 5301 p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output; 5302 5303 if (!p_link->min_pf_rate) { 5304 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; 5305 p_hwfn->qm_info.wfq_data[vp_id].configured = true; 5306 return rc; 5307 } 5308 5309 rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); 5310 5311 if (rc == ECORE_SUCCESS) 5312 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, 5313 p_link->min_pf_rate); 5314 else 5315 DP_NOTICE(p_hwfn, false, 5316 "Validation failed while configuring min rate\n"); 5317 5318 return rc; 5319 } 5320 5321 static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn, 5322 struct ecore_ptt *p_ptt, 5323 u32 min_pf_rate) 5324 { 5325 bool use_wfq = false; 5326 int rc = ECORE_SUCCESS; 5327 u16 i; 5328 5329 /* Validate all pre configured vports for wfq */ 5330 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5331 u32 rate; 5332 5333 if (!p_hwfn->qm_info.wfq_data[i].configured) 5334 continue; 5335 5336 rate = p_hwfn->qm_info.wfq_data[i].min_speed; 5337 use_wfq = true; 5338 5339 rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate); 5340 if (rc != ECORE_SUCCESS) { 5341 DP_NOTICE(p_hwfn, false, 5342 "WFQ validation failed while configuring min rate\n"); 5343 break; 5344 } 5345 } 5346 5347 if (rc == ECORE_SUCCESS && use_wfq) 5348 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5349 else 5350 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt); 5351 5352 return rc; 5353 } 5354 5355 /* Main API for ecore clients to configure vport min rate. 5356 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] 5357 * rate - Speed in Mbps needs to be assigned to a given vport. 
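 *
 * Hypothetical call from a client (illustrative only):
 *
 *   rc = ecore_configure_vport_wfq(p_dev, 0, 2500);
 *
 * requests a minimum of 2500 Mbps for vport 0 on every hwfn of the device
 * and returns ECORE_INVAL if the validation in ecore_init_wfq_param() fails.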
5358 */ 5359 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate) 5360 { 5361 int i, rc = ECORE_INVAL; 5362 5363 /* TBD - for multiple hardware functions - that is 100 gig */ 5364 if (ECORE_IS_CMT(p_dev)) { 5365 DP_NOTICE(p_dev, false, 5366 "WFQ configuration is not supported for this device\n"); 5367 return rc; 5368 } 5369 5370 for_each_hwfn(p_dev, i) { 5371 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5372 struct ecore_ptt *p_ptt; 5373 5374 p_ptt = ecore_ptt_acquire(p_hwfn); 5375 if (!p_ptt) 5376 return ECORE_TIMEOUT; 5377 5378 rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); 5379 5380 if (rc != ECORE_SUCCESS) { 5381 ecore_ptt_release(p_hwfn, p_ptt); 5382 return rc; 5383 } 5384 5385 ecore_ptt_release(p_hwfn, p_ptt); 5386 } 5387 5388 return rc; 5389 } 5390 5391 /* API to configure WFQ from mcp link change */ 5392 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, 5393 struct ecore_ptt *p_ptt, 5394 u32 min_pf_rate) 5395 { 5396 int i; 5397 5398 /* TBD - for multiple hardware functions - that is 100 gig */ 5399 if (ECORE_IS_CMT(p_dev)) { 5400 DP_VERBOSE(p_dev, ECORE_MSG_LINK, 5401 "WFQ configuration is not supported for this device\n"); 5402 return; 5403 } 5404 5405 for_each_hwfn(p_dev, i) { 5406 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5407 5408 __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, 5409 min_pf_rate); 5410 } 5411 } 5412 5413 int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, 5414 struct ecore_ptt *p_ptt, 5415 struct ecore_mcp_link_state *p_link, 5416 u8 max_bw) 5417 { 5418 int rc = ECORE_SUCCESS; 5419 5420 p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; 5421 5422 if (!p_link->line_speed && (max_bw != 100)) 5423 return rc; 5424 5425 p_link->speed = (p_link->line_speed * max_bw) / 100; 5426 p_hwfn->qm_info.pf_rl = p_link->speed; 5427 5428 /* Since the limiter also affects Tx-switched traffic, we don't want it 5429 * to limit such traffic in case there's no actual limit. 5430 * In that case, set limit to imaginary high boundary. 
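 *
 * Example (assumed numbers): with line_speed = 25000 Mbps and max_bw = 40,
 * p_link->speed and qm_info.pf_rl become 10000 Mbps; with max_bw = 100 the
 * rate limiter is left effectively open by programming the 100000 Mbps
 * ceiling below instead of the line speed.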
/* API to configure WFQ from mcp link change */
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
					   struct ecore_ptt *p_ptt,
					   u32 min_pf_rate)
{
	int i;

	/* TBD - for multiple hardware functions - that is 100 gig */
	if (ECORE_IS_CMT(p_dev)) {
		DP_VERBOSE(p_dev, ECORE_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		__ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
							min_pf_rate);
	}
}

int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 max_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic when there is no actual limit.
	 * In that case, set the limit to an imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			      p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
{
	int i, rc = ECORE_INVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
							p_link, max_bw);

		ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 min_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
{
	int i, rc = ECORE_INVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
							p_link, min_bw);
		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
								     p_ptt,
								     min_rate);
		}

		ecore_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
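/* Worked example for the PF bandwidth helpers above (hypothetical numbers):
 *
 *	line_speed = 25000 Mbps, max_bw = 40
 *		=> p_link->speed = 25000 * 40 / 100 = 10000 Mbps
 *		=> qm_info.pf_rl = 10000, programmed via ecore_init_pf_rl()
 *	max_bw = 100
 *		=> qm_info.pf_rl = 100000, so the rate limiter effectively
 *		   never throttles (including Tx-switched traffic)
 *
 *	line_speed = 25000 Mbps, min_bw = 20
 *		=> p_link->min_pf_rate = 25000 * 20 / 100 = 5000 Mbps
 *		=> PF WFQ weight programmed via ecore_init_pf_wfq(), and the
 *		   per-vport WFQ entries are re-validated against the new
 *		   min_pf_rate via __ecore_configure_vp_wfq_on_link_change().
 */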
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);

	OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
		    sizeof(*p_hwfn->qm_info.wfq_data) *
		    p_hwfn->qm_info.num_vports);
}

int ecore_device_num_engines(struct ecore_dev *p_dev)
{
	return ECORE_IS_BB(p_dev) ? 2 : 1;
}

int ecore_device_num_ports(struct ecore_dev *p_dev)
{
	return p_dev->num_ports;
}

void ecore_set_fw_mac_addr(__le16 *fw_msb,
			   __le16 *fw_mid,
			   __le16 *fw_lsb,
			   u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
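/* Byte-ordering sketch for ecore_set_fw_mac_addr() (illustrative values only):
 *
 *	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	__le16 msb, mid, lsb;
 *
 *	ecore_set_fw_mac_addr(&msb, &mid, &lsb, mac);
 *	// resulting memory: msb = { 0x11, 0x00 }, mid = { 0x33, 0x22 },
 *	//                   lsb = { 0x55, 0x44 }
 *	// i.e. each 16-bit word holds one byte-swapped pair of the MAC,
 *	// presumably the layout the FW structures expect here.
 */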