1 /*- 2 * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. 3 * 4 * Eric Davis <edavis@broadcom.com> 5 * David Christensen <davidch@broadcom.com> 6 * Gary Zambrano <zambrano@broadcom.com> 7 * 8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. 9 * Copyright (c) 2015 QLogic Corporation. 10 * All rights reserved. 11 * www.qlogic.com 12 * 13 * See LICENSE.bnx2x_pmd for copyright and licensing details. 14 */ 15 16 #include "bnx2x.h" 17 #include "ecore_init.h" 18 19 /**** Exe Queue interfaces ****/ 20 21 /** 22 * ecore_exe_queue_init - init the Exe Queue object 23 * 24 * @o: pointer to the object 25 * @exe_len: length 26 * @owner: pointer to the owner 27 * @validate: validate function pointer 28 * @optimize: optimize function pointer 29 * @exec: execute function pointer 30 * @get: get function pointer 31 */ 32 static void 33 ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused, 34 struct ecore_exe_queue_obj *o, 35 int exe_len, 36 union ecore_qable_obj *owner, 37 exe_q_validate validate, 38 exe_q_remove remove, 39 exe_q_optimize optimize, exe_q_execute exec, exe_q_get get) 40 { 41 ECORE_MEMSET(o, 0, sizeof(*o)); 42 43 ECORE_LIST_INIT(&o->exe_queue); 44 ECORE_LIST_INIT(&o->pending_comp); 45 46 ECORE_SPIN_LOCK_INIT(&o->lock, sc); 47 48 o->exe_chunk_len = exe_len; 49 o->owner = owner; 50 51 /* Owner specific callbacks */ 52 o->validate = validate; 53 o->remove = remove; 54 o->optimize = optimize; 55 o->execute = exec; 56 o->get = get; 57 58 ECORE_MSG("Setup the execution queue with the chunk length of %d", 59 exe_len); 60 } 61 62 static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused, 63 struct ecore_exeq_elem *elem) 64 { 65 ECORE_MSG("Deleting an exe_queue element"); 66 ECORE_FREE(sc, elem, sizeof(*elem)); 67 } 68 69 static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o) 70 { 71 struct ecore_exeq_elem *elem; 72 int cnt = 0; 73 74 ECORE_SPIN_LOCK_BH(&o->lock); 75 76 ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link, 77 struct ecore_exeq_elem) cnt++; 78 79 ECORE_SPIN_UNLOCK_BH(&o->lock); 80 81 return cnt; 82 } 83 84 /** 85 * ecore_exe_queue_add - add a new element to the execution queue 86 * 87 * @sc: driver handle 88 * @o: queue 89 * @cmd: new command to add 90 * @restore: true - do not optimize the command 91 * 92 * If the element is optimized or is illegal, frees it. 93 */ 94 static int ecore_exe_queue_add(struct bnx2x_softc *sc, 95 struct ecore_exe_queue_obj *o, 96 struct ecore_exeq_elem *elem, int restore) 97 { 98 int rc; 99 100 ECORE_SPIN_LOCK_BH(&o->lock); 101 102 if (!restore) { 103 /* Try to cancel this element queue */ 104 rc = o->optimize(sc, o->owner, elem); 105 if (rc) 106 goto free_and_exit; 107 108 /* Check if this request is ok */ 109 rc = o->validate(sc, o->owner, elem); 110 if (rc) { 111 ECORE_MSG("Preamble failed: %d", rc); 112 goto free_and_exit; 113 } 114 } 115 116 /* If so, add it to the execution queue */ 117 ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue); 118 119 ECORE_SPIN_UNLOCK_BH(&o->lock); 120 121 return ECORE_SUCCESS; 122 123 free_and_exit: 124 ecore_exe_queue_free_elem(sc, elem); 125 126 ECORE_SPIN_UNLOCK_BH(&o->lock); 127 128 return rc; 129 } 130 131 static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, struct ecore_exe_queue_obj 132 *o) 133 { 134 struct ecore_exeq_elem *elem; 135 136 while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { 137 elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp, 138 struct ecore_exeq_elem, link); 139 140 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp); 141 ecore_exe_queue_free_elem(sc, elem); 142 } 143 } 144 145 static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, 146 struct ecore_exe_queue_obj *o) 147 { 148 ECORE_SPIN_LOCK_BH(&o->lock); 149 150 __ecore_exe_queue_reset_pending(sc, o); 151 152 ECORE_SPIN_UNLOCK_BH(&o->lock); 153 } 154 155 /** 156 * ecore_exe_queue_step - execute one execution chunk atomically 157 * 158 * @sc: driver handle 159 * @o: queue 160 * @ramrod_flags: flags 161 * 162 * (Should be called while holding the exe_queue->lock). 163 */ 164 static int ecore_exe_queue_step(struct bnx2x_softc *sc, 165 struct ecore_exe_queue_obj *o, 166 unsigned long *ramrod_flags) 167 { 168 struct ecore_exeq_elem *elem, spacer; 169 int cur_len = 0, rc; 170 171 ECORE_MEMSET(&spacer, 0, sizeof(spacer)); 172 173 /* Next step should not be performed until the current is finished, 174 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to 175 * properly clear object internals without sending any command to the FW 176 * which also implies there won't be any completion to clear the 177 * 'pending' list. 178 */ 179 if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { 180 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { 181 ECORE_MSG 182 ("RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list"); 183 __ecore_exe_queue_reset_pending(sc, o); 184 } else { 185 return ECORE_PENDING; 186 } 187 } 188 189 /* Run through the pending commands list and create a next 190 * execution chunk. 191 */ 192 while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) { 193 elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue, 194 struct ecore_exeq_elem, link); 195 ECORE_DBG_BREAK_IF(!elem->cmd_len); 196 197 if (cur_len + elem->cmd_len <= o->exe_chunk_len) { 198 cur_len += elem->cmd_len; 199 /* Prevent from both lists being empty when moving an 200 * element. This will allow the call of 201 * ecore_exe_queue_empty() without locking. 202 */ 203 ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp); 204 mb(); 205 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue); 206 ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp); 207 ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp); 208 } else 209 break; 210 } 211 212 /* Sanity check */ 213 if (!cur_len) 214 return ECORE_SUCCESS; 215 216 rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags); 217 if (rc < 0) 218 /* In case of an error return the commands back to the queue 219 * and reset the pending_comp. 220 */ 221 ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue); 222 else if (!rc) 223 /* If zero is returned, means there are no outstanding pending 224 * completions and we may dismiss the pending list. 225 */ 226 __ecore_exe_queue_reset_pending(sc, o); 227 228 return rc; 229 } 230 231 static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o) 232 { 233 int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue); 234 235 /* Don't reorder!!! */ 236 mb(); 237 238 return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp); 239 } 240 241 static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct 242 bnx2x_softc *sc 243 __rte_unused) 244 { 245 ECORE_MSG("Allocating a new exe_queue element"); 246 return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc); 247 } 248 249 /************************ raw_obj functions ***********************************/ 250 static int ecore_raw_check_pending(struct ecore_raw_obj *o) 251 { 252 /* 253 * !! converts the value returned by ECORE_TEST_BIT such that it 254 * is guaranteed not to be truncated regardless of int definition. 255 * 256 * Note we cannot simply define the function's return value type 257 * to match the type returned by ECORE_TEST_BIT, as it varies by 258 * platform/implementation. 259 */ 260 261 return ! !ECORE_TEST_BIT(o->state, o->pstate); 262 } 263 264 static void ecore_raw_clear_pending(struct ecore_raw_obj *o) 265 { 266 ECORE_SMP_MB_BEFORE_CLEAR_BIT(); 267 ECORE_CLEAR_BIT(o->state, o->pstate); 268 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 269 } 270 271 static void ecore_raw_set_pending(struct ecore_raw_obj *o) 272 { 273 ECORE_SMP_MB_BEFORE_CLEAR_BIT(); 274 ECORE_SET_BIT(o->state, o->pstate); 275 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 276 } 277 278 /** 279 * ecore_state_wait - wait until the given bit(state) is cleared 280 * 281 * @sc: device handle 282 * @state: state which is to be cleared 283 * @state_p: state buffer 284 * 285 */ 286 static int ecore_state_wait(struct bnx2x_softc *sc, int state, 287 unsigned long *pstate) 288 { 289 /* can take a while if any port is running */ 290 int cnt = 5000; 291 292 if (CHIP_REV_IS_EMUL(sc)) 293 cnt *= 20; 294 295 ECORE_MSG("waiting for state to become %d", state); 296 297 ECORE_MIGHT_SLEEP(); 298 while (cnt--) { 299 bnx2x_intr_legacy(sc, 1); 300 if (!ECORE_TEST_BIT(state, pstate)) { 301 #ifdef ECORE_STOP_ON_ERROR 302 ECORE_MSG("exit (cnt %d)", 5000 - cnt); 303 #endif 304 return ECORE_SUCCESS; 305 } 306 307 ECORE_WAIT(sc, delay_us); 308 309 if (sc->panic) 310 return ECORE_IO; 311 } 312 313 /* timeout! */ 314 PMD_DRV_LOG(ERR, "timeout waiting for state %d", state); 315 #ifdef ECORE_STOP_ON_ERROR 316 ecore_panic(); 317 #endif 318 319 return ECORE_TIMEOUT; 320 } 321 322 static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw) 323 { 324 return ecore_state_wait(sc, raw->state, raw->pstate); 325 } 326 327 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ 328 /* credit handling callbacks */ 329 static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset) 330 { 331 struct ecore_credit_pool_obj *mp = o->macs_pool; 332 333 ECORE_DBG_BREAK_IF(!mp); 334 335 return mp->get_entry(mp, offset); 336 } 337 338 static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o) 339 { 340 struct ecore_credit_pool_obj *mp = o->macs_pool; 341 342 ECORE_DBG_BREAK_IF(!mp); 343 344 return mp->get(mp, 1); 345 } 346 347 static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset) 348 { 349 struct ecore_credit_pool_obj *mp = o->macs_pool; 350 351 return mp->put_entry(mp, offset); 352 } 353 354 static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o) 355 { 356 struct ecore_credit_pool_obj *mp = o->macs_pool; 357 358 return mp->put(mp, 1); 359 } 360 361 /** 362 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac 363 * head list. 364 * 365 * @sc: device handle 366 * @o: vlan_mac object 367 * 368 * @details: Non-blocking implementation; should be called under execution 369 * queue lock. 370 */ 371 static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused, 372 struct ecore_vlan_mac_obj *o) 373 { 374 if (o->head_reader) { 375 ECORE_MSG("vlan_mac_lock writer - There are readers; Busy"); 376 return ECORE_BUSY; 377 } 378 379 ECORE_MSG("vlan_mac_lock writer - Taken"); 380 return ECORE_SUCCESS; 381 } 382 383 /** 384 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step 385 * which wasn't able to run due to a taken lock on vlan mac head list. 386 * 387 * @sc: device handle 388 * @o: vlan_mac object 389 * 390 * @details Should be called under execution queue lock; notice it might release 391 * and reclaim it during its run. 392 */ 393 static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc, 394 struct ecore_vlan_mac_obj *o) 395 { 396 int rc; 397 unsigned long ramrod_flags = o->saved_ramrod_flags; 398 399 ECORE_MSG("vlan_mac_lock execute pending command with ramrod flags %lu", 400 ramrod_flags); 401 o->head_exe_request = FALSE; 402 o->saved_ramrod_flags = 0; 403 rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags); 404 if (rc != ECORE_SUCCESS) { 405 PMD_DRV_LOG(ERR, 406 "execution of pending commands failed with rc %d", 407 rc); 408 #ifdef ECORE_STOP_ON_ERROR 409 ecore_panic(); 410 #endif 411 } 412 } 413 414 /** 415 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been 416 * called due to vlan mac head list lock being taken. 417 * 418 * @sc: device handle 419 * @o: vlan_mac object 420 * @ramrod_flags: ramrod flags of missed execution 421 * 422 * @details Should be called under execution queue lock. 423 */ 424 static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused, 425 struct ecore_vlan_mac_obj *o, 426 unsigned long ramrod_flags) 427 { 428 o->head_exe_request = TRUE; 429 o->saved_ramrod_flags = ramrod_flags; 430 ECORE_MSG("Placing pending execution with ramrod flags %lu", 431 ramrod_flags); 432 } 433 434 /** 435 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock 436 * 437 * @sc: device handle 438 * @o: vlan_mac object 439 * 440 * @details Should be called under execution queue lock. Notice if a pending 441 * execution exists, it would perform it - possibly releasing and 442 * reclaiming the execution queue lock. 443 */ 444 static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, 445 struct ecore_vlan_mac_obj *o) 446 { 447 /* It's possible a new pending execution was added since this writer 448 * executed. If so, execute again. [Ad infinitum] 449 */ 450 while (o->head_exe_request) { 451 ECORE_MSG 452 ("vlan_mac_lock - writer release encountered a pending request"); 453 __ecore_vlan_mac_h_exec_pending(sc, o); 454 } 455 } 456 457 /** 458 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock 459 * 460 * @sc: device handle 461 * @o: vlan_mac object 462 * 463 * @details Notice if a pending execution exists, it would perform it - 464 * possibly releasing and reclaiming the execution queue lock. 465 */ 466 void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, 467 struct ecore_vlan_mac_obj *o) 468 { 469 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); 470 __ecore_vlan_mac_h_write_unlock(sc, o); 471 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); 472 } 473 474 /** 475 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock 476 * 477 * @sc: device handle 478 * @o: vlan_mac object 479 * 480 * @details Should be called under the execution queue lock. May sleep. May 481 * release and reclaim execution queue lock during its run. 482 */ 483 static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused, 484 struct ecore_vlan_mac_obj *o) 485 { 486 /* If we got here, we're holding lock --> no WRITER exists */ 487 o->head_reader++; 488 ECORE_MSG("vlan_mac_lock - locked reader - number %d", o->head_reader); 489 490 return ECORE_SUCCESS; 491 } 492 493 /** 494 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock 495 * 496 * @sc: device handle 497 * @o: vlan_mac object 498 * 499 * @details May sleep. Claims and releases execution queue lock during its run. 500 */ 501 static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc, 502 struct ecore_vlan_mac_obj *o) 503 { 504 int rc; 505 506 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); 507 rc = __ecore_vlan_mac_h_read_lock(sc, o); 508 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); 509 510 return rc; 511 } 512 513 /** 514 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock 515 * 516 * @sc: device handle 517 * @o: vlan_mac object 518 * 519 * @details Should be called under execution queue lock. Notice if a pending 520 * execution exists, it would be performed if this was the last 521 * reader. possibly releasing and reclaiming the execution queue lock. 522 */ 523 static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, 524 struct ecore_vlan_mac_obj *o) 525 { 526 if (!o->head_reader) { 527 PMD_DRV_LOG(ERR, 528 "Need to release vlan mac reader lock, but lock isn't taken"); 529 #ifdef ECORE_STOP_ON_ERROR 530 ecore_panic(); 531 #endif 532 } else { 533 o->head_reader--; 534 PMD_DRV_LOG(INFO, 535 "vlan_mac_lock - decreased readers to %d", 536 o->head_reader); 537 } 538 539 /* It's possible a new pending execution was added, and that this reader 540 * was last - if so we need to execute the command. 541 */ 542 if (!o->head_reader && o->head_exe_request) { 543 PMD_DRV_LOG(INFO, 544 "vlan_mac_lock - reader release encountered a pending request"); 545 546 /* Writer release will do the trick */ 547 __ecore_vlan_mac_h_write_unlock(sc, o); 548 } 549 } 550 551 /** 552 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock 553 * 554 * @sc: device handle 555 * @o: vlan_mac object 556 * 557 * @details Notice if a pending execution exists, it would be performed if this 558 * was the last reader. Claims and releases the execution queue lock 559 * during its run. 560 */ 561 void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, 562 struct ecore_vlan_mac_obj *o) 563 { 564 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); 565 __ecore_vlan_mac_h_read_unlock(sc, o); 566 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); 567 } 568 569 /** 570 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock 571 * 572 * @sc: device handle 573 * @o: vlan_mac object 574 * @n: number of elements to get 575 * @base: base address for element placement 576 * @stride: stride between elements (in bytes) 577 */ 578 static int ecore_get_n_elements(struct bnx2x_softc *sc, 579 struct ecore_vlan_mac_obj *o, int n, 580 uint8_t * base, uint8_t stride, uint8_t size) 581 { 582 struct ecore_vlan_mac_registry_elem *pos; 583 uint8_t *next = base; 584 int counter = 0, read_lock; 585 586 ECORE_MSG("get_n_elements - taking vlan_mac_lock (reader)"); 587 read_lock = ecore_vlan_mac_h_read_lock(sc, o); 588 if (read_lock != ECORE_SUCCESS) 589 PMD_DRV_LOG(ERR, 590 "get_n_elements failed to get vlan mac reader lock; Access without lock"); 591 592 /* traverse list */ 593 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, 594 struct ecore_vlan_mac_registry_elem) { 595 if (counter < n) { 596 ECORE_MEMCPY(next, &pos->u, size); 597 counter++; 598 ECORE_MSG 599 ("copied element number %d to address %p element was:", 600 counter, next); 601 next += stride + size; 602 } 603 } 604 605 if (read_lock == ECORE_SUCCESS) { 606 ECORE_MSG("get_n_elements - releasing vlan_mac_lock (reader)"); 607 ecore_vlan_mac_h_read_unlock(sc, o); 608 } 609 610 return counter * ETH_ALEN; 611 } 612 613 /* check_add() callbacks */ 614 static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused, 615 struct ecore_vlan_mac_obj *o, 616 union ecore_classification_ramrod_data *data) 617 { 618 struct ecore_vlan_mac_registry_elem *pos; 619 620 ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command", 621 data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], 622 data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]); 623 624 if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac)) 625 return ECORE_INVAL; 626 627 /* Check if a requested MAC already exists */ 628 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, 629 struct ecore_vlan_mac_registry_elem) 630 if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) && 631 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) 632 return ECORE_EXISTS; 633 634 return ECORE_SUCCESS; 635 } 636 637 /* check_del() callbacks */ 638 static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_softc 639 *sc 640 __rte_unused, 641 struct 642 ecore_vlan_mac_obj 643 *o, union 644 ecore_classification_ramrod_data 645 *data) 646 { 647 struct ecore_vlan_mac_registry_elem *pos; 648 649 ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command", 650 data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], 651 data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]); 652 653 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, 654 struct ecore_vlan_mac_registry_elem) 655 if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) && 656 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) 657 return pos; 658 659 return NULL; 660 } 661 662 /* check_move() callback */ 663 static int ecore_check_move(struct bnx2x_softc *sc, 664 struct ecore_vlan_mac_obj *src_o, 665 struct ecore_vlan_mac_obj *dst_o, 666 union ecore_classification_ramrod_data *data) 667 { 668 struct ecore_vlan_mac_registry_elem *pos; 669 int rc; 670 671 /* Check if we can delete the requested configuration from the first 672 * object. 673 */ 674 pos = src_o->check_del(sc, src_o, data); 675 676 /* check if configuration can be added */ 677 rc = dst_o->check_add(sc, dst_o, data); 678 679 /* If this classification can not be added (is already set) 680 * or can't be deleted - return an error. 681 */ 682 if (rc || !pos) 683 return FALSE; 684 685 return TRUE; 686 } 687 688 static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc, 689 __rte_unused struct ecore_vlan_mac_obj 690 *src_o, __rte_unused struct ecore_vlan_mac_obj 691 *dst_o, __rte_unused union 692 ecore_classification_ramrod_data *data) 693 { 694 return FALSE; 695 } 696 697 static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj 698 *o) 699 { 700 struct ecore_raw_obj *raw = &o->raw; 701 uint8_t rx_tx_flag = 0; 702 703 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || 704 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 705 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; 706 707 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || 708 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 709 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; 710 711 return rx_tx_flag; 712 } 713 714 static void ecore_set_mac_in_nig(struct bnx2x_softc *sc, 715 int add, unsigned char *dev_addr, int index) 716 { 717 uint32_t wb_data[2]; 718 uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM : 719 NIG_REG_LLH0_FUNC_MEM; 720 721 if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc)) 722 return; 723 724 if (index > ECORE_LLH_CAM_MAX_PF_LINE) 725 return; 726 727 ECORE_MSG("Going to %s LLH configuration at entry %d", 728 (add ? "ADD" : "DELETE"), index); 729 730 if (add) { 731 /* LLH_FUNC_MEM is a uint64_t WB register */ 732 reg_offset += 8 * index; 733 734 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | 735 (dev_addr[4] << 8) | dev_addr[5]); 736 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); 737 738 ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2); 739 } 740 741 REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : 742 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add); 743 } 744 745 /** 746 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod 747 * 748 * @sc: device handle 749 * @o: queue for which we want to configure this rule 750 * @add: if TRUE the command is an ADD command, DEL otherwise 751 * @opcode: CLASSIFY_RULE_OPCODE_XXX 752 * @hdr: pointer to a header to setup 753 * 754 */ 755 static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o, 756 int add, int opcode, 757 struct eth_classify_cmd_header 758 *hdr) 759 { 760 struct ecore_raw_obj *raw = &o->raw; 761 762 hdr->client_id = raw->cl_id; 763 hdr->func_id = raw->func_id; 764 765 /* Rx or/and Tx (internal switching) configuration ? */ 766 hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o); 767 768 if (add) 769 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; 770 771 hdr->cmd_general_data |= 772 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); 773 } 774 775 /** 776 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header 777 * 778 * @cid: connection id 779 * @type: ECORE_FILTER_XXX_PENDING 780 * @hdr: pointer to header to setup 781 * @rule_cnt: 782 * 783 * currently we always configure one rule and echo field to contain a CID and an 784 * opcode type. 785 */ 786 static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type, struct eth_classify_header 787 *hdr, int rule_cnt) 788 { 789 hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) | 790 (type << ECORE_SWCID_SHIFT)); 791 hdr->rule_cnt = (uint8_t) rule_cnt; 792 } 793 794 /* hw_config() callbacks */ 795 static void ecore_set_one_mac_e2(struct bnx2x_softc *sc, 796 struct ecore_vlan_mac_obj *o, 797 struct ecore_exeq_elem *elem, int rule_idx, 798 __rte_unused int cam_offset) 799 { 800 struct ecore_raw_obj *raw = &o->raw; 801 struct eth_classify_rules_ramrod_data *data = 802 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 803 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; 804 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 805 int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE; 806 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; 807 uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac; 808 809 /* Set LLH CAM entry: currently only iSCSI and ETH macs are 810 * relevant. In addition, current implementation is tuned for a 811 * single ETH MAC. 812 * 813 * When multiple unicast ETH MACs PF configuration in switch 814 * independent mode is required (NetQ, multiple netdev MACs, 815 * etc.), consider better utilisation of 8 per function MAC 816 * entries in the LLH register. There is also 817 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the 818 * total number of CAM entries to 16. 819 * 820 * Currently we won't configure NIG for MACs other than a primary ETH 821 * MAC and iSCSI L2 MAC. 822 * 823 * If this MAC is moving from one Queue to another, no need to change 824 * NIG configuration. 825 */ 826 if (cmd != ECORE_VLAN_MAC_MOVE) { 827 if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags)) 828 ecore_set_mac_in_nig(sc, add, mac, 829 ECORE_LLH_CAM_ISCSI_ETH_LINE); 830 else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags)) 831 ecore_set_mac_in_nig(sc, add, mac, 832 ECORE_LLH_CAM_ETH_LINE); 833 } 834 835 /* Reset the ramrod data buffer for the first rule */ 836 if (rule_idx == 0) 837 ECORE_MEMSET(data, 0, sizeof(*data)); 838 839 /* Setup a command header */ 840 ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC, 841 &rule_entry->mac.header); 842 843 ECORE_MSG("About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d", 844 (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], 845 mac[4], mac[5], raw->cl_id); 846 847 /* Set a MAC itself */ 848 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, 849 &rule_entry->mac.mac_mid, 850 &rule_entry->mac.mac_lsb, mac); 851 rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac; 852 853 /* MOVE: Add a rule that will add this MAC to the target Queue */ 854 if (cmd == ECORE_VLAN_MAC_MOVE) { 855 rule_entry++; 856 rule_cnt++; 857 858 /* Setup ramrod data */ 859 ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data. 860 vlan_mac.target_obj, TRUE, 861 CLASSIFY_RULE_OPCODE_MAC, 862 &rule_entry->mac.header); 863 864 /* Set a MAC itself */ 865 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, 866 &rule_entry->mac.mac_mid, 867 &rule_entry->mac.mac_lsb, mac); 868 rule_entry->mac.inner_mac = 869 elem->cmd_data.vlan_mac.u.mac.is_inner_mac; 870 } 871 872 /* Set the ramrod data header */ 873 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 874 rule_cnt); 875 } 876 877 /** 878 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod 879 * 880 * @sc: device handle 881 * @o: queue 882 * @type: 883 * @cam_offset: offset in cam memory 884 * @hdr: pointer to a header to setup 885 * 886 * E1H 887 */ 888 static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj 889 *o, int type, int cam_offset, struct mac_configuration_hdr 890 *hdr) 891 { 892 struct ecore_raw_obj *r = &o->raw; 893 894 hdr->length = 1; 895 hdr->offset = (uint8_t) cam_offset; 896 hdr->client_id = ECORE_CPU_TO_LE16(0xff); 897 hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | 898 (type << ECORE_SWCID_SHIFT)); 899 } 900 901 static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj 902 *o, int add, int opcode, 903 uint8_t * mac, 904 uint16_t vlan_id, struct 905 mac_configuration_entry 906 *cfg_entry) 907 { 908 struct ecore_raw_obj *r = &o->raw; 909 uint32_t cl_bit_vec = (1 << r->cl_id); 910 911 cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec); 912 cfg_entry->pf_id = r->func_id; 913 cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id); 914 915 if (add) { 916 ECORE_SET_FLAG(cfg_entry->flags, 917 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 918 T_ETH_MAC_COMMAND_SET); 919 ECORE_SET_FLAG(cfg_entry->flags, 920 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, 921 opcode); 922 923 /* Set a MAC in a ramrod data */ 924 ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr, 925 &cfg_entry->middle_mac_addr, 926 &cfg_entry->lsb_mac_addr, mac); 927 } else 928 ECORE_SET_FLAG(cfg_entry->flags, 929 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 930 T_ETH_MAC_COMMAND_INVALIDATE); 931 } 932 933 static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc 934 __rte_unused, 935 struct ecore_vlan_mac_obj *o, 936 int type, int cam_offset, 937 int add, uint8_t * mac, 938 uint16_t vlan_id, int opcode, 939 struct mac_configuration_cmd 940 *config) 941 { 942 struct mac_configuration_entry *cfg_entry = &config->config_table[0]; 943 944 ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr); 945 ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id, 946 cfg_entry); 947 948 ECORE_MSG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d", 949 (add ? "setting" : "clearing"), 950 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], 951 o->raw.cl_id, cam_offset); 952 } 953 954 /** 955 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data 956 * 957 * @sc: device handle 958 * @o: ecore_vlan_mac_obj 959 * @elem: ecore_exeq_elem 960 * @rule_idx: rule_idx 961 * @cam_offset: cam_offset 962 */ 963 static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc, 964 struct ecore_vlan_mac_obj *o, 965 struct ecore_exeq_elem *elem, 966 __rte_unused int rule_idx, int cam_offset) 967 { 968 struct ecore_raw_obj *raw = &o->raw; 969 struct mac_configuration_cmd *config = 970 (struct mac_configuration_cmd *)(raw->rdata); 971 /* 57711 do not support MOVE command, 972 * so it's either ADD or DEL 973 */ 974 int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? 975 TRUE : FALSE; 976 977 /* Reset the ramrod data buffer */ 978 ECORE_MEMSET(config, 0, sizeof(*config)); 979 980 ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state, 981 cam_offset, add, 982 elem->cmd_data.vlan_mac.u.mac.mac, 0, 983 ETH_VLAN_FILTER_ANY_VLAN, config); 984 } 985 986 /** 987 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element 988 * 989 * @sc: device handle 990 * @p: command parameters 991 * @ppos: pointer to the cookie 992 * 993 * reconfigure next MAC/VLAN/VLAN-MAC element from the 994 * previously configured elements list. 995 * 996 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken 997 * into an account 998 * 999 * pointer to the cookie - that should be given back in the next call to make 1000 * function handle the next element. If *ppos is set to NULL it will restart the 1001 * iterator. If returned *ppos == NULL this means that the last element has been 1002 * handled. 1003 * 1004 */ 1005 static int ecore_vlan_mac_restore(struct bnx2x_softc *sc, 1006 struct ecore_vlan_mac_ramrod_params *p, 1007 struct ecore_vlan_mac_registry_elem **ppos) 1008 { 1009 struct ecore_vlan_mac_registry_elem *pos; 1010 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; 1011 1012 /* If list is empty - there is nothing to do here */ 1013 if (ECORE_LIST_IS_EMPTY(&o->head)) { 1014 *ppos = NULL; 1015 return 0; 1016 } 1017 1018 /* make a step... */ 1019 if (*ppos == NULL) 1020 *ppos = ECORE_LIST_FIRST_ENTRY(&o->head, struct 1021 ecore_vlan_mac_registry_elem, 1022 link); 1023 else 1024 *ppos = ECORE_LIST_NEXT(*ppos, link, 1025 struct ecore_vlan_mac_registry_elem); 1026 1027 pos = *ppos; 1028 1029 /* If it's the last step - return NULL */ 1030 if (ECORE_LIST_IS_LAST(&pos->link, &o->head)) 1031 *ppos = NULL; 1032 1033 /* Prepare a 'user_req' */ 1034 ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u)); 1035 1036 /* Set the command */ 1037 p->user_req.cmd = ECORE_VLAN_MAC_ADD; 1038 1039 /* Set vlan_mac_flags */ 1040 p->user_req.vlan_mac_flags = pos->vlan_mac_flags; 1041 1042 /* Set a restore bit */ 1043 ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags); 1044 1045 return ecore_config_vlan_mac(sc, p); 1046 } 1047 1048 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a 1049 * pointer to an element with a specific criteria and NULL if such an element 1050 * hasn't been found. 1051 */ 1052 static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o, 1053 struct ecore_exeq_elem *elem) 1054 { 1055 struct ecore_exeq_elem *pos; 1056 struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; 1057 1058 /* Check pending for execution commands */ 1059 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, 1060 struct ecore_exeq_elem) 1061 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data, 1062 sizeof(*data)) && 1063 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) 1064 return pos; 1065 1066 return NULL; 1067 } 1068 1069 /** 1070 * ecore_validate_vlan_mac_add - check if an ADD command can be executed 1071 * 1072 * @sc: device handle 1073 * @qo: ecore_qable_obj 1074 * @elem: ecore_exeq_elem 1075 * 1076 * Checks that the requested configuration can be added. If yes and if 1077 * requested, consume CAM credit. 1078 * 1079 * The 'validate' is run after the 'optimize'. 1080 * 1081 */ 1082 static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc, 1083 union ecore_qable_obj *qo, 1084 struct ecore_exeq_elem *elem) 1085 { 1086 struct ecore_vlan_mac_obj *o = &qo->vlan_mac; 1087 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 1088 int rc; 1089 1090 /* Check the registry */ 1091 rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u); 1092 if (rc) { 1093 ECORE_MSG 1094 ("ADD command is not allowed considering current registry state."); 1095 return rc; 1096 } 1097 1098 /* Check if there is a pending ADD command for this 1099 * MAC/VLAN/VLAN-MAC. Return an error if there is. 1100 */ 1101 if (exeq->get(exeq, elem)) { 1102 ECORE_MSG("There is a pending ADD command already"); 1103 return ECORE_EXISTS; 1104 } 1105 1106 /* Consume the credit if not requested not to */ 1107 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1108 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1109 o->get_credit(o))) 1110 return ECORE_INVAL; 1111 1112 return ECORE_SUCCESS; 1113 } 1114 1115 /** 1116 * ecore_validate_vlan_mac_del - check if the DEL command can be executed 1117 * 1118 * @sc: device handle 1119 * @qo: quable object to check 1120 * @elem: element that needs to be deleted 1121 * 1122 * Checks that the requested configuration can be deleted. If yes and if 1123 * requested, returns a CAM credit. 1124 * 1125 * The 'validate' is run after the 'optimize'. 1126 */ 1127 static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc, 1128 union ecore_qable_obj *qo, 1129 struct ecore_exeq_elem *elem) 1130 { 1131 struct ecore_vlan_mac_obj *o = &qo->vlan_mac; 1132 struct ecore_vlan_mac_registry_elem *pos; 1133 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 1134 struct ecore_exeq_elem query_elem; 1135 1136 /* If this classification can not be deleted (doesn't exist) 1137 * - return a ECORE_EXIST. 1138 */ 1139 pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u); 1140 if (!pos) { 1141 ECORE_MSG 1142 ("DEL command is not allowed considering current registry state"); 1143 return ECORE_EXISTS; 1144 } 1145 1146 /* Check if there are pending DEL or MOVE commands for this 1147 * MAC/VLAN/VLAN-MAC. Return an error if so. 1148 */ 1149 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); 1150 1151 /* Check for MOVE commands */ 1152 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE; 1153 if (exeq->get(exeq, &query_elem)) { 1154 PMD_DRV_LOG(ERR, "There is a pending MOVE command already"); 1155 return ECORE_INVAL; 1156 } 1157 1158 /* Check for DEL commands */ 1159 if (exeq->get(exeq, elem)) { 1160 ECORE_MSG("There is a pending DEL command already"); 1161 return ECORE_EXISTS; 1162 } 1163 1164 /* Return the credit to the credit pool if not requested not to */ 1165 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1166 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1167 o->put_credit(o))) { 1168 PMD_DRV_LOG(ERR, "Failed to return a credit"); 1169 return ECORE_INVAL; 1170 } 1171 1172 return ECORE_SUCCESS; 1173 } 1174 1175 /** 1176 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed 1177 * 1178 * @sc: device handle 1179 * @qo: quable object to check (source) 1180 * @elem: element that needs to be moved 1181 * 1182 * Checks that the requested configuration can be moved. If yes and if 1183 * requested, returns a CAM credit. 1184 * 1185 * The 'validate' is run after the 'optimize'. 1186 */ 1187 static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc, 1188 union ecore_qable_obj *qo, 1189 struct ecore_exeq_elem *elem) 1190 { 1191 struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac; 1192 struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; 1193 struct ecore_exeq_elem query_elem; 1194 struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue; 1195 struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue; 1196 1197 /* Check if we can perform this operation based on the current registry 1198 * state. 1199 */ 1200 if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { 1201 ECORE_MSG 1202 ("MOVE command is not allowed considering current registry state"); 1203 return ECORE_INVAL; 1204 } 1205 1206 /* Check if there is an already pending DEL or MOVE command for the 1207 * source object or ADD command for a destination object. Return an 1208 * error if so. 1209 */ 1210 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); 1211 1212 /* Check DEL on source */ 1213 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; 1214 if (src_exeq->get(src_exeq, &query_elem)) { 1215 PMD_DRV_LOG(ERR, 1216 "There is a pending DEL command on the source queue already"); 1217 return ECORE_INVAL; 1218 } 1219 1220 /* Check MOVE on source */ 1221 if (src_exeq->get(src_exeq, elem)) { 1222 ECORE_MSG("There is a pending MOVE command already"); 1223 return ECORE_EXISTS; 1224 } 1225 1226 /* Check ADD on destination */ 1227 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; 1228 if (dest_exeq->get(dest_exeq, &query_elem)) { 1229 PMD_DRV_LOG(ERR, 1230 "There is a pending ADD command on the destination queue already"); 1231 return ECORE_INVAL; 1232 } 1233 1234 /* Consume the credit if not requested not to */ 1235 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST, 1236 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1237 dest_o->get_credit(dest_o))) 1238 return ECORE_INVAL; 1239 1240 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1241 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1242 src_o->put_credit(src_o))) { 1243 /* return the credit taken from dest... */ 1244 dest_o->put_credit(dest_o); 1245 return ECORE_INVAL; 1246 } 1247 1248 return ECORE_SUCCESS; 1249 } 1250 1251 static int ecore_validate_vlan_mac(struct bnx2x_softc *sc, 1252 union ecore_qable_obj *qo, 1253 struct ecore_exeq_elem *elem) 1254 { 1255 switch (elem->cmd_data.vlan_mac.cmd) { 1256 case ECORE_VLAN_MAC_ADD: 1257 return ecore_validate_vlan_mac_add(sc, qo, elem); 1258 case ECORE_VLAN_MAC_DEL: 1259 return ecore_validate_vlan_mac_del(sc, qo, elem); 1260 case ECORE_VLAN_MAC_MOVE: 1261 return ecore_validate_vlan_mac_move(sc, qo, elem); 1262 default: 1263 return ECORE_INVAL; 1264 } 1265 } 1266 1267 static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc, 1268 union ecore_qable_obj *qo, 1269 struct ecore_exeq_elem *elem) 1270 { 1271 int rc = 0; 1272 1273 /* If consumption wasn't required, nothing to do */ 1274 if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1275 &elem->cmd_data.vlan_mac.vlan_mac_flags)) 1276 return ECORE_SUCCESS; 1277 1278 switch (elem->cmd_data.vlan_mac.cmd) { 1279 case ECORE_VLAN_MAC_ADD: 1280 case ECORE_VLAN_MAC_MOVE: 1281 rc = qo->vlan_mac.put_credit(&qo->vlan_mac); 1282 break; 1283 case ECORE_VLAN_MAC_DEL: 1284 rc = qo->vlan_mac.get_credit(&qo->vlan_mac); 1285 break; 1286 default: 1287 return ECORE_INVAL; 1288 } 1289 1290 if (rc != TRUE) 1291 return ECORE_INVAL; 1292 1293 return ECORE_SUCCESS; 1294 } 1295 1296 /** 1297 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes. 1298 * 1299 * @sc: device handle 1300 * @o: ecore_vlan_mac_obj 1301 * 1302 */ 1303 static int ecore_wait_vlan_mac(struct bnx2x_softc *sc, 1304 struct ecore_vlan_mac_obj *o) 1305 { 1306 int cnt = 5000, rc; 1307 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 1308 struct ecore_raw_obj *raw = &o->raw; 1309 1310 while (cnt--) { 1311 /* Wait for the current command to complete */ 1312 rc = raw->wait_comp(sc, raw); 1313 if (rc) 1314 return rc; 1315 1316 /* Wait until there are no pending commands */ 1317 if (!ecore_exe_queue_empty(exeq)) 1318 ECORE_WAIT(sc, 1000); 1319 else 1320 return ECORE_SUCCESS; 1321 } 1322 1323 return ECORE_TIMEOUT; 1324 } 1325 1326 static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc, 1327 struct ecore_vlan_mac_obj *o, 1328 unsigned long *ramrod_flags) 1329 { 1330 int rc = ECORE_SUCCESS; 1331 1332 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); 1333 1334 ECORE_MSG("vlan_mac_execute_step - trying to take writer lock"); 1335 rc = __ecore_vlan_mac_h_write_trylock(sc, o); 1336 1337 if (rc != ECORE_SUCCESS) { 1338 __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags); 1339 1340 /** Calling function should not diffrentiate between this case 1341 * and the case in which there is already a pending ramrod 1342 */ 1343 rc = ECORE_PENDING; 1344 } else { 1345 rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags); 1346 } 1347 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); 1348 1349 return rc; 1350 } 1351 1352 /** 1353 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod 1354 * 1355 * @sc: device handle 1356 * @o: ecore_vlan_mac_obj 1357 * @cqe: 1358 * @cont: if TRUE schedule next execution chunk 1359 * 1360 */ 1361 static int ecore_complete_vlan_mac(struct bnx2x_softc *sc, 1362 struct ecore_vlan_mac_obj *o, 1363 union event_ring_elem *cqe, 1364 unsigned long *ramrod_flags) 1365 { 1366 struct ecore_raw_obj *r = &o->raw; 1367 int rc; 1368 1369 /* Reset pending list */ 1370 ecore_exe_queue_reset_pending(sc, &o->exe_queue); 1371 1372 /* Clear pending */ 1373 r->clear_pending(r); 1374 1375 /* If ramrod failed this is most likely a SW bug */ 1376 if (cqe->message.error) 1377 return ECORE_INVAL; 1378 1379 /* Run the next bulk of pending commands if requested */ 1380 if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) { 1381 rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags); 1382 if (rc < 0) 1383 return rc; 1384 } 1385 1386 /* If there is more work to do return PENDING */ 1387 if (!ecore_exe_queue_empty(&o->exe_queue)) 1388 return ECORE_PENDING; 1389 1390 return ECORE_SUCCESS; 1391 } 1392 1393 /** 1394 * ecore_optimize_vlan_mac - optimize ADD and DEL commands. 1395 * 1396 * @sc: device handle 1397 * @o: ecore_qable_obj 1398 * @elem: ecore_exeq_elem 1399 */ 1400 static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc, 1401 union ecore_qable_obj *qo, 1402 struct ecore_exeq_elem *elem) 1403 { 1404 struct ecore_exeq_elem query, *pos; 1405 struct ecore_vlan_mac_obj *o = &qo->vlan_mac; 1406 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 1407 1408 ECORE_MEMCPY(&query, elem, sizeof(query)); 1409 1410 switch (elem->cmd_data.vlan_mac.cmd) { 1411 case ECORE_VLAN_MAC_ADD: 1412 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; 1413 break; 1414 case ECORE_VLAN_MAC_DEL: 1415 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; 1416 break; 1417 default: 1418 /* Don't handle anything other than ADD or DEL */ 1419 return 0; 1420 } 1421 1422 /* If we found the appropriate element - delete it */ 1423 pos = exeq->get(exeq, &query); 1424 if (pos) { 1425 1426 /* Return the credit of the optimized command */ 1427 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1428 &pos->cmd_data.vlan_mac.vlan_mac_flags)) { 1429 if ((query.cmd_data.vlan_mac.cmd == 1430 ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) { 1431 PMD_DRV_LOG(ERR, 1432 "Failed to return the credit for the optimized ADD command"); 1433 return ECORE_INVAL; 1434 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ 1435 PMD_DRV_LOG(ERR, 1436 "Failed to recover the credit from the optimized DEL command"); 1437 return ECORE_INVAL; 1438 } 1439 } 1440 1441 ECORE_MSG("Optimizing %s command", 1442 (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? 1443 "ADD" : "DEL"); 1444 1445 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue); 1446 ecore_exe_queue_free_elem(sc, pos); 1447 return 1; 1448 } 1449 1450 return 0; 1451 } 1452 1453 /** 1454 * ecore_vlan_mac_get_registry_elem - prepare a registry element 1455 * 1456 * @sc: device handle 1457 * @o: 1458 * @elem: 1459 * @restore: 1460 * @re: 1461 * 1462 * prepare a registry element according to the current command request. 1463 */ 1464 static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc, 1465 struct ecore_vlan_mac_obj *o, 1466 struct ecore_exeq_elem *elem, 1467 int restore, struct 1468 ecore_vlan_mac_registry_elem 1469 **re) 1470 { 1471 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; 1472 struct ecore_vlan_mac_registry_elem *reg_elem; 1473 1474 /* Allocate a new registry element if needed. */ 1475 if (!restore && 1476 ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) { 1477 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc); 1478 if (!reg_elem) 1479 return ECORE_NOMEM; 1480 1481 /* Get a new CAM offset */ 1482 if (!o->get_cam_offset(o, ®_elem->cam_offset)) { 1483 /* This shall never happen, because we have checked the 1484 * CAM availability in the 'validate'. 1485 */ 1486 ECORE_DBG_BREAK_IF(1); 1487 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); 1488 return ECORE_INVAL; 1489 } 1490 1491 ECORE_MSG("Got cam offset %d", reg_elem->cam_offset); 1492 1493 /* Set a VLAN-MAC data */ 1494 ECORE_MEMCPY(®_elem->u, &elem->cmd_data.vlan_mac.u, 1495 sizeof(reg_elem->u)); 1496 1497 /* Copy the flags (needed for DEL and RESTORE flows) */ 1498 reg_elem->vlan_mac_flags = 1499 elem->cmd_data.vlan_mac.vlan_mac_flags; 1500 } else /* DEL, RESTORE */ 1501 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u); 1502 1503 *re = reg_elem; 1504 return ECORE_SUCCESS; 1505 } 1506 1507 /** 1508 * ecore_execute_vlan_mac - execute vlan mac command 1509 * 1510 * @sc: device handle 1511 * @qo: 1512 * @exe_chunk: 1513 * @ramrod_flags: 1514 * 1515 * go and send a ramrod! 1516 */ 1517 static int ecore_execute_vlan_mac(struct bnx2x_softc *sc, 1518 union ecore_qable_obj *qo, 1519 ecore_list_t * exe_chunk, 1520 unsigned long *ramrod_flags) 1521 { 1522 struct ecore_exeq_elem *elem; 1523 struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; 1524 struct ecore_raw_obj *r = &o->raw; 1525 int rc, idx = 0; 1526 int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags); 1527 int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags); 1528 struct ecore_vlan_mac_registry_elem *reg_elem; 1529 enum ecore_vlan_mac_cmd cmd; 1530 1531 /* If DRIVER_ONLY execution is requested, cleanup a registry 1532 * and exit. Otherwise send a ramrod to FW. 1533 */ 1534 if (!drv_only) { 1535 1536 /* Set pending */ 1537 r->set_pending(r); 1538 1539 /* Fill the ramrod data */ 1540 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, 1541 struct ecore_exeq_elem) { 1542 cmd = elem->cmd_data.vlan_mac.cmd; 1543 /* We will add to the target object in MOVE command, so 1544 * change the object for a CAM search. 1545 */ 1546 if (cmd == ECORE_VLAN_MAC_MOVE) 1547 cam_obj = elem->cmd_data.vlan_mac.target_obj; 1548 else 1549 cam_obj = o; 1550 1551 rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj, 1552 elem, restore, 1553 ®_elem); 1554 if (rc) 1555 goto error_exit; 1556 1557 ECORE_DBG_BREAK_IF(!reg_elem); 1558 1559 /* Push a new entry into the registry */ 1560 if (!restore && 1561 ((cmd == ECORE_VLAN_MAC_ADD) || 1562 (cmd == ECORE_VLAN_MAC_MOVE))) 1563 ECORE_LIST_PUSH_HEAD(®_elem->link, 1564 &cam_obj->head); 1565 1566 /* Configure a single command in a ramrod data buffer */ 1567 o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset); 1568 1569 /* MOVE command consumes 2 entries in the ramrod data */ 1570 if (cmd == ECORE_VLAN_MAC_MOVE) 1571 idx += 2; 1572 else 1573 idx++; 1574 } 1575 1576 /* 1577 * No need for an explicit memory barrier here as long we would 1578 * need to ensure the ordering of writing to the SPQ element 1579 * and updating of the SPQ producer which involves a memory 1580 * read and we will have to put a full memory barrier there 1581 * (inside ecore_sp_post()). 1582 */ 1583 1584 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid, 1585 r->rdata_mapping, ETH_CONNECTION_TYPE); 1586 if (rc) 1587 goto error_exit; 1588 } 1589 1590 /* Now, when we are done with the ramrod - clean up the registry */ 1591 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) { 1592 cmd = elem->cmd_data.vlan_mac.cmd; 1593 if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) { 1594 reg_elem = o->check_del(sc, o, 1595 &elem->cmd_data.vlan_mac.u); 1596 1597 ECORE_DBG_BREAK_IF(!reg_elem); 1598 1599 o->put_cam_offset(o, reg_elem->cam_offset); 1600 ECORE_LIST_REMOVE_ENTRY(®_elem->link, &o->head); 1601 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); 1602 } 1603 } 1604 1605 if (!drv_only) 1606 return ECORE_PENDING; 1607 else 1608 return ECORE_SUCCESS; 1609 1610 error_exit: 1611 r->clear_pending(r); 1612 1613 /* Cleanup a registry in case of a failure */ 1614 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) { 1615 cmd = elem->cmd_data.vlan_mac.cmd; 1616 1617 if (cmd == ECORE_VLAN_MAC_MOVE) 1618 cam_obj = elem->cmd_data.vlan_mac.target_obj; 1619 else 1620 cam_obj = o; 1621 1622 /* Delete all newly added above entries */ 1623 if (!restore && 1624 ((cmd == ECORE_VLAN_MAC_ADD) || 1625 (cmd == ECORE_VLAN_MAC_MOVE))) { 1626 reg_elem = o->check_del(sc, cam_obj, 1627 &elem->cmd_data.vlan_mac.u); 1628 if (reg_elem) { 1629 ECORE_LIST_REMOVE_ENTRY(®_elem->link, 1630 &cam_obj->head); 1631 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); 1632 } 1633 } 1634 } 1635 1636 return rc; 1637 } 1638 1639 static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct 1640 ecore_vlan_mac_ramrod_params *p) 1641 { 1642 struct ecore_exeq_elem *elem; 1643 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; 1644 int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags); 1645 1646 /* Allocate the execution queue element */ 1647 elem = ecore_exe_queue_alloc_elem(sc); 1648 if (!elem) 1649 return ECORE_NOMEM; 1650 1651 /* Set the command 'length' */ 1652 switch (p->user_req.cmd) { 1653 case ECORE_VLAN_MAC_MOVE: 1654 elem->cmd_len = 2; 1655 break; 1656 default: 1657 elem->cmd_len = 1; 1658 } 1659 1660 /* Fill the object specific info */ 1661 ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, 1662 sizeof(p->user_req)); 1663 1664 /* Try to add a new command to the pending list */ 1665 return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore); 1666 } 1667 1668 /** 1669 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. 1670 * 1671 * @sc: device handle 1672 * @p: 1673 * 1674 */ 1675 int ecore_config_vlan_mac(struct bnx2x_softc *sc, 1676 struct ecore_vlan_mac_ramrod_params *p) 1677 { 1678 int rc = ECORE_SUCCESS; 1679 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; 1680 unsigned long *ramrod_flags = &p->ramrod_flags; 1681 int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags); 1682 struct ecore_raw_obj *raw = &o->raw; 1683 1684 /* 1685 * Add new elements to the execution list for commands that require it. 1686 */ 1687 if (!cont) { 1688 rc = ecore_vlan_mac_push_new_cmd(sc, p); 1689 if (rc) 1690 return rc; 1691 } 1692 1693 /* If nothing will be executed further in this iteration we want to 1694 * return PENDING if there are pending commands 1695 */ 1696 if (!ecore_exe_queue_empty(&o->exe_queue)) 1697 rc = ECORE_PENDING; 1698 1699 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { 1700 ECORE_MSG 1701 ("RAMROD_DRV_CLR_ONLY requested: clearing a pending bit."); 1702 raw->clear_pending(raw); 1703 } 1704 1705 /* Execute commands if required */ 1706 if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) || 1707 ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) { 1708 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj, 1709 &p->ramrod_flags); 1710 if (rc < 0) 1711 return rc; 1712 } 1713 1714 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set 1715 * then user want to wait until the last command is done. 1716 */ 1717 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { 1718 /* Wait maximum for the current exe_queue length iterations plus 1719 * one (for the current pending command). 1720 */ 1721 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1; 1722 1723 while (!ecore_exe_queue_empty(&o->exe_queue) && 1724 max_iterations--) { 1725 1726 /* Wait for the current command to complete */ 1727 rc = raw->wait_comp(sc, raw); 1728 if (rc) 1729 return rc; 1730 1731 /* Make a next step */ 1732 rc = __ecore_vlan_mac_execute_step(sc, 1733 p->vlan_mac_obj, 1734 &p->ramrod_flags); 1735 if (rc < 0) 1736 return rc; 1737 } 1738 1739 return ECORE_SUCCESS; 1740 } 1741 1742 return rc; 1743 } 1744 1745 /** 1746 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec 1747 * 1748 * @sc: device handle 1749 * @o: 1750 * @vlan_mac_flags: 1751 * @ramrod_flags: execution flags to be used for this deletion 1752 * 1753 * if the last operation has completed successfully and there are no 1754 * more elements left, positive value if the last operation has completed 1755 * successfully and there are more previously configured elements, negative 1756 * value is current operation has failed. 1757 */ 1758 static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc, 1759 struct ecore_vlan_mac_obj *o, 1760 unsigned long *vlan_mac_flags, 1761 unsigned long *ramrod_flags) 1762 { 1763 struct ecore_vlan_mac_registry_elem *pos = NULL; 1764 int rc = 0, read_lock; 1765 struct ecore_vlan_mac_ramrod_params p; 1766 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 1767 struct ecore_exeq_elem *exeq_pos, *exeq_pos_n; 1768 1769 /* Clear pending commands first */ 1770 1771 ECORE_SPIN_LOCK_BH(&exeq->lock); 1772 1773 ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n, 1774 &exeq->exe_queue, link, 1775 struct ecore_exeq_elem) { 1776 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == 1777 *vlan_mac_flags) { 1778 rc = exeq->remove(sc, exeq->owner, exeq_pos); 1779 if (rc) { 1780 PMD_DRV_LOG(ERR, "Failed to remove command"); 1781 ECORE_SPIN_UNLOCK_BH(&exeq->lock); 1782 return rc; 1783 } 1784 ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link, 1785 &exeq->exe_queue); 1786 ecore_exe_queue_free_elem(sc, exeq_pos); 1787 } 1788 } 1789 1790 ECORE_SPIN_UNLOCK_BH(&exeq->lock); 1791 1792 /* Prepare a command request */ 1793 ECORE_MEMSET(&p, 0, sizeof(p)); 1794 p.vlan_mac_obj = o; 1795 p.ramrod_flags = *ramrod_flags; 1796 p.user_req.cmd = ECORE_VLAN_MAC_DEL; 1797 1798 /* Add all but the last VLAN-MAC to the execution queue without actually 1799 * execution anything. 1800 */ 1801 ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags); 1802 ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags); 1803 ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags); 1804 1805 ECORE_MSG("vlan_mac_del_all -- taking vlan_mac_lock (reader)"); 1806 read_lock = ecore_vlan_mac_h_read_lock(sc, o); 1807 if (read_lock != ECORE_SUCCESS) 1808 return read_lock; 1809 1810 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, 1811 struct ecore_vlan_mac_registry_elem) { 1812 if (pos->vlan_mac_flags == *vlan_mac_flags) { 1813 p.user_req.vlan_mac_flags = pos->vlan_mac_flags; 1814 ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u)); 1815 rc = ecore_config_vlan_mac(sc, &p); 1816 if (rc < 0) { 1817 PMD_DRV_LOG(ERR, 1818 "Failed to add a new DEL command"); 1819 ecore_vlan_mac_h_read_unlock(sc, o); 1820 return rc; 1821 } 1822 } 1823 } 1824 1825 ECORE_MSG("vlan_mac_del_all -- releasing vlan_mac_lock (reader)"); 1826 ecore_vlan_mac_h_read_unlock(sc, o); 1827 1828 p.ramrod_flags = *ramrod_flags; 1829 ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags); 1830 1831 return ecore_config_vlan_mac(sc, &p); 1832 } 1833 1834 static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id, 1835 uint32_t cid, uint8_t func_id, 1836 void *rdata, 1837 ecore_dma_addr_t rdata_mapping, int state, 1838 unsigned long *pstate, ecore_obj_type type) 1839 { 1840 raw->func_id = func_id; 1841 raw->cid = cid; 1842 raw->cl_id = cl_id; 1843 raw->rdata = rdata; 1844 raw->rdata_mapping = rdata_mapping; 1845 raw->state = state; 1846 raw->pstate = pstate; 1847 raw->obj_type = type; 1848 raw->check_pending = ecore_raw_check_pending; 1849 raw->clear_pending = ecore_raw_clear_pending; 1850 raw->set_pending = ecore_raw_set_pending; 1851 raw->wait_comp = ecore_raw_wait; 1852 } 1853 1854 static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o, 1855 uint8_t cl_id, uint32_t cid, 1856 uint8_t func_id, void *rdata, 1857 ecore_dma_addr_t rdata_mapping, 1858 int state, unsigned long *pstate, 1859 ecore_obj_type type, 1860 struct ecore_credit_pool_obj 1861 *macs_pool, struct ecore_credit_pool_obj 1862 *vlans_pool) 1863 { 1864 ECORE_LIST_INIT(&o->head); 1865 o->head_reader = 0; 1866 o->head_exe_request = FALSE; 1867 o->saved_ramrod_flags = 0; 1868 1869 o->macs_pool = macs_pool; 1870 o->vlans_pool = vlans_pool; 1871 1872 o->delete_all = ecore_vlan_mac_del_all; 1873 o->restore = ecore_vlan_mac_restore; 1874 o->complete = ecore_complete_vlan_mac; 1875 o->wait = ecore_wait_vlan_mac; 1876 1877 ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, 1878 state, pstate, type); 1879 } 1880 1881 void ecore_init_mac_obj(struct bnx2x_softc *sc, 1882 struct ecore_vlan_mac_obj *mac_obj, 1883 uint8_t cl_id, uint32_t cid, uint8_t func_id, 1884 void *rdata, ecore_dma_addr_t rdata_mapping, int state, 1885 unsigned long *pstate, ecore_obj_type type, 1886 struct ecore_credit_pool_obj *macs_pool) 1887 { 1888 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj; 1889 1890 ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, 1891 rdata_mapping, state, pstate, type, 1892 macs_pool, NULL); 1893 1894 /* CAM credit pool handling */ 1895 mac_obj->get_credit = ecore_get_credit_mac; 1896 mac_obj->put_credit = ecore_put_credit_mac; 1897 mac_obj->get_cam_offset = ecore_get_cam_offset_mac; 1898 mac_obj->put_cam_offset = ecore_put_cam_offset_mac; 1899 1900 if (CHIP_IS_E1x(sc)) { 1901 mac_obj->set_one_rule = ecore_set_one_mac_e1x; 1902 mac_obj->check_del = ecore_check_mac_del; 1903 mac_obj->check_add = ecore_check_mac_add; 1904 mac_obj->check_move = ecore_check_move_always_err; 1905 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; 1906 1907 /* Exe Queue */ 1908 ecore_exe_queue_init(sc, 1909 &mac_obj->exe_queue, 1, qable_obj, 1910 ecore_validate_vlan_mac, 1911 ecore_remove_vlan_mac, 1912 ecore_optimize_vlan_mac, 1913 ecore_execute_vlan_mac, 1914 ecore_exeq_get_mac); 1915 } else { 1916 mac_obj->set_one_rule = ecore_set_one_mac_e2; 1917 mac_obj->check_del = ecore_check_mac_del; 1918 mac_obj->check_add = ecore_check_mac_add; 1919 mac_obj->check_move = ecore_check_move; 1920 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 1921 mac_obj->get_n_elements = ecore_get_n_elements; 1922 1923 /* Exe Queue */ 1924 ecore_exe_queue_init(sc, 1925 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, 1926 qable_obj, ecore_validate_vlan_mac, 1927 ecore_remove_vlan_mac, 1928 ecore_optimize_vlan_mac, 1929 ecore_execute_vlan_mac, 1930 ecore_exeq_get_mac); 1931 } 1932 } 1933 1934 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 1935 static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct 1936 tstorm_eth_mac_filter_config 1937 *mac_filters, uint16_t pf_id) 1938 { 1939 size_t size = sizeof(struct tstorm_eth_mac_filter_config); 1940 1941 uint32_t addr = BAR_TSTRORM_INTMEM + 1942 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); 1943 1944 ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters); 1945 } 1946 1947 static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc, 1948 struct ecore_rx_mode_ramrod_params *p) 1949 { 1950 /* update the sc MAC filter structure */ 1951 uint32_t mask = (1 << p->cl_id); 1952 1953 struct tstorm_eth_mac_filter_config *mac_filters = 1954 (struct tstorm_eth_mac_filter_config *)p->rdata; 1955 1956 /* initial setting is drop-all */ 1957 uint8_t drop_all_ucast = 1, drop_all_mcast = 1; 1958 uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; 1959 uint8_t unmatched_unicast = 0; 1960 1961 /* In e1x there we only take into account rx accept flag since tx switching 1962 * isn't enabled. */ 1963 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags)) 1964 /* accept matched ucast */ 1965 drop_all_ucast = 0; 1966 1967 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags)) 1968 /* accept matched mcast */ 1969 drop_all_mcast = 0; 1970 1971 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { 1972 /* accept all mcast */ 1973 drop_all_ucast = 0; 1974 accp_all_ucast = 1; 1975 } 1976 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { 1977 /* accept all mcast */ 1978 drop_all_mcast = 0; 1979 accp_all_mcast = 1; 1980 } 1981 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags)) 1982 /* accept (all) bcast */ 1983 accp_all_bcast = 1; 1984 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags)) 1985 /* accept unmatched unicasts */ 1986 unmatched_unicast = 1; 1987 1988 mac_filters->ucast_drop_all = drop_all_ucast ? 1989 mac_filters->ucast_drop_all | mask : 1990 mac_filters->ucast_drop_all & ~mask; 1991 1992 mac_filters->mcast_drop_all = drop_all_mcast ? 1993 mac_filters->mcast_drop_all | mask : 1994 mac_filters->mcast_drop_all & ~mask; 1995 1996 mac_filters->ucast_accept_all = accp_all_ucast ? 1997 mac_filters->ucast_accept_all | mask : 1998 mac_filters->ucast_accept_all & ~mask; 1999 2000 mac_filters->mcast_accept_all = accp_all_mcast ? 2001 mac_filters->mcast_accept_all | mask : 2002 mac_filters->mcast_accept_all & ~mask; 2003 2004 mac_filters->bcast_accept_all = accp_all_bcast ? 2005 mac_filters->bcast_accept_all | mask : 2006 mac_filters->bcast_accept_all & ~mask; 2007 2008 mac_filters->unmatched_unicast = unmatched_unicast ? 2009 mac_filters->unmatched_unicast | mask : 2010 mac_filters->unmatched_unicast & ~mask; 2011 2012 ECORE_MSG("drop_ucast 0x%xdrop_mcast 0x%x accp_ucast 0x%x" 2013 "accp_mcast 0x%xaccp_bcast 0x%x", 2014 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, 2015 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, 2016 mac_filters->bcast_accept_all); 2017 2018 /* write the MAC filter structure */ 2019 __storm_memset_mac_filters(sc, mac_filters, p->func_id); 2020 2021 /* The operation is completed */ 2022 ECORE_CLEAR_BIT(p->state, p->pstate); 2023 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 2024 2025 return ECORE_SUCCESS; 2026 } 2027 2028 /* Setup ramrod data */ 2029 static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header 2030 *hdr, uint8_t rule_cnt) 2031 { 2032 hdr->echo = ECORE_CPU_TO_LE32(cid); 2033 hdr->rule_cnt = rule_cnt; 2034 } 2035 2036 static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd 2037 *cmd, int clear_accept_all) 2038 { 2039 uint16_t state; 2040 2041 /* start with 'drop-all' */ 2042 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | 2043 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2044 2045 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags)) 2046 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2047 2048 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags)) 2049 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2050 2051 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) { 2052 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2053 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2054 } 2055 2056 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) { 2057 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2058 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2059 } 2060 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags)) 2061 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2062 2063 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) { 2064 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2065 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2066 } 2067 if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags)) 2068 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; 2069 2070 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ 2071 if (clear_accept_all) { 2072 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2073 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2074 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2075 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2076 } 2077 2078 cmd->state = ECORE_CPU_TO_LE16(state); 2079 } 2080 2081 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc, 2082 struct ecore_rx_mode_ramrod_params *p) 2083 { 2084 struct eth_filter_rules_ramrod_data *data = p->rdata; 2085 int rc; 2086 uint8_t rule_idx = 0; 2087 2088 /* Reset the ramrod data buffer */ 2089 ECORE_MEMSET(data, 0, sizeof(*data)); 2090 2091 /* Setup ramrod data */ 2092 2093 /* Tx (internal switching) */ 2094 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { 2095 data->rules[rule_idx].client_id = p->cl_id; 2096 data->rules[rule_idx].func_id = p->func_id; 2097 2098 data->rules[rule_idx].cmd_general_data = 2099 ETH_FILTER_RULES_CMD_TX_CMD; 2100 2101 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags, 2102 &(data->rules[rule_idx++]), 2103 FALSE); 2104 } 2105 2106 /* Rx */ 2107 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { 2108 data->rules[rule_idx].client_id = p->cl_id; 2109 data->rules[rule_idx].func_id = p->func_id; 2110 2111 data->rules[rule_idx].cmd_general_data = 2112 ETH_FILTER_RULES_CMD_RX_CMD; 2113 2114 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags, 2115 &(data->rules[rule_idx++]), 2116 FALSE); 2117 } 2118 2119 /* If FCoE Queue configuration has been requested configure the Rx and 2120 * internal switching modes for this queue in separate rules. 2121 * 2122 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: 2123 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. 2124 */ 2125 if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { 2126 /* Tx (internal switching) */ 2127 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { 2128 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); 2129 data->rules[rule_idx].func_id = p->func_id; 2130 2131 data->rules[rule_idx].cmd_general_data = 2132 ETH_FILTER_RULES_CMD_TX_CMD; 2133 2134 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags, 2135 &(data->rules 2136 [rule_idx++]), TRUE); 2137 } 2138 2139 /* Rx */ 2140 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { 2141 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); 2142 data->rules[rule_idx].func_id = p->func_id; 2143 2144 data->rules[rule_idx].cmd_general_data = 2145 ETH_FILTER_RULES_CMD_RX_CMD; 2146 2147 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags, 2148 &(data->rules 2149 [rule_idx++]), TRUE); 2150 } 2151 } 2152 2153 /* Set the ramrod header (most importantly - number of rules to 2154 * configure). 2155 */ 2156 ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); 2157 2158 ECORE_MSG 2159 ("About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx", 2160 data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); 2161 2162 /* No need for an explicit memory barrier here as long we would 2163 * need to ensure the ordering of writing to the SPQ element 2164 * and updating of the SPQ producer which involves a memory 2165 * read and we will have to put a full memory barrier there 2166 * (inside ecore_sp_post()). 2167 */ 2168 2169 /* Send a ramrod */ 2170 rc = ecore_sp_post(sc, 2171 RAMROD_CMD_ID_ETH_FILTER_RULES, 2172 p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE); 2173 if (rc) 2174 return rc; 2175 2176 /* Ramrod completion is pending */ 2177 return ECORE_PENDING; 2178 } 2179 2180 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc, 2181 struct ecore_rx_mode_ramrod_params *p) 2182 { 2183 return ecore_state_wait(sc, p->state, p->pstate); 2184 } 2185 2186 static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc, 2187 __rte_unused struct 2188 ecore_rx_mode_ramrod_params *p) 2189 { 2190 /* Do nothing */ 2191 return ECORE_SUCCESS; 2192 } 2193 2194 int ecore_config_rx_mode(struct bnx2x_softc *sc, 2195 struct ecore_rx_mode_ramrod_params *p) 2196 { 2197 int rc; 2198 2199 /* Configure the new classification in the chip */ 2200 if (p->rx_mode_obj->config_rx_mode) { 2201 rc = p->rx_mode_obj->config_rx_mode(sc, p); 2202 if (rc < 0) 2203 return rc; 2204 2205 /* Wait for a ramrod completion if was requested */ 2206 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { 2207 rc = p->rx_mode_obj->wait_comp(sc, p); 2208 if (rc) 2209 return rc; 2210 } 2211 } else { 2212 ECORE_MSG("ERROR: config_rx_mode is NULL"); 2213 return -1; 2214 } 2215 2216 return rc; 2217 } 2218 2219 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o) 2220 { 2221 if (CHIP_IS_E1x(sc)) { 2222 o->wait_comp = ecore_empty_rx_mode_wait; 2223 o->config_rx_mode = ecore_set_rx_mode_e1x; 2224 } else { 2225 o->wait_comp = ecore_wait_rx_mode_comp_e2; 2226 o->config_rx_mode = ecore_set_rx_mode_e2; 2227 } 2228 } 2229 2230 /********************* Multicast verbs: SET, CLEAR ****************************/ 2231 static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac) 2232 { 2233 return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff; 2234 } 2235 2236 struct ecore_mcast_mac_elem { 2237 ecore_list_entry_t link; 2238 uint8_t mac[ETH_ALEN]; 2239 uint8_t pad[2]; /* For a natural alignment of the following buffer */ 2240 }; 2241 2242 struct ecore_pending_mcast_cmd { 2243 ecore_list_entry_t link; 2244 int type; /* ECORE_MCAST_CMD_X */ 2245 union { 2246 ecore_list_t macs_head; 2247 uint32_t macs_num; /* Needed for DEL command */ 2248 int next_bin; /* Needed for RESTORE flow with aprox match */ 2249 } data; 2250 2251 int done; /* set to TRUE, when the command has been handled, 2252 * practically used in 57712 handling only, where one pending 2253 * command may be handled in a few operations. As long as for 2254 * other chips every operation handling is completed in a 2255 * single ramrod, there is no need to utilize this field. 2256 */ 2257 }; 2258 2259 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o) 2260 { 2261 if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) || 2262 o->raw.wait_comp(sc, &o->raw)) 2263 return ECORE_TIMEOUT; 2264 2265 return ECORE_SUCCESS; 2266 } 2267 2268 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused, 2269 struct ecore_mcast_obj *o, 2270 struct ecore_mcast_ramrod_params *p, 2271 enum ecore_mcast_cmd cmd) 2272 { 2273 int total_sz; 2274 struct ecore_pending_mcast_cmd *new_cmd; 2275 struct ecore_mcast_mac_elem *cur_mac = NULL; 2276 struct ecore_mcast_list_elem *pos; 2277 int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ? 2278 p->mcast_list_len : 0); 2279 2280 /* If the command is empty ("handle pending commands only"), break */ 2281 if (!p->mcast_list_len) 2282 return ECORE_SUCCESS; 2283 2284 total_sz = sizeof(*new_cmd) + 2285 macs_list_len * sizeof(struct ecore_mcast_mac_elem); 2286 2287 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ 2288 new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc); 2289 2290 if (!new_cmd) 2291 return ECORE_NOMEM; 2292 2293 ECORE_MSG("About to enqueue a new %d command. macs_list_len=%d", 2294 cmd, macs_list_len); 2295 2296 ECORE_LIST_INIT(&new_cmd->data.macs_head); 2297 2298 new_cmd->type = cmd; 2299 new_cmd->done = FALSE; 2300 2301 switch (cmd) { 2302 case ECORE_MCAST_CMD_ADD: 2303 cur_mac = (struct ecore_mcast_mac_elem *) 2304 ((uint8_t *) new_cmd + sizeof(*new_cmd)); 2305 2306 /* Push the MACs of the current command into the pending command 2307 * MACs list: FIFO 2308 */ 2309 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link, 2310 struct ecore_mcast_list_elem) { 2311 ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN); 2312 ECORE_LIST_PUSH_TAIL(&cur_mac->link, 2313 &new_cmd->data.macs_head); 2314 cur_mac++; 2315 } 2316 2317 break; 2318 2319 case ECORE_MCAST_CMD_DEL: 2320 new_cmd->data.macs_num = p->mcast_list_len; 2321 break; 2322 2323 case ECORE_MCAST_CMD_RESTORE: 2324 new_cmd->data.next_bin = 0; 2325 break; 2326 2327 default: 2328 ECORE_FREE(sc, new_cmd, total_sz); 2329 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd); 2330 return ECORE_INVAL; 2331 } 2332 2333 /* Push the new pending command to the tail of the pending list: FIFO */ 2334 ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head); 2335 2336 o->set_sched(o); 2337 2338 return ECORE_PENDING; 2339 } 2340 2341 /** 2342 * ecore_mcast_get_next_bin - get the next set bin (index) 2343 * 2344 * @o: 2345 * @last: index to start looking from (including) 2346 * 2347 * returns the next found (set) bin or a negative value if none is found. 2348 */ 2349 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last) 2350 { 2351 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; 2352 2353 for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) { 2354 if (o->registry.aprox_match.vec[i]) 2355 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { 2356 int cur_bit = j + BIT_VEC64_ELEM_SZ * i; 2357 if (BIT_VEC64_TEST_BIT 2358 (o->registry.aprox_match.vec, cur_bit)) { 2359 return cur_bit; 2360 } 2361 } 2362 inner_start = 0; 2363 } 2364 2365 /* None found */ 2366 return -1; 2367 } 2368 2369 /** 2370 * ecore_mcast_clear_first_bin - find the first set bin and clear it 2371 * 2372 * @o: 2373 * 2374 * returns the index of the found bin or -1 if none is found 2375 */ 2376 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o) 2377 { 2378 int cur_bit = ecore_mcast_get_next_bin(o, 0); 2379 2380 if (cur_bit >= 0) 2381 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); 2382 2383 return cur_bit; 2384 } 2385 2386 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o) 2387 { 2388 struct ecore_raw_obj *raw = &o->raw; 2389 uint8_t rx_tx_flag = 0; 2390 2391 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || 2392 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 2393 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; 2394 2395 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || 2396 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 2397 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; 2398 2399 return rx_tx_flag; 2400 } 2401 2402 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused, 2403 struct ecore_mcast_obj *o, int idx, 2404 union ecore_mcast_config_data *cfg_data, 2405 enum ecore_mcast_cmd cmd) 2406 { 2407 struct ecore_raw_obj *r = &o->raw; 2408 struct eth_multicast_rules_ramrod_data *data = 2409 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 2410 uint8_t func_id = r->func_id; 2411 uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o); 2412 int bin; 2413 2414 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) 2415 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; 2416 2417 data->rules[idx].cmd_general_data |= rx_tx_add_flag; 2418 2419 /* Get a bin and update a bins' vector */ 2420 switch (cmd) { 2421 case ECORE_MCAST_CMD_ADD: 2422 bin = ecore_mcast_bin_from_mac(cfg_data->mac); 2423 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); 2424 break; 2425 2426 case ECORE_MCAST_CMD_DEL: 2427 /* If there were no more bins to clear 2428 * (ecore_mcast_clear_first_bin() returns -1) then we would 2429 * clear any (0xff) bin. 2430 * See ecore_mcast_validate_e2() for explanation when it may 2431 * happen. 2432 */ 2433 bin = ecore_mcast_clear_first_bin(o); 2434 break; 2435 2436 case ECORE_MCAST_CMD_RESTORE: 2437 bin = cfg_data->bin; 2438 break; 2439 2440 default: 2441 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd); 2442 return; 2443 } 2444 2445 ECORE_MSG("%s bin %d", 2446 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? 2447 "Setting" : "Clearing"), bin); 2448 2449 data->rules[idx].bin_id = (uint8_t) bin; 2450 data->rules[idx].func_id = func_id; 2451 data->rules[idx].engine_id = o->engine_id; 2452 } 2453 2454 /** 2455 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry 2456 * 2457 * @sc: device handle 2458 * @o: 2459 * @start_bin: index in the registry to start from (including) 2460 * @rdata_idx: index in the ramrod data to start from 2461 * 2462 * returns last handled bin index or -1 if all bins have been handled 2463 */ 2464 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc, 2465 struct ecore_mcast_obj *o, 2466 int start_bin, int *rdata_idx) 2467 { 2468 int cur_bin, cnt = *rdata_idx; 2469 union ecore_mcast_config_data cfg_data = { NULL }; 2470 2471 /* go through the registry and configure the bins from it */ 2472 for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0; 2473 cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) { 2474 2475 cfg_data.bin = (uint8_t) cur_bin; 2476 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE); 2477 2478 cnt++; 2479 2480 ECORE_MSG("About to configure a bin %d", cur_bin); 2481 2482 /* Break if we reached the maximum number 2483 * of rules. 2484 */ 2485 if (cnt >= o->max_cmd_len) 2486 break; 2487 } 2488 2489 *rdata_idx = cnt; 2490 2491 return cur_bin; 2492 } 2493 2494 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc, 2495 struct ecore_mcast_obj *o, 2496 struct ecore_pending_mcast_cmd 2497 *cmd_pos, int *line_idx) 2498 { 2499 struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n; 2500 int cnt = *line_idx; 2501 union ecore_mcast_config_data cfg_data = { NULL }; 2502 2503 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n, 2504 &cmd_pos->data.macs_head, link, 2505 struct ecore_mcast_mac_elem) { 2506 2507 cfg_data.mac = &pmac_pos->mac[0]; 2508 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type); 2509 2510 cnt++; 2511 2512 ECORE_MSG 2513 ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC", 2514 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], 2515 pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]); 2516 2517 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link, 2518 &cmd_pos->data.macs_head); 2519 2520 /* Break if we reached the maximum number 2521 * of rules. 2522 */ 2523 if (cnt >= o->max_cmd_len) 2524 break; 2525 } 2526 2527 *line_idx = cnt; 2528 2529 /* if no more MACs to configure - we are done */ 2530 if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head)) 2531 cmd_pos->done = TRUE; 2532 } 2533 2534 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc, 2535 struct ecore_mcast_obj *o, 2536 struct ecore_pending_mcast_cmd 2537 *cmd_pos, int *line_idx) 2538 { 2539 int cnt = *line_idx; 2540 2541 while (cmd_pos->data.macs_num) { 2542 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type); 2543 2544 cnt++; 2545 2546 cmd_pos->data.macs_num--; 2547 2548 ECORE_MSG("Deleting MAC. %d left,cnt is %d", 2549 cmd_pos->data.macs_num, cnt); 2550 2551 /* Break if we reached the maximum 2552 * number of rules. 2553 */ 2554 if (cnt >= o->max_cmd_len) 2555 break; 2556 } 2557 2558 *line_idx = cnt; 2559 2560 /* If we cleared all bins - we are done */ 2561 if (!cmd_pos->data.macs_num) 2562 cmd_pos->done = TRUE; 2563 } 2564 2565 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc, 2566 struct ecore_mcast_obj *o, struct 2567 ecore_pending_mcast_cmd 2568 *cmd_pos, int *line_idx) 2569 { 2570 cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin, 2571 line_idx); 2572 2573 if (cmd_pos->data.next_bin < 0) 2574 /* If o->set_restore returned -1 we are done */ 2575 cmd_pos->done = TRUE; 2576 else 2577 /* Start from the next bin next time */ 2578 cmd_pos->data.next_bin++; 2579 } 2580 2581 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct 2582 ecore_mcast_ramrod_params 2583 *p) 2584 { 2585 struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n; 2586 int cnt = 0; 2587 struct ecore_mcast_obj *o = p->mcast_obj; 2588 2589 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n, 2590 &o->pending_cmds_head, link, 2591 struct ecore_pending_mcast_cmd) { 2592 switch (cmd_pos->type) { 2593 case ECORE_MCAST_CMD_ADD: 2594 ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt); 2595 break; 2596 2597 case ECORE_MCAST_CMD_DEL: 2598 ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt); 2599 break; 2600 2601 case ECORE_MCAST_CMD_RESTORE: 2602 ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos, 2603 &cnt); 2604 break; 2605 2606 default: 2607 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd_pos->type); 2608 return ECORE_INVAL; 2609 } 2610 2611 /* If the command has been completed - remove it from the list 2612 * and free the memory 2613 */ 2614 if (cmd_pos->done) { 2615 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, 2616 &o->pending_cmds_head); 2617 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len); 2618 } 2619 2620 /* Break if we reached the maximum number of rules */ 2621 if (cnt >= o->max_cmd_len) 2622 break; 2623 } 2624 2625 return cnt; 2626 } 2627 2628 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc, 2629 struct ecore_mcast_obj *o, 2630 struct ecore_mcast_ramrod_params *p, 2631 int *line_idx) 2632 { 2633 struct ecore_mcast_list_elem *mlist_pos; 2634 union ecore_mcast_config_data cfg_data = { NULL }; 2635 int cnt = *line_idx; 2636 2637 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, 2638 struct ecore_mcast_list_elem) { 2639 cfg_data.mac = mlist_pos->mac; 2640 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD); 2641 2642 cnt++; 2643 2644 ECORE_MSG 2645 ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC", 2646 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], 2647 mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]); 2648 } 2649 2650 *line_idx = cnt; 2651 } 2652 2653 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc, 2654 struct ecore_mcast_obj *o, 2655 struct ecore_mcast_ramrod_params *p, 2656 int *line_idx) 2657 { 2658 int cnt = *line_idx, i; 2659 2660 for (i = 0; i < p->mcast_list_len; i++) { 2661 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL); 2662 2663 cnt++; 2664 2665 ECORE_MSG("Deleting MAC. %d left", p->mcast_list_len - i - 1); 2666 } 2667 2668 *line_idx = cnt; 2669 } 2670 2671 /** 2672 * ecore_mcast_handle_current_cmd - 2673 * 2674 * @sc: device handle 2675 * @p: 2676 * @cmd: 2677 * @start_cnt: first line in the ramrod data that may be used 2678 * 2679 * This function is called if there is enough place for the current command in 2680 * the ramrod data. 2681 * Returns number of lines filled in the ramrod data in total. 2682 */ 2683 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct 2684 ecore_mcast_ramrod_params *p, 2685 enum ecore_mcast_cmd cmd, 2686 int start_cnt) 2687 { 2688 struct ecore_mcast_obj *o = p->mcast_obj; 2689 int cnt = start_cnt; 2690 2691 ECORE_MSG("p->mcast_list_len=%d", p->mcast_list_len); 2692 2693 switch (cmd) { 2694 case ECORE_MCAST_CMD_ADD: 2695 ecore_mcast_hdl_add(sc, o, p, &cnt); 2696 break; 2697 2698 case ECORE_MCAST_CMD_DEL: 2699 ecore_mcast_hdl_del(sc, o, p, &cnt); 2700 break; 2701 2702 case ECORE_MCAST_CMD_RESTORE: 2703 o->hdl_restore(sc, o, 0, &cnt); 2704 break; 2705 2706 default: 2707 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd); 2708 return ECORE_INVAL; 2709 } 2710 2711 /* The current command has been handled */ 2712 p->mcast_list_len = 0; 2713 2714 return cnt; 2715 } 2716 2717 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc, 2718 struct ecore_mcast_ramrod_params *p, 2719 enum ecore_mcast_cmd cmd) 2720 { 2721 struct ecore_mcast_obj *o = p->mcast_obj; 2722 int reg_sz = o->get_registry_size(o); 2723 2724 switch (cmd) { 2725 /* DEL command deletes all currently configured MACs */ 2726 case ECORE_MCAST_CMD_DEL: 2727 o->set_registry_size(o, 0); 2728 /* fall-through */ 2729 2730 /* RESTORE command will restore the entire multicast configuration */ 2731 case ECORE_MCAST_CMD_RESTORE: 2732 /* Here we set the approximate amount of work to do, which in 2733 * fact may be only less as some MACs in postponed ADD 2734 * command(s) scheduled before this command may fall into 2735 * the same bin and the actual number of bins set in the 2736 * registry would be less than we estimated here. See 2737 * ecore_mcast_set_one_rule_e2() for further details. 2738 */ 2739 p->mcast_list_len = reg_sz; 2740 break; 2741 2742 case ECORE_MCAST_CMD_ADD: 2743 case ECORE_MCAST_CMD_CONT: 2744 /* Here we assume that all new MACs will fall into new bins. 2745 * However we will correct the real registry size after we 2746 * handle all pending commands. 2747 */ 2748 o->set_registry_size(o, reg_sz + p->mcast_list_len); 2749 break; 2750 2751 default: 2752 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd); 2753 return ECORE_INVAL; 2754 } 2755 2756 /* Increase the total number of MACs pending to be configured */ 2757 o->total_pending_num += p->mcast_list_len; 2758 2759 return ECORE_SUCCESS; 2760 } 2761 2762 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc, 2763 struct ecore_mcast_ramrod_params *p, 2764 int old_num_bins) 2765 { 2766 struct ecore_mcast_obj *o = p->mcast_obj; 2767 2768 o->set_registry_size(o, old_num_bins); 2769 o->total_pending_num -= p->mcast_list_len; 2770 } 2771 2772 /** 2773 * ecore_mcast_set_rdata_hdr_e2 - sets a header values 2774 * 2775 * @sc: device handle 2776 * @p: 2777 * @len: number of rules to handle 2778 */ 2779 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc 2780 *sc, struct ecore_mcast_ramrod_params 2781 *p, uint8_t len) 2782 { 2783 struct ecore_raw_obj *r = &p->mcast_obj->raw; 2784 struct eth_multicast_rules_ramrod_data *data = 2785 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 2786 2787 data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | 2788 (ECORE_FILTER_MCAST_PENDING << 2789 ECORE_SWCID_SHIFT)); 2790 data->header.rule_cnt = len; 2791 } 2792 2793 /** 2794 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins 2795 * 2796 * @sc: device handle 2797 * @o: 2798 * 2799 * Recalculate the actual number of set bins in the registry using Brian 2800 * Kernighan's algorithm: it's execution complexity is as a number of set bins. 2801 */ 2802 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o) 2803 { 2804 int i, cnt = 0; 2805 uint64_t elem; 2806 2807 for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) { 2808 elem = o->registry.aprox_match.vec[i]; 2809 for (; elem; cnt++) 2810 elem &= elem - 1; 2811 } 2812 2813 o->set_registry_size(o, cnt); 2814 2815 return ECORE_SUCCESS; 2816 } 2817 2818 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc, 2819 struct ecore_mcast_ramrod_params *p, 2820 enum ecore_mcast_cmd cmd) 2821 { 2822 struct ecore_raw_obj *raw = &p->mcast_obj->raw; 2823 struct ecore_mcast_obj *o = p->mcast_obj; 2824 struct eth_multicast_rules_ramrod_data *data = 2825 (struct eth_multicast_rules_ramrod_data *)(raw->rdata); 2826 int cnt = 0, rc; 2827 2828 /* Reset the ramrod data buffer */ 2829 ECORE_MEMSET(data, 0, sizeof(*data)); 2830 2831 cnt = ecore_mcast_handle_pending_cmds_e2(sc, p); 2832 2833 /* If there are no more pending commands - clear SCHEDULED state */ 2834 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) 2835 o->clear_sched(o); 2836 2837 /* The below may be TRUE if there was enough room in ramrod 2838 * data for all pending commands and for the current 2839 * command. Otherwise the current command would have been added 2840 * to the pending commands and p->mcast_list_len would have been 2841 * zeroed. 2842 */ 2843 if (p->mcast_list_len > 0) 2844 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt); 2845 2846 /* We've pulled out some MACs - update the total number of 2847 * outstanding. 2848 */ 2849 o->total_pending_num -= cnt; 2850 2851 /* send a ramrod */ 2852 ECORE_DBG_BREAK_IF(o->total_pending_num < 0); 2853 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len); 2854 2855 ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt); 2856 2857 /* Update a registry size if there are no more pending operations. 2858 * 2859 * We don't want to change the value of the registry size if there are 2860 * pending operations because we want it to always be equal to the 2861 * exact or the approximate number (see ecore_mcast_validate_e2()) of 2862 * set bins after the last requested operation in order to properly 2863 * evaluate the size of the next DEL/RESTORE operation. 2864 * 2865 * Note that we update the registry itself during command(s) handling 2866 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we 2867 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but 2868 * with a limited amount of update commands (per MAC/bin) and we don't 2869 * know in this scope what the actual state of bins configuration is 2870 * going to be after this ramrod. 2871 */ 2872 if (!o->total_pending_num) 2873 ecore_mcast_refresh_registry_e2(o); 2874 2875 /* If CLEAR_ONLY was requested - don't send a ramrod and clear 2876 * RAMROD_PENDING status immediately. 2877 */ 2878 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 2879 raw->clear_pending(raw); 2880 return ECORE_SUCCESS; 2881 } else { 2882 /* No need for an explicit memory barrier here as long we would 2883 * need to ensure the ordering of writing to the SPQ element 2884 * and updating of the SPQ producer which involves a memory 2885 * read and we will have to put a full memory barrier there 2886 * (inside ecore_sp_post()). 2887 */ 2888 2889 /* Send a ramrod */ 2890 rc = ecore_sp_post(sc, 2891 RAMROD_CMD_ID_ETH_MULTICAST_RULES, 2892 raw->cid, 2893 raw->rdata_mapping, ETH_CONNECTION_TYPE); 2894 if (rc) 2895 return rc; 2896 2897 /* Ramrod completion is pending */ 2898 return ECORE_PENDING; 2899 } 2900 } 2901 2902 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc, 2903 struct ecore_mcast_ramrod_params *p, 2904 enum ecore_mcast_cmd cmd) 2905 { 2906 /* Mark, that there is a work to do */ 2907 if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE)) 2908 p->mcast_list_len = 1; 2909 2910 return ECORE_SUCCESS; 2911 } 2912 2913 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc, 2914 __rte_unused struct ecore_mcast_ramrod_params 2915 *p, __rte_unused int old_num_bins) 2916 { 2917 /* Do nothing */ 2918 } 2919 2920 #define ECORE_57711_SET_MC_FILTER(filter, bit) \ 2921 do { \ 2922 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ 2923 } while (0) 2924 2925 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused, 2926 struct ecore_mcast_obj *o, 2927 struct ecore_mcast_ramrod_params *p, 2928 uint32_t * mc_filter) 2929 { 2930 struct ecore_mcast_list_elem *mlist_pos; 2931 int bit; 2932 2933 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, 2934 struct ecore_mcast_list_elem) { 2935 bit = ecore_mcast_bin_from_mac(mlist_pos->mac); 2936 ECORE_57711_SET_MC_FILTER(mc_filter, bit); 2937 2938 ECORE_MSG 2939 ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d", 2940 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], 2941 mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], 2942 bit); 2943 2944 /* bookkeeping... */ 2945 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit); 2946 } 2947 } 2948 2949 static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc 2950 __rte_unused, 2951 struct ecore_mcast_obj *o, 2952 uint32_t * mc_filter) 2953 { 2954 int bit; 2955 2956 for (bit = ecore_mcast_get_next_bin(o, 0); 2957 bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) { 2958 ECORE_57711_SET_MC_FILTER(mc_filter, bit); 2959 ECORE_MSG("About to set bin %d", bit); 2960 } 2961 } 2962 2963 /* On 57711 we write the multicast MACs' approximate match 2964 * table by directly into the TSTORM's internal RAM. So we don't 2965 * really need to handle any tricks to make it work. 2966 */ 2967 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc, 2968 struct ecore_mcast_ramrod_params *p, 2969 enum ecore_mcast_cmd cmd) 2970 { 2971 int i; 2972 struct ecore_mcast_obj *o = p->mcast_obj; 2973 struct ecore_raw_obj *r = &o->raw; 2974 2975 /* If CLEAR_ONLY has been requested - clear the registry 2976 * and clear a pending bit. 2977 */ 2978 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 2979 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 }; 2980 2981 /* Set the multicast filter bits before writing it into 2982 * the internal memory. 2983 */ 2984 switch (cmd) { 2985 case ECORE_MCAST_CMD_ADD: 2986 ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter); 2987 break; 2988 2989 case ECORE_MCAST_CMD_DEL: 2990 ECORE_MSG("Invalidating multicast MACs configuration"); 2991 2992 /* clear the registry */ 2993 ECORE_MEMSET(o->registry.aprox_match.vec, 0, 2994 sizeof(o->registry.aprox_match.vec)); 2995 break; 2996 2997 case ECORE_MCAST_CMD_RESTORE: 2998 ecore_mcast_hdl_restore_e1h(sc, o, mc_filter); 2999 break; 3000 3001 default: 3002 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd); 3003 return ECORE_INVAL; 3004 } 3005 3006 /* Set the mcast filter in the internal memory */ 3007 for (i = 0; i < ECORE_MC_HASH_SIZE; i++) 3008 REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]); 3009 } else 3010 /* clear the registry */ 3011 ECORE_MEMSET(o->registry.aprox_match.vec, 0, 3012 sizeof(o->registry.aprox_match.vec)); 3013 3014 /* We are done */ 3015 r->clear_pending(r); 3016 3017 return ECORE_SUCCESS; 3018 } 3019 3020 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o) 3021 { 3022 return o->registry.aprox_match.num_bins_set; 3023 } 3024 3025 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o, 3026 int n) 3027 { 3028 o->registry.aprox_match.num_bins_set = n; 3029 } 3030 3031 int ecore_config_mcast(struct bnx2x_softc *sc, 3032 struct ecore_mcast_ramrod_params *p, 3033 enum ecore_mcast_cmd cmd) 3034 { 3035 struct ecore_mcast_obj *o = p->mcast_obj; 3036 struct ecore_raw_obj *r = &o->raw; 3037 int rc = 0, old_reg_size; 3038 3039 /* This is needed to recover number of currently configured mcast macs 3040 * in case of failure. 3041 */ 3042 old_reg_size = o->get_registry_size(o); 3043 3044 /* Do some calculations and checks */ 3045 rc = o->validate(sc, p, cmd); 3046 if (rc) 3047 return rc; 3048 3049 /* Return if there is no work to do */ 3050 if ((!p->mcast_list_len) && (!o->check_sched(o))) 3051 return ECORE_SUCCESS; 3052 3053 ECORE_MSG 3054 ("o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d", 3055 o->total_pending_num, p->mcast_list_len, o->max_cmd_len); 3056 3057 /* Enqueue the current command to the pending list if we can't complete 3058 * it in the current iteration 3059 */ 3060 if (r->check_pending(r) || 3061 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { 3062 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd); 3063 if (rc < 0) 3064 goto error_exit1; 3065 3066 /* As long as the current command is in a command list we 3067 * don't need to handle it separately. 3068 */ 3069 p->mcast_list_len = 0; 3070 } 3071 3072 if (!r->check_pending(r)) { 3073 3074 /* Set 'pending' state */ 3075 r->set_pending(r); 3076 3077 /* Configure the new classification in the chip */ 3078 rc = o->config_mcast(sc, p, cmd); 3079 if (rc < 0) 3080 goto error_exit2; 3081 3082 /* Wait for a ramrod completion if was requested */ 3083 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) 3084 rc = o->wait_comp(sc, o); 3085 } 3086 3087 return rc; 3088 3089 error_exit2: 3090 r->clear_pending(r); 3091 3092 error_exit1: 3093 o->revert(sc, p, old_reg_size); 3094 3095 return rc; 3096 } 3097 3098 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o) 3099 { 3100 ECORE_SMP_MB_BEFORE_CLEAR_BIT(); 3101 ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate); 3102 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 3103 } 3104 3105 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o) 3106 { 3107 ECORE_SMP_MB_BEFORE_CLEAR_BIT(); 3108 ECORE_SET_BIT(o->sched_state, o->raw.pstate); 3109 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 3110 } 3111 3112 static int ecore_mcast_check_sched(struct ecore_mcast_obj *o) 3113 { 3114 return ! !ECORE_TEST_BIT(o->sched_state, o->raw.pstate); 3115 } 3116 3117 static int ecore_mcast_check_pending(struct ecore_mcast_obj *o) 3118 { 3119 return o->raw.check_pending(&o->raw) || o->check_sched(o); 3120 } 3121 3122 void ecore_init_mcast_obj(struct bnx2x_softc *sc, 3123 struct ecore_mcast_obj *mcast_obj, 3124 uint8_t mcast_cl_id, uint32_t mcast_cid, 3125 uint8_t func_id, uint8_t engine_id, void *rdata, 3126 ecore_dma_addr_t rdata_mapping, int state, 3127 unsigned long *pstate, ecore_obj_type type) 3128 { 3129 ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj)); 3130 3131 ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, 3132 rdata, rdata_mapping, state, pstate, type); 3133 3134 mcast_obj->engine_id = engine_id; 3135 3136 ECORE_LIST_INIT(&mcast_obj->pending_cmds_head); 3137 3138 mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED; 3139 mcast_obj->check_sched = ecore_mcast_check_sched; 3140 mcast_obj->set_sched = ecore_mcast_set_sched; 3141 mcast_obj->clear_sched = ecore_mcast_clear_sched; 3142 3143 if (CHIP_IS_E1H(sc)) { 3144 mcast_obj->config_mcast = ecore_mcast_setup_e1h; 3145 mcast_obj->enqueue_cmd = NULL; 3146 mcast_obj->hdl_restore = NULL; 3147 mcast_obj->check_pending = ecore_mcast_check_pending; 3148 3149 /* 57711 doesn't send a ramrod, so it has unlimited credit 3150 * for one command. 3151 */ 3152 mcast_obj->max_cmd_len = -1; 3153 mcast_obj->wait_comp = ecore_mcast_wait; 3154 mcast_obj->set_one_rule = NULL; 3155 mcast_obj->validate = ecore_mcast_validate_e1h; 3156 mcast_obj->revert = ecore_mcast_revert_e1h; 3157 mcast_obj->get_registry_size = 3158 ecore_mcast_get_registry_size_aprox; 3159 mcast_obj->set_registry_size = 3160 ecore_mcast_set_registry_size_aprox; 3161 } else { 3162 mcast_obj->config_mcast = ecore_mcast_setup_e2; 3163 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd; 3164 mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2; 3165 mcast_obj->check_pending = ecore_mcast_check_pending; 3166 mcast_obj->max_cmd_len = 16; 3167 mcast_obj->wait_comp = ecore_mcast_wait; 3168 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2; 3169 mcast_obj->validate = ecore_mcast_validate_e2; 3170 mcast_obj->revert = ecore_mcast_revert_e2; 3171 mcast_obj->get_registry_size = 3172 ecore_mcast_get_registry_size_aprox; 3173 mcast_obj->set_registry_size = 3174 ecore_mcast_set_registry_size_aprox; 3175 } 3176 } 3177 3178 /*************************** Credit handling **********************************/ 3179 3180 /** 3181 * atomic_add_ifless - add if the result is less than a given value. 3182 * 3183 * @v: pointer of type ecore_atomic_t 3184 * @a: the amount to add to v... 3185 * @u: ...if (v + a) is less than u. 3186 * 3187 * returns TRUE if (v + a) was less than u, and FALSE otherwise. 3188 * 3189 */ 3190 static int __atomic_add_ifless(ecore_atomic_t * v, int a, int u) 3191 { 3192 int c, old; 3193 3194 c = ECORE_ATOMIC_READ(v); 3195 for (;;) { 3196 if (ECORE_UNLIKELY(c + a >= u)) 3197 return FALSE; 3198 3199 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a); 3200 if (ECORE_LIKELY(old == c)) 3201 break; 3202 c = old; 3203 } 3204 3205 return TRUE; 3206 } 3207 3208 /** 3209 * atomic_dec_ifmoe - dec if the result is more or equal than a given value. 3210 * 3211 * @v: pointer of type ecore_atomic_t 3212 * @a: the amount to dec from v... 3213 * @u: ...if (v - a) is more or equal than u. 3214 * 3215 * returns TRUE if (v - a) was more or equal than u, and FALSE 3216 * otherwise. 3217 */ 3218 static int __atomic_dec_ifmoe(ecore_atomic_t * v, int a, int u) 3219 { 3220 int c, old; 3221 3222 c = ECORE_ATOMIC_READ(v); 3223 for (;;) { 3224 if (ECORE_UNLIKELY(c - a < u)) 3225 return FALSE; 3226 3227 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a); 3228 if (ECORE_LIKELY(old == c)) 3229 break; 3230 c = old; 3231 } 3232 3233 return TRUE; 3234 } 3235 3236 static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt) 3237 { 3238 int rc; 3239 3240 ECORE_SMP_MB(); 3241 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); 3242 ECORE_SMP_MB(); 3243 3244 return rc; 3245 } 3246 3247 static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt) 3248 { 3249 int rc; 3250 3251 ECORE_SMP_MB(); 3252 3253 /* Don't let to refill if credit + cnt > pool_sz */ 3254 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); 3255 3256 ECORE_SMP_MB(); 3257 3258 return rc; 3259 } 3260 3261 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o) 3262 { 3263 int cur_credit; 3264 3265 ECORE_SMP_MB(); 3266 cur_credit = ECORE_ATOMIC_READ(&o->credit); 3267 3268 return cur_credit; 3269 } 3270 3271 static int ecore_credit_pool_always_TRUE(__rte_unused struct 3272 ecore_credit_pool_obj *o, 3273 __rte_unused int cnt) 3274 { 3275 return TRUE; 3276 } 3277 3278 static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o, 3279 int *offset) 3280 { 3281 int idx, vec, i; 3282 3283 *offset = -1; 3284 3285 /* Find "internal cam-offset" then add to base for this object... */ 3286 for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) { 3287 3288 /* Skip the current vector if there are no free entries in it */ 3289 if (!o->pool_mirror[vec]) 3290 continue; 3291 3292 /* If we've got here we are going to find a free entry */ 3293 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; 3294 i < BIT_VEC64_ELEM_SZ; idx++, i++) 3295 3296 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { 3297 /* Got one!! */ 3298 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); 3299 *offset = o->base_pool_offset + idx; 3300 return TRUE; 3301 } 3302 } 3303 3304 return FALSE; 3305 } 3306 3307 static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o, 3308 int offset) 3309 { 3310 if (offset < o->base_pool_offset) 3311 return FALSE; 3312 3313 offset -= o->base_pool_offset; 3314 3315 if (offset >= o->pool_sz) 3316 return FALSE; 3317 3318 /* Return the entry to the pool */ 3319 BIT_VEC64_SET_BIT(o->pool_mirror, offset); 3320 3321 return TRUE; 3322 } 3323 3324 static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct 3325 ecore_credit_pool_obj *o, 3326 __rte_unused int offset) 3327 { 3328 return TRUE; 3329 } 3330 3331 static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct 3332 ecore_credit_pool_obj *o, 3333 __rte_unused int *offset) 3334 { 3335 *offset = -1; 3336 return TRUE; 3337 } 3338 3339 /** 3340 * ecore_init_credit_pool - initialize credit pool internals. 3341 * 3342 * @p: 3343 * @base: Base entry in the CAM to use. 3344 * @credit: pool size. 3345 * 3346 * If base is negative no CAM entries handling will be performed. 3347 * If credit is negative pool operations will always succeed (unlimited pool). 3348 * 3349 */ 3350 static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p, 3351 int base, int credit) 3352 { 3353 /* Zero the object first */ 3354 ECORE_MEMSET(p, 0, sizeof(*p)); 3355 3356 /* Set the table to all 1s */ 3357 ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); 3358 3359 /* Init a pool as full */ 3360 ECORE_ATOMIC_SET(&p->credit, credit); 3361 3362 /* The total poll size */ 3363 p->pool_sz = credit; 3364 3365 p->base_pool_offset = base; 3366 3367 /* Commit the change */ 3368 ECORE_SMP_MB(); 3369 3370 p->check = ecore_credit_pool_check; 3371 3372 /* if pool credit is negative - disable the checks */ 3373 if (credit >= 0) { 3374 p->put = ecore_credit_pool_put; 3375 p->get = ecore_credit_pool_get; 3376 p->put_entry = ecore_credit_pool_put_entry; 3377 p->get_entry = ecore_credit_pool_get_entry; 3378 } else { 3379 p->put = ecore_credit_pool_always_TRUE; 3380 p->get = ecore_credit_pool_always_TRUE; 3381 p->put_entry = ecore_credit_pool_put_entry_always_TRUE; 3382 p->get_entry = ecore_credit_pool_get_entry_always_TRUE; 3383 } 3384 3385 /* If base is negative - disable entries handling */ 3386 if (base < 0) { 3387 p->put_entry = ecore_credit_pool_put_entry_always_TRUE; 3388 p->get_entry = ecore_credit_pool_get_entry_always_TRUE; 3389 } 3390 } 3391 3392 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc, 3393 struct ecore_credit_pool_obj *p, 3394 uint8_t func_id, uint8_t func_num) 3395 { 3396 3397 #define ECORE_CAM_SIZE_EMUL 5 3398 3399 int cam_sz; 3400 3401 if (CHIP_IS_E1H(sc)) { 3402 /* CAM credit is equally divided between all active functions 3403 * on the PORT!. 3404 */ 3405 if ((func_num > 0)) { 3406 if (!CHIP_REV_IS_SLOW(sc)) 3407 cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num)); 3408 else 3409 cam_sz = ECORE_CAM_SIZE_EMUL; 3410 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz); 3411 } else { 3412 /* this should never happen! Block MAC operations. */ 3413 ecore_init_credit_pool(p, 0, 0); 3414 } 3415 3416 } else { 3417 3418 /* 3419 * CAM credit is equaly divided between all active functions 3420 * on the PATH. 3421 */ 3422 if ((func_num > 0)) { 3423 if (!CHIP_REV_IS_SLOW(sc)) 3424 cam_sz = (MAX_MAC_CREDIT_E2 / func_num); 3425 else 3426 cam_sz = ECORE_CAM_SIZE_EMUL; 3427 3428 /* No need for CAM entries handling for 57712 and 3429 * newer. 3430 */ 3431 ecore_init_credit_pool(p, -1, cam_sz); 3432 } else { 3433 /* this should never happen! Block MAC operations. */ 3434 ecore_init_credit_pool(p, 0, 0); 3435 } 3436 } 3437 } 3438 3439 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc, 3440 struct ecore_credit_pool_obj *p, 3441 uint8_t func_id, uint8_t func_num) 3442 { 3443 if (CHIP_IS_E1x(sc)) { 3444 /* There is no VLAN credit in HW on 57711 only 3445 * MAC / MAC-VLAN can be set 3446 */ 3447 ecore_init_credit_pool(p, 0, -1); 3448 } else { 3449 /* CAM credit is equally divided between all active functions 3450 * on the PATH. 3451 */ 3452 if (func_num > 0) { 3453 int credit = MAX_VLAN_CREDIT_E2 / func_num; 3454 ecore_init_credit_pool(p, func_id * credit, credit); 3455 } else 3456 /* this should never happen! Block VLAN operations. */ 3457 ecore_init_credit_pool(p, 0, 0); 3458 } 3459 } 3460 3461 /****************** RSS Configuration ******************/ 3462 3463 /** 3464 * ecore_setup_rss - configure RSS 3465 * 3466 * @sc: device handle 3467 * @p: rss configuration 3468 * 3469 * sends on UPDATE ramrod for that matter. 3470 */ 3471 static int ecore_setup_rss(struct bnx2x_softc *sc, 3472 struct ecore_config_rss_params *p) 3473 { 3474 struct ecore_rss_config_obj *o = p->rss_obj; 3475 struct ecore_raw_obj *r = &o->raw; 3476 struct eth_rss_update_ramrod_data *data = 3477 (struct eth_rss_update_ramrod_data *)(r->rdata); 3478 uint8_t rss_mode = 0; 3479 int rc; 3480 3481 ECORE_MEMSET(data, 0, sizeof(*data)); 3482 3483 ECORE_MSG("Configuring RSS"); 3484 3485 /* Set an echo field */ 3486 data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | 3487 (r->state << ECORE_SWCID_SHIFT)); 3488 3489 /* RSS mode */ 3490 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags)) 3491 rss_mode = ETH_RSS_MODE_DISABLED; 3492 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags)) 3493 rss_mode = ETH_RSS_MODE_REGULAR; 3494 3495 data->rss_mode = rss_mode; 3496 3497 ECORE_MSG("rss_mode=%d", rss_mode); 3498 3499 /* RSS capabilities */ 3500 if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags)) 3501 data->capabilities |= 3502 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; 3503 3504 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags)) 3505 data->capabilities |= 3506 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; 3507 3508 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags)) 3509 data->capabilities |= 3510 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; 3511 3512 if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags)) 3513 data->capabilities |= 3514 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; 3515 3516 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags)) 3517 data->capabilities |= 3518 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; 3519 3520 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags)) 3521 data->capabilities |= 3522 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; 3523 3524 if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) { 3525 data->udp_4tuple_dst_port_mask = 3526 ECORE_CPU_TO_LE16(p->tunnel_mask); 3527 data->udp_4tuple_dst_port_value = 3528 ECORE_CPU_TO_LE16(p->tunnel_value); 3529 } 3530 3531 /* Hashing mask */ 3532 data->rss_result_mask = p->rss_result_mask; 3533 3534 /* RSS engine ID */ 3535 data->rss_engine_id = o->engine_id; 3536 3537 ECORE_MSG("rss_engine_id=%d", data->rss_engine_id); 3538 3539 /* Indirection table */ 3540 ECORE_MEMCPY(data->indirection_table, p->ind_table, 3541 T_ETH_INDIRECTION_TABLE_SIZE); 3542 3543 /* Remember the last configuration */ 3544 ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); 3545 3546 /* RSS keys */ 3547 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) { 3548 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0], 3549 sizeof(data->rss_key)); 3550 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 3551 } 3552 3553 /* No need for an explicit memory barrier here as long we would 3554 * need to ensure the ordering of writing to the SPQ element 3555 * and updating of the SPQ producer which involves a memory 3556 * read and we will have to put a full memory barrier there 3557 * (inside ecore_sp_post()). 3558 */ 3559 3560 /* Send a ramrod */ 3561 rc = ecore_sp_post(sc, 3562 RAMROD_CMD_ID_ETH_RSS_UPDATE, 3563 r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE); 3564 3565 if (rc < 0) 3566 return rc; 3567 3568 return ECORE_PENDING; 3569 } 3570 3571 int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p) 3572 { 3573 int rc; 3574 struct ecore_rss_config_obj *o = p->rss_obj; 3575 struct ecore_raw_obj *r = &o->raw; 3576 3577 /* Do nothing if only driver cleanup was requested */ 3578 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) 3579 return ECORE_SUCCESS; 3580 3581 r->set_pending(r); 3582 3583 rc = o->config_rss(sc, p); 3584 if (rc < 0) { 3585 r->clear_pending(r); 3586 return rc; 3587 } 3588 3589 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) 3590 rc = r->wait_comp(sc, r); 3591 3592 return rc; 3593 } 3594 3595 void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj, 3596 uint8_t cl_id, uint32_t cid, uint8_t func_id, 3597 uint8_t engine_id, void *rdata, 3598 ecore_dma_addr_t rdata_mapping, int state, 3599 unsigned long *pstate, ecore_obj_type type) 3600 { 3601 ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, 3602 rdata_mapping, state, pstate, type); 3603 3604 rss_obj->engine_id = engine_id; 3605 rss_obj->config_rss = ecore_setup_rss; 3606 } 3607 3608 /********************** Queue state object ***********************************/ 3609 3610 /** 3611 * ecore_queue_state_change - perform Queue state change transition 3612 * 3613 * @sc: device handle 3614 * @params: parameters to perform the transition 3615 * 3616 * returns 0 in case of successfully completed transition, negative error 3617 * code in case of failure, positive (EBUSY) value if there is a completion 3618 * to that is still pending (possible only if RAMROD_COMP_WAIT is 3619 * not set in params->ramrod_flags for asynchronous commands). 3620 * 3621 */ 3622 int ecore_queue_state_change(struct bnx2x_softc *sc, 3623 struct ecore_queue_state_params *params) 3624 { 3625 struct ecore_queue_sp_obj *o = params->q_obj; 3626 int rc, pending_bit; 3627 unsigned long *pending = &o->pending; 3628 3629 /* Check that the requested transition is legal */ 3630 rc = o->check_transition(sc, o, params); 3631 if (rc) { 3632 PMD_DRV_LOG(ERR, "check transition returned an error. rc %d", 3633 rc); 3634 return ECORE_INVAL; 3635 } 3636 3637 /* Set "pending" bit */ 3638 ECORE_MSG("pending bit was=%lx", o->pending); 3639 pending_bit = o->set_pending(o, params); 3640 ECORE_MSG("pending bit now=%lx", o->pending); 3641 3642 /* Don't send a command if only driver cleanup was requested */ 3643 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) 3644 o->complete_cmd(sc, o, pending_bit); 3645 else { 3646 /* Send a ramrod */ 3647 rc = o->send_cmd(sc, params); 3648 if (rc) { 3649 o->next_state = ECORE_Q_STATE_MAX; 3650 ECORE_CLEAR_BIT(pending_bit, pending); 3651 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 3652 return rc; 3653 } 3654 3655 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { 3656 rc = o->wait_comp(sc, o, pending_bit); 3657 if (rc) 3658 return rc; 3659 3660 return ECORE_SUCCESS; 3661 } 3662 } 3663 3664 return ECORE_RET_PENDING(pending_bit, pending); 3665 } 3666 3667 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj, 3668 struct ecore_queue_state_params *params) 3669 { 3670 enum ecore_queue_cmd cmd = params->cmd, bit; 3671 3672 /* ACTIVATE and DEACTIVATE commands are implemented on top of 3673 * UPDATE command. 3674 */ 3675 if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE)) 3676 bit = ECORE_Q_CMD_UPDATE; 3677 else 3678 bit = cmd; 3679 3680 ECORE_SET_BIT(bit, &obj->pending); 3681 return bit; 3682 } 3683 3684 static int ecore_queue_wait_comp(struct bnx2x_softc *sc, 3685 struct ecore_queue_sp_obj *o, 3686 enum ecore_queue_cmd cmd) 3687 { 3688 return ecore_state_wait(sc, cmd, &o->pending); 3689 } 3690 3691 /** 3692 * ecore_queue_comp_cmd - complete the state change command. 3693 * 3694 * @sc: device handle 3695 * @o: 3696 * @cmd: 3697 * 3698 * Checks that the arrived completion is expected. 3699 */ 3700 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused, 3701 struct ecore_queue_sp_obj *o, 3702 enum ecore_queue_cmd cmd) 3703 { 3704 unsigned long cur_pending = o->pending; 3705 3706 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { 3707 PMD_DRV_LOG(ERR, 3708 "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d", 3709 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state, 3710 cur_pending, o->next_state); 3711 return ECORE_INVAL; 3712 } 3713 3714 if (o->next_tx_only >= o->max_cos) 3715 /* >= because tx only must always be smaller than cos since the 3716 * primary connection supports COS 0 3717 */ 3718 PMD_DRV_LOG(ERR, 3719 "illegal value for next tx_only: %d. max cos was %d", 3720 o->next_tx_only, o->max_cos); 3721 3722 ECORE_MSG("Completing command %d for queue %d, setting state to %d", 3723 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state); 3724 3725 if (o->next_tx_only) /* print num tx-only if any exist */ 3726 ECORE_MSG("primary cid %d: num tx-only cons %d", 3727 o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only); 3728 3729 o->state = o->next_state; 3730 o->num_tx_only = o->next_tx_only; 3731 o->next_state = ECORE_Q_STATE_MAX; 3732 3733 /* It's important that o->state and o->next_state are 3734 * updated before o->pending. 3735 */ 3736 wmb(); 3737 3738 ECORE_CLEAR_BIT(cmd, &o->pending); 3739 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 3740 3741 return ECORE_SUCCESS; 3742 } 3743 3744 static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params 3745 *cmd_params, 3746 struct client_init_ramrod_data *data) 3747 { 3748 struct ecore_queue_setup_params *params = &cmd_params->params.setup; 3749 3750 /* Rx data */ 3751 3752 /* IPv6 TPA supported for E2 and above only */ 3753 data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6, 3754 ¶ms->flags) * 3755 CLIENT_INIT_RX_DATA_TPA_EN_IPV6; 3756 } 3757 3758 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused, 3759 struct ecore_queue_sp_obj *o, 3760 struct ecore_general_setup_params 3761 *params, struct client_init_general_data 3762 *gen_data, unsigned long *flags) 3763 { 3764 gen_data->client_id = o->cl_id; 3765 3766 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) { 3767 gen_data->statistics_counter_id = params->stat_id; 3768 gen_data->statistics_en_flg = 1; 3769 gen_data->statistics_zero_flg = 3770 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags); 3771 } else 3772 gen_data->statistics_counter_id = 3773 DISABLE_STATISTIC_COUNTER_ID_VALUE; 3774 3775 gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags); 3776 gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags); 3777 gen_data->sp_client_id = params->spcl_id; 3778 gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu); 3779 gen_data->func_id = o->func_id; 3780 3781 gen_data->cos = params->cos; 3782 3783 gen_data->traffic_type = 3784 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ? 3785 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; 3786 3787 ECORE_MSG("flags: active %d, cos %d, stats en %d", 3788 gen_data->activate_flg, gen_data->cos, 3789 gen_data->statistics_en_flg); 3790 } 3791 3792 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params, 3793 struct client_init_tx_data *tx_data, 3794 unsigned long *flags) 3795 { 3796 tx_data->enforce_security_flg = 3797 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags); 3798 tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan); 3799 tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags); 3800 tx_data->tx_switching_flg = 3801 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags); 3802 tx_data->anti_spoofing_flg = 3803 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags); 3804 tx_data->force_default_pri_flg = 3805 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags); 3806 tx_data->refuse_outband_vlan_flg = 3807 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags); 3808 tx_data->tunnel_non_lso_pcsum_location = 3809 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT : 3810 CSUM_ON_BD; 3811 3812 tx_data->tx_status_block_id = params->fw_sb_id; 3813 tx_data->tx_sb_index_number = params->sb_cq_index; 3814 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 3815 3816 tx_data->tx_bd_page_base.lo = 3817 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); 3818 tx_data->tx_bd_page_base.hi = 3819 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); 3820 3821 /* Don't configure any Tx switching mode during queue SETUP */ 3822 tx_data->state = 0; 3823 } 3824 3825 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params, 3826 struct client_init_rx_data *rx_data) 3827 { 3828 /* flow control data */ 3829 rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo); 3830 rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi); 3831 rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo); 3832 rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi); 3833 rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo); 3834 rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi); 3835 rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map); 3836 } 3837 3838 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params, 3839 struct client_init_rx_data *rx_data, 3840 unsigned long *flags) 3841 { 3842 rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) * 3843 CLIENT_INIT_RX_DATA_TPA_EN_IPV4; 3844 rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) * 3845 CLIENT_INIT_RX_DATA_TPA_MODE; 3846 rx_data->vmqueue_mode_en_flg = 0; 3847 3848 rx_data->extra_data_over_sgl_en_flg = 3849 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags); 3850 rx_data->cache_line_alignment_log_size = params->cache_line_log; 3851 rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags); 3852 rx_data->client_qzone_id = params->cl_qzone_id; 3853 rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz); 3854 3855 /* Always start in DROP_ALL mode */ 3856 rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | 3857 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); 3858 3859 /* We don't set drop flags */ 3860 rx_data->drop_ip_cs_err_flg = 0; 3861 rx_data->drop_tcp_cs_err_flg = 0; 3862 rx_data->drop_ttl0_flg = 0; 3863 rx_data->drop_udp_cs_err_flg = 0; 3864 rx_data->inner_vlan_removal_enable_flg = 3865 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags); 3866 rx_data->outer_vlan_removal_enable_flg = 3867 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags); 3868 rx_data->status_block_id = params->fw_sb_id; 3869 rx_data->rx_sb_index_number = params->sb_cq_index; 3870 rx_data->max_tpa_queues = params->max_tpa_queues; 3871 rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz); 3872 rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); 3873 rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); 3874 rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map)); 3875 rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map)); 3876 rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS, 3877 flags); 3878 3879 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) { 3880 rx_data->approx_mcast_engine_id = params->mcast_engine_id; 3881 rx_data->is_approx_mcast = 1; 3882 } 3883 3884 rx_data->rss_engine_id = params->rss_engine_id; 3885 3886 /* silent vlan removal */ 3887 rx_data->silent_vlan_removal_flg = 3888 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags); 3889 rx_data->silent_vlan_value = 3890 ECORE_CPU_TO_LE16(params->silent_removal_value); 3891 rx_data->silent_vlan_mask = 3892 ECORE_CPU_TO_LE16(params->silent_removal_mask); 3893 } 3894 3895 /* initialize the general, tx and rx parts of a queue object */ 3896 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params 3897 *cmd_params, 3898 struct client_init_ramrod_data *data) 3899 { 3900 ecore_q_fill_init_general_data(sc, cmd_params->q_obj, 3901 &cmd_params->params.setup.gen_params, 3902 &data->general, 3903 &cmd_params->params.setup.flags); 3904 3905 ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params, 3906 &data->tx, &cmd_params->params.setup.flags); 3907 3908 ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params, 3909 &data->rx, &cmd_params->params.setup.flags); 3910 3911 ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params, 3912 &data->rx); 3913 } 3914 3915 /* initialize the general and tx parts of a tx-only queue object */ 3916 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params 3917 *cmd_params, 3918 struct tx_queue_init_ramrod_data *data) 3919 { 3920 ecore_q_fill_init_general_data(sc, cmd_params->q_obj, 3921 &cmd_params->params.tx_only.gen_params, 3922 &data->general, 3923 &cmd_params->params.tx_only.flags); 3924 3925 ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params, 3926 &data->tx, &cmd_params->params.tx_only.flags); 3927 3928 ECORE_MSG("cid %d, tx bd page lo %x hi %x", 3929 cmd_params->q_obj->cids[0], 3930 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi); 3931 } 3932 3933 /** 3934 * ecore_q_init - init HW/FW queue 3935 * 3936 * @sc: device handle 3937 * @params: 3938 * 3939 * HW/FW initial Queue configuration: 3940 * - HC: Rx and Tx 3941 * - CDU context validation 3942 * 3943 */ 3944 static int ecore_q_init(struct bnx2x_softc *sc, 3945 struct ecore_queue_state_params *params) 3946 { 3947 struct ecore_queue_sp_obj *o = params->q_obj; 3948 struct ecore_queue_init_params *init = ¶ms->params.init; 3949 uint16_t hc_usec; 3950 uint8_t cos; 3951 3952 /* Tx HC configuration */ 3953 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) && 3954 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) { 3955 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; 3956 3957 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id, 3958 init->tx.sb_cq_index, 3959 !ECORE_TEST_BIT 3960 (ECORE_Q_FLG_HC_EN, 3961 &init->tx.flags), hc_usec); 3962 } 3963 3964 /* Rx HC configuration */ 3965 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) && 3966 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) { 3967 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; 3968 3969 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id, 3970 init->rx.sb_cq_index, 3971 !ECORE_TEST_BIT 3972 (ECORE_Q_FLG_HC_EN, 3973 &init->rx.flags), hc_usec); 3974 } 3975 3976 /* Set CDU context validation values */ 3977 for (cos = 0; cos < o->max_cos; cos++) { 3978 ECORE_MSG("setting context validation. cid %d, cos %d", 3979 o->cids[cos], cos); 3980 ECORE_MSG("context pointer %p", init->cxts[cos]); 3981 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]); 3982 } 3983 3984 /* As no ramrod is sent, complete the command immediately */ 3985 o->complete_cmd(sc, o, ECORE_Q_CMD_INIT); 3986 3987 ECORE_MMIOWB(); 3988 ECORE_SMP_MB(); 3989 3990 return ECORE_SUCCESS; 3991 } 3992 3993 static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params 3994 *params) 3995 { 3996 struct ecore_queue_sp_obj *o = params->q_obj; 3997 struct client_init_ramrod_data *rdata = 3998 (struct client_init_ramrod_data *)o->rdata; 3999 ecore_dma_addr_t data_mapping = o->rdata_mapping; 4000 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 4001 4002 /* Clear the ramrod data */ 4003 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 4004 4005 /* Fill the ramrod data */ 4006 ecore_q_fill_setup_data_cmn(sc, params, rdata); 4007 4008 /* No need for an explicit memory barrier here as long we would 4009 * need to ensure the ordering of writing to the SPQ element 4010 * and updating of the SPQ producer which involves a memory 4011 * read and we will have to put a full memory barrier there 4012 * (inside ecore_sp_post()). 4013 */ 4014 4015 return ecore_sp_post(sc, 4016 ramrod, 4017 o->cids[ECORE_PRIMARY_CID_INDEX], 4018 data_mapping, ETH_CONNECTION_TYPE); 4019 } 4020 4021 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc, 4022 struct ecore_queue_state_params *params) 4023 { 4024 struct ecore_queue_sp_obj *o = params->q_obj; 4025 struct client_init_ramrod_data *rdata = 4026 (struct client_init_ramrod_data *)o->rdata; 4027 ecore_dma_addr_t data_mapping = o->rdata_mapping; 4028 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 4029 4030 /* Clear the ramrod data */ 4031 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 4032 4033 /* Fill the ramrod data */ 4034 ecore_q_fill_setup_data_cmn(sc, params, rdata); 4035 ecore_q_fill_setup_data_e2(params, rdata); 4036 4037 /* No need for an explicit memory barrier here as long we would 4038 * need to ensure the ordering of writing to the SPQ element 4039 * and updating of the SPQ producer which involves a memory 4040 * read and we will have to put a full memory barrier there 4041 * (inside ecore_sp_post()). 4042 */ 4043 4044 return ecore_sp_post(sc, 4045 ramrod, 4046 o->cids[ECORE_PRIMARY_CID_INDEX], 4047 data_mapping, ETH_CONNECTION_TYPE); 4048 } 4049 4050 static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params 4051 *params) 4052 { 4053 struct ecore_queue_sp_obj *o = params->q_obj; 4054 struct tx_queue_init_ramrod_data *rdata = 4055 (struct tx_queue_init_ramrod_data *)o->rdata; 4056 ecore_dma_addr_t data_mapping = o->rdata_mapping; 4057 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; 4058 struct ecore_queue_setup_tx_only_params *tx_only_params = 4059 ¶ms->params.tx_only; 4060 uint8_t cid_index = tx_only_params->cid_index; 4061 4062 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) 4063 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP; 4064 ECORE_MSG("sending forward tx-only ramrod"); 4065 4066 if (cid_index >= o->max_cos) { 4067 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range", 4068 o->cl_id, cid_index); 4069 return ECORE_INVAL; 4070 } 4071 4072 ECORE_MSG("parameters received: cos: %d sp-id: %d", 4073 tx_only_params->gen_params.cos, 4074 tx_only_params->gen_params.spcl_id); 4075 4076 /* Clear the ramrod data */ 4077 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 4078 4079 /* Fill the ramrod data */ 4080 ecore_q_fill_setup_tx_only(sc, params, rdata); 4081 4082 ECORE_MSG 4083 ("sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d", 4084 o->cids[cid_index], rdata->general.client_id, 4085 rdata->general.sp_client_id, rdata->general.cos); 4086 4087 /* No need for an explicit memory barrier here as long we would 4088 * need to ensure the ordering of writing to the SPQ element 4089 * and updating of the SPQ producer which involves a memory 4090 * read and we will have to put a full memory barrier there 4091 * (inside ecore_sp_post()). 4092 */ 4093 4094 return ecore_sp_post(sc, ramrod, o->cids[cid_index], 4095 data_mapping, ETH_CONNECTION_TYPE); 4096 } 4097 4098 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj, 4099 struct ecore_queue_update_params *params, 4100 struct client_update_ramrod_data *data) 4101 { 4102 /* Client ID of the client to update */ 4103 data->client_id = obj->cl_id; 4104 4105 /* Function ID of the client to update */ 4106 data->func_id = obj->func_id; 4107 4108 /* Default VLAN value */ 4109 data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan); 4110 4111 /* Inner VLAN stripping */ 4112 data->inner_vlan_removal_enable_flg = 4113 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags); 4114 data->inner_vlan_removal_change_flg = 4115 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG, 4116 ¶ms->update_flags); 4117 4118 /* Outer VLAN stripping */ 4119 data->outer_vlan_removal_enable_flg = 4120 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); 4121 data->outer_vlan_removal_change_flg = 4122 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG, 4123 ¶ms->update_flags); 4124 4125 /* Drop packets that have source MAC that doesn't belong to this 4126 * Queue. 4127 */ 4128 data->anti_spoofing_enable_flg = 4129 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags); 4130 data->anti_spoofing_change_flg = 4131 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG, 4132 ¶ms->update_flags); 4133 4134 /* Activate/Deactivate */ 4135 data->activate_flg = 4136 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, ¶ms->update_flags); 4137 data->activate_change_flg = 4138 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags); 4139 4140 /* Enable default VLAN */ 4141 data->default_vlan_enable_flg = 4142 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags); 4143 data->default_vlan_change_flg = 4144 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG, 4145 ¶ms->update_flags); 4146 4147 /* silent vlan removal */ 4148 data->silent_vlan_change_flg = 4149 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG, 4150 ¶ms->update_flags); 4151 data->silent_vlan_removal_flg = 4152 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM, 4153 ¶ms->update_flags); 4154 data->silent_vlan_value = 4155 ECORE_CPU_TO_LE16(params->silent_removal_value); 4156 data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask); 4157 4158 /* tx switching */ 4159 data->tx_switching_flg = 4160 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, ¶ms->update_flags); 4161 data->tx_switching_change_flg = 4162 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG, 4163 ¶ms->update_flags); 4164 } 4165 4166 static int ecore_q_send_update(struct bnx2x_softc *sc, 4167 struct ecore_queue_state_params *params) 4168 { 4169 struct ecore_queue_sp_obj *o = params->q_obj; 4170 struct client_update_ramrod_data *rdata = 4171 (struct client_update_ramrod_data *)o->rdata; 4172 ecore_dma_addr_t data_mapping = o->rdata_mapping; 4173 struct ecore_queue_update_params *update_params = 4174 ¶ms->params.update; 4175 uint8_t cid_index = update_params->cid_index; 4176 4177 if (cid_index >= o->max_cos) { 4178 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range", 4179 o->cl_id, cid_index); 4180 return ECORE_INVAL; 4181 } 4182 4183 /* Clear the ramrod data */ 4184 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 4185 4186 /* Fill the ramrod data */ 4187 ecore_q_fill_update_data(o, update_params, rdata); 4188 4189 /* No need for an explicit memory barrier here as long we would 4190 * need to ensure the ordering of writing to the SPQ element 4191 * and updating of the SPQ producer which involves a memory 4192 * read and we will have to put a full memory barrier there 4193 * (inside ecore_sp_post()). 4194 */ 4195 4196 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, 4197 o->cids[cid_index], data_mapping, 4198 ETH_CONNECTION_TYPE); 4199 } 4200 4201 /** 4202 * ecore_q_send_deactivate - send DEACTIVATE command 4203 * 4204 * @sc: device handle 4205 * @params: 4206 * 4207 * implemented using the UPDATE command. 4208 */ 4209 static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params 4210 *params) 4211 { 4212 struct ecore_queue_update_params *update = ¶ms->params.update; 4213 4214 ECORE_MEMSET(update, 0, sizeof(*update)); 4215 4216 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); 4217 4218 return ecore_q_send_update(sc, params); 4219 } 4220 4221 /** 4222 * ecore_q_send_activate - send ACTIVATE command 4223 * 4224 * @sc: device handle 4225 * @params: 4226 * 4227 * implemented using the UPDATE command. 4228 */ 4229 static int ecore_q_send_activate(struct bnx2x_softc *sc, 4230 struct ecore_queue_state_params *params) 4231 { 4232 struct ecore_queue_update_params *update = ¶ms->params.update; 4233 4234 ECORE_MEMSET(update, 0, sizeof(*update)); 4235 4236 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags); 4237 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); 4238 4239 return ecore_q_send_update(sc, params); 4240 } 4241 4242 static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc, 4243 __rte_unused struct 4244 ecore_queue_state_params *params) 4245 { 4246 /* Not implemented yet. */ 4247 return -1; 4248 } 4249 4250 static int ecore_q_send_halt(struct bnx2x_softc *sc, 4251 struct ecore_queue_state_params *params) 4252 { 4253 struct ecore_queue_sp_obj *o = params->q_obj; 4254 4255 /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */ 4256 ecore_dma_addr_t data_mapping = 0; 4257 data_mapping = (ecore_dma_addr_t) o->cl_id; 4258 4259 return ecore_sp_post(sc, 4260 RAMROD_CMD_ID_ETH_HALT, 4261 o->cids[ECORE_PRIMARY_CID_INDEX], 4262 data_mapping, ETH_CONNECTION_TYPE); 4263 } 4264 4265 static int ecore_q_send_cfc_del(struct bnx2x_softc *sc, 4266 struct ecore_queue_state_params *params) 4267 { 4268 struct ecore_queue_sp_obj *o = params->q_obj; 4269 uint8_t cid_idx = params->params.cfc_del.cid_index; 4270 4271 if (cid_idx >= o->max_cos) { 4272 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range", 4273 o->cl_id, cid_idx); 4274 return ECORE_INVAL; 4275 } 4276 4277 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL, 4278 o->cids[cid_idx], 0, NONE_CONNECTION_TYPE); 4279 } 4280 4281 static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params 4282 *params) 4283 { 4284 struct ecore_queue_sp_obj *o = params->q_obj; 4285 uint8_t cid_index = params->params.terminate.cid_index; 4286 4287 if (cid_index >= o->max_cos) { 4288 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range", 4289 o->cl_id, cid_index); 4290 return ECORE_INVAL; 4291 } 4292 4293 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE, 4294 o->cids[cid_index], 0, ETH_CONNECTION_TYPE); 4295 } 4296 4297 static int ecore_q_send_empty(struct bnx2x_softc *sc, 4298 struct ecore_queue_state_params *params) 4299 { 4300 struct ecore_queue_sp_obj *o = params->q_obj; 4301 4302 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY, 4303 o->cids[ECORE_PRIMARY_CID_INDEX], 0, 4304 ETH_CONNECTION_TYPE); 4305 } 4306 4307 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params 4308 *params) 4309 { 4310 switch (params->cmd) { 4311 case ECORE_Q_CMD_INIT: 4312 return ecore_q_init(sc, params); 4313 case ECORE_Q_CMD_SETUP_TX_ONLY: 4314 return ecore_q_send_setup_tx_only(sc, params); 4315 case ECORE_Q_CMD_DEACTIVATE: 4316 return ecore_q_send_deactivate(sc, params); 4317 case ECORE_Q_CMD_ACTIVATE: 4318 return ecore_q_send_activate(sc, params); 4319 case ECORE_Q_CMD_UPDATE: 4320 return ecore_q_send_update(sc, params); 4321 case ECORE_Q_CMD_UPDATE_TPA: 4322 return ecore_q_send_update_tpa(sc, params); 4323 case ECORE_Q_CMD_HALT: 4324 return ecore_q_send_halt(sc, params); 4325 case ECORE_Q_CMD_CFC_DEL: 4326 return ecore_q_send_cfc_del(sc, params); 4327 case ECORE_Q_CMD_TERMINATE: 4328 return ecore_q_send_terminate(sc, params); 4329 case ECORE_Q_CMD_EMPTY: 4330 return ecore_q_send_empty(sc, params); 4331 default: 4332 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd); 4333 return ECORE_INVAL; 4334 } 4335 } 4336 4337 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc, 4338 struct ecore_queue_state_params *params) 4339 { 4340 switch (params->cmd) { 4341 case ECORE_Q_CMD_SETUP: 4342 return ecore_q_send_setup_e1x(sc, params); 4343 case ECORE_Q_CMD_INIT: 4344 case ECORE_Q_CMD_SETUP_TX_ONLY: 4345 case ECORE_Q_CMD_DEACTIVATE: 4346 case ECORE_Q_CMD_ACTIVATE: 4347 case ECORE_Q_CMD_UPDATE: 4348 case ECORE_Q_CMD_UPDATE_TPA: 4349 case ECORE_Q_CMD_HALT: 4350 case ECORE_Q_CMD_CFC_DEL: 4351 case ECORE_Q_CMD_TERMINATE: 4352 case ECORE_Q_CMD_EMPTY: 4353 return ecore_queue_send_cmd_cmn(sc, params); 4354 default: 4355 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd); 4356 return ECORE_INVAL; 4357 } 4358 } 4359 4360 static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc, 4361 struct ecore_queue_state_params *params) 4362 { 4363 switch (params->cmd) { 4364 case ECORE_Q_CMD_SETUP: 4365 return ecore_q_send_setup_e2(sc, params); 4366 case ECORE_Q_CMD_INIT: 4367 case ECORE_Q_CMD_SETUP_TX_ONLY: 4368 case ECORE_Q_CMD_DEACTIVATE: 4369 case ECORE_Q_CMD_ACTIVATE: 4370 case ECORE_Q_CMD_UPDATE: 4371 case ECORE_Q_CMD_UPDATE_TPA: 4372 case ECORE_Q_CMD_HALT: 4373 case ECORE_Q_CMD_CFC_DEL: 4374 case ECORE_Q_CMD_TERMINATE: 4375 case ECORE_Q_CMD_EMPTY: 4376 return ecore_queue_send_cmd_cmn(sc, params); 4377 default: 4378 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd); 4379 return ECORE_INVAL; 4380 } 4381 } 4382 4383 /** 4384 * ecore_queue_chk_transition - check state machine of a regular Queue 4385 * 4386 * @sc: device handle 4387 * @o: 4388 * @params: 4389 * 4390 * (not Forwarding) 4391 * It both checks if the requested command is legal in a current 4392 * state and, if it's legal, sets a `next_state' in the object 4393 * that will be used in the completion flow to set the `state' 4394 * of the object. 4395 * 4396 * returns 0 if a requested command is a legal transition, 4397 * ECORE_INVAL otherwise. 4398 */ 4399 static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused, 4400 struct ecore_queue_sp_obj *o, 4401 struct ecore_queue_state_params *params) 4402 { 4403 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; 4404 enum ecore_queue_cmd cmd = params->cmd; 4405 struct ecore_queue_update_params *update_params = 4406 ¶ms->params.update; 4407 uint8_t next_tx_only = o->num_tx_only; 4408 4409 /* Forget all pending for completion commands if a driver only state 4410 * transition has been requested. 4411 */ 4412 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 4413 o->pending = 0; 4414 o->next_state = ECORE_Q_STATE_MAX; 4415 } 4416 4417 /* Don't allow a next state transition if we are in the middle of 4418 * the previous one. 4419 */ 4420 if (o->pending) { 4421 PMD_DRV_LOG(ERR, "Blocking transition since pending was %lx", 4422 o->pending); 4423 return ECORE_BUSY; 4424 } 4425 4426 switch (state) { 4427 case ECORE_Q_STATE_RESET: 4428 if (cmd == ECORE_Q_CMD_INIT) 4429 next_state = ECORE_Q_STATE_INITIALIZED; 4430 4431 break; 4432 case ECORE_Q_STATE_INITIALIZED: 4433 if (cmd == ECORE_Q_CMD_SETUP) { 4434 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, 4435 ¶ms->params.setup.flags)) 4436 next_state = ECORE_Q_STATE_ACTIVE; 4437 else 4438 next_state = ECORE_Q_STATE_INACTIVE; 4439 } 4440 4441 break; 4442 case ECORE_Q_STATE_ACTIVE: 4443 if (cmd == ECORE_Q_CMD_DEACTIVATE) 4444 next_state = ECORE_Q_STATE_INACTIVE; 4445 4446 else if ((cmd == ECORE_Q_CMD_EMPTY) || 4447 (cmd == ECORE_Q_CMD_UPDATE_TPA)) 4448 next_state = ECORE_Q_STATE_ACTIVE; 4449 4450 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { 4451 next_state = ECORE_Q_STATE_MULTI_COS; 4452 next_tx_only = 1; 4453 } 4454 4455 else if (cmd == ECORE_Q_CMD_HALT) 4456 next_state = ECORE_Q_STATE_STOPPED; 4457 4458 else if (cmd == ECORE_Q_CMD_UPDATE) { 4459 /* If "active" state change is requested, update the 4460 * state accordingly. 4461 */ 4462 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 4463 &update_params->update_flags) && 4464 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, 4465 &update_params->update_flags)) 4466 next_state = ECORE_Q_STATE_INACTIVE; 4467 else 4468 next_state = ECORE_Q_STATE_ACTIVE; 4469 } 4470 4471 break; 4472 case ECORE_Q_STATE_MULTI_COS: 4473 if (cmd == ECORE_Q_CMD_TERMINATE) 4474 next_state = ECORE_Q_STATE_MCOS_TERMINATED; 4475 4476 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { 4477 next_state = ECORE_Q_STATE_MULTI_COS; 4478 next_tx_only = o->num_tx_only + 1; 4479 } 4480 4481 else if ((cmd == ECORE_Q_CMD_EMPTY) || 4482 (cmd == ECORE_Q_CMD_UPDATE_TPA)) 4483 next_state = ECORE_Q_STATE_MULTI_COS; 4484 4485 else if (cmd == ECORE_Q_CMD_UPDATE) { 4486 /* If "active" state change is requested, update the 4487 * state accordingly. 4488 */ 4489 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 4490 &update_params->update_flags) && 4491 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, 4492 &update_params->update_flags)) 4493 next_state = ECORE_Q_STATE_INACTIVE; 4494 else 4495 next_state = ECORE_Q_STATE_MULTI_COS; 4496 } 4497 4498 break; 4499 case ECORE_Q_STATE_MCOS_TERMINATED: 4500 if (cmd == ECORE_Q_CMD_CFC_DEL) { 4501 next_tx_only = o->num_tx_only - 1; 4502 if (next_tx_only == 0) 4503 next_state = ECORE_Q_STATE_ACTIVE; 4504 else 4505 next_state = ECORE_Q_STATE_MULTI_COS; 4506 } 4507 4508 break; 4509 case ECORE_Q_STATE_INACTIVE: 4510 if (cmd == ECORE_Q_CMD_ACTIVATE) 4511 next_state = ECORE_Q_STATE_ACTIVE; 4512 4513 else if ((cmd == ECORE_Q_CMD_EMPTY) || 4514 (cmd == ECORE_Q_CMD_UPDATE_TPA)) 4515 next_state = ECORE_Q_STATE_INACTIVE; 4516 4517 else if (cmd == ECORE_Q_CMD_HALT) 4518 next_state = ECORE_Q_STATE_STOPPED; 4519 4520 else if (cmd == ECORE_Q_CMD_UPDATE) { 4521 /* If "active" state change is requested, update the 4522 * state accordingly. 4523 */ 4524 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 4525 &update_params->update_flags) && 4526 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, 4527 &update_params->update_flags)) { 4528 if (o->num_tx_only == 0) 4529 next_state = ECORE_Q_STATE_ACTIVE; 4530 else /* tx only queues exist for this queue */ 4531 next_state = ECORE_Q_STATE_MULTI_COS; 4532 } else 4533 next_state = ECORE_Q_STATE_INACTIVE; 4534 } 4535 4536 break; 4537 case ECORE_Q_STATE_STOPPED: 4538 if (cmd == ECORE_Q_CMD_TERMINATE) 4539 next_state = ECORE_Q_STATE_TERMINATED; 4540 4541 break; 4542 case ECORE_Q_STATE_TERMINATED: 4543 if (cmd == ECORE_Q_CMD_CFC_DEL) 4544 next_state = ECORE_Q_STATE_RESET; 4545 4546 break; 4547 default: 4548 PMD_DRV_LOG(ERR, "Illegal state: %d", state); 4549 } 4550 4551 /* Transition is assured */ 4552 if (next_state != ECORE_Q_STATE_MAX) { 4553 ECORE_MSG("Good state transition: %d(%d)->%d", 4554 state, cmd, next_state); 4555 o->next_state = next_state; 4556 o->next_tx_only = next_tx_only; 4557 return ECORE_SUCCESS; 4558 } 4559 4560 ECORE_MSG("Bad state transition request: %d %d", state, cmd); 4561 4562 return ECORE_INVAL; 4563 } 4564 4565 /** 4566 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue. 4567 * 4568 * @sc: device handle 4569 * @o: 4570 * @params: 4571 * 4572 * It both checks if the requested command is legal in a current 4573 * state and, if it's legal, sets a `next_state' in the object 4574 * that will be used in the completion flow to set the `state' 4575 * of the object. 4576 * 4577 * returns 0 if a requested command is a legal transition, 4578 * ECORE_INVAL otherwise. 4579 */ 4580 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused, 4581 struct ecore_queue_sp_obj *o, 4582 struct ecore_queue_state_params 4583 *params) 4584 { 4585 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; 4586 enum ecore_queue_cmd cmd = params->cmd; 4587 4588 switch (state) { 4589 case ECORE_Q_STATE_RESET: 4590 if (cmd == ECORE_Q_CMD_INIT) 4591 next_state = ECORE_Q_STATE_INITIALIZED; 4592 4593 break; 4594 case ECORE_Q_STATE_INITIALIZED: 4595 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { 4596 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, 4597 ¶ms->params.tx_only.flags)) 4598 next_state = ECORE_Q_STATE_ACTIVE; 4599 else 4600 next_state = ECORE_Q_STATE_INACTIVE; 4601 } 4602 4603 break; 4604 case ECORE_Q_STATE_ACTIVE: 4605 case ECORE_Q_STATE_INACTIVE: 4606 if (cmd == ECORE_Q_CMD_CFC_DEL) 4607 next_state = ECORE_Q_STATE_RESET; 4608 4609 break; 4610 default: 4611 PMD_DRV_LOG(ERR, "Illegal state: %d", state); 4612 } 4613 4614 /* Transition is assured */ 4615 if (next_state != ECORE_Q_STATE_MAX) { 4616 ECORE_MSG("Good state transition: %d(%d)->%d", 4617 state, cmd, next_state); 4618 o->next_state = next_state; 4619 return ECORE_SUCCESS; 4620 } 4621 4622 ECORE_MSG("Bad state transition request: %d %d", state, cmd); 4623 return ECORE_INVAL; 4624 } 4625 4626 void ecore_init_queue_obj(struct bnx2x_softc *sc, 4627 struct ecore_queue_sp_obj *obj, 4628 uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt, 4629 uint8_t func_id, void *rdata, 4630 ecore_dma_addr_t rdata_mapping, unsigned long type) 4631 { 4632 ECORE_MEMSET(obj, 0, sizeof(*obj)); 4633 4634 /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */ 4635 ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt); 4636 4637 rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); 4638 obj->max_cos = cid_cnt; 4639 obj->cl_id = cl_id; 4640 obj->func_id = func_id; 4641 obj->rdata = rdata; 4642 obj->rdata_mapping = rdata_mapping; 4643 obj->type = type; 4644 obj->next_state = ECORE_Q_STATE_MAX; 4645 4646 if (CHIP_IS_E1x(sc)) 4647 obj->send_cmd = ecore_queue_send_cmd_e1x; 4648 else 4649 obj->send_cmd = ecore_queue_send_cmd_e2; 4650 4651 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type)) 4652 obj->check_transition = ecore_queue_chk_fwd_transition; 4653 else 4654 obj->check_transition = ecore_queue_chk_transition; 4655 4656 obj->complete_cmd = ecore_queue_comp_cmd; 4657 obj->wait_comp = ecore_queue_wait_comp; 4658 obj->set_pending = ecore_queue_set_pending; 4659 } 4660 4661 /********************** Function state object *********************************/ 4662 enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc, 4663 struct ecore_func_sp_obj *o) 4664 { 4665 /* in the middle of transaction - return INVALID state */ 4666 if (o->pending) 4667 return ECORE_F_STATE_MAX; 4668 4669 /* unsure the order of reading of o->pending and o->state 4670 * o->pending should be read first 4671 */ 4672 rmb(); 4673 4674 return o->state; 4675 } 4676 4677 static int ecore_func_wait_comp(struct bnx2x_softc *sc, 4678 struct ecore_func_sp_obj *o, 4679 enum ecore_func_cmd cmd) 4680 { 4681 return ecore_state_wait(sc, cmd, &o->pending); 4682 } 4683 4684 /** 4685 * ecore_func_state_change_comp - complete the state machine transition 4686 * 4687 * @sc: device handle 4688 * @o: 4689 * @cmd: 4690 * 4691 * Called on state change transition. Completes the state 4692 * machine transition only - no HW interaction. 4693 */ 4694 static int 4695 ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused, 4696 struct ecore_func_sp_obj *o, 4697 enum ecore_func_cmd cmd) 4698 { 4699 unsigned long cur_pending = o->pending; 4700 4701 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { 4702 PMD_DRV_LOG(ERR, 4703 "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d", 4704 cmd, ECORE_FUNC_ID(sc), o->state, cur_pending, 4705 o->next_state); 4706 return ECORE_INVAL; 4707 } 4708 4709 ECORE_MSG("Completing command %d for func %d, setting state to %d", 4710 cmd, ECORE_FUNC_ID(sc), o->next_state); 4711 4712 o->state = o->next_state; 4713 o->next_state = ECORE_F_STATE_MAX; 4714 4715 /* It's important that o->state and o->next_state are 4716 * updated before o->pending. 4717 */ 4718 wmb(); 4719 4720 ECORE_CLEAR_BIT(cmd, &o->pending); 4721 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 4722 4723 return ECORE_SUCCESS; 4724 } 4725 4726 /** 4727 * ecore_func_comp_cmd - complete the state change command 4728 * 4729 * @sc: device handle 4730 * @o: 4731 * @cmd: 4732 * 4733 * Checks that the arrived completion is expected. 4734 */ 4735 static int ecore_func_comp_cmd(struct bnx2x_softc *sc, 4736 struct ecore_func_sp_obj *o, 4737 enum ecore_func_cmd cmd) 4738 { 4739 /* Complete the state machine part first, check if it's a 4740 * legal completion. 4741 */ 4742 int rc = ecore_func_state_change_comp(sc, o, cmd); 4743 return rc; 4744 } 4745 4746 /** 4747 * ecore_func_chk_transition - perform function state machine transition 4748 * 4749 * @sc: device handle 4750 * @o: 4751 * @params: 4752 * 4753 * It both checks if the requested command is legal in a current 4754 * state and, if it's legal, sets a `next_state' in the object 4755 * that will be used in the completion flow to set the `state' 4756 * of the object. 4757 * 4758 * returns 0 if a requested command is a legal transition, 4759 * ECORE_INVAL otherwise. 4760 */ 4761 static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused, 4762 struct ecore_func_sp_obj *o, 4763 struct ecore_func_state_params *params) 4764 { 4765 enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX; 4766 enum ecore_func_cmd cmd = params->cmd; 4767 4768 /* Forget all pending for completion commands if a driver only state 4769 * transition has been requested. 4770 */ 4771 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 4772 o->pending = 0; 4773 o->next_state = ECORE_F_STATE_MAX; 4774 } 4775 4776 /* Don't allow a next state transition if we are in the middle of 4777 * the previous one. 4778 */ 4779 if (o->pending) 4780 return ECORE_BUSY; 4781 4782 switch (state) { 4783 case ECORE_F_STATE_RESET: 4784 if (cmd == ECORE_F_CMD_HW_INIT) 4785 next_state = ECORE_F_STATE_INITIALIZED; 4786 4787 break; 4788 case ECORE_F_STATE_INITIALIZED: 4789 if (cmd == ECORE_F_CMD_START) 4790 next_state = ECORE_F_STATE_STARTED; 4791 4792 else if (cmd == ECORE_F_CMD_HW_RESET) 4793 next_state = ECORE_F_STATE_RESET; 4794 4795 break; 4796 case ECORE_F_STATE_STARTED: 4797 if (cmd == ECORE_F_CMD_STOP) 4798 next_state = ECORE_F_STATE_INITIALIZED; 4799 /* afex ramrods can be sent only in started mode, and only 4800 * if not pending for function_stop ramrod completion 4801 * for these events - next state remained STARTED. 4802 */ 4803 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) && 4804 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 4805 next_state = ECORE_F_STATE_STARTED; 4806 4807 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) && 4808 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 4809 next_state = ECORE_F_STATE_STARTED; 4810 4811 /* Switch_update ramrod can be sent in either started or 4812 * tx_stopped state, and it doesn't change the state. 4813 */ 4814 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && 4815 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 4816 next_state = ECORE_F_STATE_STARTED; 4817 4818 else if (cmd == ECORE_F_CMD_TX_STOP) 4819 next_state = ECORE_F_STATE_TX_STOPPED; 4820 4821 break; 4822 case ECORE_F_STATE_TX_STOPPED: 4823 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && 4824 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 4825 next_state = ECORE_F_STATE_TX_STOPPED; 4826 4827 else if (cmd == ECORE_F_CMD_TX_START) 4828 next_state = ECORE_F_STATE_STARTED; 4829 4830 break; 4831 default: 4832 PMD_DRV_LOG(ERR, "Unknown state: %d", state); 4833 } 4834 4835 /* Transition is assured */ 4836 if (next_state != ECORE_F_STATE_MAX) { 4837 ECORE_MSG("Good function state transition: %d(%d)->%d", 4838 state, cmd, next_state); 4839 o->next_state = next_state; 4840 return ECORE_SUCCESS; 4841 } 4842 4843 ECORE_MSG("Bad function state transition request: %d %d", state, cmd); 4844 4845 return ECORE_INVAL; 4846 } 4847 4848 /** 4849 * ecore_func_init_func - performs HW init at function stage 4850 * 4851 * @sc: device handle 4852 * @drv: 4853 * 4854 * Init HW when the current phase is 4855 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only 4856 * HW blocks. 4857 */ 4858 static int ecore_func_init_func(struct bnx2x_softc *sc, 4859 const struct ecore_func_sp_drv_ops *drv) 4860 { 4861 return drv->init_hw_func(sc); 4862 } 4863 4864 /** 4865 * ecore_func_init_port - performs HW init at port stage 4866 * 4867 * @sc: device handle 4868 * @drv: 4869 * 4870 * Init HW when the current phase is 4871 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and 4872 * FUNCTION-only HW blocks. 4873 * 4874 */ 4875 static int ecore_func_init_port(struct bnx2x_softc *sc, 4876 const struct ecore_func_sp_drv_ops *drv) 4877 { 4878 int rc = drv->init_hw_port(sc); 4879 if (rc) 4880 return rc; 4881 4882 return ecore_func_init_func(sc, drv); 4883 } 4884 4885 /** 4886 * ecore_func_init_cmn_chip - performs HW init at chip-common stage 4887 * 4888 * @sc: device handle 4889 * @drv: 4890 * 4891 * Init HW when the current phase is 4892 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, 4893 * PORT-only and FUNCTION-only HW blocks. 4894 */ 4895 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops 4896 *drv) 4897 { 4898 int rc = drv->init_hw_cmn_chip(sc); 4899 if (rc) 4900 return rc; 4901 4902 return ecore_func_init_port(sc, drv); 4903 } 4904 4905 /** 4906 * ecore_func_init_cmn - performs HW init at common stage 4907 * 4908 * @sc: device handle 4909 * @drv: 4910 * 4911 * Init HW when the current phase is 4912 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON, 4913 * PORT-only and FUNCTION-only HW blocks. 4914 */ 4915 static int ecore_func_init_cmn(struct bnx2x_softc *sc, 4916 const struct ecore_func_sp_drv_ops *drv) 4917 { 4918 int rc = drv->init_hw_cmn(sc); 4919 if (rc) 4920 return rc; 4921 4922 return ecore_func_init_port(sc, drv); 4923 } 4924 4925 static int ecore_func_hw_init(struct bnx2x_softc *sc, 4926 struct ecore_func_state_params *params) 4927 { 4928 uint32_t load_code = params->params.hw_init.load_phase; 4929 struct ecore_func_sp_obj *o = params->f_obj; 4930 const struct ecore_func_sp_drv_ops *drv = o->drv; 4931 int rc = 0; 4932 4933 ECORE_MSG("function %d load_code %x", 4934 ECORE_ABS_FUNC_ID(sc), load_code); 4935 4936 /* Prepare FW */ 4937 rc = drv->init_fw(sc); 4938 if (rc) { 4939 PMD_DRV_LOG(ERR, "Error loading firmware"); 4940 goto init_err; 4941 } 4942 4943 /* Handle the beginning of COMMON_XXX pases separately... */ 4944 switch (load_code) { 4945 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 4946 rc = ecore_func_init_cmn_chip(sc, drv); 4947 if (rc) 4948 goto init_err; 4949 4950 break; 4951 case FW_MSG_CODE_DRV_LOAD_COMMON: 4952 rc = ecore_func_init_cmn(sc, drv); 4953 if (rc) 4954 goto init_err; 4955 4956 break; 4957 case FW_MSG_CODE_DRV_LOAD_PORT: 4958 rc = ecore_func_init_port(sc, drv); 4959 if (rc) 4960 goto init_err; 4961 4962 break; 4963 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 4964 rc = ecore_func_init_func(sc, drv); 4965 if (rc) 4966 goto init_err; 4967 4968 break; 4969 default: 4970 PMD_DRV_LOG(ERR, "Unknown load_code (0x%x) from MCP", 4971 load_code); 4972 rc = ECORE_INVAL; 4973 } 4974 4975 init_err: 4976 /* In case of success, complete the command immediately: no ramrods 4977 * have been sent. 4978 */ 4979 if (!rc) 4980 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT); 4981 4982 return rc; 4983 } 4984 4985 /** 4986 * ecore_func_reset_func - reset HW at function stage 4987 * 4988 * @sc: device handle 4989 * @drv: 4990 * 4991 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only 4992 * FUNCTION-only HW blocks. 4993 */ 4994 static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops 4995 *drv) 4996 { 4997 drv->reset_hw_func(sc); 4998 } 4999 5000 /** 5001 * ecore_func_reset_port - reser HW at port stage 5002 * 5003 * @sc: device handle 5004 * @drv: 5005 * 5006 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset 5007 * FUNCTION-only and PORT-only HW blocks. 5008 * 5009 * !!!IMPORTANT!!! 5010 * 5011 * It's important to call reset_port before reset_func() as the last thing 5012 * reset_func does is pf_disable() thus disabling PGLUE_B, which 5013 * makes impossible any DMAE transactions. 5014 */ 5015 static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops 5016 *drv) 5017 { 5018 drv->reset_hw_port(sc); 5019 ecore_func_reset_func(sc, drv); 5020 } 5021 5022 /** 5023 * ecore_func_reset_cmn - reser HW at common stage 5024 * 5025 * @sc: device handle 5026 * @drv: 5027 * 5028 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and 5029 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, 5030 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. 5031 */ 5032 static void ecore_func_reset_cmn(struct bnx2x_softc *sc, 5033 const struct ecore_func_sp_drv_ops *drv) 5034 { 5035 ecore_func_reset_port(sc, drv); 5036 drv->reset_hw_cmn(sc); 5037 } 5038 5039 static int ecore_func_hw_reset(struct bnx2x_softc *sc, 5040 struct ecore_func_state_params *params) 5041 { 5042 uint32_t reset_phase = params->params.hw_reset.reset_phase; 5043 struct ecore_func_sp_obj *o = params->f_obj; 5044 const struct ecore_func_sp_drv_ops *drv = o->drv; 5045 5046 ECORE_MSG("function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc), 5047 reset_phase); 5048 5049 switch (reset_phase) { 5050 case FW_MSG_CODE_DRV_UNLOAD_COMMON: 5051 ecore_func_reset_cmn(sc, drv); 5052 break; 5053 case FW_MSG_CODE_DRV_UNLOAD_PORT: 5054 ecore_func_reset_port(sc, drv); 5055 break; 5056 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: 5057 ecore_func_reset_func(sc, drv); 5058 break; 5059 default: 5060 PMD_DRV_LOG(ERR, "Unknown reset_phase (0x%x) from MCP", 5061 reset_phase); 5062 break; 5063 } 5064 5065 /* Complete the command immediately: no ramrods have been sent. */ 5066 o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET); 5067 5068 return ECORE_SUCCESS; 5069 } 5070 5071 static int ecore_func_send_start(struct bnx2x_softc *sc, 5072 struct ecore_func_state_params *params) 5073 { 5074 struct ecore_func_sp_obj *o = params->f_obj; 5075 struct function_start_data *rdata = 5076 (struct function_start_data *)o->rdata; 5077 ecore_dma_addr_t data_mapping = o->rdata_mapping; 5078 struct ecore_func_start_params *start_params = ¶ms->params.start; 5079 5080 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 5081 5082 /* Fill the ramrod data with provided parameters */ 5083 rdata->function_mode = (uint8_t) start_params->mf_mode; 5084 rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag); 5085 rdata->path_id = ECORE_PATH_ID(sc); 5086 rdata->network_cos_mode = start_params->network_cos_mode; 5087 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; 5088 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; 5089 5090 /* 5091 * No need for an explicit memory barrier here as long we would 5092 * need to ensure the ordering of writing to the SPQ element 5093 * and updating of the SPQ producer which involves a memory 5094 * read and we will have to put a full memory barrier there 5095 * (inside ecore_sp_post()). 5096 */ 5097 5098 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 5099 data_mapping, NONE_CONNECTION_TYPE); 5100 } 5101 5102 static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params 5103 *params) 5104 { 5105 struct ecore_func_sp_obj *o = params->f_obj; 5106 struct function_update_data *rdata = 5107 (struct function_update_data *)o->rdata; 5108 ecore_dma_addr_t data_mapping = o->rdata_mapping; 5109 struct ecore_func_switch_update_params *switch_update_params = 5110 ¶ms->params.switch_update; 5111 5112 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 5113 5114 /* Fill the ramrod data with provided parameters */ 5115 rdata->tx_switch_suspend_change_flg = 1; 5116 rdata->tx_switch_suspend = switch_update_params->suspend; 5117 rdata->echo = SWITCH_UPDATE; 5118 5119 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, 5120 data_mapping, NONE_CONNECTION_TYPE); 5121 } 5122 5123 static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params 5124 *params) 5125 { 5126 struct ecore_func_sp_obj *o = params->f_obj; 5127 struct function_update_data *rdata = 5128 (struct function_update_data *)o->afex_rdata; 5129 ecore_dma_addr_t data_mapping = o->afex_rdata_mapping; 5130 struct ecore_func_afex_update_params *afex_update_params = 5131 ¶ms->params.afex_update; 5132 5133 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 5134 5135 /* Fill the ramrod data with provided parameters */ 5136 rdata->vif_id_change_flg = 1; 5137 rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id); 5138 rdata->afex_default_vlan_change_flg = 1; 5139 rdata->afex_default_vlan = 5140 ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan); 5141 rdata->allowed_priorities_change_flg = 1; 5142 rdata->allowed_priorities = afex_update_params->allowed_priorities; 5143 rdata->echo = AFEX_UPDATE; 5144 5145 /* No need for an explicit memory barrier here as long we would 5146 * need to ensure the ordering of writing to the SPQ element 5147 * and updating of the SPQ producer which involves a memory 5148 * read and we will have to put a full memory barrier there 5149 * (inside ecore_sp_post()). 5150 */ 5151 ECORE_MSG("afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x", 5152 rdata->vif_id, 5153 rdata->afex_default_vlan, rdata->allowed_priorities); 5154 5155 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, 5156 data_mapping, NONE_CONNECTION_TYPE); 5157 } 5158 5159 static 5160 inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc, 5161 struct ecore_func_state_params *params) 5162 { 5163 struct ecore_func_sp_obj *o = params->f_obj; 5164 struct afex_vif_list_ramrod_data *rdata = 5165 (struct afex_vif_list_ramrod_data *)o->afex_rdata; 5166 struct ecore_func_afex_viflists_params *afex_vif_params = 5167 ¶ms->params.afex_viflists; 5168 uint64_t *p_rdata = (uint64_t *) rdata; 5169 5170 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 5171 5172 /* Fill the ramrod data with provided parameters */ 5173 rdata->vif_list_index = 5174 ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index); 5175 rdata->func_bit_map = afex_vif_params->func_bit_map; 5176 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command; 5177 rdata->func_to_clear = afex_vif_params->func_to_clear; 5178 5179 /* send in echo type of sub command */ 5180 rdata->echo = afex_vif_params->afex_vif_list_command; 5181 5182 /* No need for an explicit memory barrier here as long we would 5183 * need to ensure the ordering of writing to the SPQ element 5184 * and updating of the SPQ producer which involves a memory 5185 * read and we will have to put a full memory barrier there 5186 * (inside ecore_sp_post()). 5187 */ 5188 5189 ECORE_MSG 5190 ("afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x", 5191 rdata->afex_vif_list_command, rdata->vif_list_index, 5192 rdata->func_bit_map, rdata->func_to_clear); 5193 5194 /* this ramrod sends data directly and not through DMA mapping */ 5195 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, 5196 *p_rdata, NONE_CONNECTION_TYPE); 5197 } 5198 5199 static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct 5200 ecore_func_state_params *params) 5201 { 5202 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 5203 NONE_CONNECTION_TYPE); 5204 } 5205 5206 static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct 5207 ecore_func_state_params *params) 5208 { 5209 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 5210 NONE_CONNECTION_TYPE); 5211 } 5212 5213 static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params 5214 *params) 5215 { 5216 struct ecore_func_sp_obj *o = params->f_obj; 5217 struct flow_control_configuration *rdata = 5218 (struct flow_control_configuration *)o->rdata; 5219 ecore_dma_addr_t data_mapping = o->rdata_mapping; 5220 struct ecore_func_tx_start_params *tx_start_params = 5221 ¶ms->params.tx_start; 5222 uint32_t i; 5223 5224 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 5225 5226 rdata->dcb_enabled = tx_start_params->dcb_enabled; 5227 rdata->dcb_version = tx_start_params->dcb_version; 5228 rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0; 5229 5230 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) 5231 rdata->traffic_type_to_priority_cos[i] = 5232 tx_start_params->traffic_type_to_priority_cos[i]; 5233 5234 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, 5235 data_mapping, NONE_CONNECTION_TYPE); 5236 } 5237 5238 static int ecore_func_send_cmd(struct bnx2x_softc *sc, 5239 struct ecore_func_state_params *params) 5240 { 5241 switch (params->cmd) { 5242 case ECORE_F_CMD_HW_INIT: 5243 return ecore_func_hw_init(sc, params); 5244 case ECORE_F_CMD_START: 5245 return ecore_func_send_start(sc, params); 5246 case ECORE_F_CMD_STOP: 5247 return ecore_func_send_stop(sc, params); 5248 case ECORE_F_CMD_HW_RESET: 5249 return ecore_func_hw_reset(sc, params); 5250 case ECORE_F_CMD_AFEX_UPDATE: 5251 return ecore_func_send_afex_update(sc, params); 5252 case ECORE_F_CMD_AFEX_VIFLISTS: 5253 return ecore_func_send_afex_viflists(sc, params); 5254 case ECORE_F_CMD_TX_STOP: 5255 return ecore_func_send_tx_stop(sc, params); 5256 case ECORE_F_CMD_TX_START: 5257 return ecore_func_send_tx_start(sc, params); 5258 case ECORE_F_CMD_SWITCH_UPDATE: 5259 return ecore_func_send_switch_update(sc, params); 5260 default: 5261 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd); 5262 return ECORE_INVAL; 5263 } 5264 } 5265 5266 void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc, 5267 struct ecore_func_sp_obj *obj, 5268 void *rdata, ecore_dma_addr_t rdata_mapping, 5269 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping, 5270 struct ecore_func_sp_drv_ops *drv_iface) 5271 { 5272 ECORE_MEMSET(obj, 0, sizeof(*obj)); 5273 5274 ECORE_MUTEX_INIT(&obj->one_pending_mutex); 5275 5276 obj->rdata = rdata; 5277 obj->rdata_mapping = rdata_mapping; 5278 obj->afex_rdata = afex_rdata; 5279 obj->afex_rdata_mapping = afex_rdata_mapping; 5280 obj->send_cmd = ecore_func_send_cmd; 5281 obj->check_transition = ecore_func_chk_transition; 5282 obj->complete_cmd = ecore_func_comp_cmd; 5283 obj->wait_comp = ecore_func_wait_comp; 5284 obj->drv = drv_iface; 5285 } 5286 5287 /** 5288 * ecore_func_state_change - perform Function state change transition 5289 * 5290 * @sc: device handle 5291 * @params: parameters to perform the transaction 5292 * 5293 * returns 0 in case of successfully completed transition, 5294 * negative error code in case of failure, positive 5295 * (EBUSY) value if there is a completion to that is 5296 * still pending (possible only if RAMROD_COMP_WAIT is 5297 * not set in params->ramrod_flags for asynchronous 5298 * commands). 5299 */ 5300 int ecore_func_state_change(struct bnx2x_softc *sc, 5301 struct ecore_func_state_params *params) 5302 { 5303 struct ecore_func_sp_obj *o = params->f_obj; 5304 int rc, cnt = 300; 5305 enum ecore_func_cmd cmd = params->cmd; 5306 unsigned long *pending = &o->pending; 5307 5308 ECORE_MUTEX_LOCK(&o->one_pending_mutex); 5309 5310 /* Check that the requested transition is legal */ 5311 rc = o->check_transition(sc, o, params); 5312 if ((rc == ECORE_BUSY) && 5313 (ECORE_TEST_BIT(RAMROD_RETRY, ¶ms->ramrod_flags))) { 5314 while ((rc == ECORE_BUSY) && (--cnt > 0)) { 5315 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 5316 ECORE_MSLEEP(10); 5317 ECORE_MUTEX_LOCK(&o->one_pending_mutex); 5318 rc = o->check_transition(sc, o, params); 5319 } 5320 if (rc == ECORE_BUSY) { 5321 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 5322 PMD_DRV_LOG(ERR, 5323 "timeout waiting for previous ramrod completion"); 5324 return rc; 5325 } 5326 } else if (rc) { 5327 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 5328 return rc; 5329 } 5330 5331 /* Set "pending" bit */ 5332 ECORE_SET_BIT(cmd, pending); 5333 5334 /* Don't send a command if only driver cleanup was requested */ 5335 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 5336 ecore_func_state_change_comp(sc, o, cmd); 5337 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 5338 } else { 5339 /* Send a ramrod */ 5340 rc = o->send_cmd(sc, params); 5341 5342 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 5343 5344 if (rc) { 5345 o->next_state = ECORE_F_STATE_MAX; 5346 ECORE_CLEAR_BIT(cmd, pending); 5347 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 5348 return rc; 5349 } 5350 5351 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { 5352 rc = o->wait_comp(sc, o, cmd); 5353 if (rc) 5354 return rc; 5355 5356 return ECORE_SUCCESS; 5357 } 5358 } 5359 5360 return ECORE_RET_PENDING(cmd, pending); 5361 } 5362 5363 /****************************************************************************** 5364 * Description: 5365 * Calculates crc 8 on a word value: polynomial 0-1-2-8 5366 * Code was translated from Verilog. 5367 * Return: 5368 *****************************************************************************/ 5369 uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc) 5370 { 5371 uint8_t D[32]; 5372 uint8_t NewCRC[8]; 5373 uint8_t C[8]; 5374 uint8_t crc_res; 5375 uint8_t i; 5376 5377 /* split the data into 31 bits */ 5378 for (i = 0; i < 32; i++) { 5379 D[i] = (uint8_t) (data & 1); 5380 data = data >> 1; 5381 } 5382 5383 /* split the crc into 8 bits */ 5384 for (i = 0; i < 8; i++) { 5385 C[i] = crc & 1; 5386 crc = crc >> 1; 5387 } 5388 5389 NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^ 5390 D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^ 5391 C[6] ^ C[7]; 5392 NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^ 5393 D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^ 5394 D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6]; 5395 NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^ 5396 D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^ 5397 C[0] ^ C[1] ^ C[4] ^ C[5]; 5398 NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^ 5399 D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^ 5400 C[1] ^ C[2] ^ C[5] ^ C[6]; 5401 NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^ 5402 D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^ 5403 C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7]; 5404 NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^ 5405 D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^ 5406 C[3] ^ C[4] ^ C[7]; 5407 NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^ 5408 D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5]; 5409 NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^ 5410 D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6]; 5411 5412 crc_res = 0; 5413 for (i = 0; i < 8; i++) { 5414 crc_res |= (NewCRC[i] << i); 5415 } 5416 5417 return crc_res; 5418 } 5419 5420 uint32_t 5421 ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic) 5422 { 5423 int i; 5424 while (len--) { 5425 crc ^= *p++; 5426 for (i = 0; i < 8; i++) 5427 crc = (crc >> 1) ^ ((crc & 1) ? magic : 0); 5428 } 5429 return crc; 5430 } 5431