/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @o:		pointer to the object
 * @exe_len:	length
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static void
ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
		     struct ecore_exe_queue_obj *o,
		     int exe_len,
		     union ecore_qable_obj *owner,
		     exe_q_validate validate,
		     exe_q_remove remove,
		     exe_q_optimize optimize, exe_q_execute exec, exe_q_get get)
{
	ECORE_MEMSET(o, 0, sizeof(*o));

	ECORE_LIST_INIT(&o->exe_queue);
	ECORE_LIST_INIT(&o->pending_comp);

	ECORE_SPIN_LOCK_INIT(&o->lock, sc);

	o->exe_chunk_len = exe_len;
	o->owner = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->remove = remove;
	o->optimize = optimize;
	o->execute = exec;
	o->get = get;

	ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d",
		  exe_len);
}
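
/* Usage sketch (illustrative only - the exact chunk length and callback set
 * are whatever the owner object wires in at init time): a vlan_mac object
 * would typically register its own callbacks like so:
 *
 *	ecore_exe_queue_init(sc, &o->exe_queue, 1, (union ecore_qable_obj *)o,
 *			     ecore_validate_vlan_mac, ecore_remove_vlan_mac,
 *			     ecore_optimize_vlan_mac, ecore_execute_vlan_mac,
 *			     ecore_exeq_get_mac);
 *
 * A chunk length of 1 makes ecore_exe_queue_step() submit one command per
 * ramrod; E2-style objects may use a larger chunk to batch rules.
 */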

static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
				      struct ecore_exeq_elem *elem)
{
	ECORE_MSG(sc, "Deleting an exe_queue element");
	ECORE_FREE(sc, elem, sizeof(*elem));
}

static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;
	int cnt = 0;

	ECORE_SPIN_LOCK_BH(&o->lock);

	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		cnt++;

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static int ecore_exe_queue_add(struct bnx2x_softc *sc,
			       struct ecore_exe_queue_obj *o,
			       struct ecore_exeq_elem *elem, int restore)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->lock);

	if (!restore) {
		/* Try to cancel this element's command (optimize it away) */
		rc = o->optimize(sc, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is valid */
		rc = o->validate(sc, o->owner, elem);
		if (rc) {
			ECORE_MSG(sc, "Preamble failed: %d", rc);
			goto free_and_exit;
		}
	}

	/* Validation passed - add the element to the execution queue */
	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return ECORE_SUCCESS;

free_and_exit:
	ecore_exe_queue_free_elem(sc, elem);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return rc;
}

static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
					    struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;

	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
					      struct ecore_exeq_elem, link);

		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
		ecore_exe_queue_free_elem(sc, elem);
	}
}

static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
						 struct ecore_exe_queue_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->lock);

	__ecore_exe_queue_reset_pending(sc, o);

	ECORE_SPIN_UNLOCK_BH(&o->lock);
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static int ecore_exe_queue_step(struct bnx2x_softc *sc,
				struct ecore_exe_queue_obj *o,
				uint32_t *ramrod_flags)
{
	struct ecore_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	ECORE_MEMSET(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW, which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			ECORE_MSG(sc,
				  "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
			__ecore_exe_queue_reset_pending(sc, o);
		} else {
			return ECORE_PENDING;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
					      struct ecore_exeq_elem, link);
		ECORE_DBG_BREAK_IF(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent both lists from being momentarily empty
			 * while moving an element. This allows calling
			 * ecore_exe_queue_empty() without locking.
			 */
			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
			mb();
			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
		} else {
			break;
		}
	}

	/* Sanity check: nothing was moved into this chunk */
	if (!cur_len)
		return ECORE_SUCCESS;

	rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, it means there are no outstanding
		 * pending completions and we may dismiss the pending list.
		 */
		__ecore_exe_queue_reset_pending(sc, o);

	return rc;
}
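
/* Why the spacer above matters (behavioural sketch, not driver code):
 * without it, a lockless ecore_exe_queue_empty() (below) could observe both
 * lists empty in the window between the two list operations:
 *
 *	ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
 *		<- exe_queue may now be empty, pending_comp still empty
 *	ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
 *
 * Pushing the on-stack spacer onto pending_comp first (with a barrier in
 * between) guarantees at least one of the lists is non-empty for the whole
 * duration of the move.
 */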

static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
	int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}

static struct ecore_exeq_elem *
ecore_exe_queue_alloc_elem(struct bnx2x_softc *sc __rte_unused)
{
	ECORE_MSG(sc, "Allocating a new exe_queue element");
	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
}

/************************ raw_obj functions ***********************************/
static bool ecore_raw_check_pending(struct ecore_raw_obj *o)
{
	/*
	 * !! converts the value returned by ECORE_TEST_BIT such that it
	 * is guaranteed not to be truncated regardless of int definition.
	 *
	 * Note we cannot simply define the function's return value type
	 * to match the type returned by ECORE_TEST_BIT, as it varies by
	 * platform/implementation.
	 */

	return !!ECORE_TEST_BIT(o->state, o->pstate);
}

static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_CLEAR_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_SET_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

/**
 * ecore_state_wait - wait until the given bit (state) is cleared
 *
 * @sc:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static int ecore_state_wait(struct bnx2x_softc *sc, int state,
			    uint32_t *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(sc))
		cnt *= 20;

	ECORE_MSG(sc, "waiting for state to become %d", state);

	ECORE_MIGHT_SLEEP();
	while (cnt--) {
		bnx2x_intr_legacy(sc);
		if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
			ECORE_MSG(sc, "exit  (cnt %d)", 5000 - cnt);
#endif
			rte_atomic32_set(&sc->scan_fp, 0);
			return ECORE_SUCCESS;
		}

		ECORE_WAIT(sc, delay_us);

		if (sc->panic) {
			rte_atomic32_set(&sc->scan_fp, 0);
			return ECORE_IO;
		}
	}

	/* timeout! */
	PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
	rte_atomic32_set(&sc->scan_fp, 0);
#ifdef ECORE_STOP_ON_ERROR
	ecore_panic();
#endif

	return ECORE_TIMEOUT;
}
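
/* Typical use (sketch): a flow that posted a ramrod and set a pending bit
 * waits for the completion path to clear that same bit:
 *
 *	rc = ecore_state_wait(sc, o->raw.state, o->raw.pstate);
 *	if (rc == ECORE_TIMEOUT)
 *		return rc;	- the FW never answered, treat as fatal
 *
 * This is exactly what ecore_raw_wait() below wraps for a raw object.
 */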

static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
{
	return ecore_state_wait(sc, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get_entry(mp, offset);
}

static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get(mp, 1);
}

static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

/**
 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on the vlan
 * mac head list.
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Non-blocking implementation; should be called under execution
 *          queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
					    struct ecore_vlan_mac_obj *o)
{
	if (o->head_reader) {
		ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy");
		return ECORE_BUSY;
	}

	ECORE_MSG(sc, "vlan_mac_lock writer - Taken");
	return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute a step which was previously
 * pended because the vlan mac head list lock was taken.
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	int rc;
	uint32_t ramrod_flags = o->saved_ramrod_flags;

	ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %u",
		  ramrod_flags);
	o->head_exe_request = FALSE;
	o->saved_ramrod_flags = 0;
	rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
	if (rc != ECORE_SUCCESS) {
		PMD_DRV_LOG(ERR, sc,
			    "execution of pending commands failed with rc %d",
			    rc);
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	}
}

/**
 * __ecore_vlan_mac_h_pend - pend an execution step which couldn't run
 * because the vlan mac head list lock was taken.
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 * @ramrod_flags:	ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
				    struct ecore_vlan_mac_obj *o,
				    uint32_t ramrod_flags)
{
	o->head_exe_request = TRUE;
	o->saved_ramrod_flags = ramrod_flags;
	ECORE_MSG(sc, "Placing pending execution with ramrod flags %u",
		  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		ECORE_MSG(sc,
			  "vlan_mac_lock - writer release encountered a pending request");
		__ecore_vlan_mac_h_exec_pending(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
				   struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_write_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
					struct ecore_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	ECORE_MSG(sc,
		  "vlan_mac_lock - locked reader - number %d", o->head_reader);

	return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	rc = __ecore_vlan_mac_h_read_lock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader - possibly releasing and reclaiming the execution queue
 *          lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
					   struct ecore_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		PMD_DRV_LOG(ERR, sc,
			    "Need to release vlan mac reader lock, but lock isn't taken");
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	} else {
		o->head_reader--;
		ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d",
			  o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this
	 * reader was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request");

		/* Writer release will do the trick */
		__ecore_vlan_mac_h_write_unlock(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_read_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
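
/* Usage sketch (illustrative): code traversing o->head takes the reader
 * lock around the walk so a concurrently executing command (a writer)
 * cannot mutate the registry underneath it:
 *
 *	if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
 *		ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
 *					  struct ecore_vlan_mac_registry_elem)
 *			...inspect pos->u...
 *		ecore_vlan_mac_h_read_unlock(sc, o);
 *	}
 *
 * ecore_get_n_elements() below follows exactly this pattern.
 */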

/**
 * ecore_get_n_elements - get n elements from the vlan mac registry
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 * @n:			number of elements to get
 * @base:		base address for element placement
 * @stride:		stride between elements (in bytes)
 * @size:		size of each element (in bytes)
 */
static int ecore_get_n_elements(struct bnx2x_softc *sc,
				struct ecore_vlan_mac_obj *o, int n,
				uint8_t *base, uint8_t stride, uint8_t size)
{
	struct ecore_vlan_mac_registry_elem *pos;
	uint8_t *next = base;
	int counter = 0, read_lock;

	ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		PMD_DRV_LOG(ERR, sc,
			    "get_n_elements failed to get vlan mac reader lock; Access without lock");

	/* traverse list */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (counter < n) {
			ECORE_MEMCPY(next, &pos->u, size);
			counter++;
			ECORE_MSG(sc,
				  "copied element number %d to address %p element was:",
				  counter, next);
			next += stride + size;
		}
	}

	if (read_lock == ECORE_SUCCESS) {
		ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)");
		ecore_vlan_mac_h_read_unlock(sc, o);
	}

	return counter * ETH_ALEN;
}
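
/* Call sketch (hypothetical buffer layout): to copy up to 8 MACs into an
 * array of back-to-back 6-byte entries, a caller could pass a zero stride:
 *
 *	uint8_t macs[8 * ETH_ALEN];
 *	int bytes = ecore_get_n_elements(sc, mac_obj, 8, macs, 0, ETH_ALEN);
 *
 * Each copied element advances the cursor by stride + size, and the return
 * value is the number of copied elements times ETH_ALEN.
 */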

/* check_add() callbacks */
static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
			       struct ecore_vlan_mac_obj *o,
			       union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC " RTE_ETHER_ADDR_PRT_FMT " for ADD command",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
		return ECORE_INVAL;

	/* Check if a requested MAC already exists */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
ecore_check_mac_del(struct bnx2x_softc *sc __rte_unused,
		    struct ecore_vlan_mac_obj *o,
		    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC " RTE_ETHER_ADDR_PRT_FMT " for DEL command",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool ecore_check_move(struct bnx2x_softc *sc,
			     struct ecore_vlan_mac_obj *src_o,
			     struct ecore_vlan_mac_obj *dst_o,
			     union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(sc, src_o, data);

	/* Check if the configuration can be added */
	rc = dst_o->check_add(sc, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return FALSE;

	return TRUE;
}

static bool ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
					__rte_unused struct ecore_vlan_mac_obj *src_o,
					__rte_unused struct ecore_vlan_mac_obj *dst_o,
					__rte_unused union ecore_classification_ramrod_data *data)
{
	return FALSE;
}

static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
	struct ecore_raw_obj *raw = &o->raw;
	uint8_t rx_tx_flag = 0;

	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
			  bool add, unsigned char *dev_addr, int index)
{
	uint32_t wb_data[2];
	uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
	    NIG_REG_LLH0_FUNC_MEM;

	if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
		return;

	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
		return;

	ECORE_MSG(sc, "Going to %s LLH configuration at entry %d",
		  (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a uint64_t WB register */
		reg_offset += 8 * index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
	}

	REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
		    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
}
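
/* Packing illustration (not driver code): for MAC 00:11:22:33:44:55 the
 * two wide-bus words written above come out as
 *
 *	wb_data[0] = 0x22334455;	bytes 2..5 - low dword
 *	wb_data[1] = 0x00000011;	bytes 0..1 - high dword
 *
 * i.e. the 48-bit address occupies one 64-bit LLH_FUNC_MEM line, 8 bytes
 * per CAM entry (hence reg_offset += 8 * index), while the ENABLE
 * registers are plain 32-bit, 4 bytes per entry.
 */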

/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @o:		queue for which we want to configure this rule
 * @add:	if TRUE the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
					  bool add, int opcode,
					  struct eth_classify_cmd_header *hdr)
{
	struct ecore_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
	    (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	ECORE_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in the ramrod data
 *
 * Currently we always configure one rule; the echo field is set to contain
 * the CID and an opcode type.
 */
static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
					    struct eth_classify_header *hdr,
					    int rule_cnt)
{
	hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
				      (type << ECORE_SWCID_SHIFT));
	hdr->rule_cnt = (uint8_t)rule_cnt;
}
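
/* Encoding sketch (mask/shift values assumed from the common ecore headers,
 * e.g. ECORE_SWCID_MASK = 0x00ffffff and ECORE_SWCID_SHIFT = 24): for
 * cid 0x30 and a pending type of 1 the echo dword would be
 *
 *	echo = (0x30 & 0x00ffffff) | (1 << 24) = 0x01000030
 *
 * letting the completion handler recover both the connection id and the
 * command type from a single field.
 */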

/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
				 struct ecore_vlan_mac_obj *o,
				 struct ecore_exeq_elem *elem, int rule_idx,
				 __rte_unused int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
	    (struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	uint32_t *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, the current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When a PF configuration with multiple unicast ETH MACs in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of the 8 per-function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != ECORE_VLAN_MAC_MOVE) {
		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Setup a command header */
	ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	ECORE_MSG(sc, "About to %s MAC " RTE_ETHER_ADDR_PRT_FMT " for Queue %d",
		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
		  mac[4], mac[5], raw->cl_id);

	/* Set a MAC itself */
	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.vlan_mac.target_obj,
					      TRUE, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
		    elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
	}

	/* Set the ramrod data header */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @o:		queue
 * @type:	ECORE_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1H
 */
static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj *o,
					     int type, int cam_offset,
					     struct mac_configuration_hdr *hdr)
{
	struct ecore_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (uint8_t)cam_offset;
	hdr->client_id = ECORE_CPU_TO_LE16(0xff);
	hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
				      (type << ECORE_SWCID_SHIFT));
}

static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj *o,
					     int add, int opcode,
					     uint8_t *mac, uint16_t vlan_id,
					     struct mac_configuration_entry *cfg_entry)
{
	struct ecore_raw_obj *r = &o->raw;
	uint32_t cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);

	if (add) {
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_SET);
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
			       opcode);

		/* Set a MAC in a ramrod data */
		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else {
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_INVALIDATE);
	}
}

static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc __rte_unused,
					 struct ecore_vlan_mac_obj *o,
					 int type, int cam_offset,
					 int add, uint8_t *mac,
					 uint16_t vlan_id, int opcode,
					 struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];

	ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
	ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
					 cfg_entry);

	ECORE_MSG(sc, "%s MAC " RTE_ETHER_ADDR_PRT_FMT " CLID %d CAM offset %d",
		  (add ? "setting" : "clearing"),
		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
		  o->raw.cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 * @elem:	ecore_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  struct ecore_exeq_elem *elem,
				  __rte_unused int rule_idx, int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
	    (struct mac_configuration_cmd *)(raw->rdata);
	/* The 57711 does not support the MOVE command,
	 * so it's either ADD or DEL
	 */
	int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
	    TRUE : FALSE;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(config, 0, sizeof(*config));

	ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

/**
 * ecore_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @sc:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie should be given back in the next call to make this function
 * handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL, the last element has been handled.
 *
 */
static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_ramrod_params *p,
				  struct ecore_vlan_mac_registry_elem **ppos)
{
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (ECORE_LIST_IS_EMPTY(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
					       struct ecore_vlan_mac_registry_elem,
					       link);
	else
		*ppos = ECORE_LIST_NEXT(*ppos, link,
					struct ecore_vlan_mac_registry_elem);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = ECORE_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

	return ecore_config_vlan_mac(sc, p);
}
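
/* Iteration sketch (illustrative): a restore flow walks the whole registry
 * by feeding the cookie back in until it comes out NULL:
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = ecore_vlan_mac_restore(sc, p, &pos);
 *		if (rc < 0)
 *			break;
 *	} while (pos != NULL);
 */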

/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
						  struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
				  sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @sc:		device handle
 * @qo:		ecore_qable_obj
 * @elem:	ecore_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
				       union ecore_qable_obj *qo,
				       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		ECORE_MSG(sc,
			  "ADD command is not allowed considering current registry state.");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending ADD command already");
		return ECORE_EXISTS;
	}

	/* Consume the credit unless requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @sc:		device handle
 * @qo:		quable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
				       union ecore_qable_obj *qo,
				       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return ECORE_EXISTS.
	 */
	pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		ECORE_MSG(sc,
			  "DEL command is not allowed considering current registry state");
		return ECORE_EXISTS;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already");
		return ECORE_INVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending DEL command already");
		return ECORE_EXISTS;
	}

	/* Return the credit to the credit pool unless requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		PMD_DRV_LOG(ERR, sc, "Failed to return a credit");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @sc:		device handle
 * @qo:		quable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
					union ecore_qable_obj *qo,
					struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct ecore_exeq_elem query_elem;
	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		ECORE_MSG(sc,
			  "MOVE command is not allowed considering current registry state");
		return ECORE_INVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or an ADD command for the destination object. Return
	 * an error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc,
			    "There is a pending DEL command on the source queue already");
		return ECORE_INVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		ECORE_MSG(sc, "There is a pending MOVE command already");
		return ECORE_EXISTS;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc,
			    "There is a pending ADD command on the destination queue already");
		return ECORE_INVAL;
	}

	/* Consume the credit unless requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return ECORE_INVAL;

	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		return ecore_validate_vlan_mac_add(sc, qo, elem);
	case ECORE_VLAN_MAC_DEL:
		return ecore_validate_vlan_mac_del(sc, qo, elem);
	case ECORE_VLAN_MAC_MOVE:
		return ecore_validate_vlan_mac_move(sc, qo, elem);
	default:
		return ECORE_INVAL;
	}
}

static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
				 union ecore_qable_obj *qo,
				 struct ecore_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return ECORE_SUCCESS;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
	case ECORE_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case ECORE_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return ECORE_INVAL;
	}

	if (rc != TRUE)
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 *
 */
static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(sc, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!ecore_exe_queue_empty(exeq))
			ECORE_WAIT(sc, 1000);
		else
			return ECORE_SUCCESS;
	}

	return ECORE_TIMEOUT;
}

static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
					 struct ecore_vlan_mac_obj *o,
					 uint32_t *ramrod_flags)
{
	int rc = ECORE_SUCCESS;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

	ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock");
	rc = __ecore_vlan_mac_h_write_trylock(sc, o);

	if (rc != ECORE_SUCCESS) {
		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

		/* Calling function should not differentiate between this case
		 * and the case in which there is already a pending ramrod
		 */
		rc = ECORE_PENDING;
	} else {
		rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
	}
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc:			device handle
 * @o:			ecore_vlan_mac_obj
 * @cqe:		completion element
 * @ramrod_flags:	if RAMROD_CONT is set, schedules the next execution chunk
 *
 */
static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
				   struct ecore_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   uint32_t *ramrod_flags)
{
	struct ecore_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	ecore_exe_queue_reset_pending(sc, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return ECORE_INVAL;

	/* Run the next bulk of pending commands if requested */
	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		return ECORE_PENDING;

	return ECORE_SUCCESS;
}

/**
 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @sc:		device handle
 * @qo:		ecore_qable_obj
 * @elem:	ecore_exeq_elem
 */
static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem query, *pos;
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;

	ECORE_MEMCPY(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
		break;
	case ECORE_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
				    &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
				PMD_DRV_LOG(ERR, sc,
					    "Failed to return the credit for the optimized ADD command");
				return ECORE_INVAL;
			} else if (!o->get_credit(o)) {	/* VLAN_MAC_DEL */
				PMD_DRV_LOG(ERR, sc,
					    "Failed to recover the credit from the optimized DEL command");
				return ECORE_INVAL;
			}
		}

		ECORE_MSG(sc, "Optimizing %s command",
			  (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
			  "ADD" : "DEL");

		ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
		ecore_exe_queue_free_elem(sc, pos);
		return 1;
	}

	return 0;
}
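
/* Behavioural example: if a DEL for MAC X is submitted while an ADD for
 * the same X is still waiting in exe_queue, the pending ADD is removed
 * here and its CAM credit returned - neither command ever reaches the FW.
 * The new DEL element is then freed by ecore_exe_queue_add(), since
 * optimize() returned a non-zero value.
 */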

/**
 * ecore_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 * @elem:	execution queue element
 * @restore:	TRUE if this is a restore flow
 * @re:		output: the prepared registry element
 *
 * Prepare a registry element according to the current command request.
 */
static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o,
					    struct ecore_exeq_elem *elem,
					    int restore,
					    struct ecore_vlan_mac_registry_elem **re)
{
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	struct ecore_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
		reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
		if (!reg_elem)
			return ECORE_NOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/* This shall never happen, because we have checked the
			 * CAM availability in the 'validate'.
			 */
			ECORE_DBG_BREAK_IF(1);
			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
			return ECORE_INVAL;
		}

		ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			     sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
		    elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else {		/* DEL, RESTORE */
		reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
	}

	*re = reg_elem;
	return ECORE_SUCCESS;
}
1507 
1508 /**
1509  * ecore_execute_vlan_mac - execute vlan mac command
1510  *
1511  * @sc:			device handle
1512  * @qo:
1513  * @exe_chunk:
1514  * @ramrod_flags:
1515  *
1516  * go and send a ramrod!
1517  */
ecore_execute_vlan_mac(struct bnx2x_softc * sc,union ecore_qable_obj * qo,ecore_list_t * exe_chunk,uint32_t * ramrod_flags)1518 static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
1519 				  union ecore_qable_obj *qo,
1520 				  ecore_list_t * exe_chunk,
1521 				  uint32_t *ramrod_flags)
1522 {
1523 	struct ecore_exeq_elem *elem;
1524 	struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1525 	struct ecore_raw_obj *r = &o->raw;
1526 	int rc, idx = 0;
1527 	int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1528 	int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1529 	struct ecore_vlan_mac_registry_elem *reg_elem;
1530 	enum ecore_vlan_mac_cmd cmd;
1531 
1532 	/* If DRIVER_ONLY execution is requested, cleanup a registry
1533 	 * and exit. Otherwise send a ramrod to FW.
1534 	 */
1535 	if (!drv_only) {
1536 
1537 		/* Set pending */
1538 		r->set_pending(r);
1539 
1540 		/* Fill the ramrod data */
1541 		ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1542 					  struct ecore_exeq_elem) {
1543 			cmd = elem->cmd_data.vlan_mac.cmd;
1544 			/* We will add to the target object in MOVE command, so
1545 			 * change the object for a CAM search.
1546 			 */
1547 			if (cmd == ECORE_VLAN_MAC_MOVE)
1548 				cam_obj = elem->cmd_data.vlan_mac.target_obj;
1549 			else
1550 				cam_obj = o;
1551 
1552 			rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1553 							      elem, restore,
1554 							      &reg_elem);
1555 			if (rc)
1556 				goto error_exit;
1557 
1558 			ECORE_DBG_BREAK_IF(!reg_elem);
1559 
1560 			/* Push a new entry into the registry */
1561 			if (!restore &&
1562 			    ((cmd == ECORE_VLAN_MAC_ADD) ||
1563 			     (cmd == ECORE_VLAN_MAC_MOVE)))
1564 				ECORE_LIST_PUSH_HEAD(&reg_elem->link,
1565 						     &cam_obj->head);
1566 
1567 			/* Configure a single command in a ramrod data buffer */
1568 			o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);
1569 
1570 			/* MOVE command consumes 2 entries in the ramrod data */
1571 			if (cmd == ECORE_VLAN_MAC_MOVE)
1572 				idx += 2;
1573 			else
1574 				idx++;
1575 		}
1576 
1577 		/*
1578 		 *  No need for an explicit memory barrier here as long we would
1579 		 *  need to ensure the ordering of writing to the SPQ element
1580 		 *  and updating of the SPQ producer which involves a memory
1581 		 *  read and we will have to put a full memory barrier there
1582 		 *  (inside ecore_sp_post()).
1583 		 */
1584 
1585 		rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1586 				   r->rdata_mapping, ETH_CONNECTION_TYPE);
1587 		if (rc)
1588 			goto error_exit;
1589 	}
1590 
1591 	/* Now, when we are done with the ramrod - clean up the registry */
1592 	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1593 		cmd = elem->cmd_data.vlan_mac.cmd;
1594 		if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
1595 			reg_elem = o->check_del(sc, o,
1596 						&elem->cmd_data.vlan_mac.u);
1597 
1598 			ECORE_DBG_BREAK_IF(!reg_elem);
1599 
1600 			o->put_cam_offset(o, reg_elem->cam_offset);
1601 			ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
1602 			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1603 		}
1604 	}
1605 
1606 	if (!drv_only)
1607 		return ECORE_PENDING;
1608 	else
1609 		return ECORE_SUCCESS;
1610 
1611 error_exit:
1612 	r->clear_pending(r);
1613 
1614 	/* Cleanup a registry in case of a failure */
1615 	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1616 		cmd = elem->cmd_data.vlan_mac.cmd;
1617 
1618 		if (cmd == ECORE_VLAN_MAC_MOVE)
1619 			cam_obj = elem->cmd_data.vlan_mac.target_obj;
1620 		else
1621 			cam_obj = o;
1622 
1623 		/* Delete all entries that were newly added above */
1624 		if (!restore &&
1625 		    ((cmd == ECORE_VLAN_MAC_ADD) ||
1626 		     (cmd == ECORE_VLAN_MAC_MOVE))) {
1627 			reg_elem = o->check_del(sc, cam_obj,
1628 						&elem->cmd_data.vlan_mac.u);
1629 			if (reg_elem) {
1630 				ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
1631 							&cam_obj->head);
1632 				ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1633 			}
1634 		}
1635 	}
1636 
1637 	return rc;
1638 }
1639 
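/* Editorial sketch (not part of the driver): the idx accounting in
 * ecore_execute_vlan_mac() above mirrors the per-command length assigned
 * when the element was queued -- a MOVE occupies two rules in the ramrod
 * data (a DEL on the source object plus an ADD on the target), everything
 * else occupies one.  The bookkeeping in isolation, with hypothetical names:
 *
 *	enum cmd_kind { CMD_ADD, CMD_DEL, CMD_MOVE };
 *
 *	static int ramrod_slots_needed(const enum cmd_kind *cmds, int n)
 *	{
 *		int i, slots = 0;
 *
 *		for (i = 0; i < n; i++)
 *			slots += (cmds[i] == CMD_MOVE) ? 2 : 1;
 *
 *		return slots;	(must not exceed the exe chunk length)
 *	}
 */
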
1640 static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct
1641 				       ecore_vlan_mac_ramrod_params *p)
1642 {
1643 	struct ecore_exeq_elem *elem;
1644 	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1645 	int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1646 
1647 	/* Allocate the execution queue element */
1648 	elem = ecore_exe_queue_alloc_elem(sc);
1649 	if (!elem)
1650 		return ECORE_NOMEM;
1651 
1652 	/* Set the command 'length' */
1653 	switch (p->user_req.cmd) {
1654 	case ECORE_VLAN_MAC_MOVE:
1655 		elem->cmd_len = 2;
1656 		break;
1657 	default:
1658 		elem->cmd_len = 1;
1659 	}
1660 
1661 	/* Fill the object specific info */
1662 	ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
1663 		     sizeof(p->user_req));
1664 
1665 	/* Try to add a new command to the pending list */
1666 	return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1667 }
1668 
1669 /**
1670  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1671  *
1672  * @sc:	  device handle
1673  * @p:	ramrod parameters (VLAN/MAC object, user request and execution flags)
1674  *
1675  */
1676 int ecore_config_vlan_mac(struct bnx2x_softc *sc,
1677 			  struct ecore_vlan_mac_ramrod_params *p)
1678 {
1679 	int rc = ECORE_SUCCESS;
1680 	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1681 	uint32_t *ramrod_flags = &p->ramrod_flags;
1682 	int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
1683 	struct ecore_raw_obj *raw = &o->raw;
1684 
1685 	/*
1686 	 * Add new elements to the execution list for commands that require it.
1687 	 */
1688 	if (!cont) {
1689 		rc = ecore_vlan_mac_push_new_cmd(sc, p);
1690 		if (rc)
1691 			return rc;
1692 	}
1693 
1694 	/* If nothing more will be executed in this iteration, report
1695 	 * PENDING when there are still queued commands.
1696 	 */
1697 	if (!ecore_exe_queue_empty(&o->exe_queue))
1698 		rc = ECORE_PENDING;
1699 
1700 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1701 		ECORE_MSG(sc,
1702 			  "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
1703 		raw->clear_pending(raw);
1704 	}
1705 
1706 	/* Execute commands if required */
1707 	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
1708 	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
1709 		rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
1710 						   &p->ramrod_flags);
1711 		if (rc < 0)
1712 			return rc;
1713 	}
1714 
1715 	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
1716 	 * the user wants to wait until the last command is done.
1717 	 */
1718 	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1719 		/* Wait at most for the current exe_queue length iterations
1720 		 * plus one (for the current pending command).
1721 		 */
1722 		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
1723 
1724 		while (!ecore_exe_queue_empty(&o->exe_queue) &&
1725 		       max_iterations--) {
1726 
1727 			/* Wait for the current command to complete */
1728 			rc = raw->wait_comp(sc, raw);
1729 			if (rc)
1730 				return rc;
1731 
1732 			/* Make a next step */
1733 			rc = __ecore_vlan_mac_execute_step(sc,
1734 							   p->vlan_mac_obj,
1735 							   &p->ramrod_flags);
1736 			if (rc < 0)
1737 				return rc;
1738 		}
1739 
1740 		return ECORE_SUCCESS;
1741 	}
1742 
1743 	return rc;
1744 }
1745 
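/* Editorial sketch (not part of the driver): a typical synchronous caller
 * of ecore_config_vlan_mac() fills the ramrod parameters, requests
 * completion waiting and lets the function drain the execution queue.
 * Field names follow this file's structures (see ecore_sp.h); the helper
 * itself is hypothetical.
 *
 *	static int example_add_mac_sync(struct bnx2x_softc *sc,
 *					struct ecore_vlan_mac_obj *obj,
 *					const uint8_t *mac)
 *	{
 *		struct ecore_vlan_mac_ramrod_params p;
 *
 *		ECORE_MEMSET(&p, 0, sizeof(p));
 *		p.vlan_mac_obj = obj;
 *		p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *		ECORE_MEMCPY(p.user_req.u.mac.mac, mac, ETH_ALEN);
 *		ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *		return ecore_config_vlan_mac(sc, &p);
 *	}
 */
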
1746 /**
1747  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1748  *
1749  * @sc:			device handle
1750  * @o:			vlan_mac object to delete entries from
1751  * @vlan_mac_flags:	flags that entries must match to be deleted
1752  * @ramrod_flags:	execution flags to be used for this deletion
1753  *
1754  * Returns zero if the last operation has completed successfully and there
1755  * are no more elements left, a positive value if the last operation has
1756  * completed successfully and there are more previously configured elements,
1757  * and a negative value if the current operation has failed.
1758  */
1759 static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
1760 				  struct ecore_vlan_mac_obj *o,
1761 				  uint32_t *vlan_mac_flags,
1762 				  uint32_t *ramrod_flags)
1763 {
1764 	struct ecore_vlan_mac_registry_elem *pos = NULL;
1765 	int rc = 0, read_lock;
1766 	struct ecore_vlan_mac_ramrod_params p;
1767 	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1768 	struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
1769 
1770 	/* Clear pending commands first */
1771 
1772 	ECORE_SPIN_LOCK_BH(&exeq->lock);
1773 
1774 	ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
1775 				       &exeq->exe_queue, link,
1776 				       struct ecore_exeq_elem) {
1777 		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1778 		    *vlan_mac_flags) {
1779 			rc = exeq->remove(sc, exeq->owner, exeq_pos);
1780 			if (rc) {
1781 				PMD_DRV_LOG(ERR, sc, "Failed to remove command");
1782 				ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1783 				return rc;
1784 			}
1785 			ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
1786 						&exeq->exe_queue);
1787 			ecore_exe_queue_free_elem(sc, exeq_pos);
1788 		}
1789 	}
1790 
1791 	ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1792 
1793 	/* Prepare a command request */
1794 	ECORE_MEMSET(&p, 0, sizeof(p));
1795 	p.vlan_mac_obj = o;
1796 	p.ramrod_flags = *ramrod_flags;
1797 	p.user_req.cmd = ECORE_VLAN_MAC_DEL;
1798 
1799 	/* Add all but the last VLAN-MAC to the execution queue without
1800 	 * actually executing anything.
1801 	 */
1802 	ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
1803 	ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
1804 	ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1805 
1806 	ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)");
1807 	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
1808 	if (read_lock != ECORE_SUCCESS)
1809 		return read_lock;
1810 
1811 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
1812 				  struct ecore_vlan_mac_registry_elem) {
1813 		if (pos->vlan_mac_flags == *vlan_mac_flags) {
1814 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1815 			ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
1816 			rc = ecore_config_vlan_mac(sc, &p);
1817 			if (rc < 0) {
1818 				PMD_DRV_LOG(ERR, sc,
1819 					    "Failed to add a new DEL command");
1820 				ecore_vlan_mac_h_read_unlock(sc, o);
1821 				return rc;
1822 			}
1823 		}
1824 	}
1825 
1826 	ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
1827 	ecore_vlan_mac_h_read_unlock(sc, o);
1828 
1829 	p.ramrod_flags = *ramrod_flags;
1830 	ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1831 
1832 	return ecore_config_vlan_mac(sc, &p);
1833 }
1834 
1835 static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
1836 			       uint32_t cid, uint8_t func_id,
1837 			       void *rdata,
1838 			       ecore_dma_addr_t rdata_mapping, int state,
1839 			       uint32_t *pstate, ecore_obj_type type)
1840 {
1841 	raw->func_id = func_id;
1842 	raw->cid = cid;
1843 	raw->cl_id = cl_id;
1844 	raw->rdata = rdata;
1845 	raw->rdata_mapping = rdata_mapping;
1846 	raw->state = state;
1847 	raw->pstate = pstate;
1848 	raw->obj_type = type;
1849 	raw->check_pending = ecore_raw_check_pending;
1850 	raw->clear_pending = ecore_raw_clear_pending;
1851 	raw->set_pending = ecore_raw_set_pending;
1852 	raw->wait_comp = ecore_raw_wait;
1853 }
1854 
1855 static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
1856 				       uint8_t cl_id, uint32_t cid,
1857 				       uint8_t func_id, void *rdata,
1858 				       ecore_dma_addr_t rdata_mapping,
1859 				       int state, uint32_t *pstate,
1860 				       ecore_obj_type type,
1861 				       struct ecore_credit_pool_obj
1862 				       *macs_pool, struct ecore_credit_pool_obj
1863 				       *vlans_pool)
1864 {
1865 	ECORE_LIST_INIT(&o->head);
1866 	o->head_reader = 0;
1867 	o->head_exe_request = FALSE;
1868 	o->saved_ramrod_flags = 0;
1869 
1870 	o->macs_pool = macs_pool;
1871 	o->vlans_pool = vlans_pool;
1872 
1873 	o->delete_all = ecore_vlan_mac_del_all;
1874 	o->restore = ecore_vlan_mac_restore;
1875 	o->complete = ecore_complete_vlan_mac;
1876 	o->wait = ecore_wait_vlan_mac;
1877 
1878 	ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1879 			   state, pstate, type);
1880 }
1881 
1882 void ecore_init_mac_obj(struct bnx2x_softc *sc,
1883 			struct ecore_vlan_mac_obj *mac_obj,
1884 			uint8_t cl_id, uint32_t cid, uint8_t func_id,
1885 			void *rdata, ecore_dma_addr_t rdata_mapping, int state,
1886 			uint32_t *pstate, ecore_obj_type type,
1887 			struct ecore_credit_pool_obj *macs_pool)
1888 {
1889 	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
1890 
1891 	ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1892 				   rdata_mapping, state, pstate, type,
1893 				   macs_pool, NULL);
1894 
1895 	/* CAM credit pool handling */
1896 	mac_obj->get_credit = ecore_get_credit_mac;
1897 	mac_obj->put_credit = ecore_put_credit_mac;
1898 	mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
1899 	mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
1900 
1901 	if (CHIP_IS_E1x(sc)) {
1902 		mac_obj->set_one_rule = ecore_set_one_mac_e1x;
1903 		mac_obj->check_del = ecore_check_mac_del;
1904 		mac_obj->check_add = ecore_check_mac_add;
1905 		mac_obj->check_move = ecore_check_move_always_err;
1906 		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1907 
1908 		/* Exe Queue */
1909 		ecore_exe_queue_init(sc,
1910 				     &mac_obj->exe_queue, 1, qable_obj,
1911 				     ecore_validate_vlan_mac,
1912 				     ecore_remove_vlan_mac,
1913 				     ecore_optimize_vlan_mac,
1914 				     ecore_execute_vlan_mac,
1915 				     ecore_exeq_get_mac);
1916 	} else {
1917 		mac_obj->set_one_rule = ecore_set_one_mac_e2;
1918 		mac_obj->check_del = ecore_check_mac_del;
1919 		mac_obj->check_add = ecore_check_mac_add;
1920 		mac_obj->check_move = ecore_check_move;
1921 		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1922 		mac_obj->get_n_elements = ecore_get_n_elements;
1923 
1924 		/* Exe Queue */
1925 		ecore_exe_queue_init(sc,
1926 				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1927 				     qable_obj, ecore_validate_vlan_mac,
1928 				     ecore_remove_vlan_mac,
1929 				     ecore_optimize_vlan_mac,
1930 				     ecore_execute_vlan_mac,
1931 				     ecore_exeq_get_mac);
1932 	}
1933 }
1934 
1935 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1936 static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct
1937 				       tstorm_eth_mac_filter_config
1938 				       *mac_filters, uint16_t pf_id)
1939 {
1940 	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1941 
1942 	uint32_t addr = BAR_TSTRORM_INTMEM +
1943 	    TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
1944 
1945 	ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters);
1946 }
1947 
1948 static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
1949 				 struct ecore_rx_mode_ramrod_params *p)
1950 {
1951 	/* update the sc MAC filter structure */
1952 	uint32_t mask = (1 << p->cl_id);
1953 
1954 	struct tstorm_eth_mac_filter_config *mac_filters =
1955 	    (struct tstorm_eth_mac_filter_config *)p->rdata;
1956 
1957 	/* initial setting is drop-all */
1958 	uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
1959 	uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
1960 	uint8_t unmatched_unicast = 0;
1961 
1962 	/* In E1x we only take the RX accept flags into account, since
1963 	 * TX switching isn't enabled. */
1964 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
1965 		/* accept matched ucast */
1966 		drop_all_ucast = 0;
1967 
1968 	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
1969 		/* accept matched mcast */
1970 		drop_all_mcast = 0;
1971 
1972 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
1973 		/* accept all ucast */
1974 		drop_all_ucast = 0;
1975 		accp_all_ucast = 1;
1976 	}
1977 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
1978 		/* accept all mcast */
1979 		drop_all_mcast = 0;
1980 		accp_all_mcast = 1;
1981 	}
1982 	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
1983 		/* accept (all) bcast */
1984 		accp_all_bcast = 1;
1985 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
1986 		/* accept unmatched unicasts */
1987 		unmatched_unicast = 1;
1988 
1989 	mac_filters->ucast_drop_all = drop_all_ucast ?
1990 	    mac_filters->ucast_drop_all | mask :
1991 	    mac_filters->ucast_drop_all & ~mask;
1992 
1993 	mac_filters->mcast_drop_all = drop_all_mcast ?
1994 	    mac_filters->mcast_drop_all | mask :
1995 	    mac_filters->mcast_drop_all & ~mask;
1996 
1997 	mac_filters->ucast_accept_all = accp_all_ucast ?
1998 	    mac_filters->ucast_accept_all | mask :
1999 	    mac_filters->ucast_accept_all & ~mask;
2000 
2001 	mac_filters->mcast_accept_all = accp_all_mcast ?
2002 	    mac_filters->mcast_accept_all | mask :
2003 	    mac_filters->mcast_accept_all & ~mask;
2004 
2005 	mac_filters->bcast_accept_all = accp_all_bcast ?
2006 	    mac_filters->bcast_accept_all | mask :
2007 	    mac_filters->bcast_accept_all & ~mask;
2008 
2009 	mac_filters->unmatched_unicast = unmatched_unicast ?
2010 	    mac_filters->unmatched_unicast | mask :
2011 	    mac_filters->unmatched_unicast & ~mask;
2012 
2013 	ECORE_MSG(sc, "drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x "
2014 		  "accp_mcast 0x%x accp_bcast 0x%x",
2015 		  mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2016 		  mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2017 		  mac_filters->bcast_accept_all);
2018 
2019 	/* write the MAC filter structure */
2020 	__storm_memset_mac_filters(sc, mac_filters, p->func_id);
2021 
2022 	/* The operation is completed */
2023 	ECORE_CLEAR_BIT(p->state, p->pstate);
2024 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
2025 
2026 	return ECORE_SUCCESS;
2027 }
2028 
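/* Editorial sketch (not part of the driver): every filter word updated in
 * ecore_set_rx_mode_e1x() above follows the same "set or clear this
 * client's bit" pattern.  Factored out, with hypothetical names (the
 * tstorm filter fields may be narrower than uint32_t):
 *
 *	static uint32_t update_client_bit(uint32_t word, uint32_t mask,
 *					  int set)
 *	{
 *		return set ? (word | mask) : (word & ~mask);
 *	}
 *
 * e.g. mac_filters->ucast_drop_all =
 *	update_client_bit(mac_filters->ucast_drop_all, 1 << p->cl_id,
 *			  drop_all_ucast);
 */
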
2029 /* Setup ramrod data */
2030 static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header
2031 					   *hdr, uint8_t rule_cnt)
2032 {
2033 	hdr->echo = ECORE_CPU_TO_LE32(cid);
2034 	hdr->rule_cnt = rule_cnt;
2035 }
2036 
2037 static void ecore_rx_mode_set_cmd_state_e2(uint32_t *accept_flags,
2038 			struct eth_filter_rules_cmd *cmd, int clear_accept_all)
2039 {
2040 	uint16_t state;
2041 
2042 	/* start with 'drop-all' */
2043 	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2044 	    ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2045 
2046 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2047 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2048 
2049 	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2050 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2051 
2052 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2053 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2054 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2055 	}
2056 
2057 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2058 		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2059 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2060 	}
2061 	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2062 		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2063 
2064 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2065 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2066 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2067 	}
2068 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2069 		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2070 
2071 	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2072 	if (clear_accept_all) {
2073 		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2074 		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2075 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2076 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2077 	}
2078 
2079 	cmd->state = ECORE_CPU_TO_LE16(state);
2080 }
2081 
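/* Editorial worked example (not from the source): ecore_rx_mode_set_cmd_state_e2()
 * above starts from a drop-all state and relaxes it one accept flag at a
 * time.  With only ECORE_ACCEPT_UNICAST and ECORE_ACCEPT_BROADCAST set the
 * result is (ETH_FILTER_RULES_CMD_ prefixes dropped for brevity):
 *
 *	state  = UCAST_DROP_ALL | MCAST_DROP_ALL	start: drop all
 *	state &= ~UCAST_DROP_ALL			ACCEPT_UNICAST
 *	state |=  BCAST_ACCEPT_ALL			ACCEPT_BROADCAST
 *
 * i.e. pass matched unicast, pass all broadcast, keep dropping multicast.
 */
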
2082 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
2083 				struct ecore_rx_mode_ramrod_params *p)
2084 {
2085 	struct eth_filter_rules_ramrod_data *data = p->rdata;
2086 	int rc;
2087 	uint8_t rule_idx = 0;
2088 
2089 	/* Reset the ramrod data buffer */
2090 	ECORE_MEMSET(data, 0, sizeof(*data));
2091 
2092 	/* Setup ramrod data */
2093 
2094 	/* Tx (internal switching) */
2095 	if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2096 		data->rules[rule_idx].client_id = p->cl_id;
2097 		data->rules[rule_idx].func_id = p->func_id;
2098 
2099 		data->rules[rule_idx].cmd_general_data =
2100 		    ETH_FILTER_RULES_CMD_TX_CMD;
2101 
2102 		ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2103 					       &(data->rules[rule_idx++]),
2104 					       FALSE);
2105 	}
2106 
2107 	/* Rx */
2108 	if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2109 		data->rules[rule_idx].client_id = p->cl_id;
2110 		data->rules[rule_idx].func_id = p->func_id;
2111 
2112 		data->rules[rule_idx].cmd_general_data =
2113 		    ETH_FILTER_RULES_CMD_RX_CMD;
2114 
2115 		ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2116 					       &(data->rules[rule_idx++]),
2117 					       FALSE);
2118 	}
2119 
2120 	/* If FCoE queue configuration has been requested, configure the Rx
2121 	 * and internal switching modes for this queue in separate rules.
2122 	 *
2123 	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2124 	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2125 	 */
2126 	if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2127 		/*  Tx (internal switching) */
2128 		if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2129 			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2130 			data->rules[rule_idx].func_id = p->func_id;
2131 
2132 			data->rules[rule_idx].cmd_general_data =
2133 			    ETH_FILTER_RULES_CMD_TX_CMD;
2134 
2135 			ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2136 						       &(data->rules
2137 							 [rule_idx++]), TRUE);
2138 		}
2139 
2140 		/* Rx */
2141 		if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2142 			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2143 			data->rules[rule_idx].func_id = p->func_id;
2144 
2145 			data->rules[rule_idx].cmd_general_data =
2146 			    ETH_FILTER_RULES_CMD_RX_CMD;
2147 
2148 			ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2149 						       &(data->rules
2150 							 [rule_idx++]), TRUE);
2151 		}
2152 	}
2153 
2154 	/* Set the ramrod header (most importantly - number of rules to
2155 	 * configure).
2156 	 */
2157 	ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2158 
2159 	ECORE_MSG(sc,
2160 		  "About to configure %d rules, rx_accept_flags 0x%x, tx_accept_flags 0x%x",
2161 		  data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
2162 
2163 	/* No need for an explicit memory barrier here: the required
2164 	 * ordering of the write to the SPQ element and the update of
2165 	 * the SPQ producer (which involves a memory read) is already
2166 	 * enforced by the full memory barrier inside
2167 	 * ecore_sp_post().
2168 	 */
2169 
2170 	/* Send a ramrod */
2171 	rc = ecore_sp_post(sc,
2172 			   RAMROD_CMD_ID_ETH_FILTER_RULES,
2173 			   p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
2174 	if (rc)
2175 		return rc;
2176 
2177 	/* Ramrod completion is pending */
2178 	return ECORE_PENDING;
2179 }
2180 
2181 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
2182 				      struct ecore_rx_mode_ramrod_params *p)
2183 {
2184 	return ecore_state_wait(sc, p->state, p->pstate);
2185 }
2186 
2187 static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
2188 				    __rte_unused struct
2189 				    ecore_rx_mode_ramrod_params *p)
2190 {
2191 	/* Do nothing */
2192 	return ECORE_SUCCESS;
2193 }
2194 
2195 int ecore_config_rx_mode(struct bnx2x_softc *sc,
2196 			 struct ecore_rx_mode_ramrod_params *p)
2197 {
2198 	int rc;
2199 
2200 	/* Configure the new classification in the chip */
2201 	if (p->rx_mode_obj->config_rx_mode) {
2202 		rc = p->rx_mode_obj->config_rx_mode(sc, p);
2203 		if (rc < 0)
2204 			return rc;
2205 
2206 		/* Wait for a ramrod completion if it was requested */
2207 		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2208 			rc = p->rx_mode_obj->wait_comp(sc, p);
2209 			if (rc)
2210 				return rc;
2211 		}
2212 	} else {
2213 		ECORE_MSG(sc, "ERROR: config_rx_mode is NULL");
2214 		return -1;
2215 	}
2216 
2217 	return rc;
2218 }
2219 
2220 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
2221 {
2222 	if (CHIP_IS_E1x(sc)) {
2223 		o->wait_comp = ecore_empty_rx_mode_wait;
2224 		o->config_rx_mode = ecore_set_rx_mode_e1x;
2225 	} else {
2226 		o->wait_comp = ecore_wait_rx_mode_comp_e2;
2227 		o->config_rx_mode = ecore_set_rx_mode_e2;
2228 	}
2229 }
2230 
2231 /********************* Multicast verbs: SET, CLEAR ****************************/
2232 static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac)
2233 {
2234 	return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2235 }
2236 
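/* Editorial sketch (not part of the driver): the bin is simply the top
 * byte of a little-endian CRC32 over the 6-byte MAC, yielding 0..255.
 * Assuming ECORE_CRC32_LE is the standard reflected CRC-32 (polynomial
 * 0xEDB88320; the macro's exact seed/finalization conventions may differ),
 * a standalone equivalent is:
 *
 *	static uint32_t crc32_le(uint32_t seed, const uint8_t *buf, int len)
 *	{
 *		uint32_t crc = seed;
 *		int i, j;
 *
 *		for (i = 0; i < len; i++) {
 *			crc ^= buf[i];
 *			for (j = 0; j < 8; j++)
 *				crc = (crc >> 1) ^
 *				      ((crc & 1) ? 0xedb88320 : 0);
 *		}
 *		return crc;
 *	}
 *
 *	bin = (crc32_le(0, mac, ETH_ALEN) >> 24) & 0xff;
 */
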
2237 struct ecore_mcast_mac_elem {
2238 	ecore_list_entry_t link;
2239 	uint8_t mac[ETH_ALEN];
2240 	uint8_t pad[2];		/* For a natural alignment of the following buffer */
2241 };
2242 
2243 struct ecore_pending_mcast_cmd {
2244 	ecore_list_entry_t link;
2245 	int type;		/* ECORE_MCAST_CMD_X */
2246 	union {
2247 		ecore_list_t macs_head;
2248 		uint32_t macs_num;	/* Needed for DEL command */
2249 		int next_bin;	/* Needed for RESTORE flow with approx match */
2250 	} data;
2251 
2252 	int done;		/* set to TRUE when the command has been handled. It is
2253 				 * practically used only in the 57712 handling, where one
2254 				 * pending command may be handled in a few operations. Since
2255 				 * for other chips every operation handling completes in a
2256 				 * single ramrod, there is no need to utilize this field.
2257 				 */
2258 };
2259 
2260 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
2261 {
2262 	if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2263 	    o->raw.wait_comp(sc, &o->raw))
2264 		return ECORE_TIMEOUT;
2265 
2266 	return ECORE_SUCCESS;
2267 }
2268 
2269 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
2270 				   struct ecore_mcast_obj *o,
2271 				   struct ecore_mcast_ramrod_params *p,
2272 				   enum ecore_mcast_cmd cmd)
2273 {
2274 	int total_sz;
2275 	struct ecore_pending_mcast_cmd *new_cmd;
2276 	struct ecore_mcast_mac_elem *cur_mac = NULL;
2277 	struct ecore_mcast_list_elem *pos;
2278 	int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2279 			     p->mcast_list_len : 0);
2280 
2281 	/* If the command is empty ("handle pending commands only"), we are done */
2282 	if (!p->mcast_list_len)
2283 		return ECORE_SUCCESS;
2284 
2285 	total_sz = sizeof(*new_cmd) +
2286 	    macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2287 
2288 	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2289 	new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2290 
2291 	if (!new_cmd)
2292 		return ECORE_NOMEM;
2293 
2294 	ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d",
2295 		  cmd, macs_list_len);
2296 
2297 	ECORE_LIST_INIT(&new_cmd->data.macs_head);
2298 
2299 	new_cmd->type = cmd;
2300 	new_cmd->done = FALSE;
2301 
2302 	switch (cmd) {
2303 	case ECORE_MCAST_CMD_ADD:
2304 		cur_mac = (struct ecore_mcast_mac_elem *)
2305 		    ((uint8_t *) new_cmd + sizeof(*new_cmd));
2306 
2307 		/* Push the MACs of the current command into the pending command
2308 		 * MACs list: FIFO
2309 		 */
2310 		ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2311 					  struct ecore_mcast_list_elem) {
2312 			ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2313 			ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2314 					     &new_cmd->data.macs_head);
2315 			cur_mac++;
2316 		}
2317 
2318 		break;
2319 
2320 	case ECORE_MCAST_CMD_DEL:
2321 		new_cmd->data.macs_num = p->mcast_list_len;
2322 		break;
2323 
2324 	case ECORE_MCAST_CMD_RESTORE:
2325 		new_cmd->data.next_bin = 0;
2326 		break;
2327 
2328 	default:
2329 		ECORE_FREE(sc, new_cmd, total_sz);
2330 		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2331 		return ECORE_INVAL;
2332 	}
2333 
2334 	/* Push the new pending command to the tail of the pending list: FIFO */
2335 	ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2336 
2337 	o->set_sched(o);
2338 
2339 	return ECORE_PENDING;
2340 }
2341 
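/* Editorial sketch (not part of the driver): ecore_mcast_enqueue_cmd()
 * above makes a single allocation holding the pending-command header
 * immediately followed by the MAC element array, so one ECORE_FREE()
 * releases everything at once.  The layout in isolation, with hypothetical
 * names (the real element adds pad bytes for natural alignment):
 *
 *	struct cmd_hdr  { int type; };
 *	struct mac_elem { uint8_t mac[6]; uint8_t pad[2]; };
 *
 *	size_t total = sizeof(struct cmd_hdr) + n * sizeof(struct mac_elem);
 *	struct cmd_hdr *hdr = malloc(total);
 *	struct mac_elem *slots =
 *		(struct mac_elem *)((uint8_t *)hdr + sizeof(*hdr));
 *
 * slots[0..n-1] are then linked into the command's own list and are freed
 * together with the header.
 */
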
2342 /**
2343  * ecore_mcast_get_next_bin - get the next set bin (index)
2344  *
2345  * @o:		multicast object to scan
2346  * @last:	index to start looking from (inclusive)
2347  *
2348  * returns the next found (set) bin or a negative value if none is found.
2349  */
2350 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2351 {
2352 	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2353 
2354 	for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2355 		if (o->registry.aprox_match.vec[i])
2356 			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2357 				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2358 				if (BIT_VEC64_TEST_BIT
2359 				    (o->registry.aprox_match.vec, cur_bit)) {
2360 					return cur_bit;
2361 				}
2362 			}
2363 		inner_start = 0;
2364 	}
2365 
2366 	/* None found */
2367 	return -1;
2368 }
2369 
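/* Editorial sketch (not part of the driver): the scan above is a plain
 * find-next-set-bit over an array of 64-bit words.  The same logic with a
 * single flat bit index, hypothetical names:
 *
 *	static int find_next_set(const uint64_t *vec, int nwords, int from)
 *	{
 *		int bit;
 *
 *		for (bit = from; bit < nwords * 64; bit++)
 *			if (vec[bit / 64] & (1ULL << (bit % 64)))
 *				return bit;
 *
 *		return -1;	(same "none found" convention as above)
 *	}
 */
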
2370 /**
2371  * ecore_mcast_clear_first_bin - find the first set bin and clear it
2372  *
2373  * @o:		multicast object to clear the first bin in
2374  *
2375  * returns the index of the found bin or -1 if none is found
2376  */
2377 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2378 {
2379 	int cur_bit = ecore_mcast_get_next_bin(o, 0);
2380 
2381 	if (cur_bit >= 0)
2382 		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2383 
2384 	return cur_bit;
2385 }
2386 
2387 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2388 {
2389 	struct ecore_raw_obj *raw = &o->raw;
2390 	uint8_t rx_tx_flag = 0;
2391 
2392 	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2393 	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2394 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2395 
2396 	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2397 	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2398 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2399 
2400 	return rx_tx_flag;
2401 }
2402 
2403 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
2404 					struct ecore_mcast_obj *o, int idx,
2405 					union ecore_mcast_config_data *cfg_data,
2406 					enum ecore_mcast_cmd cmd)
2407 {
2408 	struct ecore_raw_obj *r = &o->raw;
2409 	struct eth_multicast_rules_ramrod_data *data =
2410 	    (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2411 	uint8_t func_id = r->func_id;
2412 	uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2413 	int bin;
2414 
2415 	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2416 		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2417 
2418 	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2419 
2420 	/* Get a bin and update a bins' vector */
2421 	switch (cmd) {
2422 	case ECORE_MCAST_CMD_ADD:
2423 		bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2424 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2425 		break;
2426 
2427 	case ECORE_MCAST_CMD_DEL:
2428 		/* If there were no more bins to clear
2429 		 * (ecore_mcast_clear_first_bin() returns -1) then we would
2430 		 * clear an arbitrary (0xff) bin.
2431 		 * See ecore_mcast_validate_e2() for an explanation of when
2432 		 * this may happen.
2433 		 */
2434 		bin = ecore_mcast_clear_first_bin(o);
2435 		break;
2436 
2437 	case ECORE_MCAST_CMD_RESTORE:
2438 		bin = cfg_data->bin;
2439 		break;
2440 
2441 	default:
2442 		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2443 		return;
2444 	}
2445 
2446 	ECORE_MSG(sc, "%s bin %d",
2447 		  ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2448 		   "Setting" : "Clearing"), bin);
2449 
2450 	data->rules[idx].bin_id = (uint8_t) bin;
2451 	data->rules[idx].func_id = func_id;
2452 	data->rules[idx].engine_id = o->engine_id;
2453 }
2454 
2455 /**
2456  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2457  *
2458  * @sc:		device handle
2459  * @o:		multicast object to restore from
2460  * @start_bin:	index in the registry to start from (including)
2461  * @rdata_idx:	index in the ramrod data to start from
2462  *
2463  * returns last handled bin index or -1 if all bins have been handled
2464  */
2465 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
2466 					     struct ecore_mcast_obj *o,
2467 					     int start_bin, int *rdata_idx)
2468 {
2469 	int cur_bin, cnt = *rdata_idx;
2470 	union ecore_mcast_config_data cfg_data = { NULL };
2471 
2472 	/* go through the registry and configure the bins from it */
2473 	for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2474 	     cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2475 
2476 		cfg_data.bin = (uint8_t) cur_bin;
2477 		o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
2478 
2479 		cnt++;
2480 
2481 		ECORE_MSG(sc, "About to configure a bin %d", cur_bin);
2482 
2483 		/* Break if we reached the maximum number
2484 		 * of rules.
2485 		 */
2486 		if (cnt >= o->max_cmd_len)
2487 			break;
2488 	}
2489 
2490 	*rdata_idx = cnt;
2491 
2492 	return cur_bin;
2493 }
2494 
2495 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
2496 					   struct ecore_mcast_obj *o,
2497 					   struct ecore_pending_mcast_cmd
2498 					   *cmd_pos, int *line_idx)
2499 {
2500 	struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2501 	int cnt = *line_idx;
2502 	union ecore_mcast_config_data cfg_data = { NULL };
2503 
2504 	ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2505 				       &cmd_pos->data.macs_head, link,
2506 				       struct ecore_mcast_mac_elem) {
2507 
2508 		cfg_data.mac = &pmac_pos->mac[0];
2509 		o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2510 
2511 		cnt++;
2512 
2513 		ECORE_MSG(sc,
2514 			  "About to configure " RTE_ETHER_ADDR_PRT_FMT " mcast MAC",
2515 			  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
2516 			  pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2517 
2518 		ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2519 					&cmd_pos->data.macs_head);
2520 
2521 		/* Break if we reached the maximum number
2522 		 * of rules.
2523 		 */
2524 		if (cnt >= o->max_cmd_len)
2525 			break;
2526 	}
2527 
2528 	*line_idx = cnt;
2529 
2530 	/* if no more MACs to configure - we are done */
2531 	if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2532 		cmd_pos->done = TRUE;
2533 }
2534 
2535 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
2536 					   struct ecore_mcast_obj *o,
2537 					   struct ecore_pending_mcast_cmd
2538 					   *cmd_pos, int *line_idx)
2539 {
2540 	int cnt = *line_idx;
2541 
2542 	while (cmd_pos->data.macs_num) {
2543 		o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2544 
2545 		cnt++;
2546 
2547 		cmd_pos->data.macs_num--;
2548 
2549 		ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d",
2550 			  cmd_pos->data.macs_num, cnt);
2551 
2552 		/* Break if we reached the maximum
2553 		 * number of rules.
2554 		 */
2555 		if (cnt >= o->max_cmd_len)
2556 			break;
2557 	}
2558 
2559 	*line_idx = cnt;
2560 
2561 	/* If we cleared all bins - we are done */
2562 	if (!cmd_pos->data.macs_num)
2563 		cmd_pos->done = TRUE;
2564 }
2565 
2566 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
2567 					       struct ecore_mcast_obj *o, struct
2568 					       ecore_pending_mcast_cmd
2569 					       *cmd_pos, int *line_idx)
2570 {
2571 	cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2572 						line_idx);
2573 
2574 	if (cmd_pos->data.next_bin < 0)
2575 		/* If o->set_restore returned -1 we are done */
2576 		cmd_pos->done = TRUE;
2577 	else
2578 		/* Start from the next bin next time */
2579 		cmd_pos->data.next_bin++;
2580 }
2581 
2582 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
2583 					      ecore_mcast_ramrod_params
2584 					      *p)
2585 {
2586 	struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2587 	int cnt = 0;
2588 	struct ecore_mcast_obj *o = p->mcast_obj;
2589 
2590 	ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
2591 				       &o->pending_cmds_head, link,
2592 				       struct ecore_pending_mcast_cmd) {
2593 		switch (cmd_pos->type) {
2594 		case ECORE_MCAST_CMD_ADD:
2595 			ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
2596 			break;
2597 
2598 		case ECORE_MCAST_CMD_DEL:
2599 			ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
2600 			break;
2601 
2602 		case ECORE_MCAST_CMD_RESTORE:
2603 			ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
2604 							   &cnt);
2605 			break;
2606 
2607 		default:
2608 			PMD_DRV_LOG(ERR, sc,
2609 				    "Unknown command: %d", cmd_pos->type);
2610 			return ECORE_INVAL;
2611 		}
2612 
2613 		/* If the command has been completed - remove it from the list
2614 		 * and free the memory
2615 		 */
2616 		if (cmd_pos->done) {
2617 			ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
2618 						&o->pending_cmds_head);
2619 			ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
2620 		}
2621 
2622 		/* Break if we reached the maximum number of rules */
2623 		if (cnt >= o->max_cmd_len)
2624 			break;
2625 	}
2626 
2627 	return cnt;
2628 }
2629 
2630 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
2631 				struct ecore_mcast_obj *o,
2632 				struct ecore_mcast_ramrod_params *p,
2633 				int *line_idx)
2634 {
2635 	struct ecore_mcast_list_elem *mlist_pos;
2636 	union ecore_mcast_config_data cfg_data = { NULL };
2637 	int cnt = *line_idx;
2638 
2639 	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2640 				  struct ecore_mcast_list_elem) {
2641 		cfg_data.mac = mlist_pos->mac;
2642 		o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
2643 
2644 		cnt++;
2645 
2646 		ECORE_MSG(sc,
2647 			  "About to configure " RTE_ETHER_ADDR_PRT_FMT " mcast MAC",
2648 			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2649 			  mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
2650 	}
2651 
2652 	*line_idx = cnt;
2653 }
2654 
2655 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
2656 				struct ecore_mcast_obj *o,
2657 				struct ecore_mcast_ramrod_params *p,
2658 				int *line_idx)
2659 {
2660 	int cnt = *line_idx, i;
2661 
2662 	for (i = 0; i < p->mcast_list_len; i++) {
2663 		o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
2664 
2665 		cnt++;
2666 
2667 		ECORE_MSG(sc,
2668 			  "Deleting MAC. %d left", p->mcast_list_len - i - 1);
2669 	}
2670 
2671 	*line_idx = cnt;
2672 }
2673 
2674 /**
2675  * ecore_mcast_handle_current_cmd - handle the current (non-pending) command
2676  *
2677  * @sc:		device handle
2678  * @p:		ramrod parameters carrying the current command
2679  * @cmd:	command type (ADD/DEL/RESTORE)
2680  * @start_cnt:	first line in the ramrod data that may be used
2681  *
2682  * This function is called if there is enough room for the current command in
2683  * the ramrod data.
2684  * Returns number of lines filled in the ramrod data in total.
2685  */
2686 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
2687 					  ecore_mcast_ramrod_params *p,
2688 					  enum ecore_mcast_cmd cmd,
2689 					  int start_cnt)
2690 {
2691 	struct ecore_mcast_obj *o = p->mcast_obj;
2692 	int cnt = start_cnt;
2693 
2694 	ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len);
2695 
2696 	switch (cmd) {
2697 	case ECORE_MCAST_CMD_ADD:
2698 		ecore_mcast_hdl_add(sc, o, p, &cnt);
2699 		break;
2700 
2701 	case ECORE_MCAST_CMD_DEL:
2702 		ecore_mcast_hdl_del(sc, o, p, &cnt);
2703 		break;
2704 
2705 	case ECORE_MCAST_CMD_RESTORE:
2706 		o->hdl_restore(sc, o, 0, &cnt);
2707 		break;
2708 
2709 	default:
2710 		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2711 		return ECORE_INVAL;
2712 	}
2713 
2714 	/* The current command has been handled */
2715 	p->mcast_list_len = 0;
2716 
2717 	return cnt;
2718 }
2719 
2720 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
2721 				   struct ecore_mcast_ramrod_params *p,
2722 				   enum ecore_mcast_cmd cmd)
2723 {
2724 	struct ecore_mcast_obj *o = p->mcast_obj;
2725 	int reg_sz = o->get_registry_size(o);
2726 
2727 	switch (cmd) {
2728 		/* DEL command deletes all currently configured MACs */
2729 	case ECORE_MCAST_CMD_DEL:
2730 		o->set_registry_size(o, 0);
2731 		/* fall-through */
2732 
2733 		/* RESTORE command will restore the entire multicast configuration */
2734 	case ECORE_MCAST_CMD_RESTORE:
2735 		/* Here we set the approximate amount of work to do, which in
2736 		 * fact may turn out to be less, as some MACs in postponed ADD
2737 		 * command(s) scheduled before this command may fall into
2738 		 * the same bin and the actual number of bins set in the
2739 		 * registry would then be less than we estimate here. See
2740 		 * ecore_mcast_set_one_rule_e2() for further details.
2741 		 */
2742 		p->mcast_list_len = reg_sz;
2743 		break;
2744 
2745 	case ECORE_MCAST_CMD_ADD:
2746 	case ECORE_MCAST_CMD_CONT:
2747 		/* Here we assume that all new MACs will fall into new bins.
2748 		 * However we will correct the real registry size after we
2749 		 * handle all pending commands.
2750 		 */
2751 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
2752 		break;
2753 
2754 	default:
2755 		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2756 		return ECORE_INVAL;
2757 	}
2758 
2759 	/* Increase the total number of MACs pending to be configured */
2760 	o->total_pending_num += p->mcast_list_len;
2761 
2762 	return ECORE_SUCCESS;
2763 }
2764 
2765 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
2766 				  struct ecore_mcast_ramrod_params *p,
2767 				  int old_num_bins,
2768 				  enum ecore_mcast_cmd cmd)
2769 {
2770 	struct ecore_mcast_obj *o = p->mcast_obj;
2771 
2772 	o->set_registry_size(o, old_num_bins);
2773 	o->total_pending_num -= p->mcast_list_len;
2774 
2775 	if (cmd == ECORE_MCAST_CMD_SET)
2776 		o->total_pending_num -= o->max_cmd_len;
2777 }
2778 
2779 /**
2780  * ecore_mcast_set_rdata_hdr_e2 - set the ramrod header values
2781  *
2782  * @sc:		device handle
2783  * @p:		ramrod parameters
2784  * @len:	number of rules to handle
2785  */
2786 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc
2787 					 *sc, struct ecore_mcast_ramrod_params
2788 					 *p, uint8_t len)
2789 {
2790 	struct ecore_raw_obj *r = &p->mcast_obj->raw;
2791 	struct eth_multicast_rules_ramrod_data *data =
2792 	    (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2793 
2794 	data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
2795 					      (ECORE_FILTER_MCAST_PENDING <<
2796 					       ECORE_SWCID_SHIFT));
2797 	data->header.rule_cnt = len;
2798 }
2799 
2800 /**
2801  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2802  *
2803  * @sc:		device handle
2804  * @o:		multicast object whose registry is recounted
2805  *
2806  * Recalculate the actual number of set bins in the registry using Brian
2807  * Kernighan's algorithm: its execution complexity is linear in the number of set bins.
2808  */
2809 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
2810 {
2811 	int i, cnt = 0;
2812 	uint64_t elem;
2813 
2814 	for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
2815 		elem = o->registry.aprox_match.vec[i];
2816 		for (; elem; cnt++)
2817 			elem &= elem - 1;
2818 	}
2819 
2820 	o->set_registry_size(o, cnt);
2821 
2822 	return ECORE_SUCCESS;
2823 }
2824 
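/* Editorial worked example (not from the source): "elem &= elem - 1" is
 * Kernighan's population count -- it clears the lowest set bit, so the
 * inner loop above runs exactly once per set bin.  For elem = 0xb (1011b):
 *
 *	1011 & 1010 = 1010	cnt = 1
 *	1010 & 1001 = 1000	cnt = 2
 *	1000 & 0111 = 0000	cnt = 3  ->  three set bits, three steps
 */
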
2825 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
2826 				struct ecore_mcast_ramrod_params *p,
2827 				enum ecore_mcast_cmd cmd)
2828 {
2829 	struct ecore_raw_obj *raw = &p->mcast_obj->raw;
2830 	struct ecore_mcast_obj *o = p->mcast_obj;
2831 	struct eth_multicast_rules_ramrod_data *data =
2832 	    (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2833 	int cnt = 0, rc;
2834 
2835 	/* Reset the ramrod data buffer */
2836 	ECORE_MEMSET(data, 0, sizeof(*data));
2837 
2838 	cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
2839 
2840 	/* If there are no more pending commands - clear SCHEDULED state */
2841 	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
2842 		o->clear_sched(o);
2843 
2844 	/* The condition below may hold if there was enough room in the
2845 	 * ramrod data for all pending commands and for the current
2846 	 * command. Otherwise the current command would have been added
2847 	 * to the pending commands and p->mcast_list_len would have been
2848 	 * zeroed.
2849 	 */
2850 	if (p->mcast_list_len > 0)
2851 		cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
2852 
2853 	/* We've pulled out some MACs - update the total number of
2854 	 * outstanding.
2855 	 */
2856 	o->total_pending_num -= cnt;
2857 
2858 	/* send a ramrod */
2859 	ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
2860 	ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
2861 
2862 	ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
2863 
2864 	/* Update a registry size if there are no more pending operations.
2865 	 *
2866 	 * We don't want to change the value of the registry size if there are
2867 	 * pending operations because we want it to always be equal to the
2868 	 * exact or the approximate number (see ecore_mcast_validate_e2()) of
2869 	 * set bins after the last requested operation in order to properly
2870 	 * evaluate the size of the next DEL/RESTORE operation.
2871 	 *
2872 	 * Note that we update the registry itself during command(s) handling
2873 	 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2874 	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2875 	 * with a limited amount of update commands (per MAC/bin) and we don't
2876 	 * know in this scope what the actual state of bins configuration is
2877 	 * going to be after this ramrod.
2878 	 */
2879 	if (!o->total_pending_num)
2880 		ecore_mcast_refresh_registry_e2(o);
2881 
2882 	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
2883 	 * RAMROD_PENDING status immediately.
2884 	 */
2885 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2886 		raw->clear_pending(raw);
2887 		return ECORE_SUCCESS;
2888 	} else {
2889 		/* No need for an explicit memory barrier here: the required
2890 		 * ordering of the write to the SPQ element and the update of
2891 		 * the SPQ producer (which involves a memory read) is already
2892 		 * enforced by the full memory barrier inside
2893 		 * ecore_sp_post().
2894 		 */
2895 
2896 		/* Send a ramrod */
2897 		rc = ecore_sp_post(sc,
2898 				   RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2899 				   raw->cid,
2900 				   raw->rdata_mapping, ETH_CONNECTION_TYPE);
2901 		if (rc)
2902 			return rc;
2903 
2904 		/* Ramrod completion is pending */
2905 		return ECORE_PENDING;
2906 	}
2907 }
2908 
2909 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
2910 				    struct ecore_mcast_ramrod_params *p,
2911 				    enum ecore_mcast_cmd cmd)
2912 {
2913 	/* Mark that there is work to do */
2914 	if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
2915 		p->mcast_list_len = 1;
2916 
2917 	return ECORE_SUCCESS;
2918 }
2919 
2920 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
2921 				   __rte_unused struct ecore_mcast_ramrod_params
2922 				   *p, __rte_unused int old_num_bins,
2923 				   __rte_unused enum ecore_mcast_cmd cmd)
2924 {
2925 	/* Do nothing */
2926 }
2927 
2928 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
2929 do { \
2930 	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2931 } while (0)
2932 
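/* Editorial worked example (not from the source): the macro above treats
 * the uint32_t array as a flat bit vector -- "bit >> 5" picks the 32-bit
 * word and "bit & 0x1f" the bit within it.  E.g. for bin 200:
 *
 *	200 >> 5   = 6	->  filter[6]
 *	200 & 0x1f = 8	->  filter[6] |= (1 << 8)
 */
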
2933 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
2934 				    struct ecore_mcast_obj *o,
2935 				    struct ecore_mcast_ramrod_params *p,
2936 				    uint32_t * mc_filter)
2937 {
2938 	struct ecore_mcast_list_elem *mlist_pos;
2939 	int bit;
2940 
2941 	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2942 				  struct ecore_mcast_list_elem) {
2943 		bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
2944 		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2945 
2946 		ECORE_MSG(sc,
2947 			  "About to configure " RTE_ETHER_ADDR_PRT_FMT " mcast MAC, bin %d",
2948 			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2949 			  mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
2950 			  bit);
2951 
2952 		/* bookkeeping... */
2953 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
2954 	}
2955 }
2956 
2957 static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc
2958 					__rte_unused,
2959 					struct ecore_mcast_obj *o,
2960 					uint32_t * mc_filter)
2961 {
2962 	int bit;
2963 
2964 	for (bit = ecore_mcast_get_next_bin(o, 0);
2965 	     bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
2966 		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2967 		ECORE_MSG(sc, "About to set bin %d", bit);
2968 	}
2969 }
2970 
2971 /* On 57711 we write the multicast MACs' approximate match
2972  * table directly into the TSTORM's internal RAM, so we don't
2973  * really need any tricks to make it work.
2974  */
2975 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
2976 				 struct ecore_mcast_ramrod_params *p,
2977 				 enum ecore_mcast_cmd cmd)
2978 {
2979 	int i;
2980 	struct ecore_mcast_obj *o = p->mcast_obj;
2981 	struct ecore_raw_obj *r = &o->raw;
2982 
2983 	/* If CLEAR_ONLY has been requested, only clear the registry;
2984 	 * otherwise also build and program the new filter.
2985 	 */
2986 	if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2987 		uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
2988 
2989 		/* Set the multicast filter bits before writing it into
2990 		 * the internal memory.
2991 		 */
2992 		switch (cmd) {
2993 		case ECORE_MCAST_CMD_ADD:
2994 			ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
2995 			break;
2996 
2997 		case ECORE_MCAST_CMD_DEL:
2998 			ECORE_MSG(sc, "Invalidating multicast MACs configuration");
2999 
3000 			/* clear the registry */
3001 			ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3002 				     sizeof(o->registry.aprox_match.vec));
3003 			break;
3004 
3005 		case ECORE_MCAST_CMD_RESTORE:
3006 			ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
3007 			break;
3008 
3009 		default:
3010 			PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
3011 			return ECORE_INVAL;
3012 		}
3013 
3014 		/* Set the mcast filter in the internal memory */
3015 		for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3016 			REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3017 	} else
3018 		/* clear the registry */
3019 		ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3020 			     sizeof(o->registry.aprox_match.vec));
3021 
3022 	/* We are done */
3023 	r->clear_pending(r);
3024 
3025 	return ECORE_SUCCESS;
3026 }
3027 
3028 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3029 {
3030 	return o->registry.aprox_match.num_bins_set;
3031 }
3032 
3033 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3034 						int n)
3035 {
3036 	o->registry.aprox_match.num_bins_set = n;
3037 }
3038 
3039 int ecore_config_mcast(struct bnx2x_softc *sc,
3040 		       struct ecore_mcast_ramrod_params *p,
3041 		       enum ecore_mcast_cmd cmd)
3042 {
3043 	struct ecore_mcast_obj *o = p->mcast_obj;
3044 	struct ecore_raw_obj *r = &o->raw;
3045 	int rc = 0, old_reg_size;
3046 
3047 	/* This is needed to recover the number of currently configured
3048 	 * mcast MACs in case of failure.
3049 	 */
3050 	old_reg_size = o->get_registry_size(o);
3051 
3052 	/* Do some calculations and checks */
3053 	rc = o->validate(sc, p, cmd);
3054 	if (rc)
3055 		return rc;
3056 
3057 	/* Return if there is no work to do */
3058 	if ((!p->mcast_list_len) && (!o->check_sched(o)))
3059 		return ECORE_SUCCESS;
3060 
3061 	ECORE_MSG(sc,
3062 		  "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3063 		  o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3064 
3065 	/* Enqueue the current command to the pending list if we can't complete
3066 	 * it in the current iteration
3067 	 */
3068 	if (r->check_pending(r) ||
3069 	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3070 		rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3071 		if (rc < 0)
3072 			goto error_exit1;
3073 
3074 		/* As long as the current command is in a command list we
3075 		 * don't need to handle it separately.
3076 		 */
3077 		p->mcast_list_len = 0;
3078 	}
3079 
3080 	if (!r->check_pending(r)) {
3081 
3082 		/* Set 'pending' state */
3083 		r->set_pending(r);
3084 
3085 		/* Configure the new classification in the chip */
3086 		rc = o->config_mcast(sc, p, cmd);
3087 		if (rc < 0)
3088 			goto error_exit2;
3089 
3090 		/* Wait for a ramrod completion if was requested */
3091 		/* Wait for a ramrod completion if it was requested */
3092 			rc = o->wait_comp(sc, o);
3093 	}
3094 
3095 	return rc;
3096 
3097 error_exit2:
3098 	r->clear_pending(r);
3099 
3100 error_exit1:
3101 	o->revert(sc, p, old_reg_size, cmd);
3102 
3103 	return rc;
3104 }
3105 
3106 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3107 {
3108 	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3109 	ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3110 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
3111 }
3112 
3113 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3114 {
3115 	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3116 	ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3117 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
3118 }
3119 
3120 static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3121 {
3122 	return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3123 }
3124 
3125 static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3126 {
3127 	return o->raw.check_pending(&o->raw) || o->check_sched(o);
3128 }
3129 
3130 void ecore_init_mcast_obj(struct bnx2x_softc *sc,
3131 			  struct ecore_mcast_obj *mcast_obj,
3132 			  uint8_t mcast_cl_id, uint32_t mcast_cid,
3133 			  uint8_t func_id, uint8_t engine_id, void *rdata,
3134 			  ecore_dma_addr_t rdata_mapping, int state,
3135 			  uint32_t *pstate, ecore_obj_type type)
3136 {
3137 	ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3138 
3139 	ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3140 			   rdata, rdata_mapping, state, pstate, type);
3141 
3142 	mcast_obj->engine_id = engine_id;
3143 
3144 	ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3145 
3146 	mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3147 	mcast_obj->check_sched = ecore_mcast_check_sched;
3148 	mcast_obj->set_sched = ecore_mcast_set_sched;
3149 	mcast_obj->clear_sched = ecore_mcast_clear_sched;
3150 
3151 	if (CHIP_IS_E1H(sc)) {
3152 		mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3153 		mcast_obj->enqueue_cmd = NULL;
3154 		mcast_obj->hdl_restore = NULL;
3155 		mcast_obj->check_pending = ecore_mcast_check_pending;
3156 
3157 		/* 57711 doesn't send a ramrod, so it has unlimited credit
3158 		 * for one command.
3159 		 */
3160 		mcast_obj->max_cmd_len = -1;
3161 		mcast_obj->wait_comp = ecore_mcast_wait;
3162 		mcast_obj->set_one_rule = NULL;
3163 		mcast_obj->validate = ecore_mcast_validate_e1h;
3164 		mcast_obj->revert = ecore_mcast_revert_e1h;
3165 		mcast_obj->get_registry_size =
3166 		    ecore_mcast_get_registry_size_aprox;
3167 		mcast_obj->set_registry_size =
3168 		    ecore_mcast_set_registry_size_aprox;
3169 	} else {
3170 		mcast_obj->config_mcast = ecore_mcast_setup_e2;
3171 		mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3172 		mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
3173 		mcast_obj->check_pending = ecore_mcast_check_pending;
3174 		mcast_obj->max_cmd_len = 16;
3175 		mcast_obj->wait_comp = ecore_mcast_wait;
3176 		mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3177 		mcast_obj->validate = ecore_mcast_validate_e2;
3178 		mcast_obj->revert = ecore_mcast_revert_e2;
3179 		mcast_obj->get_registry_size =
3180 		    ecore_mcast_get_registry_size_aprox;
3181 		mcast_obj->set_registry_size =
3182 		    ecore_mcast_set_registry_size_aprox;
3183 	}
3184 }
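
/* A minimal usage sketch, assuming the ramrod parameter type is
 * struct ecore_mcast_ramrod_params and that the softc embeds the object
 * as sc->mcast_obj (both names are assumptions, not taken from this file):
 *
 *	struct ecore_mcast_ramrod_params rparam = { 0 };
 *
 *	rparam.mcast_obj = &sc->mcast_obj;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
 *	rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
 *
 * ecore_config_mcast() and ECORE_MCAST_CMD_ADD follow this driver's naming
 * pattern but should likewise be treated as assumptions here.
 */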
3185 
3186 /*************************** Credit handling **********************************/
3187 
3188 /**
3189  * atomic_add_ifless - add if the result is less than a given value.
3190  *
3191  * @v:	pointer of type ecore_atomic_t
3192  * @a:	the amount to add to v...
3193  * @u:	...if (v + a) is less than u.
3194  *
3195  * returns TRUE if (v + a) was less than u, and FALSE otherwise.
3196  *
3197  */
3198 static bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
3199 {
3200 	int c, old;
3201 
3202 	c = ECORE_ATOMIC_READ(v);
3203 	for (;;) {
3204 		if (ECORE_UNLIKELY(c + a >= u))
3205 			return FALSE;
3206 
3207 		old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
3208 		if (ECORE_LIKELY(old == c))
3209 			break;
3210 		c = old;
3211 	}
3212 
3213 	return TRUE;
3214 }
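
/* Worked example: with *v == 4, a == 2 and u == 5 the check (4 + 2 >= 5)
 * fires, so the call returns FALSE and leaves *v untouched; with u == 7 it
 * atomically advances *v to 6 and returns TRUE. The cmpxchg loop retries
 * whenever another CPU modified *v between the read and the exchange.
 */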
3215 
3216 /**
3217  * atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3218  *
3219  * @v:	pointer of type ecore_atomic_t
3220  * @a:	the amount to dec from v...
3221  * @u:	...if (v - a) is greater than or equal to u.
3222  *
3223  * returns TRUE if (v - a) was greater than or equal to u, and FALSE
3224  * otherwise.
3225  */
3226 static bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
3227 {
3228 	int c, old;
3229 
3230 	c = ECORE_ATOMIC_READ(v);
3231 	for (;;) {
3232 		if (ECORE_UNLIKELY(c - a < u))
3233 			return FALSE;
3234 
3235 		old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
3236 		if (ECORE_LIKELY(old == c))
3237 			break;
3238 		c = old;
3239 	}
3240 
3241 	return TRUE;
3242 }
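
/* Worked example: __atomic_dec_ifmoe(&o->credit, 1, 0) takes one credit
 * and succeeds as long as the result stays >= 0; with *v == 0 the check
 * (0 - 1 < 0) fires and FALSE is returned, which is exactly how
 * ecore_credit_pool_get() below reports an exhausted pool.
 */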
3243 
3244 static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
3245 {
3246 	bool rc;
3247 
3248 	ECORE_SMP_MB();
3249 	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3250 	ECORE_SMP_MB();
3251 
3252 	return rc;
3253 }
3254 
3255 static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
3256 {
3257 	bool rc;
3258 
3259 	ECORE_SMP_MB();
3260 
3261 	/* Don't allow a refill if credit + cnt > pool_sz */
3262 	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3263 
3264 	ECORE_SMP_MB();
3265 
3266 	return rc;
3267 }
3268 
3269 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
3270 {
3271 	int cur_credit;
3272 
3273 	ECORE_SMP_MB();
3274 	cur_credit = ECORE_ATOMIC_READ(&o->credit);
3275 
3276 	return cur_credit;
3277 }
3278 
3279 static bool ecore_credit_pool_always_TRUE(__rte_unused struct
3280 					 ecore_credit_pool_obj *o,
3281 					 __rte_unused int cnt)
3282 {
3283 	return TRUE;
3284 }
3285 
3286 static bool ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
3287 				       int *offset)
3288 {
3289 	int idx, vec, i;
3290 
3291 	*offset = -1;
3292 
3293 	/* Find an "internal cam-offset", then add this object's base to it... */
3294 	for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
3295 
3296 		/* Skip the current vector if there are no free entries in it */
3297 		if (!o->pool_mirror[vec])
3298 			continue;
3299 
3300 		/* If we've got here we are going to find a free entry */
3301 		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3302 		     i < BIT_VEC64_ELEM_SZ; idx++, i++)
3303 
3304 			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3305 				/* Got one!! */
3306 				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3307 				*offset = o->base_pool_offset + idx;
3308 				return TRUE;
3309 			}
3310 	}
3311 
3312 	return FALSE;
3313 }
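
/* Note: pool_mirror is scanned one 64-bit word at a time (assuming
 * BIT_VEC64_ELEM_SZ == 64); a non-zero word is guaranteed to contain a
 * set bit, so the inner loop always claims one. E.g. if bit 70 is the
 * first set bit, vec == 1, idx == 70 and the returned CAM offset is
 * base_pool_offset + 70.
 */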
3314 
3315 static bool ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
3316 				       int offset)
3317 {
3318 	if (offset < o->base_pool_offset)
3319 		return FALSE;
3320 
3321 	offset -= o->base_pool_offset;
3322 
3323 	if (offset >= o->pool_sz)
3324 		return FALSE;
3325 
3326 	/* Return the entry to the pool */
3327 	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3328 
3329 	return TRUE;
3330 }
3331 
3332 static bool ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct
3333 						   ecore_credit_pool_obj *o,
3334 						   __rte_unused int offset)
3335 {
3336 	return TRUE;
3337 }
3338 
3339 static bool ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
3340 						   ecore_credit_pool_obj *o,
3341 						   __rte_unused int *offset)
3342 {
3343 	*offset = -1;
3344 	return TRUE;
3345 }
3346 
3347 /**
3348  * ecore_init_credit_pool - initialize credit pool internals.
3349  *
3350  * @p:
3351  * @base:	Base entry in the CAM to use.
3352  * @credit:	pool size.
3353  *
3354  * If base is negative no CAM entries handling will be performed.
3355  * If credit is negative pool operations will always succeed (unlimited pool).
3356  *
3357  */
3358 void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
3359 				   int base, int credit)
3360 {
3361 	/* Zero the object first */
3362 	ECORE_MEMSET(p, 0, sizeof(*p));
3363 
3364 	/* Set the table to all 1s */
3365 	ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3366 
3367 	/* Init a pool as full */
3368 	ECORE_ATOMIC_SET(&p->credit, credit);
3369 
3370 	/* The total pool size */
3371 	p->pool_sz = credit;
3372 
3373 	p->base_pool_offset = base;
3374 
3375 	/* Commit the change */
3376 	ECORE_SMP_MB();
3377 
3378 	p->check = ecore_credit_pool_check;
3379 
3380 	/* if pool credit is negative - disable the checks */
3381 	if (credit >= 0) {
3382 		p->put = ecore_credit_pool_put;
3383 		p->get = ecore_credit_pool_get;
3384 		p->put_entry = ecore_credit_pool_put_entry;
3385 		p->get_entry = ecore_credit_pool_get_entry;
3386 	} else {
3387 		p->put = ecore_credit_pool_always_TRUE;
3388 		p->get = ecore_credit_pool_always_TRUE;
3389 		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3390 		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3391 	}
3392 
3393 	/* If base is negative - disable entries handling */
3394 	if (base < 0) {
3395 		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3396 		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3397 	}
3398 }
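
/* The resulting usage modes, as exercised later in this file:
 *
 *	ecore_init_credit_pool(p, base, credit);  // credits + CAM entries
 *	ecore_init_credit_pool(p, -1, credit);    // credits, no CAM handling
 *	ecore_init_credit_pool(p, 0, -1);         // unlimited pool
 *	ecore_init_credit_pool(p, 0, 0);          // empty pool, blocks all ops
 */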
3399 
3400 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
3401 				struct ecore_credit_pool_obj *p,
3402 				uint8_t func_id, uint8_t func_num)
3403 {
3404 
3405 #define ECORE_CAM_SIZE_EMUL 5
3406 
3407 	int cam_sz;
3408 
3409 	if (CHIP_IS_E1H(sc)) {
3410 		/* CAM credit is equally divided between all active functions
3411 		 * on the PORT.
3412 		 */
3413 		if (func_num > 0) {
3414 			if (!CHIP_REV_IS_SLOW(sc))
3415 				cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
3416 			else
3417 				cam_sz = ECORE_CAM_SIZE_EMUL;
3418 			ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
3419 		} else {
3420 			/* this should never happen! Block MAC operations. */
3421 			ecore_init_credit_pool(p, 0, 0);
3422 		}
3423 
3424 	} else {
3425 
3426 		/*
3427 		 * CAM credit is equally divided between all active functions
3428 		 * on the PATH.
3429 		 */
3430 		if (func_num > 0) {
3431 			if (!CHIP_REV_IS_SLOW(sc))
3432 				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3433 			else
3434 				cam_sz = ECORE_CAM_SIZE_EMUL;
3435 
3436 			/* No need for CAM entries handling for 57712 and
3437 			 * newer.
3438 			 */
3439 			ecore_init_credit_pool(p, -1, cam_sz);
3440 		} else {
3441 			/* this should never happen! Block MAC operations. */
3442 			ecore_init_credit_pool(p, 0, 0);
3443 		}
3444 	}
3445 }
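
/* Arithmetic sketch (the credit macro value below is hypothetical, chosen
 * only for illustration): on E1H with MAX_MAC_CREDIT_E1H == 256 and two
 * active functions on a port, each function gets 256 / (2 * 2) == 64 CAM
 * entries, function N starting at offset N * 64. On E2 the divisor is just
 * func_num, and CAM entry bookkeeping is disabled by passing base == -1.
 */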
3446 
3447 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
3448 				 struct ecore_credit_pool_obj *p,
3449 				 uint8_t func_id, uint8_t func_num)
3450 {
3451 	if (CHIP_IS_E1x(sc)) {
3452 		/* There is no VLAN credit in HW on 57711; only
3453 		 * MAC / MAC-VLAN can be set
3454 		 */
3455 		ecore_init_credit_pool(p, 0, -1);
3456 	} else {
3457 		/* CAM credit is equally divided between all active functions
3458 		 * on the PATH.
3459 		 */
3460 		if (func_num > 0) {
3461 			int credit = MAX_VLAN_CREDIT_E2 / func_num;
3462 			ecore_init_credit_pool(p, func_id * credit, credit);
3463 		} else
3464 			/* this should never happen! Block VLAN operations. */
3465 			ecore_init_credit_pool(p, 0, 0);
3466 	}
3467 }
3468 
3469 /****************** RSS Configuration ******************/
3470 
3471 /**
3472  * ecore_setup_rss - configure RSS
3473  *
3474  * @sc:		device handle
3475  * @p:		rss configuration
3476  *
3477  * Sends an RSS UPDATE ramrod for that purpose.
3478  */
3479 static int ecore_setup_rss(struct bnx2x_softc *sc,
3480 			   struct ecore_config_rss_params *p)
3481 {
3482 	struct ecore_rss_config_obj *o = p->rss_obj;
3483 	struct ecore_raw_obj *r = &o->raw;
3484 	struct eth_rss_update_ramrod_data *data =
3485 	    (struct eth_rss_update_ramrod_data *)(r->rdata);
3486 	uint8_t rss_mode = 0;
3487 	int rc;
3488 
3489 	ECORE_MEMSET(data, 0, sizeof(*data));
3490 
3491 	ECORE_MSG(sc, "Configuring RSS");
3492 
3493 	/* Set an echo field */
3494 	data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3495 				       (r->state << ECORE_SWCID_SHIFT));
3496 
3497 	/* RSS mode */
3498 	if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
3499 		rss_mode = ETH_RSS_MODE_DISABLED;
3500 	else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
3501 		rss_mode = ETH_RSS_MODE_REGULAR;
3502 
3503 	data->rss_mode = rss_mode;
3504 
3505 	ECORE_MSG(sc, "rss_mode=%d", rss_mode);
3506 
3507 	/* RSS capabilities */
3508 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
3509 		data->capabilities |=
3510 		    ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
3511 
3512 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
3513 		data->capabilities |=
3514 		    ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
3515 
3516 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
3517 		data->capabilities |=
3518 		    ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
3519 
3520 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
3521 		data->capabilities |=
3522 		    ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
3523 
3524 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
3525 		data->capabilities |=
3526 		    ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
3527 
3528 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
3529 		data->capabilities |=
3530 		    ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
3531 
3532 	/* Hashing mask */
3533 	data->rss_result_mask = p->rss_result_mask;
3534 
3535 	/* RSS engine ID */
3536 	data->rss_engine_id = o->engine_id;
3537 
3538 	ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id);
3539 
3540 	/* Indirection table */
3541 	ECORE_MEMCPY(data->indirection_table, p->ind_table,
3542 		     T_ETH_INDIRECTION_TABLE_SIZE);
3543 
3544 	/* Remember the last configuration */
3545 	ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
3546 
3547 	/* RSS keys */
3548 	if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
3549 		ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
3550 			     sizeof(data->rss_key));
3551 		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
3552 	}
3553 
3554 	/* No need for an explicit memory barrier here: the write to the
3555 	 * SPQ element must in any case be ordered against the update of
3556 	 * the SPQ producer, which involves a memory read, and the full
3557 	 * memory barrier needed for that is already placed there
3558 	 * (inside ecore_sp_post()).
3559 	 */
3560 
3561 	/* Send a ramrod */
3562 	rc = ecore_sp_post(sc,
3563 			   RAMROD_CMD_ID_ETH_RSS_UPDATE,
3564 			   r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
3565 
3566 	if (rc < 0)
3567 		return rc;
3568 
3569 	return ECORE_PENDING;
3570 }
3571 
3572 int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
3573 {
3574 	int rc;
3575 	struct ecore_rss_config_obj *o = p->rss_obj;
3576 	struct ecore_raw_obj *r = &o->raw;
3577 
3578 	/* Do nothing if only driver cleanup was requested */
3579 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
3580 		return ECORE_SUCCESS;
3581 
3582 	r->set_pending(r);
3583 
3584 	rc = o->config_rss(sc, p);
3585 	if (rc < 0) {
3586 		r->clear_pending(r);
3587 		return rc;
3588 	}
3589 
3590 	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3591 		rc = r->wait_comp(sc, r);
3592 
3593 	return rc;
3594 }
3595 
3596 void ecore_init_rss_config_obj(struct bnx2x_softc *sc __rte_unused,
3597 			       struct ecore_rss_config_obj *rss_obj,
3598 			       uint8_t cl_id, uint32_t cid, uint8_t func_id,
3599 			       uint8_t engine_id,
3600 			       void *rdata, ecore_dma_addr_t rdata_mapping,
3601 			       int state, uint32_t *pstate,
3602 			       ecore_obj_type type)
3603 {
3604 	ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
3605 			   rdata_mapping, state, pstate, type);
3606 
3607 	rss_obj->engine_id = engine_id;
3608 	rss_obj->config_rss = ecore_setup_rss;
3609 }
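
/* A minimal configuration sketch; the rss_conf_obj field name is an
 * assumption, while the flags and fields are the ones consumed by
 * ecore_setup_rss() above:
 *
 *	struct ecore_config_rss_params params = { 0 };
 *
 *	params.rss_obj = &sc->rss_conf_obj;
 *	ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
 *	params.rss_result_mask = 0x7;	// 3-bit hash -> 8 indirection targets
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *	rc = ecore_config_rss(sc, &params);
 */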
3610 
3611 /********************** Queue state object ***********************************/
3612 
3613 /**
3614  * ecore_queue_state_change - perform Queue state change transition
3615  *
3616  * @sc:		device handle
3617  * @params:	parameters to perform the transition
3618  *
3619  * returns 0 in case of successfully completed transition, negative error
3620  * code in case of failure, positive (EBUSY) value if there is a
3621  * completion that is still pending (possible only if RAMROD_COMP_WAIT is
3622  * not set in params->ramrod_flags for asynchronous commands).
3623  *
3624  */
3625 int ecore_queue_state_change(struct bnx2x_softc *sc,
3626 			     struct ecore_queue_state_params *params)
3627 {
3628 	struct ecore_queue_sp_obj *o = params->q_obj;
3629 	int rc, pending_bit;
3630 	uint32_t *pending = &o->pending;
3631 
3632 	/* Check that the requested transition is legal */
3633 	rc = o->check_transition(sc, o, params);
3634 	if (rc) {
3635 		PMD_DRV_LOG(ERR, sc, "check transition returned an error. rc %d",
3636 			    rc);
3637 		return ECORE_INVAL;
3638 	}
3639 
3640 	/* Set "pending" bit */
3641 	ECORE_MSG(sc, "pending bit was=%x", o->pending);
3642 	pending_bit = o->set_pending(o, params);
3643 	ECORE_MSG(sc, "pending bit now=%x", o->pending);
3644 
3645 	/* Don't send a command if only driver cleanup was requested */
3646 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
3647 		o->complete_cmd(sc, o, pending_bit);
3648 	else {
3649 		/* Send a ramrod */
3650 		rc = o->send_cmd(sc, params);
3651 		if (rc) {
3652 			o->next_state = ECORE_Q_STATE_MAX;
3653 			ECORE_CLEAR_BIT(pending_bit, pending);
3654 			ECORE_SMP_MB_AFTER_CLEAR_BIT();
3655 			return rc;
3656 		}
3657 
3658 		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
3659 			rc = o->wait_comp(sc, o, pending_bit);
3660 			if (rc)
3661 				return rc;
3662 
3663 			return ECORE_SUCCESS;
3664 		}
3665 	}
3666 
3667 	return ECORE_RET_PENDING(pending_bit, pending);
3668 }
3669 
3670 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
3671 				   struct ecore_queue_state_params *params)
3672 {
3673 	enum ecore_queue_cmd cmd = params->cmd, bit;
3674 
3675 	/* ACTIVATE and DEACTIVATE commands are implemented on top of
3676 	 * the UPDATE command.
3677 	 */
3678 	if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
3679 		bit = ECORE_Q_CMD_UPDATE;
3680 	else
3681 		bit = cmd;
3682 
3683 	ECORE_SET_BIT(bit, &obj->pending);
3684 	return bit;
3685 }
3686 
3687 static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
3688 				 struct ecore_queue_sp_obj *o,
3689 				 enum ecore_queue_cmd cmd)
3690 {
3691 	return ecore_state_wait(sc, cmd, &o->pending);
3692 }
3693 
3694 /**
3695  * ecore_queue_comp_cmd - complete the state change command.
3696  *
3697  * @sc:		device handle
3698  * @o:
3699  * @cmd:
3700  *
3701  * Checks that the arrived completion is expected.
3702  */
3703 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
3704 				struct ecore_queue_sp_obj *o,
3705 				enum ecore_queue_cmd cmd)
3706 {
3707 	uint32_t cur_pending = o->pending;
3708 
3709 	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
3710 		PMD_DRV_LOG(ERR, sc,
3711 			    "Bad MC reply %d for queue %d in state %d pending 0x%x, next_state %d",
3712 			    cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
3713 			    cur_pending, o->next_state);
3714 		return ECORE_INVAL;
3715 	}
3716 
3717 	if (o->next_tx_only >= o->max_cos)
3718 		/* >= because the number of tx-only connections must always be
3719 		 * smaller than max_cos, since the primary connection supports COS 0
3720 		 */
3721 		PMD_DRV_LOG(ERR, sc,
3722 			    "illegal value for next tx_only: %d. max cos was %d",
3723 			    o->next_tx_only, o->max_cos);
3724 
3725 	ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d",
3726 		  cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
3727 
3728 	if (o->next_tx_only)	/* print num tx-only if any exist */
3729 		ECORE_MSG(sc, "primary cid %d: num tx-only cons %d",
3730 			  o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
3731 
3732 	o->state = o->next_state;
3733 	o->num_tx_only = o->next_tx_only;
3734 	o->next_state = ECORE_Q_STATE_MAX;
3735 
3736 	/* It's important that o->state and o->next_state are
3737 	 * updated before o->pending.
3738 	 */
3739 	wmb();
3740 
3741 	ECORE_CLEAR_BIT(cmd, &o->pending);
3742 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
3743 
3744 	return ECORE_SUCCESS;
3745 }
3746 
3747 static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
3748 				       *cmd_params,
3749 				       struct client_init_ramrod_data *data)
3750 {
3751 	struct ecore_queue_setup_params *params = &cmd_params->params.setup;
3752 
3753 	/* Rx data */
3754 
3755 	/* IPv6 TPA supported for E2 and above only */
3756 	data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
3757 					  &params->flags) *
3758 	    CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
3759 }
3760 
3761 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
3762 					   struct ecore_queue_sp_obj *o,
3763 					   struct ecore_general_setup_params
3764 					   *params, struct client_init_general_data
3765 					   *gen_data, uint32_t *flags)
3766 {
3767 	gen_data->client_id = o->cl_id;
3768 
3769 	if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
3770 		gen_data->statistics_counter_id = params->stat_id;
3771 		gen_data->statistics_en_flg = 1;
3772 		gen_data->statistics_zero_flg =
3773 		    ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
3774 	} else
3775 		gen_data->statistics_counter_id =
3776 		    DISABLE_STATISTIC_COUNTER_ID_VALUE;
3777 
3778 	gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
3779 	gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
3780 	gen_data->sp_client_id = params->spcl_id;
3781 	gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
3782 	gen_data->func_id = o->func_id;
3783 
3784 	gen_data->cos = params->cos;
3785 
3786 	gen_data->traffic_type =
3787 	    ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
3788 	    LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
3789 
3790 	ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d",
3791 		  gen_data->activate_flg, gen_data->cos,
3792 		  gen_data->statistics_en_flg);
3793 }
3794 
3795 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
3796 				      struct client_init_tx_data *tx_data,
3797 				      uint32_t *flags)
3798 {
3799 	tx_data->enforce_security_flg =
3800 	    ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
3801 	tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
3802 	tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
3803 	tx_data->tx_switching_flg =
3804 	    ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
3805 	tx_data->anti_spoofing_flg =
3806 	    ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
3807 	tx_data->force_default_pri_flg =
3808 	    ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
3809 	tx_data->refuse_outband_vlan_flg =
3810 	    ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
3811 	tx_data->tunnel_non_lso_pcsum_location =
3812 	    ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
3813 	    CSUM_ON_BD;
3814 
3815 	tx_data->tx_status_block_id = params->fw_sb_id;
3816 	tx_data->tx_sb_index_number = params->sb_cq_index;
3817 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
3818 
3819 	tx_data->tx_bd_page_base.lo =
3820 	    ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3821 	tx_data->tx_bd_page_base.hi =
3822 	    ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3823 
3824 	/* Don't configure any Tx switching mode during queue SETUP */
3825 	tx_data->state = 0;
3826 }
3827 
3828 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
3829 					 struct client_init_rx_data *rx_data)
3830 {
3831 	/* flow control data */
3832 	rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
3833 	rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
3834 	rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
3835 	rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
3836 	rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
3837 	rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
3838 	rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
3839 }
3840 
3841 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
3842 				      struct client_init_rx_data *rx_data,
3843 				      uint32_t *flags)
3844 {
3845 	rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
3846 	    CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
3847 	rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
3848 	    CLIENT_INIT_RX_DATA_TPA_MODE;
3849 	rx_data->vmqueue_mode_en_flg = 0;
3850 
3851 	rx_data->extra_data_over_sgl_en_flg =
3852 	    ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
3853 	rx_data->cache_line_alignment_log_size = params->cache_line_log;
3854 	rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
3855 	rx_data->client_qzone_id = params->cl_qzone_id;
3856 	rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
3857 
3858 	/* Always start in DROP_ALL mode */
3859 	rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
3860 					   CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
3861 
3862 	/* We don't set drop flags */
3863 	rx_data->drop_ip_cs_err_flg = 0;
3864 	rx_data->drop_tcp_cs_err_flg = 0;
3865 	rx_data->drop_ttl0_flg = 0;
3866 	rx_data->drop_udp_cs_err_flg = 0;
3867 	rx_data->inner_vlan_removal_enable_flg =
3868 	    ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
3869 	rx_data->outer_vlan_removal_enable_flg =
3870 	    ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
3871 	rx_data->status_block_id = params->fw_sb_id;
3872 	rx_data->rx_sb_index_number = params->sb_cq_index;
3873 	rx_data->max_tpa_queues = params->max_tpa_queues;
3874 	rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
3875 	rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3876 	rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3877 	rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
3878 	rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
3879 	rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
3880 						 flags);
3881 
3882 	if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
3883 		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
3884 		rx_data->is_approx_mcast = 1;
3885 	}
3886 
3887 	rx_data->rss_engine_id = params->rss_engine_id;
3888 
3889 	/* silent vlan removal */
3890 	rx_data->silent_vlan_removal_flg =
3891 	    ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
3892 	rx_data->silent_vlan_value =
3893 	    ECORE_CPU_TO_LE16(params->silent_removal_value);
3894 	rx_data->silent_vlan_mask =
3895 	    ECORE_CPU_TO_LE16(params->silent_removal_mask);
3896 }
3897 
3898 /* initialize the general, tx and rx parts of a queue object */
3899 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
3900 					*cmd_params,
3901 					struct client_init_ramrod_data *data)
3902 {
3903 	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3904 				       &cmd_params->params.setup.gen_params,
3905 				       &data->general,
3906 				       &cmd_params->params.setup.flags);
3907 
3908 	ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
3909 				  &data->tx, &cmd_params->params.setup.flags);
3910 
3911 	ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
3912 				  &data->rx, &cmd_params->params.setup.flags);
3913 
3914 	ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
3915 				     &data->rx);
3916 }
3917 
3918 /* initialize the general and tx parts of a tx-only queue object */
3919 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
3920 				       *cmd_params,
3921 				       struct tx_queue_init_ramrod_data *data)
3922 {
3923 	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3924 				       &cmd_params->params.tx_only.gen_params,
3925 				       &data->general,
3926 				       &cmd_params->params.tx_only.flags);
3927 
3928 	ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
3929 				  &data->tx, &cmd_params->params.tx_only.flags);
3930 
3931 	ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
3932 		  cmd_params->q_obj->cids[0],
3933 		  data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
3934 }
3935 
3936 /**
3937  * ecore_q_init - init HW/FW queue
3938  *
3939  * @sc:		device handle
3940  * @params:
3941  *
3942  * HW/FW initial Queue configuration:
3943  *      - HC: Rx and Tx
3944  *      - CDU context validation
3945  *
3946  */
3947 static int ecore_q_init(struct bnx2x_softc *sc,
3948 			struct ecore_queue_state_params *params)
3949 {
3950 	struct ecore_queue_sp_obj *o = params->q_obj;
3951 	struct ecore_queue_init_params *init = &params->params.init;
3952 	uint16_t hc_usec;
3953 	uint8_t cos;
3954 
3955 	/* Tx HC configuration */
3956 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
3957 	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
3958 		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
3959 
3960 		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
3961 					       init->tx.sb_cq_index,
3962 					       !ECORE_TEST_BIT
3963 					       (ECORE_Q_FLG_HC_EN,
3964 						&init->tx.flags), hc_usec);
3965 	}
3966 
3967 	/* Rx HC configuration */
3968 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
3969 	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
3970 		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
3971 
3972 		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
3973 					       init->rx.sb_cq_index,
3974 					       !ECORE_TEST_BIT
3975 					       (ECORE_Q_FLG_HC_EN,
3976 						&init->rx.flags), hc_usec);
3977 	}
3978 
3979 	/* Set CDU context validation values */
3980 	for (cos = 0; cos < o->max_cos; cos++) {
3981 		ECORE_MSG(sc, "setting context validation. cid %d, cos %d",
3982 			  o->cids[cos], cos);
3983 		ECORE_MSG(sc, "context pointer %p", init->cxts[cos]);
3984 		ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
3985 	}
3986 
3987 	/* As no ramrod is sent, complete the command immediately */
3988 	o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
3989 
3990 	ECORE_MMIOWB();
3991 	ECORE_SMP_MB();
3992 
3993 	return ECORE_SUCCESS;
3994 }
3995 
3996 static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params
3997 				  *params)
3998 {
3999 	struct ecore_queue_sp_obj *o = params->q_obj;
4000 	struct client_init_ramrod_data *rdata =
4001 	    (struct client_init_ramrod_data *)o->rdata;
4002 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4003 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4004 
4005 	/* Clear the ramrod data */
4006 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4007 
4008 	/* Fill the ramrod data */
4009 	ecore_q_fill_setup_data_cmn(sc, params, rdata);
4010 
4011 	/* No need for an explicit memory barrier here: the write to the
4012 	 * SPQ element must in any case be ordered against the update of
4013 	 * the SPQ producer, which involves a memory read, and the full
4014 	 * memory barrier needed for that is already placed there
4015 	 * (inside ecore_sp_post()).
4016 	 */
4017 
4018 	return ecore_sp_post(sc,
4019 			     ramrod,
4020 			     o->cids[ECORE_PRIMARY_CID_INDEX],
4021 			     data_mapping, ETH_CONNECTION_TYPE);
4022 }
4023 
4024 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
4025 				 struct ecore_queue_state_params *params)
4026 {
4027 	struct ecore_queue_sp_obj *o = params->q_obj;
4028 	struct client_init_ramrod_data *rdata =
4029 	    (struct client_init_ramrod_data *)o->rdata;
4030 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4031 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4032 
4033 	/* Clear the ramrod data */
4034 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4035 
4036 	/* Fill the ramrod data */
4037 	ecore_q_fill_setup_data_cmn(sc, params, rdata);
4038 	ecore_q_fill_setup_data_e2(params, rdata);
4039 
4040 	/* No need for an explicit memory barrier here: the write to the
4041 	 * SPQ element must in any case be ordered against the update of
4042 	 * the SPQ producer, which involves a memory read, and the full
4043 	 * memory barrier needed for that is already placed there
4044 	 * (inside ecore_sp_post()).
4045 	 */
4046 
4047 	return ecore_sp_post(sc,
4048 			     ramrod,
4049 			     o->cids[ECORE_PRIMARY_CID_INDEX],
4050 			     data_mapping, ETH_CONNECTION_TYPE);
4051 }
4052 
4053 static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
4054 				      *params)
4055 {
4056 	struct ecore_queue_sp_obj *o = params->q_obj;
4057 	struct tx_queue_init_ramrod_data *rdata =
4058 	    (struct tx_queue_init_ramrod_data *)o->rdata;
4059 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4060 	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4061 	struct ecore_queue_setup_tx_only_params *tx_only_params =
4062 	    &params->params.tx_only;
4063 	uint8_t cid_index = tx_only_params->cid_index;
4064 
4065 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
4066 		ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4067 		ECORE_MSG(sc, "sending forward tx-only ramrod");
	}
4068 
4069 	if (cid_index >= o->max_cos) {
4070 		PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4071 			    o->cl_id, cid_index);
4072 		return ECORE_INVAL;
4073 	}
4074 
4075 	ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d",
4076 		  tx_only_params->gen_params.cos,
4077 		  tx_only_params->gen_params.spcl_id);
4078 
4079 	/* Clear the ramrod data */
4080 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4081 
4082 	/* Fill the ramrod data */
4083 	ecore_q_fill_setup_tx_only(sc, params, rdata);
4084 
4085 	ECORE_MSG(sc,
4086 		  "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4087 		  o->cids[cid_index], rdata->general.client_id,
4088 		  rdata->general.sp_client_id, rdata->general.cos);
4089 
4090 	/* No need for an explicit memory barrier here: the write to the
4091 	 * SPQ element must in any case be ordered against the update of
4092 	 * the SPQ producer, which involves a memory read, and the full
4093 	 * memory barrier needed for that is already placed there
4094 	 * (inside ecore_sp_post()).
4095 	 */
4096 
4097 	return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4098 			     data_mapping, ETH_CONNECTION_TYPE);
4099 }
4100 
4101 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
4102 				     struct ecore_queue_update_params *params,
4103 				     struct client_update_ramrod_data *data)
4104 {
4105 	/* Client ID of the client to update */
4106 	data->client_id = obj->cl_id;
4107 
4108 	/* Function ID of the client to update */
4109 	data->func_id = obj->func_id;
4110 
4111 	/* Default VLAN value */
4112 	data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
4113 
4114 	/* Inner VLAN stripping */
4115 	data->inner_vlan_removal_enable_flg =
4116 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4117 	data->inner_vlan_removal_change_flg =
4118 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
4119 			   &params->update_flags);
4120 
4121 	/* Outer VLAN stripping */
4122 	data->outer_vlan_removal_enable_flg =
4123 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4124 	data->outer_vlan_removal_change_flg =
4125 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
4126 			   &params->update_flags);
4127 
4128 	/* Drop packets that have source MAC that doesn't belong to this
4129 	 * Queue.
4130 	 */
4131 	data->anti_spoofing_enable_flg =
4132 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4133 	data->anti_spoofing_change_flg =
4134 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
4135 			   &params->update_flags);
4136 
4137 	/* Activate/Deactivate */
4138 	data->activate_flg =
4139 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
4140 	data->activate_change_flg =
4141 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4142 
4143 	/* Enable default VLAN */
4144 	data->default_vlan_enable_flg =
4145 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4146 	data->default_vlan_change_flg =
4147 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
4148 			   &params->update_flags);
4149 
4150 	/* silent vlan removal */
4151 	data->silent_vlan_change_flg =
4152 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4153 			   &params->update_flags);
4154 	data->silent_vlan_removal_flg =
4155 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
4156 			   &params->update_flags);
4157 	data->silent_vlan_value =
4158 	    ECORE_CPU_TO_LE16(params->silent_removal_value);
4159 	data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
4160 
4161 	/* tx switching */
4162 	data->tx_switching_flg =
4163 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags);
4164 	data->tx_switching_change_flg =
4165 	    ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
4166 			   &params->update_flags);
4167 }
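
/* Note: every attribute in the UPDATE ramrod travels as a value/"change"
 * flag pair - the change flag tells the firmware whether to apply the
 * corresponding value. ecore_q_send_activate()/_deactivate() below
 * illustrate this: both set ECORE_Q_UPDATE_ACTIVATE_CHNG and merely
 * toggle the ECORE_Q_UPDATE_ACTIVATE value bit.
 */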
4168 
4169 static int ecore_q_send_update(struct bnx2x_softc *sc,
4170 			       struct ecore_queue_state_params *params)
4171 {
4172 	struct ecore_queue_sp_obj *o = params->q_obj;
4173 	struct client_update_ramrod_data *rdata =
4174 	    (struct client_update_ramrod_data *)o->rdata;
4175 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4176 	struct ecore_queue_update_params *update_params =
4177 	    &params->params.update;
4178 	uint8_t cid_index = update_params->cid_index;
4179 
4180 	if (cid_index >= o->max_cos) {
4181 		PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4182 			    o->cl_id, cid_index);
4183 		return ECORE_INVAL;
4184 	}
4185 
4186 	/* Clear the ramrod data */
4187 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4188 
4189 	/* Fill the ramrod data */
4190 	ecore_q_fill_update_data(o, update_params, rdata);
4191 
4192 	/* No need for an explicit memory barrier here: the write to the
4193 	 * SPQ element must in any case be ordered against the update of
4194 	 * the SPQ producer, which involves a memory read, and the full
4195 	 * memory barrier needed for that is already placed there
4196 	 * (inside ecore_sp_post()).
4197 	 */
4198 
4199 	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4200 			     o->cids[cid_index], data_mapping,
4201 			     ETH_CONNECTION_TYPE);
4202 }
4203 
4204 /**
4205  * ecore_q_send_deactivate - send DEACTIVATE command
4206  *
4207  * @sc:		device handle
4208  * @params:
4209  *
4210  * implemented using the UPDATE command.
4211  */
4212 static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4213 				   *params)
4214 {
4215 	struct ecore_queue_update_params *update = &params->params.update;
4216 
4217 	ECORE_MEMSET(update, 0, sizeof(*update));
4218 
4219 	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4220 
4221 	return ecore_q_send_update(sc, params);
4222 }
4223 
4224 /**
4225  * ecore_q_send_activate - send ACTIVATE command
4226  *
4227  * @sc:		device handle
4228  * @params:
4229  *
4230  * implemented using the UPDATE command.
4231  */
4232 static int ecore_q_send_activate(struct bnx2x_softc *sc,
4233 				 struct ecore_queue_state_params *params)
4234 {
4235 	struct ecore_queue_update_params *update = &params->params.update;
4236 
4237 	ECORE_MEMSET(update, 0, sizeof(*update));
4238 
4239 	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
4240 	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4241 
4242 	return ecore_q_send_update(sc, params);
4243 }
4244 
4245 static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
4246 				   __rte_unused struct
4247 				   ecore_queue_state_params *params)
4248 {
4249 	/* Not implemented yet. */
4250 	return -1;
4251 }
4252 
4253 static int ecore_q_send_halt(struct bnx2x_softc *sc,
4254 			     struct ecore_queue_state_params *params)
4255 {
4256 	struct ecore_queue_sp_obj *o = params->q_obj;
4257 
4258 	/* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
4259 	ecore_dma_addr_t data_mapping = 0;
4260 	data_mapping = (ecore_dma_addr_t) o->cl_id;
4261 
4262 	return ecore_sp_post(sc,
4263 			     RAMROD_CMD_ID_ETH_HALT,
4264 			     o->cids[ECORE_PRIMARY_CID_INDEX],
4265 			     data_mapping, ETH_CONNECTION_TYPE);
4266 }
4267 
4268 static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
4269 				struct ecore_queue_state_params *params)
4270 {
4271 	struct ecore_queue_sp_obj *o = params->q_obj;
4272 	uint8_t cid_idx = params->params.cfc_del.cid_index;
4273 
4274 	if (cid_idx >= o->max_cos) {
4275 		PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4276 			    o->cl_id, cid_idx);
4277 		return ECORE_INVAL;
4278 	}
4279 
4280 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
4281 			     o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
4282 }
4283 
4284 static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4285 				  *params)
4286 {
4287 	struct ecore_queue_sp_obj *o = params->q_obj;
4288 	uint8_t cid_index = params->params.terminate.cid_index;
4289 
4290 	if (cid_index >= o->max_cos) {
4291 		PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4292 			    o->cl_id, cid_index);
4293 		return ECORE_INVAL;
4294 	}
4295 
4296 	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
4297 			     o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
4298 }
4299 
4300 static int ecore_q_send_empty(struct bnx2x_softc *sc,
4301 			      struct ecore_queue_state_params *params)
4302 {
4303 	struct ecore_queue_sp_obj *o = params->q_obj;
4304 
4305 	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
4306 			     o->cids[ECORE_PRIMARY_CID_INDEX], 0,
4307 			     ETH_CONNECTION_TYPE);
4308 }
4309 
4310 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
4311 				    *params)
4312 {
4313 	switch (params->cmd) {
4314 	case ECORE_Q_CMD_INIT:
4315 		return ecore_q_init(sc, params);
4316 	case ECORE_Q_CMD_SETUP_TX_ONLY:
4317 		return ecore_q_send_setup_tx_only(sc, params);
4318 	case ECORE_Q_CMD_DEACTIVATE:
4319 		return ecore_q_send_deactivate(sc, params);
4320 	case ECORE_Q_CMD_ACTIVATE:
4321 		return ecore_q_send_activate(sc, params);
4322 	case ECORE_Q_CMD_UPDATE:
4323 		return ecore_q_send_update(sc, params);
4324 	case ECORE_Q_CMD_UPDATE_TPA:
4325 		return ecore_q_send_update_tpa(sc, params);
4326 	case ECORE_Q_CMD_HALT:
4327 		return ecore_q_send_halt(sc, params);
4328 	case ECORE_Q_CMD_CFC_DEL:
4329 		return ecore_q_send_cfc_del(sc, params);
4330 	case ECORE_Q_CMD_TERMINATE:
4331 		return ecore_q_send_terminate(sc, params);
4332 	case ECORE_Q_CMD_EMPTY:
4333 		return ecore_q_send_empty(sc, params);
4334 	default:
4335 		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4336 		return ECORE_INVAL;
4337 	}
4338 }
4339 
4340 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
4341 				    struct ecore_queue_state_params *params)
4342 {
4343 	switch (params->cmd) {
4344 	case ECORE_Q_CMD_SETUP:
4345 		return ecore_q_send_setup_e1x(sc, params);
4346 	case ECORE_Q_CMD_INIT:
4347 	case ECORE_Q_CMD_SETUP_TX_ONLY:
4348 	case ECORE_Q_CMD_DEACTIVATE:
4349 	case ECORE_Q_CMD_ACTIVATE:
4350 	case ECORE_Q_CMD_UPDATE:
4351 	case ECORE_Q_CMD_UPDATE_TPA:
4352 	case ECORE_Q_CMD_HALT:
4353 	case ECORE_Q_CMD_CFC_DEL:
4354 	case ECORE_Q_CMD_TERMINATE:
4355 	case ECORE_Q_CMD_EMPTY:
4356 		return ecore_queue_send_cmd_cmn(sc, params);
4357 	default:
4358 		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4359 		return ECORE_INVAL;
4360 	}
4361 }
4362 
4363 static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
4364 				   struct ecore_queue_state_params *params)
4365 {
4366 	switch (params->cmd) {
4367 	case ECORE_Q_CMD_SETUP:
4368 		return ecore_q_send_setup_e2(sc, params);
4369 	case ECORE_Q_CMD_INIT:
4370 	case ECORE_Q_CMD_SETUP_TX_ONLY:
4371 	case ECORE_Q_CMD_DEACTIVATE:
4372 	case ECORE_Q_CMD_ACTIVATE:
4373 	case ECORE_Q_CMD_UPDATE:
4374 	case ECORE_Q_CMD_UPDATE_TPA:
4375 	case ECORE_Q_CMD_HALT:
4376 	case ECORE_Q_CMD_CFC_DEL:
4377 	case ECORE_Q_CMD_TERMINATE:
4378 	case ECORE_Q_CMD_EMPTY:
4379 		return ecore_queue_send_cmd_cmn(sc, params);
4380 	default:
4381 		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4382 		return ECORE_INVAL;
4383 	}
4384 }
4385 
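/* Summary of the regular-Queue state machine checked below, derived from
 * the switch statement that follows:
 *
 *	RESET       --INIT-->          INITIALIZED
 *	INITIALIZED --SETUP-->         ACTIVE or INACTIVE (per Q_FLG_ACTIVE)
 *	ACTIVE      --SETUP_TX_ONLY--> MULTI_COS,  --HALT--> STOPPED
 *	MULTI_COS   --TERMINATE-->     MCOS_TERMINATED --CFC_DEL--> ACTIVE or
 *	                               MULTI_COS (per remaining tx-only cons)
 *	INACTIVE    --ACTIVATE-->      ACTIVE,     --HALT--> STOPPED
 *	STOPPED     --TERMINATE-->     TERMINATED  --CFC_DEL--> RESET
 *
 * EMPTY and UPDATE_TPA keep the current state; UPDATE may move between
 * ACTIVE/MULTI_COS and INACTIVE depending on the ACTIVATE update flags.
 */
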
4386 /**
4387  * ecore_queue_chk_transition - check state machine of a regular Queue
4388  *
4389  * @sc:		device handle
4390  * @o:
4391  * @params:
4392  *
4393  * (not Forwarding)
4394  * It both checks if the requested command is legal in a current
4395  * state and, if it's legal, sets a `next_state' in the object
4396  * that will be used in the completion flow to set the `state'
4397  * of the object.
4398  *
4399  * returns 0 if a requested command is a legal transition,
4400  *         ECORE_INVAL otherwise.
4401  */
4402 static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
4403 				      struct ecore_queue_sp_obj *o,
4404 				      struct ecore_queue_state_params *params)
4405 {
4406 	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4407 	enum ecore_queue_cmd cmd = params->cmd;
4408 	struct ecore_queue_update_params *update_params =
4409 	    &params->params.update;
4410 	uint8_t next_tx_only = o->num_tx_only;
4411 
4412 	/* Forget all commands pending completion if a driver-only state
4413 	 * transition has been requested.
4414 	 */
4415 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4416 		o->pending = 0;
4417 		o->next_state = ECORE_Q_STATE_MAX;
4418 	}
4419 
4420 	/* Don't allow a next state transition if we are in the middle of
4421 	 * the previous one.
4422 	 */
4423 	if (o->pending) {
4424 		PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %x",
4425 			    o->pending);
4426 		return ECORE_BUSY;
4427 	}
4428 
4429 	switch (state) {
4430 	case ECORE_Q_STATE_RESET:
4431 		if (cmd == ECORE_Q_CMD_INIT)
4432 			next_state = ECORE_Q_STATE_INITIALIZED;
4433 
4434 		break;
4435 	case ECORE_Q_STATE_INITIALIZED:
4436 		if (cmd == ECORE_Q_CMD_SETUP) {
4437 			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4438 					   &params->params.setup.flags))
4439 				next_state = ECORE_Q_STATE_ACTIVE;
4440 			else
4441 				next_state = ECORE_Q_STATE_INACTIVE;
4442 		}
4443 
4444 		break;
4445 	case ECORE_Q_STATE_ACTIVE:
4446 		if (cmd == ECORE_Q_CMD_DEACTIVATE)
4447 			next_state = ECORE_Q_STATE_INACTIVE;
4448 
4449 		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4450 			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4451 			next_state = ECORE_Q_STATE_ACTIVE;
4452 
4453 		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4454 			next_state = ECORE_Q_STATE_MULTI_COS;
4455 			next_tx_only = 1;
4456 		}
4457 
4458 		else if (cmd == ECORE_Q_CMD_HALT)
4459 			next_state = ECORE_Q_STATE_STOPPED;
4460 
4461 		else if (cmd == ECORE_Q_CMD_UPDATE) {
4462 			/* If "active" state change is requested, update the
4463 			 * state accordingly.
4464 			 */
4465 			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4466 					   &update_params->update_flags) &&
4467 			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4468 					    &update_params->update_flags))
4469 				next_state = ECORE_Q_STATE_INACTIVE;
4470 			else
4471 				next_state = ECORE_Q_STATE_ACTIVE;
4472 		}
4473 
4474 		break;
4475 	case ECORE_Q_STATE_MULTI_COS:
4476 		if (cmd == ECORE_Q_CMD_TERMINATE)
4477 			next_state = ECORE_Q_STATE_MCOS_TERMINATED;
4478 
4479 		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4480 			next_state = ECORE_Q_STATE_MULTI_COS;
4481 			next_tx_only = o->num_tx_only + 1;
4482 		}
4483 
4484 		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4485 			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4486 			next_state = ECORE_Q_STATE_MULTI_COS;
4487 
4488 		else if (cmd == ECORE_Q_CMD_UPDATE) {
4489 			/* If "active" state change is requested, update the
4490 			 * state accordingly.
4491 			 */
4492 			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4493 					   &update_params->update_flags) &&
4494 			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4495 					    &update_params->update_flags))
4496 				next_state = ECORE_Q_STATE_INACTIVE;
4497 			else
4498 				next_state = ECORE_Q_STATE_MULTI_COS;
4499 		}
4500 
4501 		break;
4502 	case ECORE_Q_STATE_MCOS_TERMINATED:
4503 		if (cmd == ECORE_Q_CMD_CFC_DEL) {
4504 			next_tx_only = o->num_tx_only - 1;
4505 			if (next_tx_only == 0)
4506 				next_state = ECORE_Q_STATE_ACTIVE;
4507 			else
4508 				next_state = ECORE_Q_STATE_MULTI_COS;
4509 		}
4510 
4511 		break;
4512 	case ECORE_Q_STATE_INACTIVE:
4513 		if (cmd == ECORE_Q_CMD_ACTIVATE)
4514 			next_state = ECORE_Q_STATE_ACTIVE;
4515 
4516 		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4517 			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4518 			next_state = ECORE_Q_STATE_INACTIVE;
4519 
4520 		else if (cmd == ECORE_Q_CMD_HALT)
4521 			next_state = ECORE_Q_STATE_STOPPED;
4522 
4523 		else if (cmd == ECORE_Q_CMD_UPDATE) {
4524 			/* If "active" state change is requested, update the
4525 			 * state accordingly.
4526 			 */
4527 			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4528 					   &update_params->update_flags) &&
4529 			    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4530 					   &update_params->update_flags)) {
4531 				if (o->num_tx_only == 0)
4532 					next_state = ECORE_Q_STATE_ACTIVE;
4533 				else	/* tx only queues exist for this queue */
4534 					next_state = ECORE_Q_STATE_MULTI_COS;
4535 			} else
4536 				next_state = ECORE_Q_STATE_INACTIVE;
4537 		}
4538 
4539 		break;
4540 	case ECORE_Q_STATE_STOPPED:
4541 		if (cmd == ECORE_Q_CMD_TERMINATE)
4542 			next_state = ECORE_Q_STATE_TERMINATED;
4543 
4544 		break;
4545 	case ECORE_Q_STATE_TERMINATED:
4546 		if (cmd == ECORE_Q_CMD_CFC_DEL)
4547 			next_state = ECORE_Q_STATE_RESET;
4548 
4549 		break;
4550 	default:
4551 		PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4552 	}
4553 
4554 	/* Transition is assured */
4555 	if (next_state != ECORE_Q_STATE_MAX) {
4556 		ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4557 			  state, cmd, next_state);
4558 		o->next_state = next_state;
4559 		o->next_tx_only = next_tx_only;
4560 		return ECORE_SUCCESS;
4561 	}
4562 
4563 	ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
4564 
4565 	return ECORE_INVAL;
4566 }
4567 
4568 /**
4569  * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
4570  *
4571  * @sc:		device handle
4572  * @o:
4573  * @params:
4574  *
4575  * It both checks if the requested command is legal in a current
4576  * state and, if it's legal, sets a `next_state' in the object
4577  * that will be used in the completion flow to set the `state'
4578  * of the object.
4579  *
4580  * returns 0 if a requested command is a legal transition,
4581  *         ECORE_INVAL otherwise.
4582  */
4583 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
4584 					  struct ecore_queue_sp_obj *o,
4585 					  struct ecore_queue_state_params
4586 					  *params)
4587 {
4588 	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4589 	enum ecore_queue_cmd cmd = params->cmd;
4590 
4591 	switch (state) {
4592 	case ECORE_Q_STATE_RESET:
4593 		if (cmd == ECORE_Q_CMD_INIT)
4594 			next_state = ECORE_Q_STATE_INITIALIZED;
4595 
4596 		break;
4597 	case ECORE_Q_STATE_INITIALIZED:
4598 		if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4599 			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4600 					   &params->params.tx_only.flags))
4601 				next_state = ECORE_Q_STATE_ACTIVE;
4602 			else
4603 				next_state = ECORE_Q_STATE_INACTIVE;
4604 		}
4605 
4606 		break;
4607 	case ECORE_Q_STATE_ACTIVE:
4608 	case ECORE_Q_STATE_INACTIVE:
4609 		if (cmd == ECORE_Q_CMD_CFC_DEL)
4610 			next_state = ECORE_Q_STATE_RESET;
4611 
4612 		break;
4613 	default:
4614 		PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4615 	}
4616 
4617 	/* Transition is assured */
4618 	if (next_state != ECORE_Q_STATE_MAX) {
4619 		ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4620 			  state, cmd, next_state);
4621 		o->next_state = next_state;
4622 		return ECORE_SUCCESS;
4623 	}
4624 
4625 	ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
4626 	return ECORE_INVAL;
4627 }
4628 
4629 void ecore_init_queue_obj(struct bnx2x_softc *sc,
4630 			  struct ecore_queue_sp_obj *obj,
4631 			  uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt,
4632 			  uint8_t func_id, void *rdata,
4633 			  ecore_dma_addr_t rdata_mapping, uint32_t type)
4634 {
4635 	ECORE_MEMSET(obj, 0, sizeof(*obj));
4636 
4637 	/* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
4638 	ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
4639 
4640 	rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
4641 	obj->max_cos = cid_cnt;
4642 	obj->cl_id = cl_id;
4643 	obj->func_id = func_id;
4644 	obj->rdata = rdata;
4645 	obj->rdata_mapping = rdata_mapping;
4646 	obj->type = type;
4647 	obj->next_state = ECORE_Q_STATE_MAX;
4648 
4649 	if (CHIP_IS_E1x(sc))
4650 		obj->send_cmd = ecore_queue_send_cmd_e1x;
4651 	else
4652 		obj->send_cmd = ecore_queue_send_cmd_e2;
4653 
4654 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
4655 		obj->check_transition = ecore_queue_chk_fwd_transition;
4656 	else
4657 		obj->check_transition = ecore_queue_chk_transition;
4658 
4659 	obj->complete_cmd = ecore_queue_comp_cmd;
4660 	obj->wait_comp = ecore_queue_wait_comp;
4661 	obj->set_pending = ecore_queue_set_pending;
4662 }
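
/* Bring-up sketch: after ecore_init_queue_obj(), a queue is driven through
 * the state machine via ecore_queue_state_change(). The q_obj placement
 * (fp->q_obj) is an assumption used only for illustration:
 *
 *	struct ecore_queue_state_params q_params = { 0 };
 *
 *	q_params.q_obj = &fp->q_obj;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	q_params.cmd = ECORE_Q_CMD_INIT;
 *	rc = ecore_queue_state_change(sc, &q_params);
 *	// fill q_params.params.setup, then:
 *	q_params.cmd = ECORE_Q_CMD_SETUP;
 *	rc = ecore_queue_state_change(sc, &q_params);
 */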
4663 
4664 /********************** Function state object *********************************/
4665 enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
4666 					   struct ecore_func_sp_obj *o)
4667 {
4668 	/* in the middle of a transaction - return INVALID state */
4669 	if (o->pending)
4670 		return ECORE_F_STATE_MAX;
4671 
4672 	/* ensure the order of reading of o->pending and o->state:
4673 	 * o->pending must be read first
4674 	 */
4675 	rmb();
4676 
4677 	return o->state;
4678 }
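
/* Illustrative sketch (not part of the driver): polling until the
 * function object leaves the "transaction in progress" condition, in
 * which ecore_func_get_state() returns ECORE_F_STATE_MAX. The helper
 * name and the retry bound are hypothetical.
 */
#if 0
static enum ecore_func_state
example_wait_stable_state(struct bnx2x_softc *sc, struct ecore_func_sp_obj *o)
{
	enum ecore_func_state state;
	int cnt = 100;

	while (((state = ecore_func_get_state(sc, o)) == ECORE_F_STATE_MAX) &&
	       --cnt)
		ECORE_MSLEEP(10);

	return state; /* still ECORE_F_STATE_MAX on timeout */
}
#endif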
4679 
4680 static int ecore_func_wait_comp(struct bnx2x_softc *sc,
4681 				struct ecore_func_sp_obj *o,
4682 				enum ecore_func_cmd cmd)
4683 {
4684 	return ecore_state_wait(sc, cmd, &o->pending);
4685 }
4686 
4687 /**
4688  * ecore_func_state_change_comp - complete the state machine transition
4689  *
4690  * @sc:		device handle
4691  * @o:		function SP object
4692  * @cmd:	command that has completed
4693  *
4694  * Called on state change transition. Completes the state
4695  * machine transition only - no HW interaction.
4696  */
4697 static int
4698 ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
4699 			     struct ecore_func_sp_obj *o,
4700 			     enum ecore_func_cmd cmd)
4701 {
4702 	uint32_t cur_pending = o->pending;
4703 
4704 	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4705 		PMD_DRV_LOG(ERR, sc,
4706 			    "Bad MC reply %d for func %d in state %d pending 0x%x, next_state %d",
4707 			    cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
4708 			    o->next_state);
4709 		return ECORE_INVAL;
4710 	}
4711 
4712 	ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d",
4713 		  cmd, ECORE_FUNC_ID(sc), o->next_state);
4714 
4715 	o->state = o->next_state;
4716 	o->next_state = ECORE_F_STATE_MAX;
4717 
4718 	/* It's important that o->state and o->next_state are
4719 	 * updated before o->pending.
4720 	 */
4721 	wmb();
4722 
4723 	ECORE_CLEAR_BIT(cmd, &o->pending);
4724 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
4725 
4726 	return ECORE_SUCCESS;
4727 }
4728 
4729 /**
4730  * ecore_func_comp_cmd - complete the state change command
4731  *
4732  * @sc:		device handle
4733  * @o:		function SP object
4734  * @cmd:	completion command to check
4735  *
4736  * Checks that the arrived completion is expected.
4737  */
4738 static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
4739 			       struct ecore_func_sp_obj *o,
4740 			       enum ecore_func_cmd cmd)
4741 {
4742 	/* Complete the state machine part first, check if it's a
4743 	 * legal completion.
4744 	 */
4745 	int rc = ecore_func_state_change_comp(sc, o, cmd);
4746 	return rc;
4747 }
4748 
4749 /**
4750  * ecore_func_chk_transition - perform function state machine transition
4751  *
4752  * @sc:		device handle
4753  * @o:		function SP object
4754  * @params:	state change parameters, including the requested command
4755  *
4756  * It both checks if the requested command is legal in a current
4757  * state and, if it's legal, sets a `next_state' in the object
4758  * that will be used in the completion flow to set the `state'
4759  * of the object.
4760  *
4761  * returns 0 if a requested command is a legal transition,
4762  *         ECORE_INVAL otherwise.
4763  */
4764 static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
4765 				     struct ecore_func_sp_obj *o,
4766 				     struct ecore_func_state_params *params)
4767 {
4768 	enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
4769 	enum ecore_func_cmd cmd = params->cmd;
4770 
4771 	/* Forget all pending for completion commands if a driver only state
4772 	 * transition has been requested.
4773 	 */
4774 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4775 		o->pending = 0;
4776 		o->next_state = ECORE_F_STATE_MAX;
4777 	}
4778 
4779 	/* Don't allow a next state transition if we are in the middle of
4780 	 * the previous one.
4781 	 */
4782 	if (o->pending)
4783 		return ECORE_BUSY;
4784 
4785 	switch (state) {
4786 	case ECORE_F_STATE_RESET:
4787 		if (cmd == ECORE_F_CMD_HW_INIT)
4788 			next_state = ECORE_F_STATE_INITIALIZED;
4789 
4790 		break;
4791 	case ECORE_F_STATE_INITIALIZED:
4792 		if (cmd == ECORE_F_CMD_START)
4793 			next_state = ECORE_F_STATE_STARTED;
4794 
4795 		else if (cmd == ECORE_F_CMD_HW_RESET)
4796 			next_state = ECORE_F_STATE_RESET;
4797 
4798 		break;
4799 	case ECORE_F_STATE_STARTED:
4800 		if (cmd == ECORE_F_CMD_STOP)
4801 			next_state = ECORE_F_STATE_INITIALIZED;
4802 	/* AFEX ramrods can be sent only in the STARTED state, and
4803 		 * only if a FUNCTION_STOP ramrod completion is not pending;
4804 		 * for these events the next state remains STARTED.
4805 		 */
4806 		else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
4807 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4808 			next_state = ECORE_F_STATE_STARTED;
4809 
4810 		else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
4811 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4812 			next_state = ECORE_F_STATE_STARTED;
4813 
4814 		/* Switch_update ramrod can be sent in either started or
4815 		 * tx_stopped state, and it doesn't change the state.
4816 		 */
4817 		else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4818 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4819 			next_state = ECORE_F_STATE_STARTED;
4820 
4821 		else if (cmd == ECORE_F_CMD_TX_STOP)
4822 			next_state = ECORE_F_STATE_TX_STOPPED;
4823 
4824 		break;
4825 	case ECORE_F_STATE_TX_STOPPED:
4826 		if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4827 		    (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4828 			next_state = ECORE_F_STATE_TX_STOPPED;
4829 
4830 		else if (cmd == ECORE_F_CMD_TX_START)
4831 			next_state = ECORE_F_STATE_STARTED;
4832 
4833 		break;
4834 	default:
4835 		PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state);
4836 	}
4837 
4838 	/* Transition is assured */
4839 	if (next_state != ECORE_F_STATE_MAX) {
4840 		ECORE_MSG(sc, "Good function state transition: %d(%d)->%d",
4841 			  state, cmd, next_state);
4842 		o->next_state = next_state;
4843 		return ECORE_SUCCESS;
4844 	}
4845 
4846 	ECORE_MSG(sc,
4847 		  "Bad function state transition request: %d %d", state, cmd);
4848 
4849 	return ECORE_INVAL;
4850 }
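
/* Illustrative sketch (not part of the driver): the legal transitions
 * checked above, re-expressed as a table for readability. This is a
 * summary only - the real check must also honour the "no AFEX or
 * SWITCH_UPDATE while ECORE_F_CMD_STOP is pending" restriction.
 */
#if 0
static const struct {
	enum ecore_func_state from;
	enum ecore_func_cmd cmd;
	enum ecore_func_state to;
} example_f_transitions[] = {
	{ ECORE_F_STATE_RESET,       ECORE_F_CMD_HW_INIT,       ECORE_F_STATE_INITIALIZED },
	{ ECORE_F_STATE_INITIALIZED, ECORE_F_CMD_START,         ECORE_F_STATE_STARTED },
	{ ECORE_F_STATE_INITIALIZED, ECORE_F_CMD_HW_RESET,      ECORE_F_STATE_RESET },
	{ ECORE_F_STATE_STARTED,     ECORE_F_CMD_STOP,          ECORE_F_STATE_INITIALIZED },
	{ ECORE_F_STATE_STARTED,     ECORE_F_CMD_AFEX_UPDATE,   ECORE_F_STATE_STARTED },
	{ ECORE_F_STATE_STARTED,     ECORE_F_CMD_AFEX_VIFLISTS, ECORE_F_STATE_STARTED },
	{ ECORE_F_STATE_STARTED,     ECORE_F_CMD_SWITCH_UPDATE, ECORE_F_STATE_STARTED },
	{ ECORE_F_STATE_STARTED,     ECORE_F_CMD_TX_STOP,       ECORE_F_STATE_TX_STOPPED },
	{ ECORE_F_STATE_TX_STOPPED,  ECORE_F_CMD_SWITCH_UPDATE, ECORE_F_STATE_TX_STOPPED },
	{ ECORE_F_STATE_TX_STOPPED,  ECORE_F_CMD_TX_START,      ECORE_F_STATE_STARTED },
};
#endif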
4851 
4852 /**
4853  * ecore_func_init_func - performs HW init at function stage
4854  *
4855  * @sc:		device handle
4856  * @drv:	driver-specific HW init/reset callbacks
4857  *
4858  * Init HW when the current phase is
4859  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4860  * HW blocks.
4861  */
4862 static int ecore_func_init_func(struct bnx2x_softc *sc,
4863 				const struct ecore_func_sp_drv_ops *drv)
4864 {
4865 	return drv->init_hw_func(sc);
4866 }
4867 
4868 /**
4869  * ecore_func_init_port - performs HW init at port stage
4870  *
4871  * @sc:		device handle
4872  * @drv:	driver-specific HW init/reset callbacks
4873  *
4874  * Init HW when the current phase is
4875  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4876  * FUNCTION-only HW blocks.
4877  *
4878  */
4879 static int ecore_func_init_port(struct bnx2x_softc *sc,
4880 				const struct ecore_func_sp_drv_ops *drv)
4881 {
4882 	int rc = drv->init_hw_port(sc);
4883 	if (rc)
4884 		return rc;
4885 
4886 	return ecore_func_init_func(sc, drv);
4887 }
4888 
4889 /**
4890  * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4891  *
4892  * @sc:		device handle
4893  * @drv:	driver-specific HW init/reset callbacks
4894  *
4895  * Init HW when the current phase is
4896  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4897  * PORT-only and FUNCTION-only HW blocks.
4898  */
4899 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc,
4900 				    const struct ecore_func_sp_drv_ops *drv)
4901 {
4902 	int rc = drv->init_hw_cmn_chip(sc);
4903 	if (rc)
4904 		return rc;
4905 
4906 	return ecore_func_init_port(sc, drv);
4907 }
4908 
4909 /**
4910  * ecore_func_init_cmn - performs HW init at common stage
4911  *
4912  * @sc:		device handle
4913  * @drv:	driver-specific HW init/reset callbacks
4914  *
4915  * Init HW when the current phase is
4916  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
4917  * PORT-only and FUNCTION-only HW blocks.
4918  */
4919 static int ecore_func_init_cmn(struct bnx2x_softc *sc,
4920 			       const struct ecore_func_sp_drv_ops *drv)
4921 {
4922 	int rc = drv->init_hw_cmn(sc);
4923 	if (rc)
4924 		return rc;
4925 
4926 	return ecore_func_init_port(sc, drv);
4927 }
4928 
4929 static int ecore_func_hw_init(struct bnx2x_softc *sc,
4930 			      struct ecore_func_state_params *params)
4931 {
4932 	uint32_t load_code = params->params.hw_init.load_phase;
4933 	struct ecore_func_sp_obj *o = params->f_obj;
4934 	const struct ecore_func_sp_drv_ops *drv = o->drv;
4935 	int rc = 0;
4936 
4937 	ECORE_MSG(sc, "function %d  load_code %x",
4938 		  ECORE_ABS_FUNC_ID(sc), load_code);
4939 
4940 	/* Prepare FW */
4941 	rc = drv->init_fw(sc);
4942 	if (rc) {
4943 		PMD_DRV_LOG(ERR, sc, "Error loading firmware");
4944 		goto init_err;
4945 	}
4946 
4947 	/* Handle the beginning of the COMMON_XXX phases separately... */
4948 	switch (load_code) {
4949 	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4950 		rc = ecore_func_init_cmn_chip(sc, drv);
4951 		if (rc)
4952 			goto init_err;
4953 
4954 		break;
4955 	case FW_MSG_CODE_DRV_LOAD_COMMON:
4956 		rc = ecore_func_init_cmn(sc, drv);
4957 		if (rc)
4958 			goto init_err;
4959 
4960 		break;
4961 	case FW_MSG_CODE_DRV_LOAD_PORT:
4962 		rc = ecore_func_init_port(sc, drv);
4963 		if (rc)
4964 			goto init_err;
4965 
4966 		break;
4967 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4968 		rc = ecore_func_init_func(sc, drv);
4969 		if (rc)
4970 			goto init_err;
4971 
4972 		break;
4973 	default:
4974 		PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP",
4975 			    load_code);
4976 		rc = ECORE_INVAL;
4977 	}
4978 
4979 init_err:
4980 	/* In case of success, complete the command immediately: no ramrods
4981 	 * have been sent.
4982 	 */
4983 	if (!rc)
4984 		o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
4985 
4986 	return rc;
4987 }
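
/* Illustrative sketch (not part of the driver): requesting the HW_INIT
 * transition through the generic state-change entry point, as load-path
 * code would after ecore_init_func_obj() has wired up o->drv.
 * `load_code' is the FW_MSG_CODE_DRV_LOAD_* value returned by the MCP;
 * the helper name is hypothetical.
 */
#if 0
static int example_hw_init(struct bnx2x_softc *sc,
			   struct ecore_func_sp_obj *f_obj,
			   uint32_t load_code)
{
	struct ecore_func_state_params func_params;

	ECORE_MEMSET(&func_params, 0, sizeof(func_params));
	func_params.f_obj = f_obj;
	func_params.cmd = ECORE_F_CMD_HW_INIT;
	func_params.params.hw_init.load_phase = load_code;

	/* No ramrod is sent for HW_INIT, so the command completes
	 * synchronously inside ecore_func_hw_init().
	 */
	return ecore_func_state_change(sc, &func_params);
}
#endif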
4988 
4989 /**
4990  * ecore_func_reset_func - reset HW at function stage
4991  *
4992  * @sc:		device handle
4993  * @drv:	driver-specific HW init/reset callbacks
4994  *
4995  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
4996  * FUNCTION-only HW blocks.
4997  */
4998 static void ecore_func_reset_func(struct bnx2x_softc *sc,
4999 				  const struct ecore_func_sp_drv_ops *drv)
5000 {
5001 	drv->reset_hw_func(sc);
5002 }
5003 
5004 /**
5005  * ecore_func_reset_port - reset HW at port stage
5006  *
5007  * @sc:		device handle
5008  * @drv:	driver-specific HW init/reset callbacks
5009  *
5010  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5011  * FUNCTION-only and PORT-only HW blocks.
5012  *
5013  *                 !!!IMPORTANT!!!
5014  *
5015  * reset_port() must be called before reset_func(): the last thing
5016  * reset_func() does is pf_disable(), which disables PGLUE_B and
5017  * thereby makes any further DMAE transactions impossible.
5018  */
5019 static void ecore_func_reset_port(struct bnx2x_softc *sc,
5020 				  const struct ecore_func_sp_drv_ops *drv)
5021 {
5022 	drv->reset_hw_port(sc);
5023 	ecore_func_reset_func(sc, drv);
5024 }
5025 
5026 /**
5027  * ecore_func_reset_cmn - reset HW at common stage
5028  *
5029  * @sc:		device handle
5030  * @drv:	driver-specific HW init/reset callbacks
5031  *
5032  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5033  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5034  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5035  */
5036 static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
5037 				 const struct ecore_func_sp_drv_ops *drv)
5038 {
5039 	ecore_func_reset_port(sc, drv);
5040 	drv->reset_hw_cmn(sc);
5041 }
5042 
5043 static int ecore_func_hw_reset(struct bnx2x_softc *sc,
5044 			       struct ecore_func_state_params *params)
5045 {
5046 	uint32_t reset_phase = params->params.hw_reset.reset_phase;
5047 	struct ecore_func_sp_obj *o = params->f_obj;
5048 	const struct ecore_func_sp_drv_ops *drv = o->drv;
5049 
5050 	ECORE_MSG(sc, "function %d  reset_phase %x", ECORE_ABS_FUNC_ID(sc),
5051 		  reset_phase);
5052 
5053 	switch (reset_phase) {
5054 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5055 		ecore_func_reset_cmn(sc, drv);
5056 		break;
5057 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5058 		ecore_func_reset_port(sc, drv);
5059 		break;
5060 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5061 		ecore_func_reset_func(sc, drv);
5062 		break;
5063 	default:
5064 		PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP",
5065 			    reset_phase);
5066 		break;
5067 	}
5068 
5069 	/* Complete the command immediately: no ramrods have been sent. */
5070 	o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
5071 
5072 	return ECORE_SUCCESS;
5073 }
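
/* Illustrative sketch (not part of the driver): the unload-side
 * counterpart of HW_INIT. `reset_phase' is the FW_MSG_CODE_DRV_UNLOAD_*
 * value from the MCP; like HW_INIT, the command completes immediately
 * because no ramrod is sent. The helper name is hypothetical.
 */
#if 0
static int example_hw_reset(struct bnx2x_softc *sc,
			    struct ecore_func_sp_obj *f_obj,
			    uint32_t reset_phase)
{
	struct ecore_func_state_params func_params;

	ECORE_MEMSET(&func_params, 0, sizeof(func_params));
	func_params.f_obj = f_obj;
	func_params.cmd = ECORE_F_CMD_HW_RESET;
	func_params.params.hw_reset.reset_phase = reset_phase;

	return ecore_func_state_change(sc, &func_params);
}
#endif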
5074 
5075 static int ecore_func_send_start(struct bnx2x_softc *sc,
5076 				 struct ecore_func_state_params *params)
5077 {
5078 	struct ecore_func_sp_obj *o = params->f_obj;
5079 	struct function_start_data *rdata =
5080 	    (struct function_start_data *)o->rdata;
5081 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5082 	struct ecore_func_start_params *start_params = &params->params.start;
5083 
5084 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5085 
5086 	/* Fill the ramrod data with provided parameters */
5087 	rdata->function_mode = (uint8_t) start_params->mf_mode;
5088 	rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
5089 	rdata->path_id = ECORE_PATH_ID(sc);
5090 	rdata->network_cos_mode = start_params->network_cos_mode;
5091 
5092 	/* No explicit memory barrier is needed here even though the
5093 	 * writes to the SPQ element above must be ordered before the
5094 	 * update of the SPQ producer: that update involves a memory
5095 	 * read, so the full memory barrier issued inside
5096 	 * ecore_sp_post() already provides the required ordering
5097 	 * guarantee.
5098 	 */
5099 
5100 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5101 			     data_mapping, NONE_CONNECTION_TYPE);
5102 }
5103 
5104 static int ecore_func_send_switch_update(struct bnx2x_softc *sc,
5105 					 struct ecore_func_state_params *params)
5106 {
5107 	struct ecore_func_sp_obj *o = params->f_obj;
5108 	struct function_update_data *rdata =
5109 	    (struct function_update_data *)o->rdata;
5110 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5111 	struct ecore_func_switch_update_params *switch_update_params =
5112 	    &params->params.switch_update;
5113 
5114 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5115 
5116 	/* Fill the ramrod data with provided parameters */
5117 	if (ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
5118 			   &switch_update_params->changes)) {
5119 		rdata->tx_switch_suspend_change_flg = 1;
5120 		rdata->tx_switch_suspend =
5121 			ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
5122 				       &switch_update_params->changes);
5123 	}
5124 
5125 	rdata->echo = SWITCH_UPDATE;
5126 
5127 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5128 			     data_mapping, NONE_CONNECTION_TYPE);
5129 }
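
/* Illustrative sketch (not part of the driver): suspending TX switching
 * via the SWITCH_UPDATE ramrod, legal from both STARTED and TX_STOPPED
 * per ecore_func_chk_transition(). The helper name is hypothetical, and
 * ECORE_SET_BIT is assumed to be the setter counterpart of the
 * ECORE_TEST_BIT calls used on `changes' above.
 */
#if 0
static int example_tx_switch_suspend(struct bnx2x_softc *sc,
				     struct ecore_func_sp_obj *f_obj)
{
	struct ecore_func_state_params func_params;

	ECORE_MEMSET(&func_params, 0, sizeof(func_params));
	func_params.f_obj = f_obj;
	func_params.cmd = ECORE_F_CMD_SWITCH_UPDATE;
	ECORE_SET_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
		      &func_params.params.switch_update.changes);
	ECORE_SET_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
		      &func_params.params.switch_update.changes);
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return ecore_func_state_change(sc, &func_params);
}
#endif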
5130 
5131 static int ecore_func_send_afex_update(struct bnx2x_softc *sc,
5132 				       struct ecore_func_state_params *params)
5133 {
5134 	struct ecore_func_sp_obj *o = params->f_obj;
5135 	struct function_update_data *rdata =
5136 	    (struct function_update_data *)o->afex_rdata;
5137 	ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
5138 	struct ecore_func_afex_update_params *afex_update_params =
5139 	    &params->params.afex_update;
5140 
5141 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5142 
5143 	/* Fill the ramrod data with provided parameters */
5144 	rdata->vif_id_change_flg = 1;
5145 	rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
5146 	rdata->afex_default_vlan_change_flg = 1;
5147 	rdata->afex_default_vlan =
5148 	    ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
5149 	rdata->allowed_priorities_change_flg = 1;
5150 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
5151 	rdata->echo = AFEX_UPDATE;
5152 
5153 	/* No explicit memory barrier is needed here even though the
5154 	 * writes to the SPQ element above must be ordered before the
5155 	 * update of the SPQ producer: that update involves a memory
5156 	 * read, so the full memory barrier issued inside
5157 	 * ecore_sp_post() already provides the required ordering.
5158 	 */
5159 	ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
5160 		  rdata->vif_id,
5161 		  rdata->afex_default_vlan, rdata->allowed_priorities);
5162 
5163 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5164 			     data_mapping, NONE_CONNECTION_TYPE);
5165 }
5166 
5167 static
5168 inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
5169 					 struct ecore_func_state_params *params)
5170 {
5171 	struct ecore_func_sp_obj *o = params->f_obj;
5172 	struct afex_vif_list_ramrod_data *rdata =
5173 	    (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5174 	struct ecore_func_afex_viflists_params *afex_vif_params =
5175 	    &params->params.afex_viflists;
5176 	uint64_t *p_rdata = (uint64_t *) rdata;
5177 
5178 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5179 
5180 	/* Fill the ramrod data with provided parameters */
5181 	rdata->vif_list_index =
5182 	    ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
5183 	rdata->func_bit_map = afex_vif_params->func_bit_map;
5184 	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5185 	rdata->func_to_clear = afex_vif_params->func_to_clear;
5186 
5187 	/* send the sub-command type in the echo field */
5188 	rdata->echo = afex_vif_params->afex_vif_list_command;
5189 
5190 	/* No explicit memory barrier is needed here even though the
5191 	 * writes to the SPQ element above must be ordered before the
5192 	 * update of the SPQ producer: that update involves a memory
5193 	 * read, so the full memory barrier issued inside
5194 	 * ecore_sp_post() already provides the required ordering.
5195 	 */
5196 
5197 	ECORE_MSG(sc,
5198 		  "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
5199 		  rdata->afex_vif_list_command, rdata->vif_list_index,
5200 		  rdata->func_bit_map, rdata->func_to_clear);
5201 
5202 	/* this ramrod sends data directly and not through DMA mapping */
5203 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5204 			     *p_rdata, NONE_CONNECTION_TYPE);
5205 }
5206 
5207 static int ecore_func_send_stop(struct bnx2x_softc *sc,
5208 				__rte_unused struct ecore_func_state_params *params)
5209 {
5210 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
5211 			     NONE_CONNECTION_TYPE);
5212 }
5213 
5214 static int ecore_func_send_tx_stop(struct bnx2x_softc *sc,
5215 				   __rte_unused struct ecore_func_state_params *params)
5216 {
5217 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
5218 			     NONE_CONNECTION_TYPE);
5219 }
5220 
5221 static int ecore_func_send_tx_start(struct bnx2x_softc *sc,
5222 				    struct ecore_func_state_params *params)
5223 {
5224 	struct ecore_func_sp_obj *o = params->f_obj;
5225 	struct flow_control_configuration *rdata =
5226 	    (struct flow_control_configuration *)o->rdata;
5227 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5228 	struct ecore_func_tx_start_params *tx_start_params =
5229 	    &params->params.tx_start;
5230 	uint32_t i;
5231 
5232 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5233 
5234 	rdata->dcb_enabled = tx_start_params->dcb_enabled;
5235 	rdata->dcb_version = tx_start_params->dcb_version;
5236 	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5237 
5238 	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5239 		rdata->traffic_type_to_priority_cos[i] =
5240 		    tx_start_params->traffic_type_to_priority_cos[i];
5241 
5242 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5243 			     data_mapping, NONE_CONNECTION_TYPE);
5244 }
5245 
5246 static int ecore_func_send_cmd(struct bnx2x_softc *sc,
5247 			       struct ecore_func_state_params *params)
5248 {
5249 	switch (params->cmd) {
5250 	case ECORE_F_CMD_HW_INIT:
5251 		return ecore_func_hw_init(sc, params);
5252 	case ECORE_F_CMD_START:
5253 		return ecore_func_send_start(sc, params);
5254 	case ECORE_F_CMD_STOP:
5255 		return ecore_func_send_stop(sc, params);
5256 	case ECORE_F_CMD_HW_RESET:
5257 		return ecore_func_hw_reset(sc, params);
5258 	case ECORE_F_CMD_AFEX_UPDATE:
5259 		return ecore_func_send_afex_update(sc, params);
5260 	case ECORE_F_CMD_AFEX_VIFLISTS:
5261 		return ecore_func_send_afex_viflists(sc, params);
5262 	case ECORE_F_CMD_TX_STOP:
5263 		return ecore_func_send_tx_stop(sc, params);
5264 	case ECORE_F_CMD_TX_START:
5265 		return ecore_func_send_tx_start(sc, params);
5266 	case ECORE_F_CMD_SWITCH_UPDATE:
5267 		return ecore_func_send_switch_update(sc, params);
5268 	default:
5269 		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
5270 		return ECORE_INVAL;
5271 	}
5272 }
5273 
5274 void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
5275 			 struct ecore_func_sp_obj *obj,
5276 			 void *rdata, ecore_dma_addr_t rdata_mapping,
5277 			 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
5278 			 struct ecore_func_sp_drv_ops *drv_iface)
5279 {
5280 	ECORE_MEMSET(obj, 0, sizeof(*obj));
5281 
5282 	ECORE_MUTEX_INIT(&obj->one_pending_mutex);
5283 
5284 	obj->rdata = rdata;
5285 	obj->rdata_mapping = rdata_mapping;
5286 	obj->afex_rdata = afex_rdata;
5287 	obj->afex_rdata_mapping = afex_rdata_mapping;
5288 	obj->send_cmd = ecore_func_send_cmd;
5289 	obj->check_transition = ecore_func_chk_transition;
5290 	obj->complete_cmd = ecore_func_comp_cmd;
5291 	obj->wait_comp = ecore_func_wait_comp;
5292 	obj->drv = drv_iface;
5293 }
5294 
5295 /**
5296  * ecore_func_state_change - perform Function state change transition
5297  *
5298  * @sc:		device handle
5299  * @params:	parameters to perform the transaction
5300  *
5301  * returns 0 in case of successfully completed transition,
5302  *         negative error code in case of failure, positive
5303  *         (EBUSY) value if there is a completion that is
5304  *         still pending (possible only if RAMROD_COMP_WAIT is
5305  *         not set in params->ramrod_flags for asynchronous
5306  *         commands).
5307  */
5308 int ecore_func_state_change(struct bnx2x_softc *sc,
5309 			    struct ecore_func_state_params *params)
5310 {
5311 	struct ecore_func_sp_obj *o = params->f_obj;
5312 	int rc, cnt = 300;
5313 	enum ecore_func_cmd cmd = params->cmd;
5314 	uint32_t *pending = &o->pending;
5315 
5316 	ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5317 
5318 	/* Check that the requested transition is legal */
5319 	rc = o->check_transition(sc, o, params);
5320 	if ((rc == ECORE_BUSY) &&
5321 	    (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
5322 		while ((rc == ECORE_BUSY) && (--cnt > 0)) {
5323 			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5324 			ECORE_MSLEEP(10);
5325 			ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5326 			rc = o->check_transition(sc, o, params);
5327 		}
5328 		if (rc == ECORE_BUSY) {
5329 			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5330 			PMD_DRV_LOG(ERR, sc,
5331 				    "timeout waiting for previous ramrod completion");
5332 			return rc;
5333 		}
5334 	} else if (rc) {
5335 		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5336 		return rc;
5337 	}
5338 
5339 	/* Set "pending" bit */
5340 	ECORE_SET_BIT(cmd, pending);
5341 
5342 	/* Don't send a command if only driver cleanup was requested */
5343 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5344 		ecore_func_state_change_comp(sc, o, cmd);
5345 		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5346 	} else {
5347 		/* Send a ramrod */
5348 		rc = o->send_cmd(sc, params);
5349 
5350 		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5351 
5352 		if (rc) {
5353 			o->next_state = ECORE_F_STATE_MAX;
5354 			ECORE_CLEAR_BIT(cmd, pending);
5355 			ECORE_SMP_MB_AFTER_CLEAR_BIT();
5356 			return rc;
5357 		}
5358 
5359 		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5360 			rc = o->wait_comp(sc, o, cmd);
5361 			if (rc)
5362 				return rc;
5363 
5364 			return ECORE_SUCCESS;
5365 		}
5366 	}
5367 
5368 	return ECORE_RET_PENDING(cmd, pending);
5369 }
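
/* Illustrative sketch (not part of the driver): a synchronous
 * FUNCTION_START request. With RAMROD_COMP_WAIT set, the call blocks in
 * o->wait_comp() until the ramrod completion arrives instead of
 * returning a "pending" indication. The helper name and parameter
 * values are hypothetical.
 */
#if 0
static int example_func_start(struct bnx2x_softc *sc,
			      struct ecore_func_sp_obj *f_obj)
{
	struct ecore_func_state_params func_params;

	ECORE_MEMSET(&func_params, 0, sizeof(func_params));
	func_params.f_obj = f_obj;
	func_params.cmd = ECORE_F_CMD_START;
	func_params.params.start.mf_mode = 0;		/* single-function */
	func_params.params.start.network_cos_mode = 0;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return ecore_func_state_change(sc, &func_params);
}
#endif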
5370 
5371 /******************************************************************************
5372  * Description:
5373  *	   Calculates CRC-8 on a 32-bit word: polynomial 0-1-2-8
5374  *	   (x^8 + x^2 + x + 1). Code was translated from Verilog.
5375  * Return:  the updated CRC-8 value
5376  *****************************************************************************/
5377 uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
5378 {
5379 	uint8_t D[32];
5380 	uint8_t NewCRC[8];
5381 	uint8_t C[8];
5382 	uint8_t crc_res;
5383 	uint8_t i;
5384 
5385 	/* split the data into 32 bits */
5386 	for (i = 0; i < 32; i++) {
5387 		D[i] = (uint8_t) (data & 1);
5388 		data = data >> 1;
5389 	}
5390 
5391 	/* split the crc into 8 bits */
5392 	for (i = 0; i < 8; i++) {
5393 		C[i] = crc & 1;
5394 		crc = crc >> 1;
5395 	}
5396 
5397 	NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5398 	    D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5399 	    C[6] ^ C[7];
5400 	NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5401 	    D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5402 	    D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
5403 	NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5404 	    D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5405 	    C[0] ^ C[1] ^ C[4] ^ C[5];
5406 	NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5407 	    D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5408 	    C[1] ^ C[2] ^ C[5] ^ C[6];
5409 	NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5410 	    D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5411 	    C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5412 	NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5413 	    D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5414 	    C[3] ^ C[4] ^ C[7];
5415 	NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5416 	    D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
5417 	NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5418 	    D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];
5419 
5420 	crc_res = 0;
5421 	for (i = 0; i < 8; i++) {
5422 		crc_res |= (NewCRC[i] << i);
5423 	}
5424 
5425 	return crc_res;
5426 }
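
/* Illustrative sketch (not part of the driver): a compact serial
 * (MSB-first Galois LFSR) form of the unrolled XOR network above.
 * Polynomial 0-1-2-8 is x^8 + x^2 + x + 1, i.e. 0x07. Spot checks
 * against the network agree, but the unrolled form remains the
 * reference implementation.
 */
#if 0
static uint8_t example_calc_crc8_serial(uint32_t data, uint8_t crc)
{
	int i;

	/* feed the 32 data bits into the LFSR, MSB first */
	for (i = 31; i >= 0; i--) {
		uint8_t fb = (uint8_t)(((data >> i) ^ (crc >> 7)) & 1);

		crc = (uint8_t)(crc << 1);
		if (fb)
			crc ^= 0x07;
	}

	return crc;
}
#endif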
5427 
5428 uint32_t
5429 ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
5430 {
5431 	int i;
5432 	while (len--) {
5433 		crc ^= *p++;
5434 		for (i = 0; i < 8; i++)
5435 			crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);
5436 	}
5437 	return crc;
5438 }
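
/* Illustrative sketch (not part of the driver): ecore_calc_crc32() is a
 * reflected, bit-at-a-time CRC with a caller-supplied polynomial. With
 * magic = 0xEDB88320 (the reflected IEEE 802.3 polynomial) plus the
 * usual pre- and post-inversion it yields a standard CRC-32; the helper
 * name is hypothetical.
 */
#if 0
static uint32_t example_crc32_ieee(const uint8_t *buf, uint32_t len)
{
	return ~ecore_calc_crc32(~0U, buf, len, 0xEDB88320);
}
#endif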
5439