xref: /dpdk/drivers/net/qede/base/ecore_spq.c (revision d80e42cce4c7017ed8c99dabb8ae444a492acc1c)
1 /*
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8 
9 #include "bcm_osal.h"
10 #include "reg_addr.h"
11 #include "ecore_gtt_reg_addr.h"
12 #include "ecore_hsi_common.h"
13 #include "ecore.h"
14 #include "ecore_sp_api.h"
15 #include "ecore_spq.h"
16 #include "ecore_iro.h"
17 #include "ecore_init_fw_funcs.h"
18 #include "ecore_cxt.h"
19 #include "ecore_int.h"
20 #include "ecore_dev_api.h"
21 #include "ecore_mcp.h"
22 #include "ecore_hw.h"
23 #include "ecore_sriov.h"
24 
25 /***************************************************************************
26  * Structures & Definitions
27  ***************************************************************************/
28 
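/* Number of SPQ ring elements kept in reserve for high-priority ramrods;
 * ecore_spq_post_list() stops draining the pending list once only this many
 * elements remain free in the chain.
 */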
29 #define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
30 
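/* The blocking (EBLOCK/BLOCK) wait runs in two phases - first a short
 * busy-wait poll (SPQ_BLOCK_DELAY_MAX_ITER iterations of SPQ_BLOCK_DELAY_US),
 * then a sleeping poll (by default SPQ_BLOCK_SLEEP_MAX_ITER iterations of
 * SPQ_BLOCK_SLEEP_MS, i.e. roughly one second). The sleeping iteration count
 * can be overridden via ecore_set_spq_block_timeout().
 */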
31 #define SPQ_BLOCK_DELAY_MAX_ITER	(10)
32 #define SPQ_BLOCK_DELAY_US		(10)
33 #define SPQ_BLOCK_SLEEP_MAX_ITER	(200)
34 #define SPQ_BLOCK_SLEEP_MS		(5)
35 
36 /***************************************************************************
37  * Blocking Implementation (BLOCK/EBLOCK mode)
38  ***************************************************************************/
39 static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
40 				  union event_ring_data OSAL_UNUSED * data,
41 				  u8 fw_return_code)
42 {
43 	struct ecore_spq_comp_done *comp_done;
44 
45 	comp_done = (struct ecore_spq_comp_done *)cookie;
46 
47 	comp_done->done = 0x1;
48 	comp_done->fw_return_code = fw_return_code;
49 
50 	/* make update visible to waiting thread */
51 	OSAL_SMP_WMB(p_hwfn->p_dev);
52 }
53 
54 static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
55 					      struct ecore_spq_entry *p_ent,
56 					      u8 *p_fw_ret,
57 					      bool sleep_between_iter)
58 {
59 	struct ecore_spq_comp_done *comp_done;
60 	u32 iter_cnt;
61 
62 	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
63 	iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
64 				      : SPQ_BLOCK_DELAY_MAX_ITER;
65 #ifndef ASIC_ONLY
66 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
67 		iter_cnt *= 5;
68 #endif
69 
70 	while (iter_cnt--) {
71 		OSAL_POLL_MODE_DPC(p_hwfn);
72 		OSAL_SMP_RMB(p_hwfn->p_dev);
73 		if (comp_done->done == 1) {
74 			if (p_fw_ret)
75 				*p_fw_ret = comp_done->fw_return_code;
76 			return ECORE_SUCCESS;
77 		}
78 
79 		if (sleep_between_iter)
80 			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
81 		else
82 			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
83 	}
84 
85 	return ECORE_TIMEOUT;
86 }
87 
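/* Wait for a ramrod completion with an escalating strategy: an optional quick
 * busy-wait poll, then a sleeping poll, then an MCP drain request followed by
 * one last sleeping poll. If the ramrod still hasn't completed, the failure
 * is reported via ecore_hw_err_notify(ECORE_HW_ERR_RAMROD_FAIL).
 */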
88 static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
89 					    struct ecore_spq_entry *p_ent,
90 					    u8 *p_fw_ret, bool skip_quick_poll)
91 {
92 	struct ecore_spq_comp_done *comp_done;
93 	struct ecore_ptt *p_ptt;
94 	enum _ecore_status_t rc;
95 
96 	/* A relatively short polling period w/o sleeping, to allow the FW to
97 	 * complete the ramrod and thus possibly to avoid the following sleeps.
98 	 */
99 	if (!skip_quick_poll) {
100 		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
101 		if (rc == ECORE_SUCCESS)
102 			return ECORE_SUCCESS;
103 	}
104 
105 	/* Move to polling with a sleeping period between iterations */
106 	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
107 	if (rc == ECORE_SUCCESS)
108 		return ECORE_SUCCESS;
109 
110 	p_ptt = ecore_ptt_acquire(p_hwfn);
111 	if (!p_ptt)
112 		return ECORE_AGAIN;
113 
114 	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
115 	rc = ecore_mcp_drain(p_hwfn, p_ptt);
116 	ecore_ptt_release(p_hwfn, p_ptt);
117 	if (rc != ECORE_SUCCESS) {
118 		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
119 		goto err;
120 	}
121 
122 	/* Retry after drain */
123 	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
124 	if (rc == ECORE_SUCCESS)
125 		return ECORE_SUCCESS;
126 
127 	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
128 	if (comp_done->done == 1) {
129 		if (p_fw_ret)
130 			*p_fw_ret = comp_done->fw_return_code;
131 		return ECORE_SUCCESS;
132 	}
133 err:
134 	DP_NOTICE(p_hwfn, true,
135 		  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
136 		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
137 		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
138 		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
139 
140 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
141 
142 	return ECORE_BUSY;
143 }
144 
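/* Convert a caller-supplied timeout [ms] into a number of sleeping poll
 * iterations of SPQ_BLOCK_SLEEP_MS each; a zero timeout restores the default
 * of SPQ_BLOCK_SLEEP_MAX_ITER iterations.
 */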
145 void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
146 				 u32 spq_timeout_ms)
147 {
148 	p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
149 		spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
150 		SPQ_BLOCK_SLEEP_MAX_ITER;
151 }
152 
153 /***************************************************************************
154  * SPQ entries inner API
155  ***************************************************************************/
156 static enum _ecore_status_t
157 ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
158 {
159 	p_ent->flags = 0;
160 
161 	switch (p_ent->comp_mode) {
162 	case ECORE_SPQ_MODE_EBLOCK:
163 	case ECORE_SPQ_MODE_BLOCK:
164 		p_ent->comp_cb.function = ecore_spq_blocking_cb;
165 		break;
166 	case ECORE_SPQ_MODE_CB:
167 		break;
168 	default:
169 		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
170 			  p_ent->comp_mode);
171 		return ECORE_INVAL;
172 	}
173 
174 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
175 		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
176 		   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
177 		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
178 		   p_ent->elem.hdr.protocol_id,
179 		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
180 		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
181 			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
182 			   "MODE_CB"));
183 
184 	return ECORE_SUCCESS;
185 }
186 
187 /***************************************************************************
188  * HSI access
189  ***************************************************************************/
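/* Program the core connection context of the SPQ CID: enable the required
 * XSTORM aggregation flags, select the PQ_FLAGS_LB physical queue, and write
 * the SPQ chain and ConsQ chain base addresses into the context.
 */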
190 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
191 				    struct ecore_spq *p_spq)
192 {
193 	struct e4_core_conn_context *p_cxt;
194 	struct ecore_cxt_info cxt_info;
195 	u16 physical_q;
196 	enum _ecore_status_t rc;
197 
198 	cxt_info.iid = p_spq->cid;
199 
200 	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
201 
202 	if (rc < 0) {
203 		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
204 			  p_spq->cid);
205 		return;
206 	}
207 
208 	p_cxt = cxt_info.p_cxt;
209 
210 	/* @@@TBD we zero the context until we have ilt_reset implemented. */
211 	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
212 
213 	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
214 		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
215 			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
216 		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
217 			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
218 		/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
219 		 *	  E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
220 		 */
221 		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
222 			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
223 	}
224 
225 	/* CDU validation - FIXME currently disabled */
226 
227 	/* QM physical queue */
228 	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
229 	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
230 
231 	p_cxt->xstorm_st_context.spq_base_lo =
232 	    DMA_LO_LE(p_spq->chain.p_phys_addr);
233 	p_cxt->xstorm_st_context.spq_base_hi =
234 	    DMA_HI_LE(p_spq->chain.p_phys_addr);
235 
236 	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
237 		       p_hwfn->p_consq->chain.p_phys_addr);
238 }
239 
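/* Post a single SPQ entry to the hardware: the ramrod's echo is set to the
 * current chain producer index (which ecore_spq_completion() later uses to
 * match the EQE back to this entry), the SPQE is copied into the chain, and
 * the XCM doorbell is rung with the new producer value.
 */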
240 static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
241 					      struct ecore_spq *p_spq,
242 					      struct ecore_spq_entry *p_ent)
243 {
244 	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
245 	struct core_db_data *p_db_data = &p_spq->db_data;
246 	u16 echo = ecore_chain_get_prod_idx(p_chain);
247 	struct slow_path_element *elem;
248 
249 	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
250 	elem = ecore_chain_produce(p_chain);
251 	if (!elem) {
252 		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
253 		return ECORE_INVAL;
254 	}
255 
256 	*elem = p_ent->elem;	/* Struct assignment */
257 
258 	p_db_data->spq_prod =
259 		OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
260 
261 	/* Make sure the SPQE is updated before the doorbell */
262 	OSAL_WMB(p_hwfn->p_dev);
263 
264 	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
265 
266 	/* Make sure the doorbell is rung */
267 	OSAL_WMB(p_hwfn->p_dev);
268 
269 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
270 		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
271 		   " agg_params: %02x, prod: %04x\n",
272 		   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
273 		   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
274 
275 	return ECORE_SUCCESS;
276 }
277 
278 /***************************************************************************
279  * Asynchronous events
280  ***************************************************************************/
281 
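/* Asynchronous EQ events (those which do not complete a posted ramrod) are
 * dispatched to a per-protocol callback registered via
 * ecore_spq_register_async_cb() below.
 */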
282 static enum _ecore_status_t
283 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
284 			     struct event_ring_entry *p_eqe)
285 {
286 	ecore_spq_async_comp_cb cb;
287 
288 	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
289 		return ECORE_INVAL;
290 
291 	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
292 	if (cb) {
293 		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
294 			  &p_eqe->data, p_eqe->fw_return_code);
295 	} else {
296 		DP_NOTICE(p_hwfn,
297 			  true, "Unknown Async completion for protocol: %d\n",
298 			  p_eqe->protocol_id);
299 		return ECORE_INVAL;
300 	}
301 }
302 
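/* A protocol module registers its async-event handler once at init time.
 * Minimal illustrative sketch - the handler name is hypothetical, while the
 * expected signature follows the call made in ecore_async_event_completion()
 * above:
 *
 *	static enum _ecore_status_t
 *	my_async_cb(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo,
 *		    union event_ring_data *data, u8 fw_return_code);
 *
 *	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, my_async_cb);
 */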
303 enum _ecore_status_t
304 ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
305 			    enum protocol_type protocol_id,
306 			    ecore_spq_async_comp_cb cb)
307 {
308 	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
309 		return ECORE_INVAL;
310 
311 	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
312 	return ECORE_SUCCESS;
313 }
314 
315 void
316 ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
317 			      enum protocol_type protocol_id)
318 {
319 	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
320 		return;
321 
322 	p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
323 }
324 
325 /***************************************************************************
326  * EQ API
327  ***************************************************************************/
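/* Update the per-PF USTORM EQE consumer slot with the given index. Despite
 * the "prod" name, the caller passes its chain producer index, which tracks
 * consumption because every consumed EQE is immediately recycled (see
 * ecore_eq_completion() below).
 */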
328 void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
329 {
330 	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
331 	    USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
332 
333 	REG_WR16(p_hwfn, addr, prod);
334 
335 	/* keep prod updates ordered */
336 	OSAL_MMIOWB(p_hwfn->p_dev);
337 }
338 
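/* EQ handler, registered on the slowpath status block in ecore_eq_alloc():
 * snapshot the firmware consumer index, skip the unusable page-boundary
 * elements of the PBL chain if needed, then consume and dispatch each EQE -
 * async events go to the per-protocol callbacks, anything else completes a
 * pending ramrod via ecore_spq_completion(). Finally, ecore_eq_prod_update()
 * publishes the progress back to firmware.
 */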
339 enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
340 					 void *cookie)
341 {
342 	struct ecore_eq *p_eq = cookie;
343 	struct ecore_chain *p_chain = &p_eq->chain;
344 	enum _ecore_status_t rc = 0;
345 
346 	/* take a snapshot of the FW consumer */
347 	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
348 
349 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
350 
351 	/* Need to guarantee that the fw_cons index we use points to a usable
352 	 * chain element, so that the chain macros work correctly.
353 	 */
354 	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
355 	    ecore_chain_get_usable_per_page(p_chain)) {
356 		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
357 	}
358 
359 	/* Complete current segment of eq entries */
360 	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
361 		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
362 		if (!p_eqe) {
363 			rc = ECORE_INVAL;
364 			break;
365 		}
366 
367 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
368 			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
369 			   p_eqe->opcode,	     /* Event Opcode */
370 			   p_eqe->protocol_id,	/* Event Protocol ID */
371 			   p_eqe->reserved0,	/* Reserved */
372 			   /* Echo value from ramrod data on the host */
373 			   OSAL_LE16_TO_CPU(p_eqe->echo),
374 			   p_eqe->fw_return_code,    /* FW return code for SP
375 						      * ramrods
376 						      */
377 			   p_eqe->flags);
378 
379 		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
380 			if (ecore_async_event_completion(p_hwfn, p_eqe))
381 				rc = ECORE_INVAL;
382 		} else if (ecore_spq_completion(p_hwfn,
383 						p_eqe->echo,
384 						p_eqe->fw_return_code,
385 						&p_eqe->data)) {
386 			rc = ECORE_INVAL;
387 		}
388 
389 		ecore_chain_recycle_consumed(p_chain);
390 	}
391 
392 	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
393 
394 	return rc;
395 }
396 
397 enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
398 {
399 	struct ecore_eq *p_eq;
400 
401 	/* Allocate EQ struct */
402 	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
403 	if (!p_eq) {
404 		DP_NOTICE(p_hwfn, false,
405 			  "Failed to allocate `struct ecore_eq'\n");
406 		return ECORE_NOMEM;
407 	}
408 
409 	/* Allocate and initialize EQ chain */
410 	if (ecore_chain_alloc(p_hwfn->p_dev,
411 			      ECORE_CHAIN_USE_TO_PRODUCE,
412 			      ECORE_CHAIN_MODE_PBL,
413 			      ECORE_CHAIN_CNT_TYPE_U16,
414 			      num_elem,
415 			      sizeof(union event_ring_element),
416 			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
417 		DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
418 		goto eq_allocate_fail;
419 	}
420 
421 	/* register EQ completion on the SP SB */
422 	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
423 			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
424 
425 	p_hwfn->p_eq = p_eq;
426 	return ECORE_SUCCESS;
427 
428 eq_allocate_fail:
429 	OSAL_FREE(p_hwfn->p_dev, p_eq);
430 	return ECORE_NOMEM;
431 }
432 
433 void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
434 {
435 	ecore_chain_reset(&p_hwfn->p_eq->chain);
436 }
437 
438 void ecore_eq_free(struct ecore_hwfn *p_hwfn)
439 {
440 	if (!p_hwfn->p_eq)
441 		return;
442 
443 	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
444 
445 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
446 	p_hwfn->p_eq = OSAL_NULL;
447 }
448 
449 /***************************************************************************
450  * CQE API - slow path CQE completion handling
451  ***************************************************************************/
452 static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
453 						 struct eth_slow_path_rx_cqe
454 						 *cqe,
455 						 enum protocol_type protocol)
456 {
457 	if (IS_VF(p_hwfn->p_dev))
458 		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
459 
460 	/* @@@tmp - it's possible we'll eventually want to handle some
461 	 * actual commands that can arrive here, but for now this is only
462 	 * used to complete the ramrod using the echo value on the cqe
463 	 */
464 	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
465 }
466 
467 enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
468 					      struct eth_slow_path_rx_cqe *cqe)
469 {
470 	enum _ecore_status_t rc;
471 
472 	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
473 	if (rc) {
474 		DP_NOTICE(p_hwfn, true,
475 			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
476 			  cqe->ramrod_cmd_id);
477 	}
478 
479 	return rc;
480 }
481 
482 /***************************************************************************
483  * Slow hwfn Queue (spq)
484  ***************************************************************************/
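/* (Re)initialize an already-allocated SPQ: build the free pool from the
 * pre-allocated entry array (each entry's data pointer is set to the DMA
 * address of its embedded ramrod data), reset the statistics and completion
 * bitmap, acquire a CORE-protocol CID, program the connection context,
 * prepare the doorbell data and register the doorbell address with the
 * doorbell recovery mechanism.
 */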
485 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
486 {
487 	struct ecore_spq *p_spq = p_hwfn->p_spq;
488 	struct ecore_spq_entry *p_virt = OSAL_NULL;
489 	struct core_db_data *p_db_data;
490 	void OSAL_IOMEM *db_addr;
491 	dma_addr_t p_phys = 0;
492 	u32 i, capacity;
493 	enum _ecore_status_t rc;
494 
495 	OSAL_LIST_INIT(&p_spq->pending);
496 	OSAL_LIST_INIT(&p_spq->completion_pending);
497 	OSAL_LIST_INIT(&p_spq->free_pool);
498 	OSAL_LIST_INIT(&p_spq->unlimited_pending);
499 	OSAL_SPIN_LOCK_INIT(&p_spq->lock);
500 
501 	/* SPQ empty pool */
502 	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
503 	p_virt = p_spq->p_virt;
504 
505 	capacity = ecore_chain_get_capacity(&p_spq->chain);
506 	for (i = 0; i < capacity; i++) {
507 		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
508 
509 		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
510 
511 		p_virt++;
512 		p_phys += sizeof(struct ecore_spq_entry);
513 	}
514 
515 	/* Statistics */
516 	p_spq->normal_count = 0;
517 	p_spq->comp_count = 0;
518 	p_spq->comp_sent_count = 0;
519 	p_spq->unlimited_pending_count = 0;
520 
521 	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
522 		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
523 	p_spq->comp_bitmap_idx = 0;
524 
525 	/* SPQ cid, cannot fail */
526 	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
527 	ecore_spq_hw_initialize(p_hwfn, p_spq);
528 
529 	/* reset the chain itself */
530 	ecore_chain_reset(&p_spq->chain);
531 
532 	/* Initialize the address/data of the SPQ doorbell */
533 	p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
534 	p_db_data = &p_spq->db_data;
535 	OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
536 	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
537 	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
538 	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
539 		  DQ_XCM_CORE_SPQ_PROD_CMD);
540 	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
541 
542 	/* Register the SPQ doorbell with the doorbell recovery mechanism */
543 	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
544 	rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
545 				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
546 	if (rc != ECORE_SUCCESS)
547 		DP_INFO(p_hwfn,
548 			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
549 }
550 
551 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
552 {
553 	struct ecore_spq_entry *p_virt = OSAL_NULL;
554 	struct ecore_spq *p_spq = OSAL_NULL;
555 	dma_addr_t p_phys = 0;
556 	u32 capacity;
557 
558 	/* SPQ struct */
559 	p_spq =
560 	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
561 	if (!p_spq) {
562 		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
563 		return ECORE_NOMEM;
564 	}
565 
566 	/* SPQ ring  */
567 	if (ecore_chain_alloc(p_hwfn->p_dev,
568 			      ECORE_CHAIN_USE_TO_PRODUCE,
569 			      ECORE_CHAIN_MODE_SINGLE,
570 			      ECORE_CHAIN_CNT_TYPE_U16,
571 			      0, /* N/A when the mode is SINGLE */
572 			      sizeof(struct slow_path_element),
573 			      &p_spq->chain, OSAL_NULL)) {
574 		DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
575 		goto spq_allocate_fail;
576 	}
577 
578 	/* allocate and fill the SPQ elements (incl. ramrod data list) */
579 	capacity = ecore_chain_get_capacity(&p_spq->chain);
580 	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
581 					 capacity *
582 					 sizeof(struct ecore_spq_entry));
583 	if (!p_virt)
584 		goto spq_allocate_fail;
585 
586 	p_spq->p_virt = p_virt;
587 	p_spq->p_phys = p_phys;
588 
589 #ifdef CONFIG_ECORE_LOCK_ALLOC
590 	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
591 		goto spq_allocate_fail;
592 #endif
593 
594 	p_hwfn->p_spq = p_spq;
595 	return ECORE_SUCCESS;
596 
597 spq_allocate_fail:
598 	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
599 	OSAL_FREE(p_hwfn->p_dev, p_spq);
600 	return ECORE_NOMEM;
601 }
602 
603 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
604 {
605 	struct ecore_spq *p_spq = p_hwfn->p_spq;
606 	void OSAL_IOMEM *db_addr;
607 	u32 capacity;
608 
609 	if (!p_spq)
610 		return;
611 
612 	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
613 	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
614 	ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
615 
616 	if (p_spq->p_virt) {
617 		capacity = ecore_chain_get_capacity(&p_spq->chain);
618 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
619 				       p_spq->p_virt,
620 				       p_spq->p_phys,
621 				       capacity *
622 				       sizeof(struct ecore_spq_entry));
623 	}
624 
625 	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
626 #ifdef CONFIG_ECORE_LOCK_ALLOC
627 	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
628 #endif
629 
630 	OSAL_FREE(p_hwfn->p_dev, p_spq);
631 }
632 
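/* Get an SPQ entry to be filled by the caller and later submitted via
 * ecore_spq_post(). Entries normally come from the free pool; if the pool is
 * exhausted, a one-off entry is allocated and marked for the
 * unlimited_pending queue instead.
 */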
633 enum _ecore_status_t
634 ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
635 {
636 	struct ecore_spq *p_spq = p_hwfn->p_spq;
637 	struct ecore_spq_entry *p_ent = OSAL_NULL;
638 	enum _ecore_status_t rc = ECORE_SUCCESS;
639 
640 	OSAL_SPIN_LOCK(&p_spq->lock);
641 
642 	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
643 		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
644 		if (!p_ent) {
645 			DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
646 			rc = ECORE_NOMEM;
647 			goto out_unlock;
648 		}
649 		p_ent->queue = &p_spq->unlimited_pending;
650 	} else {
651 		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
652 					      struct ecore_spq_entry, list);
653 		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
654 		p_ent->queue = &p_spq->pending;
655 	}
656 
657 	*pp_ent = p_ent;
658 
659 out_unlock:
660 	OSAL_SPIN_UNLOCK(&p_spq->lock);
661 	return rc;
662 }
663 
664 /* Locked variant; Should be called while the SPQ lock is taken */
665 static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
666 				     struct ecore_spq_entry *p_ent)
667 {
668 	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
669 }
670 
671 void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
672 			    struct ecore_spq_entry *p_ent)
673 {
674 	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
675 	__ecore_spq_return_entry(p_hwfn, p_ent);
676 	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
677 }
678 
679 /**
680  * @brief ecore_spq_add_entry - adds a new entry to the pending
681  *        list. Should be called while the SPQ lock is held.
682  *
683  * Adds an entry to the pending list if there is room (an empty
684  * element is available in the free_pool), or else places the
685  * entry in the unlimited_pending pool.
686  *
687  * @param p_hwfn
688  * @param p_ent
689  * @param priority
690  *
691  * @return enum _ecore_status_t
692  */
693 static enum _ecore_status_t
694 ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
695 		    struct ecore_spq_entry *p_ent, enum spq_priority priority)
696 {
697 	struct ecore_spq *p_spq = p_hwfn->p_spq;
698 
699 	if (p_ent->queue == &p_spq->unlimited_pending) {
700 		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
701 			OSAL_LIST_PUSH_TAIL(&p_ent->list,
702 					    &p_spq->unlimited_pending);
703 			p_spq->unlimited_pending_count++;
704 
705 			return ECORE_SUCCESS;
706 
707 		} else {
708 			struct ecore_spq_entry *p_en2;
709 
710 			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
711 						     struct ecore_spq_entry,
712 						     list);
713 			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
714 
715 			/* Copy the ring element physical pointer to the new
716 			 * entry, since we are about to overwrite the entire ring
717 			 * entry and don't want to lose the pointer.
718 			 */
719 			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
720 
721 			*p_en2 = *p_ent;
722 
723 			/* EBLOCK is responsible for freeing the allocated p_ent */
724 			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
725 				OSAL_FREE(p_hwfn->p_dev, p_ent);
726 
727 			p_ent = p_en2;
728 		}
729 	}
730 
731 	/* entry is to be placed in 'pending' queue */
732 	switch (priority) {
733 	case ECORE_SPQ_PRIORITY_NORMAL:
734 		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
735 		p_spq->normal_count++;
736 		break;
737 	case ECORE_SPQ_PRIORITY_HIGH:
738 		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
739 		p_spq->high_count++;
740 		break;
741 	default:
742 		return ECORE_INVAL;
743 	}
744 
745 	return ECORE_SUCCESS;
746 }
747 
748 /***************************************************************************
749  * Accessor
750  ***************************************************************************/
751 
752 u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
753 {
754 	if (!p_hwfn->p_spq)
755 		return 0xffffffff;	/* illegal */
756 	return p_hwfn->p_spq->cid;
757 }
758 
759 /***************************************************************************
760  * Posting new Ramrods
761  ***************************************************************************/
762 
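/* Post as many entries from the given list as possible to the SPQ chain,
 * while keeping 'keep_reserve' ring elements free (room for a high-priority
 * ramrod). Posted entries move to the completion_pending list until their
 * completion EQE arrives.
 */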
763 static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
764 						osal_list_t *head,
765 						u32 keep_reserve)
766 {
767 	struct ecore_spq *p_spq = p_hwfn->p_spq;
768 	enum _ecore_status_t rc;
769 
770 	/* TODO - implementation might be wasteful; will always keep room
771 	 * for an additional high priority ramrod (even if one is already
772 	 * pending in the FW)
773 	 */
774 	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
775 	       !OSAL_LIST_IS_EMPTY(head)) {
776 		struct ecore_spq_entry *p_ent =
777 		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
778 		if (p_ent != OSAL_NULL) {
779 #if defined(_NTDDK_)
780 #pragma warning(suppress : 6011 28182)
781 #endif
782 			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
783 			OSAL_LIST_PUSH_TAIL(&p_ent->list,
784 					    &p_spq->completion_pending);
785 			p_spq->comp_sent_count++;
786 
787 			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
788 			if (rc) {
789 				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
790 						    &p_spq->completion_pending);
791 				__ecore_spq_return_entry(p_hwfn, p_ent);
792 				return rc;
793 			}
794 		}
795 	}
796 
797 	return ECORE_SUCCESS;
798 }
799 
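/* Refill the pending list from the unlimited_pending list (as long as
 * free-pool entries are available to back them), then post the pending list
 * while keeping the default high-priority reserve.
 */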
800 static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
801 {
802 	struct ecore_spq *p_spq = p_hwfn->p_spq;
803 	struct ecore_spq_entry *p_ent = OSAL_NULL;
804 
805 	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
806 		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
807 			break;
808 
809 		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
810 					      struct ecore_spq_entry, list);
811 		if (!p_ent)
812 			return ECORE_INVAL;
813 
814 #if defined(_NTDDK_)
815 #pragma warning(suppress : 6011)
816 #endif
817 		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
818 
819 		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
820 	}
821 
822 	return ecore_spq_post_list(p_hwfn,
823 				 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
824 }
825 
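/* Submit a previously acquired and filled SPQ entry. The entry is queued,
 * the pending list is drained into the chain, and for ECORE_SPQ_MODE_EBLOCK
 * the call also blocks until the ramrod completes (or is declared stuck).
 * Minimal illustrative usage sketch - real callers live in the higher-level
 * ecore slowpath code, and the ramrod-specific setup is omitted here:
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	u8 fw_ret;
 *
 *	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
 *	... fill p_ent->elem.hdr and the ramrod data ...
 *	rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 */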
826 enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
827 				    struct ecore_spq_entry *p_ent,
828 				    u8 *fw_return_code)
829 {
830 	enum _ecore_status_t rc = ECORE_SUCCESS;
831 	struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
832 	bool b_ret_ent = true;
833 
834 	if (!p_hwfn)
835 		return ECORE_INVAL;
836 
837 	if (!p_ent) {
838 		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
839 		return ECORE_INVAL;
840 	}
841 
842 	if (p_hwfn->p_dev->recov_in_prog) {
843 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
844 			   "Recovery is in progress -> skip spq post"
845 			   " [cmd %02x protocol %02x]\n",
846 			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
847 		/* Return success to let the flows complete successfully
848 		 * w/o any error handling.
849 		 */
850 		return ECORE_SUCCESS;
851 	}
852 
853 	OSAL_SPIN_LOCK(&p_spq->lock);
854 
855 	/* Complete the entry */
856 	rc = ecore_spq_fill_entry(p_hwfn, p_ent);
857 
858 	/* Check return value after LOCK is taken for cleaner error flow */
859 	if (rc)
860 		goto spq_post_fail;
861 
862 	/* Add the request to the pending queue */
863 	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
864 	if (rc)
865 		goto spq_post_fail;
866 
867 	rc = ecore_spq_pend_post(p_hwfn);
868 	if (rc) {
869 		/* Since it's possible that the pending post failed for a
870 		 * different entry [although unlikely], the failed entry was
871 		 * already dealt with; no need to return it here.
872 		 */
873 		b_ret_ent = false;
874 		goto spq_post_fail;
875 	}
876 
877 	OSAL_SPIN_UNLOCK(&p_spq->lock);
878 
879 	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
880 		/* For entries in ECORE BLOCK mode, the completion code cannot
881 		 * perform the necessary cleanup - if it did, we couldn't
882 		 * access p_ent here to see whether it's successful or not.
883 		 * Thus, after gaining the answer perform the cleanup here.
884 		 */
885 		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
886 				     p_ent->queue == &p_spq->unlimited_pending);
887 
888 		if (p_ent->queue == &p_spq->unlimited_pending) {
889 			/* This is an allocated p_ent which does not need to
890 			 * be returned to the pool.
891 			 */
892 			OSAL_FREE(p_hwfn->p_dev, p_ent);
893 
894 			/* TBD: handle error flow and remove p_ent from
895 			 * completion pending
896 			 */
897 			return rc;
898 		}
899 
900 		if (rc)
901 			goto spq_post_fail2;
902 
903 		/* return to pool */
904 		ecore_spq_return_entry(p_hwfn, p_ent);
905 	}
906 	return rc;
907 
908 spq_post_fail2:
909 	OSAL_SPIN_LOCK(&p_spq->lock);
910 	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
911 	ecore_chain_return_produced(&p_spq->chain);
912 
913 spq_post_fail:
914 	/* return to the free pool */
915 	if (b_ret_ent)
916 		__ecore_spq_return_entry(p_hwfn, p_ent);
917 	OSAL_SPIN_UNLOCK(&p_spq->lock);
918 
919 	return rc;
920 }
921 
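/* Called from EQ processing when an EQE completes a posted ramrod. The EQE's
 * echo identifies the entry on the completion_pending list. Since EQEs may
 * arrive out of order, each completed echo is marked in a bitmap and chain
 * elements are only returned for the longest run of consecutively completed
 * entries, preventing new SPQEs from overwriting ones still in flight.
 */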
922 enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
923 					  __le16 echo,
924 					  u8 fw_return_code,
925 					  union event_ring_data *p_data)
926 {
927 	struct ecore_spq *p_spq;
928 	struct ecore_spq_entry *p_ent = OSAL_NULL;
929 	struct ecore_spq_entry *tmp;
930 	struct ecore_spq_entry *found = OSAL_NULL;
931 	enum _ecore_status_t rc;
932 
933 	if (!p_hwfn)
934 		return ECORE_INVAL;
935 
936 	p_spq = p_hwfn->p_spq;
937 	if (!p_spq)
938 		return ECORE_INVAL;
939 
940 	OSAL_SPIN_LOCK(&p_spq->lock);
941 	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
942 				      tmp,
943 				      &p_spq->completion_pending,
944 				      list, struct ecore_spq_entry) {
945 		if (p_ent->elem.hdr.echo == echo) {
946 			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
947 					       &p_spq->completion_pending);
948 
949 			/* Avoid overriding of SPQ entries when getting
950 			 * out-of-order completions, by marking the completions
951 			 * in a bitmap and increasing the chain consumer only
952 			 * for the first successive completed entries.
953 			 */
954 			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
955 			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
956 						      p_spq->comp_bitmap_idx)) {
957 				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
958 							p_spq->comp_bitmap_idx);
959 				p_spq->comp_bitmap_idx++;
960 				ecore_chain_return_produced(&p_spq->chain);
961 			}
962 
963 			p_spq->comp_count++;
964 			found = p_ent;
965 			break;
966 		}
967 
968 		/* This is debug and should be relatively uncommon - depends
969 		 * on scenarios which have multiple per-PF sent ramrods.
970 		 */
971 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
972 			   "Got completion for echo %04x - doesn't match"
973 			   " echo %04x in completion pending list\n",
974 			   OSAL_LE16_TO_CPU(echo),
975 			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
976 	}
977 
978 	/* Release lock before callback, as callback may post
979 	 * an additional ramrod.
980 	 */
981 	OSAL_SPIN_UNLOCK(&p_spq->lock);
982 
983 	if (!found) {
984 		DP_NOTICE(p_hwfn, true,
985 			  "Failed to find an entry that this"
986 			  " EQE [echo %04x] completes\n",
987 			  OSAL_LE16_TO_CPU(echo));
988 		return ECORE_EXISTS;
989 	}
990 
991 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
992 		   "Complete EQE [echo %04x]: func %p cookie %p\n",
993 		   OSAL_LE16_TO_CPU(echo),
994 		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
995 	if (found->comp_cb.function)
996 		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
997 					fw_return_code);
998 	else
999 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
1000 			   "Got a completion without a callback function\n");
1001 
1002 	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
1003 	    (found->queue == &p_spq->unlimited_pending))
1004 		/* EBLOCK is responsible for returning its own entry into the
1005 		 * free list, unless it originally added the entry into the
1006 		 * unlimited pending list.
1007 		 */
1008 		ecore_spq_return_entry(p_hwfn, found);
1009 
1010 	/* Attempt to post pending requests */
1011 	OSAL_SPIN_LOCK(&p_spq->lock);
1012 	rc = ecore_spq_pend_post(p_hwfn);
1013 	OSAL_SPIN_UNLOCK(&p_spq->lock);
1014 
1015 	return rc;
1016 }
1017 
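/* The ConsQ (apparently the firmware "consolidation" queue, judging by the
 * CONSOLID context fields above) is only allocated and reset by the driver;
 * its base address is handed to firmware through the SPQ context in
 * ecore_spq_hw_initialize().
 */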
1018 enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
1019 {
1020 	struct ecore_consq *p_consq;
1021 
1022 	/* Allocate ConsQ struct */
1023 	p_consq =
1024 	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
1025 	if (!p_consq) {
1026 		DP_NOTICE(p_hwfn, false,
1027 			  "Failed to allocate `struct ecore_consq'\n");
1028 		return ECORE_NOMEM;
1029 	}
1030 
1031 	/* Allocate and initialize the ConsQ chain */
1032 	if (ecore_chain_alloc(p_hwfn->p_dev,
1033 			      ECORE_CHAIN_USE_TO_PRODUCE,
1034 			      ECORE_CHAIN_MODE_PBL,
1035 			      ECORE_CHAIN_CNT_TYPE_U16,
1036 			      ECORE_CHAIN_PAGE_SIZE / 0x80,
1037 			      0x80,
1038 			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
1039 		DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain\n");
1040 		goto consq_allocate_fail;
1041 	}
1042 
1043 	p_hwfn->p_consq = p_consq;
1044 	return ECORE_SUCCESS;
1045 
1046 consq_allocate_fail:
1047 	OSAL_FREE(p_hwfn->p_dev, p_consq);
1048 	return ECORE_NOMEM;
1049 }
1050 
1051 void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
1052 {
1053 	ecore_chain_reset(&p_hwfn->p_consq->chain);
1054 }
1055 
1056 void ecore_consq_free(struct ecore_hwfn *p_hwfn)
1057 {
1058 	if (!p_hwfn->p_consq)
1059 		return;
1060 
1061 	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
1062 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
1063 }
1064