xref: /dpdk/drivers/common/idpf/base/idpf_controlq.c (revision d7c660d4714172f7c3aca959782a34e536206bce)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2023 Intel Corporation
 */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: struct containing info for the queue to be initialized
 */
static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
				 struct idpf_ctlq_create_info *q_create_info)
{
	/* set control queue registers in our local struct */
	cq->reg.head = q_create_info->reg.head;
	cq->reg.tail = q_create_info->reg.tail;
	cq->reg.len = q_create_info->reg.len;
	cq->reg.bah = q_create_info->reg.bah;
	cq->reg.bal = q_create_info->reg.bal;
	cq->reg.len_mask = q_create_info->reg.len_mask;
	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
	cq->reg.head_mask = q_create_info->reg.head_mask;
}

/**
 * idpf_ctlq_init_regs - Initialize control queue registers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized the
 * descriptor ring memory and buffer memory.
 */
static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
				bool is_rxq)
{
	/* Update tail to post pre-allocated buffers for rx queues */
	if (is_rxq)
		wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));

	/* For non-Mailbox control queues only TAIL needs to be set */
	if (cq->q_id != -1)
		return;

	/* Clear Head for both send and receive */
	wr32(hw, cq->reg.head, 0);

	/* set starting point */
	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}

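/*
 * Illustration (not from the original source): only the default mailbox,
 * identified above by q_id == -1, has its HEAD/BAL/BAH/LEN registers written
 * here; other control queues are assumed to have these registers programmed
 * elsewhere, so only TAIL is touched for them. For a mailbox with a 64-entry
 * descriptor ring, the LEN register is written as (64 | len_ena_mask), i.e.
 * the ring size with the enable bit(s) OR'ed in, while BAL/BAH take the low
 * and high 32 bits of the descriptor ring's DMA address.
 */
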
/**
 * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buffers
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
	int i;

	for (i = 0; i < cq->ring_size; i++) {
		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];

		/* No buffer to post to descriptor, continue */
		if (!bi)
			continue;

		desc->flags =
			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
		desc->opcode = 0;
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->ret_val = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.indirect.addr_high =
			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
		desc->params.indirect.addr_low =
			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
		desc->params.indirect.param0 = 0;
		desc->params.indirect.param1 = 0;
	}
}

/**
 * idpf_ctlq_shutdown - shutdown the CQ
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for any control queue
 */
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	idpf_acquire_lock(&cq->cq_lock);

#ifdef SIMICS_BUILD
	wr32(hw, cq->reg.head, 0);
	wr32(hw, cq->reg.tail, 0);
	wr32(hw, cq->reg.len, 0);
	wr32(hw, cq->reg.bal, 0);
	wr32(hw, cq->reg.bah, 0);
#endif /* SIMICS_BUILD */

	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);

	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

	idpf_release_lock(&cq->cq_lock);
	idpf_destroy_lock(&cq->cq_lock);
}

/**
 * idpf_ctlq_add - add one control queue
 * @hw: pointer to hardware struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue list.
 * The cq_out parameter will be allocated/initialized and passed back to the
 * caller if no errors occur.
 *
 * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
 */
int idpf_ctlq_add(struct idpf_hw *hw,
		  struct idpf_ctlq_create_info *qinfo,
		  struct idpf_ctlq_info **cq_out)
{
	struct idpf_ctlq_info *cq;
	bool is_rxq = false;
	int err;

	if (!qinfo->len || !qinfo->buf_size ||
	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
		return -EINVAL;

	cq = (struct idpf_ctlq_info *)
	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
	if (!cq)
		return -ENOMEM;

	cq->cq_type = qinfo->type;
	cq->q_id = qinfo->id;
	cq->buf_size = qinfo->buf_size;
	cq->ring_size = qinfo->len;

	cq->next_to_use = 0;
	cq->next_to_clean = 0;
	cq->next_to_post = cq->ring_size - 1;

	switch (qinfo->type) {
	case IDPF_CTLQ_TYPE_MAILBOX_RX:
		is_rxq = true;
		/* fallthrough */
	case IDPF_CTLQ_TYPE_MAILBOX_TX:
		err = idpf_ctlq_alloc_ring_res(hw, cq);
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (err)
		goto init_free_q;

	if (is_rxq) {
		idpf_ctlq_init_rxq_bufs(cq);
	} else {
		/* Allocate the array of msg pointers for TX queues */
		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
			idpf_calloc(hw, qinfo->len,
				    sizeof(struct idpf_ctlq_msg *));
		if (!cq->bi.tx_msg) {
			err = -ENOMEM;
			goto init_dealloc_q_mem;
		}
	}

	idpf_ctlq_setup_regs(cq, qinfo);

	idpf_ctlq_init_regs(hw, cq, is_rxq);

	idpf_init_lock(&(cq->cq_lock));

	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);

	*cq_out = cq;
	return 0;

init_dealloc_q_mem:
	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
	idpf_free(hw, cq);
	cq = NULL;

	return err;
}

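/*
 * Example (illustrative sketch, not part of the driver): adding a single
 * mailbox TX control queue with idpf_ctlq_add(). The queue id, ring length,
 * buffer size and register offsets below are placeholders; real values come
 * from the device's mailbox register map and the caller's configuration.
 *
 *	struct idpf_ctlq_create_info tx_qinfo = { 0 };
 *	struct idpf_ctlq_info *tx_cq = NULL;
 *	int err;
 *
 *	tx_qinfo.type = IDPF_CTLQ_TYPE_MAILBOX_TX;
 *	tx_qinfo.id = -1;		placeholder: default mailbox queue id
 *	tx_qinfo.len = 64;		number of descriptors
 *	tx_qinfo.buf_size = 4096;	per-buffer size in bytes
 *	tx_qinfo.reg.head = ...;	device-specific mailbox register
 *	tx_qinfo.reg.tail = ...;	offsets (head, tail, len, bah, bal
 *					and the associated masks)
 *
 *	err = idpf_ctlq_add(hw, &tx_qinfo, &tx_cq);
 *	if (!err)
 *		tx_cq can now be used with idpf_ctlq_send()/idpf_ctlq_clean_sq()
 */
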
/**
 * idpf_ctlq_remove - deallocate and remove specified control queue
 * @hw: pointer to hardware struct
 * @cq: pointer to control queue to be removed
 */
void idpf_ctlq_remove(struct idpf_hw *hw,
		      struct idpf_ctlq_info *cq)
{
	LIST_REMOVE(cq, cq_list);
	idpf_ctlq_shutdown(hw, cq);
	idpf_free(hw, cq);
}

/**
 * idpf_ctlq_init - main initialization routine for all control queues
 * @hw: pointer to hardware struct
 * @num_q: number of queues to initialize
 * @q_info: array of structs containing info for each queue to be initialized
 *
 * This initializes any number and any type of control queues. This is an all
 * or nothing routine; if one fails, all previously allocated queues will be
 * destroyed. This must be called prior to using the individual add/remove
 * APIs.
 */
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
		   struct idpf_ctlq_create_info *q_info)
{
	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
	int err;
	int i;

	LIST_INIT(&hw->cq_list_head);

	for (i = 0; i < num_q; i++) {
		struct idpf_ctlq_create_info *qinfo = q_info + i;

		err = idpf_ctlq_add(hw, qinfo, &cq);
		if (err)
			goto init_destroy_qs;
	}

	return 0;

init_destroy_qs:
	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
				 idpf_ctlq_info, cq_list)
		idpf_ctlq_remove(hw, cq);

	return err;
}

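/*
 * Example (illustrative sketch, not part of the driver): bringing up the
 * default mailbox as a TX/RX control queue pair in a single idpf_ctlq_init()
 * call. Field values are placeholders, and the reg sub-structures would
 * normally be filled in from the device's mailbox register map before the
 * call (omitted here for brevity).
 *
 *	struct idpf_ctlq_create_info mbx_info[2] = { 0 };
 *	int err;
 *
 *	mbx_info[0].type = IDPF_CTLQ_TYPE_MAILBOX_TX;
 *	mbx_info[0].id = -1;
 *	mbx_info[0].len = 64;
 *	mbx_info[0].buf_size = 4096;
 *	mbx_info[1].type = IDPF_CTLQ_TYPE_MAILBOX_RX;
 *	mbx_info[1].id = -1;
 *	mbx_info[1].len = 64;
 *	mbx_info[1].buf_size = 4096;
 *
 *	err = idpf_ctlq_init(hw, 2, mbx_info);
 *
 * On failure every queue added so far is removed again, so the caller only
 * needs to check the return value.
 */
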
/**
 * idpf_ctlq_deinit - destroy all control queues
 * @hw: pointer to hw struct
 */
void idpf_ctlq_deinit(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;

	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
				 idpf_ctlq_info, cq_list)
		idpf_ctlq_remove(hw, cq);
}

/**
 * idpf_ctlq_send - send command to Control Queue (CTQ)
 * @hw: pointer to hw struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
 * Since all q_msgs being sent are stored in native endianness, these values
 * must be converted to LE before being written to the hw descriptor.
 */
int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
	struct idpf_ctlq_desc *desc;
	int num_desc_avail;
	int err = 0;
	int i;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	idpf_acquire_lock(&cq->cq_lock);

	/* Ensure there are enough descriptors to send all messages */
	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
		err = -ENOSPC;
		goto err_unlock;
	}

	for (i = 0; i < num_q_msg; i++) {
		struct idpf_ctlq_msg *msg = &q_msg[i];

		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);

		desc->opcode = CPU_TO_LE16(msg->opcode);
		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);

		desc->cookie_high = CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
		desc->cookie_low = CPU_TO_LE32(msg->cookie.mbx.chnl_retval);

		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
					  IDPF_CTLQ_FLAG_HOST_ID_S);
		if (msg->data_len) {
			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;

			desc->datalen |= CPU_TO_LE16(msg->data_len);
			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);

			/* Update the address values in the desc with the pa
			 * value for respective buffer
			 */
			desc->params.indirect.addr_high =
				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
			desc->params.indirect.addr_low =
				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));

			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
#ifdef SIMICS_BUILD
			/* MBX messages with opcode idpf_mbq_opc_send_msg_to_pf
			 * need the peer PF function id set in param0 for Simics
			 */
			if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
				desc->params.indirect.param0 =
					CPU_TO_LE32(msg->func_id);
			}
#endif
		} else {
			idpf_memcpy(&desc->params, msg->ctx.direct,
				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
#ifdef SIMICS_BUILD
			/* MBX messages with opcode idpf_mbq_opc_send_msg_to_pf
			 * need the peer PF function id set in param0 for Simics
			 */
			if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
				desc->params.direct.param0 =
					CPU_TO_LE32(msg->func_id);
			}
#endif
		}

		/* Store buffer info */
		cq->bi.tx_msg[cq->next_to_use] = msg;

		(cq->next_to_use)++;
		if (cq->next_to_use == cq->ring_size)
			cq->next_to_use = 0;
	}

	/* Force memory write to complete before letting hardware
	 * know that there are new descriptors to fetch.
	 */
	idpf_wmb();

	wr32(hw, cq->reg.tail, cq->next_to_use);

err_unlock:
	idpf_release_lock(&cq->cq_lock);

	return err;
}

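/*
 * Example (illustrative sketch, not part of the driver): sending one mailbox
 * message with an indirect (DMA) payload. The opcode shown,
 * idpf_mbq_opc_send_msg_to_pf, is the one referenced elsewhere in this file;
 * payload allocation, payload_len and chnl_op are placeholders supplied by
 * the OS-specific layer.
 *
 *	struct idpf_ctlq_msg msg = { 0 };
 *	struct idpf_dma_mem *payload;	DMA buffer allocated by the caller
 *	int err;
 *
 *	msg.opcode = idpf_mbq_opc_send_msg_to_pf;
 *	msg.func_id = 0;
 *	msg.data_len = payload_len;	bytes actually written into payload
 *	msg.cookie.mbx.chnl_opcode = chnl_op;	virtchnl operation id
 *	msg.cookie.mbx.chnl_retval = 0;
 *	msg.ctx.indirect.payload = payload;
 *
 *	err = idpf_ctlq_send(hw, tx_cq, 1, &msg);
 *
 * The message and its DMA buffer must stay valid until they are reclaimed by
 * idpf_ctlq_clean_sq(), since the queue keeps a pointer to the message.
 */
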
/**
 * __idpf_ctlq_clean_sq - helper function to reclaim descriptors on HW write
 * back for the requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 * @force: (input) clean descriptors which were not done yet. Use with caution
 * in kernel mode only
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
				struct idpf_ctlq_msg *msg_status[], bool force)
{
	struct idpf_ctlq_desc *desc;
	u16 i, num_to_clean;
	u16 ntc, desc_err;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	if (*clean_count == 0)
		return 0;
	if (*clean_count > cq->ring_size)
		return -EINVAL;

	idpf_acquire_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *clean_count;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
			break;

		/* strip off FW internal code */
		desc_err = LE16_TO_CPU(desc->ret_val) & 0xff;

		msg_status[i] = cq->bi.tx_msg[ntc];
		if (!msg_status[i])
			break;
		msg_status[i]->status = desc_err;

		cq->bi.tx_msg[ntc] = NULL;

		/* Zero out any stale data */
		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	idpf_release_lock(&cq->cq_lock);

	/* Return number of descriptors actually cleaned */
	*clean_count = i;

	return 0;
}

/**
 * idpf_ctlq_clean_sq_force - reclaim all descriptors on HW write back for the
 * requested queue. Use only in kernel mode.
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int idpf_ctlq_clean_sq_force(struct idpf_ctlq_info *cq, u16 *clean_count,
			     struct idpf_ctlq_msg *msg_status[])
{
	return __idpf_ctlq_clean_sq(cq, clean_count, msg_status, true);
}

/**
 * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
		       struct idpf_ctlq_msg *msg_status[])
{
	return __idpf_ctlq_clean_sq(cq, clean_count, msg_status, false);
}

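/*
 * Example (illustrative sketch, not part of the driver): reclaiming completed
 * send descriptors once hardware has marked them done. The array size of 16
 * is a placeholder; it only bounds how many messages one call can clean.
 *
 *	struct idpf_ctlq_msg *done_msgs[16];
 *	u16 num_done = 16;
 *	int err;
 *
 *	err = idpf_ctlq_clean_sq(tx_cq, &num_done, done_msgs);
 *
 * On return num_done holds how many descriptors were actually cleaned, and
 * done_msgs[0..num_done-1] point at the original ctlq_msgs, whose status
 * fields carry the per-message return value. The caller then frees or reuses
 * those messages and their DMA buffers.
 */
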
/**
 * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @hw: pointer to hw struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring after
 * consuming them; buff_count will be the number of buffers.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL to support direct commands
 */
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			    u16 *buff_count, struct idpf_dma_mem **buffs)
{
	struct idpf_ctlq_desc *desc;
	u16 ntp = cq->next_to_post;
	bool buffs_avail = false;
	u16 tbp = ntp + 1;
	int i = 0;

	if (*buff_count > cq->ring_size)
		return -EINVAL;

	if (*buff_count > 0)
		buffs_avail = true;

	idpf_acquire_lock(&cq->cq_lock);

	if (tbp >= cq->ring_size)
		tbp = 0;

	if (tbp == cq->next_to_clean)
		/* Nothing to do */
		goto post_buffs_out;

	/* Post buffers for as many as provided or up until the last one used */
	while (ntp != cq->next_to_clean) {
		desc = IDPF_CTLQ_DESC(cq, ntp);

		if (cq->bi.rx_buff[ntp])
			goto fill_desc;
		if (!buffs_avail) {
			/* If the caller hasn't given us any buffers or
			 * there are none left, search the ring itself
			 * for an available buffer to move to this
			 * entry starting at the next entry in the ring
			 */
			tbp = ntp + 1;

			/* Wrap ring if necessary */
			if (tbp >= cq->ring_size)
				tbp = 0;

			while (tbp != cq->next_to_clean) {
				if (cq->bi.rx_buff[tbp]) {
					cq->bi.rx_buff[ntp] =
						cq->bi.rx_buff[tbp];
					cq->bi.rx_buff[tbp] = NULL;

					/* Found a buffer, no need to
					 * search anymore
					 */
					break;
				}

				/* Wrap ring if necessary */
				tbp++;
				if (tbp >= cq->ring_size)
					tbp = 0;
			}

			if (tbp == cq->next_to_clean)
				goto post_buffs_out;
		} else {
			/* Give back pointer to DMA buffer */
			cq->bi.rx_buff[ntp] = buffs[i];
			i++;

			if (i >= *buff_count)
				buffs_avail = false;
		}

fill_desc:
		desc->flags =
			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);

		/* Post buffers to descriptor */
		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
		desc->params.indirect.addr_high =
			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
		desc->params.indirect.addr_low =
			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));

		ntp++;
		if (ntp == cq->ring_size)
			ntp = 0;
	}

post_buffs_out:
	/* Only update tail if buffers were actually posted */
	if (cq->next_to_post != ntp) {
		if (ntp)
			/* Update next_to_post to ntp - 1 since current ntp
			 * will not have a buffer
			 */
			cq->next_to_post = ntp - 1;
		else
			/* Wrap to end of ring since current ntp is 0 */
			cq->next_to_post = cq->ring_size - 1;

		idpf_wmb();

		wr32(hw, cq->reg.tail, cq->next_to_post);
	}

	idpf_release_lock(&cq->cq_lock);

	/* return the number of buffers that were not posted */
	*buff_count = *buff_count - i;

	return 0;
}

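/*
 * Illustration (not from the original source) of the tail bookkeeping above:
 * with ring_size = 64 and buffers posted up to and including descriptor 10,
 * the loop leaves ntp == 11 (the first slot without a buffer), so
 * next_to_post and the TAIL register are set to 10. When ntp has wrapped to
 * 0, the last filled slot is the final ring entry, hence
 * next_to_post = ring_size - 1.
 */
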
/**
 * idpf_ctlq_recv - receive control queue message callback
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
 */
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
		   struct idpf_ctlq_msg *q_msg)
{
	u16 num_to_clean, ntc, ret_val, flags;
	struct idpf_ctlq_desc *desc;
	int err = 0;
	u16 i;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	if (*num_q_msg == 0)
		return 0;
	else if (*num_q_msg > cq->ring_size)
		return -EINVAL;

	/* take the lock before we start messing with the ring */
	idpf_acquire_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *num_q_msg;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		flags = LE16_TO_CPU(desc->flags);

		if (!(flags & IDPF_CTLQ_FLAG_DD))
			break;

		ret_val = LE16_TO_CPU(desc->ret_val);

		q_msg[i].vmvf_type = (flags &
				      (IDPF_CTLQ_FLAG_FTYPE_VM |
				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
				      IDPF_CTLQ_FLAG_FTYPE_S;

		if (flags & IDPF_CTLQ_FLAG_ERR)
			err = -EBADMSG;

		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);

		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
		q_msg[i].status = ret_val;

		if (desc->datalen) {
			idpf_memcpy(q_msg[i].ctx.indirect.context,
				    &desc->params.indirect,
				    IDPF_INDIRECT_CTX_SIZE,
				    IDPF_DMA_TO_NONDMA);

			/* Assign pointer to dma buffer to ctlq_msg array
			 * to be given to upper layer
			 */
			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

			/* Zero out pointer to DMA buffer info;
			 * will be repopulated by post buffers API
			 */
			cq->bi.rx_buff[ntc] = NULL;
		} else {
			idpf_memcpy(q_msg[i].ctx.direct,
				    desc->params.raw,
				    IDPF_DIRECT_CTX_SIZE,
				    IDPF_DMA_TO_NONDMA);
		}

		/* Zero out stale data in descriptor */
		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
			    IDPF_DMA_MEM);

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	idpf_release_lock(&cq->cq_lock);

	*num_q_msg = i;
	if (*num_q_msg == 0)
		err = -ENOMSG;

	return err;
}
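
/*
 * Example (illustrative sketch, not part of the driver): a polling receive
 * loop. Array sizes are placeholders. idpf_ctlq_post_rx_buffs() is called
 * even when no buffers are handed back, as its documentation requires, so
 * that direct (buffer-less) commands still replenish the ring correctly.
 *
 *	struct idpf_ctlq_msg rx_msgs[16];
 *	struct idpf_dma_mem *bufs[16];
 *	u16 num_msgs = 16, num_bufs = 0;
 *	int err;
 *
 *	err = idpf_ctlq_recv(rx_cq, &num_msgs, rx_msgs);
 *	if (err && err != -ENOMSG)
 *		handle the error
 *
 *	process rx_msgs[0..num_msgs-1]; for indirect messages take ownership
 *	of rx_msgs[i].ctx.indirect.payload and, once consumed, hand the DMA
 *	buffers back through bufs[] and num_bufs
 *
 *	idpf_ctlq_post_rx_buffs(hw, rx_cq, &num_bufs, bufs);
 *
 * On return num_bufs holds the number of buffers that could not be posted.
 */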
720