/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2022 Intel Corporation
 */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: struct containing info for the queue to be initialized
 */
static void
idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
		     struct idpf_ctlq_create_info *q_create_info)
{
	/* set head and tail registers in our local struct */
	cq->reg.head = q_create_info->reg.head;
	cq->reg.tail = q_create_info->reg.tail;
	cq->reg.len = q_create_info->reg.len;
	cq->reg.bah = q_create_info->reg.bah;
	cq->reg.bal = q_create_info->reg.bal;
	cq->reg.len_mask = q_create_info->reg.len_mask;
	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
	cq->reg.head_mask = q_create_info->reg.head_mask;
}

/**
 * idpf_ctlq_init_regs - Initialize control queue registers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized the
 * descriptor ring memory and buffer memory.
 */
static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
				bool is_rxq)
{
	/* Update tail to post pre-allocated buffers for rx queues */
	if (is_rxq)
		wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));

	/* For non-Mailbox control queues only TAIL needs to be set */
	if (cq->q_id != -1)
		return;

	/* Clear Head for both send and receive */
	wr32(hw, cq->reg.head, 0);

	/* set starting point */
	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}

/**
 * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buffer info
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
	int i = 0;

	for (i = 0; i < cq->ring_size; i++) {
		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];

		/* No buffer to post to descriptor, continue */
		if (!bi)
			continue;

		desc->flags =
			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
		desc->opcode = 0;
		desc->datalen = (__le16)CPU_TO_LE16(bi->size);
		desc->ret_val = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.indirect.addr_high =
			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
		desc->params.indirect.addr_low =
			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
		desc->params.indirect.param0 = 0;
		desc->params.indirect.param1 = 0;
	}
}

/**
 * idpf_ctlq_shutdown - shutdown the CQ
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for any control queue
 */
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	idpf_acquire_lock(&cq->cq_lock);

	if (!cq->ring_size)
		goto shutdown_sq_out;

#ifdef SIMICS_BUILD
	/* Reset the queue registers when running against the Simics model */
	wr32(hw, cq->reg.head, 0);
	wr32(hw, cq->reg.tail, 0);
	wr32(hw, cq->reg.len, 0);
	wr32(hw, cq->reg.bal, 0);
	wr32(hw, cq->reg.bah, 0);
#endif /* SIMICS_BUILD */

	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);

	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

shutdown_sq_out:
	idpf_release_lock(&cq->cq_lock);
	idpf_destroy_lock(&cq->cq_lock);
}

/**
 * idpf_ctlq_add - add one control queue
 * @hw: pointer to hardware struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue list.
 * The cq_out parameter will be allocated/initialized and passed back to the
 * caller if no errors occur.
 *
 * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
 */
int idpf_ctlq_add(struct idpf_hw *hw,
		  struct idpf_ctlq_create_info *qinfo,
		  struct idpf_ctlq_info **cq_out)
{
	bool is_rxq = false;
	int status = 0;

	if (!qinfo->len || !qinfo->buf_size ||
	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
		return -EINVAL;

	*cq_out = (struct idpf_ctlq_info *)
		idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
	if (!(*cq_out))
		return -ENOMEM;

	(*cq_out)->cq_type = qinfo->type;
	(*cq_out)->q_id = qinfo->id;
	(*cq_out)->buf_size = qinfo->buf_size;
	(*cq_out)->ring_size = qinfo->len;

	(*cq_out)->next_to_use = 0;
	(*cq_out)->next_to_clean = 0;
	(*cq_out)->next_to_post = (*cq_out)->ring_size - 1;

	switch (qinfo->type) {
	case IDPF_CTLQ_TYPE_MAILBOX_RX:
		is_rxq = true;
#ifdef __KERNEL__
		fallthrough;
#else
		/* fallthrough */
#endif /* __KERNEL__ */
	case IDPF_CTLQ_TYPE_MAILBOX_TX:
		status = idpf_ctlq_alloc_ring_res(hw, *cq_out);
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (status)
		goto init_free_q;

	if (is_rxq) {
		idpf_ctlq_init_rxq_bufs(*cq_out);
	} else {
		/* Allocate the array of msg pointers for TX queues */
		(*cq_out)->bi.tx_msg = (struct idpf_ctlq_msg **)
			idpf_calloc(hw, qinfo->len,
				    sizeof(struct idpf_ctlq_msg *));
		if (!(*cq_out)->bi.tx_msg) {
			status = -ENOMEM;
			goto init_dealloc_q_mem;
		}
	}

	idpf_ctlq_setup_regs(*cq_out, qinfo);

	idpf_ctlq_init_regs(hw, *cq_out, is_rxq);

	idpf_init_lock(&(*cq_out)->cq_lock);

	LIST_INSERT_HEAD(&hw->cq_list_head, (*cq_out), cq_list);

	return status;

init_dealloc_q_mem:
	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, *cq_out);
init_free_q:
	idpf_free(hw, *cq_out);

	return status;
}
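
/*
 * Illustrative sketch (not part of the driver) of how a caller might add one
 * extra mailbox control queue with idpf_ctlq_add() after idpf_ctlq_init() has
 * set up the queue list.  The ring length and buffer size are placeholder
 * choices, the id of -1 marks the default mailbox (see idpf_ctlq_init_regs()),
 * and the reg.head/tail/len/bah/bal fields plus the masks must be filled with
 * the device-specific mailbox register offsets before calling in:
 *
 *	struct idpf_ctlq_create_info qinfo = {
 *		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
 *		.id = -1,
 *		.len = 64,
 *		.buf_size = 4096,
 *	};
 *	struct idpf_ctlq_info *cq;
 *	int err;
 *
 *	err = idpf_ctlq_add(hw, &qinfo, &cq);
 *	if (err)
 *		return err;
 */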

/**
 * idpf_ctlq_remove - deallocate and remove specified control queue
 * @hw: pointer to hardware struct
 * @cq: pointer to control queue to be removed
 */
void idpf_ctlq_remove(struct idpf_hw *hw,
		      struct idpf_ctlq_info *cq)
{
	LIST_REMOVE(cq, cq_list);
	idpf_ctlq_shutdown(hw, cq);
	idpf_free(hw, cq);
}

/**
 * idpf_ctlq_init - main initialization routine for all control queues
 * @hw: pointer to hardware struct
 * @num_q: number of queues to initialize
 * @q_info: array of structs containing info for each queue to be initialized
 *
 * This initializes any number and any type of control queues. This is an all
 * or nothing routine; if one fails, all previously allocated queues will be
 * destroyed. This must be called prior to using the individual add/remove
 * APIs.
 */
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
		   struct idpf_ctlq_create_info *q_info)
{
	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
	int ret_code = 0;
	int i = 0;

	LIST_INIT(&hw->cq_list_head);

	for (i = 0; i < num_q; i++) {
		struct idpf_ctlq_create_info *qinfo = q_info + i;

		ret_code = idpf_ctlq_add(hw, qinfo, &cq);
		if (ret_code)
			goto init_destroy_qs;
	}

	return ret_code;

init_destroy_qs:
	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
				 idpf_ctlq_info, cq_list)
		idpf_ctlq_remove(hw, cq);

	return ret_code;
}
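
/*
 * Illustrative sketch (not part of the driver): bringing up the default
 * mailbox TX/RX pair through idpf_ctlq_init() and tearing everything down
 * again with idpf_ctlq_deinit().  Lengths, buffer sizes and the implied
 * register offsets are placeholders chosen for the example only:
 *
 *	struct idpf_ctlq_create_info q_info[2] = {
 *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = -1,
 *		  .len = 64, .buf_size = 4096 },
 *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = -1,
 *		  .len = 64, .buf_size = 4096 },
 *	};
 *	int err;
 *
 *	err = idpf_ctlq_init(hw, 2, q_info);
 *	if (err)
 *		return err;
 *	...
 *	idpf_ctlq_deinit(hw);
 */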

/**
 * idpf_ctlq_deinit - destroy all control queues
 * @hw: pointer to hw struct
 */
int idpf_ctlq_deinit(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
	int ret_code = 0;

	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
				 idpf_ctlq_info, cq_list)
		idpf_ctlq_remove(hw, cq);

	return ret_code;
}

/**
 * idpf_ctlq_send - send command to Control Queue (CTQ)
 * @hw: pointer to hw struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
 */
int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
	struct idpf_ctlq_desc *desc;
	int num_desc_avail = 0;
	int status = 0;
	int i = 0;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	idpf_acquire_lock(&cq->cq_lock);

	/* Ensure there are enough descriptors to send all messages */
	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
		status = -ENOSPC;
		goto sq_send_command_out;
	}

	for (i = 0; i < num_q_msg; i++) {
		struct idpf_ctlq_msg *msg = &q_msg[i];
		u64 msg_cookie;

		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);

		desc->opcode = CPU_TO_LE16(msg->opcode);
		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);

		msg_cookie = *(u64 *)&msg->cookie;
		desc->cookie_high =
			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
		desc->cookie_low =
			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));

		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
					  IDPF_CTLQ_FLAG_HOST_ID_S);
		if (msg->data_len) {
			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;

			desc->datalen |= CPU_TO_LE16(msg->data_len);
			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);

			/* Update the address values in the desc with the pa
			 * value for the respective buffer
			 */
			desc->params.indirect.addr_high =
				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
			desc->params.indirect.addr_low =
				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));

			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
#ifdef SIMICS_BUILD
			/* An MBX message with opcode idpf_mbq_opc_send_msg_to_pf
			 * needs the peer PF function id set in param0 for Simics
			 */
			if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
				desc->params.indirect.param0 =
					CPU_TO_LE32(msg->func_id);
			}
#endif
		} else {
			idpf_memcpy(&desc->params, msg->ctx.direct,
				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
#ifdef SIMICS_BUILD
			/* An MBX message with opcode idpf_mbq_opc_send_msg_to_pf
			 * needs the peer PF function id set in param0 for Simics
			 */
			if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
				desc->params.direct.param0 =
					CPU_TO_LE32(msg->func_id);
			}
#endif
		}

		/* Store buffer info */
		cq->bi.tx_msg[cq->next_to_use] = msg;

		(cq->next_to_use)++;
		if (cq->next_to_use == cq->ring_size)
			cq->next_to_use = 0;
	}

	/* Force memory write to complete before letting hardware
	 * know that there are new descriptors to fetch.
	 */
	idpf_wmb();

	wr32(hw, cq->reg.tail, cq->next_to_use);

sq_send_command_out:
	idpf_release_lock(&cq->cq_lock);

	return status;
}
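
/*
 * Illustrative sketch (not part of the driver): sending one mailbox message
 * with an indirect payload.  "tx_cq" is the TX mailbox queue returned by
 * idpf_ctlq_add() and "dma_buf" stands for a caller-allocated DMA buffer
 * already holding "payload_len" bytes; both names are assumptions made only
 * for the example:
 *
 *	struct idpf_ctlq_msg msg = { 0 };
 *	int err;
 *
 *	msg.opcode = idpf_mbq_opc_send_msg_to_pf;
 *	msg.func_id = 0;
 *	msg.data_len = payload_len;
 *	msg.ctx.indirect.payload = dma_buf;
 *
 *	err = idpf_ctlq_send(hw, tx_cq, 1, &msg);
 *
 * The queue keeps a reference to "msg" until idpf_ctlq_clean_sq() reports the
 * matching descriptor as done, so both "msg" and "dma_buf" must stay valid
 * until then.
 */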

/**
 * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
		       struct idpf_ctlq_msg *msg_status[])
{
	struct idpf_ctlq_desc *desc;
	u16 i = 0, num_to_clean;
	u16 ntc, desc_err;
	int ret = 0;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	if (*clean_count == 0)
		return 0;
	if (*clean_count > cq->ring_size)
		return -EINVAL;

	idpf_acquire_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *clean_count;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		if (!(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
			break;

		desc_err = LE16_TO_CPU(desc->ret_val);
		if (desc_err) {
			/* strip off FW internal code */
			desc_err &= 0xff;
		}

		msg_status[i] = cq->bi.tx_msg[ntc];
		msg_status[i]->status = desc_err;

		cq->bi.tx_msg[ntc] = NULL;

		/* Zero out any stale data */
		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	idpf_release_lock(&cq->cq_lock);

	/* Return number of descriptors actually cleaned */
	*clean_count = i;

	return ret;
}
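
/*
 * Illustrative sketch (not part of the driver): reclaiming completed send
 * descriptors after idpf_ctlq_send().  The msg_status array is allocated by
 * the caller; the size of 64 simply matches the example ring length used
 * earlier and is only a placeholder:
 *
 *	struct idpf_ctlq_msg *done_msgs[64];
 *	u16 num_done = 64;
 *	u16 i;
 *	int err;
 *
 *	err = idpf_ctlq_clean_sq(tx_cq, &num_done, done_msgs);
 *	for (i = 0; i < num_done; i++) {
 *		if (done_msgs[i]->status)
 *			... handle the per-message error ...
 *		... free or reuse done_msgs[i] and its DMA payload ...
 *	}
 */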

/**
 * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @hw: pointer to hw struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring after
 * consuming them; buff_count will be the number of buffers.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL, to support direct commands
 */
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			    u16 *buff_count, struct idpf_dma_mem **buffs)
{
	struct idpf_ctlq_desc *desc;
	u16 ntp = cq->next_to_post;
	bool buffs_avail = false;
	u16 tbp = ntp + 1;
	int status = 0;
	int i = 0;

	if (*buff_count > cq->ring_size)
		return -EINVAL;

	if (*buff_count > 0)
		buffs_avail = true;

	idpf_acquire_lock(&cq->cq_lock);

	if (tbp >= cq->ring_size)
		tbp = 0;

	if (tbp == cq->next_to_clean)
		/* Nothing to do */
		goto post_buffs_out;

	/* Post buffers for as many as provided or up until the last one used */
	while (ntp != cq->next_to_clean) {
		desc = IDPF_CTLQ_DESC(cq, ntp);

		if (cq->bi.rx_buff[ntp])
			goto fill_desc;
		if (!buffs_avail) {
			/* If the caller hasn't given us any buffers or
			 * there are none left, search the ring itself
			 * for an available buffer to move to this
			 * entry starting at the next entry in the ring
			 */
			tbp = ntp + 1;

			/* Wrap ring if necessary */
			if (tbp >= cq->ring_size)
				tbp = 0;

			while (tbp != cq->next_to_clean) {
				if (cq->bi.rx_buff[tbp]) {
					cq->bi.rx_buff[ntp] =
						cq->bi.rx_buff[tbp];
					cq->bi.rx_buff[tbp] = NULL;

					/* Found a buffer, no need to
					 * search anymore
					 */
					break;
				}

				/* Wrap ring if necessary */
				tbp++;
				if (tbp >= cq->ring_size)
					tbp = 0;
			}

			if (tbp == cq->next_to_clean)
				goto post_buffs_out;
		} else {
			/* Give back pointer to DMA buffer */
			cq->bi.rx_buff[ntp] = buffs[i];
			i++;

			if (i >= *buff_count)
				buffs_avail = false;
		}

fill_desc:
		desc->flags =
			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);

		/* Post buffers to descriptor */
		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
		desc->params.indirect.addr_high =
			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
		desc->params.indirect.addr_low =
			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));

		ntp++;
		if (ntp == cq->ring_size)
			ntp = 0;
	}

post_buffs_out:
	/* Only update tail if buffers were actually posted */
	if (cq->next_to_post != ntp) {
		if (ntp)
			/* Update next_to_post to ntp - 1 since current ntp
			 * will not have a buffer
			 */
			cq->next_to_post = ntp - 1;
		else
			/* Wrap to end of ring since current ntp is 0 */
			cq->next_to_post = cq->ring_size - 1;

		wr32(hw, cq->reg.tail, cq->next_to_post);
	}

	idpf_release_lock(&cq->cq_lock);

	/* return the number of buffers that were not posted */
	*buff_count = *buff_count - i;

	return status;
}
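
/*
 * Illustrative sketch (not part of the driver): handing a consumed buffer
 * back to the RX control queue.  "rx_cq" and "used_buf" are assumed names;
 * when there is nothing to return, the call is still made with a zero count
 * and a NULL array so that internally recycled descriptors can be posted:
 *
 *	struct idpf_dma_mem *bufs[1] = { used_buf };
 *	u16 nb_bufs = 1;
 *	int err;
 *
 *	err = idpf_ctlq_post_rx_buffs(hw, rx_cq, &nb_bufs, bufs);
 *
 * On return nb_bufs holds the number of buffers that could not be posted.
 */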

/**
 * idpf_ctlq_recv - receive control queue message callback
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
 */
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
		   struct idpf_ctlq_msg *q_msg)
{
	u16 num_to_clean, ntc, ret_val, flags;
	struct idpf_ctlq_desc *desc;
	int ret_code = 0;
	u16 i = 0;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	if (*num_q_msg == 0)
		return 0;
	else if (*num_q_msg > cq->ring_size)
		return -EINVAL;

	/* take the lock before we start messing with the ring */
	idpf_acquire_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *num_q_msg;

	for (i = 0; i < num_to_clean; i++) {
		u64 msg_cookie;

		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		flags = LE16_TO_CPU(desc->flags);

		if (!(flags & IDPF_CTLQ_FLAG_DD))
			break;

		ret_val = LE16_TO_CPU(desc->ret_val);

		q_msg[i].vmvf_type = (flags &
				      (IDPF_CTLQ_FLAG_FTYPE_VM |
				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
				      IDPF_CTLQ_FLAG_FTYPE_S;

		if (flags & IDPF_CTLQ_FLAG_ERR)
			ret_code = -EBADMSG;

		msg_cookie = (u64)LE32_TO_CPU(desc->cookie_high) << 32;
		msg_cookie |= (u64)LE32_TO_CPU(desc->cookie_low);
		idpf_memcpy(&q_msg[i].cookie, &msg_cookie, sizeof(u64),
			    IDPF_NONDMA_TO_NONDMA);

		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
		q_msg[i].status = ret_val;

		if (desc->datalen) {
			idpf_memcpy(q_msg[i].ctx.indirect.context,
				    &desc->params.indirect,
				    IDPF_INDIRECT_CTX_SIZE,
				    IDPF_DMA_TO_NONDMA);

			/* Assign pointer to dma buffer to ctlq_msg array
			 * to be given to upper layer
			 */
			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

			/* Zero out pointer to DMA buffer info;
			 * will be repopulated by post buffers API
			 */
			cq->bi.rx_buff[ntc] = NULL;
		} else {
			idpf_memcpy(q_msg[i].ctx.direct,
				    desc->params.raw,
				    IDPF_DIRECT_CTX_SIZE,
				    IDPF_DMA_TO_NONDMA);
		}

		/* Zero out stale data in descriptor */
		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
			    IDPF_DMA_MEM);

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	idpf_release_lock(&cq->cq_lock);

	*num_q_msg = i;
	if (*num_q_msg == 0)
		ret_code = -ENOMSG;

	return ret_code;
}
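
/*
 * Illustrative sketch (not part of the driver): a minimal polling receive on
 * the RX mailbox queue.  "rx_cq" is an assumed name; buffers attached to
 * indirect messages belong to the caller after idpf_ctlq_recv() and are
 * handed back through idpf_ctlq_post_rx_buffs():
 *
 *	struct idpf_ctlq_msg rx_msg;
 *	u16 nb_msg = 1;
 *	int err;
 *
 *	err = idpf_ctlq_recv(rx_cq, &nb_msg, &rx_msg);
 *	if (!err && nb_msg) {
 *		... act on rx_msg.opcode / rx_msg.status ...
 *
 *		if (rx_msg.data_len) {
 *			struct idpf_dma_mem *buf = rx_msg.ctx.indirect.payload;
 *			u16 nb_bufs = 1;
 *
 *			... copy out the payload, then return the buffer ...
 *			idpf_ctlq_post_rx_buffs(hw, rx_cq, &nb_bufs, &buf);
 *		}
 *	}
 */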