xref: /dpdk/drivers/common/idpf/base/idpf_controlq.c (revision d7c660d4714172f7c3aca959782a34e536206bce)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2023 Intel Corporation
 */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: structs containing info for each queue to be initialized
 */
static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
				 struct idpf_ctlq_create_info *q_create_info)
{
	/* set control queue registers in our local struct */
	cq->reg.head = q_create_info->reg.head;
	cq->reg.tail = q_create_info->reg.tail;
	cq->reg.len = q_create_info->reg.len;
	cq->reg.bah = q_create_info->reg.bah;
	cq->reg.bal = q_create_info->reg.bal;
	cq->reg.len_mask = q_create_info->reg.len_mask;
	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
	cq->reg.head_mask = q_create_info->reg.head_mask;
}

/**
 * idpf_ctlq_init_regs - Initialize control queue registers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized the
 * descriptor ring memory and buffer memory.
 */
static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
				bool is_rxq)
{
	/* Update tail to post pre-allocated buffers for rx queues */
	if (is_rxq)
		wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));

	/* For non-Mailbox control queues only TAIL needs to be set */
	if (cq->q_id != -1)
		return;

	/* Clear Head for both send or receive */
	wr32(hw, cq->reg.head, 0);

	/* set starting point */
	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}

/**
 * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
	int i;

	for (i = 0; i < cq->ring_size; i++) {
		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];

		/* No buffer to post to descriptor, continue */
		if (!bi)
			continue;

		desc->flags =
			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
		desc->opcode = 0;
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->ret_val = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.indirect.addr_high =
			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
		desc->params.indirect.addr_low =
			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
		desc->params.indirect.param0 = 0;
		desc->params.indirect.param1 = 0;
	}
}

/**
 * idpf_ctlq_shutdown - shutdown the CQ
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for any control queue
 */
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	idpf_acquire_lock(&cq->cq_lock);

#ifdef SIMICS_BUILD
	wr32(hw, cq->reg.head, 0);
	wr32(hw, cq->reg.tail, 0);
	wr32(hw, cq->reg.len, 0);
	wr32(hw, cq->reg.bal, 0);
	wr32(hw, cq->reg.bah, 0);
#endif /* SIMICS_BUILD */

	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);

	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

	idpf_release_lock(&cq->cq_lock);
	idpf_destroy_lock(&cq->cq_lock);
}

/**
 * idpf_ctlq_add - add one control queue
 * @hw: pointer to hardware struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue
 * list. The cq parameter will be allocated/initialized and passed back to the
 * caller if no errors occur.
 *
 * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
 */
int idpf_ctlq_add(struct idpf_hw *hw,
		  struct idpf_ctlq_create_info *qinfo,
		  struct idpf_ctlq_info **cq_out)
{
	struct idpf_ctlq_info *cq;
	bool is_rxq = false;
	int err;

	if (!qinfo->len || !qinfo->buf_size ||
	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
		return -EINVAL;

	cq = (struct idpf_ctlq_info *)
	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
	if (!cq)
		return -ENOMEM;

	cq->cq_type = qinfo->type;
	cq->q_id = qinfo->id;
	cq->buf_size = qinfo->buf_size;
	cq->ring_size = qinfo->len;

	cq->next_to_use = 0;
	cq->next_to_clean = 0;
	cq->next_to_post = cq->ring_size - 1;

	switch (qinfo->type) {
	case IDPF_CTLQ_TYPE_MAILBOX_RX:
		is_rxq = true;
		/* fallthrough */
	case IDPF_CTLQ_TYPE_MAILBOX_TX:
		err = idpf_ctlq_alloc_ring_res(hw, cq);
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (err)
		goto init_free_q;

	if (is_rxq) {
		idpf_ctlq_init_rxq_bufs(cq);
	} else {
		/* Allocate the array of msg pointers for TX queues */
		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
			idpf_calloc(hw, qinfo->len,
				    sizeof(struct idpf_ctlq_msg *));
		if (!cq->bi.tx_msg) {
			err = -ENOMEM;
			goto init_dealloc_q_mem;
		}
	}

	idpf_ctlq_setup_regs(cq, qinfo);

	idpf_ctlq_init_regs(hw, cq, is_rxq);

	idpf_init_lock(&(cq->cq_lock));

	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);

	*cq_out = cq;
	return 0;

init_dealloc_q_mem:
	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
	idpf_free(hw, cq);
	cq = NULL;

	return err;
}
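
/* Illustrative sketch, not part of the driver: how a caller might describe
 * and add a single mailbox send queue with idpf_ctlq_add().  The queue id
 * (-1 for the default mailbox), ring length and buffer size are example
 * assumptions, and the reg.* offsets are left at zero; a real caller fills
 * them in from the device's mailbox register map.  Per the note above,
 * idpf_ctlq_init() must have been called first so hw->cq_list_head is valid.
 */
static int example_add_mailbox_txq(struct idpf_hw *hw,
				   struct idpf_ctlq_info **cq_out)
{
	struct idpf_ctlq_create_info qinfo = {
		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
		.id = -1,		/* assumed default mailbox queue id */
		.len = 64,		/* ring length, <= IDPF_CTLQ_MAX_RING_SIZE */
		.buf_size = 4096,	/* per-buffer size, <= IDPF_CTLQ_MAX_BUF_LEN */
	};

	return idpf_ctlq_add(hw, &qinfo, cq_out);
}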

/**
 * idpf_ctlq_remove - deallocate and remove specified control queue
 * @hw: pointer to hardware struct
 * @cq: pointer to control queue to be removed
 */
void idpf_ctlq_remove(struct idpf_hw *hw,
		      struct idpf_ctlq_info *cq)
{
	LIST_REMOVE(cq, cq_list);
	idpf_ctlq_shutdown(hw, cq);
	idpf_free(hw, cq);
}

/**
 * idpf_ctlq_init - main initialization routine for all control queues
 * @hw: pointer to hardware struct
 * @num_q: number of queues to initialize
 * @q_info: array of structs containing info for each queue to be initialized
 *
 * This initializes any number and any type of control queues. This is an all
 * or nothing routine; if one fails, all previously allocated queues will be
 * destroyed. This must be called prior to using the individual add/remove
 * APIs.
 */
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
		   struct idpf_ctlq_create_info *q_info)
{
	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
	int err;
	int i;

	LIST_INIT(&hw->cq_list_head);

	for (i = 0; i < num_q; i++) {
		struct idpf_ctlq_create_info *qinfo = q_info + i;

		err = idpf_ctlq_add(hw, qinfo, &cq);
		if (err)
			goto init_destroy_qs;
	}

	return 0;

init_destroy_qs:
	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
				 idpf_ctlq_info, cq_list)
		idpf_ctlq_remove(hw, cq);

	return err;
}
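
/* Illustrative sketch, not part of the driver: bringing up the usual mailbox
 * pair (one TX and one RX control queue) with a single all-or-nothing call to
 * idpf_ctlq_init().  Queue ids, ring length and buffer size are example
 * assumptions; the reg.* offsets are left zeroed for brevity and would be
 * filled from the device register map.  Everything added here is torn down
 * later with a single idpf_ctlq_deinit(hw) call.
 */
static int example_init_mailbox_pair(struct idpf_hw *hw)
{
	struct idpf_ctlq_create_info q_info[2] = {
		{
			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
			.id = -1,	/* assumed default mailbox id */
			.len = 64,
			.buf_size = 4096,
		},
		{
			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
			.id = -1,
			.len = 64,
			.buf_size = 4096,
		},
	};

	/* On failure idpf_ctlq_init() has already removed whatever it
	 * managed to add, so no extra cleanup is needed here.
	 */
	return idpf_ctlq_init(hw, 2, q_info);
}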

/**
 * idpf_ctlq_deinit - destroy all control queues
 * @hw: pointer to hw struct
 */
void idpf_ctlq_deinit(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;

	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
				 idpf_ctlq_info, cq_list)
		idpf_ctlq_remove(hw, cq);
}

/**
 * idpf_ctlq_send - send command to Control Queue (CTQ)
 * @hw: pointer to hw struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
 * Since all q_msgs being sent are stored in native endianness, these values
 * must be converted to LE before being written to the hw descriptor.
 */
int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
	struct idpf_ctlq_desc *desc;
	int num_desc_avail;
	int err = 0;
	int i;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	idpf_acquire_lock(&cq->cq_lock);

	/* Ensure there are enough descriptors to send all messages */
	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
		err = -ENOSPC;
		goto err_unlock;
	}

	for (i = 0; i < num_q_msg; i++) {
		struct idpf_ctlq_msg *msg = &q_msg[i];

		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);

		desc->opcode = CPU_TO_LE16(msg->opcode);
		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);

		desc->cookie_high = CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
		desc->cookie_low = CPU_TO_LE32(msg->cookie.mbx.chnl_retval);

		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
					  IDPF_CTLQ_FLAG_HOST_ID_S);
		if (msg->data_len) {
			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;

			desc->datalen |= CPU_TO_LE16(msg->data_len);
			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);

			/* Update the address values in the desc with the pa
			 * value for respective buffer
			 */
			desc->params.indirect.addr_high =
				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
			desc->params.indirect.addr_low =
				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));

			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
#ifdef SIMICS_BUILD
			/* MBX messages with opcode idpf_mbq_opc_send_msg_to_pf
			 * need to set the peer PF function id in param0 for
			 * Simics
			 */
			if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
				desc->params.indirect.param0 =
					CPU_TO_LE32(msg->func_id);
			}
#endif
		} else {
			idpf_memcpy(&desc->params, msg->ctx.direct,
				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
#ifdef SIMICS_BUILD
			/* MBX messages with opcode idpf_mbq_opc_send_msg_to_pf
			 * need to set the peer PF function id in param0 for
			 * Simics
			 */
			if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
				desc->params.direct.param0 =
					CPU_TO_LE32(msg->func_id);
			}
#endif
		}

		/* Store buffer info */
		cq->bi.tx_msg[cq->next_to_use] = msg;

		(cq->next_to_use)++;
		if (cq->next_to_use == cq->ring_size)
			cq->next_to_use = 0;
	}

	/* Force memory write to complete before letting hardware
	 * know that there are new descriptors to fetch.
	 */
	idpf_wmb();

	wr32(hw, cq->reg.tail, cq->next_to_use);

err_unlock:
	idpf_release_lock(&cq->cq_lock);

	return err;
}
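
/* Illustrative sketch, not part of the driver: posting one indirect
 * (buffered) message on a send control queue.  The DMA buffer and the
 * idpf_ctlq_msg stay owned by the caller until the completion is reclaimed
 * with idpf_ctlq_clean_sq().  The function id and the virtchnl cookie values
 * are placeholders; idpf_mbq_opc_send_msg_to_pf is the mailbox opcode already
 * used elsewhere in this file.
 */
static int example_send_one(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			    struct idpf_ctlq_msg *msg,
			    struct idpf_dma_mem *payload, u16 payload_len)
{
	msg->opcode = idpf_mbq_opc_send_msg_to_pf;
	msg->func_id = 0;			/* assumed destination function id */
	msg->data_len = payload_len;		/* non-zero selects the indirect path */
	msg->cookie.mbx.chnl_opcode = 0;	/* placeholder virtchnl opcode */
	msg->cookie.mbx.chnl_retval = 0;
	msg->ctx.indirect.payload = payload;	/* buffer already filled by the caller */

	return idpf_ctlq_send(hw, cq, 1, msg);
}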

/**
 * __idpf_ctlq_clean_sq - helper function to reclaim descriptors on HW write
 * back for the requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 * @force: (input) clean descriptors which were not done yet. Use with caution
 * in kernel mode only
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
				struct idpf_ctlq_msg *msg_status[], bool force)
{
	struct idpf_ctlq_desc *desc;
	u16 i, num_to_clean;
	u16 ntc, desc_err;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	if (*clean_count == 0)
		return 0;
	if (*clean_count > cq->ring_size)
		return -EINVAL;

	idpf_acquire_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *clean_count;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
			break;

		/* strip off FW internal code */
		desc_err = LE16_TO_CPU(desc->ret_val) & 0xff;

		msg_status[i] = cq->bi.tx_msg[ntc];
		if (!msg_status[i])
			break;
		msg_status[i]->status = desc_err;

		cq->bi.tx_msg[ntc] = NULL;

		/* Zero out any stale data */
		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	idpf_release_lock(&cq->cq_lock);

	/* Return number of descriptors actually cleaned */
	*clean_count = i;

	return 0;
}

/**
 * idpf_ctlq_clean_sq_force - reclaim all descriptors on HW write back for the
 * requested queue. Use only in kernel mode.
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int idpf_ctlq_clean_sq_force(struct idpf_ctlq_info *cq, u16 *clean_count,
			     struct idpf_ctlq_msg *msg_status[])
{
	return __idpf_ctlq_clean_sq(cq, clean_count, msg_status, true);
}

/**
 * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
		       struct idpf_ctlq_msg *msg_status[])
{
	return __idpf_ctlq_clean_sq(cq, clean_count, msg_status, false);
}
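
/* Illustrative sketch, not part of the driver: reclaiming completed send
 * descriptors some time after idpf_ctlq_send().  The batch size of 16 is an
 * example assumption; on return clean_count holds how many descriptors were
 * actually done, and each returned message carries its firmware status.
 */
static void example_clean_send_queue(struct idpf_ctlq_info *cq)
{
	struct idpf_ctlq_msg *msg_status[16];
	u16 clean_count = 16;
	u16 i;

	if (idpf_ctlq_clean_sq(cq, &clean_count, msg_status))
		return;

	for (i = 0; i < clean_count; i++) {
		if (msg_status[i]->status) {
			/* message was rejected by firmware; inspect status */
			continue;
		}
		/* success: the original ctlq_msg and its DMA buffer may now
		 * be freed or reused by the caller
		 */
	}
}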

/**
 * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @hw: pointer to hw struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring after
 * consuming them; buff_count will be the number of buffers.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL to support direct commands
 */
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			    u16 *buff_count, struct idpf_dma_mem **buffs)
{
	struct idpf_ctlq_desc *desc;
	u16 ntp = cq->next_to_post;
	bool buffs_avail = false;
	u16 tbp = ntp + 1;
	int i = 0;

	if (*buff_count > cq->ring_size)
		return -EINVAL;

	if (*buff_count > 0)
		buffs_avail = true;

	idpf_acquire_lock(&cq->cq_lock);

	if (tbp >= cq->ring_size)
		tbp = 0;

	if (tbp == cq->next_to_clean)
		/* Nothing to do */
		goto post_buffs_out;

	/* Post buffers for as many as provided or up until the last one used */
	while (ntp != cq->next_to_clean) {
		desc = IDPF_CTLQ_DESC(cq, ntp);

		if (cq->bi.rx_buff[ntp])
			goto fill_desc;
		if (!buffs_avail) {
			/* If the caller hasn't given us any buffers or
			 * there are none left, search the ring itself
			 * for an available buffer to move to this
			 * entry starting at the next entry in the ring
			 */
			tbp = ntp + 1;

			/* Wrap ring if necessary */
			if (tbp >= cq->ring_size)
				tbp = 0;

			while (tbp != cq->next_to_clean) {
				if (cq->bi.rx_buff[tbp]) {
					cq->bi.rx_buff[ntp] =
						cq->bi.rx_buff[tbp];
					cq->bi.rx_buff[tbp] = NULL;

					/* Found a buffer, no need to
					 * search anymore
					 */
					break;
				}

				/* Wrap ring if necessary */
				tbp++;
				if (tbp >= cq->ring_size)
					tbp = 0;
			}

			if (tbp == cq->next_to_clean)
				goto post_buffs_out;
		} else {
			/* Give back pointer to DMA buffer */
			cq->bi.rx_buff[ntp] = buffs[i];
			i++;

			if (i >= *buff_count)
				buffs_avail = false;
		}

fill_desc:
		desc->flags =
			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);

		/* Post buffers to descriptor */
		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
		desc->params.indirect.addr_high =
			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
		desc->params.indirect.addr_low =
			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));

		ntp++;
		if (ntp == cq->ring_size)
			ntp = 0;
	}

post_buffs_out:
	/* Only update tail if buffers were actually posted */
	if (cq->next_to_post != ntp) {
		if (ntp)
			/* Update next_to_post to ntp - 1 since current ntp
			 * will not have a buffer
			 */
			cq->next_to_post = ntp - 1;
		else
			/* Wrap to end of ring since current ntp is 0 */
			cq->next_to_post = cq->ring_size - 1;

		idpf_wmb();

		wr32(hw, cq->reg.tail, cq->next_to_post);
	}

	idpf_release_lock(&cq->cq_lock);

	/* return the number of buffers that were not posted */
	*buff_count = *buff_count - i;

	return 0;
}
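
/* Illustrative sketch, not part of the driver: handing consumed DMA buffers
 * back to a receive control queue.  On return num_bufs holds how many of the
 * supplied buffers were not posted (and are therefore still owned by the
 * caller).  Per the note above, the same call is also made with
 * num_bufs = 0 and bufs = NULL after receiving direct commands.
 */
static int example_return_rx_buffers(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq,
				     struct idpf_dma_mem **bufs, u16 num_bufs)
{
	int err;

	err = idpf_ctlq_post_rx_buffs(hw, cq, &num_bufs, bufs);
	if (err)
		return err;

	/* num_bufs buffers could not be placed on the ring; the caller
	 * keeps them and may retry on a later pass
	 */
	return 0;
}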

/**
 * idpf_ctlq_recv - receive control queue messages
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
 */
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
		   struct idpf_ctlq_msg *q_msg)
{
	u16 num_to_clean, ntc, ret_val, flags;
	struct idpf_ctlq_desc *desc;
	int err = 0;
	u16 i;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	if (*num_q_msg == 0)
		return 0;
	else if (*num_q_msg > cq->ring_size)
		return -EINVAL;

	/* take the lock before we start messing with the ring */
	idpf_acquire_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *num_q_msg;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		flags = LE16_TO_CPU(desc->flags);

		if (!(flags & IDPF_CTLQ_FLAG_DD))
			break;

		ret_val = LE16_TO_CPU(desc->ret_val);

		q_msg[i].vmvf_type = (flags &
				      (IDPF_CTLQ_FLAG_FTYPE_VM |
				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
				      IDPF_CTLQ_FLAG_FTYPE_S;

		if (flags & IDPF_CTLQ_FLAG_ERR)
			err = -EBADMSG;

		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);

		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
		q_msg[i].status = ret_val;

		if (desc->datalen) {
			idpf_memcpy(q_msg[i].ctx.indirect.context,
				    &desc->params.indirect,
				    IDPF_INDIRECT_CTX_SIZE,
				    IDPF_DMA_TO_NONDMA);

			/* Assign pointer to dma buffer to ctlq_msg array
			 * to be given to upper layer
			 */
			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

			/* Zero out pointer to DMA buffer info;
			 * will be repopulated by post buffers API
			 */
			cq->bi.rx_buff[ntc] = NULL;
		} else {
			idpf_memcpy(q_msg[i].ctx.direct,
				    desc->params.raw,
				    IDPF_DIRECT_CTX_SIZE,
				    IDPF_DMA_TO_NONDMA);
		}

		/* Zero out stale data in descriptor */
		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
			    IDPF_DMA_MEM);

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	idpf_release_lock(&cq->cq_lock);

	*num_q_msg = i;
	if (*num_q_msg == 0)
		err = -ENOMSG;

	return err;
}
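
/* Illustrative sketch, not part of the driver: one polling pass over a
 * mailbox receive queue.  Messages are pulled with idpf_ctlq_recv(), the
 * payload buffers of indirect messages are collected, and everything is then
 * handed back to the ring with idpf_ctlq_post_rx_buffs().  The per-pass
 * batch size of 4 is an example assumption.
 */
static void example_poll_mailbox_rx(struct idpf_hw *hw,
				    struct idpf_ctlq_info *cq)
{
	struct idpf_ctlq_msg q_msg[4];
	struct idpf_dma_mem *dma_bufs[4];
	u16 num_recv = 4;
	u16 num_post = 0;
	int err;
	u16 i;

	err = idpf_ctlq_recv(cq, &num_recv, q_msg);
	if (err && !num_recv)
		return;	/* nothing pending (-ENOMSG) or a hard error */

	for (i = 0; i < num_recv; i++) {
		/* ...handle q_msg[i].cookie.mbx.chnl_opcode and payload... */

		/* Collect the DMA buffers that arrived with indirect
		 * messages so they can be returned to the ring below
		 */
		if (q_msg[i].data_len)
			dma_bufs[num_post++] = q_msg[i].ctx.indirect.payload;
	}

	/* Post buffers back; per the note on idpf_ctlq_post_rx_buffs above,
	 * this is done even when no buffers are being returned
	 */
	(void)idpf_ctlq_post_rx_buffs(hw, cq, &num_post, dma_bufs);
}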