xref: /freebsd-src/sys/dev/ice/ice_controlq.c (revision f377a0c7dfa97035844e58c2aec810001bebce17)
171d10453SEric Joyner /* SPDX-License-Identifier: BSD-3-Clause */
2015f8cc5SEric Joyner /*  Copyright (c) 2024, Intel Corporation
371d10453SEric Joyner  *  All rights reserved.
471d10453SEric Joyner  *
571d10453SEric Joyner  *  Redistribution and use in source and binary forms, with or without
671d10453SEric Joyner  *  modification, are permitted provided that the following conditions are met:
771d10453SEric Joyner  *
871d10453SEric Joyner  *   1. Redistributions of source code must retain the above copyright notice,
971d10453SEric Joyner  *      this list of conditions and the following disclaimer.
1071d10453SEric Joyner  *
1171d10453SEric Joyner  *   2. Redistributions in binary form must reproduce the above copyright
1271d10453SEric Joyner  *      notice, this list of conditions and the following disclaimer in the
1371d10453SEric Joyner  *      documentation and/or other materials provided with the distribution.
1471d10453SEric Joyner  *
1571d10453SEric Joyner  *   3. Neither the name of the Intel Corporation nor the names of its
1671d10453SEric Joyner  *      contributors may be used to endorse or promote products derived from
1771d10453SEric Joyner  *      this software without specific prior written permission.
1871d10453SEric Joyner  *
1971d10453SEric Joyner  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2071d10453SEric Joyner  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2171d10453SEric Joyner  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2271d10453SEric Joyner  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
2371d10453SEric Joyner  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2471d10453SEric Joyner  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2571d10453SEric Joyner  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2671d10453SEric Joyner  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2771d10453SEric Joyner  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2871d10453SEric Joyner  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2971d10453SEric Joyner  *  POSSIBILITY OF SUCH DAMAGE.
3071d10453SEric Joyner  */
3171d10453SEric Joyner 
3271d10453SEric Joyner #include "ice_common.h"
3371d10453SEric Joyner 
/**
 * ICE_CQ_INIT_REGS - fill in a control queue's register offsets and masks
 * @qinfo: pointer to the ice_ctl_q_info being initialized
 * @prefix: register-name prefix token (PF_FW, PF_MBX, or PF_SB)
 *
 * Token-pastes @prefix onto the generic ATQ/ARQ register macro names so
 * the shared send/receive queue code can drive the AdminQ, Mailbox, or
 * Sideband register sets through one structure.
 */
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
5571d10453SEric Joyner 
5671d10453SEric Joyner /**
5771d10453SEric Joyner  * ice_adminq_init_regs - Initialize AdminQ registers
5871d10453SEric Joyner  * @hw: pointer to the hardware structure
5971d10453SEric Joyner  *
6071d10453SEric Joyner  * This assumes the alloc_sq and alloc_rq functions have already been called
6171d10453SEric Joyner  */
6271d10453SEric Joyner static void ice_adminq_init_regs(struct ice_hw *hw)
6371d10453SEric Joyner {
6471d10453SEric Joyner 	struct ice_ctl_q_info *cq = &hw->adminq;
6571d10453SEric Joyner 
6671d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6771d10453SEric Joyner 
6871d10453SEric Joyner 	ICE_CQ_INIT_REGS(cq, PF_FW);
6971d10453SEric Joyner }
7071d10453SEric Joyner 
7171d10453SEric Joyner /**
7271d10453SEric Joyner  * ice_mailbox_init_regs - Initialize Mailbox registers
7371d10453SEric Joyner  * @hw: pointer to the hardware structure
7471d10453SEric Joyner  *
7571d10453SEric Joyner  * This assumes the alloc_sq and alloc_rq functions have already been called
7671d10453SEric Joyner  */
7771d10453SEric Joyner static void ice_mailbox_init_regs(struct ice_hw *hw)
7871d10453SEric Joyner {
7971d10453SEric Joyner 	struct ice_ctl_q_info *cq = &hw->mailboxq;
8071d10453SEric Joyner 
8171d10453SEric Joyner 	ICE_CQ_INIT_REGS(cq, PF_MBX);
8271d10453SEric Joyner }
8371d10453SEric Joyner 
8471d10453SEric Joyner /**
85f2635e84SEric Joyner  * ice_sb_init_regs - Initialize Sideband registers
86f2635e84SEric Joyner  * @hw: pointer to the hardware structure
87f2635e84SEric Joyner  *
88f2635e84SEric Joyner  * This assumes the alloc_sq and alloc_rq functions have already been called
89f2635e84SEric Joyner  */
90f2635e84SEric Joyner static void ice_sb_init_regs(struct ice_hw *hw)
91f2635e84SEric Joyner {
92f2635e84SEric Joyner 	struct ice_ctl_q_info *cq = &hw->sbq;
93f2635e84SEric Joyner 
94f2635e84SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
95f2635e84SEric Joyner 
96f2635e84SEric Joyner 	ICE_CQ_INIT_REGS(cq, PF_SB);
97f2635e84SEric Joyner }
98f2635e84SEric Joyner 
99f2635e84SEric Joyner /**
10071d10453SEric Joyner  * ice_check_sq_alive
10171d10453SEric Joyner  * @hw: pointer to the HW struct
10271d10453SEric Joyner  * @cq: pointer to the specific Control queue
10371d10453SEric Joyner  *
10471d10453SEric Joyner  * Returns true if Queue is enabled else false.
10571d10453SEric Joyner  */
10671d10453SEric Joyner bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
10771d10453SEric Joyner {
10871d10453SEric Joyner 	/* check both queue-length and queue-enable fields */
10971d10453SEric Joyner 	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
11071d10453SEric Joyner 		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
11171d10453SEric Joyner 						cq->sq.len_ena_mask)) ==
11271d10453SEric Joyner 			(cq->num_sq_entries | cq->sq.len_ena_mask);
11371d10453SEric Joyner 
11471d10453SEric Joyner 	return false;
11571d10453SEric Joyner }
11671d10453SEric Joyner 
11771d10453SEric Joyner /**
11871d10453SEric Joyner  * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
11971d10453SEric Joyner  * @hw: pointer to the hardware structure
12071d10453SEric Joyner  * @cq: pointer to the specific Control queue
12171d10453SEric Joyner  */
122f2635e84SEric Joyner static int
12371d10453SEric Joyner ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
12471d10453SEric Joyner {
12571d10453SEric Joyner 	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
12671d10453SEric Joyner 
12771d10453SEric Joyner 	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
12871d10453SEric Joyner 	if (!cq->sq.desc_buf.va)
12971d10453SEric Joyner 		return ICE_ERR_NO_MEMORY;
13071d10453SEric Joyner 
131f2635e84SEric Joyner 	return 0;
13271d10453SEric Joyner }
13371d10453SEric Joyner 
13471d10453SEric Joyner /**
13571d10453SEric Joyner  * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
13671d10453SEric Joyner  * @hw: pointer to the hardware structure
13771d10453SEric Joyner  * @cq: pointer to the specific Control queue
13871d10453SEric Joyner  */
139f2635e84SEric Joyner static int
14071d10453SEric Joyner ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
14171d10453SEric Joyner {
14271d10453SEric Joyner 	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
14371d10453SEric Joyner 
14471d10453SEric Joyner 	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
14571d10453SEric Joyner 	if (!cq->rq.desc_buf.va)
14671d10453SEric Joyner 		return ICE_ERR_NO_MEMORY;
147f2635e84SEric Joyner 	return 0;
14871d10453SEric Joyner }
14971d10453SEric Joyner 
/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	/* releases the DMA region backing the descriptor ring */
	ice_free_dma_mem(hw, &ring->desc_buf);
}
16271d10453SEric Joyner 
/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Allocates one DMA buffer per receive descriptor and writes each
 * descriptor to point at its buffer, so events can land as soon as the
 * queue is enabled.  On any allocation failure, everything allocated so
 * far is released and ICE_ERR_NO_MEMORY is returned.
 */
static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with control queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		/* descriptor fields are little-endian on the wire */
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	/* clear pointers so later shutdown cannot double-free */
	cq->rq.r.rq_bi = NULL;
	ice_free(hw, cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}
22671d10453SEric Joyner 
/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Only the buffer-info array and per-entry DMA buffers are allocated
 * here; unlike the RQ path, the send descriptors themselves are left
 * untouched.  On failure, everything allocated so far is released and
 * ICE_ERR_NO_MEMORY is returned.
 */
static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	/* clear pointers so later shutdown cannot double-free */
	cq->sq.r.sq_bi = NULL;
	ice_free(hw, cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}
26671d10453SEric Joyner 
/**
 * ice_cfg_cq_regs - program hardware registers for one control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring (sq or rq)
 * @num_entries: number of descriptors in the ring
 *
 * Resets head/tail, writes the length (with the enable bit set) and the
 * ring's DMA base address, then reads one register back to verify the
 * configuration actually reached the device.
 */
static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}
28571d10453SEric Joyner 
/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 *
 * Returns 0 on success, or the error from the write-back verification
 * in ice_cfg_cq_regs().
 */
static int
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
29871d10453SEric Joyner 
/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 *
 * Returns 0 on success, or the error from ice_cfg_cq_regs().
 */
static int
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}
32071d10453SEric Joyner 
/**
 * ICE_FREE_CQ_BUFS - free all buffers tracked by a control queue ring
 * @hw: pointer to the hardware structure
 * @qi: pointer to the ice_ctl_q_info owning the ring
 * @ring: ring name token, either sq or rq
 *
 * Frees every posted DMA buffer plus the buffer-info array, then clears
 * the pointers so a repeated invocation (e.g. shutdown after a failed
 * init) cannot double-free.  Mirrors the manual cleanup done in the
 * unwind paths of ice_alloc_sq_bufs()/ice_alloc_rq_bufs().
 */
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa)		\
				ice_free_dma_mem((hw),			\
					&(qi)->ring.r.ring##_bi[i]);	\
		(qi)->ring.r.ring##_bi = NULL;				\
	}								\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
	(qi)->ring.dma_head = NULL;					\
} while (0)
3357d7af7f8SEric Joyner 
/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 *
 * Returns 0 on success; ICE_ERR_NOT_READY if the queue is already
 * initialized, ICE_ERR_CFG on invalid input, or an allocation/register
 * programming error.
 */
static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! nonzero count marks the queue initialized */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	/* unwind: release buffers, then the descriptor ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}
39771d10453SEric Joyner 
/**
 * ice_init_rq - initialize receive side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for Receive side of a control queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 *
 * Returns 0 on success; ICE_ERR_NOT_READY if the queue is already
 * initialized, ICE_ERR_CFG on invalid input, or an allocation/register
 * programming error.
 */
static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! nonzero count marks the queue initialized */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	/* unwind: release buffers, then the descriptor ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}
45971d10453SEric Joyner 
/**
 * ice_shutdown_sq - shutdown the transmit side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 *
 * Returns 0 on success, or ICE_ERR_NOT_READY if the queue is not
 * initialized.  Takes and releases cq->sq_lock internally.
 */
static int
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		/* never initialized, or already shut down */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop processing of the control queue */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}
49871d10453SEric Joyner 
/**
 * ice_aq_ver_check - Check the reported AQ API version
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Only a firmware major API version newer than expected is fatal; a
 * minor-version skew greater than two in either direction is merely
 * logged and the driver still loads.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
	u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);

	if (hw->api_maj_ver > exp_fw_api_ver_major) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == exp_fw_api_ver_major) {
		/* Same major version: tolerate +/-2 on the minor version,
		 * log (but still load) outside that window.
		 */
		if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
			ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 exp_fw_api_ver_major, exp_fw_api_ver_minor);
		else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
			ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 exp_fw_api_ver_major, exp_fw_api_ver_minor);
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
			 hw->api_maj_ver, hw->api_min_ver,
			 exp_fw_api_ver_major, exp_fw_api_ver_minor);
	}
	return true;
}
53371d10453SEric Joyner 
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 *
 * Returns 0 on success, or ICE_ERR_NOT_READY if the queue is not
 * initialized.  Takes and releases cq->rq_lock internally.
 */
static int
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		/* never initialized, or already shut down */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}
57371d10453SEric Joyner 
/**
 * ice_idle_aq - stop ARQ/ATQ processing momentarily
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* Clearing the length registers halts both queues */
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->rq.len, 0);

	/* NOTE(review): the 2 ms pause presumably lets in-flight queue
	 * activity settle - confirm against the hardware specification
	 */
	ice_msec_delay(2, false);
}
58671d10453SEric Joyner 
/**
 * ice_init_check_adminq - Check version for Admin Queue to know if its alive
 * @hw: pointer to the hardware structure
 *
 * Issues a get-firmware-version command over the freshly initialized
 * AdminQ and validates the reported AQ API version.  On any failure the
 * AdminQ receive and send queues are shut down again before returning
 * the error.
 */
static int ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* first command over the queue; also proves the queue works */
	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}
61471d10453SEric Joyner 
61571d10453SEric Joyner /**
61671d10453SEric Joyner  * ice_init_ctrlq - main initialization routine for any control Queue
61771d10453SEric Joyner  * @hw: pointer to the hardware structure
61871d10453SEric Joyner  * @q_type: specific Control queue type
61971d10453SEric Joyner  *
62071d10453SEric Joyner  * Prior to calling this function, the driver *MUST* set the following fields
62171d10453SEric Joyner  * in the cq->structure:
62271d10453SEric Joyner  *     - cq->num_sq_entries
62371d10453SEric Joyner  *     - cq->num_rq_entries
62471d10453SEric Joyner  *     - cq->rq_buf_size
62571d10453SEric Joyner  *     - cq->sq_buf_size
62671d10453SEric Joyner  *
62771d10453SEric Joyner  * NOTE: this function does not initialize the controlq locks
62871d10453SEric Joyner  */
629f2635e84SEric Joyner static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
63071d10453SEric Joyner {
63171d10453SEric Joyner 	struct ice_ctl_q_info *cq;
632f2635e84SEric Joyner 	int ret_code;
63371d10453SEric Joyner 
63471d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
63571d10453SEric Joyner 
63671d10453SEric Joyner 	switch (q_type) {
63771d10453SEric Joyner 	case ICE_CTL_Q_ADMIN:
63871d10453SEric Joyner 		ice_adminq_init_regs(hw);
63971d10453SEric Joyner 		cq = &hw->adminq;
64071d10453SEric Joyner 		break;
641f2635e84SEric Joyner 	case ICE_CTL_Q_SB:
642f2635e84SEric Joyner 		ice_sb_init_regs(hw);
643f2635e84SEric Joyner 		cq = &hw->sbq;
644f2635e84SEric Joyner 		break;
64571d10453SEric Joyner 	case ICE_CTL_Q_MAILBOX:
64671d10453SEric Joyner 		ice_mailbox_init_regs(hw);
64771d10453SEric Joyner 		cq = &hw->mailboxq;
64871d10453SEric Joyner 		break;
64971d10453SEric Joyner 	default:
65071d10453SEric Joyner 		return ICE_ERR_PARAM;
65171d10453SEric Joyner 	}
65271d10453SEric Joyner 	cq->qtype = q_type;
65371d10453SEric Joyner 
65471d10453SEric Joyner 	/* verify input for valid configuration */
65571d10453SEric Joyner 	if (!cq->num_rq_entries || !cq->num_sq_entries ||
65671d10453SEric Joyner 	    !cq->rq_buf_size || !cq->sq_buf_size) {
65771d10453SEric Joyner 		return ICE_ERR_CFG;
65871d10453SEric Joyner 	}
65971d10453SEric Joyner 
66071d10453SEric Joyner 	/* setup SQ command write back timeout */
66171d10453SEric Joyner 	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
66271d10453SEric Joyner 
66371d10453SEric Joyner 	/* allocate the ATQ */
66471d10453SEric Joyner 	ret_code = ice_init_sq(hw, cq);
66571d10453SEric Joyner 	if (ret_code)
66671d10453SEric Joyner 		return ret_code;
66771d10453SEric Joyner 
66871d10453SEric Joyner 	/* allocate the ARQ */
66971d10453SEric Joyner 	ret_code = ice_init_rq(hw, cq);
67071d10453SEric Joyner 	if (ret_code)
67171d10453SEric Joyner 		goto init_ctrlq_free_sq;
67271d10453SEric Joyner 
67371d10453SEric Joyner 	/* success! */
674f2635e84SEric Joyner 	return 0;
67571d10453SEric Joyner 
67671d10453SEric Joyner init_ctrlq_free_sq:
67771d10453SEric Joyner 	ice_shutdown_sq(hw, cq);
67871d10453SEric Joyner 	return ret_code;
67971d10453SEric Joyner }
68071d10453SEric Joyner 
/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
static bool ice_is_sbq_supported(struct ice_hw *hw)
{
	/* support is keyed solely off the MAC type; devices without a
	 * sideband queue use the admin queue interface instead
	 */
	return ice_is_generic_mac(hw);
}
692f2635e84SEric Joyner 
693f2635e84SEric Joyner /**
69471d10453SEric Joyner  * ice_shutdown_ctrlq - shutdown routine for any control queue
69571d10453SEric Joyner  * @hw: pointer to the hardware structure
69671d10453SEric Joyner  * @q_type: specific Control queue type
6978923de59SPiotr Kubaj  * @unloading: is the driver unloading itself
69871d10453SEric Joyner  *
69971d10453SEric Joyner  * NOTE: this function does not destroy the control queue locks.
70071d10453SEric Joyner  */
7019c30461dSEric Joyner static void
7029c30461dSEric Joyner ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
7038923de59SPiotr Kubaj 		   bool unloading)
70471d10453SEric Joyner {
70571d10453SEric Joyner 	struct ice_ctl_q_info *cq;
70671d10453SEric Joyner 
70771d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
70871d10453SEric Joyner 
70971d10453SEric Joyner 	switch (q_type) {
71071d10453SEric Joyner 	case ICE_CTL_Q_ADMIN:
71171d10453SEric Joyner 		cq = &hw->adminq;
71271d10453SEric Joyner 		if (ice_check_sq_alive(hw, cq))
7138923de59SPiotr Kubaj 			ice_aq_q_shutdown(hw, unloading);
71471d10453SEric Joyner 		break;
715f2635e84SEric Joyner 	case ICE_CTL_Q_SB:
716f2635e84SEric Joyner 		cq = &hw->sbq;
717f2635e84SEric Joyner 		break;
71871d10453SEric Joyner 	case ICE_CTL_Q_MAILBOX:
71971d10453SEric Joyner 		cq = &hw->mailboxq;
72071d10453SEric Joyner 		break;
72171d10453SEric Joyner 	default:
72271d10453SEric Joyner 		return;
72371d10453SEric Joyner 	}
72471d10453SEric Joyner 
72571d10453SEric Joyner 	ice_shutdown_sq(hw, cq);
72671d10453SEric Joyner 	ice_shutdown_rq(hw, cq);
72771d10453SEric Joyner }
72871d10453SEric Joyner 
/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
	/* Shutdown PHY Sideband (only present on some devices) */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
74971d10453SEric Joyner 
75071d10453SEric Joyner /**
7517d7af7f8SEric Joyner  * ice_init_all_ctrlq - main initialization routine for all control queues
7527d7af7f8SEric Joyner  * @hw: pointer to the hardware structure
7537d7af7f8SEric Joyner  *
7547d7af7f8SEric Joyner  * Prior to calling this function, the driver MUST* set the following fields
7557d7af7f8SEric Joyner  * in the cq->structure for all control queues:
7567d7af7f8SEric Joyner  *     - cq->num_sq_entries
7577d7af7f8SEric Joyner  *     - cq->num_rq_entries
7587d7af7f8SEric Joyner  *     - cq->rq_buf_size
7597d7af7f8SEric Joyner  *     - cq->sq_buf_size
7607d7af7f8SEric Joyner  *
7617d7af7f8SEric Joyner  * NOTE: this function does not initialize the controlq locks.
7627d7af7f8SEric Joyner  */
763f2635e84SEric Joyner int ice_init_all_ctrlq(struct ice_hw *hw)
7647d7af7f8SEric Joyner {
7657d7af7f8SEric Joyner 	u32 retry = 0;
766f2635e84SEric Joyner 	int status;
7677d7af7f8SEric Joyner 
7687d7af7f8SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
7697d7af7f8SEric Joyner 
7707d7af7f8SEric Joyner 	/* Init FW admin queue */
7717d7af7f8SEric Joyner 	do {
7727d7af7f8SEric Joyner 		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
7737d7af7f8SEric Joyner 		if (status)
7747d7af7f8SEric Joyner 			return status;
7757d7af7f8SEric Joyner 
7767d7af7f8SEric Joyner 		status = ice_init_check_adminq(hw);
7777d7af7f8SEric Joyner 		if (status != ICE_ERR_AQ_FW_CRITICAL)
7787d7af7f8SEric Joyner 			break;
7797d7af7f8SEric Joyner 
7807d7af7f8SEric Joyner 		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
7818923de59SPiotr Kubaj 		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
7827d7af7f8SEric Joyner 		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
7837d7af7f8SEric Joyner 	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
7847d7af7f8SEric Joyner 
7857d7af7f8SEric Joyner 	if (status)
7867d7af7f8SEric Joyner 		return status;
787f2635e84SEric Joyner 	/* sideband control queue (SBQ) interface is not supported on some
788f2635e84SEric Joyner 	 * devices. Initialize if supported, else fallback to the admin queue
789f2635e84SEric Joyner 	 * interface
790f2635e84SEric Joyner 	 */
791f2635e84SEric Joyner 	if (ice_is_sbq_supported(hw)) {
792f2635e84SEric Joyner 		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
793f2635e84SEric Joyner 		if (status)
794f2635e84SEric Joyner 			return status;
795f2635e84SEric Joyner 	}
7967d7af7f8SEric Joyner 	/* Init Mailbox queue */
7977d7af7f8SEric Joyner 	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
7987d7af7f8SEric Joyner }
7997d7af7f8SEric Joyner 
/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 * Paired with ice_destroy_ctrlq_locks() on teardown.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}
8117d7af7f8SEric Joyner 
/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
int ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	/* sideband queue locks are only created on devices that have one */
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
8377d7af7f8SEric Joyner 
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 * Counterpart of ice_init_ctrlq_locks().
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}
84971d10453SEric Joyner 
/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw, true);

	ice_destroy_ctrlq_locks(&hw->adminq);
	/* sideband queue locks only exist on devices that support the SBQ */
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}
86971d10453SEric Joyner 
87071d10453SEric Joyner /**
8719c30461dSEric Joyner  * ice_clean_sq - cleans send side of a control queue
87271d10453SEric Joyner  * @hw: pointer to the hardware structure
87371d10453SEric Joyner  * @cq: pointer to the specific Control queue
87471d10453SEric Joyner  *
87571d10453SEric Joyner  * returns the number of free desc
87671d10453SEric Joyner  */
87771d10453SEric Joyner static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
87871d10453SEric Joyner {
87971d10453SEric Joyner 	struct ice_ctl_q_ring *sq = &cq->sq;
88071d10453SEric Joyner 	u16 ntc = sq->next_to_clean;
88171d10453SEric Joyner 	struct ice_aq_desc *desc;
882*f377a0c7SEric Joyner 	u32 head;
88371d10453SEric Joyner 
88471d10453SEric Joyner 	desc = ICE_CTL_Q_DESC(*sq, ntc);
88571d10453SEric Joyner 
886*f377a0c7SEric Joyner 	head = rd32(hw, sq->head);
887*f377a0c7SEric Joyner 	if (head >= sq->count) {
888*f377a0c7SEric Joyner 		ice_debug(hw, ICE_DBG_AQ_MSG,
889*f377a0c7SEric Joyner 			  "Read head value (%d) exceeds allowed range.\n",
890*f377a0c7SEric Joyner 			  head);
891*f377a0c7SEric Joyner 		return 0;
892*f377a0c7SEric Joyner 	}
893*f377a0c7SEric Joyner 
894*f377a0c7SEric Joyner 	while (head != ntc) {
895*f377a0c7SEric Joyner 		ice_debug(hw, ICE_DBG_AQ_MSG,
896*f377a0c7SEric Joyner 			  "ntc %d head %d.\n",
897*f377a0c7SEric Joyner 			  ntc, head);
89871d10453SEric Joyner 		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
89971d10453SEric Joyner 		ntc++;
90071d10453SEric Joyner 		if (ntc == sq->count)
90171d10453SEric Joyner 			ntc = 0;
90271d10453SEric Joyner 		desc = ICE_CTL_Q_DESC(*sq, ntc);
903*f377a0c7SEric Joyner 
904*f377a0c7SEric Joyner 		head = rd32(hw, sq->head);
905*f377a0c7SEric Joyner 		if (head >= sq->count) {
906*f377a0c7SEric Joyner 			ice_debug(hw, ICE_DBG_AQ_MSG,
907*f377a0c7SEric Joyner 				  "Read head value (%d) exceeds allowed range.\n",
908*f377a0c7SEric Joyner 				  head);
909*f377a0c7SEric Joyner 			return 0;
910*f377a0c7SEric Joyner 		}
91171d10453SEric Joyner 	}
91271d10453SEric Joyner 
91371d10453SEric Joyner 	sq->next_to_clean = ntc;
91471d10453SEric Joyner 
91571d10453SEric Joyner 	return ICE_CTL_Q_DESC_UNUSED(sq);
91671d10453SEric Joyner }
91771d10453SEric Joyner 
91871d10453SEric Joyner /**
9199c30461dSEric Joyner  * ice_ctl_q_str - Convert control queue type to string
9209c30461dSEric Joyner  * @qtype: the control queue type
9219c30461dSEric Joyner  *
9229c30461dSEric Joyner  * Returns: A string name for the given control queue type.
9239c30461dSEric Joyner  */
9249c30461dSEric Joyner static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
9259c30461dSEric Joyner {
9269c30461dSEric Joyner 	switch (qtype) {
9279c30461dSEric Joyner 	case ICE_CTL_Q_UNKNOWN:
9289c30461dSEric Joyner 		return "Unknown CQ";
9299c30461dSEric Joyner 	case ICE_CTL_Q_ADMIN:
9309c30461dSEric Joyner 		return "AQ";
9319c30461dSEric Joyner 	case ICE_CTL_Q_MAILBOX:
9329c30461dSEric Joyner 		return "MBXQ";
933f2635e84SEric Joyner 	case ICE_CTL_Q_SB:
934f2635e84SEric Joyner 		return "SBQ";
9359c30461dSEric Joyner 	default:
9369c30461dSEric Joyner 		return "Unrecognized CQ";
9379c30461dSEric Joyner 	}
9389c30461dSEric Joyner }
9399c30461dSEric Joyner 
9409c30461dSEric Joyner /**
94171d10453SEric Joyner  * ice_debug_cq
94271d10453SEric Joyner  * @hw: pointer to the hardware structure
9439c30461dSEric Joyner  * @cq: pointer to the specific Control queue
94471d10453SEric Joyner  * @desc: pointer to control queue descriptor
94571d10453SEric Joyner  * @buf: pointer to command buffer
94671d10453SEric Joyner  * @buf_len: max length of buf
9479c30461dSEric Joyner  * @response: true if this is the writeback response
94871d10453SEric Joyner  *
94971d10453SEric Joyner  * Dumps debug log about control command with descriptor contents.
95071d10453SEric Joyner  */
9519c30461dSEric Joyner static void
9529c30461dSEric Joyner ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
9539c30461dSEric Joyner 	     void *desc, void *buf, u16 buf_len, bool response)
95471d10453SEric Joyner {
95571d10453SEric Joyner 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
95671d10453SEric Joyner 	u16 datalen, flags;
95771d10453SEric Joyner 
95871d10453SEric Joyner 	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
95971d10453SEric Joyner 		return;
96071d10453SEric Joyner 
96171d10453SEric Joyner 	if (!desc)
96271d10453SEric Joyner 		return;
96371d10453SEric Joyner 
96471d10453SEric Joyner 	datalen = LE16_TO_CPU(cq_desc->datalen);
96571d10453SEric Joyner 	flags = LE16_TO_CPU(cq_desc->flags);
96671d10453SEric Joyner 
9679c30461dSEric Joyner 	ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
9689c30461dSEric Joyner 		  ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
96971d10453SEric Joyner 		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
97071d10453SEric Joyner 		  LE16_TO_CPU(cq_desc->retval));
97171d10453SEric Joyner 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
97271d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->cookie_high),
97371d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->cookie_low));
97471d10453SEric Joyner 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
97571d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->params.generic.param0),
97671d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->params.generic.param1));
97771d10453SEric Joyner 	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
97871d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
97971d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
98071d10453SEric Joyner 	/* Dump buffer iff 1) one exists and 2) is either a response indicated
98171d10453SEric Joyner 	 * by the DD and/or CMP flag set or a command with the RD flag set.
98271d10453SEric Joyner 	 */
98371d10453SEric Joyner 	if (buf && cq_desc->datalen != 0 &&
98471d10453SEric Joyner 	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
98571d10453SEric Joyner 	     flags & ICE_AQ_FLAG_RD)) {
98671d10453SEric Joyner 		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
98771d10453SEric Joyner 		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
98871d10453SEric Joyner 				MIN_T(u16, buf_len, datalen));
98971d10453SEric Joyner 	}
99071d10453SEric Joyner }
99171d10453SEric Joyner 
99271d10453SEric Joyner /**
9939c30461dSEric Joyner  * ice_sq_done - check if the last send on a control queue has completed
99471d10453SEric Joyner  * @hw: pointer to the HW struct
99571d10453SEric Joyner  * @cq: pointer to the specific Control queue
99671d10453SEric Joyner  *
9979c30461dSEric Joyner  * Returns: true if all the descriptors on the send side of a control queue
9989c30461dSEric Joyner  *          are finished processing, false otherwise.
99971d10453SEric Joyner  */
100071d10453SEric Joyner bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
100171d10453SEric Joyner {
10029c30461dSEric Joyner 	/* control queue designers suggest use of head for better
100371d10453SEric Joyner 	 * timing reliability than DD bit
100471d10453SEric Joyner 	 */
100571d10453SEric Joyner 	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
100671d10453SEric Joyner }
100771d10453SEric Joyner 
/**
 * ice_sq_send_cmd_nolock - send command to a control queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for a control queue. It prepares the
 * command into a descriptor, bumps the send queue tail, waits for the command
 * to complete, captures status and data for the command, etc.
 *
 * The caller is expected to hold cq->sq_lock; ice_sq_send_cmd() is the
 * locking wrapper around this routine.
 */
int
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u32 total_delay = 0;
	int status = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* buf and buf_size must be provided together or not at all */
	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		/* flag as indirect; large buffers need the LB flag too */
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	/* sanity-check the hardware head pointer before touching the ring */
	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
	ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);

	/* bump the tail to hand the descriptor over to firmware */
	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
	ice_flush(hw);

	/* Wait a short time before initial ice_sq_done() check, to allow
	 * hardware time for completion.
	 */
	ice_usec_delay(5, false);

	/* poll for completion until the command times out */
	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(10, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
	ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);

	/* save writeback AQ if requested */
	if (cd && cd->wb_desc)
		ice_memcpy(cd->wb_desc, desc_on_ring,
			   sizeof(*cd->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if (!cmd_completed) {
		/* a set critical-error bit in either LEN register signals a
		 * firmware fault rather than a plain writeback timeout
		 */
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}
118471d10453SEric Joyner 
118571d10453SEric Joyner /**
11869c30461dSEric Joyner  * ice_sq_send_cmd - send command to a control queue
118771d10453SEric Joyner  * @hw: pointer to the HW struct
118871d10453SEric Joyner  * @cq: pointer to the specific Control queue
11899cf1841cSEric Joyner  * @desc: prefilled descriptor describing the command
119071d10453SEric Joyner  * @buf: buffer to use for indirect commands (or NULL for direct commands)
119171d10453SEric Joyner  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
119271d10453SEric Joyner  * @cd: pointer to command details structure
119371d10453SEric Joyner  *
11949c30461dSEric Joyner  * Main command for the transmit side of a control queue. It puts the command
11959c30461dSEric Joyner  * on the queue, bumps the tail, waits for processing of the command, captures
11969c30461dSEric Joyner  * command status and results, etc.
119771d10453SEric Joyner  */
1198f2635e84SEric Joyner int
119971d10453SEric Joyner ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
120071d10453SEric Joyner 		struct ice_aq_desc *desc, void *buf, u16 buf_size,
120171d10453SEric Joyner 		struct ice_sq_cd *cd)
120271d10453SEric Joyner {
1203f2635e84SEric Joyner 	int status = 0;
120471d10453SEric Joyner 
120571d10453SEric Joyner 	/* if reset is in progress return a soft error */
120671d10453SEric Joyner 	if (hw->reset_ongoing)
120771d10453SEric Joyner 		return ICE_ERR_RESET_ONGOING;
120871d10453SEric Joyner 
120971d10453SEric Joyner 	ice_acquire_lock(&cq->sq_lock);
121071d10453SEric Joyner 	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
121171d10453SEric Joyner 	ice_release_lock(&cq->sq_lock);
121271d10453SEric Joyner 
121371d10453SEric Joyner 	return status;
121471d10453SEric Joyner }
121571d10453SEric Joyner 
121671d10453SEric Joyner /**
121771d10453SEric Joyner  * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
121871d10453SEric Joyner  * @desc: pointer to the temp descriptor (non DMA mem)
121971d10453SEric Joyner  * @opcode: the opcode can be used to decide which flags to turn off or on
122071d10453SEric Joyner  *
122171d10453SEric Joyner  * Fill the desc with default values
122271d10453SEric Joyner  */
122371d10453SEric Joyner void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
122471d10453SEric Joyner {
122571d10453SEric Joyner 	/* zero out the desc */
122671d10453SEric Joyner 	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
122771d10453SEric Joyner 	desc->opcode = CPU_TO_LE16(opcode);
122871d10453SEric Joyner 	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
122971d10453SEric Joyner }
123071d10453SEric Joyner 
/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * Clean one element from the receive side of a control queue. On return 'e'
 * contains contents of the message, and 'pending' contains the number of
 * events left to process.
 *
 * Returns 0 on success, ICE_ERR_AQ_EMPTY if the queue is uninitialized,
 * ICE_ERR_AQ_NO_WORK if there is no new event, or ICE_ERR_AQ_ERROR if the
 * firmware flagged the received descriptor with an error.
 */
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	/* a zero count means the queue was never (or is no longer) set up */
	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	/* capture the FW return code before the descriptor is recycled */
	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode), rq_last_status);
	}
	/* copy the descriptor (and as much of the buffer as fits) to the
	 * caller's event structure; msg_len is clamped to the caller's buffer
	 */
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
	ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	/* re-arm the descriptor with its (possibly large) receive buffer */
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		/* account for ring wrap-around when head is behind ntc */
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
1332