xref: /dpdk/drivers/net/hinic/base/hinic_pmd_wq.c (revision 089e5ed727a15da2729cfee9b63533dd120bd04c)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include "hinic_compat.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_wq.h"

static void free_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	dma_free_coherent(hwdev, wq->wq_buf_size, (void *)wq->queue_buf_vaddr,
			(dma_addr_t)wq->queue_buf_paddr);

	wq->queue_buf_paddr = 0;
	wq->queue_buf_vaddr = 0;
}

static int alloc_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	dma_addr_t dma_addr = 0;

	wq->queue_buf_vaddr = (u64)(u64 *)
		dma_zalloc_coherent_aligned256k(hwdev, wq->wq_buf_size,
						&dma_addr, GFP_KERNEL);
	if (!wq->queue_buf_vaddr) {
		PMD_DRV_LOG(ERR, "Failed to allocate wq page");
		return -ENOMEM;
	}

	/* the queue buffer must start on a 256KB boundary */
	if (!ADDR_256K_ALIGNED(dma_addr)) {
		PMD_DRV_LOG(ERR, "Wqe pages are not 256k aligned!");
		dma_free_coherent(hwdev, wq->wq_buf_size,
				  (void *)wq->queue_buf_vaddr,
				  dma_addr);
		return -ENOMEM;
	}
	wq->queue_buf_paddr = dma_addr;

	return 0;
}

int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
		      u32 wqebb_shift, u16 q_depth)
{
	int err;

	if (q_depth & (q_depth - 1)) {
		PMD_DRV_LOG(ERR, "WQ q_depth isn't a power of 2");
		return -EINVAL;
	}

	wq->wqebb_size = 1 << wqebb_shift;
	wq->wqebb_shift = wqebb_shift;
	wq->wq_buf_size = ((u32)q_depth) << wqebb_shift;
	wq->q_depth = q_depth;

	if (wq->wq_buf_size > (HINIC_PAGE_SIZE << HINIC_PAGE_SIZE_DPDK)) {
		PMD_DRV_LOG(ERR, "Invalid q_depth %u, wq buffer does not fit in one wq page",
			q_depth);
		return -EINVAL;
	}

	err = alloc_wq_pages(hwdev, wq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate wq pages");
		return err;
	}

	/* queue starts empty: all q_depth WQEBBs are free */
	wq->cons_idx = 0;
	wq->prod_idx = 0;
	wq->delta = q_depth;
	wq->mask = q_depth - 1;

	return 0;
}

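/*
 * Illustrative usage sketch (not part of the original driver): sizing and
 * setting up a work queue with this API and tearing it down again.  The
 * wqebb_shift and q_depth values below are example numbers only.
 */
#if 0
static int example_setup_wq(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	/* 64-byte WQEBBs (1 << 6) and 1024 entries give a 64KB buffer,
	 * which must fit in one wq page and start 256KB aligned.
	 */
	int err = hinic_wq_allocate(hwdev, wq, 6, 1024);

	if (err)
		return err;

	/* ... post and consume WQEs ... */

	hinic_wq_free(hwdev, wq);
	return 0;
}
#endif
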
void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	free_wq_pages(hwdev, wq);
}

/* Release num_wqebbs WQEBBs: advance the consumer index and return the
 * blocks to the free count (delta).
 */
void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs)
{
	wq->cons_idx += num_wqebbs;
	wq->delta += num_wqebbs;
}

/* Return the WQE at the current consumer index without advancing it, or
 * NULL when fewer than num_wqebbs WQEBBs are outstanding.
 */
void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)
{
	u16 curr_cons_idx;

	if ((wq->delta + num_wqebbs) > wq->q_depth)
		return NULL;

	curr_cons_idx = (u16)(wq->cons_idx);

	curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);

	*cons_idx = curr_cons_idx;

	return WQ_WQE_ADDR(wq, (u32)(*cons_idx));
}

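/*
 * Illustrative usage sketch (not part of the original driver): a completion
 * path that peeks at the oldest in-flight WQE with hinic_read_wqe() and,
 * once the hardware is done with it, releases it with hinic_put_wqe().
 * wqe_is_done() is a hypothetical stand-in for the real completion check.
 */
#if 0
static int example_reap_one(struct hinic_wq *wq, int num_wqebbs)
{
	u16 cons_idx;
	void *wqe = hinic_read_wqe(wq, num_wqebbs, &cons_idx);

	if (!wqe)	/* fewer than num_wqebbs WQEBBs are in flight */
		return 0;

	if (!wqe_is_done(wqe))	/* hypothetical completion check */
		return 0;

	hinic_put_wqe(wq, num_wqebbs);
	return 1;
}
#endif
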
int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev,
		     int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,
		     u16 q_depth)
{
	int i, j, err = -ENOMEM;

	/* the caller must ensure q_depth is a power of 2 and wqebb_size
	 * is not 0; neither is validated here
	 */
	for (i = 0; i < cmdq_blocks; i++) {
		wq[i].wqebb_size = 1 << wqebb_shift;
		wq[i].wqebb_shift = wqebb_shift;
		wq[i].wq_buf_size = wq_buf_size;
		wq[i].q_depth = q_depth;

		err = alloc_wq_pages(hwdev, &wq[i]);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to alloc CMDQ blocks");
			goto cmdq_block_err;
		}

		wq[i].cons_idx = 0;
		wq[i].prod_idx = 0;
		wq[i].delta = q_depth;

		wq[i].mask = q_depth - 1;
	}

	return 0;

cmdq_block_err:
	/* free the blocks that were successfully allocated */
	for (j = 0; j < i; j++)
		free_wq_pages(hwdev, &wq[j]);

	return err;
}

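/*
 * Illustrative usage sketch (not part of the original driver): allocating a
 * set of command queue blocks and releasing them again.  The parameter
 * values are simply passed through; the real ones come from the cmdq
 * initialization code elsewhere in the driver.
 */
#if 0
static int example_setup_cmdqs(struct hinic_hwdev *hwdev,
			       struct hinic_wq *wqs, int blocks,
			       u32 wq_buf_size, u32 wqebb_shift, u16 depth)
{
	int err = hinic_cmdq_alloc(wqs, hwdev, blocks, wq_buf_size,
				   wqebb_shift, depth);

	if (err)	/* partially allocated blocks are already freed */
		return err;

	/* ... use the command queues ... */

	hinic_cmdq_free(hwdev, wqs, blocks);
	return 0;
}
#endif
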
void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
		     int cmdq_blocks)
{
	int i;

	for (i = 0; i < cmdq_blocks; i++)
		free_wq_pages(hwdev, &wq[i]);
}

void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)
{
	wq->cons_idx = 0;
	wq->prod_idx = 0;

	memset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size);
}

/* Reserve num_wqebbs WQEBBs at the current producer index.  No free-space
 * check is done here; the caller must make sure enough free WQEBBs
 * (wq->delta) are available before calling.
 */
void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)
{
	u16 curr_prod_idx;

	wq->delta -= num_wqebbs;
	curr_prod_idx = wq->prod_idx;
	wq->prod_idx += num_wqebbs;
	*prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);

	return WQ_WQE_ADDR(wq, (u32)(*prod_idx));
}

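/*
 * Illustrative usage sketch (not part of the original driver): a producer
 * that checks the free WQEBB count before reserving, since hinic_get_wqe()
 * itself performs no bounds check.
 */
#if 0
static void *example_reserve_wqe(struct hinic_wq *wq, int num_wqebbs,
				 u16 *prod_idx)
{
	/* refuse the request rather than letting delta go negative */
	if ((u32)num_wqebbs > wq->delta)
		return NULL;

	return hinic_get_wqe(wq, num_wqebbs, prod_idx);
}
#endif
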
/**
 * hinic_set_sge - set dma area in scatter gather entry
 * @sge: scatter gather entry
 * @addr: dma address
 * @len: length of relevant data in the dma address
 **/
void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len)
{
	sge->hi_addr = upper_32_bits(addr);
	sge->lo_addr = lower_32_bits(addr);
	sge->len = len;
}
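
/*
 * Illustrative usage sketch (not part of the original driver): building a
 * two-entry scatter-gather list, one SGE per contiguous DMA segment.
 * hinic_set_sge() splits each 64-bit DMA address into the hi/lo words the
 * hardware expects.
 */
#if 0
static void example_fill_sgl(struct hinic_sge *sgl, dma_addr_t hdr_dma,
			     u32 hdr_len, dma_addr_t data_dma, u32 data_len)
{
	hinic_set_sge(&sgl[0], hdr_dma, hdr_len);
	hinic_set_sge(&sgl[1], data_dma, data_len);
}
#endif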