/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include "hinic_compat.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_wq.h"

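/**
 * free_wq_pages - free the DMA-coherent buffer backing a work queue
 * @hwdev: the hardware device
 * @wq: work queue whose buffer is released
 **/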
static void free_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	dma_free_coherent(hwdev, wq->wq_buf_size, (void *)wq->queue_buf_vaddr,
			  (dma_addr_t)wq->queue_buf_paddr);

	wq->queue_buf_paddr = 0;
	wq->queue_buf_vaddr = 0;
}

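/**
 * alloc_wq_pages - allocate a 256KB-aligned DMA-coherent buffer for a work queue
 * @hwdev: the hardware device
 * @wq: work queue to attach the buffer to
 * @socket_id: NUMA socket to allocate from
 *
 * Return: 0 on success, -ENOMEM if allocation fails or the buffer is not
 * 256KB aligned
 **/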
static int alloc_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
			  unsigned int socket_id)
{
	dma_addr_t dma_addr = 0;

	wq->queue_buf_vaddr = (u64)(u64 *)
		dma_zalloc_coherent_aligned256k(hwdev, wq->wq_buf_size,
						&dma_addr, socket_id);
	if (!wq->queue_buf_vaddr) {
		PMD_DRV_LOG(ERR, "Failed to allocate wq page");
		return -ENOMEM;
	}

	if (!ADDR_256K_ALIGNED(dma_addr)) {
		PMD_DRV_LOG(ERR, "Wqe pages are not 256k aligned!");
		dma_free_coherent(hwdev, wq->wq_buf_size,
				  (void *)wq->queue_buf_vaddr,
				  dma_addr);
		return -ENOMEM;
	}
	wq->queue_buf_paddr = dma_addr;

	return 0;
}

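/**
 * hinic_wq_allocate - initialize a work queue and allocate its buffer
 * @hwdev: the hardware device
 * @wq: work queue to initialize
 * @wqebb_shift: log2 of the WQE basic block size
 * @q_depth: queue depth in WQEBBs, must be a power of 2
 * @socket_id: NUMA socket to allocate from
 *
 * Return: 0 on success, negative errno on failure
 **/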
int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
		      u32 wqebb_shift, u16 q_depth, unsigned int socket_id)
{
	int err;

	if (q_depth & (q_depth - 1)) {
		PMD_DRV_LOG(ERR, "WQ q_depth isn't a power of 2");
		return -EINVAL;
	}

	wq->wqebb_size = 1 << wqebb_shift;
	wq->wqebb_shift = wqebb_shift;
	wq->wq_buf_size = ((u32)q_depth) << wqebb_shift;
	wq->q_depth = q_depth;

	if (wq->wq_buf_size > (HINIC_PAGE_SIZE << HINIC_PAGE_SIZE_DPDK)) {
		PMD_DRV_LOG(ERR, "Invalid q_depth %u, wq buffer does not fit in one page",
			    q_depth);
		return -EINVAL;
	}

	err = alloc_wq_pages(hwdev, wq, socket_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate wq pages");
		return err;
	}

	wq->cons_idx = 0;
	wq->prod_idx = 0;
	wq->delta = q_depth;
	wq->mask = q_depth - 1;

	return 0;
}

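/**
 * hinic_wq_free - release the buffer allocated by hinic_wq_allocate
 * @hwdev: the hardware device
 * @wq: work queue to free
 **/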
void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	free_wq_pages(hwdev, wq);
}

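/**
 * hinic_put_wqe - return consumed WQEBBs to the work queue
 * @wq: work queue
 * @num_wqebbs: number of WQEBBs to put back
 **/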
void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs)
{
	wq->cons_idx += num_wqebbs;
	wq->delta += num_wqebbs;
}

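/**
 * hinic_read_wqe - read the WQE at the current consumer index without advancing it
 * @wq: work queue
 * @num_wqebbs: number of WQEBBs the WQE spans
 * @cons_idx: returned masked consumer index
 *
 * Return: address of the WQE, or NULL if fewer than num_wqebbs are pending
 **/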
void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)
{
	u16 curr_cons_idx;

	if ((wq->delta + num_wqebbs) > wq->q_depth)
		return NULL;

	curr_cons_idx = (u16)(wq->cons_idx);

	curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);

	*cons_idx = curr_cons_idx;

	return WQ_WQE_ADDR(wq, (u32)(*cons_idx));
}

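/**
 * hinic_cmdq_alloc - allocate buffers for an array of command queues
 * @wq: array of cmdq work queues
 * @hwdev: the hardware device
 * @cmdq_blocks: number of command queues in the array
 * @wq_buf_size: buffer size of each command queue
 * @wqebb_shift: log2 of the WQE basic block size
 * @q_depth: queue depth in WQEBBs, must be a power of 2
 *
 * Return: 0 on success; on failure, frees the blocks already allocated and
 * returns a negative errno
 **/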
int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev,
		     int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,
		     u16 q_depth)
{
	int i, j, err = -ENOMEM;

	/* caller ensures q_depth is a power of 2 and wqebb_size is not 0 */
	for (i = 0; i < cmdq_blocks; i++) {
		wq[i].wqebb_size = 1 << wqebb_shift;
		wq[i].wqebb_shift = wqebb_shift;
		wq[i].wq_buf_size = wq_buf_size;
		wq[i].q_depth = q_depth;

		err = alloc_wq_pages(hwdev, &wq[i], SOCKET_ID_ANY);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to alloc CMDQ blocks");
			goto cmdq_block_err;
		}

		wq[i].cons_idx = 0;
		wq[i].prod_idx = 0;
		wq[i].delta = q_depth;

		wq[i].mask = q_depth - 1;
	}

	return 0;

cmdq_block_err:
	for (j = 0; j < i; j++)
		free_wq_pages(hwdev, &wq[j]);

	return err;
}

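/**
 * hinic_cmdq_free - free the buffers of an array of command queues
 * @hwdev: the hardware device
 * @wq: array of cmdq work queues
 * @cmdq_blocks: number of command queues in the array
 **/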
void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
		     int cmdq_blocks)
{
	int i;

	for (i = 0; i < cmdq_blocks; i++)
		free_wq_pages(hwdev, &wq[i]);
}

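/**
 * hinic_wq_wqe_pg_clear - reset queue indexes and zero the WQE buffer
 * @wq: work queue to clear
 **/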
void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)
{
	wq->cons_idx = 0;
	wq->prod_idx = 0;

	memset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size);
}

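/**
 * hinic_get_wqe - reserve WQEBBs at the producer index and advance it
 * @wq: work queue
 * @num_wqebbs: number of WQEBBs to reserve
 * @prod_idx: returned masked producer index
 *
 * Note: delta is decremented without a check here, so the caller is expected
 * to verify that enough free WQEBBs are available before calling.
 *
 * Return: address of the reserved WQE
 **/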
void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)
{
	u16 curr_prod_idx;

	wq->delta -= num_wqebbs;
	curr_prod_idx = wq->prod_idx;
	wq->prod_idx += num_wqebbs;
	*prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);

	return WQ_WQE_ADDR(wq, (u32)(*prod_idx));
}

/**
 * hinic_set_sge - set dma area in scatter gather entry
 * @sge: scatter gather entry
 * @addr: dma address
 * @len: length of relevant data in the dma address
 **/
void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len)
{
	sge->hi_addr = upper_32_bits(addr);
	sge->lo_addr = lower_32_bits(addr);
	sge->len = len;
}