/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include "hinic_compat.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_hwif.h"
#include "hinic_pmd_wq.h"
#include "hinic_pmd_mgmt.h"
#include "hinic_pmd_mbox.h"
#include "hinic_pmd_cmdq.h"

#define CMDQ_CMD_TIMEOUT			5000 /* millisecond */

#define UPPER_8_BITS(data)			(((data) >> 8) & 0xFF)
#define LOWER_8_BITS(data)			((data) & 0xFF)

#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT		0
#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT		23
#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT		24
#define CMDQ_DB_INFO_SRC_TYPE_SHIFT		27

#define CMDQ_DB_INFO_HI_PROD_IDX_MASK		0xFFU
#define CMDQ_DB_INFO_QUEUE_TYPE_MASK		0x1U
#define CMDQ_DB_INFO_CMDQ_TYPE_MASK		0x7U
#define CMDQ_DB_INFO_SRC_TYPE_MASK		0x1FU

#define CMDQ_DB_INFO_SET(val, member)		\
	(((val) & CMDQ_DB_INFO_##member##_MASK) <<	\
		CMDQ_DB_INFO_##member##_SHIFT)

#define CMDQ_CTRL_PI_SHIFT			0
#define CMDQ_CTRL_CMD_SHIFT			16
#define CMDQ_CTRL_MOD_SHIFT			24
#define CMDQ_CTRL_ACK_TYPE_SHIFT		29
#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT		31

#define CMDQ_CTRL_PI_MASK			0xFFFFU
#define CMDQ_CTRL_CMD_MASK			0xFFU
#define CMDQ_CTRL_MOD_MASK			0x1FU
#define CMDQ_CTRL_ACK_TYPE_MASK			0x3U
#define CMDQ_CTRL_HW_BUSY_BIT_MASK		0x1U

#define CMDQ_CTRL_SET(val, member)		\
	(((val) & CMDQ_CTRL_##member##_MASK) << CMDQ_CTRL_##member##_SHIFT)

#define CMDQ_CTRL_GET(val, member)		\
	(((val) >> CMDQ_CTRL_##member##_SHIFT) & CMDQ_CTRL_##member##_MASK)

#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT		0
#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT		15
#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT			22
#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT		23
#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT		27
#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT			29
#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT		31

#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK		0xFFU
#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK		0x1U
#define CMDQ_WQE_HEADER_DATA_FMT_MASK			0x1U
#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK		0x1U
#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK		0x3U
#define CMDQ_WQE_HEADER_CTRL_LEN_MASK			0x3U
#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK		0x1U

#define CMDQ_WQE_HEADER_SET(val, member)	\
	(((val) & CMDQ_WQE_HEADER_##member##_MASK) <<	\
		CMDQ_WQE_HEADER_##member##_SHIFT)

#define CMDQ_WQE_HEADER_GET(val, member)	\
	(((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) &	\
		CMDQ_WQE_HEADER_##member##_MASK)

#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT	0
#define CMDQ_CTXT_EQ_ID_SHIFT			56
#define CMDQ_CTXT_CEQ_ARM_SHIFT			61
#define CMDQ_CTXT_CEQ_EN_SHIFT			62
#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT		63

#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK	0xFFFFFFFFFFFFF
#define CMDQ_CTXT_EQ_ID_MASK			0x1F
#define CMDQ_CTXT_CEQ_ARM_MASK			0x1
#define CMDQ_CTXT_CEQ_EN_MASK			0x1
#define CMDQ_CTXT_HW_BUSY_BIT_MASK		0x1

#define CMDQ_CTXT_PAGE_INFO_SET(val, member)	\
	(((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)

#define CMDQ_CTXT_PAGE_INFO_CLEAR(val, member)	\
	((val) & (~((u64)CMDQ_CTXT_##member##_MASK <<	\
		CMDQ_CTXT_##member##_SHIFT)))

#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT		0
#define CMDQ_CTXT_CI_SHIFT			52

#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK		0xFFFFFFFFFFFFF
#define CMDQ_CTXT_CI_MASK			0xFFF

#define CMDQ_CTXT_BLOCK_INFO_SET(val, member)	\
	(((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)

#define SAVED_DATA_ARM_SHIFT			31

#define SAVED_DATA_ARM_MASK			0x1U

#define SAVED_DATA_SET(val, member)		\
	(((val) & SAVED_DATA_##member##_MASK) << SAVED_DATA_##member##_SHIFT)

#define SAVED_DATA_CLEAR(val, member)		\
	((val) & (~(SAVED_DATA_##member##_MASK << SAVED_DATA_##member##_SHIFT)))

#define WQE_ERRCODE_VAL_SHIFT			20

#define WQE_ERRCODE_VAL_MASK			0xF

#define WQE_ERRCODE_GET(val, member)		\
	(((val) >> WQE_ERRCODE_##member##_SHIFT) & WQE_ERRCODE_##member##_MASK)

#define WQE_COMPLETED(ctrl_info)	CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)

#define WQE_HEADER(wqe)			((struct hinic_cmdq_header *)(wqe))

#define CMDQ_DB_PI_OFF(pi)		(((u16)LOWER_8_BITS(pi)) << 3)

#define CMDQ_DB_ADDR(db_base, pi)	\
	(((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi))

#define CMDQ_PFN(addr, page_size)	((addr) >> (ilog2(page_size)))

#define FIRST_DATA_TO_WRITE_LAST	sizeof(u64)

#define WQE_LCMD_SIZE			64
#define WQE_SCMD_SIZE			64

#define COMPLETE_LEN			3

#define CMDQ_WQEBB_SIZE			64
#define CMDQ_WQEBB_SHIFT		6

#define CMDQ_WQE_SIZE			64

#define HINIC_CMDQ_WQ_BUF_SIZE		4096

#define WQE_NUM_WQEBBS(wqe_size, wq)	\
	((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))

#define cmdq_to_cmdqs(cmdq)	container_of((cmdq) - (cmdq)->cmdq_type, \
				struct hinic_cmdqs, cmdq[0])

#define WAIT_CMDQ_ENABLE_TIMEOUT	300

static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
				 struct hinic_cmdq_ctxt *cmdq_ctxt);
static void hinic_cmdqs_free(struct hinic_hwdev *hwdev);

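/* A cmdq is idle when every WQEBB has been returned to the WQ,
 * i.e. the free-entry delta equals the queue depth.
 */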
bool hinic_cmdq_idle(struct hinic_cmdq *cmdq)
{
	struct hinic_wq *wq = cmdq->wq;

	return ((wq->delta) == wq->q_depth ? true : false);
}

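/* Allocate a command buffer backed by a DMA-able entry from the cmdq
 * buffer pool; released with hinic_free_cmd_buf().
 */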
struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev)
{
	struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
	struct hinic_cmd_buf *cmd_buf;

	cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_KERNEL);
	if (!cmd_buf) {
		PMD_DRV_LOG(ERR, "Allocate cmd buffer failed");
		return NULL;
	}

	cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, &cmd_buf->dma_addr);
	if (!cmd_buf->buf) {
		PMD_DRV_LOG(ERR, "Allocate cmd from the pool failed");
		goto alloc_pci_buf_err;
	}

	return cmd_buf;

alloc_pci_buf_err:
	kfree(cmd_buf);
	return NULL;
}

void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf)
{
	struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;

	pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
	kfree(cmd_buf);
}

static u32 cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
{
	u32 wqe_size = 0;

	switch (wqe_type) {
	case WQE_LCMD_TYPE:
		wqe_size = WQE_LCMD_SIZE;
		break;
	case WQE_SCMD_TYPE:
		wqe_size = WQE_SCMD_SIZE;
		break;
	}

	return wqe_size;
}

static int cmdq_get_wqe_size(enum bufdesc_len len)
{
	int wqe_size = 0;

	switch (len) {
	case BUFDESC_LCMD_LEN:
		wqe_size = WQE_LCMD_SIZE;
		break;
	case BUFDESC_SCMD_LEN:
		wqe_size = WQE_SCMD_SIZE;
		break;
	}

	return wqe_size;
}

static void cmdq_set_completion(struct hinic_cmdq_completion *complete,
				struct hinic_cmd_buf *buf_out)
{
	struct hinic_sge_resp *sge_resp = &complete->sge_resp;

	hinic_set_sge(&sge_resp->sge, buf_out->dma_addr,
		      HINIC_CMDQ_BUF_SIZE);
}

static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe,
				  struct hinic_cmd_buf *buf_in)
{
	hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
}

static void cmdq_fill_db(struct hinic_cmdq_db *db,
			 enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
	db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
			CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
			CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
			CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
}

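/* Ring the cmdq doorbell: build the doorbell record, convert it to big
 * endian and write it to the doorbell address derived from the producer
 * index.
 */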
static void cmdq_set_db(struct hinic_cmdq *cmdq,
			enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
	struct hinic_cmdq_db db;

	cmdq_fill_db(&db, cmdq_type, prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	db.db_info = cpu_to_be32(db.db_info);

	rte_wmb(); /* write all before the doorbell */

	writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
}

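/* Copy a prepared WQE into the work queue. The first 8 bytes hold the
 * header with the HW busy bit, so they are written last to ensure the
 * hardware never sees a partially filled WQE.
 */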
static void cmdq_wqe_fill(void *dst, void *src)
{
	memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
	       (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
	       CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);

	rte_wmb(); /* The first 8 bytes should be written last */

	*(u64 *)dst = *(u64 *)src;
}

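/* Fill the ctrl section and the WQE header (buffer descriptor length,
 * completion/data format, wrapped bit), and set or clear the saved ARM
 * flag for arm commands.
 */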
static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
				  enum hinic_ack_type ack_type,
				  enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
				  enum completion_format complete_format,
				  enum data_format local_data_format,
				  enum bufdesc_len buf_len)
{
	struct hinic_ctrl *ctrl;
	enum ctrl_sect_len ctrl_len;
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
	struct hinic_cmdq_wqe_scmd *wqe_scmd;
	u32 saved_data = WQE_HEADER(wqe)->saved_data;

	if (local_data_format == DATA_SGE) {
		wqe_lcmd = &wqe->wqe_lcmd;

		wqe_lcmd->status.status_info = 0;
		ctrl = &wqe_lcmd->ctrl;
		ctrl_len = CTRL_SECT_LEN;
	} else {
		wqe_scmd = &wqe->inline_wqe.wqe_scmd;

		wqe_scmd->status.status_info = 0;
		ctrl = &wqe_scmd->ctrl;
		ctrl_len = CTRL_DIRECT_SECT_LEN;
	}

	ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) |
			CMDQ_CTRL_SET(cmd, CMD) |
			CMDQ_CTRL_SET(mod, MOD) |
			CMDQ_CTRL_SET(ack_type, ACK_TYPE);

	WQE_HEADER(wqe)->header_info =
		CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
		CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
		CMDQ_WQE_HEADER_SET(local_data_format, DATA_FMT) |
		CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
		CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
		CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
		CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);

	if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) {
		saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
		WQE_HEADER(wqe)->saved_data = saved_data |
						SAVED_DATA_SET(1, ARM);
	} else {
		saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
		WQE_HEADER(wqe)->saved_data = saved_data;
	}
}

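/* Build a long-command (SGE data format) WQE for a sync or async command
 * and attach the request buffer descriptor.
 */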
static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
			      enum cmdq_cmd_type cmd_type,
			      struct hinic_cmd_buf *buf_in,
			      struct hinic_cmd_buf *buf_out, int wrapped,
			      enum hinic_ack_type ack_type,
			      enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
	enum completion_format complete_format = COMPLETE_DIRECT;

	switch (cmd_type) {
	case SYNC_CMD_SGE_RESP:
		if (buf_out) {
			complete_format = COMPLETE_SGE;
			cmdq_set_completion(&wqe_lcmd->completion, buf_out);
		}
		break;
	case SYNC_CMD_DIRECT_RESP:
		complete_format = COMPLETE_DIRECT;
		wqe_lcmd->completion.direct_resp = 0;
		break;
	case ASYNC_CMD:
		complete_format = COMPLETE_DIRECT;
		wqe_lcmd->completion.direct_resp = 0;

		wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in);
		break;
	}

	cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
			      prod_idx, complete_format, DATA_SGE,
			      BUFDESC_LCMD_LEN);

	cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
}

static int cmdq_params_valid(struct hinic_cmd_buf *buf_in)
{
	if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
		PMD_DRV_LOG(ERR, "Invalid CMDQ buffer size");
		return -EINVAL;
	}

	return 0;
}

static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs)
{
	unsigned long end;

	end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
	do {
		if (cmdqs->status & HINIC_CMDQ_ENABLE)
			return 0;

	} while (time_before(jiffies, end));

	return -EBUSY;
}

static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
				int errcode)
{
	cmdq->errcode[prod_idx] = errcode;
}

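/* Clear the HW busy bit of a completed WQE and return its WQEBBs to the
 * work queue.
 */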
static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
				   struct hinic_cmdq_wqe *wqe)
{
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
	struct hinic_cmdq_inline_wqe *inline_wqe;
	struct hinic_cmdq_wqe_scmd *wqe_scmd;
	struct hinic_ctrl *ctrl;
	u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
	int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
	int wqe_size = cmdq_get_wqe_size(buf_len);
	u16 num_wqebbs;

	if (wqe_size == WQE_LCMD_SIZE) {
		wqe_lcmd = &wqe->wqe_lcmd;
		ctrl = &wqe_lcmd->ctrl;
	} else {
		inline_wqe = &wqe->inline_wqe;
		wqe_scmd = &inline_wqe->wqe_scmd;
		ctrl = &wqe_scmd->ctrl;
	}

	/* clear HW busy bit */
	ctrl->ctrl_info = 0;

	rte_wmb(); /* verify wqe is clear */

	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
	hinic_put_wqe(cmdq->wq, num_wqebbs);
}

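/* Push the context of every cmdq to the management firmware and mark the
 * cmdqs as enabled on success.
 */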
static int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
{
	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
	struct hinic_cmdq_ctxt *cmdq_ctxt;
	struct hinic_cmdq_ctxt cmdq_ctxt_out;
	enum hinic_cmdq_type cmdq_type;
	u16 out_size = sizeof(cmdq_ctxt_out);
	u16 in_size;
	int err;

	cmdq_type = HINIC_CMDQ_SYNC;
	memset(&cmdq_ctxt_out, 0, out_size);
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
		cmdq_ctxt->resp_aeq_num = HINIC_AEQ1;
		in_size = sizeof(*cmdq_ctxt);
		err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
					     HINIC_MGMT_CMD_CMDQ_CTXT_SET,
					     cmdq_ctxt, in_size, &cmdq_ctxt_out,
					     &out_size, 0);
		if (err || !out_size || cmdq_ctxt_out.status) {
			if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW ||
			    err == HINIC_DEV_BUSY_ACTIVE_FW) {
				cmdqs->status |= HINIC_CMDQ_SET_FAIL;
				PMD_DRV_LOG(ERR, "PF or VF fw is hot active");
			}
			PMD_DRV_LOG(ERR, "Set cmdq ctxt failed, err: %d, status: 0x%x, out_size: 0x%x",
				err, cmdq_ctxt_out.status, out_size);
			return -EIO;
		}
	}

	cmdqs->status &= ~HINIC_CMDQ_SET_FAIL;
	cmdqs->status |= HINIC_CMDQ_ENABLE;

	return 0;
}

void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev)
{
	hinic_cmdqs_free(hwdev);
}

int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev)
{
	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
	enum hinic_cmdq_type cmdq_type;

	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		cmdqs->cmdq[cmdq_type].wrapped = 1;
		hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq);
	}

	return hinic_set_cmdq_ctxts(hwdev);
}

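/* Initialize one cmdq: bind its work queue, allocate the per-entry errcode
 * and cmd info arrays and map a doorbell address.
 */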
static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev,
		     struct hinic_wq *wq, enum hinic_cmdq_type q_type)
{
	void __iomem *db_base;
	int err = 0;
	size_t errcode_size;
	size_t cmd_infos_size;

	cmdq->wq = wq;
	cmdq->cmdq_type = q_type;
	cmdq->wrapped = 1;

	spin_lock_init(&cmdq->cmdq_lock);

	errcode_size = wq->q_depth * sizeof(*cmdq->errcode);
	cmdq->errcode = kzalloc(errcode_size, GFP_KERNEL);
	if (!cmdq->errcode) {
		PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed");
		spin_lock_deinit(&cmdq->cmdq_lock);
		return -ENOMEM;
	}

	cmd_infos_size = wq->q_depth * sizeof(*cmdq->cmd_infos);
	cmdq->cmd_infos = kzalloc(cmd_infos_size, GFP_KERNEL);
	if (!cmdq->cmd_infos) {
		PMD_DRV_LOG(ERR, "Allocate cmd info for cmdq failed");
		err = -ENOMEM;
		goto cmd_infos_err;
	}

	err = hinic_alloc_db_addr(hwdev, &db_base);
	if (err)
		goto alloc_db_err;

	cmdq->db_base = (u8 *)db_base;
	return 0;

alloc_db_err:
	kfree(cmdq->cmd_infos);

cmd_infos_err:
	kfree(cmdq->errcode);
	spin_lock_deinit(&cmdq->cmdq_lock);

	return err;
}

static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq)
{
	hinic_free_db_addr(hwdev, cmdq->db_base);
	kfree(cmdq->cmd_infos);
	kfree(cmdq->errcode);
	spin_lock_deinit(&cmdq->cmdq_lock);
}

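/* Create all cmdqs: allocate the saved WQs and the command buffer pool,
 * initialize each cmdq and program the contexts into firmware.
 */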
static int hinic_cmdqs_init(struct hinic_hwdev *hwdev)
{
	struct hinic_cmdqs *cmdqs;
	struct hinic_cmdq_ctxt *cmdq_ctxt;
	enum hinic_cmdq_type type, cmdq_type;
	size_t saved_wqs_size;
	int err;

	cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
	if (!cmdqs)
		return -ENOMEM;

	hwdev->cmdqs = cmdqs;
	cmdqs->hwdev = hwdev;

	saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
	cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL);
	if (!cmdqs->saved_wqs) {
		PMD_DRV_LOG(ERR, "Allocate saved wqs failed");
		err = -ENOMEM;
		goto alloc_wqs_err;
	}

	cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev,
					      HINIC_CMDQ_BUF_SIZE,
					      HINIC_CMDQ_BUF_SIZE, 0ULL);
	if (!cmdqs->cmd_buf_pool) {
		PMD_DRV_LOG(ERR, "Create cmdq buffer pool failed");
		err = -ENOMEM;
		goto pool_create_err;
	}

	err = hinic_cmdq_alloc(cmdqs->saved_wqs, hwdev,
			       HINIC_MAX_CMDQ_TYPES, HINIC_CMDQ_WQ_BUF_SIZE,
			       CMDQ_WQEBB_SHIFT, HINIC_CMDQ_DEPTH);
	if (err) {
		PMD_DRV_LOG(ERR, "Allocate cmdq failed");
		goto cmdq_alloc_err;
	}

	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
				&cmdqs->saved_wqs[cmdq_type], cmdq_type);
		if (err) {
			PMD_DRV_LOG(ERR, "Initialize cmdq failed");
			goto init_cmdq_err;
		}

		cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
		cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], cmdq_ctxt);
	}

	err = hinic_set_cmdq_ctxts(hwdev);
	if (err)
		goto init_cmdq_err;

	return 0;

init_cmdq_err:
	type = HINIC_CMDQ_SYNC;
	for ( ; type < cmdq_type; type++)
		free_cmdq(hwdev, &cmdqs->cmdq[type]);

	hinic_cmdq_free(hwdev, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES);

cmdq_alloc_err:
	dma_pool_destroy(cmdqs->cmd_buf_pool);

pool_create_err:
	kfree(cmdqs->saved_wqs);

alloc_wqs_err:
	kfree(cmdqs);

	return err;
}

static void hinic_cmdqs_free(struct hinic_hwdev *hwdev)
{
	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
	enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;

	cmdqs->status &= ~HINIC_CMDQ_ENABLE;

	for ( ; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
		free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]);

	hinic_cmdq_free(hwdev, cmdqs->saved_wqs,
			HINIC_MAX_CMDQ_TYPES);

	dma_pool_destroy(cmdqs->cmd_buf_pool);

	kfree(cmdqs->saved_wqs);

	kfree(cmdqs);
}

static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
{
	struct hinic_root_ctxt root_ctxt;
	u16 out_size = sizeof(root_ctxt);
	int err;

	memset(&root_ctxt, 0, sizeof(root_ctxt));
	root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	root_ctxt.func_idx = hinic_global_func_id(hwdev);
	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
	root_ctxt.set_cmdq_depth = 1;
	root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_VAT_SET,
				     &root_ctxt, sizeof(root_ctxt),
				     &root_ctxt, &out_size, 0);
	if (err || !out_size || root_ctxt.mgmt_msg_head.status) {
		PMD_DRV_LOG(ERR, "Set cmdq depth failed, err: %d, status: 0x%x, out_size: 0x%x",
			err, root_ctxt.mgmt_msg_head.status, out_size);
		return -EIO;
	}

	return 0;
}

int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
{
	int err;

	err = hinic_cmdqs_init(hwdev);
	if (err) {
		PMD_DRV_LOG(ERR, "Init cmd queues failed");
		return err;
	}

	err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH);
	if (err) {
		PMD_DRV_LOG(ERR, "Set cmdq depth failed");
		goto set_cmdq_depth_err;
	}

	return 0;

set_cmdq_depth_err:
	hinic_cmdqs_free(hwdev);

	return err;
}

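/* Fill the cmdq context with the current WQE page PFN, CEQ settings,
 * WQ block PFN and start consumer index for this queue.
 */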
static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
				 struct hinic_cmdq_ctxt *cmdq_ctxt)
{
	struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)cmdq_to_cmdqs(cmdq);
	struct hinic_hwdev *hwdev = cmdqs->hwdev;
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
	u64 wq_first_page_paddr, pfn;

	u16 start_ci = (u16)(wq->cons_idx);

	/* The data in the HW is in Big Endian Format */
	wq_first_page_paddr = wq->queue_buf_paddr;

	pfn = CMDQ_PFN(wq_first_page_paddr, HINIC_PAGE_SIZE);
	ctxt_info->curr_wqe_page_pfn =
		CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
		CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
		CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_ARM) |
		CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
		CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);

	ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
				CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
	cmdq_ctxt->func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
	cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
	cmdq_ctxt->cmdq_id = cmdq->cmdq_type;
}

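/* Poll the WQE at the current consumer index until hardware reports
 * completion or the timeout expires, then record the error code and
 * release the WQE.
 */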
static int hinic_cmdq_poll_msg(struct hinic_cmdq *cmdq, u32 timeout)
{
	struct hinic_cmdq_wqe *wqe;
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
	struct hinic_ctrl *ctrl;
	struct hinic_cmdq_cmd_info *cmd_info;
	u32 status_info, ctrl_info;
	u16 ci;
	int errcode;
	unsigned long end;
	int done = 0;
	int rc = 0;

	wqe = hinic_read_wqe(cmdq->wq, 1, &ci);
	if (wqe == NULL) {
		PMD_DRV_LOG(ERR, "No outstanding cmdq msg");
		return -EINVAL;
	}

	cmd_info = &cmdq->cmd_infos[ci];
	/* this cmd has not been filled and sent to hw, or the TMO msg ack
	 * was already received
	 */
	if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) {
		PMD_DRV_LOG(ERR, "Cmdq msg has not been filled and sent to hw, or the TMO msg ack was received, cmdq ci: %u",
			    ci);
		return -EINVAL;
	}

	/* only the arm bit uses an scmd wqe; this wqe is an lcmd */
	wqe_lcmd = &wqe->wqe_lcmd;
	ctrl = &wqe_lcmd->ctrl;
	end = jiffies + msecs_to_jiffies(timeout);
	do {
		ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
		if (WQE_COMPLETED(ctrl_info)) {
			done = 1;
			break;
		}

		rte_delay_ms(1);
	} while (time_before(jiffies, end));

	if (done) {
		status_info = be32_to_cpu(wqe_lcmd->status.status_info);
		errcode = WQE_ERRCODE_GET(status_info, VAL);
		cmdq_update_errcode(cmdq, ci, errcode);
		clear_wqe_complete_bit(cmdq, wqe);
		rc = 0;
	} else {
		PMD_DRV_LOG(ERR, "Poll cmdq msg time out, ci: %u", ci);
		rc = -ETIMEDOUT;
	}

	/* set this cmd invalid */
	cmd_info->cmd_type = HINIC_CMD_TYPE_NONE;

	return rc;
}

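/* Send a synchronous direct-response command: post an lcmd WQE, ring the
 * doorbell and poll for the completion while holding the cmdq lock.
 */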
static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
				     enum hinic_ack_type ack_type,
				     enum hinic_mod_type mod, u8 cmd,
				     struct hinic_cmd_buf *buf_in,
				     u64 *out_param, u32 timeout)
{
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_cmdq_wqe *curr_wqe, wqe;
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
	u16 curr_prod_idx, next_prod_idx, num_wqebbs;
	int wrapped;
	u32 timeo, wqe_size;
	int err;

	wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);

	/* Keep wrapped and doorbell index correct. */
	spin_lock(&cmdq->cmdq_lock);

	curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
	if (!curr_wqe) {
		err = -EBUSY;
		goto cmdq_unlock;
	}

	memset(&wqe, 0, sizeof(wqe));
	wrapped = cmdq->wrapped;

	next_prod_idx = curr_prod_idx + num_wqebbs;
	if (next_prod_idx >= wq->q_depth) {
		cmdq->wrapped = !cmdq->wrapped;
		next_prod_idx -= wq->q_depth;
	}

	cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
			  wrapped, ack_type, mod, cmd, curr_prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	hinic_cpu_to_be32(&wqe, wqe_size);

	/* The CMDQ WQE is not a shadow WQE, so it is written directly to the wq */
	cmdq_wqe_fill(curr_wqe, &wqe);

	cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_NORMAL;

	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

	timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
	err = hinic_cmdq_poll_msg(cmdq, timeo);
	if (err) {
		PMD_DRV_LOG(ERR, "Cmdq poll msg ack failed, prod idx: 0x%x",
			    curr_prod_idx);
		err = -ETIMEDOUT;
		goto cmdq_unlock;
	}

	rte_smp_rmb(); /* read error code after completion */

	if (out_param) {
		wqe_lcmd = &curr_wqe->wqe_lcmd;
		*out_param = cpu_to_be64(wqe_lcmd->completion.direct_resp);
	}

	if (cmdq->errcode[curr_prod_idx] > 1) {
		err = cmdq->errcode[curr_prod_idx];
		goto cmdq_unlock;
	}

cmdq_unlock:
	spin_unlock(&cmdq->cmdq_lock);

	return err;
}

int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
			   enum hinic_mod_type mod, u8 cmd,
			   struct hinic_cmd_buf *buf_in,
			   u64 *out_param, u32 timeout)
{
	struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
	int err = cmdq_params_valid(buf_in);

	if (err) {
		PMD_DRV_LOG(ERR, "Invalid CMDQ parameters");
		return err;
	}

	err = wait_cmdqs_enable(cmdqs);
	if (err) {
		PMD_DRV_LOG(ERR, "Cmdq is disabled");
		return err;
	}

	return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
					 ack_type, mod, cmd, buf_in,
					 out_param, timeout);
}