1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
3 */
4
5 #include "roc_api.h"
6 #include "roc_priv.h"
7
8 #define REE0_PF 19
9 #define REE1_PF 20
10
11 static int
roc_ree_available_queues_get(struct roc_ree_vf * vf,uint16_t * nb_queues)12 roc_ree_available_queues_get(struct roc_ree_vf *vf, uint16_t *nb_queues)
13 {
14 struct free_rsrcs_rsp *rsp;
15 struct dev *dev = vf->dev;
16 int ret;
17
18 mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
19
20 ret = mbox_process_msg(dev->mbox, (void *)&rsp);
21 if (ret)
22 return -EIO;
23
24 if (vf->block_address == RVU_BLOCK_ADDR_REE0)
25 *nb_queues = rsp->ree0;
26 else
27 *nb_queues = rsp->ree1;
28 return 0;
29 }
30
31 static int
roc_ree_max_matches_get(struct roc_ree_vf * vf,uint8_t * max_matches)32 roc_ree_max_matches_get(struct roc_ree_vf *vf, uint8_t *max_matches)
33 {
34 uint64_t val;
35 int ret;
36
37 ret = roc_ree_af_reg_read(vf, REE_AF_REEXM_MAX_MATCH, &val);
38 if (ret)
39 return ret;
40
41 *max_matches = val;
42 return 0;
43 }
44
45 int
roc_ree_queues_attach(struct roc_ree_vf * vf,uint8_t nb_queues)46 roc_ree_queues_attach(struct roc_ree_vf *vf, uint8_t nb_queues)
47 {
48 struct rsrc_attach_req *req;
49 struct mbox *mbox;
50
51 mbox = vf->dev->mbox;
52 /* Ask AF to attach required LFs */
53 req = mbox_alloc_msg_attach_resources(mbox);
54 if (req == NULL) {
55 plt_err("Could not allocate mailbox message");
56 return -EFAULT;
57 }
58
59 /* 1 LF = 1 queue */
60 req->reelfs = nb_queues;
61 req->ree_blkaddr = vf->block_address;
62
63 if (mbox_process(mbox) < 0)
64 return -EIO;
65
66 /* Update number of attached queues */
67 vf->nb_queues = nb_queues;
68
69 return 0;
70 }
71
72 int
roc_ree_queues_detach(struct roc_ree_vf * vf)73 roc_ree_queues_detach(struct roc_ree_vf *vf)
74 {
75 struct rsrc_detach_req *req;
76 struct mbox *mbox;
77
78 mbox = vf->dev->mbox;
79 req = mbox_alloc_msg_detach_resources(mbox);
80 if (req == NULL) {
81 plt_err("Could not allocate mailbox message");
82 return -EFAULT;
83 }
84 req->reelfs = true;
85 req->partial = true;
86 if (mbox_process(mbox) < 0)
87 return -EIO;
88
89 /* Queues have been detached */
90 vf->nb_queues = 0;
91
92 return 0;
93 }
94
95 int
roc_ree_msix_offsets_get(struct roc_ree_vf * vf)96 roc_ree_msix_offsets_get(struct roc_ree_vf *vf)
97 {
98 struct msix_offset_rsp *rsp;
99 struct mbox *mbox;
100 uint32_t i, ret;
101
102 /* Get REE MSI-X vector offsets */
103 mbox = vf->dev->mbox;
104 mbox_alloc_msg_msix_offset(mbox);
105
106 ret = mbox_process_msg(mbox, (void *)&rsp);
107 if (ret)
108 return ret;
109
110 for (i = 0; i < vf->nb_queues; i++) {
111 if (vf->block_address == RVU_BLOCK_ADDR_REE0)
112 vf->lf_msixoff[i] = rsp->ree0_lf_msixoff[i];
113 else
114 vf->lf_msixoff[i] = rsp->ree1_lf_msixoff[i];
115 plt_ree_dbg("lf_msixoff[%d] 0x%x", i, vf->lf_msixoff[i]);
116 }
117
118 return 0;
119 }
120
121 static int
ree_send_mbox_msg(struct roc_ree_vf * vf)122 ree_send_mbox_msg(struct roc_ree_vf *vf)
123 {
124 struct mbox *mbox = vf->dev->mbox;
125 int ret;
126
127 mbox_msg_send(mbox, 0);
128
129 ret = mbox_wait_for_rsp(mbox, 0);
130 if (ret < 0) {
131 plt_err("Could not get mailbox response");
132 return ret;
133 }
134
135 return 0;
136 }
137
138 int
roc_ree_config_lf(struct roc_ree_vf * vf,uint8_t lf,uint8_t pri,uint32_t size)139 roc_ree_config_lf(struct roc_ree_vf *vf, uint8_t lf, uint8_t pri, uint32_t size)
140 {
141 struct ree_lf_req_msg *req;
142 struct mbox *mbox;
143 int ret;
144
145 mbox = vf->dev->mbox;
146 req = mbox_alloc_msg_ree_config_lf(mbox);
147 if (req == NULL) {
148 plt_err("Could not allocate mailbox message");
149 return -EFAULT;
150 }
151
152 req->lf = lf;
153 req->pri = pri ? 1 : 0;
154 req->size = size;
155 req->blkaddr = vf->block_address;
156
157 ret = mbox_process(mbox);
158 if (ret < 0) {
159 plt_err("Could not get mailbox response");
160 return ret;
161 }
162 return 0;
163 }
164
/**
 * Read an AF register over the mailbox.
 *
 * Builds a REE_RD_WR_REGISTER request by hand (raw mbox_alloc_msg_rsp,
 * not a generated helper), sends it, and then reads the response
 * directly out of the shared mailbox memory at the computed rx offset.
 * The statement order (alloc, fill, send, then read from mbase) is
 * load-bearing; do not reorder.
 *
 * @param vf  REE VF handle.
 * @param reg AF register offset to read.
 * @param val Output: value read from the register.
 * @return 0 on success, -EFAULT on mbox allocation failure, negative
 *         mailbox error otherwise.
 */
int
roc_ree_af_reg_read(struct roc_ree_vf *vf, uint64_t reg, uint64_t *val)
{
	struct ree_rd_wr_reg_msg *msg;
	struct mbox_dev *mdev;
	struct mbox *mbox;
	int ret, off;

	mbox = vf->dev->mbox;
	mdev = &mbox->dev[0];
	msg = (struct ree_rd_wr_reg_msg *)mbox_alloc_msg_rsp(
		mbox, 0, sizeof(*msg), sizeof(*msg));
	if (msg == NULL) {
		plt_err("Could not allocate mailbox message");
		return -EFAULT;
	}

	msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
	msg->hdr.sig = MBOX_REQ_SIG;
	msg->hdr.pcifunc = vf->dev->pf_func;
	msg->is_write = 0;
	msg->reg_offset = reg;
	/* NOTE(review): ret_val carries the caller's pointer through the
	 * request; presumably consumed by the AF response path — the value
	 * is still copied out of the response below. */
	msg->ret_val = val;
	msg->blkaddr = vf->block_address;

	ret = ree_send_mbox_msg(vf);
	if (ret < 0)
		return ret;

	/* Response sits right after the aligned mbox header in the rx
	 * region of mbox dev 0. */
	off = mbox->rx_start +
	      RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	msg = (struct ree_rd_wr_reg_msg *)((uintptr_t)mdev->mbase + off);

	*val = msg->val;

	return 0;
}
202
203 int
roc_ree_af_reg_write(struct roc_ree_vf * vf,uint64_t reg,uint64_t val)204 roc_ree_af_reg_write(struct roc_ree_vf *vf, uint64_t reg, uint64_t val)
205 {
206 struct ree_rd_wr_reg_msg *msg;
207 struct mbox *mbox;
208
209 mbox = vf->dev->mbox;
210 msg = (struct ree_rd_wr_reg_msg *)mbox_alloc_msg_rsp(
211 mbox, 0, sizeof(*msg), sizeof(*msg));
212 if (msg == NULL) {
213 plt_err("Could not allocate mailbox message");
214 return -EFAULT;
215 }
216
217 msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
218 msg->hdr.sig = MBOX_REQ_SIG;
219 msg->hdr.pcifunc = vf->dev->pf_func;
220 msg->is_write = 1;
221 msg->reg_offset = reg;
222 msg->val = val;
223 msg->blkaddr = vf->block_address;
224
225 return ree_send_mbox_msg(vf);
226 }
227
228 int
roc_ree_rule_db_get(struct roc_ree_vf * vf,char * rule_db,uint32_t rule_db_len,char * rule_dbi,uint32_t rule_dbi_len)229 roc_ree_rule_db_get(struct roc_ree_vf *vf, char *rule_db, uint32_t rule_db_len,
230 char *rule_dbi, uint32_t rule_dbi_len)
231 {
232 struct ree_rule_db_get_req_msg *req;
233 struct ree_rule_db_get_rsp_msg *rsp;
234 char *rule_db_ptr = (char *)rule_db;
235 struct mbox *mbox;
236 int ret, last = 0;
237 uint32_t len = 0;
238
239 mbox = vf->dev->mbox;
240 if (!rule_db) {
241 plt_err("Couldn't return rule db due to NULL pointer");
242 return -EFAULT;
243 }
244
245 while (!last) {
246 req = (struct ree_rule_db_get_req_msg *)mbox_alloc_msg_rsp(
247 mbox, 0, sizeof(*req), sizeof(*rsp));
248 if (!req) {
249 plt_err("Could not allocate mailbox message");
250 return -EFAULT;
251 }
252
253 req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
254 req->hdr.sig = MBOX_REQ_SIG;
255 req->hdr.pcifunc = vf->dev->pf_func;
256 req->blkaddr = vf->block_address;
257 req->is_dbi = 0;
258 req->offset = len;
259 ret = mbox_process_msg(mbox, (void *)&rsp);
260 if (ret)
261 return ret;
262 if (rule_db_len < len + rsp->len) {
263 plt_err("Rule db size is too small");
264 return -EFAULT;
265 }
266 mbox_memcpy(rule_db_ptr, rsp->rule_db, rsp->len);
267 len += rsp->len;
268 rule_db_ptr = rule_db_ptr + rsp->len;
269 last = rsp->is_last;
270 }
271
272 if (rule_dbi) {
273 req = (struct ree_rule_db_get_req_msg *)mbox_alloc_msg_rsp(
274 mbox, 0, sizeof(*req), sizeof(*rsp));
275 if (!req) {
276 plt_err("Could not allocate mailbox message");
277 return -EFAULT;
278 }
279
280 req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
281 req->hdr.sig = MBOX_REQ_SIG;
282 req->hdr.pcifunc = vf->dev->pf_func;
283 req->blkaddr = vf->block_address;
284 req->is_dbi = 1;
285 req->offset = 0;
286
287 ret = mbox_process_msg(mbox, (void *)&rsp);
288 if (ret)
289 return ret;
290 if (rule_dbi_len < rsp->len) {
291 plt_err("Rule dbi size is too small");
292 return -EFAULT;
293 }
294 mbox_memcpy(rule_dbi, rsp->rule_db, rsp->len);
295 }
296 return 0;
297 }
298
299 int
roc_ree_rule_db_len_get(struct roc_ree_vf * vf,uint32_t * rule_db_len,uint32_t * rule_dbi_len)300 roc_ree_rule_db_len_get(struct roc_ree_vf *vf, uint32_t *rule_db_len,
301 uint32_t *rule_dbi_len)
302 {
303 struct ree_rule_db_len_rsp_msg *rsp;
304 struct ree_req_msg *req;
305 struct mbox *mbox;
306 int ret;
307
308 mbox = vf->dev->mbox;
309 req = (struct ree_req_msg *)mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
310 sizeof(*rsp));
311 if (!req) {
312 plt_err("Could not allocate mailbox message");
313 return -EFAULT;
314 }
315
316 req->hdr.id = MBOX_MSG_REE_RULE_DB_LEN_GET;
317 req->hdr.sig = MBOX_REQ_SIG;
318 req->hdr.pcifunc = vf->dev->pf_func;
319 req->blkaddr = vf->block_address;
320 ret = mbox_process_msg(mbox, (void *)&rsp);
321 if (ret)
322 return ret;
323 if (rule_db_len != NULL)
324 *rule_db_len = rsp->len;
325 if (rule_dbi_len != NULL)
326 *rule_dbi_len = rsp->inc_len;
327
328 return 0;
329 }
330
331 static int
ree_db_msg(struct roc_ree_vf * vf,const char * db,uint32_t db_len,int inc,int dbi)332 ree_db_msg(struct roc_ree_vf *vf, const char *db, uint32_t db_len, int inc,
333 int dbi)
334 {
335 uint32_t len_left = db_len, offset = 0;
336 struct ree_rule_db_prog_req_msg *req;
337 const char *rule_db_ptr = db;
338 struct mbox *mbox;
339 struct msg_rsp *rsp;
340 int ret;
341
342 mbox = vf->dev->mbox;
343 while (len_left) {
344 req = (struct ree_rule_db_prog_req_msg *)mbox_alloc_msg_rsp(
345 mbox, 0, sizeof(*req), sizeof(*rsp));
346 if (!req) {
347 plt_err("Could not allocate mailbox message");
348 return -EFAULT;
349 }
350 req->hdr.id = MBOX_MSG_REE_RULE_DB_PROG;
351 req->hdr.sig = MBOX_REQ_SIG;
352 req->hdr.pcifunc = vf->dev->pf_func;
353 req->offset = offset;
354 req->total_len = db_len;
355 req->len = REE_RULE_DB_REQ_BLOCK_SIZE;
356 req->is_incremental = inc;
357 req->is_dbi = dbi;
358 req->blkaddr = vf->block_address;
359
360 if (len_left < REE_RULE_DB_REQ_BLOCK_SIZE) {
361 req->is_last = true;
362 req->len = len_left;
363 }
364 mbox_memcpy(req->rule_db, rule_db_ptr, req->len);
365 ret = mbox_process_msg(mbox, (void *)&rsp);
366 if (ret) {
367 plt_err("Programming mailbox processing failed");
368 return ret;
369 }
370 len_left -= req->len;
371 offset += req->len;
372 rule_db_ptr = rule_db_ptr + req->len;
373 }
374 return 0;
375 }
376
377 int
roc_ree_rule_db_prog(struct roc_ree_vf * vf,const char * rule_db,uint32_t rule_db_len,const char * rule_dbi,uint32_t rule_dbi_len)378 roc_ree_rule_db_prog(struct roc_ree_vf *vf, const char *rule_db,
379 uint32_t rule_db_len, const char *rule_dbi,
380 uint32_t rule_dbi_len)
381 {
382 int inc, ret;
383
384 if (rule_db_len == 0) {
385 plt_err("Couldn't program empty rule db");
386 return -EFAULT;
387 }
388 inc = (rule_dbi_len != 0);
389 if ((rule_db == NULL) || (inc && (rule_dbi == NULL))) {
390 plt_err("Couldn't program NULL rule db");
391 return -EFAULT;
392 }
393 if (inc) {
394 ret = ree_db_msg(vf, rule_dbi, rule_dbi_len, inc, 1);
395 if (ret)
396 return ret;
397 }
398 return ree_db_msg(vf, rule_db, rule_db_len, inc, 0);
399 }
400
401 static int
ree_get_blkaddr(struct dev * dev)402 ree_get_blkaddr(struct dev *dev)
403 {
404 int pf;
405
406 pf = dev_get_pf(dev->pf_func);
407 if (pf == REE0_PF)
408 return RVU_BLOCK_ADDR_REE0;
409 else if (pf == REE1_PF)
410 return RVU_BLOCK_ADDR_REE1;
411 else
412 return 0;
413 }
414
/**
 * Return the BAR2 base address of the given queue pair's LF registers.
 *
 * @param vf    REE VF handle.
 * @param qp_id Queue pair (LF) index.
 * @return LF register base address.
 */
uintptr_t
roc_ree_qp_get_base(struct roc_ree_vf *vf, uint16_t qp_id)
{
	return REE_LF_BAR2(vf, qp_id);
}
420
421 static void
roc_ree_lf_err_intr_handler(void * param)422 roc_ree_lf_err_intr_handler(void *param)
423 {
424 uintptr_t base = (uintptr_t)param;
425 uint8_t lf_id;
426 uint64_t intr;
427
428 lf_id = (base >> 12) & 0xFF;
429
430 intr = plt_read64(base + REE_LF_MISC_INT);
431 if (intr == 0)
432 return;
433
434 plt_ree_dbg("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
435
436 /* Clear interrupt */
437 plt_write64(intr, base + REE_LF_MISC_INT);
438 }
439
440 static void
roc_ree_lf_err_intr_unregister(struct roc_ree_vf * vf,uint16_t msix_off,uintptr_t base)441 roc_ree_lf_err_intr_unregister(struct roc_ree_vf *vf, uint16_t msix_off,
442 uintptr_t base)
443 {
444 struct plt_pci_device *pci_dev = vf->pci_dev;
445
446 /* Disable error interrupts */
447 plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C);
448
449 dev_irq_unregister(pci_dev->intr_handle,
450 roc_ree_lf_err_intr_handler, (void *)base, msix_off);
451 }
452
453 void
roc_ree_err_intr_unregister(struct roc_ree_vf * vf)454 roc_ree_err_intr_unregister(struct roc_ree_vf *vf)
455 {
456 uintptr_t base;
457 uint32_t i;
458
459 for (i = 0; i < vf->nb_queues; i++) {
460 base = REE_LF_BAR2(vf, i);
461 roc_ree_lf_err_intr_unregister(vf, vf->lf_msixoff[i], base);
462 }
463
464 vf->err_intr_registered = 0;
465 }
466
467 static int
roc_ree_lf_err_intr_register(struct roc_ree_vf * vf,uint16_t msix_off,uintptr_t base)468 roc_ree_lf_err_intr_register(struct roc_ree_vf *vf, uint16_t msix_off,
469 uintptr_t base)
470 {
471 struct plt_pci_device *pci_dev = vf->pci_dev;
472 int ret;
473
474 /* Disable error interrupts */
475 plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C);
476
477 /* Register error interrupt handler */
478 ret = dev_irq_register(pci_dev->intr_handle,
479 roc_ree_lf_err_intr_handler, (void *)base,
480 msix_off);
481 if (ret)
482 return ret;
483
484 /* Enable error interrupts */
485 plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1S);
486
487 return 0;
488 }
489
490 int
roc_ree_err_intr_register(struct roc_ree_vf * vf)491 roc_ree_err_intr_register(struct roc_ree_vf *vf)
492 {
493 uint32_t i, j, ret;
494 uintptr_t base;
495
496 for (i = 0; i < vf->nb_queues; i++) {
497 if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
498 plt_err("Invalid REE LF MSI-X offset: 0x%x",
499 vf->lf_msixoff[i]);
500 return -EINVAL;
501 }
502 }
503
504 for (i = 0; i < vf->nb_queues; i++) {
505 base = REE_LF_BAR2(vf, i);
506 ret = roc_ree_lf_err_intr_register(vf, vf->lf_msixoff[i], base);
507 if (ret)
508 goto intr_unregister;
509 }
510
511 vf->err_intr_registered = 1;
512 return 0;
513
514 intr_unregister:
515 /* Unregister the ones already registered */
516 for (j = 0; j < i; j++) {
517 base = REE_LF_BAR2(vf, j);
518 roc_ree_lf_err_intr_unregister(vf, vf->lf_msixoff[j], base);
519 }
520 return ret;
521 }
522
523 int
roc_ree_iq_enable(struct roc_ree_vf * vf,const struct roc_ree_qp * qp,uint8_t pri,uint32_t size_div2)524 roc_ree_iq_enable(struct roc_ree_vf *vf, const struct roc_ree_qp *qp,
525 uint8_t pri, uint32_t size_div2)
526 {
527 uint64_t val;
528
529 /* Set instruction queue size and priority */
530 roc_ree_config_lf(vf, qp->id, pri, size_div2);
531
532 /* Set instruction queue base address */
533 /* Should be written after SBUF_CTL and before LF_ENA */
534
535 val = plt_read64(qp->base + REE_LF_SBUF_ADDR);
536 val &= ~REE_LF_SBUF_ADDR_PTR_MASK;
537 val |= FIELD_PREP(REE_LF_SBUF_ADDR_PTR_MASK, qp->iq_dma_addr >> 7);
538 plt_write64(val, qp->base + REE_LF_SBUF_ADDR);
539
540 /* Enable instruction queue */
541
542 val = plt_read64(qp->base + REE_LF_ENA);
543 val &= ~REE_LF_ENA_ENA_MASK;
544 val |= FIELD_PREP(REE_LF_ENA_ENA_MASK, 1);
545 plt_write64(val, qp->base + REE_LF_ENA);
546
547 return 0;
548 }
549
550 void
roc_ree_iq_disable(struct roc_ree_qp * qp)551 roc_ree_iq_disable(struct roc_ree_qp *qp)
552 {
553 uint64_t val;
554
555 /* Stop instruction execution */
556 val = plt_read64(qp->base + REE_LF_ENA);
557 val &= ~REE_LF_ENA_ENA_MASK;
558 val |= FIELD_PREP(REE_LF_ENA_ENA_MASK, 0);
559 plt_write64(val, qp->base + REE_LF_ENA);
560 }
561
562 int
roc_ree_dev_init(struct roc_ree_vf * vf)563 roc_ree_dev_init(struct roc_ree_vf *vf)
564 {
565 struct plt_pci_device *pci_dev;
566 struct ree *ree;
567 struct dev *dev;
568 uint8_t max_matches = 0;
569 uint16_t nb_queues = 0;
570 int rc;
571
572 if (vf == NULL || vf->pci_dev == NULL)
573 return -EINVAL;
574
575 PLT_STATIC_ASSERT(sizeof(struct ree) <= ROC_REE_MEM_SZ);
576
577 ree = roc_ree_to_ree_priv(vf);
578 memset(ree, 0, sizeof(*ree));
579 vf->dev = &ree->dev;
580
581 pci_dev = vf->pci_dev;
582 dev = vf->dev;
583
584 /* Initialize device */
585 rc = dev_init(dev, pci_dev);
586 if (rc) {
587 plt_err("Failed to init roc device");
588 goto fail;
589 }
590
591 /* Get REE block address */
592 vf->block_address = ree_get_blkaddr(dev);
593 if (!vf->block_address) {
594 plt_err("Could not determine block PF number");
595 goto fail;
596 }
597
598 /* Get number of queues available on the device */
599 rc = roc_ree_available_queues_get(vf, &nb_queues);
600 if (rc) {
601 plt_err("Could not determine the number of queues available");
602 goto fail;
603 }
604
605 /* Don't exceed the limits set per VF */
606 nb_queues = RTE_MIN(nb_queues, REE_MAX_QUEUES_PER_VF);
607
608 if (nb_queues == 0) {
609 plt_err("No free queues available on the device");
610 goto fail;
611 }
612
613 vf->max_queues = nb_queues;
614
615 plt_ree_dbg("Max queues supported by device: %d", vf->max_queues);
616
617 /* Get number of maximum matches supported on the device */
618 rc = roc_ree_max_matches_get(vf, &max_matches);
619 if (rc) {
620 plt_err("Could not determine the maximum matches supported");
621 goto fail;
622 }
623 /* Don't exceed the limits set per VF */
624 max_matches = RTE_MIN(max_matches, REE_MAX_MATCHES_PER_VF);
625 if (max_matches == 0) {
626 plt_err("Could not determine the maximum matches supported");
627 goto fail;
628 }
629
630 vf->max_matches = max_matches;
631
632 plt_ree_dbg("Max matches supported by device: %d", vf->max_matches);
633 fail:
634 return rc;
635 }
636
637 int
roc_ree_dev_fini(struct roc_ree_vf * vf)638 roc_ree_dev_fini(struct roc_ree_vf *vf)
639 {
640 if (vf == NULL)
641 return -EINVAL;
642
643 vf->max_matches = 0;
644 vf->max_queues = 0;
645
646 return dev_fini(vf->dev, vf->pci_dev);
647 }
648