/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
#include <rte_spinlock.h>

#include "../octeontx_logs.h"
#include "octeontx_io.h"
#include "octeontx_pkovf.h"

struct octeontx_pko_iomem {
	uint8_t		*va;
	rte_iova_t	iova;
	size_t		size;
};

#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}

struct octeontx_pko_fc_ctl_s {
	int64_t buf_cnt;
	int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
};

struct octeontx_pkovf {
	uint8_t		*bar0;
	uint8_t		*bar2;
	uint16_t	domain;
	uint16_t	vfid;
};

struct octeontx_pko_vf_ctl_s {
	rte_spinlock_t lock;

	struct octeontx_pko_iomem fc_iomem;
	struct octeontx_pko_fc_ctl_s *fc_ctl;
	struct octeontx_pkovf pko[PKO_VF_MAX];
	struct {
		uint64_t chanid;
	} dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
};
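
/*
 * Note on dq_map[]: each entry stores the bitwise complement (~chanid) of the
 * channel that owns the DQ. A free entry therefore holds 0, which is
 * ~null_chanid for the null channel id ~0ull used by the claim/free helpers
 * below, so the zero-initialised map starts out fully unclaimed.
 */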

static struct octeontx_pko_vf_ctl_s pko_vf_ctl;

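/*
 * A global DQ (txq) index decomposes into the owning VF and the VF-local
 * DQ index: vf = txq / PKO_VF_NUM_DQ, gdq = txq % PKO_VF_NUM_DQ. The two
 * helpers below implement this split.
 */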
static void *
octeontx_pko_dq_vf_bar0(uint16_t txq)
{
	int vf_ix;

	vf_ix = txq / PKO_VF_NUM_DQ;
	return pko_vf_ctl.pko[vf_ix].bar0;
}

static int
octeontx_pko_dq_gdq(uint16_t txq)
{
	return txq % PKO_VF_NUM_DQ;
}

/**
 * Open a PKO DQ.
 */
static inline int
octeontx_pko_dq_open(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int gdq;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	gdq = octeontx_pko_dq_gdq(txq);

	if (unlikely(gdq < 0 || vf_bar0 == NULL))
		return -EINVAL;

	*(volatile int64_t *)(pko_vf_ctl.fc_ctl + txq) =
		PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;

	rte_wmb();

	octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
			 vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));

	/* Set the register to return descriptor (packet) count as DEPTH:
	 * KIND=1, NCB_QUERY_RSP=0
	 */
	octeontx_write64(1ull << PKO_DQ_KIND_BIT,
				vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));
	reg_off = PKO_VF_DQ_OP_OPEN(gdq);

	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::OPEN */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xC:	/* DQALREADYCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	/* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
	octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

	return rtn & ((1ull << PKO_DQ_OP_BIT) - 1);
}
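
/*
 * DQ open/close are issued as atomic load-and-add operations of 0 against
 * the per-DQ OP_OPEN/OP_CLOSE addresses: the LDADD result is a 64-bit word
 * carrying the echoed opcode at PKO_DQ_OP_BIT, the completion status at
 * PKO_DQ_STATUS_BIT and the current DQ depth in the low bits.
 */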

/**
 * Close a PKO DQ and flush all pending packets.
 */
static inline int
octeontx_pko_dq_close(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int res;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	res = octeontx_pko_dq_gdq(txq);

	if (unlikely(res < 0 || vf_bar0 == NULL))
		return -EINVAL;

	reg_off = PKO_VF_DQ_OP_CLOSE(res);

	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::CLOSE */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xD:	/* DQNOTCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
	return res;
}

/* Flush all packets pending on a DQ; returns the residual WM_CNT */
static inline int
octeontx_pko_dq_drain(uint16_t txq)
{
	unsigned int gdq;
	uint8_t *vf_bar0;
	uint64_t reg;
	int res, timo = PKO_DQ_DRAIN_TO;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	res = octeontx_pko_dq_gdq(txq);

	if (unlikely(res < 0 || vf_bar0 == NULL))
		return -EINVAL;
	gdq = res;

	/* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
	octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
	/* Wait until buffers leave DQs */
	reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
	while (reg && timo > 0) {
		rte_delay_us(100);
		timo--;
		reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
	}
	/* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
	octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

	return reg;
}

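/*
 * Find dq_num consecutive dq_map[] entries matching ~chanid, starting the
 * search at dq_from; returns the base index of the run, or -1 if no such
 * run exists.
 */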
static inline int
octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			     unsigned int dq_num, unsigned int dq_from)
{
	unsigned int dq, dq_cnt;
	unsigned int dq_base;

	dq_cnt = 0;
	dq = dq_from;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_base = dq;
		dq_cnt = 0;
		/* Bound-check dq before reading dq_map[dq] */
		while (dq < RTE_DIM(ctl->dq_map) &&
			ctl->dq_map[dq].chanid == ~chanid) {
			dq_cnt++;
			if (dq_cnt == dq_num)
				return dq_base;
			dq++;
		}
		dq++;
	}
	return -1;
}

static inline void
octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			     unsigned int dq_base, unsigned int dq_num)
{
	unsigned int dq, dq_cnt;

	dq_cnt = 0;
	while (dq_cnt < dq_num) {
		dq = dq_base + dq_cnt;

		octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
			chanid);

		ctl->dq_map[dq].chanid = ~chanid;
		dq_cnt++;
	}
}

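/*
 * Atomically claim DQs [dq_base, dq_base + dq_num) for chanid. The claim
 * succeeds only if the whole range is free starting exactly at dq_base,
 * i.e. the lookup must return dq_base itself.
 */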
static inline int
octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
		      unsigned int dq_num, uint64_t chanid)
{
	const uint64_t null_chanid = ~0ull;
	int dq;

	rte_spinlock_lock(&ctl->lock);

	dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
	if (dq < 0 || (unsigned int)dq != dq_base) {
		rte_spinlock_unlock(&ctl->lock);
		return -1;
	}
	octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);

	rte_spinlock_unlock(&ctl->lock);

	return 0;
}

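/*
 * Release every DQ owned by chanid back to the free pool; returns -EINVAL
 * when the channel owns no DQs.
 */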
static inline int
octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	const uint64_t null_chanid = ~0ull;
	unsigned int dq = 0, dq_cnt = 0;

	rte_spinlock_lock(&ctl->lock);
	while (dq < RTE_DIM(ctl->dq_map)) {
		if (ctl->dq_map[dq].chanid == ~chanid) {
			ctl->dq_map[dq].chanid = ~null_chanid;
			dq_cnt++;
		}
		dq++;
	}
	rte_spinlock_unlock(&ctl->lock);

	return dq_cnt > 0 ? 0 : -EINVAL;
}

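/*
 * Public channel API. A typical lifecycle, as suggested by the functions
 * below (the exact sequence lives in the octeontx ethdev layer), is:
 *
 *	octeontx_pko_channel_open(dq_base, dq_num, chanid);
 *	octeontx_pko_channel_start(chanid);
 *	octeontx_pko_channel_query_dqs(chanid, &dq, sizeof(dq), txq, getter);
 *	... transmit via the returned LMT line / IO register ...
 *	octeontx_pko_channel_stop(chanid);
 *	octeontx_pko_channel_close(chanid);
 */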
int
octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int res;

	res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
	if (res < 0)
		return -1;

	return 0;
}

int
octeontx_pko_channel_close(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int res;

	res = octeontx_pko_dq_free(ctl, chanid);
	if (res < 0)
		return -1;

	return 0;
}

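/*
 * Open every DQ mapped to chanid; VFs with no BAR0 mapped are skipped in
 * whole-VF strides. Returns the number of DQs successfully opened.
 */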
static inline int
octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	unsigned int dq_vf;
	unsigned int dq, dq_cnt;

	dq_cnt = 0;
	dq = 0;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_vf = dq / PKO_VF_NUM_DQ;

		if (!ctl->pko[dq_vf].bar0) {
			dq += PKO_VF_NUM_DQ;
			continue;
		}

		if (ctl->dq_map[dq].chanid != ~chanid) {
			dq++;
			continue;
		}

		if (octeontx_pko_dq_open(dq) < 0)
			break;

		dq_cnt++;
		dq++;
	}

	return dq_cnt;
}

int
octeontx_pko_channel_start(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int dq_cnt;

	dq_cnt = octeontx_pko_chan_start(ctl, chanid);
	if (dq_cnt < 0)
		return -1;

	return dq_cnt;
}

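/*
 * Drain, then close, every DQ mapped to chanid. Per-DQ failures are logged
 * but do not abort the walk; the number of DQs visited is returned.
 */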
static inline int
octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	unsigned int dq, dq_cnt, dq_vf;
	int res;

	dq_cnt = 0;
	dq = 0;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_vf = dq / PKO_VF_NUM_DQ;

		if (!ctl->pko[dq_vf].bar0) {
			dq += PKO_VF_NUM_DQ;
			continue;
		}

		if (ctl->dq_map[dq].chanid != ~chanid) {
			dq++;
			continue;
		}

		res = octeontx_pko_dq_drain(dq);
		if (res > 0)
			octeontx_log_err("draining DQ%d, buffers left: %x",
					 dq, res);

		res = octeontx_pko_dq_close(dq);
		if (res < 0)
			octeontx_log_err("closing DQ%d failed", dq);

		dq_cnt++;
		dq++;
	}
	return dq_cnt;
}

int
octeontx_pko_channel_stop(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;

	octeontx_pko_chan_stop(ctl, chanid);
	return 0;
}

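/*
 * Query one DQ of a channel: fill an octeontx_dq_t with the LMT line VA
 * (BAR2), the VA of the per-DQ SEND operation address (BAR0) and the VA of
 * the flow-control status word, then hand it to the caller-supplied getter,
 * which copies what it needs into 'out'.
 */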
static inline int
octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			   void *out, size_t out_elem_size,
			   size_t dq_num, octeontx_pko_dq_getter_t getter)
{
	octeontx_dq_t curr;
	unsigned int dq_vf;
	unsigned int dq;

	RTE_SET_USED(out_elem_size);
	memset(&curr, 0, sizeof(octeontx_dq_t));

	dq_vf = dq_num / PKO_VF_NUM_DQ;
	dq = dq_num % PKO_VF_NUM_DQ;

	if (!ctl->pko[dq_vf].bar0)
		return -EINVAL;

	if (ctl->dq_map[dq_num].chanid != ~chanid)
		return -EINVAL;

	uint8_t *iter = (uint8_t *)out;
	curr.lmtline_va = ctl->pko[dq_vf].bar2;
	curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
		+ PKO_VF_DQ_OP_SEND((dq), 0));
	/* fc_ctl is indexed by the global DQ (txq) index, as in dq_open() */
	curr.fc_status_va = ctl->fc_ctl + dq_num;

	octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
			 curr.lmtline_va, curr.ioreg_va,
			 curr.fc_status_va);

	getter(&curr, (void *)iter);
	return 0;
}

int
octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
				size_t dq_num, octeontx_pko_dq_getter_t getter)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int dq_cnt;

	dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
						dq_num, getter);
	if (dq_cnt < 0)
		return -1;

	return dq_cnt;
}

int
octeontx_pko_vf_count(void)
{
	int vf_cnt;

	/* pko[] is populated from index 0 upwards; stop at the array end */
	vf_cnt = 0;
	while (vf_cnt < PKO_VF_MAX && pko_vf_ctl.pko[vf_cnt].bar0)
		vf_cnt++;

	return vf_cnt;
}

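/*
 * Flow-control memory layout: one octeontx_pko_fc_ctl_s per DQ, packed into
 * a single 128-byte aligned rte_malloc() block. PKO_VF_DQ_FC_CONFIG is
 * programmed with the block base (BASE, 128-byte aligned), HYST_BITS = 2,
 * the stride select bit and ENABLE, so the hardware can post per-DQ buffer
 * counts into buf_cnt for the datapath to poll.
 */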
int
octeontx_pko_init_fc(const size_t pko_vf_count)
{
	int dq_ix;
	uint64_t reg;
	uint8_t *vf_bar0;
	size_t vf_idx;
	size_t fc_mem_size;

	fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
			pko_vf_count * PKO_VF_NUM_DQ;

	pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
	if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
		octeontx_log_err("fc_iomem: not enough memory");
		return -ENOMEM;
	}

	pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2iova((void *)
							pko_vf_ctl.fc_iomem.va);
	pko_vf_ctl.fc_iomem.size = fc_mem_size;

	pko_vf_ctl.fc_ctl =
		(struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;

	/* Configure Flow-Control feature for all DQs of open VFs */
	for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
		dq_ix = vf_idx * PKO_VF_NUM_DQ;

		vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;

		reg = (pko_vf_ctl.fc_iomem.iova +
			(sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F;
		reg |=			/* BASE */
		    (0x2 << 3) |	/* HYST_BITS */
		    (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
		    (0x1 << 0);		/* ENABLE */

		octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);

		octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
				 vf_bar0, (int)vf_idx, reg);
	}
	return 0;
}

void
octeontx_pko_fc_free(void)
{
	rte_free(pko_vf_ctl.fc_iomem.va);
}

static void
octeontx_pkovf_setup(void)
{
	static bool init_once;

	if (!init_once) {
		unsigned int i;

		rte_spinlock_init(&pko_vf_ctl.lock);

		pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
		pko_vf_ctl.fc_ctl = NULL;

		for (i = 0; i < PKO_VF_MAX; i++) {
			pko_vf_ctl.pko[i].bar0 = NULL;
			pko_vf_ctl.pko[i].bar2 = NULL;
			pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
			pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
		}

		for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
			pko_vf_ctl.dq_map[i].chanid = 0;

		init_once = true;
	}
}

/* PKOVF PCIe device */
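/*
 * The domain and VF id are recovered from PKO_VF_DQ_FC_CONFIG, presumably
 * pre-programmed by the PF/kernel driver: domain sits in bits <22:7> and
 * vfid in bits <38:23> (see the shifts below).
 */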
static int
pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint64_t val;
	uint16_t vfid;
	uint16_t domain;
	uint8_t *bar0;
	uint8_t *bar2;
	struct octeontx_pkovf *res;

	RTE_SET_USED(pci_drv);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL ||
	    pci_dev->mem_resource[2].addr == NULL) {
		octeontx_log_err("Empty bars %p %p",
			pci_dev->mem_resource[0].addr,
			pci_dev->mem_resource[2].addr);
		return -ENODEV;
	}
	bar0 = pci_dev->mem_resource[0].addr;
	bar2 = pci_dev->mem_resource[2].addr;

	octeontx_pkovf_setup();

	/* get vfid and domain */
	val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
	domain = (val >> 7) & 0xffff;
	vfid = (val >> 23) & 0xffff;

	if (unlikely(vfid >= PKO_VF_MAX)) {
		octeontx_log_err("pko: Invalid vfid %d", vfid);
		return -EINVAL;
	}

	res = &pko_vf_ctl.pko[vfid];
	res->vfid = vfid;
	res->domain = domain;
	res->bar0 = bar0;
	res->bar2 = bar2;

	octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid);
	return 0;
}

#define PCI_VENDOR_ID_CAVIUM               0x177D
#define PCI_DEVICE_ID_OCTEONTX_PKO_VF      0xA049

static const struct rte_pci_id pci_pkovf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_PKO_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_pkovf = {
	.id_table = pci_pkovf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = pkovf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);