/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <bus_pci_driver.h>
#include <rte_spinlock.h>

#include "../octeontx_logs.h"
#include "octeontx_io.h"
#include "octeontx_pkovf.h"

struct octeontx_pko_iomem {
	uint8_t		*va;
	rte_iova_t	iova;
	size_t		size;
};

#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}
#define PKO_VALID	0x1
#define PKO_INUSE	0x2
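/*
 * VF status flags: PKO_VALID is set once a VF's flow control has been
 * configured in octeontx_pko_init_fc(); PKO_INUSE marks a VF whose id has
 * already been handed out by octeontx_pko_get_vfid().
 */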

struct octeontx_pko_fc_ctl_s {
	int64_t buf_cnt;
	int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
};
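
/*
 * Each DQ's credit counter is padded out to PKO_DQ_FC_STRIDE bytes so that
 * every counter occupies its own stride-aligned slot of the flow-control
 * memory that the hardware updates.
 */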

struct octeontx_pkovf {
	uint8_t		*bar0;
	uint8_t		*bar2;
	uint8_t		status;
	uint16_t	domain;
	uint16_t	vfid;
};

struct octeontx_pko_vf_ctl_s {
	rte_spinlock_t lock;
	uint16_t global_domain;
	struct octeontx_pko_iomem fc_iomem;
	struct octeontx_pko_fc_ctl_s *fc_ctl;
	struct octeontx_pkovf pko[PKO_VF_MAX];
	struct {
		uint64_t chanid;
	} dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
};
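
/*
 * dq_map[] stores channel ids bitwise complemented (~chanid): a zero entry
 * denotes a free DQ, which matches both the zero-initialisation done in
 * octeontx_pkovf_setup() and the ~0ull "null channel" used by the claim
 * and free helpers below.
 */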

static struct octeontx_pko_vf_ctl_s pko_vf_ctl;

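/*
 * A global DQ (tx queue) number resolves to a VF (txq / PKO_VF_NUM_DQ)
 * and a VF-local queue index (txq % PKO_VF_NUM_DQ).
 */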
static void *
octeontx_pko_dq_vf_bar0(uint16_t txq)
{
	int vf_ix;

	vf_ix = txq / PKO_VF_NUM_DQ;
	return pko_vf_ctl.pko[vf_ix].bar0;
}

static int
octeontx_pko_dq_gdq(uint16_t txq)
{
	return txq % PKO_VF_NUM_DQ;
}

/**
 * Open a PKO DQ.
 */
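/*
 * DQ OPEN/CLOSE commands are issued as an atomic load-and-add of zero to
 * the matching OP register; the value read back encodes the operation, a
 * status code and the current queue depth.
 */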
static inline
int octeontx_pko_dq_open(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int gdq;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	gdq = octeontx_pko_dq_gdq(txq);

	if (unlikely(gdq < 0 || vf_bar0 == NULL))
		return -EINVAL;

	*(volatile int64_t *)(pko_vf_ctl.fc_ctl + txq) =
		PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;

	rte_wmb();

	octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
			 vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));

	/* Set the register to return descriptor (packet) count as DEPTH */
	/* KIND=1, NCB_QUERY_RSP=0 */
	octeontx_write64(1ull << PKO_DQ_KIND_BIT,
			 vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));

	reg_off = PKO_VF_DQ_OP_OPEN(gdq);

	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::OPEN */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xC:	/* DQALREADYCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	/* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
	octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

	return rtn & ((1ull << PKO_DQ_OP_BIT) - 1);
}

/**
 * Close a PKO DQ and flush all pending packets.
 */
static inline
int octeontx_pko_dq_close(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int res;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	res = octeontx_pko_dq_gdq(txq);

	if (unlikely(res < 0 || vf_bar0 == NULL))
		return -EINVAL;

	reg_off = PKO_VF_DQ_OP_CLOSE(res);

	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::CLOSE */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xD:	/* DQNOTCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
	return res;
}

/* Flush all packets pending on a DQ */
static inline
int octeontx_pko_dq_drain(uint16_t txq)
{
	unsigned int gdq;
	uint8_t *vf_bar0;
	uint64_t reg;
	int res, timo = PKO_DQ_DRAIN_TO;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	res = octeontx_pko_dq_gdq(txq);
	if (unlikely(res < 0 || vf_bar0 == NULL))
		return -EINVAL;
	gdq = res;

	/* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
	octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

	/* Wait until buffers leave DQs */
	reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
	while (reg && timo > 0) {
		rte_delay_us(100);
		timo--;
		reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
	}

	/* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
	octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

	return reg;
}

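/*
 * Find dq_num consecutive dq_map[] entries whose stored value equals
 * ~chanid, scanning upward from dq_from; returns the first index of the
 * run, or -1 if no such run exists.
 */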
static inline int
octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			     unsigned int dq_num, unsigned int dq_from)
{
	unsigned int dq, dq_cnt;
	unsigned int dq_base;

	dq_cnt = 0;
	dq = dq_from;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_base = dq;
		dq_cnt = 0;
		/* Check the bound before the array access */
		while (dq < RTE_DIM(ctl->dq_map) &&
			ctl->dq_map[dq].chanid == ~chanid) {
			dq_cnt++;
			if (dq_cnt == dq_num)
				return dq_base;
			dq++;
		}
		dq++;
	}
	return -1;
}

static inline void
octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			     unsigned int dq_base, unsigned int dq_num)
{
	unsigned int dq, dq_cnt;

	dq_cnt = 0;
	while (dq_cnt < dq_num) {
		dq = dq_base + dq_cnt;

		octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
			chanid);

		ctl->dq_map[dq].chanid = ~chanid;
		dq_cnt++;
	}
}

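/*
 * Claim dq_num consecutive DQs for chanid under the control-block lock;
 * the free run must start exactly at dq_base or the claim fails.
 */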
static inline int
octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
		      unsigned int dq_num, uint64_t chanid)
{
	const uint64_t null_chanid = ~0ull;
	int dq;

	rte_spinlock_lock(&ctl->lock);

	dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
	if (dq < 0 || (unsigned int)dq != dq_base) {
		rte_spinlock_unlock(&ctl->lock);
		return -1;
	}
	octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);

	rte_spinlock_unlock(&ctl->lock);

	return 0;
}

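/*
 * Return every DQ mapped to chanid to the free pool (entries reset to
 * ~null_chanid, i.e. zero); -EINVAL if the channel owned no DQs.
 */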
static inline int
octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	const uint64_t null_chanid = ~0ull;
	unsigned int dq = 0, dq_cnt = 0;

	rte_spinlock_lock(&ctl->lock);
	while (dq < RTE_DIM(ctl->dq_map)) {
		if (ctl->dq_map[dq].chanid == ~chanid) {
			ctl->dq_map[dq].chanid = ~null_chanid;
			dq_cnt++;
		}
		dq++;
	}
	rte_spinlock_unlock(&ctl->lock);

	return dq_cnt > 0 ? 0 : -EINVAL;
}

int
octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int res;

	res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
	if (res < 0)
		return -1;

	return 0;
}

int
octeontx_pko_channel_close(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int res;

	res = octeontx_pko_dq_free(ctl, chanid);
	if (res < 0)
		return -1;

	return 0;
}

static inline int
octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	unsigned int dq_vf;
	unsigned int dq, dq_cnt;

	dq_cnt = 0;
	dq = 0;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_vf = dq / PKO_VF_NUM_DQ;

		if (!ctl->pko[dq_vf].bar0) {
			dq += PKO_VF_NUM_DQ;
			continue;
		}

		if (ctl->dq_map[dq].chanid != ~chanid) {
			dq++;
			continue;
		}

		if (octeontx_pko_dq_open(dq) < 0)
			break;

		dq_cnt++;
		dq++;
	}

	return dq_cnt;
}

int
octeontx_pko_channel_start(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int dq_cnt;

	dq_cnt = octeontx_pko_chan_start(ctl, chanid);
	if (dq_cnt < 0)
		return -1;

	return dq_cnt;
}

static inline int
octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	unsigned int dq, dq_cnt, dq_vf;
	int res;

	dq_cnt = 0;
	dq = 0;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_vf = dq / PKO_VF_NUM_DQ;

		if (!ctl->pko[dq_vf].bar0) {
			dq += PKO_VF_NUM_DQ;
			continue;
		}

		if (ctl->dq_map[dq].chanid != ~chanid) {
			dq++;
			continue;
		}

		res = octeontx_pko_dq_drain(dq);
		if (res > 0)
			octeontx_log_err("draining DQ%d, buffers left: %x",
					 dq, res);

		res = octeontx_pko_dq_close(dq);
		if (res < 0)
			octeontx_log_err("closing DQ%d failed", dq);

		dq_cnt++;
		dq++;
	}
	return dq_cnt;
}

int
octeontx_pko_channel_stop(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;

	octeontx_pko_chan_stop(ctl, chanid);
	return 0;
}

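/*
 * Look up one DQ of a channel (dq_num is a single global DQ index, despite
 * the name) and hand its transmit addresses to the caller's getter: the
 * LMT line in BAR2, the DQ SEND doorbell in BAR0, and the flow-control
 * counter for that DQ.
 */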
static inline int
octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			   void *out, size_t out_elem_size,
			   size_t dq_num, octeontx_pko_dq_getter_t getter)
{
	octeontx_dq_t curr;
	unsigned int dq_vf;
	unsigned int dq;
	uint8_t *iter = (uint8_t *)out;

	RTE_SET_USED(out_elem_size);
	memset(&curr, 0, sizeof(octeontx_dq_t));

	dq_vf = dq_num / PKO_VF_NUM_DQ;
	dq = dq_num % PKO_VF_NUM_DQ;

	if (!ctl->pko[dq_vf].bar0)
		return -EINVAL;

	if (ctl->dq_map[dq_num].chanid != ~chanid)
		return -EINVAL;

	curr.lmtline_va = ctl->pko[dq_vf].bar2;
	curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
		+ PKO_VF_DQ_OP_SEND((dq), 0));
	curr.fc_status_va = ctl->fc_ctl + dq_num;

	octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
			 curr.lmtline_va, curr.ioreg_va,
			 curr.fc_status_va);

	getter(&curr, (void *)iter);
	return 0;
}

int
octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
				size_t dq_num, octeontx_pko_dq_getter_t getter)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int dq_cnt;

	dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
						dq_num, getter);
	if (dq_cnt < 0)
		return -1;

	return dq_cnt;
}

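/*
 * Illustrative call sequence for the channel API above; a sketch only, not
 * part of this driver. "chan", "base", "num", "my_getter" and "dq" are
 * hypothetical caller-side names, and my_getter must match
 * octeontx_pko_dq_getter_t:
 *
 *	octeontx_dq_t dq;
 *
 *	if (octeontx_pko_channel_open(base, num, chan) < 0)
 *		return -1;
 *	if (octeontx_pko_channel_start(chan) != num)
 *		return -1;
 *	octeontx_pko_channel_query_dqs(chan, &dq, sizeof(dq),
 *				       base, my_getter);
 *	... transmit via dq.ioreg_va / dq.lmtline_va, polling
 *	    dq.fc_status_va for credits ...
 *	octeontx_pko_channel_stop(chan);
 *	octeontx_pko_channel_close(chan);
 */
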
int
octeontx_pko_vf_count(void)
{
	uint16_t global_domain = octeontx_get_global_domain();
	int vf_cnt;

	pko_vf_ctl.global_domain = global_domain;
	vf_cnt = 0;
	/* Stop at the table end so a fully populated table is not overrun */
	while (vf_cnt < PKO_VF_MAX && pko_vf_ctl.pko[vf_cnt].bar0)
		vf_cnt++;

	return vf_cnt;
}

size_t
octeontx_pko_get_vfid(void)
{
	size_t vf_cnt = octeontx_pko_vf_count();
	size_t vf_idx;

	for (vf_idx = 0; vf_idx < vf_cnt; vf_idx++) {
		if (!(pko_vf_ctl.pko[vf_idx].status & PKO_VALID))
			continue;
		if (pko_vf_ctl.pko[vf_idx].status & PKO_INUSE)
			continue;

		pko_vf_ctl.pko[vf_idx].status |= PKO_INUSE;
		return pko_vf_ctl.pko[vf_idx].vfid;
	}

	return SIZE_MAX;
}

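/* Request an MTU change for a port via the PKO coprocessor mailbox. */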
int
octeontx_pko_send_mtu(int port, int mtu)
{
	struct octeontx_mbox_hdr hdr;
	mbox_pko_mtu_cfg_t cfg;
	int res;

	cfg.mtu = mtu;

	hdr.coproc = OCTEONTX_PKO_COPROC;
	hdr.msg = MBOX_PKO_MTU_CONFIG;
	hdr.vfid = port;

	res = octeontx_mbox_send(&hdr, &cfg, sizeof(mbox_pko_mtu_cfg_t),
				 NULL, 0);
	if (res < 0)
		return -EACCES;

	return res;
}

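/*
 * Allocate the per-DQ credit counters and point each open VF's
 * PKO_VF_DQ_FC_CONFIG register at its slice of that memory: bit 0 enables
 * flow control, bit 2 selects the 16-byte stride, bits [4:3] set the
 * hysteresis, and the 128-byte-aligned IOVA (masked with ~0x7F) supplies
 * the base address.
 */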
int
octeontx_pko_init_fc(const size_t pko_vf_count)
{
	int dq_ix;
	uint64_t reg;
	uint8_t *vf_bar0;
	size_t vf_idx;
	size_t fc_mem_size;

	fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
			pko_vf_count * PKO_VF_NUM_DQ;

	pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
	if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
		octeontx_log_err("fc_iomem: not enough memory");
		return -ENOMEM;
	}

	pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2iova((void *)
							pko_vf_ctl.fc_iomem.va);
	pko_vf_ctl.fc_iomem.size = fc_mem_size;

	pko_vf_ctl.fc_ctl =
		(struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;

	/* Configure Flow-Control feature for all DQs of open VFs */
	for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
		if (pko_vf_ctl.pko[vf_idx].domain != pko_vf_ctl.global_domain)
			continue;

		dq_ix = pko_vf_ctl.pko[vf_idx].vfid * PKO_VF_NUM_DQ;
		vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;

		reg = (pko_vf_ctl.fc_iomem.iova +
			(sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F;
		reg |=			/* BASE */
		    (0x2 << 3) |	/* HYST_BITS */
		    (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
		    (0x1 << 0);		/* ENABLE */

		octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);
		pko_vf_ctl.pko[vf_idx].status = PKO_VALID;

		octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
				 vf_bar0, (int)vf_idx, reg);
	}

	return 0;
}

void
octeontx_pko_fc_free(void)
{
	rte_free(pko_vf_ctl.fc_iomem.va);
}

static void
octeontx_pkovf_setup(void)
{
	static bool init_once;

	if (!init_once) {
		unsigned int i;

		rte_spinlock_init(&pko_vf_ctl.lock);

		pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
		pko_vf_ctl.fc_ctl = NULL;

		for (i = 0; i < PKO_VF_MAX; i++) {
			pko_vf_ctl.pko[i].bar0 = NULL;
			pko_vf_ctl.pko[i].bar2 = NULL;
			pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
			pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
		}

		for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
			pko_vf_ctl.dq_map[i].chanid = 0;

		init_once = true;
	}
}

/* PKOVF PCIe device */
static int
pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint64_t val;
	uint16_t vfid;
	uint16_t domain;
	uint8_t *bar0;
	uint8_t *bar2;
	static uint8_t vf_cnt;
	struct octeontx_pkovf *res;

	RTE_SET_USED(pci_drv);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL ||
	    pci_dev->mem_resource[2].addr == NULL) {
		octeontx_log_err("Empty bars %p %p",
			pci_dev->mem_resource[0].addr,
			pci_dev->mem_resource[2].addr);
		return -ENODEV;
	}
	bar0 = pci_dev->mem_resource[0].addr;
	bar2 = pci_dev->mem_resource[2].addr;

	octeontx_pkovf_setup();

	/* Get the vfid and domain from the FC_CONFIG register */
	val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
	domain = (val >> 7) & 0xffff;
	vfid = (val >> 23) & 0xffff;

	if (unlikely(vfid >= PKO_VF_MAX)) {
		octeontx_log_err("pko: Invalid vfid %d", vfid);
		return -EINVAL;
	}

	res = &pko_vf_ctl.pko[vf_cnt++];
	res->vfid = vfid;
	res->domain = domain;
	res->bar0 = bar0;
	res->bar2 = bar2;

	octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid);
	return 0;
}

#define PCI_VENDOR_ID_CAVIUM               0x177D
#define PCI_DEVICE_ID_OCTEONTX_PKO_VF      0xA049

static const struct rte_pci_id pci_pkovf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_PKO_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_pkovf = {
	.id_table = pci_pkovf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = pkovf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);