/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium Inc. 2017. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_spinlock.h>

#include "../octeontx_logs.h"
#include "octeontx_io.h"
#include "octeontx_pkovf.h"

struct octeontx_pko_iomem {
	uint8_t		*va;
	phys_addr_t	iova;
	size_t		size;
};

#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}

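/*
 * Per-DQ flow-control word. The PKO hardware posts the available buffer
 * count at the IOVA programmed into PKO_VF_DQ_FC_CONFIG; the padding keeps
 * consecutive entries PKO_DQ_FC_STRIDE bytes apart so each DQ updates its
 * own slot independently.
 */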
struct octeontx_pko_fc_ctl_s {
	int64_t buf_cnt;
	int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
};

struct octeontx_pkovf {
	uint8_t		*bar0;
	uint8_t		*bar2;
	uint16_t	domain;
	uint16_t	vfid;
};

struct octeontx_pko_vf_ctl_s {
	rte_spinlock_t lock;

	struct octeontx_pko_iomem fc_iomem;
	struct octeontx_pko_fc_ctl_s *fc_ctl;
	struct octeontx_pkovf pko[PKO_VF_MAX];
	struct {
		uint64_t chanid;
	} dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
};

static struct octeontx_pko_vf_ctl_s pko_vf_ctl;

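/*
 * Global DQ (txq) indices span all VFs: txq / PKO_VF_NUM_DQ selects the VF
 * whose BAR0 maps the queue, txq % PKO_VF_NUM_DQ is the DQ within that VF.
 */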
static void *
octeontx_pko_dq_vf_bar0(uint16_t txq)
{
	int vf_ix;

	vf_ix = txq / PKO_VF_NUM_DQ;
	if (vf_ix >= PKO_VF_MAX)
		return NULL;
	return pko_vf_ctl.pko[vf_ix].bar0;
}

static int
octeontx_pko_dq_gdq(uint16_t txq)
{
	return txq % PKO_VF_NUM_DQ;
}

/**
 * Open a PKO DQ.
 */
static inline
int octeontx_pko_dq_open(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int gdq;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	gdq = octeontx_pko_dq_gdq(txq);

	if (unlikely(gdq < 0 || vf_bar0 == NULL))
		return -EINVAL;

	/* Seed the flow-control counter with the usable DQ depth */
	*(volatile int64_t *)(pko_vf_ctl.fc_ctl + txq) =
		PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;

	rte_wmb();

	octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
			 vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));

	/* Set the register to return descriptor (packet) count as DEPTH */
	/* KIND=1, NCB_QUERY_RSP=0 */
	octeontx_write64(1ull << PKO_DQ_KIND_BIT,
			 vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));
	reg_off = PKO_VF_DQ_OP_OPEN(gdq);

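	/*
	 * An atomic load-and-add of zero at the OP_OPEN offset triggers the
	 * open; the returned word carries the op code, status and depth.
	 */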
	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::OPEN */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xC:	/* DQALREADYCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	/* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
	octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

	return rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
}

/**
 * Close a PKO DQ and flush all pending packets.
 */
static inline
int octeontx_pko_dq_close(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int res;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	res = octeontx_pko_dq_gdq(txq);

	if (unlikely(res < 0 || vf_bar0 == NULL))
		return -EINVAL;

	reg_off = PKO_VF_DQ_OP_CLOSE(res);

	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::CLOSE */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xD:	/* DQNOTCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
	return res;
}

182 
183 /* Flush all packets pending on a DQ */
184 static inline
185 int octeontx_pko_dq_drain(uint16_t txq)
186 {
187 	unsigned int gdq;
188 	uint8_t *vf_bar0;
189 	uint64_t reg;
190 	int res, timo = PKO_DQ_DRAIN_TO;
191 
192 	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
193 	res = octeontx_pko_dq_gdq(txq);
194 	gdq = res;
195 
196 	 /* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
197 	 octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
198 	/* Wait until buffers leave DQs */
199 	reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
200 	while (reg && timo > 0) {
201 		rte_delay_us(100);
202 		timo--;
203 		reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
204 	}
205 	/* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
206 	octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
207 
208 	return reg;
209 }
210 
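/*
 * dq_map bookkeeping: a channel id is stored bit-inverted (~chanid), so a
 * zeroed entry (~null_chanid, i.e. ~~0ull == 0) marks a free DQ. The lookup
 * scans for dq_num consecutive entries bound to chanid starting at dq_from
 * and returns the base index of the run, or -1 if no such run exists.
 */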
static inline int
octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			     unsigned int dq_num, unsigned int dq_from)
{
	unsigned int dq, dq_cnt;
	unsigned int dq_base;

	dq = dq_from;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_base = dq;
		dq_cnt = 0;
		/* Check the bound before dereferencing the map entry */
		while (dq < RTE_DIM(ctl->dq_map) &&
			ctl->dq_map[dq].chanid == ~chanid) {
			dq_cnt++;
			if (dq_cnt == dq_num)
				return dq_base;
			dq++;
		}
		dq++;
	}
	return -1;
}

234 
235 static inline void
236 octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
237 			     unsigned int dq_base, unsigned int dq_num)
238 {
239 	unsigned int dq, dq_cnt;
240 
241 	dq_cnt = 0;
242 	while (dq_cnt < dq_num) {
243 		dq = dq_base + dq_cnt;
244 
245 		octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
246 			chanid);
247 
248 		ctl->dq_map[dq].chanid = ~chanid;
249 		dq_cnt++;
250 	}
251 }
252 
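/*
 * Claim a contiguous DQ range for a channel: under the control lock, verify
 * that the range starting exactly at dq_base is entirely free, then stamp
 * every entry with ~chanid.
 */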
static inline int
octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
		      unsigned int dq_num, uint64_t chanid)
{
	const uint64_t null_chanid = ~0ull;
	int dq;

	rte_spinlock_lock(&ctl->lock);

	dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
	if (dq < 0 || (unsigned int)dq != dq_base) {
		rte_spinlock_unlock(&ctl->lock);
		return -1;
	}
	octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);

	rte_spinlock_unlock(&ctl->lock);

	return 0;
}

static inline int
octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	const uint64_t null_chanid = ~0ull;
	unsigned int dq = 0, dq_cnt = 0;

	rte_spinlock_lock(&ctl->lock);
	while (dq < RTE_DIM(ctl->dq_map)) {
		if (ctl->dq_map[dq].chanid == ~chanid) {
			/* Return the DQ to the free pool */
			ctl->dq_map[dq].chanid = ~null_chanid;
			dq_cnt++;
		}
		dq++;
	}
	rte_spinlock_unlock(&ctl->lock);

	return dq_cnt > 0 ? 0 : -EINVAL;
}

int
octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int res;

	res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
	if (res < 0)
		return -1;

	return 0;
}

int
octeontx_pko_channel_close(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int res;

	res = octeontx_pko_dq_free(ctl, chanid);
	if (res < 0)
		return -1;

	return 0;
}

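/*
 * Walk the whole dq_map, skipping VFs that were never probed (bar0 == NULL)
 * one VF stride at a time, and open every DQ bound to chanid. Returns the
 * number of DQs successfully opened.
 */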
static inline int
octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	unsigned int dq_vf;
	unsigned int dq, dq_cnt;

	dq_cnt = 0;
	dq = 0;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_vf = dq / PKO_VF_NUM_DQ;

		if (!ctl->pko[dq_vf].bar0) {
			dq += PKO_VF_NUM_DQ;
			continue;
		}

		if (ctl->dq_map[dq].chanid != ~chanid) {
			dq++;
			continue;
		}

		if (octeontx_pko_dq_open(dq) < 0)
			break;

		dq_cnt++;
		dq++;
	}

	return dq_cnt;
}

int
octeontx_pko_channel_start(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int dq_cnt;

	dq_cnt = octeontx_pko_chan_start(ctl, chanid);
	if (dq_cnt < 0)
		return -1;

	return dq_cnt;
}

static inline int
octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	unsigned int dq, dq_cnt, dq_vf;
	int res;

	dq_cnt = 0;
	dq = 0;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_vf = dq / PKO_VF_NUM_DQ;

		if (!ctl->pko[dq_vf].bar0) {
			dq += PKO_VF_NUM_DQ;
			continue;
		}

		if (ctl->dq_map[dq].chanid != ~chanid) {
			dq++;
			continue;
		}

		res = octeontx_pko_dq_drain(dq);
		if (res > 0)
			octeontx_log_err("draining DQ%d, buffers left: %x",
					 dq, res);

		res = octeontx_pko_dq_close(dq);
		if (res < 0)
			octeontx_log_err("closing DQ%d failed", dq);

		dq_cnt++;
		dq++;
	}
	return dq_cnt;
}

int
octeontx_pko_channel_stop(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;

	octeontx_pko_chan_stop(ctl, chanid);
	return 0;
}

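/*
 * Fill an octeontx_dq_t for one DQ of a channel and hand it to the caller's
 * getter: the LMT line lives in BAR2, the SEND doorbell in BAR0 and the
 * flow-control status word in the shared fc_ctl array. Despite its name,
 * dq_num is the global index of a single DQ.
 */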
static inline int
octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			   void *out, size_t out_elem_size,
			   size_t dq_num, octeontx_pko_dq_getter_t getter)
{
	octeontx_dq_t curr;
	unsigned int dq_vf;
	unsigned int dq;

	RTE_SET_USED(out_elem_size);
	memset(&curr, 0, sizeof(octeontx_dq_t));

	dq_vf = dq_num / PKO_VF_NUM_DQ;
	dq = dq_num % PKO_VF_NUM_DQ;

	if (!ctl->pko[dq_vf].bar0)
		return -EINVAL;

	if (ctl->dq_map[dq_num].chanid != ~chanid)
		return -EINVAL;

	curr.lmtline_va = ctl->pko[dq_vf].bar2;
	curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
		+ PKO_VF_DQ_OP_SEND(dq, 0));
	/* FC entries are indexed by global DQ, as in octeontx_pko_dq_open() */
	curr.fc_status_va = ctl->fc_ctl + dq_num;

	octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
			 curr.lmtline_va, curr.ioreg_va,
			 curr.fc_status_va);

	getter(&curr, out);
	return 0;
}

int
octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
				size_t dq_num, octeontx_pko_dq_getter_t getter)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int dq_cnt;

	dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
						dq_num, getter);
	if (dq_cnt < 0)
		return -1;

	return dq_cnt;
}

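/*
 * Count probed VFs. This assumes pkovf_probe() filled pko[] contiguously
 * from vfid 0, since the scan stops at the first hole.
 */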
int
octeontx_pko_vf_count(void)
{
	int vf_cnt;

	vf_cnt = 0;
	while (vf_cnt < PKO_VF_MAX && pko_vf_ctl.pko[vf_cnt].bar0)
		vf_cnt++;

	return vf_cnt;
}

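/*
 * Allocate the shared flow-control memory (one entry per DQ across all VFs)
 * and point every VF's PKO_VF_DQ_FC_CONFIG at its slice. The BASE field
 * holds a 128-byte-aligned IOVA, hence the 128-byte allocation alignment
 * and the ~0x7F mask below.
 */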
int
octeontx_pko_init_fc(const size_t pko_vf_count)
{
	int dq_ix;
	uint64_t reg;
	uint8_t *vf_bar0;
	size_t vf_idx;
	size_t fc_mem_size;

	fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
			pko_vf_count * PKO_VF_NUM_DQ;

	pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
	if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
		octeontx_log_err("fc_iomem: not enough memory");
		return -ENOMEM;
	}

	pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2phy((void *)
							pko_vf_ctl.fc_iomem.va);
	pko_vf_ctl.fc_iomem.size = fc_mem_size;

	pko_vf_ctl.fc_ctl =
		(struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;

	/* Configure Flow-Control feature for all DQs of open VFs */
	for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
		dq_ix = vf_idx * PKO_VF_NUM_DQ;

		vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;

		reg = (pko_vf_ctl.fc_iomem.iova +
			(sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) &
			~0x7F;		/* BASE */
		reg |= (0x2 << 3) |	/* HYST_BITS */
		    (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
					/* STRIDE */
		    (0x1 << 0);		/* ENABLE */

		octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);

		octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64,
				 vf_bar0, (int)vf_idx, reg);
	}
	return 0;
}

void
octeontx_pko_fc_free(void)
{
	rte_free(pko_vf_ctl.fc_iomem.va);
}

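/*
 * One-time initialisation of the driver-global control block. dq_map entries
 * start at 0, which is exactly ~null_chanid, i.e. the "free" marker used by
 * the claim/free helpers above.
 */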
static void
octeontx_pkovf_setup(void)
{
	static bool init_once;

	if (!init_once) {
		unsigned int i;

		rte_spinlock_init(&pko_vf_ctl.lock);

		pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
		pko_vf_ctl.fc_ctl = NULL;

		for (i = 0; i < PKO_VF_MAX; i++) {
			pko_vf_ctl.pko[i].bar0 = NULL;
			pko_vf_ctl.pko[i].bar2 = NULL;
			pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
			pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
		}

		for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
			pko_vf_ctl.dq_map[i].chanid = 0;

		init_once = true;
	}
}

/* PKOVF PCIe device */
static int
pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint64_t val;
	uint16_t vfid;
	uint16_t domain;
	uint8_t *bar0;
	uint8_t *bar2;
	struct octeontx_pkovf *res;

	RTE_SET_USED(pci_drv);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL ||
	    pci_dev->mem_resource[2].addr == NULL) {
		octeontx_log_err("Empty bars %p %p",
			pci_dev->mem_resource[0].addr,
			pci_dev->mem_resource[2].addr);
		return -ENODEV;
	}
	bar0 = pci_dev->mem_resource[0].addr;
	bar2 = pci_dev->mem_resource[2].addr;

	octeontx_pkovf_setup();

	/* get vfid and domain */
	val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
	domain = (val >> 7) & 0xffff;
	vfid = (val >> 23) & 0xffff;

	if (unlikely(vfid >= PKO_VF_MAX)) {
		octeontx_log_err("pko: Invalid vfid %d", vfid);
		return -EINVAL;
	}

	res = &pko_vf_ctl.pko[vfid];
	res->vfid = vfid;
	res->domain = domain;
	res->bar0 = bar0;
	res->bar2 = bar2;

	octeontx_log_dbg("Domain=%d vfid=%d", res->domain, res->vfid);
	return 0;
}


#define PCI_VENDOR_ID_CAVIUM               0x177D
#define PCI_DEVICE_ID_OCTEONTX_PKO_VF      0xA049

static const struct rte_pci_id pci_pkovf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_PKO_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_pkovf = {
	.id_table = pci_pkovf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = pkovf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);