/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 NXP
 */

#include "pfe_logs.h"
#include "pfe_mod.h"
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

static int
pfe_hif_alloc_descr(struct pfe_hif *hif)
{
	void *addr;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
		HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
	if (!addr) {
		PFE_PMD_ERR("Could not allocate buffer descriptors!");
		err = -ENOMEM;
		goto err0;
	}

	hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
	hif->descr_baseaddr_v = addr;
	hif->rx_ring_size = HIF_RX_DESC_NT;
	hif->tx_ring_size = HIF_TX_DESC_NT;

	return 0;

err0:
	return err;
}

static void
pfe_hif_free_descr(struct pfe_hif *hif)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(hif->descr_baseaddr_v);
}

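/* LS1012A_PFE_RESET_WA: helpers used to force the HIF Rx BDP into an idle
 * state (see pfe_hif_rx_idle() below) by marking every Rx descriptor as
 * LAST_BD and injecting dummy packets through the classifier until the
 * BDP DMA stops.
 */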
#if defined(LS1012A_PFE_RESET_WA)
static void
pfe_hif_disable_rx_desc(struct pfe_hif *hif)
{
	u32 ii;
	struct hif_desc *desc = hif->rx_base;

	/* Mark all descriptors as LAST_BD */
	for (ii = 0; ii < hif->rx_ring_size; ii++) {
		desc->ctrl |= BD_CTRL_LAST_BD;
		desc++;
	}
}

struct class_rx_hdr_t {
	u32     next_ptr;       /* ptr to the start of the first DDR buffer */
	u16     length;         /* total packet length */
	u16     phyno;          /* input physical port number */
	u32     status;         /* gemac status bits */
	u32     status2;        /* reserved for software usage */
};

/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
 * except overflow
 */
#define STATUS_BAD_FRAME_ERR            BIT(16)
#define STATUS_LENGTH_ERR               BIT(17)
#define STATUS_CRC_ERR                  BIT(18)
#define STATUS_TOO_SHORT_ERR            BIT(19)
#define STATUS_TOO_LONG_ERR             BIT(20)
#define STATUS_CODE_ERR                 BIT(21)
#define STATUS_MC_HASH_MATCH            BIT(22)
#define STATUS_CUMULATIVE_ARC_HIT       BIT(23)
#define STATUS_UNICAST_HASH_MATCH       BIT(24)
#define STATUS_IP_CHECKSUM_CORRECT      BIT(25)
#define STATUS_TCP_CHECKSUM_CORRECT     BIT(26)
#define STATUS_UDP_CHECKSUM_CORRECT     BIT(27)
#define STATUS_OVERFLOW_ERR             BIT(28) /* GPI error */
#define MIN_PKT_SIZE			64
#define DUMMY_PKT_COUNT			128

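/* Copy len bytes from src to the LMEM buffer at dst, one 32-bit word at a
 * time, byte-swapping each word to big-endian with htonl() on the way.
 */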
static inline void
copy_to_lmem(u32 *dst, u32 *src, int len)
{
	int i;

	for (i = 0; i < len; i += sizeof(u32)) {
		*dst = htonl(*src);
		dst++; src++;
	}
}
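
/* Allocate one buffer from BMU2 (DDR) and one from BMU1 (LMEM), build a
 * class_rx_hdr_t followed by a canned 64-byte frame in the LMEM buffer, and
 * hand the LMEM pointer to the classifier input queue (CLASS_INQ_PKTPTR) so
 * that the HIF Rx BDP can make progress while it is being idled.
 */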
#if defined(RTE_TOOLCHAIN_GCC)
__attribute__ ((optimize(1)))
#endif
static void
send_dummy_pkt_to_hif(void)
{
	void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
	u64 physaddr;
	struct class_rx_hdr_t local_hdr;
	static u32 dummy_pkt[] = {
		0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
		0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
		0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };

	ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!ddr_ptr)
		return;

	lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!lmem_ptr)
		return;

	PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
	physaddr = DDR_VIRT_TO_PFE(ddr_ptr);

	lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);

	local_hdr.phyno = htons(0); /* RX_PHY_0 */
	local_hdr.length = htons(MIN_PKT_SIZE);

	local_hdr.next_ptr = htonl((u32)physaddr);
	/* Mark the checksums as correct */
	local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
				STATUS_UDP_CHECKSUM_CORRECT |
				STATUS_TCP_CHECKSUM_CORRECT |
				STATUS_UNICAST_HASH_MATCH |
				STATUS_CUMULATIVE_ARC_HIT));
	copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
		     sizeof(local_hdr));

	copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
		     0x40);

	writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
}

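/* Bring the HIF Rx BDP to an idle state: mark all Rx descriptors as LAST_BD,
 * mask HIF interrupts and, while the BDP DMA is still reported active, keep
 * injecting dummy packets (up to DUMMY_PKT_COUNT attempts, one second apart).
 */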
void
pfe_hif_rx_idle(struct pfe_hif *hif)
{
	int hif_stop_loop = DUMMY_PKT_COUNT;
	u32 rx_status;

	pfe_hif_disable_rx_desc(hif);
	PFE_PMD_INFO("Bringing hif to idle state...");
	writel(0, HIF_INT_ENABLE);
	/* If the HIF Rx BDP is busy, send a dummy packet */
	do {
		rx_status = readl(HIF_RX_STATUS);
		if (rx_status & BDP_CSR_RX_DMA_ACTV)
			send_dummy_pkt_to_hif();

		sleep(1);
	} while (--hif_stop_loop);

	if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
		PFE_PMD_ERR("Failed to bring HIF Rx BDP to idle state");
	else
		PFE_PMD_INFO("HIF Rx BDP is idle");
}
#endif

/*
 * pfe_hif_init
 * Allocate the HIF descriptor rings, initialize the HIF locks, enable the
 * host GPI, and optionally (PFE_INTR_SUPPORT) set up HIF interrupt
 * notification through the PFE character device.
 */
int
pfe_hif_init(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;
	int err;

	PMD_INIT_FUNC_TRACE();

#if defined(LS1012A_PFE_RESET_WA)
	pfe_hif_rx_idle(hif);
#endif

	err = pfe_hif_alloc_descr(hif);
	if (err)
		goto err0;

	rte_spinlock_init(&hif->tx_lock);
	rte_spinlock_init(&hif->lock);

	gpi_enable(HGPI_BASE_ADDR);
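
	/*
	 * When PFE_INTR_SUPPORT is set in the environment, open the PFE
	 * character device, enable HIF interrupts through it with an eventfd
	 * for notification, and register that eventfd with an epoll instance
	 * stored in hif->epoll_fd.
	 */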
	if (getenv("PFE_INTR_SUPPORT")) {
		struct epoll_event epoll_ev;
		int event_fd = -1, epoll_fd, pfe_cdev_fd;

		pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
		if (pfe_cdev_fd < 0) {
			PFE_PMD_WARN("Unable to open PFE device file (%s).",
				     PFE_CDEV_PATH);
			pfe->cdev_fd = PFE_CDEV_INVALID_FD;
			return -1;
		}
		pfe->cdev_fd = pfe_cdev_fd;

		event_fd = eventfd(0, EFD_NONBLOCK);
		if (event_fd < 0) {
			err = -errno;
			PFE_PMD_ERR("eventfd create failed with err: %d",
					errno);
			goto err0;
		}

		/* hif interrupt enable */
		err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
		if (err) {
			PFE_PMD_ERR("ioctl failed for intr enable err: %d",
					errno);
			goto err0;
		}
		epoll_fd = epoll_create(1);
		if (epoll_fd < 0) {
			err = -errno;
			PFE_PMD_ERR("epoll_create failed with err = %d",
					errno);
			goto err0;
		}
		epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
		epoll_ev.data.fd = event_fd;
		err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
		if (err < 0) {
			PFE_PMD_ERR("epoll_ctl failed with err = %d", errno);
			goto err0;
		}
		pfe->hif.epoll_fd = epoll_fd;
	}
	return 0;
err0:
	return err;
}

/* pfe_hif_exit - disable the HIF and release its resources */
void
pfe_hif_exit(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->lock);
	/* Make sure all clients are disabled */
	hif->shm->g_client_status[0] = 0;
	hif->shm->g_client_status[1] = 0;

	rte_spinlock_unlock(&hif->lock);

	if (hif->setuped) {
#if defined(LS1012A_PFE_RESET_WA)
		pfe_hif_rx_idle(hif);
#endif
		/* Disable Rx/Tx */
		hif_rx_disable();
		hif_tx_disable();

		pfe_hif_free_descr(hif);
		pfe->hif.setuped = 0;
	}
	gpi_disable(HGPI_BASE_ADDR);
}
257