/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 NXP
 */

#include "pfe_logs.h"
#include "pfe_mod.h"
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
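/*
 * The HIF descriptor area is a single cache-line-aligned allocation holding
 * HIF_RX_DESC_NT Rx descriptors followed by HIF_TX_DESC_NT Tx descriptors.
 * descr_baseaddr_v keeps the virtual address (used for freeing), while
 * descr_baseaddr_p keeps the bus address obtained via pfe_mem_vtop() for
 * programming the HIF DMA. How the area is split into the Rx and Tx rings
 * is handled by the ring setup code elsewhere in the driver; the Rx-first
 * layout described here is inferred from the sizes used below.
 */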
static int
pfe_hif_alloc_descr(struct pfe_hif *hif)
{
	void *addr;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
		HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
	if (!addr) {
		PFE_PMD_ERR("Could not allocate buffer descriptors!");
		err = -ENOMEM;
		goto err0;
	}

	hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
	hif->descr_baseaddr_v = addr;
	hif->rx_ring_size = HIF_RX_DESC_NT;
	hif->tx_ring_size = HIF_TX_DESC_NT;

	return 0;

err0:
	return err;
}

static void
pfe_hif_free_descr(struct pfe_hif *hif)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(hif->descr_baseaddr_v);
}

/*
 * pfe_hif_client_register
 *
 * This function is used to register a client driver with the HIF driver.
 *
 * Return value:
 * 0 - on successful registration
 */
static int
pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
			struct hif_client_shm *client_shm)
{
	struct hif_client *client = &hif->client[client_id];
	u32 i, cnt;
	struct rx_queue_desc *rx_qbase;
	struct tx_queue_desc *tx_qbase;
	struct hif_rx_queue *rx_queue;
	struct hif_tx_queue *tx_queue;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->tx_lock);

	if (test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d already registered", client_id);
		err = -1;
		goto unlock;
	}

	memset(client, 0, sizeof(struct hif_client));

	/* Initialize client Rx queues baseaddr, size */
	cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
	/* Check if client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->rx_qn = cnt;
	rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
	for (i = 0; i < cnt; i++) {
		rx_queue = &client->rx_q[i];
		rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
		rx_queue->size = client_shm->rx_qsize;
		rx_queue->write_idx = 0;
	}

	/* Initialize client Tx queues baseaddr, size */
	cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);

	/* Check if client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->tx_qn = cnt;
	tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
	for (i = 0; i < cnt; i++) {
		tx_queue = &client->tx_q[i];
		tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
		tx_queue->size = client_shm->tx_qsize;
		tx_queue->ack_idx = 0;
	}

	set_bit(client_id, &hif->shm->g_client_status[0]);

unlock:
	rte_spinlock_unlock(&hif->tx_lock);

	return err;
}

/*
 * pfe_hif_client_unregister
 *
 * This function is used to unregister a client from the HIF driver.
 */
static void
pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
{
	PMD_INIT_FUNC_TRACE();

	/*
	 * Mark the client as no longer available (which prevents further
	 * packet receive for this client)
	 */
	rte_spinlock_lock(&hif->tx_lock);

	if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d not registered", client_id);

		rte_spinlock_unlock(&hif->tx_lock);
		return;
	}

	clear_bit(client_id, &hif->shm->g_client_status[0]);

	rte_spinlock_unlock(&hif->tx_lock);
}

void
hif_process_client_req(struct pfe_hif *hif, int req,
		       int data1, __rte_unused int data2)
{
	unsigned int client_id = data1;

	if (client_id >= HIF_CLIENTS_MAX) {
		PFE_PMD_ERR("client id %d out of bounds", client_id);
		return;
	}

	switch (req) {
	case REQUEST_CL_REGISTER:
		/* Request to register a client */
		PFE_PMD_INFO("register client_id %d", client_id);
		pfe_hif_client_register(hif, client_id, (struct
			hif_client_shm *)&hif->shm->client[client_id]);
		break;

	case REQUEST_CL_UNREGISTER:
		PFE_PMD_INFO("unregister client_id %d", client_id);

		/* Request to unregister a client */
		pfe_hif_client_unregister(hif, client_id);

		break;

	default:
		PFE_PMD_ERR("unsupported request %d", req);
		break;
	}

	/*
	 * Process client Tx queues
	 * Tx pending is currently not checked here
	 */
}
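/*
 * Illustrative only: a client (the queue setup code elsewhere in this
 * driver) announces itself by issuing a REQUEST_CL_REGISTER request for its
 * slot in the shared memory, e.g.
 *
 *	hif_process_client_req(&pfe->hif, REQUEST_CL_REGISTER, client_id, 0);
 *
 * and tears itself down again with REQUEST_CL_UNREGISTER. data1 carries the
 * client id; data2 is unused by these two requests.
 */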
#if defined(LS1012A_PFE_RESET_WA)
/*
 * LS1012A reset workaround: quiesce the HIF Rx BDP before re-init/teardown
 * by marking every Rx BD as LAST_BD and, while the Rx DMA is still active,
 * pushing dummy packets through the class block until it goes idle.
 */
static void
pfe_hif_disable_rx_desc(struct pfe_hif *hif)
{
	u32 ii;
	struct hif_desc *desc = hif->rx_base;

	/* Mark all descriptors as LAST_BD */
	for (ii = 0; ii < hif->rx_ring_size; ii++) {
		desc->ctrl |= BD_CTRL_LAST_BD;
		desc++;
	}
}

struct class_rx_hdr_t {
	u32	next_ptr;	/* ptr to the start of the first DDR buffer */
	u16	length;		/* total packet length */
	u16	phyno;		/* input physical port number */
	u32	status;		/* gemac status bits */
	u32	status2;	/* reserved for software usage */
};

/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
 * except overflow
 */
#define STATUS_BAD_FRAME_ERR		BIT(16)
#define STATUS_LENGTH_ERR		BIT(17)
#define STATUS_CRC_ERR			BIT(18)
#define STATUS_TOO_SHORT_ERR		BIT(19)
#define STATUS_TOO_LONG_ERR		BIT(20)
#define STATUS_CODE_ERR			BIT(21)
#define STATUS_MC_HASH_MATCH		BIT(22)
#define STATUS_CUMULATIVE_ARC_HIT	BIT(23)
#define STATUS_UNICAST_HASH_MATCH	BIT(24)
#define STATUS_IP_CHECKSUM_CORRECT	BIT(25)
#define STATUS_TCP_CHECKSUM_CORRECT	BIT(26)
#define STATUS_UDP_CHECKSUM_CORRECT	BIT(27)
#define STATUS_OVERFLOW_ERR		BIT(28) /* GPI error */
#define MIN_PKT_SIZE			64
#define DUMMY_PKT_COUNT			128

static inline void
copy_to_lmem(u32 *dst, u32 *src, int len)
{
	int i;

	for (i = 0; i < len; i += sizeof(u32)) {
		*dst = htonl(*src);
		dst++; src++;
	}
}

#if defined(RTE_TOOLCHAIN_GCC)
__attribute__ ((optimize(1)))
#endif
static void
send_dummy_pkt_to_hif(void)
{
	void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
	u64 physaddr;
	struct class_rx_hdr_t local_hdr;
	static u32 dummy_pkt[] = {
		0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
		0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
		0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };

	ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!ddr_ptr)
		return;

	lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!lmem_ptr)
		return;

	PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
	physaddr = DDR_VIRT_TO_PFE(ddr_ptr);

	lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);

	local_hdr.phyno = htons(0); /* RX_PHY_0 */
	local_hdr.length = htons(MIN_PKT_SIZE);

	local_hdr.next_ptr = htonl((u32)physaddr);
	/* Mark the checksums as correct */
	local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
				STATUS_UDP_CHECKSUM_CORRECT |
				STATUS_TCP_CHECKSUM_CORRECT |
				STATUS_UNICAST_HASH_MATCH |
				STATUS_CUMULATIVE_ARC_HIT));
	copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
		     sizeof(local_hdr));

	copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
		     0x40);

	writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
}

void
pfe_hif_rx_idle(struct pfe_hif *hif)
{
	int hif_stop_loop = DUMMY_PKT_COUNT;
	u32 rx_status;

	pfe_hif_disable_rx_desc(hif);
	PFE_PMD_INFO("Bringing hif to idle state...");
	writel(0, HIF_INT_ENABLE);
	/* If HIF Rx BDP is busy, send a dummy packet */
	do {
		rx_status = readl(HIF_RX_STATUS);
		if (rx_status & BDP_CSR_RX_DMA_ACTV)
			send_dummy_pkt_to_hif();

		sleep(1);
	} while (--hif_stop_loop);

	if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
		PFE_PMD_ERR("Failed\n");
	else
		PFE_PMD_INFO("Done\n");
}
#endif

/*
 * pfe_hif_init
 * This function allocates the HIF descriptor area and initializes the locks
 * and the optional interrupt (eventfd/epoll) support.
 */
int
pfe_hif_init(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;
	int err;

	PMD_INIT_FUNC_TRACE();

#if defined(LS1012A_PFE_RESET_WA)
	pfe_hif_rx_idle(hif);
#endif

	err = pfe_hif_alloc_descr(hif);
	if (err)
		goto err0;

	rte_spinlock_init(&hif->tx_lock);
	rte_spinlock_init(&hif->lock);

	gpi_enable(HGPI_BASE_ADDR);
	if (getenv("PFE_INTR_SUPPORT")) {
		struct epoll_event epoll_ev;
		int event_fd = -1, epoll_fd, pfe_cdev_fd;

		pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
		if (pfe_cdev_fd < 0) {
			PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
				     PFE_CDEV_PATH);
			pfe->cdev_fd = PFE_CDEV_INVALID_FD;
			return -1;
		}
		pfe->cdev_fd = pfe_cdev_fd;

		event_fd = eventfd(0, EFD_NONBLOCK);
		/* hif interrupt enable */
		err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
		if (err) {
			PFE_PMD_ERR("ioctl failed for intr enable err: %d\n",
				    errno);
			goto err0;
		}
		epoll_fd = epoll_create(1);
		epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
		epoll_ev.data.fd = event_fd;
		err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
		if (err < 0) {
			PFE_PMD_ERR("epoll_ctl failed with err = %d\n", errno);
			goto err0;
		}
		pfe->hif.epoll_fd = epoll_fd;
	}

	return 0;
err0:
	return err;
}
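/*
 * When PFE_INTR_SUPPORT is set in the environment, pfe_hif_init() opens the
 * PFE character device, enables the HIF interrupt through the
 * PFE_CDEV_HIF_INTR_EN ioctl and leaves an armed eventfd behind
 * pfe->hif.epoll_fd. A minimal wait loop on that fd could look like the
 * sketch below (illustrative only; timeout_ms is a placeholder and the
 * PMD's real Rx handling lives in the queue code elsewhere):
 *
 *	struct epoll_event ev;
 *	uint64_t cnt;
 *
 *	if (epoll_wait(pfe->hif.epoll_fd, &ev, 1, timeout_ms) > 0)
 *		read(ev.data.fd, &cnt, sizeof(cnt));
 */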
/* pfe_hif_exit - shut down the HIF and release its resources */
void
pfe_hif_exit(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->lock);
	hif->shm->g_client_status[0] = 0;
	/* Make sure all clients are disabled */
	hif->shm->g_client_status[1] = 0;

	rte_spinlock_unlock(&hif->lock);

	if (hif->setuped) {
#if defined(LS1012A_PFE_RESET_WA)
		pfe_hif_rx_idle(hif);
#endif
		/* Disable Rx/Tx */
		hif_rx_disable();
		hif_tx_disable();

		pfe_hif_free_descr(hif);
		pfe->hif.setuped = 0;
	}
	gpi_disable(HGPI_BASE_ADDR);
}
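/*
 * Expected pairing, as suggested by the calls in this file (the actual
 * probe/close hooks live in the ethdev code elsewhere in the driver):
 *
 *	if (pfe_hif_init(pfe))
 *		return -1;
 *	...
 *	pfe_hif_exit(pfe);
 */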