/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Interrupts handling for failsafe driver.
 */

#if defined(LINUX)
#include <sys/epoll.h>
#endif
#include <unistd.h>

#include <rte_alarm.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_io.h>
#include <rte_service_component.h>

#include "failsafe_private.h"

#define NUM_RX_PROXIES (FAILSAFE_MAX_ETHPORTS * RTE_MAX_RXTX_INTR_VEC_ID)


/**
 * Open an epoll file descriptor.
 *
 * @param flags
 *   Flags for defining epoll behavior.
 * @return
 *   Epoll file descriptor on success, negative value otherwise.
 */
static int
fs_epoll_create1(int flags)
{
#if defined(LINUX)
	return epoll_create1(flags);
#elif defined(BSD)
	RTE_SET_USED(flags);
	return -ENOTSUP;
#endif
}

/**
 * Rx event proxy service routine.
 * It runs on a service core, waits for Rx events raised by the subdevices
 * on the proxy epoll file descriptor, and forwards each event to the
 * matching failsafe Rx queue by writing to its event file descriptor.
 *
 * @param data
 *   Pointer to failsafe private structure.
 * @return
 *   0 on success, negative errno value otherwise.
 */
static int32_t
fs_rx_event_proxy_routine(void *data)
{
	struct fs_priv *priv;
	struct rxq *rxq;
	struct rte_epoll_event *events;
	uint64_t u64;
	int i, n;
	int rc = 0;

	u64 = 1;
	priv = data;
	events = priv->rxp.evec;
	n = rte_epoll_wait(priv->rxp.efd, events, NUM_RX_PROXIES, -1);
	for (i = 0; i < n; i++) {
		rxq = events[i].epdata.data;
		if (rxq->enable_events && rxq->event_fd != -1) {
			if (write(rxq->event_fd, &u64, sizeof(u64)) !=
			    sizeof(u64)) {
				ERROR("Failed to proxy Rx event to socket %d",
				      rxq->event_fd);
				rc = -EIO;
			}
		}
	}
	return rc;
}

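/*
 * The proxy service state tracked in priv->rxp.sstate moves through
 * SS_NO_SERVICE -> SS_REGISTERED -> SS_READY -> SS_RUNNING as the service
 * is registered, its runstates are enabled and it is mapped to a service
 * core. The uninstall routine below falls through the same states in
 * reverse order, so a partially installed service is torn down correctly.
 */
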
/**
 * Uninstall failsafe Rx event proxy service.
 *
 * @param priv
 *   Pointer to failsafe private structure.
 */
static void
fs_rx_event_proxy_service_uninstall(struct fs_priv *priv)
{
	/* Unregister the event service. */
	switch (priv->rxp.sstate) {
	case SS_RUNNING:
		rte_service_map_lcore_set(priv->rxp.sid, priv->rxp.scid, 0);
		/* fall through */
	case SS_READY:
		rte_service_runstate_set(priv->rxp.sid, 0);
		rte_service_set_stats_enable(priv->rxp.sid, 0);
		rte_service_component_runstate_set(priv->rxp.sid, 0);
		/* fall through */
	case SS_REGISTERED:
		rte_service_component_unregister(priv->rxp.sid);
		/* fall through */
	default:
		break;
	}
}

/**
 * Install the failsafe Rx event proxy service.
 * The Rx event proxy is the service that listens to Rx events from the
 * subdevices and triggers failsafe Rx events accordingly.
 *
 * @param priv
 *   Pointer to failsafe private structure.
 * @return
 *   0 on success, negative errno value otherwise.
 */
static int
fs_rx_event_proxy_service_install(struct fs_priv *priv)
{
	struct rte_service_spec service;
	int32_t num_service_cores;
	int ret = 0;

	num_service_cores = rte_service_lcore_count();
	if (num_service_cores <= 0) {
		ERROR("Failed to install Rx interrupts, "
		      "no service core found");
		return -ENOTSUP;
	}
	/* prepare service info */
	memset(&service, 0, sizeof(struct rte_service_spec));
	snprintf(service.name, sizeof(service.name), "%s_Rx_service",
		 priv->data->name);
	service.socket_id = priv->data->numa_node;
	service.callback = fs_rx_event_proxy_routine;
	service.callback_userdata = priv;

	if (priv->rxp.sstate == SS_NO_SERVICE) {
		uint32_t service_core_list[num_service_cores];

		/* get a service core to work with */
		ret = rte_service_lcore_list(service_core_list,
					     num_service_cores);
		if (ret <= 0) {
			ERROR("Failed to install Rx interrupts, "
			      "service core list empty or corrupted");
			return -ENOTSUP;
		}
		priv->rxp.scid = service_core_list[0];
		ret = rte_service_lcore_add(priv->rxp.scid);
		if (ret && ret != -EALREADY) {
			ERROR("Failed adding service core");
			return ret;
		}
		/* service core may be in "stopped" state, start it */
		ret = rte_service_lcore_start(priv->rxp.scid);
		if (ret && (ret != -EALREADY)) {
			ERROR("Failed to install Rx interrupts, "
			      "service core not started");
			return ret;
		}
		/* register our service */
		ret = rte_service_component_register(&service,
						     &priv->rxp.sid);
		if (ret) {
			ERROR("service register() failed");
			return -ENOEXEC;
		}
		priv->rxp.sstate = SS_REGISTERED;
		/* run the service */
		ret = rte_service_component_runstate_set(priv->rxp.sid, 1);
		if (ret < 0) {
			ERROR("Failed setting component runstate");
			return ret;
		}
		ret = rte_service_set_stats_enable(priv->rxp.sid, 1);
		if (ret < 0) {
			ERROR("Failed enabling stats");
			return ret;
		}
		ret = rte_service_runstate_set(priv->rxp.sid, 1);
		if (ret < 0) {
			ERROR("Failed to run service");
			return ret;
		}
		priv->rxp.sstate = SS_READY;
		/* map the service with the service core */
		ret = rte_service_map_lcore_set(priv->rxp.sid,
						priv->rxp.scid, 1);
		if (ret) {
			ERROR("Failed to install Rx interrupts, "
			      "could not map service core");
			return ret;
		}
		priv->rxp.sstate = SS_RUNNING;
	}
	return 0;
}

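/*
 * The proxy event vector is sized for the worst case, NUM_RX_PROXIES
 * (FAILSAFE_MAX_ETHPORTS * RTE_MAX_RXTX_INTR_VEC_ID) entries, so a single
 * rte_epoll_wait() call in the proxy routine can drain every pending
 * subdevice queue event at once.
 */
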
/**
 * Install failsafe Rx event proxy subsystem.
 * This is the way the failsafe PMD generates Rx events on behalf of its
 * subdevices.
 *
 * @param priv
 *   Pointer to failsafe private structure.
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
fs_rx_event_proxy_install(struct fs_priv *priv)
{
	int rc = 0;

	/*
	 * Create the epoll fd and event vector for the proxy service to
	 * wait on for Rx events generated by the subdevices.
	 */
	priv->rxp.efd = fs_epoll_create1(0);
	if (priv->rxp.efd < 0) {
		rte_errno = errno;
		ERROR("Failed to create epoll,"
		      " Rx interrupts will not be supported");
		return -rte_errno;
	}
	priv->rxp.evec = calloc(NUM_RX_PROXIES, sizeof(*priv->rxp.evec));
	if (priv->rxp.evec == NULL) {
		ERROR("Failed to allocate memory for event vectors,"
		      " Rx interrupts will not be supported");
		rc = -ENOMEM;
		goto error;
	}
	rc = fs_rx_event_proxy_service_install(priv);
	if (rc < 0)
		goto error;
	return 0;
error:
	if (priv->rxp.efd >= 0) {
		close(priv->rxp.efd);
		priv->rxp.efd = -1;
	}
	if (priv->rxp.evec != NULL) {
		free(priv->rxp.evec);
		priv->rxp.evec = NULL;
	}
	rte_errno = -rc;
	return rc;
}

/**
 * Rx interrupt control per subdevice.
 *
 * @param sdev
 *   Pointer to sub-device structure.
 * @param op
 *   The operation to be performed for the vector:
 *   either RTE_INTR_EVENT_ADD or RTE_INTR_EVENT_DEL.
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
failsafe_eth_rx_intr_ctl_subdevice(struct sub_device *sdev, int op)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev *fsdev;
	int epfd;
	uint16_t pid;
	uint16_t qid;
	struct rxq *fsrxq;
	int rc;
	int ret = 0;

	if (sdev == NULL || ETH(sdev) == NULL) {
		ERROR("Called with invalid arguments");
		return -EINVAL;
	}
	fsdev = fs_dev(sdev);
	if (fsdev == NULL || PRIV(fsdev) == NULL) {
		ERROR("Called with invalid arguments");
		return -EINVAL;
	}
	dev = ETH(sdev);
	epfd = PRIV(fsdev)->rxp.efd;
	pid = PORT_ID(sdev);

	if (epfd <= 0) {
		if (op == RTE_INTR_EVENT_ADD) {
			ERROR("Proxy events are not initialized");
			return -EBADF;
		} else {
			return 0;
		}
	}
	if (dev->data->nb_rx_queues > fsdev->data->nb_rx_queues) {
		ERROR("subdevice has too many queues,"
		      " interrupts will not be enabled");
		return -E2BIG;
	}
	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		/* Register the failsafe rxq as the epoll user data, so the
		 * proxy routine can map each event back to its queue.
		 */
		fsrxq = fsdev->data->rx_queues[qid];
		rc = rte_eth_dev_rx_intr_ctl_q(pid, qid, epfd,
					       op, (void *)fsrxq);
		if (rc) {
			ERROR("rte_eth_dev_rx_intr_ctl_q failed for "
			      "port %d queue %d, epfd %d, error %d",
			      pid, qid, epfd, rc);
			ret = rc;
		}
	}
	return ret;
}

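/*
 * The two subdevice-level routines below are the hot-plug counterparts of
 * failsafe_rx_intr_install()/failsafe_rx_intr_uninstall(): they register or
 * unregister a single subdevice's queues with the proxy epoll and re-apply
 * the per-queue interrupt enable state, leaving the proxy service and the
 * failsafe interrupt vector untouched.
 */
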
/**
 * Install Rx interrupts subsystem for a subdevice.
 * This is a support for dynamically adding subdevices.
 *
 * @param sdev
 *   Pointer to subdevice structure.
 *
 * @return
 *   0 on success, negative errno value otherwise.
 */
int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
{
	int rc;
	int qid;
	struct rte_eth_dev *fsdev;
	struct rxq **rxq;
	const struct rte_eth_intr_conf *const intr_conf =
			&ETH(sdev)->data->dev_conf.intr_conf;

	fsdev = fs_dev(sdev);
	rxq = (struct rxq **)fsdev->data->rx_queues;
	if (intr_conf->rxq == 0)
		return 0;
	rc = failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_ADD);
	if (rc)
		return rc;
	/* enable interrupts on already-enabled queues */
	for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
		if (rxq[qid]->enable_events) {
			int ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev),
							     qid);
			if (ret && (ret != -ENOTSUP)) {
				ERROR("Failed to enable interrupts on "
				      "port %d queue %d", PORT_ID(sdev), qid);
				rc = ret;
			}
		}
	}
	return rc;
}

/**
 * Uninstall Rx interrupts subsystem for a subdevice.
 * This is a support for dynamically removing subdevices.
 *
 * @param sdev
 *   Pointer to subdevice structure.
 */
void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev)
{
	int qid;
	struct rte_eth_dev *fsdev;
	struct rxq *fsrxq;

	fsdev = fs_dev(sdev);
	for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
		if (qid < fsdev->data->nb_rx_queues) {
			fsrxq = fsdev->data->rx_queues[qid];
			if (fsrxq != NULL && fsrxq->enable_events)
				rte_eth_dev_rx_intr_disable(PORT_ID(sdev),
							    qid);
		}
	}
	failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_DEL);
}

/**
 * Uninstall failsafe Rx event proxy.
 *
 * @param priv
 *   Pointer to failsafe private structure.
 */
static void
fs_rx_event_proxy_uninstall(struct fs_priv *priv)
{
	fs_rx_event_proxy_service_uninstall(priv);
	if (priv->rxp.evec != NULL) {
		free(priv->rxp.evec);
		priv->rxp.evec = NULL;
	}
	if (priv->rxp.efd >= 0) {
		close(priv->rxp.efd);
		priv->rxp.efd = -1;
	}
}

/**
 * Uninstall failsafe interrupt vector.
 *
 * @param priv
 *   Pointer to failsafe private structure.
 */
static void
fs_rx_intr_vec_uninstall(struct fs_priv *priv)
{
	struct rte_intr_handle *intr_handle;

	intr_handle = &priv->intr_handle;
	if (intr_handle->intr_vec != NULL) {
		free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
	intr_handle->nb_efd = 0;
}

/**
 * Installs failsafe interrupt vector to be registered with EAL later on.
 *
 * @param priv
 *   Pointer to failsafe private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
fs_rx_intr_vec_install(struct fs_priv *priv)
{
	unsigned int i;
	unsigned int rxqs_n;
	unsigned int n;
	unsigned int count;
	struct rte_intr_handle *intr_handle;

	rxqs_n = priv->data->nb_rx_queues;
	n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	count = 0;
	intr_handle = &priv->intr_handle;
	RTE_ASSERT(intr_handle->intr_vec == NULL);
	/* Allocate the interrupt vector of the failsafe Rx proxy interrupts */
	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
	if (intr_handle->intr_vec == NULL) {
		fs_rx_intr_vec_uninstall(priv);
		rte_errno = ENOMEM;
		ERROR("Failed to allocate memory for interrupt vector,"
		      " Rx interrupts will not be supported");
		return -rte_errno;
	}
	for (i = 0; i < n; i++) {
		struct rxq *rxq = priv->data->rx_queues[i];

		/* Skip queues that cannot request interrupts. */
		if (rxq == NULL || rxq->event_fd < 0) {
			/* Use invalid intr_vec[] index to disable entry. */
			intr_handle->intr_vec[i] =
				RTE_INTR_VEC_RXTX_OFFSET +
				RTE_MAX_RXTX_INTR_VEC_ID;
			continue;
		}
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			rte_errno = E2BIG;
			ERROR("Too many Rx queues for interrupt vector size"
			      " (%d), Rx interrupts cannot be enabled",
			      RTE_MAX_RXTX_INTR_VEC_ID);
			fs_rx_intr_vec_uninstall(priv);
			return -rte_errno;
		}
		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
		intr_handle->efds[count] = rxq->event_fd;
		count++;
	}
	if (count == 0) {
		fs_rx_intr_vec_uninstall(priv);
	} else {
		intr_handle->nb_efd = count;
		intr_handle->efd_counter_size = sizeof(uint64_t);
	}
	return 0;
}

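/*
 * From the application's point of view the failsafe port behaves like any
 * other port with intr_conf.rxq enabled. A minimal consumer-side sketch
 * (illustrative only, "fs_port_id" and "qid" are placeholder names):
 *
 *	rte_eth_dev_rx_intr_ctl_q(fs_port_id, qid, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(fs_port_id, qid);
 *	struct rte_epoll_event ev;
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *
 * failsafe_rx_intr_install() below wires priv->intr_handle so that the
 * event file descriptors exposed to the application are the per-queue
 * event_fd values written by the proxy service.
 */
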

/**
 * Uninstall failsafe Rx interrupts subsystem.
 *
 * @param dev
 *   Pointer to the failsafe Ethernet device.
 */
void
failsafe_rx_intr_uninstall(struct rte_eth_dev *dev)
{
	struct fs_priv *priv;
	struct rte_intr_handle *intr_handle;

	priv = PRIV(dev);
	intr_handle = &priv->intr_handle;
	rte_intr_free_epoll_fd(intr_handle);
	fs_rx_event_proxy_uninstall(priv);
	fs_rx_intr_vec_uninstall(priv);
	dev->intr_handle = NULL;
}

/**
 * Install failsafe Rx interrupts subsystem.
 *
 * @param dev
 *   Pointer to the failsafe Ethernet device.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
failsafe_rx_intr_install(struct rte_eth_dev *dev)
{
	struct fs_priv *priv = PRIV(dev);
	const struct rte_eth_intr_conf *const intr_conf =
			&priv->data->dev_conf.intr_conf;

	if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
		return 0;
	if (fs_rx_intr_vec_install(priv) < 0)
		return -rte_errno;
	if (fs_rx_event_proxy_install(priv) < 0) {
		fs_rx_intr_vec_uninstall(priv);
		return -rte_errno;
	}
	dev->intr_handle = &priv->intr_handle;
	return 0;
}