1*99a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 2*99a2dd95SBruce Richardson * Copyright(c) 2016 Cavium, Inc 3*99a2dd95SBruce Richardson */ 4*99a2dd95SBruce Richardson 5*99a2dd95SBruce Richardson #include <ctype.h> 6*99a2dd95SBruce Richardson #include <stdio.h> 7*99a2dd95SBruce Richardson #include <stdlib.h> 8*99a2dd95SBruce Richardson #include <string.h> 9*99a2dd95SBruce Richardson #include <stdarg.h> 10*99a2dd95SBruce Richardson #include <errno.h> 11*99a2dd95SBruce Richardson #include <stdint.h> 12*99a2dd95SBruce Richardson #include <inttypes.h> 13*99a2dd95SBruce Richardson #include <sys/types.h> 14*99a2dd95SBruce Richardson #include <sys/queue.h> 15*99a2dd95SBruce Richardson 16*99a2dd95SBruce Richardson #include <rte_string_fns.h> 17*99a2dd95SBruce Richardson #include <rte_byteorder.h> 18*99a2dd95SBruce Richardson #include <rte_log.h> 19*99a2dd95SBruce Richardson #include <rte_debug.h> 20*99a2dd95SBruce Richardson #include <rte_dev.h> 21*99a2dd95SBruce Richardson #include <rte_memory.h> 22*99a2dd95SBruce Richardson #include <rte_memcpy.h> 23*99a2dd95SBruce Richardson #include <rte_memzone.h> 24*99a2dd95SBruce Richardson #include <rte_eal.h> 25*99a2dd95SBruce Richardson #include <rte_per_lcore.h> 26*99a2dd95SBruce Richardson #include <rte_lcore.h> 27*99a2dd95SBruce Richardson #include <rte_atomic.h> 28*99a2dd95SBruce Richardson #include <rte_branch_prediction.h> 29*99a2dd95SBruce Richardson #include <rte_common.h> 30*99a2dd95SBruce Richardson #include <rte_malloc.h> 31*99a2dd95SBruce Richardson #include <rte_errno.h> 32*99a2dd95SBruce Richardson #include <rte_ethdev.h> 33*99a2dd95SBruce Richardson #include <rte_cryptodev.h> 34*99a2dd95SBruce Richardson #include <rte_cryptodev_pmd.h> 35*99a2dd95SBruce Richardson #include <rte_telemetry.h> 36*99a2dd95SBruce Richardson 37*99a2dd95SBruce Richardson #include "rte_eventdev.h" 38*99a2dd95SBruce Richardson #include "eventdev_pmd.h" 39*99a2dd95SBruce Richardson #include "rte_eventdev_trace.h" 
40*99a2dd95SBruce Richardson 41*99a2dd95SBruce Richardson static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS]; 42*99a2dd95SBruce Richardson 43*99a2dd95SBruce Richardson struct rte_eventdev *rte_eventdevs = rte_event_devices; 44*99a2dd95SBruce Richardson 45*99a2dd95SBruce Richardson static struct rte_eventdev_global eventdev_globals = { 46*99a2dd95SBruce Richardson .nb_devs = 0 47*99a2dd95SBruce Richardson }; 48*99a2dd95SBruce Richardson 49*99a2dd95SBruce Richardson /* Event dev north bound API implementation */ 50*99a2dd95SBruce Richardson 51*99a2dd95SBruce Richardson uint8_t 52*99a2dd95SBruce Richardson rte_event_dev_count(void) 53*99a2dd95SBruce Richardson { 54*99a2dd95SBruce Richardson return eventdev_globals.nb_devs; 55*99a2dd95SBruce Richardson } 56*99a2dd95SBruce Richardson 57*99a2dd95SBruce Richardson int 58*99a2dd95SBruce Richardson rte_event_dev_get_dev_id(const char *name) 59*99a2dd95SBruce Richardson { 60*99a2dd95SBruce Richardson int i; 61*99a2dd95SBruce Richardson uint8_t cmp; 62*99a2dd95SBruce Richardson 63*99a2dd95SBruce Richardson if (!name) 64*99a2dd95SBruce Richardson return -EINVAL; 65*99a2dd95SBruce Richardson 66*99a2dd95SBruce Richardson for (i = 0; i < eventdev_globals.nb_devs; i++) { 67*99a2dd95SBruce Richardson cmp = (strncmp(rte_event_devices[i].data->name, name, 68*99a2dd95SBruce Richardson RTE_EVENTDEV_NAME_MAX_LEN) == 0) || 69*99a2dd95SBruce Richardson (rte_event_devices[i].dev ? 
(strncmp( 70*99a2dd95SBruce Richardson rte_event_devices[i].dev->driver->name, name, 71*99a2dd95SBruce Richardson RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0); 72*99a2dd95SBruce Richardson if (cmp && (rte_event_devices[i].attached == 73*99a2dd95SBruce Richardson RTE_EVENTDEV_ATTACHED)) 74*99a2dd95SBruce Richardson return i; 75*99a2dd95SBruce Richardson } 76*99a2dd95SBruce Richardson return -ENODEV; 77*99a2dd95SBruce Richardson } 78*99a2dd95SBruce Richardson 79*99a2dd95SBruce Richardson int 80*99a2dd95SBruce Richardson rte_event_dev_socket_id(uint8_t dev_id) 81*99a2dd95SBruce Richardson { 82*99a2dd95SBruce Richardson struct rte_eventdev *dev; 83*99a2dd95SBruce Richardson 84*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 85*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 86*99a2dd95SBruce Richardson 87*99a2dd95SBruce Richardson return dev->data->socket_id; 88*99a2dd95SBruce Richardson } 89*99a2dd95SBruce Richardson 90*99a2dd95SBruce Richardson int 91*99a2dd95SBruce Richardson rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info) 92*99a2dd95SBruce Richardson { 93*99a2dd95SBruce Richardson struct rte_eventdev *dev; 94*99a2dd95SBruce Richardson 95*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 96*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 97*99a2dd95SBruce Richardson 98*99a2dd95SBruce Richardson if (dev_info == NULL) 99*99a2dd95SBruce Richardson return -EINVAL; 100*99a2dd95SBruce Richardson 101*99a2dd95SBruce Richardson memset(dev_info, 0, sizeof(struct rte_event_dev_info)); 102*99a2dd95SBruce Richardson 103*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 104*99a2dd95SBruce Richardson (*dev->dev_ops->dev_infos_get)(dev, dev_info); 105*99a2dd95SBruce Richardson 106*99a2dd95SBruce Richardson dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns; 107*99a2dd95SBruce Richardson 108*99a2dd95SBruce Richardson 
dev_info->dev = dev->dev; 109*99a2dd95SBruce Richardson return 0; 110*99a2dd95SBruce Richardson } 111*99a2dd95SBruce Richardson 112*99a2dd95SBruce Richardson int 113*99a2dd95SBruce Richardson rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, 114*99a2dd95SBruce Richardson uint32_t *caps) 115*99a2dd95SBruce Richardson { 116*99a2dd95SBruce Richardson struct rte_eventdev *dev; 117*99a2dd95SBruce Richardson 118*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 119*99a2dd95SBruce Richardson RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL); 120*99a2dd95SBruce Richardson 121*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 122*99a2dd95SBruce Richardson 123*99a2dd95SBruce Richardson if (caps == NULL) 124*99a2dd95SBruce Richardson return -EINVAL; 125*99a2dd95SBruce Richardson 126*99a2dd95SBruce Richardson if (dev->dev_ops->eth_rx_adapter_caps_get == NULL) 127*99a2dd95SBruce Richardson *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP; 128*99a2dd95SBruce Richardson else 129*99a2dd95SBruce Richardson *caps = 0; 130*99a2dd95SBruce Richardson 131*99a2dd95SBruce Richardson return dev->dev_ops->eth_rx_adapter_caps_get ? 
132*99a2dd95SBruce Richardson (*dev->dev_ops->eth_rx_adapter_caps_get)(dev, 133*99a2dd95SBruce Richardson &rte_eth_devices[eth_port_id], 134*99a2dd95SBruce Richardson caps) 135*99a2dd95SBruce Richardson : 0; 136*99a2dd95SBruce Richardson } 137*99a2dd95SBruce Richardson 138*99a2dd95SBruce Richardson int 139*99a2dd95SBruce Richardson rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps) 140*99a2dd95SBruce Richardson { 141*99a2dd95SBruce Richardson struct rte_eventdev *dev; 142*99a2dd95SBruce Richardson const struct rte_event_timer_adapter_ops *ops; 143*99a2dd95SBruce Richardson 144*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 145*99a2dd95SBruce Richardson 146*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 147*99a2dd95SBruce Richardson 148*99a2dd95SBruce Richardson if (caps == NULL) 149*99a2dd95SBruce Richardson return -EINVAL; 150*99a2dd95SBruce Richardson *caps = 0; 151*99a2dd95SBruce Richardson 152*99a2dd95SBruce Richardson return dev->dev_ops->timer_adapter_caps_get ? 
153*99a2dd95SBruce Richardson (*dev->dev_ops->timer_adapter_caps_get)(dev, 154*99a2dd95SBruce Richardson 0, 155*99a2dd95SBruce Richardson caps, 156*99a2dd95SBruce Richardson &ops) 157*99a2dd95SBruce Richardson : 0; 158*99a2dd95SBruce Richardson } 159*99a2dd95SBruce Richardson 160*99a2dd95SBruce Richardson int 161*99a2dd95SBruce Richardson rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, 162*99a2dd95SBruce Richardson uint32_t *caps) 163*99a2dd95SBruce Richardson { 164*99a2dd95SBruce Richardson struct rte_eventdev *dev; 165*99a2dd95SBruce Richardson struct rte_cryptodev *cdev; 166*99a2dd95SBruce Richardson 167*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 168*99a2dd95SBruce Richardson if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) 169*99a2dd95SBruce Richardson return -EINVAL; 170*99a2dd95SBruce Richardson 171*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 172*99a2dd95SBruce Richardson cdev = rte_cryptodev_pmd_get_dev(cdev_id); 173*99a2dd95SBruce Richardson 174*99a2dd95SBruce Richardson if (caps == NULL) 175*99a2dd95SBruce Richardson return -EINVAL; 176*99a2dd95SBruce Richardson *caps = 0; 177*99a2dd95SBruce Richardson 178*99a2dd95SBruce Richardson return dev->dev_ops->crypto_adapter_caps_get ? 
179*99a2dd95SBruce Richardson (*dev->dev_ops->crypto_adapter_caps_get) 180*99a2dd95SBruce Richardson (dev, cdev, caps) : -ENOTSUP; 181*99a2dd95SBruce Richardson } 182*99a2dd95SBruce Richardson 183*99a2dd95SBruce Richardson int 184*99a2dd95SBruce Richardson rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, 185*99a2dd95SBruce Richardson uint32_t *caps) 186*99a2dd95SBruce Richardson { 187*99a2dd95SBruce Richardson struct rte_eventdev *dev; 188*99a2dd95SBruce Richardson struct rte_eth_dev *eth_dev; 189*99a2dd95SBruce Richardson 190*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 191*99a2dd95SBruce Richardson RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL); 192*99a2dd95SBruce Richardson 193*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 194*99a2dd95SBruce Richardson eth_dev = &rte_eth_devices[eth_port_id]; 195*99a2dd95SBruce Richardson 196*99a2dd95SBruce Richardson if (caps == NULL) 197*99a2dd95SBruce Richardson return -EINVAL; 198*99a2dd95SBruce Richardson 199*99a2dd95SBruce Richardson if (dev->dev_ops->eth_tx_adapter_caps_get == NULL) 200*99a2dd95SBruce Richardson *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR; 201*99a2dd95SBruce Richardson else 202*99a2dd95SBruce Richardson *caps = 0; 203*99a2dd95SBruce Richardson 204*99a2dd95SBruce Richardson return dev->dev_ops->eth_tx_adapter_caps_get ? 
205*99a2dd95SBruce Richardson (*dev->dev_ops->eth_tx_adapter_caps_get)(dev, 206*99a2dd95SBruce Richardson eth_dev, 207*99a2dd95SBruce Richardson caps) 208*99a2dd95SBruce Richardson : 0; 209*99a2dd95SBruce Richardson } 210*99a2dd95SBruce Richardson 211*99a2dd95SBruce Richardson static inline int 212*99a2dd95SBruce Richardson rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues) 213*99a2dd95SBruce Richardson { 214*99a2dd95SBruce Richardson uint8_t old_nb_queues = dev->data->nb_queues; 215*99a2dd95SBruce Richardson struct rte_event_queue_conf *queues_cfg; 216*99a2dd95SBruce Richardson unsigned int i; 217*99a2dd95SBruce Richardson 218*99a2dd95SBruce Richardson RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues, 219*99a2dd95SBruce Richardson dev->data->dev_id); 220*99a2dd95SBruce Richardson 221*99a2dd95SBruce Richardson /* First time configuration */ 222*99a2dd95SBruce Richardson if (dev->data->queues_cfg == NULL && nb_queues != 0) { 223*99a2dd95SBruce Richardson /* Allocate memory to store queue configuration */ 224*99a2dd95SBruce Richardson dev->data->queues_cfg = rte_zmalloc_socket( 225*99a2dd95SBruce Richardson "eventdev->data->queues_cfg", 226*99a2dd95SBruce Richardson sizeof(dev->data->queues_cfg[0]) * nb_queues, 227*99a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE, dev->data->socket_id); 228*99a2dd95SBruce Richardson if (dev->data->queues_cfg == NULL) { 229*99a2dd95SBruce Richardson dev->data->nb_queues = 0; 230*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to get mem for queue cfg," 231*99a2dd95SBruce Richardson "nb_queues %u", nb_queues); 232*99a2dd95SBruce Richardson return -(ENOMEM); 233*99a2dd95SBruce Richardson } 234*99a2dd95SBruce Richardson /* Re-configure */ 235*99a2dd95SBruce Richardson } else if (dev->data->queues_cfg != NULL && nb_queues != 0) { 236*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP); 237*99a2dd95SBruce Richardson 238*99a2dd95SBruce Richardson for (i = nb_queues; i < 
old_nb_queues; i++) 239*99a2dd95SBruce Richardson (*dev->dev_ops->queue_release)(dev, i); 240*99a2dd95SBruce Richardson 241*99a2dd95SBruce Richardson /* Re allocate memory to store queue configuration */ 242*99a2dd95SBruce Richardson queues_cfg = dev->data->queues_cfg; 243*99a2dd95SBruce Richardson queues_cfg = rte_realloc(queues_cfg, 244*99a2dd95SBruce Richardson sizeof(queues_cfg[0]) * nb_queues, 245*99a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE); 246*99a2dd95SBruce Richardson if (queues_cfg == NULL) { 247*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory," 248*99a2dd95SBruce Richardson " nb_queues %u", nb_queues); 249*99a2dd95SBruce Richardson return -(ENOMEM); 250*99a2dd95SBruce Richardson } 251*99a2dd95SBruce Richardson dev->data->queues_cfg = queues_cfg; 252*99a2dd95SBruce Richardson 253*99a2dd95SBruce Richardson if (nb_queues > old_nb_queues) { 254*99a2dd95SBruce Richardson uint8_t new_qs = nb_queues - old_nb_queues; 255*99a2dd95SBruce Richardson 256*99a2dd95SBruce Richardson memset(queues_cfg + old_nb_queues, 0, 257*99a2dd95SBruce Richardson sizeof(queues_cfg[0]) * new_qs); 258*99a2dd95SBruce Richardson } 259*99a2dd95SBruce Richardson } else if (dev->data->queues_cfg != NULL && nb_queues == 0) { 260*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP); 261*99a2dd95SBruce Richardson 262*99a2dd95SBruce Richardson for (i = nb_queues; i < old_nb_queues; i++) 263*99a2dd95SBruce Richardson (*dev->dev_ops->queue_release)(dev, i); 264*99a2dd95SBruce Richardson } 265*99a2dd95SBruce Richardson 266*99a2dd95SBruce Richardson dev->data->nb_queues = nb_queues; 267*99a2dd95SBruce Richardson return 0; 268*99a2dd95SBruce Richardson } 269*99a2dd95SBruce Richardson 270*99a2dd95SBruce Richardson #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead) 271*99a2dd95SBruce Richardson 272*99a2dd95SBruce Richardson static inline int 273*99a2dd95SBruce Richardson rte_event_dev_port_config(struct rte_eventdev *dev, 
uint8_t nb_ports) 274*99a2dd95SBruce Richardson { 275*99a2dd95SBruce Richardson uint8_t old_nb_ports = dev->data->nb_ports; 276*99a2dd95SBruce Richardson void **ports; 277*99a2dd95SBruce Richardson uint16_t *links_map; 278*99a2dd95SBruce Richardson struct rte_event_port_conf *ports_cfg; 279*99a2dd95SBruce Richardson unsigned int i; 280*99a2dd95SBruce Richardson 281*99a2dd95SBruce Richardson RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports, 282*99a2dd95SBruce Richardson dev->data->dev_id); 283*99a2dd95SBruce Richardson 284*99a2dd95SBruce Richardson /* First time configuration */ 285*99a2dd95SBruce Richardson if (dev->data->ports == NULL && nb_ports != 0) { 286*99a2dd95SBruce Richardson dev->data->ports = rte_zmalloc_socket("eventdev->data->ports", 287*99a2dd95SBruce Richardson sizeof(dev->data->ports[0]) * nb_ports, 288*99a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE, dev->data->socket_id); 289*99a2dd95SBruce Richardson if (dev->data->ports == NULL) { 290*99a2dd95SBruce Richardson dev->data->nb_ports = 0; 291*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to get mem for port meta data," 292*99a2dd95SBruce Richardson "nb_ports %u", nb_ports); 293*99a2dd95SBruce Richardson return -(ENOMEM); 294*99a2dd95SBruce Richardson } 295*99a2dd95SBruce Richardson 296*99a2dd95SBruce Richardson /* Allocate memory to store port configurations */ 297*99a2dd95SBruce Richardson dev->data->ports_cfg = 298*99a2dd95SBruce Richardson rte_zmalloc_socket("eventdev->ports_cfg", 299*99a2dd95SBruce Richardson sizeof(dev->data->ports_cfg[0]) * nb_ports, 300*99a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE, dev->data->socket_id); 301*99a2dd95SBruce Richardson if (dev->data->ports_cfg == NULL) { 302*99a2dd95SBruce Richardson dev->data->nb_ports = 0; 303*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to get mem for port cfg," 304*99a2dd95SBruce Richardson "nb_ports %u", nb_ports); 305*99a2dd95SBruce Richardson return -(ENOMEM); 306*99a2dd95SBruce Richardson } 307*99a2dd95SBruce 
Richardson 308*99a2dd95SBruce Richardson /* Allocate memory to store queue to port link connection */ 309*99a2dd95SBruce Richardson dev->data->links_map = 310*99a2dd95SBruce Richardson rte_zmalloc_socket("eventdev->links_map", 311*99a2dd95SBruce Richardson sizeof(dev->data->links_map[0]) * nb_ports * 312*99a2dd95SBruce Richardson RTE_EVENT_MAX_QUEUES_PER_DEV, 313*99a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE, dev->data->socket_id); 314*99a2dd95SBruce Richardson if (dev->data->links_map == NULL) { 315*99a2dd95SBruce Richardson dev->data->nb_ports = 0; 316*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to get mem for port_map area," 317*99a2dd95SBruce Richardson "nb_ports %u", nb_ports); 318*99a2dd95SBruce Richardson return -(ENOMEM); 319*99a2dd95SBruce Richardson } 320*99a2dd95SBruce Richardson for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++) 321*99a2dd95SBruce Richardson dev->data->links_map[i] = 322*99a2dd95SBruce Richardson EVENT_QUEUE_SERVICE_PRIORITY_INVALID; 323*99a2dd95SBruce Richardson } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */ 324*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP); 325*99a2dd95SBruce Richardson 326*99a2dd95SBruce Richardson ports = dev->data->ports; 327*99a2dd95SBruce Richardson ports_cfg = dev->data->ports_cfg; 328*99a2dd95SBruce Richardson links_map = dev->data->links_map; 329*99a2dd95SBruce Richardson 330*99a2dd95SBruce Richardson for (i = nb_ports; i < old_nb_ports; i++) 331*99a2dd95SBruce Richardson (*dev->dev_ops->port_release)(ports[i]); 332*99a2dd95SBruce Richardson 333*99a2dd95SBruce Richardson /* Realloc memory for ports */ 334*99a2dd95SBruce Richardson ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports, 335*99a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE); 336*99a2dd95SBruce Richardson if (ports == NULL) { 337*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to realloc port meta data," 338*99a2dd95SBruce Richardson " nb_ports %u", nb_ports); 
339*99a2dd95SBruce Richardson return -(ENOMEM); 340*99a2dd95SBruce Richardson } 341*99a2dd95SBruce Richardson 342*99a2dd95SBruce Richardson /* Realloc memory for ports_cfg */ 343*99a2dd95SBruce Richardson ports_cfg = rte_realloc(ports_cfg, 344*99a2dd95SBruce Richardson sizeof(ports_cfg[0]) * nb_ports, 345*99a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE); 346*99a2dd95SBruce Richardson if (ports_cfg == NULL) { 347*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to realloc port cfg mem," 348*99a2dd95SBruce Richardson " nb_ports %u", nb_ports); 349*99a2dd95SBruce Richardson return -(ENOMEM); 350*99a2dd95SBruce Richardson } 351*99a2dd95SBruce Richardson 352*99a2dd95SBruce Richardson /* Realloc memory to store queue to port link connection */ 353*99a2dd95SBruce Richardson links_map = rte_realloc(links_map, 354*99a2dd95SBruce Richardson sizeof(dev->data->links_map[0]) * nb_ports * 355*99a2dd95SBruce Richardson RTE_EVENT_MAX_QUEUES_PER_DEV, 356*99a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE); 357*99a2dd95SBruce Richardson if (links_map == NULL) { 358*99a2dd95SBruce Richardson dev->data->nb_ports = 0; 359*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to realloc mem for port_map," 360*99a2dd95SBruce Richardson "nb_ports %u", nb_ports); 361*99a2dd95SBruce Richardson return -(ENOMEM); 362*99a2dd95SBruce Richardson } 363*99a2dd95SBruce Richardson 364*99a2dd95SBruce Richardson if (nb_ports > old_nb_ports) { 365*99a2dd95SBruce Richardson uint8_t new_ps = nb_ports - old_nb_ports; 366*99a2dd95SBruce Richardson unsigned int old_links_map_end = 367*99a2dd95SBruce Richardson old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; 368*99a2dd95SBruce Richardson unsigned int links_map_end = 369*99a2dd95SBruce Richardson nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; 370*99a2dd95SBruce Richardson 371*99a2dd95SBruce Richardson memset(ports + old_nb_ports, 0, 372*99a2dd95SBruce Richardson sizeof(ports[0]) * new_ps); 373*99a2dd95SBruce Richardson memset(ports_cfg + old_nb_ports, 0, 374*99a2dd95SBruce 
Richardson sizeof(ports_cfg[0]) * new_ps); 375*99a2dd95SBruce Richardson for (i = old_links_map_end; i < links_map_end; i++) 376*99a2dd95SBruce Richardson links_map[i] = 377*99a2dd95SBruce Richardson EVENT_QUEUE_SERVICE_PRIORITY_INVALID; 378*99a2dd95SBruce Richardson } 379*99a2dd95SBruce Richardson 380*99a2dd95SBruce Richardson dev->data->ports = ports; 381*99a2dd95SBruce Richardson dev->data->ports_cfg = ports_cfg; 382*99a2dd95SBruce Richardson dev->data->links_map = links_map; 383*99a2dd95SBruce Richardson } else if (dev->data->ports != NULL && nb_ports == 0) { 384*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP); 385*99a2dd95SBruce Richardson 386*99a2dd95SBruce Richardson ports = dev->data->ports; 387*99a2dd95SBruce Richardson for (i = nb_ports; i < old_nb_ports; i++) 388*99a2dd95SBruce Richardson (*dev->dev_ops->port_release)(ports[i]); 389*99a2dd95SBruce Richardson } 390*99a2dd95SBruce Richardson 391*99a2dd95SBruce Richardson dev->data->nb_ports = nb_ports; 392*99a2dd95SBruce Richardson return 0; 393*99a2dd95SBruce Richardson } 394*99a2dd95SBruce Richardson 395*99a2dd95SBruce Richardson int 396*99a2dd95SBruce Richardson rte_event_dev_configure(uint8_t dev_id, 397*99a2dd95SBruce Richardson const struct rte_event_dev_config *dev_conf) 398*99a2dd95SBruce Richardson { 399*99a2dd95SBruce Richardson struct rte_eventdev *dev; 400*99a2dd95SBruce Richardson struct rte_event_dev_info info; 401*99a2dd95SBruce Richardson int diag; 402*99a2dd95SBruce Richardson 403*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 404*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 405*99a2dd95SBruce Richardson 406*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 407*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); 408*99a2dd95SBruce Richardson 409*99a2dd95SBruce Richardson if (dev->data->dev_started) { 410*99a2dd95SBruce 
Richardson RTE_EDEV_LOG_ERR( 411*99a2dd95SBruce Richardson "device %d must be stopped to allow configuration", dev_id); 412*99a2dd95SBruce Richardson return -EBUSY; 413*99a2dd95SBruce Richardson } 414*99a2dd95SBruce Richardson 415*99a2dd95SBruce Richardson if (dev_conf == NULL) 416*99a2dd95SBruce Richardson return -EINVAL; 417*99a2dd95SBruce Richardson 418*99a2dd95SBruce Richardson (*dev->dev_ops->dev_infos_get)(dev, &info); 419*99a2dd95SBruce Richardson 420*99a2dd95SBruce Richardson /* Check dequeue_timeout_ns value is in limit */ 421*99a2dd95SBruce Richardson if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) { 422*99a2dd95SBruce Richardson if (dev_conf->dequeue_timeout_ns && 423*99a2dd95SBruce Richardson (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns 424*99a2dd95SBruce Richardson || dev_conf->dequeue_timeout_ns > 425*99a2dd95SBruce Richardson info.max_dequeue_timeout_ns)) { 426*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d" 427*99a2dd95SBruce Richardson " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d", 428*99a2dd95SBruce Richardson dev_id, dev_conf->dequeue_timeout_ns, 429*99a2dd95SBruce Richardson info.min_dequeue_timeout_ns, 430*99a2dd95SBruce Richardson info.max_dequeue_timeout_ns); 431*99a2dd95SBruce Richardson return -EINVAL; 432*99a2dd95SBruce Richardson } 433*99a2dd95SBruce Richardson } 434*99a2dd95SBruce Richardson 435*99a2dd95SBruce Richardson /* Check nb_events_limit is in limit */ 436*99a2dd95SBruce Richardson if (dev_conf->nb_events_limit > info.max_num_events) { 437*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d", 438*99a2dd95SBruce Richardson dev_id, dev_conf->nb_events_limit, info.max_num_events); 439*99a2dd95SBruce Richardson return -EINVAL; 440*99a2dd95SBruce Richardson } 441*99a2dd95SBruce Richardson 442*99a2dd95SBruce Richardson /* Check nb_event_queues is in limit */ 443*99a2dd95SBruce Richardson if (!dev_conf->nb_event_queues) 
{ 444*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero", 445*99a2dd95SBruce Richardson dev_id); 446*99a2dd95SBruce Richardson return -EINVAL; 447*99a2dd95SBruce Richardson } 448*99a2dd95SBruce Richardson if (dev_conf->nb_event_queues > info.max_event_queues + 449*99a2dd95SBruce Richardson info.max_single_link_event_port_queue_pairs) { 450*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d", 451*99a2dd95SBruce Richardson dev_id, dev_conf->nb_event_queues, 452*99a2dd95SBruce Richardson info.max_event_queues, 453*99a2dd95SBruce Richardson info.max_single_link_event_port_queue_pairs); 454*99a2dd95SBruce Richardson return -EINVAL; 455*99a2dd95SBruce Richardson } 456*99a2dd95SBruce Richardson if (dev_conf->nb_event_queues - 457*99a2dd95SBruce Richardson dev_conf->nb_single_link_event_port_queues > 458*99a2dd95SBruce Richardson info.max_event_queues) { 459*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d", 460*99a2dd95SBruce Richardson dev_id, dev_conf->nb_event_queues, 461*99a2dd95SBruce Richardson dev_conf->nb_single_link_event_port_queues, 462*99a2dd95SBruce Richardson info.max_event_queues); 463*99a2dd95SBruce Richardson return -EINVAL; 464*99a2dd95SBruce Richardson } 465*99a2dd95SBruce Richardson if (dev_conf->nb_single_link_event_port_queues > 466*99a2dd95SBruce Richardson dev_conf->nb_event_queues) { 467*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d", 468*99a2dd95SBruce Richardson dev_id, 469*99a2dd95SBruce Richardson dev_conf->nb_single_link_event_port_queues, 470*99a2dd95SBruce Richardson dev_conf->nb_event_queues); 471*99a2dd95SBruce Richardson return -EINVAL; 472*99a2dd95SBruce Richardson } 473*99a2dd95SBruce Richardson 474*99a2dd95SBruce Richardson /* Check nb_event_ports is in limit */ 475*99a2dd95SBruce 
Richardson if (!dev_conf->nb_event_ports) { 476*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id); 477*99a2dd95SBruce Richardson return -EINVAL; 478*99a2dd95SBruce Richardson } 479*99a2dd95SBruce Richardson if (dev_conf->nb_event_ports > info.max_event_ports + 480*99a2dd95SBruce Richardson info.max_single_link_event_port_queue_pairs) { 481*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d", 482*99a2dd95SBruce Richardson dev_id, dev_conf->nb_event_ports, 483*99a2dd95SBruce Richardson info.max_event_ports, 484*99a2dd95SBruce Richardson info.max_single_link_event_port_queue_pairs); 485*99a2dd95SBruce Richardson return -EINVAL; 486*99a2dd95SBruce Richardson } 487*99a2dd95SBruce Richardson if (dev_conf->nb_event_ports - 488*99a2dd95SBruce Richardson dev_conf->nb_single_link_event_port_queues 489*99a2dd95SBruce Richardson > info.max_event_ports) { 490*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d", 491*99a2dd95SBruce Richardson dev_id, dev_conf->nb_event_ports, 492*99a2dd95SBruce Richardson dev_conf->nb_single_link_event_port_queues, 493*99a2dd95SBruce Richardson info.max_event_ports); 494*99a2dd95SBruce Richardson return -EINVAL; 495*99a2dd95SBruce Richardson } 496*99a2dd95SBruce Richardson 497*99a2dd95SBruce Richardson if (dev_conf->nb_single_link_event_port_queues > 498*99a2dd95SBruce Richardson dev_conf->nb_event_ports) { 499*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR( 500*99a2dd95SBruce Richardson "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d", 501*99a2dd95SBruce Richardson dev_id, 502*99a2dd95SBruce Richardson dev_conf->nb_single_link_event_port_queues, 503*99a2dd95SBruce Richardson dev_conf->nb_event_ports); 504*99a2dd95SBruce Richardson return -EINVAL; 505*99a2dd95SBruce Richardson } 506*99a2dd95SBruce Richardson 507*99a2dd95SBruce Richardson /* 
Check nb_event_queue_flows is in limit */ 508*99a2dd95SBruce Richardson if (!dev_conf->nb_event_queue_flows) { 509*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id); 510*99a2dd95SBruce Richardson return -EINVAL; 511*99a2dd95SBruce Richardson } 512*99a2dd95SBruce Richardson if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) { 513*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x", 514*99a2dd95SBruce Richardson dev_id, dev_conf->nb_event_queue_flows, 515*99a2dd95SBruce Richardson info.max_event_queue_flows); 516*99a2dd95SBruce Richardson return -EINVAL; 517*99a2dd95SBruce Richardson } 518*99a2dd95SBruce Richardson 519*99a2dd95SBruce Richardson /* Check nb_event_port_dequeue_depth is in limit */ 520*99a2dd95SBruce Richardson if (!dev_conf->nb_event_port_dequeue_depth) { 521*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero", 522*99a2dd95SBruce Richardson dev_id); 523*99a2dd95SBruce Richardson return -EINVAL; 524*99a2dd95SBruce Richardson } 525*99a2dd95SBruce Richardson if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) && 526*99a2dd95SBruce Richardson (dev_conf->nb_event_port_dequeue_depth > 527*99a2dd95SBruce Richardson info.max_event_port_dequeue_depth)) { 528*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d", 529*99a2dd95SBruce Richardson dev_id, dev_conf->nb_event_port_dequeue_depth, 530*99a2dd95SBruce Richardson info.max_event_port_dequeue_depth); 531*99a2dd95SBruce Richardson return -EINVAL; 532*99a2dd95SBruce Richardson } 533*99a2dd95SBruce Richardson 534*99a2dd95SBruce Richardson /* Check nb_event_port_enqueue_depth is in limit */ 535*99a2dd95SBruce Richardson if (!dev_conf->nb_event_port_enqueue_depth) { 536*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero", 537*99a2dd95SBruce Richardson dev_id); 538*99a2dd95SBruce Richardson return -EINVAL; 539*99a2dd95SBruce Richardson } 
540*99a2dd95SBruce Richardson if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) && 541*99a2dd95SBruce Richardson (dev_conf->nb_event_port_enqueue_depth > 542*99a2dd95SBruce Richardson info.max_event_port_enqueue_depth)) { 543*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d", 544*99a2dd95SBruce Richardson dev_id, dev_conf->nb_event_port_enqueue_depth, 545*99a2dd95SBruce Richardson info.max_event_port_enqueue_depth); 546*99a2dd95SBruce Richardson return -EINVAL; 547*99a2dd95SBruce Richardson } 548*99a2dd95SBruce Richardson 549*99a2dd95SBruce Richardson /* Copy the dev_conf parameter into the dev structure */ 550*99a2dd95SBruce Richardson memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf)); 551*99a2dd95SBruce Richardson 552*99a2dd95SBruce Richardson /* Setup new number of queues and reconfigure device. */ 553*99a2dd95SBruce Richardson diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues); 554*99a2dd95SBruce Richardson if (diag != 0) { 555*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d", 556*99a2dd95SBruce Richardson dev_id, diag); 557*99a2dd95SBruce Richardson return diag; 558*99a2dd95SBruce Richardson } 559*99a2dd95SBruce Richardson 560*99a2dd95SBruce Richardson /* Setup new number of ports and reconfigure device. 
	/* Tail of rte_event_dev_configure() (function start lies before this
	 * chunk): configure ports, then hand the assembled configuration to
	 * the driver.
	 */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		/* Roll back the queue configuration performed just above. */
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		/* Release both queue and port resources on driver failure. */
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	/* Cache capabilities and trace; note diag (0 or the driver's error)
	 * is returned either way — the failure path above does not return
	 * early.
	 */
	dev->data->event_dev_cap = info.event_dev_cap;
	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
	return diag;
}

/* Return 1 if queue_id addresses a configured queue on this device,
 * 0 otherwise.
 */
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

/* Fill *queue_conf with the driver's default configuration for queue_id.
 *
 * Returns 0 on success, -EINVAL on invalid device id, queue id or NULL
 * queue_conf, -ENOTSUP when the driver does not implement queue_def_conf.
 */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	/* Zero first so fields the driver does not touch are defined. */
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

/* Return 1 when queue_conf describes a non-single-link queue that can
 * carry atomic-scheduled events (ALL_TYPES set, or schedule_type is
 * explicitly RTE_SCHED_TYPE_ATOMIC); 0 otherwise (including NULL conf).
 */
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
			!(queue_conf->event_queue_cfg &
			  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
			((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
			(queue_conf->schedule_type
				== RTE_SCHED_TYPE_ATOMIC)
			))
		return 1;
	else
		return 0;
}

/* Return 1 when queue_conf describes a non-single-link queue that can
 * carry ordered-scheduled events (ALL_TYPES set, or schedule_type is
 * explicitly RTE_SCHED_TYPE_ORDERED); 0 otherwise (including NULL conf).
 */
static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
			!(queue_conf->event_queue_cfg &
			  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
			((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
			(queue_conf->schedule_type
				== RTE_SCHED_TYPE_ORDERED)
			))
		return 1;
	else
		return 0;
}
/* Set up an event queue on a configured, stopped device.
 *
 * nb_atomic_flows / nb_atomic_order_sequences are validated against the
 * nb_event_queue_flows fixed at rte_event_dev_configure() time, but only
 * for queues whose config actually uses atomic / ordered scheduling.
 * A NULL queue_conf selects the driver's default configuration.
 *
 * Returns 0 or the driver's queue_setup result; -EINVAL on bad ids or
 * limits, -EBUSY if the device is started, -ENOTSUP if ops are missing.
 */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	/* Cache the effective config before invoking the driver so
	 * rte_event_queue_attr_get() reflects it.
	 */
	dev->data->queues_cfg[queue_id] = *queue_conf;
	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}

/* Return 1 if port_id addresses a configured port on this device,
 * 0 otherwise.
 */
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

/* Fill *port_conf with the driver's default port configuration.
 *
 * Returns 0 on success, -EINVAL on invalid device id, port id or NULL
 * port_conf, -ENOTSUP when the driver does not implement port_def_conf.
 */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	/* Zero first so fields the driver does not touch are defined. */
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}
/* Set up an event port on a configured, stopped device.
 *
 * new_event_threshold, dequeue_depth and enqueue_depth are each checked
 * against the corresponding limit fixed at rte_event_dev_configure()
 * time; zero values are rejected.  A NULL port_conf selects the driver's
 * default configuration.  After a successful driver setup, all queue
 * links are removed so the port starts in the documented unlinked state.
 *
 * Returns 0 on success; -EINVAL on bad ids/limits/unsupported
 * implicit-release request, -EBUSY if the device is started, -ENOTSUP
 * when a required driver op is missing, or a negative driver error.
 */
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Disabling implicit release requires the matching device cap. */
	if (port_conf &&
	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	/* Cache the effective config so rte_event_port_attr_get() works. */
	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
	if (diag < 0)
		return diag;

	return 0;
}

/* Read a device-level attribute into *attr_value.
 *
 * Returns 0 on success, -EINVAL on NULL attr_value, invalid device id
 * or unknown attr_id.
 */
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
dev->data->dev_started; 850*99a2dd95SBruce Richardson break; 851*99a2dd95SBruce Richardson default: 852*99a2dd95SBruce Richardson return -EINVAL; 853*99a2dd95SBruce Richardson } 854*99a2dd95SBruce Richardson 855*99a2dd95SBruce Richardson return 0; 856*99a2dd95SBruce Richardson } 857*99a2dd95SBruce Richardson 858*99a2dd95SBruce Richardson int 859*99a2dd95SBruce Richardson rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, 860*99a2dd95SBruce Richardson uint32_t *attr_value) 861*99a2dd95SBruce Richardson { 862*99a2dd95SBruce Richardson struct rte_eventdev *dev; 863*99a2dd95SBruce Richardson 864*99a2dd95SBruce Richardson if (!attr_value) 865*99a2dd95SBruce Richardson return -EINVAL; 866*99a2dd95SBruce Richardson 867*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 868*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 869*99a2dd95SBruce Richardson if (!is_valid_port(dev, port_id)) { 870*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 871*99a2dd95SBruce Richardson return -EINVAL; 872*99a2dd95SBruce Richardson } 873*99a2dd95SBruce Richardson 874*99a2dd95SBruce Richardson switch (attr_id) { 875*99a2dd95SBruce Richardson case RTE_EVENT_PORT_ATTR_ENQ_DEPTH: 876*99a2dd95SBruce Richardson *attr_value = dev->data->ports_cfg[port_id].enqueue_depth; 877*99a2dd95SBruce Richardson break; 878*99a2dd95SBruce Richardson case RTE_EVENT_PORT_ATTR_DEQ_DEPTH: 879*99a2dd95SBruce Richardson *attr_value = dev->data->ports_cfg[port_id].dequeue_depth; 880*99a2dd95SBruce Richardson break; 881*99a2dd95SBruce Richardson case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD: 882*99a2dd95SBruce Richardson *attr_value = dev->data->ports_cfg[port_id].new_event_threshold; 883*99a2dd95SBruce Richardson break; 884*99a2dd95SBruce Richardson case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE: 885*99a2dd95SBruce Richardson { 886*99a2dd95SBruce Richardson uint32_t config; 887*99a2dd95SBruce Richardson 888*99a2dd95SBruce 
Richardson config = dev->data->ports_cfg[port_id].event_port_cfg; 889*99a2dd95SBruce Richardson *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL); 890*99a2dd95SBruce Richardson break; 891*99a2dd95SBruce Richardson } 892*99a2dd95SBruce Richardson default: 893*99a2dd95SBruce Richardson return -EINVAL; 894*99a2dd95SBruce Richardson }; 895*99a2dd95SBruce Richardson return 0; 896*99a2dd95SBruce Richardson } 897*99a2dd95SBruce Richardson 898*99a2dd95SBruce Richardson int 899*99a2dd95SBruce Richardson rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, 900*99a2dd95SBruce Richardson uint32_t *attr_value) 901*99a2dd95SBruce Richardson { 902*99a2dd95SBruce Richardson struct rte_event_queue_conf *conf; 903*99a2dd95SBruce Richardson struct rte_eventdev *dev; 904*99a2dd95SBruce Richardson 905*99a2dd95SBruce Richardson if (!attr_value) 906*99a2dd95SBruce Richardson return -EINVAL; 907*99a2dd95SBruce Richardson 908*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 909*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 910*99a2dd95SBruce Richardson if (!is_valid_queue(dev, queue_id)) { 911*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id); 912*99a2dd95SBruce Richardson return -EINVAL; 913*99a2dd95SBruce Richardson } 914*99a2dd95SBruce Richardson 915*99a2dd95SBruce Richardson conf = &dev->data->queues_cfg[queue_id]; 916*99a2dd95SBruce Richardson 917*99a2dd95SBruce Richardson switch (attr_id) { 918*99a2dd95SBruce Richardson case RTE_EVENT_QUEUE_ATTR_PRIORITY: 919*99a2dd95SBruce Richardson *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL; 920*99a2dd95SBruce Richardson if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) 921*99a2dd95SBruce Richardson *attr_value = conf->priority; 922*99a2dd95SBruce Richardson break; 923*99a2dd95SBruce Richardson case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS: 924*99a2dd95SBruce Richardson *attr_value = conf->nb_atomic_flows; 925*99a2dd95SBruce 
Richardson break; 926*99a2dd95SBruce Richardson case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES: 927*99a2dd95SBruce Richardson *attr_value = conf->nb_atomic_order_sequences; 928*99a2dd95SBruce Richardson break; 929*99a2dd95SBruce Richardson case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG: 930*99a2dd95SBruce Richardson *attr_value = conf->event_queue_cfg; 931*99a2dd95SBruce Richardson break; 932*99a2dd95SBruce Richardson case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE: 933*99a2dd95SBruce Richardson if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) 934*99a2dd95SBruce Richardson return -EOVERFLOW; 935*99a2dd95SBruce Richardson 936*99a2dd95SBruce Richardson *attr_value = conf->schedule_type; 937*99a2dd95SBruce Richardson break; 938*99a2dd95SBruce Richardson default: 939*99a2dd95SBruce Richardson return -EINVAL; 940*99a2dd95SBruce Richardson }; 941*99a2dd95SBruce Richardson return 0; 942*99a2dd95SBruce Richardson } 943*99a2dd95SBruce Richardson 944*99a2dd95SBruce Richardson int 945*99a2dd95SBruce Richardson rte_event_port_link(uint8_t dev_id, uint8_t port_id, 946*99a2dd95SBruce Richardson const uint8_t queues[], const uint8_t priorities[], 947*99a2dd95SBruce Richardson uint16_t nb_links) 948*99a2dd95SBruce Richardson { 949*99a2dd95SBruce Richardson struct rte_eventdev *dev; 950*99a2dd95SBruce Richardson uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV]; 951*99a2dd95SBruce Richardson uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV]; 952*99a2dd95SBruce Richardson uint16_t *links_map; 953*99a2dd95SBruce Richardson int i, diag; 954*99a2dd95SBruce Richardson 955*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); 956*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 957*99a2dd95SBruce Richardson 958*99a2dd95SBruce Richardson if (*dev->dev_ops->port_link == NULL) { 959*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Function not supported\n"); 960*99a2dd95SBruce Richardson rte_errno = ENOTSUP; 961*99a2dd95SBruce Richardson return 0; 
962*99a2dd95SBruce Richardson } 963*99a2dd95SBruce Richardson 964*99a2dd95SBruce Richardson if (!is_valid_port(dev, port_id)) { 965*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 966*99a2dd95SBruce Richardson rte_errno = EINVAL; 967*99a2dd95SBruce Richardson return 0; 968*99a2dd95SBruce Richardson } 969*99a2dd95SBruce Richardson 970*99a2dd95SBruce Richardson if (queues == NULL) { 971*99a2dd95SBruce Richardson for (i = 0; i < dev->data->nb_queues; i++) 972*99a2dd95SBruce Richardson queues_list[i] = i; 973*99a2dd95SBruce Richardson 974*99a2dd95SBruce Richardson queues = queues_list; 975*99a2dd95SBruce Richardson nb_links = dev->data->nb_queues; 976*99a2dd95SBruce Richardson } 977*99a2dd95SBruce Richardson 978*99a2dd95SBruce Richardson if (priorities == NULL) { 979*99a2dd95SBruce Richardson for (i = 0; i < nb_links; i++) 980*99a2dd95SBruce Richardson priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL; 981*99a2dd95SBruce Richardson 982*99a2dd95SBruce Richardson priorities = priorities_list; 983*99a2dd95SBruce Richardson } 984*99a2dd95SBruce Richardson 985*99a2dd95SBruce Richardson for (i = 0; i < nb_links; i++) 986*99a2dd95SBruce Richardson if (queues[i] >= dev->data->nb_queues) { 987*99a2dd95SBruce Richardson rte_errno = EINVAL; 988*99a2dd95SBruce Richardson return 0; 989*99a2dd95SBruce Richardson } 990*99a2dd95SBruce Richardson 991*99a2dd95SBruce Richardson diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], 992*99a2dd95SBruce Richardson queues, priorities, nb_links); 993*99a2dd95SBruce Richardson if (diag < 0) 994*99a2dd95SBruce Richardson return diag; 995*99a2dd95SBruce Richardson 996*99a2dd95SBruce Richardson links_map = dev->data->links_map; 997*99a2dd95SBruce Richardson /* Point links_map to this port specific area */ 998*99a2dd95SBruce Richardson links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); 999*99a2dd95SBruce Richardson for (i = 0; i < diag; i++) 1000*99a2dd95SBruce Richardson links_map[queues[i]] = 
(uint8_t)priorities[i]; 1001*99a2dd95SBruce Richardson 1002*99a2dd95SBruce Richardson rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag); 1003*99a2dd95SBruce Richardson return diag; 1004*99a2dd95SBruce Richardson } 1005*99a2dd95SBruce Richardson 1006*99a2dd95SBruce Richardson int 1007*99a2dd95SBruce Richardson rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, 1008*99a2dd95SBruce Richardson uint8_t queues[], uint16_t nb_unlinks) 1009*99a2dd95SBruce Richardson { 1010*99a2dd95SBruce Richardson struct rte_eventdev *dev; 1011*99a2dd95SBruce Richardson uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; 1012*99a2dd95SBruce Richardson int i, diag, j; 1013*99a2dd95SBruce Richardson uint16_t *links_map; 1014*99a2dd95SBruce Richardson 1015*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); 1016*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 1017*99a2dd95SBruce Richardson 1018*99a2dd95SBruce Richardson if (*dev->dev_ops->port_unlink == NULL) { 1019*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Function not supported"); 1020*99a2dd95SBruce Richardson rte_errno = ENOTSUP; 1021*99a2dd95SBruce Richardson return 0; 1022*99a2dd95SBruce Richardson } 1023*99a2dd95SBruce Richardson 1024*99a2dd95SBruce Richardson if (!is_valid_port(dev, port_id)) { 1025*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 1026*99a2dd95SBruce Richardson rte_errno = EINVAL; 1027*99a2dd95SBruce Richardson return 0; 1028*99a2dd95SBruce Richardson } 1029*99a2dd95SBruce Richardson 1030*99a2dd95SBruce Richardson links_map = dev->data->links_map; 1031*99a2dd95SBruce Richardson /* Point links_map to this port specific area */ 1032*99a2dd95SBruce Richardson links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); 1033*99a2dd95SBruce Richardson 1034*99a2dd95SBruce Richardson if (queues == NULL) { 1035*99a2dd95SBruce Richardson j = 0; 1036*99a2dd95SBruce Richardson for (i = 0; i < dev->data->nb_queues; i++) { 1037*99a2dd95SBruce 
/* Unlink event queues from an event port.
 *
 * NULL queues means "unlink every currently linked queue".  When an
 * explicit list is given, it is truncated at the first entry that is not
 * currently linked (the loop below breaks there and nb_unlinks = j).
 *
 * Returns the number of unlinks performed, or 0 with rte_errno set
 * (ENOTSUP/EINVAL) on validation failure, or a negative driver error.
 */
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		/* Collect every queue currently linked to this port. */
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		/* Stop at the first queue that is not actually linked. */
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	/* j is the effective count in both branches above. */
	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	/* Invalidate map entries only for the diag unlinks performed. */
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
	return diag;
}

/* Query how many unlink operations are still in flight on a port.
 *
 * Returns the driver's count, 0 when the driver handles unlinks
 * synchronously and does not implement the op, or -EINVAL on bad ids.
 */
int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Return 0 if the PMD does not implement unlinks in progress.
	 * This allows PMDs which handle unlink synchronously to not implement
	 * this function at all.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);

	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
			dev->data->ports[port_id]);
}

/* Report the queues linked to a port and their priorities.
 *
 * queues[] and priorities[] are filled in parallel; the caller must
 * provide arrays large enough for the configured queue count (the
 * public API documents RTE_EVENT_MAX_QUEUES_PER_DEV — not re-checked
 * here).  Returns the number of links, or -EINVAL on bad ids.
 */
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

/* Convert a dequeue timeout in nanoseconds to device-specific ticks.
 *
 * Returns 0 with *timeout_ticks set, -EINVAL on bad device id or NULL
 * pointer, -ENOTSUP when the driver lacks the conversion op, or a
 * negative driver error.
 */
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

/* Get the service core id associated with a software event device.
 *
 * Returns 0 with *service_id set when the device registered a service,
 * -ESRCH when it did not, -EINVAL on bad device id or NULL pointer.
 */
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}
/* Dump internal device state to the given stream via the driver op.
 *
 * Returns 0 on success, -EINVAL on bad device id or NULL stream,
 * -ENOTSUP when the driver lacks the dump op.
 */
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
	if (f == NULL)
		return -EINVAL;

	(*dev->dev_ops->dump)(dev, f);
	return 0;

}

/* Return how many xstats exist for the given mode/id by invoking the
 * driver's xstats_get_names with a zero-sized buffer; 0 when the driver
 * does not implement xstats.  dev_id is assumed already validated by
 * the caller.
 */
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

/* Retrieve names (and optionally ids) of extended stats.
 *
 * Follows the usual DPDK xstats contract: when xstats_names is NULL or
 * size is too small, the required entry count is returned instead of
 * filling the arrays.  Returns -ENODEV on bad device id, -ENOTSUP when
 * the driver lacks the op.
 */
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}
queue_port_id, 1218*99a2dd95SBruce Richardson ids, values, n); 1219*99a2dd95SBruce Richardson return -ENOTSUP; 1220*99a2dd95SBruce Richardson } 1221*99a2dd95SBruce Richardson 1222*99a2dd95SBruce Richardson uint64_t 1223*99a2dd95SBruce Richardson rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name, 1224*99a2dd95SBruce Richardson unsigned int *id) 1225*99a2dd95SBruce Richardson { 1226*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0); 1227*99a2dd95SBruce Richardson const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1228*99a2dd95SBruce Richardson unsigned int temp = -1; 1229*99a2dd95SBruce Richardson 1230*99a2dd95SBruce Richardson if (id != NULL) 1231*99a2dd95SBruce Richardson *id = (unsigned int)-1; 1232*99a2dd95SBruce Richardson else 1233*99a2dd95SBruce Richardson id = &temp; /* ensure driver never gets a NULL value */ 1234*99a2dd95SBruce Richardson 1235*99a2dd95SBruce Richardson /* implemented by driver */ 1236*99a2dd95SBruce Richardson if (dev->dev_ops->xstats_get_by_name != NULL) 1237*99a2dd95SBruce Richardson return (*dev->dev_ops->xstats_get_by_name)(dev, name, id); 1238*99a2dd95SBruce Richardson return -ENOTSUP; 1239*99a2dd95SBruce Richardson } 1240*99a2dd95SBruce Richardson 1241*99a2dd95SBruce Richardson int rte_event_dev_xstats_reset(uint8_t dev_id, 1242*99a2dd95SBruce Richardson enum rte_event_dev_xstats_mode mode, int16_t queue_port_id, 1243*99a2dd95SBruce Richardson const uint32_t ids[], uint32_t nb_ids) 1244*99a2dd95SBruce Richardson { 1245*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1246*99a2dd95SBruce Richardson struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1247*99a2dd95SBruce Richardson 1248*99a2dd95SBruce Richardson if (dev->dev_ops->xstats_reset != NULL) 1249*99a2dd95SBruce Richardson return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id, 1250*99a2dd95SBruce Richardson ids, nb_ids); 1251*99a2dd95SBruce Richardson return -ENOTSUP; 1252*99a2dd95SBruce 
Richardson } 1253*99a2dd95SBruce Richardson 1254*99a2dd95SBruce Richardson int rte_event_pmd_selftest_seqn_dynfield_offset = -1; 1255*99a2dd95SBruce Richardson 1256*99a2dd95SBruce Richardson int rte_event_dev_selftest(uint8_t dev_id) 1257*99a2dd95SBruce Richardson { 1258*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1259*99a2dd95SBruce Richardson static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = { 1260*99a2dd95SBruce Richardson .name = "rte_event_pmd_selftest_seqn_dynfield", 1261*99a2dd95SBruce Richardson .size = sizeof(rte_event_pmd_selftest_seqn_t), 1262*99a2dd95SBruce Richardson .align = __alignof__(rte_event_pmd_selftest_seqn_t), 1263*99a2dd95SBruce Richardson }; 1264*99a2dd95SBruce Richardson struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1265*99a2dd95SBruce Richardson 1266*99a2dd95SBruce Richardson if (dev->dev_ops->dev_selftest != NULL) { 1267*99a2dd95SBruce Richardson rte_event_pmd_selftest_seqn_dynfield_offset = 1268*99a2dd95SBruce Richardson rte_mbuf_dynfield_register(&test_seqn_dynfield_desc); 1269*99a2dd95SBruce Richardson if (rte_event_pmd_selftest_seqn_dynfield_offset < 0) 1270*99a2dd95SBruce Richardson return -ENOMEM; 1271*99a2dd95SBruce Richardson return (*dev->dev_ops->dev_selftest)(); 1272*99a2dd95SBruce Richardson } 1273*99a2dd95SBruce Richardson return -ENOTSUP; 1274*99a2dd95SBruce Richardson } 1275*99a2dd95SBruce Richardson 1276*99a2dd95SBruce Richardson struct rte_mempool * 1277*99a2dd95SBruce Richardson rte_event_vector_pool_create(const char *name, unsigned int n, 1278*99a2dd95SBruce Richardson unsigned int cache_size, uint16_t nb_elem, 1279*99a2dd95SBruce Richardson int socket_id) 1280*99a2dd95SBruce Richardson { 1281*99a2dd95SBruce Richardson const char *mp_ops_name; 1282*99a2dd95SBruce Richardson struct rte_mempool *mp; 1283*99a2dd95SBruce Richardson unsigned int elt_sz; 1284*99a2dd95SBruce Richardson int ret; 1285*99a2dd95SBruce Richardson 1286*99a2dd95SBruce Richardson if (!nb_elem) { 
1287*99a2dd95SBruce Richardson RTE_LOG(ERR, EVENTDEV, 1288*99a2dd95SBruce Richardson "Invalid number of elements=%d requested\n", nb_elem); 1289*99a2dd95SBruce Richardson rte_errno = EINVAL; 1290*99a2dd95SBruce Richardson return NULL; 1291*99a2dd95SBruce Richardson } 1292*99a2dd95SBruce Richardson 1293*99a2dd95SBruce Richardson elt_sz = 1294*99a2dd95SBruce Richardson sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t)); 1295*99a2dd95SBruce Richardson mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id, 1296*99a2dd95SBruce Richardson 0); 1297*99a2dd95SBruce Richardson if (mp == NULL) 1298*99a2dd95SBruce Richardson return NULL; 1299*99a2dd95SBruce Richardson 1300*99a2dd95SBruce Richardson mp_ops_name = rte_mbuf_best_mempool_ops(); 1301*99a2dd95SBruce Richardson ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL); 1302*99a2dd95SBruce Richardson if (ret != 0) { 1303*99a2dd95SBruce Richardson RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n"); 1304*99a2dd95SBruce Richardson goto err; 1305*99a2dd95SBruce Richardson } 1306*99a2dd95SBruce Richardson 1307*99a2dd95SBruce Richardson ret = rte_mempool_populate_default(mp); 1308*99a2dd95SBruce Richardson if (ret < 0) 1309*99a2dd95SBruce Richardson goto err; 1310*99a2dd95SBruce Richardson 1311*99a2dd95SBruce Richardson return mp; 1312*99a2dd95SBruce Richardson err: 1313*99a2dd95SBruce Richardson rte_mempool_free(mp); 1314*99a2dd95SBruce Richardson rte_errno = -ret; 1315*99a2dd95SBruce Richardson return NULL; 1316*99a2dd95SBruce Richardson } 1317*99a2dd95SBruce Richardson 1318*99a2dd95SBruce Richardson int 1319*99a2dd95SBruce Richardson rte_event_dev_start(uint8_t dev_id) 1320*99a2dd95SBruce Richardson { 1321*99a2dd95SBruce Richardson struct rte_eventdev *dev; 1322*99a2dd95SBruce Richardson int diag; 1323*99a2dd95SBruce Richardson 1324*99a2dd95SBruce Richardson RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id); 1325*99a2dd95SBruce Richardson 1326*99a2dd95SBruce Richardson 
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1327*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 1328*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); 1329*99a2dd95SBruce Richardson 1330*99a2dd95SBruce Richardson if (dev->data->dev_started != 0) { 1331*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started", 1332*99a2dd95SBruce Richardson dev_id); 1333*99a2dd95SBruce Richardson return 0; 1334*99a2dd95SBruce Richardson } 1335*99a2dd95SBruce Richardson 1336*99a2dd95SBruce Richardson diag = (*dev->dev_ops->dev_start)(dev); 1337*99a2dd95SBruce Richardson rte_eventdev_trace_start(dev_id, diag); 1338*99a2dd95SBruce Richardson if (diag == 0) 1339*99a2dd95SBruce Richardson dev->data->dev_started = 1; 1340*99a2dd95SBruce Richardson else 1341*99a2dd95SBruce Richardson return diag; 1342*99a2dd95SBruce Richardson 1343*99a2dd95SBruce Richardson return 0; 1344*99a2dd95SBruce Richardson } 1345*99a2dd95SBruce Richardson 1346*99a2dd95SBruce Richardson int 1347*99a2dd95SBruce Richardson rte_event_dev_stop_flush_callback_register(uint8_t dev_id, 1348*99a2dd95SBruce Richardson eventdev_stop_flush_t callback, void *userdata) 1349*99a2dd95SBruce Richardson { 1350*99a2dd95SBruce Richardson struct rte_eventdev *dev; 1351*99a2dd95SBruce Richardson 1352*99a2dd95SBruce Richardson RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id); 1353*99a2dd95SBruce Richardson 1354*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1355*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 1356*99a2dd95SBruce Richardson 1357*99a2dd95SBruce Richardson dev->dev_ops->dev_stop_flush = callback; 1358*99a2dd95SBruce Richardson dev->data->dev_stop_flush_arg = userdata; 1359*99a2dd95SBruce Richardson 1360*99a2dd95SBruce Richardson return 0; 1361*99a2dd95SBruce Richardson } 1362*99a2dd95SBruce Richardson 1363*99a2dd95SBruce Richardson void 1364*99a2dd95SBruce Richardson 
rte_event_dev_stop(uint8_t dev_id) 1365*99a2dd95SBruce Richardson { 1366*99a2dd95SBruce Richardson struct rte_eventdev *dev; 1367*99a2dd95SBruce Richardson 1368*99a2dd95SBruce Richardson RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id); 1369*99a2dd95SBruce Richardson 1370*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id); 1371*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 1372*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop); 1373*99a2dd95SBruce Richardson 1374*99a2dd95SBruce Richardson if (dev->data->dev_started == 0) { 1375*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already stopped", 1376*99a2dd95SBruce Richardson dev_id); 1377*99a2dd95SBruce Richardson return; 1378*99a2dd95SBruce Richardson } 1379*99a2dd95SBruce Richardson 1380*99a2dd95SBruce Richardson dev->data->dev_started = 0; 1381*99a2dd95SBruce Richardson (*dev->dev_ops->dev_stop)(dev); 1382*99a2dd95SBruce Richardson rte_eventdev_trace_stop(dev_id); 1383*99a2dd95SBruce Richardson } 1384*99a2dd95SBruce Richardson 1385*99a2dd95SBruce Richardson int 1386*99a2dd95SBruce Richardson rte_event_dev_close(uint8_t dev_id) 1387*99a2dd95SBruce Richardson { 1388*99a2dd95SBruce Richardson struct rte_eventdev *dev; 1389*99a2dd95SBruce Richardson 1390*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1391*99a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 1392*99a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); 1393*99a2dd95SBruce Richardson 1394*99a2dd95SBruce Richardson /* Device must be stopped before it can be closed */ 1395*99a2dd95SBruce Richardson if (dev->data->dev_started == 1) { 1396*99a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Device %u must be stopped before closing", 1397*99a2dd95SBruce Richardson dev_id); 1398*99a2dd95SBruce Richardson return -EBUSY; 1399*99a2dd95SBruce Richardson } 1400*99a2dd95SBruce Richardson 1401*99a2dd95SBruce Richardson 
rte_eventdev_trace_close(dev_id); 1402*99a2dd95SBruce Richardson return (*dev->dev_ops->dev_close)(dev); 1403*99a2dd95SBruce Richardson } 1404*99a2dd95SBruce Richardson 1405*99a2dd95SBruce Richardson static inline int 1406*99a2dd95SBruce Richardson rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data, 1407*99a2dd95SBruce Richardson int socket_id) 1408*99a2dd95SBruce Richardson { 1409*99a2dd95SBruce Richardson char mz_name[RTE_EVENTDEV_NAME_MAX_LEN]; 1410*99a2dd95SBruce Richardson const struct rte_memzone *mz; 1411*99a2dd95SBruce Richardson int n; 1412*99a2dd95SBruce Richardson 1413*99a2dd95SBruce Richardson /* Generate memzone name */ 1414*99a2dd95SBruce Richardson n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id); 1415*99a2dd95SBruce Richardson if (n >= (int)sizeof(mz_name)) 1416*99a2dd95SBruce Richardson return -EINVAL; 1417*99a2dd95SBruce Richardson 1418*99a2dd95SBruce Richardson if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 1419*99a2dd95SBruce Richardson mz = rte_memzone_reserve(mz_name, 1420*99a2dd95SBruce Richardson sizeof(struct rte_eventdev_data), 1421*99a2dd95SBruce Richardson socket_id, 0); 1422*99a2dd95SBruce Richardson } else 1423*99a2dd95SBruce Richardson mz = rte_memzone_lookup(mz_name); 1424*99a2dd95SBruce Richardson 1425*99a2dd95SBruce Richardson if (mz == NULL) 1426*99a2dd95SBruce Richardson return -ENOMEM; 1427*99a2dd95SBruce Richardson 1428*99a2dd95SBruce Richardson *data = mz->addr; 1429*99a2dd95SBruce Richardson if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1430*99a2dd95SBruce Richardson memset(*data, 0, sizeof(struct rte_eventdev_data)); 1431*99a2dd95SBruce Richardson 1432*99a2dd95SBruce Richardson return 0; 1433*99a2dd95SBruce Richardson } 1434*99a2dd95SBruce Richardson 1435*99a2dd95SBruce Richardson static inline uint8_t 1436*99a2dd95SBruce Richardson rte_eventdev_find_free_device_index(void) 1437*99a2dd95SBruce Richardson { 1438*99a2dd95SBruce Richardson uint8_t dev_id; 1439*99a2dd95SBruce 
	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

/* Placeholder Tx-adapter enqueue: always fails with rte_errno=ENOTSUP.
 * Installed until a PMD provides a real implementation. */
static uint16_t
rte_event_tx_adapter_enqueue(__rte_unused void *port,
			__rte_unused struct rte_event ev[],
			__rte_unused uint16_t nb_events)
{
	rte_errno = ENOTSUP;
	return 0;
}

/* Placeholder crypto-adapter enqueue: same ENOTSUP stub as above. */
static uint16_t
rte_event_crypto_adapter_enqueue(__rte_unused void *port,
			__rte_unused struct rte_event ev[],
			__rte_unused uint16_t nb_events)
{
	rte_errno = ENOTSUP;
	return 0;
}

/*
 * Allocate a new eventdev slot for a PMD with the given unique name.
 * Fills in stub adapter ops, attaches shared device data (created in
 * the primary process, looked up in secondaries) and bumps the global
 * device count.  Returns NULL on duplicate name, table full, or
 * shared-data allocation failure.
 */
struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	/* Default fast-path stubs; PMD overrides them during probe. */
	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
	eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		/* Only the primary initializes the shared data fields. */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {

			strlcpy(eventdev->data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN);

			eventdev->data->dev_id = dev_id;
			eventdev->data->socket_id = socket_id;
			eventdev->data->dev_started = 0;
		}

		eventdev->attached = RTE_EVENTDEV_ATTACHED;
		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

/*
 * Release a device slot previously obtained from rte_event_pmd_allocate.
 * The primary process also frees private data and the shared memzone.
 * NOTE(review): the slot is marked DETACHED and nb_devs decremented
 * before the memzone teardown; an error return below therefore leaves
 * the device half-released -- confirm whether callers tolerate this.
 */
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}


static int
handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	uint8_t dev_id;
	int ndev = rte_event_dev_count();

	if (ndev < 1)
		return -1;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_ATTACHED)
			rte_tel_data_add_array_int(d, dev_id);
	}

	return 0;
}

/* Telemetry endpoint: list port ids of one device.  Params: "<dev_id>". */
static int
handle_port_list(const char *cmd __rte_unused,
		 const char *params,
		 struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;
	unsigned long dev_id_ul;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id_ul = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");
	/* Reject ids that would silently wrap when narrowed to uint8_t
	 * (e.g. 256 -> 0), which could pass validation for the wrong
	 * device. */
	if (dev_id_ul >= RTE_EVENT_MAX_DEVS)
		return -EINVAL;
	dev_id = (uint8_t)dev_id_ul;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_ports; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

/* Telemetry endpoint: list queue ids of one device.  Params: "<dev_id>". */
static int
handle_queue_list(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;
	unsigned long dev_id_ul;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id_ul = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");
	/* Same uint8_t wrap-around guard as handle_port_list. */
	if (dev_id_ul >= RTE_EVENT_MAX_DEVS)
		return -EINVAL;
	dev_id = (uint8_t)dev_id_ul;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_queues; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}
1631*99a2dd95SBruce Richardson static int 1632*99a2dd95SBruce Richardson handle_queue_links(const char *cmd __rte_unused, 1633*99a2dd95SBruce Richardson const char *params, 1634*99a2dd95SBruce Richardson struct rte_tel_data *d) 1635*99a2dd95SBruce Richardson { 1636*99a2dd95SBruce Richardson int i, ret, port_id = 0; 1637*99a2dd95SBruce Richardson char *end_param; 1638*99a2dd95SBruce Richardson uint8_t dev_id; 1639*99a2dd95SBruce Richardson uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; 1640*99a2dd95SBruce Richardson uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV]; 1641*99a2dd95SBruce Richardson const char *p_param; 1642*99a2dd95SBruce Richardson 1643*99a2dd95SBruce Richardson if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 1644*99a2dd95SBruce Richardson return -1; 1645*99a2dd95SBruce Richardson 1646*99a2dd95SBruce Richardson /* Get dev ID from parameter string */ 1647*99a2dd95SBruce Richardson dev_id = strtoul(params, &end_param, 10); 1648*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1649*99a2dd95SBruce Richardson 1650*99a2dd95SBruce Richardson p_param = strtok(end_param, ","); 1651*99a2dd95SBruce Richardson if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param)) 1652*99a2dd95SBruce Richardson return -1; 1653*99a2dd95SBruce Richardson 1654*99a2dd95SBruce Richardson port_id = strtoul(p_param, &end_param, 10); 1655*99a2dd95SBruce Richardson p_param = strtok(NULL, "\0"); 1656*99a2dd95SBruce Richardson if (p_param != NULL) 1657*99a2dd95SBruce Richardson RTE_EDEV_LOG_DEBUG( 1658*99a2dd95SBruce Richardson "Extra parameters passed to eventdev telemetry command, ignoring"); 1659*99a2dd95SBruce Richardson 1660*99a2dd95SBruce Richardson ret = rte_event_port_links_get(dev_id, port_id, queues, priorities); 1661*99a2dd95SBruce Richardson if (ret < 0) 1662*99a2dd95SBruce Richardson return -1; 1663*99a2dd95SBruce Richardson 1664*99a2dd95SBruce Richardson rte_tel_data_start_dict(d); 1665*99a2dd95SBruce Richardson for (i 
= 0; i < ret; i++) { 1666*99a2dd95SBruce Richardson char qid_name[32]; 1667*99a2dd95SBruce Richardson 1668*99a2dd95SBruce Richardson snprintf(qid_name, 31, "qid_%u", queues[i]); 1669*99a2dd95SBruce Richardson rte_tel_data_add_dict_u64(d, qid_name, priorities[i]); 1670*99a2dd95SBruce Richardson } 1671*99a2dd95SBruce Richardson 1672*99a2dd95SBruce Richardson return 0; 1673*99a2dd95SBruce Richardson } 1674*99a2dd95SBruce Richardson 1675*99a2dd95SBruce Richardson static int 1676*99a2dd95SBruce Richardson eventdev_build_telemetry_data(int dev_id, 1677*99a2dd95SBruce Richardson enum rte_event_dev_xstats_mode mode, 1678*99a2dd95SBruce Richardson int port_queue_id, 1679*99a2dd95SBruce Richardson struct rte_tel_data *d) 1680*99a2dd95SBruce Richardson { 1681*99a2dd95SBruce Richardson struct rte_event_dev_xstats_name *xstat_names; 1682*99a2dd95SBruce Richardson unsigned int *ids; 1683*99a2dd95SBruce Richardson uint64_t *values; 1684*99a2dd95SBruce Richardson int i, ret, num_xstats; 1685*99a2dd95SBruce Richardson 1686*99a2dd95SBruce Richardson num_xstats = rte_event_dev_xstats_names_get(dev_id, 1687*99a2dd95SBruce Richardson mode, 1688*99a2dd95SBruce Richardson port_queue_id, 1689*99a2dd95SBruce Richardson NULL, 1690*99a2dd95SBruce Richardson NULL, 1691*99a2dd95SBruce Richardson 0); 1692*99a2dd95SBruce Richardson 1693*99a2dd95SBruce Richardson if (num_xstats < 0) 1694*99a2dd95SBruce Richardson return -1; 1695*99a2dd95SBruce Richardson 1696*99a2dd95SBruce Richardson /* use one malloc for names */ 1697*99a2dd95SBruce Richardson xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name)) 1698*99a2dd95SBruce Richardson * num_xstats); 1699*99a2dd95SBruce Richardson if (xstat_names == NULL) 1700*99a2dd95SBruce Richardson return -1; 1701*99a2dd95SBruce Richardson 1702*99a2dd95SBruce Richardson ids = malloc((sizeof(unsigned int)) * num_xstats); 1703*99a2dd95SBruce Richardson if (ids == NULL) { 1704*99a2dd95SBruce Richardson free(xstat_names); 1705*99a2dd95SBruce Richardson return 
-1; 1706*99a2dd95SBruce Richardson } 1707*99a2dd95SBruce Richardson 1708*99a2dd95SBruce Richardson values = malloc((sizeof(uint64_t)) * num_xstats); 1709*99a2dd95SBruce Richardson if (values == NULL) { 1710*99a2dd95SBruce Richardson free(xstat_names); 1711*99a2dd95SBruce Richardson free(ids); 1712*99a2dd95SBruce Richardson return -1; 1713*99a2dd95SBruce Richardson } 1714*99a2dd95SBruce Richardson 1715*99a2dd95SBruce Richardson ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id, 1716*99a2dd95SBruce Richardson xstat_names, ids, num_xstats); 1717*99a2dd95SBruce Richardson if (ret < 0 || ret > num_xstats) { 1718*99a2dd95SBruce Richardson free(xstat_names); 1719*99a2dd95SBruce Richardson free(ids); 1720*99a2dd95SBruce Richardson free(values); 1721*99a2dd95SBruce Richardson return -1; 1722*99a2dd95SBruce Richardson } 1723*99a2dd95SBruce Richardson 1724*99a2dd95SBruce Richardson ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id, 1725*99a2dd95SBruce Richardson ids, values, num_xstats); 1726*99a2dd95SBruce Richardson if (ret < 0 || ret > num_xstats) { 1727*99a2dd95SBruce Richardson free(xstat_names); 1728*99a2dd95SBruce Richardson free(ids); 1729*99a2dd95SBruce Richardson free(values); 1730*99a2dd95SBruce Richardson return -1; 1731*99a2dd95SBruce Richardson } 1732*99a2dd95SBruce Richardson 1733*99a2dd95SBruce Richardson rte_tel_data_start_dict(d); 1734*99a2dd95SBruce Richardson for (i = 0; i < num_xstats; i++) 1735*99a2dd95SBruce Richardson rte_tel_data_add_dict_u64(d, xstat_names[i].name, 1736*99a2dd95SBruce Richardson values[i]); 1737*99a2dd95SBruce Richardson 1738*99a2dd95SBruce Richardson free(xstat_names); 1739*99a2dd95SBruce Richardson free(ids); 1740*99a2dd95SBruce Richardson free(values); 1741*99a2dd95SBruce Richardson return 0; 1742*99a2dd95SBruce Richardson } 1743*99a2dd95SBruce Richardson 1744*99a2dd95SBruce Richardson static int 1745*99a2dd95SBruce Richardson handle_dev_xstats(const char *cmd __rte_unused, 1746*99a2dd95SBruce Richardson 
const char *params, 1747*99a2dd95SBruce Richardson struct rte_tel_data *d) 1748*99a2dd95SBruce Richardson { 1749*99a2dd95SBruce Richardson int dev_id; 1750*99a2dd95SBruce Richardson enum rte_event_dev_xstats_mode mode; 1751*99a2dd95SBruce Richardson char *end_param; 1752*99a2dd95SBruce Richardson 1753*99a2dd95SBruce Richardson if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 1754*99a2dd95SBruce Richardson return -1; 1755*99a2dd95SBruce Richardson 1756*99a2dd95SBruce Richardson /* Get dev ID from parameter string */ 1757*99a2dd95SBruce Richardson dev_id = strtoul(params, &end_param, 10); 1758*99a2dd95SBruce Richardson if (*end_param != '\0') 1759*99a2dd95SBruce Richardson RTE_EDEV_LOG_DEBUG( 1760*99a2dd95SBruce Richardson "Extra parameters passed to eventdev telemetry command, ignoring"); 1761*99a2dd95SBruce Richardson 1762*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1763*99a2dd95SBruce Richardson 1764*99a2dd95SBruce Richardson mode = RTE_EVENT_DEV_XSTATS_DEVICE; 1765*99a2dd95SBruce Richardson return eventdev_build_telemetry_data(dev_id, mode, 0, d); 1766*99a2dd95SBruce Richardson } 1767*99a2dd95SBruce Richardson 1768*99a2dd95SBruce Richardson static int 1769*99a2dd95SBruce Richardson handle_port_xstats(const char *cmd __rte_unused, 1770*99a2dd95SBruce Richardson const char *params, 1771*99a2dd95SBruce Richardson struct rte_tel_data *d) 1772*99a2dd95SBruce Richardson { 1773*99a2dd95SBruce Richardson int dev_id; 1774*99a2dd95SBruce Richardson int port_queue_id = 0; 1775*99a2dd95SBruce Richardson enum rte_event_dev_xstats_mode mode; 1776*99a2dd95SBruce Richardson char *end_param; 1777*99a2dd95SBruce Richardson const char *p_param; 1778*99a2dd95SBruce Richardson 1779*99a2dd95SBruce Richardson if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 1780*99a2dd95SBruce Richardson return -1; 1781*99a2dd95SBruce Richardson 1782*99a2dd95SBruce Richardson /* Get dev ID from parameter string */ 1783*99a2dd95SBruce 
Richardson dev_id = strtoul(params, &end_param, 10); 1784*99a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1785*99a2dd95SBruce Richardson 1786*99a2dd95SBruce Richardson p_param = strtok(end_param, ","); 1787*99a2dd95SBruce Richardson mode = RTE_EVENT_DEV_XSTATS_PORT; 1788*99a2dd95SBruce Richardson 1789*99a2dd95SBruce Richardson if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param)) 1790*99a2dd95SBruce Richardson return -1; 1791*99a2dd95SBruce Richardson 1792*99a2dd95SBruce Richardson port_queue_id = strtoul(p_param, &end_param, 10); 1793*99a2dd95SBruce Richardson 1794*99a2dd95SBruce Richardson p_param = strtok(NULL, "\0"); 1795*99a2dd95SBruce Richardson if (p_param != NULL) 1796*99a2dd95SBruce Richardson RTE_EDEV_LOG_DEBUG( 1797*99a2dd95SBruce Richardson "Extra parameters passed to eventdev telemetry command, ignoring"); 1798*99a2dd95SBruce Richardson 1799*99a2dd95SBruce Richardson return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d); 1800*99a2dd95SBruce Richardson } 1801*99a2dd95SBruce Richardson 1802*99a2dd95SBruce Richardson static int 1803*99a2dd95SBruce Richardson handle_queue_xstats(const char *cmd __rte_unused, 1804*99a2dd95SBruce Richardson const char *params, 1805*99a2dd95SBruce Richardson struct rte_tel_data *d) 1806*99a2dd95SBruce Richardson { 1807*99a2dd95SBruce Richardson int dev_id; 1808*99a2dd95SBruce Richardson int port_queue_id = 0; 1809*99a2dd95SBruce Richardson enum rte_event_dev_xstats_mode mode; 1810*99a2dd95SBruce Richardson char *end_param; 1811*99a2dd95SBruce Richardson const char *p_param; 1812*99a2dd95SBruce Richardson 1813*99a2dd95SBruce Richardson if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 1814*99a2dd95SBruce Richardson return -1; 1815*99a2dd95SBruce Richardson 1816*99a2dd95SBruce Richardson /* Get dev ID from parameter string */ 1817*99a2dd95SBruce Richardson dev_id = strtoul(params, &end_param, 10); 1818*99a2dd95SBruce Richardson 
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1819*99a2dd95SBruce Richardson 1820*99a2dd95SBruce Richardson p_param = strtok(end_param, ","); 1821*99a2dd95SBruce Richardson mode = RTE_EVENT_DEV_XSTATS_QUEUE; 1822*99a2dd95SBruce Richardson 1823*99a2dd95SBruce Richardson if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param)) 1824*99a2dd95SBruce Richardson return -1; 1825*99a2dd95SBruce Richardson 1826*99a2dd95SBruce Richardson port_queue_id = strtoul(p_param, &end_param, 10); 1827*99a2dd95SBruce Richardson 1828*99a2dd95SBruce Richardson p_param = strtok(NULL, "\0"); 1829*99a2dd95SBruce Richardson if (p_param != NULL) 1830*99a2dd95SBruce Richardson RTE_EDEV_LOG_DEBUG( 1831*99a2dd95SBruce Richardson "Extra parameters passed to eventdev telemetry command, ignoring"); 1832*99a2dd95SBruce Richardson 1833*99a2dd95SBruce Richardson return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d); 1834*99a2dd95SBruce Richardson } 1835*99a2dd95SBruce Richardson 1836*99a2dd95SBruce Richardson RTE_INIT(eventdev_init_telemetry) 1837*99a2dd95SBruce Richardson { 1838*99a2dd95SBruce Richardson rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list, 1839*99a2dd95SBruce Richardson "Returns list of available eventdevs. Takes no parameters"); 1840*99a2dd95SBruce Richardson rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list, 1841*99a2dd95SBruce Richardson "Returns list of available ports. Parameter: DevID"); 1842*99a2dd95SBruce Richardson rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list, 1843*99a2dd95SBruce Richardson "Returns list of available queues. Parameter: DevID"); 1844*99a2dd95SBruce Richardson 1845*99a2dd95SBruce Richardson rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats, 1846*99a2dd95SBruce Richardson "Returns stats for an eventdev. 
Parameter: DevID"); 1847*99a2dd95SBruce Richardson rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats, 1848*99a2dd95SBruce Richardson "Returns stats for an eventdev port. Params: DevID,PortID"); 1849*99a2dd95SBruce Richardson rte_telemetry_register_cmd("/eventdev/queue_xstats", 1850*99a2dd95SBruce Richardson handle_queue_xstats, 1851*99a2dd95SBruce Richardson "Returns stats for an eventdev queue. Params: DevID,QueueID"); 1852*99a2dd95SBruce Richardson rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links, 1853*99a2dd95SBruce Richardson "Returns links for an eventdev port. Params: DevID,QueueID"); 1854*99a2dd95SBruce Richardson } 1855