Lines matching refs:dev (code-search hits; the leading number on each line below is its line number in the searched file, which from the rte_bbdev_* symbols appears to be DPDK's lib/bbdev/rte_bbdev.c)

42 #define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
43 if (dev == NULL) { \
50 #define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
51 if (dev->dev_ops == NULL) { \
68 #define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
69 if (queue_id >= dev->data->num_queues) { \
71 queue_id, dev->data->dev_id); \
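
The three hits above are the guard macros used throughout the file; the search truncates each body after its first check. Below is a plausible reconstruction of the first and third macros. Only the matched lines are from the source; the rte_bbdev_log() messages, the errno values, and the do/while closing are assumptions filled in from common DPDK conventions.

#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
        if (dev == NULL) { \
                /* assumed log text and errno */ \
                rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
                return -ENODEV; \
        } \
} while (0)

#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
        if (queue_id >= dev->data->num_queues) { \
                rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
                                queue_id, dev->data->dev_id); \
                return -ERANGE; /* assumed errno */ \
        } \
} while (0)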
279 struct rte_bbdev *dev = get_dev(i);
280 if (dev && (strncmp(dev->data->name,
282 return dev;
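
Lines 279-282 are a by-name device lookup: iterate every device slot, compare names, return the first match. A sketch of that loop follows; the wrapper name, the loop bound, and the strncmp length are assumptions, and get_dev() is the file-internal helper seen throughout these hits.

/* Hypothetical wrapper around the lookup loop at lines 279-282. */
static struct rte_bbdev *
find_named_dev(const char *name)
{
        unsigned int i;

        for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
                struct rte_bbdev *dev = get_dev(i);

                if (dev && (strncmp(dev->data->name, name,
                                RTE_BBDEV_NAME_MAX_LEN) == 0))
                        return dev;
        }
        return NULL;
}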
319 struct rte_bbdev *dev = get_dev(dev_id);
320 VALID_DEV_OR_RET_ERR(dev, dev_id);
322 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
324 if (dev->data->started) {
332 VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
334 dev->dev_ops->info_get(dev, &dev_info);
344 if (dev->data->queues != NULL) {
345 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
346 for (i = 0; i < dev->data->num_queues; i++) {
347 int ret = dev->dev_ops->queue_release(dev, i);
356 if (dev->dev_ops->close) {
357 ret = dev->dev_ops->close(dev);
365 rte_free(dev->data->queues);
369 dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
370 sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
371 dev->data->socket_id);
372 if (dev->data->queues == NULL) {
375 num_queues, dev_id, dev->data->socket_id);
379 dev->data->num_queues = num_queues;
382 if (dev->dev_ops->setup_queues) {
383 ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
397 dev->data->num_queues = 0;
398 rte_free(dev->data->queues);
399 dev->data->queues = NULL;
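
Lines 319-399 walk the queue (re)allocation path, presumably rte_bbdev_setup_queues(): reject a started device, query driver limits via info_get, release any previously configured queues, allocate the queue array on the device's socket with rte_calloc_socket(), and on a failed driver setup_queues hook roll back by zeroing num_queues and freeing the array. From the caller's side this reduces to one call; a minimal sketch, assuming the public rte_bbdev_setup_queues(dev_id, num_queues, socket_id) form:

#include <rte_bbdev.h>
#include <rte_lcore.h>

/* Caller-side sketch; four queues is an arbitrary example count. */
static int
configure_bbdev(uint16_t dev_id)
{
        int ret = rte_bbdev_setup_queues(dev_id, 4, rte_socket_id());

        if (ret < 0)    /* device started, bad count, or allocation failure */
                return ret;
        return 0;
}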
407 struct rte_bbdev *dev = get_dev(dev_id);
408 VALID_DEV_OR_RET_ERR(dev, dev_id);
410 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
412 if (dev->data->started) {
419 if (dev->dev_ops->intr_enable) {
420 ret = dev->dev_ops->intr_enable(dev);
427 rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
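
Lines 407-427 are an interrupt-enable path that follows the file's standard shape: validate device and ops, refuse a started device, call the optional driver hook, log on success. A skeleton of that shape is sketched below; the function name rte_bbdev_intr_enable and all glue between the matched lines are assumptions.

/* Reconstruction sketch of lines 407-427; only the dev-> lines
 * appear in the hits, the rest is assumed glue. */
int
rte_bbdev_intr_enable(uint16_t dev_id)
{
        struct rte_bbdev *dev = get_dev(dev_id);

        VALID_DEV_OR_RET_ERR(dev, dev_id);
        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        if (dev->data->started) {
                rte_bbdev_log(ERR,
                                "device %u cannot be configured while started",
                                dev_id);
                return -EBUSY; /* assumed errno */
        }

        if (dev->dev_ops->intr_enable) {
                int ret = dev->dev_ops->intr_enable(dev);

                if (ret < 0)
                        return ret;
        }

        rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
        return 0;
}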
441 struct rte_bbdev *dev = get_dev(dev_id);
446 VALID_DEV_OR_RET_ERR(dev, dev_id);
448 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
450 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
452 if (dev->data->queues[queue_id].started || dev->data->started) {
459 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
460 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);
463 VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
465 dev->dev_ops->info_get(dev, &dev_info);
514 if (dev->data->queues[queue_id].queue_private != NULL) {
515 ret = dev->dev_ops->queue_release(dev, queue_id);
524 ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
535 stored_conf = &dev->data->queues[queue_id].conf;
544 rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
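
Lines 441-544 configure a single queue: both the device and the target queue must be stopped, the driver must provide queue_release/queue_setup/info_get hooks, any existing private queue state is released first, and the (conf != NULL) expression at line 524 means a NULL conf asks the driver for its defaults; the accepted conf is then stored per queue (line 535). A hedged caller sketch, where the field values are illustrative and the op type is just one member of enum rte_bbdev_op_type:

#include <rte_bbdev.h>
#include <rte_lcore.h>

/* Hypothetical helper; queue_size and op_type are example values. */
static int
setup_one_queue(uint16_t dev_id, uint16_t queue_id)
{
        struct rte_bbdev_queue_conf qconf = {
                .socket = (int)rte_socket_id(),
                .queue_size = 64,
                .priority = 0,
                .op_type = RTE_BBDEV_OP_TURBO_DEC,
        };

        /* Passing NULL instead of &qconf would request driver defaults,
         * per the (conf != NULL) branch at line 524. */
        return rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
}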
555 struct rte_bbdev *dev = get_dev(dev_id);
556 VALID_DEV_OR_RET_ERR(dev, dev_id);
558 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
560 if (dev->data->started) {
565 if (dev->dev_ops->start) {
566 int ret = dev->dev_ops->start(dev);
574 for (i = 0; i < dev->data->num_queues; i++)
575 if (!dev->data->queues[i].conf.deferred_start)
576 dev->data->queues[i].started = true;
577 dev->data->started = true;
586 struct rte_bbdev *dev = get_dev(dev_id);
587 VALID_DEV_OR_RET_ERR(dev, dev_id);
589 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
591 if (!dev->data->started) {
596 if (dev->dev_ops->stop)
597 dev->dev_ops->stop(dev);
598 dev->data->started = false;
609 struct rte_bbdev *dev = get_dev(dev_id);
610 VALID_DEV_OR_RET_ERR(dev, dev_id);
612 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
614 if (dev->data->started) {
623 for (i = 0; i < dev->data->num_queues; i++) {
624 ret = dev->dev_ops->queue_release(dev, i);
631 rte_free(dev->data->queues);
633 if (dev->dev_ops->close) {
634 ret = dev->dev_ops->close(dev);
642 dev->data->queues = NULL;
643 dev->data->num_queues = 0;
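
Lines 555-643 are the device lifecycle: start flips every non-deferred queue to started (lines 574-576) before marking the device started; stop clears the flag after the optional driver hook; close releases every queue, frees the queue array, and calls the driver's close before zeroing the queue bookkeeping. A caller-side sketch, with the public names assumed from the usual rte_bbdev_* convention:

#include <rte_bbdev.h>

/* Lifecycle sketch; the enqueue/dequeue phase is elided. */
static int
run_device(uint16_t dev_id)
{
        int ret = rte_bbdev_start(dev_id);      /* starts non-deferred queues */

        if (ret < 0)
                return ret;

        /* ... enqueue/dequeue work would happen here ... */

        rte_bbdev_stop(dev_id);                 /* clears data->started */
        return rte_bbdev_close(dev_id);         /* frees queues, driver close */
}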
652 struct rte_bbdev *dev = get_dev(dev_id);
653 VALID_DEV_OR_RET_ERR(dev, dev_id);
655 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
657 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
659 if (dev->data->queues[queue_id].started) {
665 if (dev->dev_ops->queue_start) {
666 int ret = dev->dev_ops->queue_start(dev, queue_id);
673 dev->data->queues[queue_id].started = true;
682 struct rte_bbdev *dev = get_dev(dev_id);
683 VALID_DEV_OR_RET_ERR(dev, dev_id);
685 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
687 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
689 if (!dev->data->queues[queue_id].started) {
695 if (dev->dev_ops->queue_stop) {
696 int ret = dev->dev_ops->queue_stop(dev, queue_id);
703 dev->data->queues[queue_id].started = false;
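
Lines 652-703 are the per-queue counterparts: starting an already-started queue (or stopping a stopped one) is rejected, the driver hooks are optional, and the per-queue started flag is flipped on success. Queues configured with deferred_start, which line 575 skips, are brought up this way; a short sketch, assuming the public per-queue API:

#include <rte_bbdev.h>

/* Starting a queue that was configured with deferred_start = true. */
static int
kick_deferred_queue(uint16_t dev_id, uint16_t queue_id)
{
        int ret = rte_bbdev_queue_start(dev_id, queue_id);

        if (ret < 0)    /* e.g. queue was already started */
                return ret;
        return 0;
}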
711 get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
714 for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
716 &dev->data->queues[q_id].queue_stats;
725 rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
729 reset_stats_in_queues(struct rte_bbdev *dev)
732 for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
734 &dev->data->queues[q_id].queue_stats;
738 rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
744 struct rte_bbdev *dev = get_dev(dev_id);
745 VALID_DEV_OR_RET_ERR(dev, dev_id);
747 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
755 if (dev->dev_ops->stats_get != NULL)
756 dev->dev_ops->stats_get(dev, stats);
758 get_stats_from_queues(dev, stats);
767 struct rte_bbdev *dev = get_dev(dev_id);
768 VALID_DEV_OR_RET_ERR(dev, dev_id);
770 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
772 if (dev->dev_ops->stats_reset != NULL)
773 dev->dev_ops->stats_reset(dev);
775 reset_stats_in_queues(dev);
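
Lines 711-775 split statistics into a driver path and a generic fallback: get_stats_from_queues() and reset_stats_in_queues() walk every queue's queue_stats, while the public entry points prefer the driver's stats_get/stats_reset hook when present (lines 755-756 and 772-773) and fall back to the queue walk otherwise. A read-and-reset sketch; the printed fields are the enqueued/dequeued counters I would expect in struct rte_bbdev_stats, so treat them as assumptions:

#include <inttypes.h>
#include <stdio.h>
#include <rte_bbdev.h>

/* Periodic stats poll sketch. */
static void
dump_stats(uint16_t dev_id)
{
        struct rte_bbdev_stats stats;

        if (rte_bbdev_stats_get(dev_id, &stats) == 0)
                printf("dev %u: enq=%" PRIu64 " deq=%" PRIu64 "\n",
                                dev_id, stats.enqueued_count,
                                stats.dequeued_count);

        rte_bbdev_stats_reset(dev_id);  /* start a fresh interval */
}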
784 struct rte_bbdev *dev = get_dev(dev_id);
785 VALID_DEV_OR_RET_ERR(dev, dev_id);
787 VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
790 rte_bbdev_log(ERR, "NULL dev info structure");
796 dev_info->dev_name = dev->data->name;
797 dev_info->num_queues = dev->data->num_queues;
798 dev_info->device = dev->device;
799 dev_info->socket_id = dev->data->socket_id;
800 dev_info->started = dev->data->started;
803 dev->dev_ops->info_get(dev, &dev_info->drv);
813 struct rte_bbdev *dev = get_dev(dev_id);
814 VALID_DEV_OR_RET_ERR(dev, dev_id);
816 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
825 queue_info->conf = dev->data->queues[queue_id].conf;
826 queue_info->started = dev->data->queues[queue_id].started;
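
Lines 784-826 are the two info queries: the device-level one fills the generic fields itself (name, queue count, underlying device, socket, started) and delegates only the driver-specific part to info_get, which fills dev_info->drv at line 803; the queue-level one simply copies back the stored conf and started flag. A query sketch:

#include <stdio.h>
#include <rte_bbdev.h>

/* Info read-back sketch for the two query paths above. */
static void
print_dev_summary(uint16_t dev_id)
{
        struct rte_bbdev_info info;

        if (rte_bbdev_info_get(dev_id, &info) == 0)
                printf("%s: %u queues on socket %d, %s\n",
                                info.dev_name, info.num_queues,
                                info.socket_id,
                                info.started ? "started" : "stopped");
}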
947 struct rte_bbdev *dev = get_dev(dev_id);
948 VALID_DEV_OR_RET_ERR(dev, dev_id);
964 TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
979 TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
993 struct rte_bbdev *dev = get_dev(dev_id);
994 VALID_DEV_OR_RET_ERR(dev, dev_id);
1009 dev = &rte_bbdev_devices[dev_id];
1012 for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {
1022 TAILQ_REMOVE(&(dev->list_cbs), cb, next);
1033 rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
1039 if (dev == NULL) {
1044 if (dev->data == NULL) {
1057 TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
1066 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
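
Lines 947-1066 implement the event-callback machinery: register walks dev->list_cbs, apparently checking for a duplicate entry before appending (lines 964 and 979); unregister uses a removal-safe TAILQ_FIRST/next iteration (lines 1012 and 1022); and rte_bbdev_pmd_callback_process is the PMD-facing dispatcher that invokes each matching callback with the dev_id and event (line 1066). A registration sketch; the callback signature and the RTE_BBDEV_EVENT_ERROR value follow the rte_bbdev_cb_fn convention as I understand it:

#include <stdio.h>
#include <rte_bbdev.h>
#include <rte_common.h>

/* Hypothetical error-event handler. */
static void
on_dev_error(uint16_t dev_id, enum rte_bbdev_event_type event,
                void *cb_arg, void *ret_param)
{
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);
        printf("bbdev %u raised event %d\n", dev_id, (int)event);
}

static int
watch_errors(uint16_t dev_id)
{
        return rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
                        on_dev_error, NULL);
}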
1077 struct rte_bbdev *dev = get_dev(dev_id);
1078 VALID_DEV_OR_RET_ERR(dev, dev_id);
1079 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1080 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
1081 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
1082 return dev->dev_ops->queue_intr_enable(dev, queue_id);
1088 struct rte_bbdev *dev = get_dev(dev_id);
1089 VALID_DEV_OR_RET_ERR(dev, dev_id);
1090 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1091 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
1092 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
1093 return dev->dev_ops->queue_intr_disable(dev, queue_id);
1101 struct rte_bbdev *dev = get_dev(dev_id);
1105 VALID_DEV_OR_RET_ERR(dev, dev_id);
1106 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1108 intr_handle = dev->intr_handle;
1124 "dev %u q %u int ctl error op %d epfd %d vec %u",
1201 struct rte_bbdev *dev = get_dev(dev_id);
1203 VALID_DEV_OR_RET_ERR(dev, dev_id);
1204 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1205 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
1206 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_ops_dump, dev_id);
1208 q_data = &dev->data->queues[queue_id];
1214 dev->data->name, queue_id);
1222 stats = &dev->data->queues[queue_id].queue_stats;
1231 return dev->dev_ops->queue_ops_dump(dev, queue_id, f);
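
Lines 1201-1231 close the listing with a queue dump helper: validate everything including the driver's queue_ops_dump hook, grab the queue data and its stats, then hand a FILE * to the driver. A debug sketch; treat the rte_bbdev_queue_ops_dump(dev_id, queue_id, FILE *) form as an assumption about the public API, and stderr as an arbitrary target stream:

#include <stdio.h>
#include <rte_bbdev.h>

/* Best-effort debug dump of one queue's recent operations. */
static void
dump_queue(uint16_t dev_id, uint16_t queue_id)
{
        if (rte_bbdev_queue_ops_dump(dev_id, queue_id, stderr) < 0)
                fprintf(stderr, "no ops dump for dev %u queue %u\n",
                                dev_id, queue_id);
}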