/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <fcntl.h>
#include <stdint.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_common_os.h"

static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
	      "Wrong timestamp CQE part size");

static const char * const mlx5_txpp_stat_names[] = {
	"tx_pp_missed_interrupt_errors", /* Missed service interrupt. */
	"tx_pp_rearm_queue_errors", /* Rearm Queue errors. */
	"tx_pp_clock_queue_errors", /* Clock Queue errors. */
	"tx_pp_timestamp_past_errors", /* Timestamp in the past. */
	"tx_pp_timestamp_future_errors", /* Timestamp in the distant future. */
	"tx_pp_timestamp_order_errors", /* Timestamp not in ascending order. */
	"tx_pp_jitter", /* Timestamp jitter (one Clock Queue completion). */
	"tx_pp_wander", /* Timestamp wander (half of Clock Queue CQEs). */
	"tx_pp_sync_lost", /* Scheduling synchronization lost. */
};

/* Destroy Event Queue Notification Channel. */
static void
mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
{
	if (sh->txpp.echan) {
		mlx5_os_devx_destroy_event_channel(sh->txpp.echan);
		sh->txpp.echan = NULL;
	}
}
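/*
 * The DevX event channel created below carries Rearm Queue CQ completion
 * events to the service interrupt handler. The OMIT_EV_DATA flag is set
 * because the handler only needs the wakeup itself, not the event payload.
 */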
/* Create Event Queue Notification Channel. */
static int
mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
{
	MLX5_ASSERT(!sh->txpp.echan);
	sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->cdev->ctx,
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!sh->txpp.echan) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
		return -rte_errno;
	}
	return 0;
}

static void
mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	if (sh->txpp.pp) {
		mlx5_glue->dv_free_pp(sh->txpp.pp);
		sh->txpp.pp = NULL;
		sh->txpp.pp_id = 0;
	}
#else
	RTE_SET_USED(sh);
	DRV_LOG(ERR, "Freeing pacing index is not supported.");
#endif
}

/* Allocate Packet Pacing index from kernel via mlx5dv call. */
static int
mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
	uint64_t rate;

	MLX5_ASSERT(!sh->txpp.pp);
	memset(&pp, 0, sizeof(pp));
	rate = NS_PER_S / sh->txpp.tick;
	if (rate * sh->txpp.tick != NS_PER_S)
		DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
	if (sh->txpp.test) {
		uint32_t len;

		len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
			      (size_t)RTE_ETHER_MIN_LEN);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 burst_upper_bound, len);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 typical_packet_size, len);
		/* Convert packets per second into kilobits. */
		rate = (rate * len) / (1000ul / CHAR_BIT);
		DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
	}
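	/*
	 * Worked example (illustration only, not from the original code):
	 * with a 1200 ns tick the rate is 10^9 / 1200 = 833333 WQEs per
	 * second, and the precision warning above fires because
	 * 833333 * 1200 != 10^9. In test mode the rate is additionally
	 * scaled by the packet length and converted to kilobits per second
	 * to match the MLX5_DATA_RATE mode selected below.
	 */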
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
		 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
	sh->txpp.pp = mlx5_glue->dv_alloc_pp
			(sh->cdev->ctx, sizeof(pp), &pp,
			 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (sh->txpp.pp == NULL) {
		DRV_LOG(ERR, "Failed to allocate packet pacing index.");
		rte_errno = errno;
		return -errno;
	}
	if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
		DRV_LOG(ERR, "Zero packet pacing index allocated.");
		mlx5_txpp_free_pp_index(sh);
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
	return 0;
#else
	RTE_SET_USED(sh);
	DRV_LOG(ERR, "Allocating pacing index is not supported.");
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

static void
mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
{
	mlx5_devx_sq_destroy(&wq->sq_obj);
	mlx5_devx_cq_destroy(&wq->cq_obj);
	memset(wq, 0, sizeof(*wq));
}

static void
mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;

	mlx5_txpp_destroy_send_queue(wq);
}

static void
mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;

	mlx5_txpp_destroy_send_queue(wq);
	if (sh->txpp.tsa) {
		mlx5_free(sh->txpp.tsa);
		sh->txpp.tsa = NULL;
	}
}

static void
mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	union {
		uint32_t w32[2];
		uint64_t w64;
	} cs;

	wq->sq_ci = ci + 1;
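	/*
	 * Rebuild the doorbell value from the stored control segment: the
	 * prebuilt WQEs carry a zero producer index (opcode | 0), so OR-ing
	 * (ci - 1) << 8 into the first dword patches the WQE index in
	 * without disturbing the opcode field.
	 */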
	cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
			(wqe[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
	cs.w32[1] = wqe[ci & (wq->sq_size - 1)].ctrl[1];
	/* Update SQ doorbell record with new SQ ci. */
	mlx5_doorbell_ring(&sh->tx_uar.bf_db, cs.w64, wq->sq_ci,
			   wq->sq_obj.db_rec, !sh->tx_uar.dbnc);
}

static void
mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	uint32_t i;

	for (i = 0; i < wq->sq_size; i += 2) {
		struct mlx5_wqe_cseg *cs;
		struct mlx5_wqe_qseg *qs;
		uint32_t index;

		/* Build SEND_EN request with slave WQE index. */
		cs = &wqe[i + 0].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn =
			rte_cpu_to_be_32(sh->txpp.clock_queue.sq_obj.sq->id);
		/* Build WAIT request with slave CQE index. */
		cs = &wqe[i + 1].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn =
			rte_cpu_to_be_32(sh->txpp.clock_queue.cq_obj.cq->id);
	}
}
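/*
 * Note on the layout above: the WQEs are built in SEND_EN/WAIT pairs. The
 * WAIT WQE blocks until the Clock Queue CQE counter reaches its slave
 * index, and the SEND_EN WQE enables Clock Queue WQEs up to a slave index
 * one rearm period ahead. Since only the SEND_EN WQEs request completions
 * (MLX5_COMP_ALWAYS), the Rearm Queue generates one service interrupt per
 * MLX5_TXPP_REARM Clock Queue completions.
 */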
/* Creates the Rearm Queue to fire the requests to Clock Queue in real time. */
static int
mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = {
		.cd_master = 1,
		.state = MLX5_SQC_STATE_RST,
		.tis_lst_sz = 1,
		.tis_num = sh->tis[0]->id,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = sh->cdev->pdn,
			.uar_page =
				mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
		},
		.ts_format = mlx5_ts_format_conv
				(sh->cdev->config.hca_attr.sq_ts_format),
	};
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
	};
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	int ret;

	/* Create completion queue object for Rearm Queue. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
				  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
				  sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
		return ret;
	}
	wq->cq_ci = 0;
	wq->arm_sn = 0;
	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	/* Create send queue object for Rearm Queue. */
	sq_attr.cqn = wq->cq_obj.cq->id;
	/* There should be no WQE leftovers in the cyclic queue. */
	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
				  log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
				  sh->numa_node);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
		goto error;
	}
	/* Build the WQEs in the Send Queue before moving it to Ready state. */
	mlx5_txpp_fill_wqe_rearm_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_rearm_queue(sh);
	rte_errno = -ret;
	return ret;
}

static void
mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	struct mlx5_wqe_cseg *cs = &wqe->cseg;
	uint32_t wqe_size, opcode, i;
	uint8_t *dst;

	/* For test purposes, fill the WQ with an inline SEND packet. */
	if (sh->txpp.test) {
		wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
				     MLX5_WQE_CSEG_SIZE +
				     2 * MLX5_WQE_ESEG_SIZE -
				     MLX5_ESEG_MIN_INLINE_SIZE,
				     MLX5_WSEG_SIZE);
		opcode = MLX5_OPCODE_SEND;
	} else {
		wqe_size = MLX5_WSEG_SIZE;
		opcode = MLX5_OPCODE_NOP;
	}
	cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
	cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) |
				     (wqe_size / MLX5_WSEG_SIZE));
	cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
	cs->misc = RTE_BE32(0);
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
	if (sh->txpp.test) {
		struct mlx5_wqe_eseg *es = &wqe->eseg;
		struct rte_ether_hdr *eth_hdr;
		struct rte_ipv4_hdr *ip_hdr;
		struct rte_udp_hdr *udp_hdr;

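		/*
		 * The test pattern below is a self-contained Ethernet/IPv4/
		 * UDP frame: random MAC addresses, RFC 2544 benchmarking
		 * addresses (198.18.0.0/15) and the UDP discard port, so any
		 * leaked test traffic is harmless and easy to filter out.
		 */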
		/* Build the inline test packet pattern. */
		MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
		MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
			    (sizeof(struct rte_ether_hdr) +
			     sizeof(struct rte_ipv4_hdr)));
		es->flags = 0;
		es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		es->swp_offs = 0;
		es->metadata = 0;
		es->swp_flags = 0;
		es->mss = 0;
		es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
		/* Build test packet L2 header (Ethernet). */
		dst = (uint8_t *)&es->inline_data;
		eth_hdr = (struct rte_ether_hdr *)dst;
		rte_eth_random_addr(&eth_hdr->dst_addr.addr_bytes[0]);
		rte_eth_random_addr(&eth_hdr->src_addr.addr_bytes[0]);
		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		/* Build test packet L3 header (IPv4). */
		dst += sizeof(struct rte_ether_hdr);
		ip_hdr = (struct rte_ipv4_hdr *)dst;
		ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
		ip_hdr->type_of_service = 0;
		ip_hdr->fragment_offset = 0;
		ip_hdr->time_to_live = 64;
		ip_hdr->next_proto_id = IPPROTO_UDP;
		ip_hdr->packet_id = 0;
		ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
						sizeof(struct rte_ether_hdr));
		/* Use RFC 5735 / RFC 2544 reserved network test addresses. */
		ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 1);
		ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 2);
		if (MLX5_TXPP_TEST_PKT_SIZE <
		    (sizeof(struct rte_ether_hdr) +
		     sizeof(struct rte_ipv4_hdr) +
		     sizeof(struct rte_udp_hdr)))
			goto wcopy;
		/* Build test packet L4 header (UDP). */
		dst += sizeof(struct rte_ipv4_hdr);
		udp_hdr = (struct rte_udp_hdr *)dst;
		udp_hdr->src_port = RTE_BE16(9); /* RFC 863 Discard. */
		udp_hdr->dst_port = RTE_BE16(9);
		udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
					      sizeof(struct rte_ether_hdr) -
					      sizeof(struct rte_ipv4_hdr));
		udp_hdr->dgram_cksum = 0;
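		/*
		 * The IPv4 and UDP checksums are left zero on purpose:
		 * cs_flags in the Ethernet segment above request L3/L4
		 * checksum offload, so hardware computes them on send.
		 */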
		/* Fill the test packet data. */
		dst += sizeof(struct rte_udp_hdr);
		for (i = sizeof(struct rte_ether_hdr) +
			 sizeof(struct rte_ipv4_hdr) +
			 sizeof(struct rte_udp_hdr);
		     i < MLX5_TXPP_TEST_PKT_SIZE; i++)
			*dst++ = (uint8_t)(i & 0xFF);
	}
wcopy:
	/* Duplicate the pattern to the next WQEs. */
	dst = (uint8_t *)(uintptr_t)wq->sq_obj.umem_buf;
	for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
		dst += wqe_size;
		rte_memcpy(dst, (void *)(uintptr_t)wq->sq_obj.umem_buf,
			   wqe_size);
	}
}

/* Creates the Clock Queue for packet pacing, returns zero on success. */
static int
mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = {
		.use_first_only = 1,
		.overrun_ignore = 1,
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
	};
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	int ret;

	sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				   MLX5_TXPP_REARM_SQ_SIZE *
				   sizeof(struct mlx5_txpp_ts),
				   0, sh->numa_node);
	if (!sh->txpp.tsa) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
		return -ENOMEM;
	}
	sh->txpp.ts_p = 0;
	sh->txpp.ts_n = 0;
	/* Create completion queue object for Clock Queue. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
				  log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
				  sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
		goto error;
	}
	wq->cq_ci = 0;
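	/*
	 * Note: use_first_only and overrun_ignore in cq_attr above make the
	 * hardware rewrite the single first CQE in place instead of walking
	 * the ring, which is why readers must fetch the timestamp and WQE
	 * counter with the atomic 128-bit access implemented further below.
	 */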
	/* Allocate memory buffer for Send Queue WQEs. */
	if (sh->txpp.test) {
		wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
					MLX5_WQE_CSEG_SIZE +
					2 * MLX5_WQE_ESEG_SIZE -
					MLX5_ESEG_MIN_INLINE_SIZE,
					MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
		wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
	} else {
		wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
	}
	/* There should be no WQE leftovers in the cyclic queue. */
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	/* Create send queue object for Clock Queue. */
	if (sh->txpp.test) {
		sq_attr.tis_lst_sz = 1;
		sq_attr.tis_num = sh->tis[0]->id;
		sq_attr.non_wire = 0;
		sq_attr.static_sq_wq = 1;
	} else {
		sq_attr.non_wire = 1;
		sq_attr.static_sq_wq = 1;
	}
	sq_attr.cqn = wq->cq_obj.cq->id;
	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
	sq_attr.wq_attr.cd_slave = 1;
	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj);
	sq_attr.wq_attr.pd = sh->cdev->pdn;
	sq_attr.ts_format =
		mlx5_ts_format_conv(sh->cdev->config.hca_attr.sq_ts_format);
	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
				  log2above(wq->sq_size),
				  &sq_attr, sh->numa_node);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
		goto error;
	}
	/* Build the WQEs in the Send Queue before moving it to Ready state. */
	mlx5_txpp_fill_wqe_clock_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	wq->sq_ci = 0;
	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Clock Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_clock_queue(sh);
	rte_errno = -ret;
	return ret;
}
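/*
 * In normal mode the Clock Queue is created with non_wire set, so its NOP
 * WQEs complete at the paced rate without putting anything on the wire,
 * while static_sq_wq keeps the prebuilt cyclic WQE ring unchanged; the
 * queue can therefore run indefinitely as a pure timing source.
 */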
/* Enable notification from the Rearm Queue CQ. */
static inline void
mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
	uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
	uint64_t db_be =
		rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq_obj.cq->id);

	mlx5_doorbell_ring(&sh->tx_uar.cq_db, db_be, db_hi,
			   &aq->cq_obj.db_rec[MLX5_CQ_ARM_DB], 0);
	aq->arm_sn++;
}

#if defined(RTE_ARCH_X86_64)
static inline int
mlx5_atomic128_compare_exchange(rte_int128_t *dst,
				rte_int128_t *exp,
				const rte_int128_t *src)
{
	uint8_t res;

	asm volatile (MPLOCKED
		      "cmpxchg16b %[dst];"
		      " sete %[res]"
		      : [dst] "=m" (dst->val[0]),
			"=a" (exp->val[0]),
			"=d" (exp->val[1]),
			[res] "=r" (res)
		      : "b" (src->val[0]),
			"c" (src->val[1]),
			"a" (exp->val[0]),
			"d" (exp->val[1]),
			"m" (dst->val[0])
		      : "memory");

	return res;
}
#endif
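/*
 * Reading a 16-byte value that hardware rewrites concurrently needs care.
 * On x86-64 the helper above is invoked with a zeroed "expected" value:
 * cmpxchg16b then (almost always) fails the comparison and, as a side
 * effect, deposits the current CQE contents into the expected-value
 * registers, yielding an atomic 128-bit read without a dedicated
 * 16-byte load instruction.
 */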
static inline void
mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
{
	/*
	 * The only CQE of Clock Queue is being continuously
	 * updated by hardware with the specified rate. We must
	 * read the timestamp and WQE completion index atomically.
	 */
#if defined(RTE_ARCH_X86_64)
	rte_int128_t src;

	memset(&src, 0, sizeof(src));
	*ts = src;
	/* if (*from == *ts) *from = *src else *ts = *from; */
	mlx5_atomic128_compare_exchange(from, ts, &src);
#else
	uint64_t *cqe = (uint64_t *)from;

	/*
	 * Power architecture does not support the 16B compare-and-swap,
	 * and ARM implements it in software; the read-retry loop below
	 * is more suitable for those targets.
	 */
	for (;;) {
		uint64_t tm, op;
		uint64_t *ps;

		rte_compiler_barrier();
		tm = rte_atomic_load_explicit(cqe + 0,
					      rte_memory_order_relaxed);
		op = rte_atomic_load_explicit(cqe + 1,
					      rte_memory_order_relaxed);
		rte_compiler_barrier();
		if (tm != rte_atomic_load_explicit(cqe + 0,
						   rte_memory_order_relaxed))
			continue;
		if (op != rte_atomic_load_explicit(cqe + 1,
						   rte_memory_order_relaxed))
			continue;
		ps = (uint64_t *)ts;
		ps[0] = tm;
		ps[1] = op;
		return;
	}
#endif
}

/* Stores the timestamp in the cache structure to share data with datapath. */
static inline void
mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
			  uint64_t ts, uint64_t ci)
{
	ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
	ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
	rte_compiler_barrier();
	rte_atomic_store_explicit(&sh->txpp.ts.ts, ts,
				  rte_memory_order_relaxed);
	rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci,
				  rte_memory_order_relaxed);
	rte_wmb();
}

/* Reads the timestamp from Clock Queue CQE and stores it in the cache. */
static inline void
mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
	union {
		rte_int128_t u128;
		struct mlx5_cqe_ts cts;
	} to;
	uint64_t ts;
	uint16_t ci;
	uint8_t opcode;

	mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
	opcode = MLX5_CQE_OPCODE(to.cts.op_own);
	if (opcode) {
		if (opcode != MLX5_CQE_INVALID) {
			/*
			 * Commit the error state if and only if
			 * we have got at least one actual completion.
			 */
			DRV_LOG(DEBUG,
				"Clock Queue error sync lost (%X).", opcode);
			rte_atomic_fetch_add_explicit
				(&sh->txpp.err_clock_queue,
				 1, rte_memory_order_relaxed);
			sh->txpp.sync_lost = 1;
		}
		return;
	}
	ci = rte_be_to_cpu_16(to.cts.wqe_counter);
	ts = rte_be_to_cpu_64(to.cts.timestamp);
	ts = mlx5_txpp_convert_rx_ts(sh, ts);
	wq->cq_ci += (ci - wq->sq_ci) & UINT16_MAX;
	wq->sq_ci = ci;
	mlx5_txpp_cache_timestamp(sh, ts, wq->cq_ci);
}

/* Waits for the first completion on Clock Queue to initialize timestamp. */
static inline void
mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	uint32_t wait;

	sh->txpp.ts_p = 0;
	sh->txpp.ts_n = 0;
	for (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {
		mlx5_txpp_update_timestamp(sh);
		if (wq->sq_ci)
			return;
		/* Wait one millisecond and try again. */
		rte_delay_us_sleep(US_PER_S / MS_PER_S);
	}
	DRV_LOG(ERR, "Unable to initialize timestamp.");
	sh->txpp.sync_lost = 1;
}

#ifdef HAVE_IBV_DEVX_EVENT
/* Gather statistics for timestamp from Clock Queue CQE. */
static inline void
mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	/* Check whether we have a valid timestamp. */
	if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
		return;
	MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
	rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
				  sh->txpp.ts.ts, rte_memory_order_relaxed);
	rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
				  sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
	if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
		sh->txpp.ts_p = 0;
	if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
		++sh->txpp.ts_n;
}
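/*
 * The tsa[] ring filled above keeps the last MLX5_TXPP_REARM_SQ_SIZE cached
 * timestamps; the xstats code uses it to derive the tx_pp_jitter (adjacent
 * completions) and tx_pp_wander (completions half a ring apart) statistics.
 */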
/* Handles Rearm Queue completions in periodic service. */
static __rte_always_inline void
mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	uint32_t cq_ci = wq->cq_ci;
	bool error = false;
	int ret;

	do {
		volatile struct mlx5_cqe *cqe;

		cqe = &wq->cq_obj.cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
		ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
		switch (ret) {
		case MLX5_CQE_STATUS_ERR:
			error = true;
			++cq_ci;
			break;
		case MLX5_CQE_STATUS_SW_OWN:
			wq->sq_ci += 2;
			++cq_ci;
			break;
		case MLX5_CQE_STATUS_HW_OWN:
			break;
		default:
			MLX5_ASSERT(false);
			break;
		}
	} while (ret != MLX5_CQE_STATUS_HW_OWN);
	if (likely(cq_ci != wq->cq_ci)) {
		/* Check whether we have missed interrupts. */
		if (cq_ci - wq->cq_ci != 1) {
			DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
			rte_atomic_fetch_add_explicit
				(&sh->txpp.err_miss_int,
				 1, rte_memory_order_relaxed);
			/* Check sync lost on wqe index. */
			if (cq_ci - wq->cq_ci >=
			    (((1UL << MLX5_WQ_INDEX_WIDTH) /
			      MLX5_TXPP_REARM) - 1))
				error = true;
		}
		/* Update doorbell record to notify hardware. */
		rte_compiler_barrier();
		*wq->cq_obj.db_rec = rte_cpu_to_be_32(cq_ci);
		rte_wmb();
		wq->cq_ci = cq_ci;
		/* Fire new requests to Rearm Queue. */
		if (error) {
			DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
			rte_atomic_fetch_add_explicit
				(&sh->txpp.err_rearm_queue,
				 1, rte_memory_order_relaxed);
			sh->txpp.sync_lost = 1;
		}
	}
}
/* Handles Clock Queue completions in periodic service. */
static __rte_always_inline void
mlx5_txpp_handle_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_update_timestamp(sh);
	mlx5_txpp_gather_timestamp(sh);
}
#endif
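/*
 * Per Rearm Queue event the service pass below drains the Rearm Queue
 * CQEs, refreshes the cached timestamp from the Clock Queue CQE, gathers
 * it for the jitter/wander statistics, re-arms the CQ for the next
 * interrupt, and rings the Rearm Queue doorbell to keep the SEND_EN/WAIT
 * chain running.
 */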
/* Invoked periodically on Rearm Queue completions. */
void
mlx5_txpp_interrupt_handler(void *cb_arg)
{
#ifndef HAVE_IBV_DEVX_EVENT
	RTE_SET_USED(cb_arg);
	return;
#else
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Process events in the loop. Only rearm completions are expected. */
	while (mlx5_glue->devx_get_event
				(sh->txpp.echan,
				 &out.event_resp,
				 sizeof(out.buf)) >=
				 (ssize_t)sizeof(out.event_resp.cookie)) {
		mlx5_txpp_handle_rearm_queue(sh);
		mlx5_txpp_handle_clock_queue(sh);
		mlx5_txpp_cq_arm(sh);
		mlx5_txpp_doorbell_rearm_queue
					(sh, sh->txpp.rearm_queue.sq_ci - 1);
	}
#endif /* HAVE_IBV_DEVX_EVENT */
}

static void
mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_os_interrupt_handler_destroy(sh->txpp.intr_handle,
					  mlx5_txpp_interrupt_handler, sh);
}

/* Attaches the interrupt handler and fires the first Rearm Queue request. */
static int
mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
{
	uint16_t event_nums[1] = {0};
	int ret;
	int fd;

	sh->txpp.err_miss_int = 0;
	sh->txpp.err_rearm_queue = 0;
	sh->txpp.err_clock_queue = 0;
	sh->txpp.err_ts_past = 0;
	sh->txpp.err_ts_future = 0;
	sh->txpp.err_ts_order = 0;
	/* Attach the interrupt handler to process Rearm Queue completions. */
	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
	ret = mlx5_os_set_nonblock_channel_fd(fd);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		rte_errno = errno;
		return -rte_errno;
	}
	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
	sh->txpp.intr_handle = mlx5_os_interrupt_handler_create
		(RTE_INTR_INSTANCE_F_SHARED, false,
		 fd, mlx5_txpp_interrupt_handler, sh);
	if (!sh->txpp.intr_handle) {
		DRV_LOG(ERR, "Failed to allocate intr_handle.");
		return -rte_errno;
	}
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_os_devx_subscribe_devx_event(sh->txpp.echan,
					sh->txpp.rearm_queue.cq_obj.cq->obj,
					sizeof(event_nums), event_nums, 0);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		return -errno;
	}
	/* Enable interrupts in the CQ. */
	mlx5_txpp_cq_arm(sh);
	/* Fire the first request on Rearm Queue. */
	mlx5_txpp_doorbell_rearm_queue(sh, sh->txpp.rearm_queue.sq_size - 1);
	mlx5_txpp_init_timestamp(sh);
	return 0;
}

/*
 * The routine initializes the packet pacing infrastructure:
 * - allocates PP context
 * - Clock CQ/SQ
 * - Rearm CQ/SQ
 * - attaches rearm interrupt handler
 * - starts Clock Queue
 *
 * Returns 0 on success, negative otherwise.
 */
static int
mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh)
{
	int tx_pp = sh->config.tx_pp;
	int ret;

	/* Store the requested pacing parameters. */
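	/*
	 * The tx_pp devarg carries the scheduling granularity in
	 * nanoseconds; a negative value requests the same granularity with
	 * the built-in test mode enabled (e.g. tx_pp=-500 paces with 500 ns
	 * ticks while sending the inline test pattern instead of NOP WQEs).
	 */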
	sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
	sh->txpp.test = !!(tx_pp < 0);
	sh->txpp.skew = sh->config.tx_skew;
	sh->txpp.freq = sh->cdev->config.hca_attr.dev_freq_khz;
	ret = mlx5_txpp_create_event_channel(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_alloc_pp_index(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_clock_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_rearm_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_start_service(sh);
	if (ret)
		goto exit;
exit:
	if (ret) {
		mlx5_txpp_stop_service(sh);
		mlx5_txpp_destroy_rearm_queue(sh);
		mlx5_txpp_destroy_clock_queue(sh);
		mlx5_txpp_free_pp_index(sh);
		mlx5_txpp_destroy_event_channel(sh);
		sh->txpp.tick = 0;
		sh->txpp.test = 0;
		sh->txpp.skew = 0;
	}
	return ret;
}

/*
 * The routine destroys the packet pacing infrastructure:
 * - detaches rearm interrupt handler
 * - Rearm CQ/SQ
 * - Clock CQ/SQ
 * - PP context
 */
static void
mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_stop_service(sh);
	mlx5_txpp_destroy_rearm_queue(sh);
	mlx5_txpp_destroy_clock_queue(sh);
	mlx5_txpp_free_pp_index(sh);
	mlx5_txpp_destroy_event_channel(sh);
	sh->txpp.tick = 0;
	sh->txpp.test = 0;
	sh->txpp.skew = 0;
}
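/*
 * The packet pacing infrastructure lives in the shared device context, so
 * it is created once per mlx5_dev_ctx_shared and reference-counted by the
 * port start/stop routines below; only the last mlx5_txpp_stop() call
 * actually destroys it.
 */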
/**
 * Creates and starts packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txpp_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;

	if (!sh->config.tx_pp) {
		/* Packet pacing is not requested for the device. */
		MLX5_ASSERT(priv->txpp_en == 0);
		return 0;
	}
	if (priv->txpp_en) {
		/* Packet pacing is already enabled for the device. */
		MLX5_ASSERT(sh->txpp.refcnt);
		return 0;
	}
	if (sh->config.tx_pp > 0) {
		err = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
		/* No flag registered means no service needed. */
		if (err < 0)
			return 0;
		err = 0;
	}
	claim_zero(pthread_mutex_lock(&sh->txpp.mutex));
	if (sh->txpp.refcnt) {
		priv->txpp_en = 1;
		++sh->txpp.refcnt;
	} else {
		err = mlx5_txpp_create(sh);
		if (!err) {
			MLX5_ASSERT(sh->txpp.tick);
			priv->txpp_en = 1;
			sh->txpp.refcnt = 1;
		} else {
			rte_errno = -err;
		}
	}
	claim_zero(pthread_mutex_unlock(&sh->txpp.mutex));
	return err;
}

/**
 * Stops and destroys packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_txpp_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	if (!priv->txpp_en) {
		/* Packet pacing is already disabled for the device. */
		return;
	}
	priv->txpp_en = 0;
	claim_zero(pthread_mutex_lock(&sh->txpp.mutex));
	MLX5_ASSERT(sh->txpp.refcnt);
	if (!sh->txpp.refcnt || --sh->txpp.refcnt) {
		claim_zero(pthread_mutex_unlock(&sh->txpp.mutex));
		return;
	}
	/* No references any more, do actual destroy. */
	mlx5_txpp_destroy(sh);
	claim_zero(pthread_mutex_unlock(&sh->txpp.mutex));
}
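/*
 * Clock readout strategy below, in order of preference: the Clock Queue
 * CQE when packet pacing is active, a direct PCI BAR clock read when the
 * device exposes one, and finally the kernel query via mlx5_read_clock().
 */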
967b94d93caSViacheslav Ovsiienko * the packet pacing module to be configured and started (tx_pp devarg).
968b94d93caSViacheslav Ovsiienko */
969b94d93caSViacheslav Ovsiienko int
970b94d93caSViacheslav Ovsiienko mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
971b94d93caSViacheslav Ovsiienko {
972b94d93caSViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private;
973b94d93caSViacheslav Ovsiienko struct mlx5_dev_ctx_shared *sh = priv->sh;
9749b31fc90SViacheslav Ovsiienko uint64_t ts;
975b94d93caSViacheslav Ovsiienko int ret;
976b94d93caSViacheslav Ovsiienko
977b94d93caSViacheslav Ovsiienko if (sh->txpp.refcnt) {
978b94d93caSViacheslav Ovsiienko struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
979a7787bb0SMichael Baum struct mlx5_cqe *cqe =
980a7787bb0SMichael Baum (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
981b94d93caSViacheslav Ovsiienko union {
982b94d93caSViacheslav Ovsiienko rte_int128_t u128;
983b94d93caSViacheslav Ovsiienko struct mlx5_cqe_ts cts;
984b94d93caSViacheslav Ovsiienko } to;
985b94d93caSViacheslav Ovsiienko
986b94d93caSViacheslav Ovsiienko mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
987b94d93caSViacheslav Ovsiienko if (to.cts.op_own >> 4) {
988b94d93caSViacheslav Ovsiienko DRV_LOG(DEBUG, "Clock Queue error sync lost.");
989e12a0166STyler Retzlaff rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
990e12a0166STyler Retzlaff 1, rte_memory_order_relaxed);
991b94d93caSViacheslav Ovsiienko sh->txpp.sync_lost = 1;
992b94d93caSViacheslav Ovsiienko return -EIO;
993b94d93caSViacheslav Ovsiienko }
994b94d93caSViacheslav Ovsiienko ts = rte_be_to_cpu_64(to.cts.timestamp);
995b94d93caSViacheslav Ovsiienko ts = mlx5_txpp_convert_rx_ts(sh, ts);
996b94d93caSViacheslav Ovsiienko *timestamp = ts;
997b94d93caSViacheslav Ovsiienko return 0;
998b94d93caSViacheslav Ovsiienko }
9999b31fc90SViacheslav Ovsiienko /* Check if we can read timestamp directly from hardware. */
1000*27918f0dSTim Martin ts = mlx5_read_pcibar_clock(dev);
1001*27918f0dSTim Martin if (ts != 0) {
10029b31fc90SViacheslav Ovsiienko *timestamp = ts;
10039b31fc90SViacheslav Ovsiienko return 0;
10049b31fc90SViacheslav Ovsiienko }
1005b94d93caSViacheslav Ovsiienko /* Not supported in isolated mode - kernel does not see the CQEs. */
1006b94d93caSViacheslav Ovsiienko if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY)
1007b94d93caSViacheslav Ovsiienko return -ENOTSUP;
1008b94d93caSViacheslav Ovsiienko ret = mlx5_read_clock(dev, timestamp);
1009b94d93caSViacheslav Ovsiienko return ret;
1010b94d93caSViacheslav Ovsiienko }
10113b025c0cSViacheslav Ovsiienko
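/*
 * [Editor's illustrative sketch, not part of mlx5_txpp.c] The routine
 * above appears to back rte_eth_read_clock() for this PMD. A minimal
 * sketch of how an application can take two raw readings to estimate
 * the device clock frequency, so Rx/Tx timestamp deltas can later be
 * converted to wall-clock units; the helper name is hypothetical.
 */
#include <rte_ethdev.h>
#include <rte_cycles.h>

static double
app_estimate_dev_clock_hz(uint16_t port_id)
{
	uint64_t c0, c1;

	if (rte_eth_read_clock(port_id, &c0) != 0)
		return 0.0; /* e.g. -ENOTSUP in isolated mode, see above */
	rte_delay_us_sleep(100 * 1000); /* sample the clock ~100 ms apart */
	if (rte_eth_read_clock(port_id, &c1) != 0)
		return 0.0;
	return (double)(c1 - c0) * 10.0; /* ticks per second */
}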
10123b025c0cSViacheslav Ovsiienko /**
10133b025c0cSViacheslav Ovsiienko * DPDK callback to clear device extended statistics.
10143b025c0cSViacheslav Ovsiienko *
10153b025c0cSViacheslav Ovsiienko * @param dev
10163b025c0cSViacheslav Ovsiienko * Pointer to Ethernet device structure.
10173b025c0cSViacheslav Ovsiienko *
10183b025c0cSViacheslav Ovsiienko * @return
10193b025c0cSViacheslav Ovsiienko * 0 on success and stats are reset, negative errno value otherwise and
10203b025c0cSViacheslav Ovsiienko * rte_errno is set.
10213b025c0cSViacheslav Ovsiienko */
10223b025c0cSViacheslav Ovsiienko int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
10233b025c0cSViacheslav Ovsiienko {
10243b025c0cSViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private;
10253b025c0cSViacheslav Ovsiienko struct mlx5_dev_ctx_shared *sh = priv->sh;
10263b025c0cSViacheslav Ovsiienko
1027e12a0166STyler Retzlaff rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
1028e12a0166STyler Retzlaff rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
1029e12a0166STyler Retzlaff rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
1030e12a0166STyler Retzlaff rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
1031e12a0166STyler Retzlaff rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
1032e12a0166STyler Retzlaff rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
10333b025c0cSViacheslav Ovsiienko return 0;
10343b025c0cSViacheslav Ovsiienko }
10353b025c0cSViacheslav Ovsiienko
10363b025c0cSViacheslav Ovsiienko /**
10373b025c0cSViacheslav Ovsiienko * Routine to retrieve names of extended device statistics
10383b025c0cSViacheslav Ovsiienko * for packet send scheduling. It appends the specific stats names
10393b025c0cSViacheslav Ovsiienko * after the parts filled by preceding modules (eth stats, etc.).
10403b025c0cSViacheslav Ovsiienko *
10413b025c0cSViacheslav Ovsiienko * @param dev
10423b025c0cSViacheslav Ovsiienko * Pointer to Ethernet device structure.
10433b025c0cSViacheslav Ovsiienko * @param[out] xstats_names
10443b025c0cSViacheslav Ovsiienko * Buffer to insert names into.
10453b025c0cSViacheslav Ovsiienko * @param n
10463b025c0cSViacheslav Ovsiienko * Number of names.
10473b025c0cSViacheslav Ovsiienko * @param n_used
10483b025c0cSViacheslav Ovsiienko * Number of names filled by preceding statistics modules.
10493b025c0cSViacheslav Ovsiienko *
10503b025c0cSViacheslav Ovsiienko * @return
10513b025c0cSViacheslav Ovsiienko * Number of xstats names.
10523b025c0cSViacheslav Ovsiienko */
10533b025c0cSViacheslav Ovsiienko int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
10543b025c0cSViacheslav Ovsiienko struct rte_eth_xstat_name *xstats_names,
10553b025c0cSViacheslav Ovsiienko unsigned int n, unsigned int n_used)
10563b025c0cSViacheslav Ovsiienko {
10573b025c0cSViacheslav Ovsiienko unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
10583b025c0cSViacheslav Ovsiienko unsigned int i;
10593b025c0cSViacheslav Ovsiienko
10603b025c0cSViacheslav Ovsiienko if (n >= n_used + n_txpp && xstats_names) {
10613b025c0cSViacheslav Ovsiienko for (i = 0; i < n_txpp; ++i) {
1062e1784075SDavid Marchand strlcpy(xstats_names[i + n_used].name,
10633b025c0cSViacheslav Ovsiienko mlx5_txpp_stat_names[i],
10643b025c0cSViacheslav Ovsiienko RTE_ETH_XSTATS_NAME_SIZE);
10653b025c0cSViacheslav Ovsiienko }
10663b025c0cSViacheslav Ovsiienko }
10673b025c0cSViacheslav Ovsiienko return n_used + n_txpp;
10683b025c0cSViacheslav Ovsiienko }
10693b025c0cSViacheslav Ovsiienko
10703b025c0cSViacheslav Ovsiienko static inline void
10713b025c0cSViacheslav Ovsiienko mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
10723b025c0cSViacheslav Ovsiienko struct mlx5_txpp_ts *tsa, uint16_t idx)
10733b025c0cSViacheslav Ovsiienko {
10743b025c0cSViacheslav Ovsiienko do {
107541c2bb63SViacheslav Ovsiienko uint64_t ts, ci;
10763b025c0cSViacheslav Ovsiienko
1077e12a0166STyler Retzlaff ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
1078e12a0166STyler Retzlaff ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
10793b025c0cSViacheslav Ovsiienko rte_compiler_barrier();
10803b025c0cSViacheslav Ovsiienko if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
10813b025c0cSViacheslav Ovsiienko continue;
1082e12a0166STyler Retzlaff if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
1083e12a0166STyler Retzlaff rte_memory_order_relaxed) != ts)
10843b025c0cSViacheslav Ovsiienko continue;
1085e12a0166STyler Retzlaff if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
1086e12a0166STyler Retzlaff rte_memory_order_relaxed) != ci)
10873b025c0cSViacheslav Ovsiienko continue;
108841c2bb63SViacheslav Ovsiienko tsa->ts = ts;
108941c2bb63SViacheslav Ovsiienko tsa->ci_ts = ci;
10903b025c0cSViacheslav Ovsiienko return;
10913b025c0cSViacheslav Ovsiienko } while (true);
10923b025c0cSViacheslav Ovsiienko }
10933b025c0cSViacheslav Ovsiienko
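/*
 * [Editor's note, illustrative] mlx5_txpp_read_tsa() above is a
 * lock-free "read until stable" snapshot: the two relaxed loads are
 * accepted only if re-reading returns the same values, and the
 * (ci ^ ts) << MLX5_CQ_INDEX_WIDTH test additionally rejects a pair
 * whose shared low bits disagree (a torn ts/ci_ts from a concurrent
 * writer). A generic sketch of the same retry pattern, using C11
 * atomics and a hypothetical two-word record:
 */
#include <stdatomic.h>
#include <stdint.h>

struct snap_rec { _Atomic uint64_t ts; _Atomic uint64_t tag; };

static void
snap_read(const struct snap_rec *r, uint64_t *ts, uint64_t *tag)
{
	uint64_t t, g;

	do {
		t = atomic_load_explicit(&r->ts, memory_order_relaxed);
		g = atomic_load_explicit(&r->tag, memory_order_relaxed);
		/* Retry until both words re-read unchanged. */
	} while (atomic_load_explicit(&r->ts, memory_order_relaxed) != t ||
		 atomic_load_explicit(&r->tag, memory_order_relaxed) != g);
	*ts = t;
	*tag = g;
}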
10943b025c0cSViacheslav Ovsiienko /*
10953b025c0cSViacheslav Ovsiienko * Jitter reflects the clock change between
10963b025c0cSViacheslav Ovsiienko * neighbouring Clock Queue completions.
10973b025c0cSViacheslav Ovsiienko */
10983b025c0cSViacheslav Ovsiienko static uint64_t
10993b025c0cSViacheslav Ovsiienko mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
11003b025c0cSViacheslav Ovsiienko {
11013b025c0cSViacheslav Ovsiienko struct mlx5_txpp_ts tsa0, tsa1;
11023b025c0cSViacheslav Ovsiienko int64_t dts, dci;
11033b025c0cSViacheslav Ovsiienko uint16_t ts_p;
11043b025c0cSViacheslav Ovsiienko
11053b025c0cSViacheslav Ovsiienko if (txpp->ts_n < 2) {
11063b025c0cSViacheslav Ovsiienko /* Not enough reports gathered yet. */
11073b025c0cSViacheslav Ovsiienko return 0;
11083b025c0cSViacheslav Ovsiienko }
11093b025c0cSViacheslav Ovsiienko do {
11103b025c0cSViacheslav Ovsiienko int ts_0, ts_1;
11113b025c0cSViacheslav Ovsiienko
11123b025c0cSViacheslav Ovsiienko ts_p = txpp->ts_p;
11133b025c0cSViacheslav Ovsiienko rte_compiler_barrier();
11143b025c0cSViacheslav Ovsiienko ts_0 = ts_p - 2;
11153b025c0cSViacheslav Ovsiienko if (ts_0 < 0)
11163b025c0cSViacheslav Ovsiienko ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
11173b025c0cSViacheslav Ovsiienko ts_1 = ts_p - 1;
11183b025c0cSViacheslav Ovsiienko if (ts_1 < 0)
11193b025c0cSViacheslav Ovsiienko ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
11203b025c0cSViacheslav Ovsiienko mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
11213b025c0cSViacheslav Ovsiienko mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
11223b025c0cSViacheslav Ovsiienko rte_compiler_barrier();
11233b025c0cSViacheslav Ovsiienko } while (ts_p != txpp->ts_p);
11243b025c0cSViacheslav Ovsiienko /* We have two neighbor reports, calculate the jitter. */
112541c2bb63SViacheslav Ovsiienko dts = tsa1.ts - tsa0.ts;
112641c2bb63SViacheslav Ovsiienko dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
112741c2bb63SViacheslav Ovsiienko (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
11283b025c0cSViacheslav Ovsiienko if (dci < 0)
11293b025c0cSViacheslav Ovsiienko dci += 1 << MLX5_CQ_INDEX_WIDTH;
11303b025c0cSViacheslav Ovsiienko dci *= txpp->tick;
11313b025c0cSViacheslav Ovsiienko return (dts > dci) ? dts - dci : dci - dts;
11323b025c0cSViacheslav Ovsiienko }
11333b025c0cSViacheslav Ovsiienko
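/*
 * [Editor's worked example, illustrative] For the jitter formula
 * above: with a hypothetical tick of 1000 ns, two completions apart
 * (dci = 2) should be 2000 ns apart on the device clock. If the two
 * CQE timestamps actually differ by dts = 2030 ns, the reported
 * jitter is |2030 - 2000| = 30 ns. The wander below uses the same
 * |dts - dci * tick| estimate, just across half of the ring.
 */
static uint64_t
txpp_jitter_worked_example(void)
{
	const int64_t tick = 1000; /* hypothetical completion period, ns */
	int64_t dts = 2030;        /* measured timestamp delta, ns */
	int64_t dci = 2;           /* completion counter delta */

	dci *= tick;
	return (dts > dci) ? dts - dci : dci - dts; /* yields 30 */
}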
11343b025c0cSViacheslav Ovsiienko /*
11353b025c0cSViacheslav Ovsiienko * Wander reflects the long-term clock change
11363b025c0cSViacheslav Ovsiienko * over the entire length of all Clock Queue completions.
11373b025c0cSViacheslav Ovsiienko */
11383b025c0cSViacheslav Ovsiienko static uint64_t
11393b025c0cSViacheslav Ovsiienko mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
11403b025c0cSViacheslav Ovsiienko {
11413b025c0cSViacheslav Ovsiienko struct mlx5_txpp_ts tsa0, tsa1;
11423b025c0cSViacheslav Ovsiienko int64_t dts, dci;
11433b025c0cSViacheslav Ovsiienko uint16_t ts_p;
11443b025c0cSViacheslav Ovsiienko
11453b025c0cSViacheslav Ovsiienko if (txpp->ts_n < MLX5_TXPP_REARM_SQ_SIZE) {
11463b025c0cSViacheslav Ovsiienko /* Not enough reports gathered yet. */
11473b025c0cSViacheslav Ovsiienko return 0;
11483b025c0cSViacheslav Ovsiienko }
11493b025c0cSViacheslav Ovsiienko do {
11503b025c0cSViacheslav Ovsiienko int ts_0, ts_1;
11513b025c0cSViacheslav Ovsiienko
11523b025c0cSViacheslav Ovsiienko ts_p = txpp->ts_p;
11533b025c0cSViacheslav Ovsiienko rte_compiler_barrier();
11543b025c0cSViacheslav Ovsiienko ts_0 = ts_p - MLX5_TXPP_REARM_SQ_SIZE / 2 - 1;
11553b025c0cSViacheslav Ovsiienko if (ts_0 < 0)
11563b025c0cSViacheslav Ovsiienko ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
11573b025c0cSViacheslav Ovsiienko ts_1 = ts_p - 1;
11583b025c0cSViacheslav Ovsiienko if (ts_1 < 0)
11593b025c0cSViacheslav Ovsiienko ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
11603b025c0cSViacheslav Ovsiienko mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
11613b025c0cSViacheslav Ovsiienko mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
11623b025c0cSViacheslav Ovsiienko rte_compiler_barrier();
11633b025c0cSViacheslav Ovsiienko } while (ts_p != txpp->ts_p);
11643b025c0cSViacheslav Ovsiienko /* We have two reports half of the ring apart, calculate the wander. */
116541c2bb63SViacheslav Ovsiienko dts = tsa1.ts - tsa0.ts;
116641c2bb63SViacheslav Ovsiienko dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
116741c2bb63SViacheslav Ovsiienko (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
11683b025c0cSViacheslav Ovsiienko if (dci < 0) dci += 1 << MLX5_CQ_INDEX_WIDTH;
11693b025c0cSViacheslav Ovsiienko dci *= txpp->tick;
11703b025c0cSViacheslav Ovsiienko return (dts > dci) ? dts - dci : dci - dts;
11713b025c0cSViacheslav Ovsiienko }
11723b025c0cSViacheslav Ovsiienko
11733b025c0cSViacheslav Ovsiienko /**
11743b025c0cSViacheslav Ovsiienko * Routine to retrieve extended device statistics
11753b025c0cSViacheslav Ovsiienko * for packet send scheduling. It appends the specific statistics
11763b025c0cSViacheslav Ovsiienko * after the parts filled by preceding modules (eth stats, etc.).
11773b025c0cSViacheslav Ovsiienko *
11783b025c0cSViacheslav Ovsiienko * @param dev
11793b025c0cSViacheslav Ovsiienko * Pointer to Ethernet device.
11803b025c0cSViacheslav Ovsiienko * @param[out] stats
11813b025c0cSViacheslav Ovsiienko * Pointer to rte extended stats table.
11823b025c0cSViacheslav Ovsiienko * @param n
11833b025c0cSViacheslav Ovsiienko * The size of the stats table.
11843b025c0cSViacheslav Ovsiienko * @param n_used
11853b025c0cSViacheslav Ovsiienko * Number of stats filled by preceding statistics modules.
11863b025c0cSViacheslav Ovsiienko *
11873b025c0cSViacheslav Ovsiienko * @return
11883b025c0cSViacheslav Ovsiienko * Number of extended stats on success and the stats table is filled,
11893b025c0cSViacheslav Ovsiienko * negative on error and rte_errno is set.
11903b025c0cSViacheslav Ovsiienko */
11913b025c0cSViacheslav Ovsiienko int
11923b025c0cSViacheslav Ovsiienko mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
11933b025c0cSViacheslav Ovsiienko struct rte_eth_xstat *stats,
11943b025c0cSViacheslav Ovsiienko unsigned int n, unsigned int n_used)
11953b025c0cSViacheslav Ovsiienko {
11963b025c0cSViacheslav Ovsiienko unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
11973b025c0cSViacheslav Ovsiienko
11983b025c0cSViacheslav Ovsiienko if (n >= n_used + n_txpp && stats) {
11993b025c0cSViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private;
12003b025c0cSViacheslav Ovsiienko struct mlx5_dev_ctx_shared *sh = priv->sh;
12013b025c0cSViacheslav Ovsiienko unsigned int i;
12023b025c0cSViacheslav Ovsiienko
12033b025c0cSViacheslav Ovsiienko for (i = 0; i < n_txpp; ++i)
12043b025c0cSViacheslav Ovsiienko stats[n_used + i].id = n_used + i;
12053b025c0cSViacheslav Ovsiienko stats[n_used + 0].value =
1206e12a0166STyler Retzlaff rte_atomic_load_explicit(&sh->txpp.err_miss_int,
1207e12a0166STyler Retzlaff rte_memory_order_relaxed);
12083b025c0cSViacheslav Ovsiienko stats[n_used + 1].value =
1209e12a0166STyler Retzlaff rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
1210e12a0166STyler Retzlaff rte_memory_order_relaxed);
12113b025c0cSViacheslav Ovsiienko stats[n_used + 2].value =
1212e12a0166STyler Retzlaff rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
1213e12a0166STyler Retzlaff rte_memory_order_relaxed);
12143b025c0cSViacheslav Ovsiienko stats[n_used + 3].value =
1215e12a0166STyler Retzlaff rte_atomic_load_explicit(&sh->txpp.err_ts_past,
1216e12a0166STyler Retzlaff rte_memory_order_relaxed);
12173b025c0cSViacheslav Ovsiienko stats[n_used + 4].value =
1218e12a0166STyler Retzlaff rte_atomic_load_explicit(&sh->txpp.err_ts_future,
1219e12a0166STyler Retzlaff rte_memory_order_relaxed);
1220a31aa37bSViacheslav Ovsiienko stats[n_used + 5].value =
1221e12a0166STyler Retzlaff rte_atomic_load_explicit(&sh->txpp.err_ts_order,
1222e12a0166STyler Retzlaff rte_memory_order_relaxed);
1223a31aa37bSViacheslav Ovsiienko stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
1224a31aa37bSViacheslav Ovsiienko stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
1225a31aa37bSViacheslav Ovsiienko stats[n_used + 8].value = sh->txpp.sync_lost;
12263b025c0cSViacheslav Ovsiienko }
12273b025c0cSViacheslav Ovsiienko return n_used + n_txpp;
12283b025c0cSViacheslav Ovsiienko }
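/*
 * [Editor's illustrative sketch, not part of mlx5_txpp.c] The xstats
 * callbacks above surface the mlx5_txpp_stat_names counters through
 * the generic ethdev xstats API. A minimal application-side dump of
 * just the packet pacing counters; the helper name is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
app_dump_txpp_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *vals = NULL;
	int n, i;

	/* First call with NULL returns the required array size. */
	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;
	names = calloc(n, sizeof(*names));
	vals = calloc(n, sizeof(*vals));
	if (names == NULL || vals == NULL)
		goto out;
	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, vals, n) != n)
		goto out;
	for (i = 0; i < n; i++)
		if (strncmp(names[i].name, "tx_pp_", 6) == 0)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, vals[i].value);
out:
	free(names);
	free(vals);
}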