/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <fcntl.h>
#include <stdint.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"

static const char * const mlx5_txpp_stat_names[] = {
	"tx_pp_missed_interrupt_errors", /* Missed service interrupt. */
	"tx_pp_rearm_queue_errors", /* Rearm Queue errors. */
	"tx_pp_clock_queue_errors", /* Clock Queue errors. */
	"tx_pp_timestamp_past_errors", /* Timestamp in the past. */
	"tx_pp_timestamp_future_errors", /* Timestamp in the distant future. */
	"tx_pp_jitter", /* Timestamp jitter (one Clock Queue completion). */
	"tx_pp_wander", /* Timestamp wander (half of Clock Queue CQEs). */
	"tx_pp_sync_lost", /* Scheduling synchronization lost. */
};

/* Destroy Event Queue Notification Channel. */
static void
mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
{
	if (sh->txpp.echan) {
		mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
		sh->txpp.echan = NULL;
	}
}

/* Create Event Queue Notification Channel. */
static int
mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
{
	MLX5_ASSERT(!sh->txpp.echan);
	sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!sh->txpp.echan) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
		return -rte_errno;
	}
	return 0;
}

static void
mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	if (sh->txpp.pp) {
		mlx5_glue->dv_free_pp(sh->txpp.pp);
		sh->txpp.pp = NULL;
		sh->txpp.pp_id = 0;
	}
#else
	RTE_SET_USED(sh);
	DRV_LOG(ERR, "Freeing pacing index is not supported.");
#endif
}

/* Allocate Packet Pacing index from kernel via mlx5dv call. */
static int
mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
	uint64_t rate;

	MLX5_ASSERT(!sh->txpp.pp);
	memset(&pp, 0, sizeof(pp));
	rate = NS_PER_S / sh->txpp.tick;
	if (rate * sh->txpp.tick != NS_PER_S)
		DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
	if (sh->txpp.test) {
		uint32_t len;

		len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
			      (size_t)RTE_ETHER_MIN_LEN);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 burst_upper_bound, len);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 typical_packet_size, len);
		/* Convert packets per second into kilobits. */
		rate = (rate * len) / (1000ul / CHAR_BIT);
		DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
	}
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
		 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
	sh->txpp.pp = mlx5_glue->dv_alloc_pp
				(sh->ctx, sizeof(pp), &pp,
				 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (sh->txpp.pp == NULL) {
		DRV_LOG(ERR, "Failed to allocate packet pacing index.");
		rte_errno = errno;
		return -errno;
	}
	if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
		DRV_LOG(ERR, "Zero packet pacing index allocated.");
		mlx5_txpp_free_pp_index(sh);
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
	return 0;
#else
	RTE_SET_USED(sh);
	DRV_LOG(ERR, "Allocating pacing index is not supported.");
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}
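/*
 * Note: the Rearm Queue and the Clock Queue below share the same
 * mlx5_txpp_wq layout (SQ/CQ objects, umems and doorbell records),
 * so a single helper releases the resources of either queue.
 */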
static void
mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
{
	if (wq->sq)
		claim_zero(mlx5_devx_cmd_destroy(wq->sq));
	if (wq->sq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
	if (wq->sq_buf)
		mlx5_free((void *)(uintptr_t)wq->sq_buf);
	if (wq->cq)
		claim_zero(mlx5_devx_cmd_destroy(wq->cq));
	if (wq->cq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
	if (wq->cq_buf)
		mlx5_free((void *)(uintptr_t)wq->cq_buf);
	memset(wq, 0, sizeof(*wq));
}

static void
mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;

	mlx5_txpp_destroy_send_queue(wq);
}

static void
mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;

	mlx5_txpp_destroy_send_queue(wq);
	if (sh->txpp.tsa) {
		mlx5_free(sh->txpp.tsa);
		sh->txpp.tsa = NULL;
	}
}

static void
mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	union {
		uint32_t w32[2];
		uint64_t w64;
	} cs;
	void *reg_addr;

	wq->sq_ci = ci + 1;
	cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
			(wq->wqes[ci & (wq->sq_size - 1)].ctrl[0]) |
			(ci - 1) << 8);
	cs.w32[1] = wq->wqes[ci & (wq->sq_size - 1)].ctrl[1];
	/* Update SQ doorbell record with new SQ ci. */
	rte_compiler_barrier();
	*wq->sq_dbrec = rte_cpu_to_be_32(wq->sq_ci);
	/* Make sure the doorbell record is updated. */
	rte_wmb();
	/* Write to doorbell register to start processing. */
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	__mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
	rte_wmb();
}

static void
mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
	uint32_t i;

	for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
		++cqe;
	}
}
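/*
 * Each loop iteration below builds a pair of WQEs: a SEND_EN targeting
 * the Clock Queue SQ (slave WQE index) followed by a WAIT targeting the
 * Clock Queue CQ (slave CQE index).
 */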
static void
mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
	uint32_t i;

	for (i = 0; i < wq->sq_size; i += 2) {
		struct mlx5_wqe_cseg *cs;
		struct mlx5_wqe_qseg *qs;
		uint32_t index;

		/* Build SEND_EN request with slave WQE index. */
		cs = &wqe[i + 0].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
		/* Build WAIT request with slave CQE index. */
		cs = &wqe[i + 1].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
	}
}

/* Creates the Rearm Queue to fire the requests to Clock Queue in realtime. */
static int
mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	size_t page_size;
	uint32_t umem_size, umem_dbrec;
	int ret;

	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		return -ENOMEM;
	}
	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
				 page_size, sh->numa_node);
	if (!wq->cq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
		return -ENOMEM;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->cq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->cq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
		goto error;
	}
	/* Create completion queue object for Rearm Queue. */
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
	cq_attr.eqn = sh->eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = 0;
	cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = umem_dbrec;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
	if (!wq->cq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
		goto error;
	}
	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
	wq->cq_ci = 0;
	wq->arm_sn = 0;
	/* Mark all CQEs initially as invalid. */
	mlx5_txpp_fill_cqe_rearm_queue(sh);
	/*
	 * Allocate memory buffer for Send Queue WQEs.
	 * There should be no WQE leftovers in the cyclic queue.
	 */
	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	umem_size = MLX5_WQE_SIZE * wq->sq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
				 page_size, sh->numa_node);
	if (!wq->sq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->sq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->sq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
		goto error;
	}
	/* Create send queue object for Rearm Queue. */
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.tis_lst_sz = 1;
	sq_attr.tis_num = sh->tis->id;
	sq_attr.cqn = wq->cq->id;
	sq_attr.cd_master = 1;
	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = umem_dbrec;
	sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
	sq_attr.wq_attr.wq_umem_offset = 0;
	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
	if (!wq->sq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
		goto error;
	}
	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
				   MLX5_SND_DBR * sizeof(uint32_t));
	/* Build the WQEs in the Send Queue before going to Ready state. */
	mlx5_txpp_fill_wqe_rearm_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_rearm_queue(sh);
	rte_errno = -ret;
	return ret;
}
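/*
 * The Clock Queue WQE contents depend on the mode: in normal operation
 * every WQE is a NOP, while in test mode (negative tx_pp devarg) every
 * WQE is a SEND carrying an inline test packet, as filled in below.
 */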
static void
mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
	struct mlx5_wqe_cseg *cs = &wqe->cseg;
	uint32_t wqe_size, opcode, i;
	uint8_t *dst;

	/* For test purposes fill the WQ with SEND inline packet. */
	if (sh->txpp.test) {
		wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
				     MLX5_WQE_CSEG_SIZE +
				     2 * MLX5_WQE_ESEG_SIZE -
				     MLX5_ESEG_MIN_INLINE_SIZE,
				     MLX5_WSEG_SIZE);
		opcode = MLX5_OPCODE_SEND;
	} else {
		wqe_size = MLX5_WSEG_SIZE;
		opcode = MLX5_OPCODE_NOP;
	}
	cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
	cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) |
				     (wqe_size / MLX5_WSEG_SIZE));
	cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
	cs->misc = RTE_BE32(0);
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
	if (sh->txpp.test) {
		struct mlx5_wqe_eseg *es = &wqe->eseg;
		struct rte_ether_hdr *eth_hdr;
		struct rte_ipv4_hdr *ip_hdr;
		struct rte_udp_hdr *udp_hdr;

		/* Build the inline test packet pattern. */
		MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
		MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
			    (sizeof(struct rte_ether_hdr) +
			     sizeof(struct rte_ipv4_hdr)));
		es->flags = 0;
		es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		es->swp_offs = 0;
		es->metadata = 0;
		es->swp_flags = 0;
		es->mss = 0;
		es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
		/* Build test packet L2 header (Ethernet). */
		dst = (uint8_t *)&es->inline_data;
		eth_hdr = (struct rte_ether_hdr *)dst;
		rte_eth_random_addr(&eth_hdr->d_addr.addr_bytes[0]);
		rte_eth_random_addr(&eth_hdr->s_addr.addr_bytes[0]);
		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		/* Build test packet L3 header (IP v4). */
		dst += sizeof(struct rte_ether_hdr);
		ip_hdr = (struct rte_ipv4_hdr *)dst;
		ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
		ip_hdr->type_of_service = 0;
		ip_hdr->fragment_offset = 0;
		ip_hdr->time_to_live = 64;
		ip_hdr->next_proto_id = IPPROTO_UDP;
		ip_hdr->packet_id = 0;
		ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
						sizeof(struct rte_ether_hdr));
		/* Use RFC5735 / RFC2544 reserved network test addresses. */
		ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 1);
		ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 2);
		if (MLX5_TXPP_TEST_PKT_SIZE <
		    (sizeof(struct rte_ether_hdr) +
		     sizeof(struct rte_ipv4_hdr) +
		     sizeof(struct rte_udp_hdr)))
			goto wcopy;
		/* Build test packet L4 header (UDP). */
		dst += sizeof(struct rte_ipv4_hdr);
		udp_hdr = (struct rte_udp_hdr *)dst;
		udp_hdr->src_port = RTE_BE16(9); /* RFC863 Discard. */
		udp_hdr->dst_port = RTE_BE16(9);
		udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
					      sizeof(struct rte_ether_hdr) -
					      sizeof(struct rte_ipv4_hdr));
		udp_hdr->dgram_cksum = 0;
		/* Fill the test packet data. */
		dst += sizeof(struct rte_udp_hdr);
		for (i = sizeof(struct rte_ether_hdr) +
			 sizeof(struct rte_ipv4_hdr) +
			 sizeof(struct rte_udp_hdr);
		     i < MLX5_TXPP_TEST_PKT_SIZE; i++)
			*dst++ = (uint8_t)(i & 0xFF);
	}
wcopy:
	/* Duplicate the pattern to the next WQEs. */
	dst = (uint8_t *)(uintptr_t)wq->sq_buf;
	for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
		dst += wqe_size;
		rte_memcpy(dst, (void *)(uintptr_t)wq->sq_buf, wqe_size);
	}
}
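/*
 * The Clock Queue CQ is created with use_first_only and overrun_ignore
 * set, so the single (first) CQE is continuously refreshed by the
 * hardware with the latest timestamp and WQE counter.
 */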
/* Creates the Clock Queue for packet pacing, returns zero on success. */
static int
mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	size_t page_size;
	uint32_t umem_size, umem_dbrec;
	int ret;

	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		return -ENOMEM;
	}
	sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				   MLX5_TXPP_REARM_SQ_SIZE *
				   sizeof(struct mlx5_txpp_ts),
				   0, sh->numa_node);
	if (!sh->txpp.tsa) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
		return -ENOMEM;
	}
	sh->txpp.ts_p = 0;
	sh->txpp.ts_n = 0;
	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
				 page_size, sh->numa_node);
	if (!wq->cq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
		return -ENOMEM;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->cq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->cq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
		goto error;
	}
	/* Create completion queue object for Clock Queue. */
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
	cq_attr.use_first_only = 1;
	cq_attr.overrun_ignore = 1;
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
	cq_attr.eqn = sh->eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = 0;
	cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = umem_dbrec;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
	if (!wq->cq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
		goto error;
	}
	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
	wq->cq_ci = 0;
	/* Allocate memory buffer for Send Queue WQEs. */
	if (sh->txpp.test) {
		wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
					MLX5_WQE_CSEG_SIZE +
					2 * MLX5_WQE_ESEG_SIZE -
					MLX5_ESEG_MIN_INLINE_SIZE,
					MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
		wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
	} else {
		wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
	}
	/* There should not be WQE leftovers in the cyclic queue. */
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	umem_size = MLX5_WQE_SIZE * wq->sq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
				 page_size, sh->numa_node);
	if (!wq->sq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->sq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->sq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
		goto error;
	}
	/* Create send queue object for Clock Queue. */
	if (sh->txpp.test) {
		sq_attr.tis_lst_sz = 1;
		sq_attr.tis_num = sh->tis->id;
		sq_attr.non_wire = 0;
		sq_attr.static_sq_wq = 1;
	} else {
		sq_attr.non_wire = 1;
		sq_attr.static_sq_wq = 1;
	}
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.cqn = wq->cq->id;
	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
	sq_attr.wq_attr.cd_slave = 1;
	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = umem_dbrec;
	sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
	/* umem_offset must be zero for static_sq_wq queue. */
	sq_attr.wq_attr.wq_umem_offset = 0;
	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
	if (!wq->sq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
		goto error;
	}
	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
				   MLX5_SND_DBR * sizeof(uint32_t));
	/* Build the WQEs in the Send Queue before going to Ready state. */
	mlx5_txpp_fill_wqe_clock_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	wq->sq_ci = 0;
	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Clock Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_clock_queue(sh);
	rte_errno = -ret;
	return ret;
}
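/*
 * The arm doorbell written below combines the arm sequence number, the
 * arm command and the current CQ consumer index; it is stored in the
 * CQ doorbell record and written to the UAR CQ doorbell register so
 * that the next Rearm Queue completion generates an event.
 */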
/* Enable notification from the Rearm Queue CQ. */
static inline void
mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
{
	void *base_addr;

	struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
	uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
	uint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);
	base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
	uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);

	rte_compiler_barrier();
	aq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	aq->arm_sn++;
}

#if defined(RTE_ARCH_X86_64)
static inline int
mlx5_atomic128_compare_exchange(rte_int128_t *dst,
				rte_int128_t *exp,
				const rte_int128_t *src)
{
	uint8_t res;

	asm volatile (MPLOCKED
		      "cmpxchg16b %[dst];"
		      " sete %[res]"
		      : [dst] "=m" (dst->val[0]),
			"=a" (exp->val[0]),
			"=d" (exp->val[1]),
			[res] "=r" (res)
		      : "b" (src->val[0]),
			"c" (src->val[1]),
			"a" (exp->val[0]),
			"d" (exp->val[1]),
			"m" (dst->val[0])
		      : "memory");

	return res;
}
#endif

static inline void
mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
{
	/*
	 * The only CQE of Clock Queue is being continuously
	 * updated by hardware at the specified rate. We have to
	 * read the timestamp and WQE completion index atomically.
	 */
#if defined(RTE_ARCH_X86_64)
	rte_int128_t src;

	memset(&src, 0, sizeof(src));
	*ts = src;
	/* if (*from == *ts) *from = *src else *ts = *from; */
	mlx5_atomic128_compare_exchange(from, ts, &src);
#else
	uint64_t *cqe = (uint64_t *)from;

	/*
	 * Power architecture does not support 16B compare-and-swap and
	 * ARM implements it in software; the read-and-retry loop below
	 * is more suitable here.
	 */
	for (;;) {
		uint64_t tm, op;
		uint64_t *ps;

		rte_compiler_barrier();
		tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
		op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
			continue;
		if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
			continue;
		ps = (uint64_t *)ts;
		ps[0] = tm;
		ps[1] = op;
		return;
	}
#endif
}

/* Stores timestamp in the cache structure to share data with datapath. */
static inline void
mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
			  uint64_t ts, uint64_t ci)
{
	ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
	ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
	rte_compiler_barrier();
	__atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
	rte_wmb();
}
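/*
 * Note: the packing above stores the CQ index in the upper
 * MLX5_CQ_INDEX_WIDTH bits of ci_ts and the low timestamp bits in the
 * remainder, so readers can detect a torn ts/ci_ts pair by checking
 * that the low bits of both words match (see mlx5_txpp_read_tsa()).
 */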
/* Reads timestamp from Clock Queue CQE and stores in the cache. */
static inline void
mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
	union {
		rte_int128_t u128;
		struct mlx5_cqe_ts cts;
	} to;
	uint64_t ts;
	uint16_t ci;

	static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
		      "Wrong timestamp CQE part size");
	mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
	if (to.cts.op_own >> 4) {
		DRV_LOG(DEBUG, "Clock Queue error sync lost.");
		__atomic_fetch_add(&sh->txpp.err_clock_queue,
				   1, __ATOMIC_RELAXED);
		sh->txpp.sync_lost = 1;
		return;
	}
	ci = rte_be_to_cpu_16(to.cts.wqe_counter);
	ts = rte_be_to_cpu_64(to.cts.timestamp);
	ts = mlx5_txpp_convert_rx_ts(sh, ts);
	wq->cq_ci += (ci - wq->sq_ci) & UINT16_MAX;
	wq->sq_ci = ci;
	mlx5_txpp_cache_timestamp(sh, ts, wq->cq_ci);
}

/* Waits for the first completion on Clock Queue to init timestamp. */
static inline void
mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	uint32_t wait;

	sh->txpp.ts_p = 0;
	sh->txpp.ts_n = 0;
	for (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {
		mlx5_txpp_update_timestamp(sh);
		if (wq->sq_ci)
			return;
		/* Wait one millisecond and try again. */
		rte_delay_us_sleep(US_PER_S / MS_PER_S);
	}
	DRV_LOG(ERR, "Unable to initialize timestamp.");
	sh->txpp.sync_lost = 1;
}

#ifdef HAVE_IBV_DEVX_EVENT
/* Gather statistics for timestamp from Clock Queue CQE. */
static inline void
mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	/* Check whether we have a valid timestamp. */
	if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
		return;
	MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
			 sh->txpp.ts.ts, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
			 sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
	if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
		sh->txpp.ts_p = 0;
	if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
		++sh->txpp.ts_n;
}
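/*
 * Each Rearm Queue completion covers a SEND_EN/WAIT pair of WQEs,
 * which is why the handler below advances the SQ consumer index by
 * two per software-owned CQE.
 */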
/* Handles Rearm Queue completions in periodic service. */
static __rte_always_inline void
mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	uint32_t cq_ci = wq->cq_ci;
	bool error = false;
	int ret;

	do {
		volatile struct mlx5_cqe *cqe;

		cqe = &wq->cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
		ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
		switch (ret) {
		case MLX5_CQE_STATUS_ERR:
			error = true;
			++cq_ci;
			break;
		case MLX5_CQE_STATUS_SW_OWN:
			wq->sq_ci += 2;
			++cq_ci;
			break;
		case MLX5_CQE_STATUS_HW_OWN:
			break;
		default:
			MLX5_ASSERT(false);
			break;
		}
	} while (ret != MLX5_CQE_STATUS_HW_OWN);
	if (likely(cq_ci != wq->cq_ci)) {
		/* Check whether we have missed interrupts. */
		if (cq_ci - wq->cq_ci != 1) {
			DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
			__atomic_fetch_add(&sh->txpp.err_miss_int,
					   1, __ATOMIC_RELAXED);
			/* Check sync lost on wqe index. */
			if (cq_ci - wq->cq_ci >=
			    (((1UL << MLX5_WQ_INDEX_WIDTH) /
			      MLX5_TXPP_REARM) - 1))
				error = true;
		}
		/* Update doorbell record to notify hardware. */
		rte_compiler_barrier();
		*wq->cq_dbrec = rte_cpu_to_be_32(cq_ci);
		rte_wmb();
		wq->cq_ci = cq_ci;
		/* Fire new requests to Rearm Queue. */
		if (error) {
			DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
			__atomic_fetch_add(&sh->txpp.err_rearm_queue,
					   1, __ATOMIC_RELAXED);
			sh->txpp.sync_lost = 1;
		}
	}
}

/* Handles Clock Queue completions in periodic service. */
static __rte_always_inline void
mlx5_txpp_handle_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_update_timestamp(sh);
	mlx5_txpp_gather_timestamp(sh);
}
#endif

/* Invoked periodically on Rearm Queue completions. */
void
mlx5_txpp_interrupt_handler(void *cb_arg)
{
#ifndef HAVE_IBV_DEVX_EVENT
	RTE_SET_USED(cb_arg);
	return;
#else
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Process events in the loop. Only rearm completions are expected. */
	while (mlx5_glue->devx_get_event
			(sh->txpp.echan,
			 &out.event_resp,
			 sizeof(out.buf)) >=
			 (ssize_t)sizeof(out.event_resp.cookie)) {
		mlx5_txpp_handle_rearm_queue(sh);
		mlx5_txpp_handle_clock_queue(sh);
		mlx5_txpp_cq_arm(sh);
		mlx5_txpp_doorbell_rearm_queue
			(sh, sh->txpp.rearm_queue.sq_ci - 1);
	}
#endif /* HAVE_IBV_DEVX_EVENT */
}

static void
mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
{
	if (!sh->txpp.intr_handle.fd)
		return;
	mlx5_intr_callback_unregister(&sh->txpp.intr_handle,
				      mlx5_txpp_interrupt_handler, sh);
	sh->txpp.intr_handle.fd = 0;
}
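/*
 * Note: after the initial doorbell in mlx5_txpp_start_service() the
 * Rearm Queue is re-armed and re-doorbelled from the interrupt handler
 * on every event, so the scheduling loop keeps running without a
 * dedicated polling thread.
 */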
/* Attach the interrupt handler and fire the first Rearm Queue request. */
static int
mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
{
	uint16_t event_nums[1] = {0};
	int ret;
	int fd;

	sh->txpp.err_miss_int = 0;
	sh->txpp.err_rearm_queue = 0;
	sh->txpp.err_clock_queue = 0;
	sh->txpp.err_ts_past = 0;
	sh->txpp.err_ts_future = 0;
	/* Attach interrupt handler to process Rearm Queue completions. */
	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
	ret = mlx5_os_set_nonblock_channel_fd(fd);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		rte_errno = errno;
		return -rte_errno;
	}
	memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
	sh->txpp.intr_handle.fd = fd;
	sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&sh->txpp.intr_handle,
				       mlx5_txpp_interrupt_handler, sh)) {
		sh->txpp.intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.",
			rte_errno);
		return -rte_errno;
	}
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_glue->devx_subscribe_devx_event(sh->txpp.echan,
						   sh->txpp.rearm_queue.cq->obj,
						   sizeof(event_nums),
						   event_nums, 0);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		return -errno;
	}
	/* Enable interrupts in the CQ. */
	mlx5_txpp_cq_arm(sh);
	/* Fire the first request on Rearm Queue. */
	mlx5_txpp_doorbell_rearm_queue(sh, sh->txpp.rearm_queue.sq_size - 1);
	mlx5_txpp_init_timestamp(sh);
	return 0;
}

/*
 * The routine initializes the packet pacing infrastructure:
 * - allocates PP context
 * - Clock CQ/SQ
 * - Rearm CQ/SQ
 * - attaches rearm interrupt handler
 * - starts Clock Queue
 *
 * Returns 0 on success, negative otherwise
 */
static int
mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
{
	int tx_pp = priv->config.tx_pp;
	int ret;

	/* Store the requested pacing parameters. */
	sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
	sh->txpp.test = !!(tx_pp < 0);
	sh->txpp.skew = priv->config.tx_skew;
	sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
	ret = mlx5_txpp_create_event_channel(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_alloc_pp_index(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_clock_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_rearm_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_start_service(sh);
	if (ret)
		goto exit;
exit:
	if (ret) {
		mlx5_txpp_stop_service(sh);
		mlx5_txpp_destroy_rearm_queue(sh);
		mlx5_txpp_destroy_clock_queue(sh);
		mlx5_txpp_free_pp_index(sh);
		mlx5_txpp_destroy_event_channel(sh);
		sh->txpp.tick = 0;
		sh->txpp.test = 0;
		sh->txpp.skew = 0;
	}
	return ret;
}

/*
 * The routine destroys the packet pacing infrastructure:
 * - detaches rearm interrupt handler
 * - Rearm CQ/SQ
 * - Clock CQ/SQ
 * - PP context
 */
static void
mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_stop_service(sh);
	mlx5_txpp_destroy_rearm_queue(sh);
	mlx5_txpp_destroy_clock_queue(sh);
	mlx5_txpp_free_pp_index(sh);
	mlx5_txpp_destroy_event_channel(sh);
	sh->txpp.tick = 0;
	sh->txpp.test = 0;
	sh->txpp.skew = 0;
}

/**
 * Creates and starts packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txpp_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;
	int ret;

	if (!priv->config.tx_pp) {
		/* Packet pacing is not requested for the device. */
		MLX5_ASSERT(priv->txpp_en == 0);
		return 0;
	}
	if (priv->txpp_en) {
		/* Packet pacing is already enabled for the device. */
		MLX5_ASSERT(sh->txpp.refcnt);
		return 0;
	}
	if (priv->config.tx_pp > 0) {
		ret = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
		if (ret < 0)
			return 0;
	}
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	if (sh->txpp.refcnt) {
		priv->txpp_en = 1;
		++sh->txpp.refcnt;
	} else {
		err = mlx5_txpp_create(sh, priv);
		if (!err) {
			MLX5_ASSERT(sh->txpp.tick);
			priv->txpp_en = 1;
			sh->txpp.refcnt = 1;
		} else {
			rte_errno = -err;
		}
	}
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	return err;
}
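/*
 * Note: the packet pacing infrastructure lives in the shared device
 * context (sh->txpp) and is reference counted under sh->txpp.mutex, so
 * multiple ports on the same device reuse a single Clock/Rearm Queue
 * pair.
 */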
/**
 * Stops and destroys packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_txpp_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	if (!priv->txpp_en) {
		/* Packet pacing is already disabled for the device. */
		return;
	}
	priv->txpp_en = 0;
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	MLX5_ASSERT(sh->txpp.refcnt);
	if (!sh->txpp.refcnt || --sh->txpp.refcnt) {
		ret = pthread_mutex_unlock(&sh->txpp.mutex);
		MLX5_ASSERT(!ret);
		RTE_SET_USED(ret);
		return;
	}
	/* No references any more, do actual destroy. */
	mlx5_txpp_destroy(sh);
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
}

/*
 * Read the current clock counter of an Ethernet device.
 *
 * This returns the current raw clock value of an Ethernet device. It is
 * a raw amount of ticks, with no given time reference.
 * The value returned here is from the same clock as the one
 * filling the timestamp field of Rx/Tx packets when using hardware
 * timestamp offload. Therefore it can be used to compute a precise
 * conversion of the device clock to the real time.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param timestamp
 *   Pointer to the uint64_t that holds the raw clock value.
 *
 * @return
 *   - 0: Success.
 *   - -ENOTSUP: The function is not supported in this mode. Requires
 *     packet pacing module configured and started (tx_pp devarg)
 */
int
mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	if (sh->txpp.refcnt) {
		struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
		struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
		union {
			rte_int128_t u128;
			struct mlx5_cqe_ts cts;
		} to;
		uint64_t ts;

		mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp,
				     &to.u128);
		if (to.cts.op_own >> 4) {
			DRV_LOG(DEBUG, "Clock Queue error sync lost.");
			__atomic_fetch_add(&sh->txpp.err_clock_queue,
					   1, __ATOMIC_RELAXED);
			sh->txpp.sync_lost = 1;
			return -EIO;
		}
		ts = rte_be_to_cpu_64(to.cts.timestamp);
		ts = mlx5_txpp_convert_rx_ts(sh, ts);
		*timestamp = ts;
		return 0;
	}
	/* Not supported in isolated mode - kernel does not see the CQEs. */
	if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;
	ret = mlx5_read_clock(dev, timestamp);
	return ret;
}
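/*
 * Note: the order of the values filled in by the xstats callbacks below
 * must match the order of the names in mlx5_txpp_stat_names[].
 */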
/**
 * DPDK callback to clear device extended statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success and stats is reset, negative errno value otherwise and
 *   rte_errno is set.
 */
int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	__atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
	return 0;
}

/**
 * Routine to retrieve names of extended device statistics
 * for packet send scheduling. It appends the specific stats names
 * after the parts filled by preceding modules (eth stats, etc.).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] xstats_names
 *   Buffer to insert names into.
 * @param n
 *   Number of names.
 * @param n_used
 *   Number of names filled by preceding statistics modules.
 *
 * @return
 *   Number of xstats names.
 */
int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			       struct rte_eth_xstat_name *xstats_names,
			       unsigned int n, unsigned int n_used)
{
	unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
	unsigned int i;

	if (n >= n_used + n_txpp && xstats_names) {
		for (i = 0; i < n_txpp; ++i) {
			strncpy(xstats_names[i + n_used].name,
				mlx5_txpp_stat_names[i],
				RTE_ETH_XSTATS_NAME_SIZE);
			xstats_names[i + n_used].name
					[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
		}
	}
	return n_used + n_txpp;
}

static inline void
mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
		   struct mlx5_txpp_ts *tsa, uint16_t idx)
{
	do {
		uint64_t ts, ci;

		ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
		ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
			continue;
		if (__atomic_load_n(&txpp->tsa[idx].ts,
				    __ATOMIC_RELAXED) != ts)
			continue;
		if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
				    __ATOMIC_RELAXED) != ci)
			continue;
		tsa->ts = ts;
		tsa->ci_ts = ci;
		return;
	} while (true);
}

/*
 * Jitter reflects the clock change between
 * neighbouring Clock Queue completions.
 */
static uint64_t
mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
{
	struct mlx5_txpp_ts tsa0, tsa1;
	int64_t dts, dci;
	uint16_t ts_p;

	if (txpp->ts_n < 2) {
		/* Not enough reports gathered yet. */
		return 0;
	}
	do {
		int ts_0, ts_1;

		ts_p = txpp->ts_p;
		rte_compiler_barrier();
		ts_0 = ts_p - 2;
		if (ts_0 < 0)
			ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
		ts_1 = ts_p - 1;
		if (ts_1 < 0)
			ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
		mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
		mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
		rte_compiler_barrier();
	} while (ts_p != txpp->ts_p);
	/* We have two neighbouring reports, calculate the jitter. */
	dts = tsa1.ts - tsa0.ts;
	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
	if (dci < 0)
		dci += 1 << MLX5_CQ_INDEX_WIDTH;
	dci *= txpp->tick;
	return (dts > dci) ? dts - dci : dci - dts;
}
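/*
 * Both the jitter and the wander metrics compare the measured timestamp
 * delta (dts) with the ideal delta derived from the completion index
 * difference multiplied by the configured tick (dci * tick), and report
 * the absolute difference between the two.
 */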
/*
 * Wander reflects the long-term clock change
 * over the entire length of all Clock Queue completions.
 */
static uint64_t
mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
{
	struct mlx5_txpp_ts tsa0, tsa1;
	int64_t dts, dci;
	uint16_t ts_p;

	if (txpp->ts_n < MLX5_TXPP_REARM_SQ_SIZE) {
		/* Not enough reports gathered yet. */
		return 0;
	}
	do {
		int ts_0, ts_1;

		ts_p = txpp->ts_p;
		rte_compiler_barrier();
		ts_0 = ts_p - MLX5_TXPP_REARM_SQ_SIZE / 2 - 1;
		if (ts_0 < 0)
			ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
		ts_1 = ts_p - 1;
		if (ts_1 < 0)
			ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
		mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
		mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
		rte_compiler_barrier();
	} while (ts_p != txpp->ts_p);
	/* We have two reports, calculate the wander. */
	dts = tsa1.ts - tsa0.ts;
	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
	dci += 1 << MLX5_CQ_INDEX_WIDTH;
	dci *= txpp->tick;
	return (dts > dci) ? dts - dci : dci - dts;
}

/**
 * Routine to retrieve extended device statistics
 * for packet send scheduling. It appends the specific statistics
 * after the parts filled by preceding modules (eth stats, etc.).
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] stats
 *   Pointer to rte extended stats table.
 * @param n
 *   The size of the stats table.
 * @param n_used
 *   Number of stats filled by preceding statistics modules.
 *
 * @return
 *   Number of extended stats on success and stats is filled,
 *   negative on error and rte_errno is set.
 */
int
mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *stats,
		     unsigned int n, unsigned int n_used)
{
	unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);

	if (n >= n_used + n_txpp && stats) {
		struct mlx5_priv *priv = dev->data->dev_private;
		struct mlx5_dev_ctx_shared *sh = priv->sh;
		unsigned int i;

		for (i = 0; i < n_txpp; ++i)
			stats[n_used + i].id = n_used + i;
		stats[n_used + 0].value =
			__atomic_load_n(&sh->txpp.err_miss_int,
					__ATOMIC_RELAXED);
		stats[n_used + 1].value =
			__atomic_load_n(&sh->txpp.err_rearm_queue,
					__ATOMIC_RELAXED);
		stats[n_used + 2].value =
			__atomic_load_n(&sh->txpp.err_clock_queue,
					__ATOMIC_RELAXED);
		stats[n_used + 3].value =
			__atomic_load_n(&sh->txpp.err_ts_past,
					__ATOMIC_RELAXED);
		stats[n_used + 4].value =
			__atomic_load_n(&sh->txpp.err_ts_future,
					__ATOMIC_RELAXED);
		stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
		stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
		stats[n_used + 7].value = sh->txpp.sync_lost;
	}
	return n_used + n_txpp;
}