/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_vect.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "mlx5_rxtx_vec_altivec.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif

/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];

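		/*
		 * Errored completions are flagged by the vectorized Rx
		 * routines, which set packet_type to RTE_PTYPE_ALL_MASK on
		 * the consumed mbuf.
		 */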
		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += PKT_LEN(pkt);
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Correct counters of errored completions. */
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
	mlx5_rx_err_handle(rxq, 1);
	return n;
}

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t n = q_n - (rxq->rq_ci - rxq->rq_pi);
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq =
		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
	unsigned int i;

	if (n >= rxq->rq_repl_thresh) {
		MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
		MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) >
			    MLX5_VPMD_DESCS_PER_LOOP);
		/* Not to cross queue end. */
		n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
		if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
			rxq->stats.rx_nombuf += n;
			return;
		}
		for (i = 0; i < n; ++i) {
			void *buf_addr;

			/*
			 * To support mbufs with externally attached data
			 * buffers, the buf_addr pointer is used instead of
			 * rte_mbuf_buf_addr(). Reading buf_addr touches the
			 * mbuf itself and may impact performance.
			 */
			buf_addr = elts[i]->buf_addr;
			wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
						      RTE_PKTMBUF_HEADROOM);
			/* If there's a single MR, no need to replace LKey. */
			if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh)
				     > 1))
				wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
		}
		rxq->rq_ci += n;
		/* Prevent overflowing into consumed mbufs. */
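		/*
		 * The vectorized Rx burst processes descriptors in groups of
		 * MLX5_VPMD_DESCS_PER_LOOP and may overread the SW ring, so
		 * point the entries past rq_ci at a static fake mbuf instead
		 * of leaving stale pointers to already consumed mbufs.
		 */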
		elts_idx = rxq->rq_ci & q_mask;
		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
			(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
}

/**
 * Replenish buffers for MPRQ RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static inline void
mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
{
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint32_t elts_n = wqe_n * strd_n;
	const uint32_t wqe_mask = elts_n - 1;
	uint32_t n = elts_n - (rxq->elts_ci - rxq->rq_pi);
	uint32_t elts_idx = rxq->elts_ci & wqe_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];

	/* Not to cross queue end. */
	if (n >= rxq->rq_repl_thresh) {
		MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n));
		MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n) >
			    MLX5_VPMD_DESCS_PER_LOOP);
		n = RTE_MIN(n, elts_n - elts_idx);
		if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
			rxq->stats.rx_nombuf += n;
			return;
		}
		rxq->elts_ci += n;
	}
}

/**
 * Copy or attach MPRQ buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param pkts_n
 *   Number of packets to be stored.
 *
 * @return
 *   Number of packets successfully copied/attached (<= pkts_n).
 */
static inline uint16_t
rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
		     struct rte_mbuf **pkts, uint16_t pkts_n)
{
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint16_t wqe_mask = wqe_n - 1;
	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint32_t elts_n = wqe_n * strd_n;
	const uint32_t elts_mask = elts_n - 1;
	uint32_t elts_idx = rxq->rq_pi & elts_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	uint32_t rq_ci = rxq->rq_ci;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wqe_mask];
	uint16_t copied = 0;
	uint16_t i = 0;

	for (i = 0; i < pkts_n; ++i) {
		uint16_t strd_cnt;
		enum mlx5_rqx_code rxq_code;

		if (rxq->consumed_strd == strd_n) {
			/* Replace WQE if the buffer is still in use. */
			mprq_buf_replace(rxq, rq_ci & wqe_mask);
			/* Advance to the next WQE. */
			rxq->consumed_strd = 0;
			rq_ci++;
			buf = (*rxq->mprq_bufs)[rq_ci & wqe_mask];
		}

		if (!elts[i]->pkt_len) {
			rxq->consumed_strd = strd_n;
			rte_pktmbuf_free_seg(elts[i]);
#ifdef MLX5_PMD_SOFT_COUNTERS
			rxq->stats.ipackets -= 1;
#endif
			continue;
		}
		strd_cnt = (elts[i]->pkt_len / strd_sz) +
			   ((elts[i]->pkt_len % strd_sz) ? 1 : 0);
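		/*
		 * mprq_buf_to_pkt() either copies the packet data out of the
		 * MPRQ buffer or attaches the strides as an external buffer,
		 * depending on the packet length and the MPRQ configuration.
		 */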
		rxq_code = mprq_buf_to_pkt(rxq, elts[i], elts[i]->pkt_len,
					   buf, rxq->consumed_strd, strd_cnt);
		rxq->consumed_strd += strd_cnt;
		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
			rte_pktmbuf_free_seg(elts[i]);
#ifdef MLX5_PMD_SOFT_COUNTERS
			rxq->stats.ipackets -= 1;
			rxq->stats.ibytes -= elts[i]->pkt_len;
#endif
			if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
				++rxq->stats.rx_nombuf;
				break;
			}
			if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
				++rxq->stats.idropped;
				continue;
			}
		}
		pkts[copied++] = elts[i];
	}
	rxq->rq_pi += i;
	rxq->cq_ci += i;
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	if (rq_ci != rxq->rq_ci) {
		rxq->rq_ci = rq_ci;
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
	return copied;
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but
 * the packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set to a non-zero value if the pkts array has at least
 *   one error packet to handle.
 * @param[out] no_cq
 *   Pointer to a boolean. Set to true if no new CQE is seen.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
	    uint16_t pkts_n, uint64_t *err, bool *no_cq)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	const uint16_t e_n = 1 << rxq->elts_n;
	const uint16_t e_mask = e_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;

	MLX5_ASSERT(rxq->sges_n == 0);
	MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	mlx5_rx_replenish_bulk_mbuf(rxq);
	/* See if there are unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(&(*rxq->elts)[rxq->rq_pi & e_mask],
				pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & e_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n) {
		*no_cq = !rcvd_pkt;
		return rcvd_pkt;
	}
	/* At this point, there shouldn't be any remaining packets. */
	MLX5_ASSERT(rxq->decompressed == 0);
	/* Process all the CQEs. */
	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
		*no_cq = true;
		return rcvd_pkt;
	}
	/* Update the consumer indexes for non-compressed CQEs. */
	MLX5_ASSERT(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
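		/*
		 * rxq_cq_decompress_v() expands the compressed CQE session
		 * into the SW ring; rxq->decompressed tracks how many of
		 * those packets have not been returned to the caller yet.
		 */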
		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n]);
		rxq->cq_ci += rxq->decompressed;
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(&(*rxq->elts)[rxq->rq_pi & e_mask],
					&pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	*no_cq = !rcvd_pkt;
	return rcvd_pkt;
}

/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx = 0;
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

	do {
		nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn,
				    &err, &no_cq);
		if (unlikely(err | rxq->err_state))
			nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
		tn += nb_rx;
		if (unlikely(no_cq))
			break;
	} while (tn != pkts_n);
	return tn;
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but
 * the packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set to a non-zero value if the pkts array has at least
 *   one error packet to handle.
 * @param[out] no_cq
 *   Pointer to a boolean. Set to true if no new CQE is seen.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
		 uint16_t pkts_n, uint64_t *err, bool *no_cq)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint32_t elts_n = wqe_n * strd_n;
	const uint32_t elts_mask = elts_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	uint16_t cp_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;

	MLX5_ASSERT(rxq->sges_n == 0);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	mlx5_rx_mprq_replenish_bulk_mbuf(rxq);
	/* See if there are unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		cp_pkt = rxq_copy_mprq_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->decompressed -= rcvd_pkt;
		pkts += cp_pkt;
	}
	elts_idx = rxq->rq_pi & elts_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - cp_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, elts_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	/* Not to move past the allocated mbufs. */
	pkts_n = RTE_MIN(pkts_n, rxq->elts_ci - rxq->rq_pi);
	if (!pkts_n) {
		*no_cq = !cp_pkt;
		return cp_pkt;
	}
	/* At this point, there shouldn't be any remaining packets. */
	MLX5_ASSERT(rxq->decompressed == 0);
	/* Process all the CQEs. */
	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
		*no_cq = true;
		return cp_pkt;
	}
	/* Update the consumer indexes for non-compressed CQEs. */
	MLX5_ASSERT(nocmp_n <= pkts_n);
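	/*
	 * Unlike rxq_burst_v(), the consumer indexes and the CQ/RQ doorbells
	 * are updated inside rxq_copy_mprq_mbuf_v().
	 */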
	cp_pkt = rxq_copy_mprq_mbuf_v(rxq, pkts, nocmp_n);
	rcvd_pkt += cp_pkt;
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			cp_pkt = rxq_copy_mprq_mbuf_v(rxq, &pkts[cp_pkt], n);
			rcvd_pkt += cp_pkt;
			rxq->decompressed -= n;
		}
	}
	*no_cq = !rcvd_pkt;
	return rcvd_pkt;
}

/**
 * DPDK callback for vectorized MPRQ RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx = 0;
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

	do {
		nb_rx = rxq_burst_mprq_v(rxq, pkts + tn, pkts_n - tn,
					 &err, &no_cq);
		if (unlikely(err | rxq->err_state))
			nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
		tn += nb_rx;
		if (unlikely(no_cq))
			break;
	} while (tn != pkts_n);
	return tn;
}

/**
 * Check whether an RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	if (rxq->lro)
		return -ENOTSUP;
	return 1;
}

/**
 * Check whether a device can support vectorized RX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
		return -ENOTSUP;
	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	/* All the configured queues should support vectorized RX. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (!rxq)
			continue;
		if (mlx5_rxq_check_vec_support(rxq) < 0)
			break;
	}
	if (i != priv->rxqs_n)
		return -ENOTSUP;
	return 1;
}