/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"

/*
 * TX Ring handling
 */

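/*
 * Free every allocated Tx queue: release its Tx descriptor ring and its
 * completion ring, then the queue structure itself, and clear the per-port
 * queue pointer.
 */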
void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		if (!txq)
			continue;

		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring);

		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring);

		rte_free(txq);
		bp->tx_queues[i] = NULL;
	}
}

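/*
 * Reset the software state of one Tx ring: set the wake threshold to half
 * the ring size and mark the firmware ring id as not yet allocated.
 */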
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;

	txq->tx_wake_thresh = ring->ring_size / 2;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}

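/*
 * Allocate the per-queue ring bookkeeping: the Tx descriptor ring, its
 * completion ring and, on devices that have one, the notification queue.
 * Ring sizes are rounded up to the next power of two so a simple mask can
 * be used for index wrapping.
 */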
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_cp_ring_info *nqr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	txr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_tx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txr == NULL)
		return -ENOMEM;
	txq->tx_ring = txr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	txr->tx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
	ring->vmem = (void **)&txr->tx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	txq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

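	/* Devices with a separate notification queue (NQ) need one more
	 * ring; size it to match the Tx ring.
	 */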
	if (BNXT_HAS_NQ(txq->bp)) {
		nqr = rte_zmalloc_socket("bnxt_tx_ring_nq",
					 sizeof(struct bnxt_cp_ring_info),
					 RTE_CACHE_LINE_SIZE, socket_id);
		if (nqr == NULL)
			return -ENOMEM;

		txq->nq_ring = nqr;

		ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
					  sizeof(struct bnxt_ring),
					  RTE_CACHE_LINE_SIZE, socket_id);
		if (ring == NULL)
			return -ENOMEM;

		nqr->cp_ring_struct = ring;
		ring->ring_size = txr->tx_ring_struct->ring_size;
		ring->ring_mask = ring->ring_size - 1;
		ring->bd = (void *)nqr->cp_desc_ring;
		ring->bd_dma = nqr->cp_desc_mapping;
		ring->vmem_size = 0;
		ring->vmem = NULL;
	}

	return 0;
}

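/*
 * Enqueue one packet on the Tx ring. Packets that need an offload use a
 * long BD pair (tx_bd_long + tx_bd_long_hi); others use a single short BD.
 * Completions are coalesced: every BD is marked NO_CMPL here and the caller
 * clears that flag on the last BD of the burst. Returns zero on success, or
 * a non-zero errno-style value if the ring is full or the BD count exceeds
 * the hardware limit.
 */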
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
				struct bnxt_tx_queue *txq,
				uint16_t *coal_pkts,
				struct tx_bd_long **last_txbd)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1 = NULL;
	uint32_t vlan_tag_flags, cfa_action;
	bool long_bd = false;
	unsigned short nr_bds = 0;
	struct rte_mbuf *m_seg;
	struct bnxt_sw_tx_bd *tx_buf;
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};

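	/* Any requested offload (checksum, TSO, VLAN insertion, tunnel)
	 * requires the long BD format; otherwise a short BD is sufficient.
	 */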
	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
				PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
				PKT_TX_TUNNEL_GENEVE))
		long_bd = true;

	nr_bds = long_bd + tx_pkt->nb_segs;
	if (unlikely(bnxt_tx_avail(txq) < nr_bds))
		return -ENOMEM;

	/* Check if number of Tx descriptors is above HW limit */
	if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) {
		PMD_DRV_LOG(ERR,
			    "Num descriptors %d exceeds HW limit\n", nr_bds);
		return -ENOSPC;
	}

	/* If packet length is less than minimum packet size, pad it */
	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < BNXT_MIN_PKT_SIZE)) {
		uint8_t pad = BNXT_MIN_PKT_SIZE - rte_pktmbuf_pkt_len(tx_pkt);
		char *seg = rte_pktmbuf_append(tx_pkt, pad);

		if (!seg) {
			PMD_DRV_LOG(ERR,
				    "Failed to pad mbuf by %d bytes\n",
				    pad);
			return -ENOMEM;
		}

		/* Note: data_len, pkt len are updated in rte_pktmbuf_append */
		memset(seg, 0, pad);
	}

	/* Check non zero data_len */
	RTE_VERIFY(tx_pkt->data_len);

	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
	tx_buf->mbuf = tx_pkt;
	tx_buf->nr_bds = nr_bds;

	txbd = &txr->tx_desc_ring[txr->tx_prod];
	txbd->opaque = *coal_pkts;
	txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
	txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
	txbd->len = tx_pkt->data_len;
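	/* lhint_arr[] covers lengths below 2KB; 2048 bytes and up use the
	 * GTE2K hint.
	 */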
	if (tx_pkt->pkt_len >= 2048)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
	txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));
	*last_txbd = txbd;

	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;
		cfa_action = 0;
		if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci;
			/* Currently supports 802.1Q and 802.1ad VLAN
			 * offloads; the QINQ1, QINQ2 and QINQ3 VLAN
			 * headers are deprecated.
			 */
			/* DPDK only supports 802.1Q VLAN packets */
			vlan_tag_flags |=
					TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}

		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

		txbd1 = (struct tx_bd_long_hi *)
					&txr->tx_desc_ring[txr->tx_prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		txbd1->cfa_action = cfa_action;

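		/* Pick LSO or the most specific checksum-offload lflags for
		 * this packet; the tests run from the most inclusive flag
		 * combination down to single-offload cases.
		 */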
		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
			uint16_t hdr_size;

			/* TSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO |
					 TX_BD_LONG_LFLAGS_T_IPID;
			hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len;
			hdr_size += (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK) ?
				    tx_pkt->outer_l2_len +
				    tx_pkt->outer_l3_len : 0;
			/* hdr_size is expressed in 16-bit units, not bytes;
			 * hence divide by 2.
			 */
			txbd1->hdr_size = hdr_size >> 1;
			txbd1->mss = tx_pkt->tso_segsz;
			RTE_VERIFY(txbd1->mss);

		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_IIP_TCP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
			   PKT_TX_IIP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
			   PKT_TX_IIP_TCP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_TCP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
			   PKT_TX_OIP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
			   PKT_TX_OIP_TCP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
			   PKT_TX_OIP_IIP_CKSUM) {
			/* Outer IP, Inner IP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
			   PKT_TX_TCP_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
			   PKT_TX_TCP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
			   PKT_TX_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
			   PKT_TX_IP_CKSUM) {
			/* IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
			   PKT_TX_OUTER_IP_CKSUM) {
			/* Outer IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
			txbd1->mss = 0;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

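	/* Chain the remaining mbuf segments as short BDs; the final BD of
	 * the packet is flagged PACKET_END below.
	 */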
	m_seg = tx_pkt->next;
	while (m_seg) {
		/* Check non zero data_len */
		RTE_VERIFY(m_seg->data_len);
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];
		tx_buf->mbuf = m_seg;

		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;

		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;

	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

	return 0;
}

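/*
 * Return the mbufs of nr_pkts completed packets to their mempools. Mbufs
 * are batched in txq->free and released with one rte_mempool_put_bulk()
 * call per mempool encountered.
 */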
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct rte_mempool *pool = NULL;
	struct rte_mbuf **free = txq->free;
	uint16_t cons = txr->tx_cons;
	unsigned int blk = 0;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct rte_mbuf *mbuf;
		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[cons];
		unsigned short nr_bds = tx_buf->nr_bds;

		for (j = 0; j < nr_bds; j++) {
			mbuf = tx_buf->mbuf;
			tx_buf->mbuf = NULL;
			cons = RING_NEXT(txr->tx_ring_struct, cons);
			tx_buf = &txr->tx_buf_ring[cons];
			if (!mbuf)	/* long_bd's tx_buf ? */
				continue;

			mbuf = rte_pktmbuf_prefree_seg(mbuf);
			if (unlikely(!mbuf))
				continue;

			/* EW - no need to unmap DMA memory? */

			if (likely(mbuf->pool == pool)) {
				/* Add mbuf to the bulk free array */
				free[blk++] = mbuf;
			} else {
				/* Found an mbuf from a different pool. Free
				 * mbufs accumulated so far to the previous
				 * pool
				 */
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk);

				/* Start accumulating mbufs in a new pool */
				free[0] = mbuf;
				pool = mbuf->pool;
				blk = 1;
			}
		}
	}
	if (blk)
		rte_mempool_put_bulk(pool, (void *)free, blk);

	txr->tx_cons = cons;
}

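/*
 * Service the Tx completion ring: walk the valid completion entries, sum
 * the packet counts reported in their opaque fields (completions are
 * coalesced), free the corresponding mbufs and ring the completion queue
 * doorbell with the new consumer index. Does nothing until at least
 * tx_free_thresh BDs are outstanding in hardware.
 */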
static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
	uint32_t opaque = 0;

	if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh)
		return 0;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
		rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
							ring_mask]);

		if (!CMPL_VALID(txcmp, cpr->valid))
			break;
		opaque = rte_le_to_cpu_32(txcmp->opaque);
		NEXT_CMPL(cpr, cons, cpr->valid, 1);
		rte_prefetch0(&cp_desc_ring[cons]);

		if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
			nb_tx_pkts += opaque;
		else
			RTE_LOG_DP(ERR, PMD,
					"Unhandled CMP type %02x\n",
					CMP_TYPE(txcmp));
		raw_cons = cons;
	} while (nb_tx_pkts < ring_mask);

	if (nb_tx_pkts) {
		bnxt_tx_cmp(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}

	return nb_tx_pkts;
}

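/*
 * Burst transmit. Reclaims completed descriptors first, refuses to send
 * while the queue is in the deferred-start (stopped) state, then enqueues
 * up to nb_pkts packets. A completion is requested only on the last BD of
 * the burst and the Tx doorbell is rung once for the whole burst.
 */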
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts)
{
	int rc;
	uint16_t nb_tx_pkts = 0;
	uint16_t coal_pkts = 0;
	struct bnxt_tx_queue *txq = tx_queue;
	struct tx_bd_long *last_txbd = NULL;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	/* Tx queue was stopped; wait for it to be restarted */
	if (txq->tx_deferred_start) {
		PMD_DRV_LOG(DEBUG, "Tx queue stopped; returning\n");
		return 0;
	}

	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
		coal_pkts++;
		rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
				     &coal_pkts, &last_txbd);

		if (unlikely(rc))
			break;
	}

	if (likely(nb_tx_pkts)) {
		/* Request a completion on the last packet */
		last_txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
		bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_prod);
	}

	return nb_tx_pkts;
}

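/*
 * Mark a Tx queue as started so bnxt_xmit_pkts() will accept packets again.
 */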
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	txq->tx_deferred_start = false;
	PMD_DRV_LOG(DEBUG, "Tx queue started\n");

	return 0;
}

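/*
 * Stop a Tx queue: drain any outstanding completions, then flag the queue
 * as deferred so bnxt_xmit_pkts() refuses further transmit requests.
 */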
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	txq->tx_deferred_start = true;
	PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");

	return 0;
}