xref: /dpdk/drivers/common/sfc_efx/base/ef10_tx.c (revision 672386c1e9e1f64f7aa3b1360ad22dc737ea8d72)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2012-2019 Solarflare Communications Inc.
5  */
6 
7 #include "efx.h"
8 #include "efx_impl.h"
9 
10 
11 #if EFX_OPTS_EF10()
12 
#if EFSYS_OPT_QSTATS
/* Bump a per-TXQ software statistic; expands to nothing without QSTATS. */
#define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
	do {								\
		(_etp)->et_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_TX_QSTAT_INCR(_etp, _stat)
#endif
22 
23 	__checkReturn	efx_rc_t
ef10_tx_init(__in efx_nic_t * enp)24 ef10_tx_init(
25 	__in		efx_nic_t *enp)
26 {
27 	_NOTE(ARGUNUSED(enp))
28 	return (0);
29 }
30 
31 			void
ef10_tx_fini(__in efx_nic_t * enp)32 ef10_tx_fini(
33 	__in		efx_nic_t *enp)
34 {
35 	_NOTE(ARGUNUSED(enp))
36 }
37 
38 	__checkReturn	efx_rc_t
ef10_tx_qcreate(__in efx_nic_t * enp,__in unsigned int index,__in unsigned int label,__in efsys_mem_t * esmp,__in size_t ndescs,__in uint32_t id,__in uint16_t flags,__in efx_evq_t * eep,__in efx_txq_t * etp,__out unsigned int * addedp)39 ef10_tx_qcreate(
40 	__in		efx_nic_t *enp,
41 	__in		unsigned int index,
42 	__in		unsigned int label,
43 	__in		efsys_mem_t *esmp,
44 	__in		size_t ndescs,
45 	__in		uint32_t id,
46 	__in		uint16_t flags,
47 	__in		efx_evq_t *eep,
48 	__in		efx_txq_t *etp,
49 	__out		unsigned int *addedp)
50 {
51 	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
52 	uint16_t inner_csum;
53 	efx_desc_t desc;
54 	efx_rc_t rc;
55 
56 	_NOTE(ARGUNUSED(id))
57 
58 	inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
59 	if (((flags & inner_csum) != 0) &&
60 	    (encp->enc_tunnel_encapsulations_supported == 0)) {
61 		rc = EINVAL;
62 		goto fail1;
63 	}
64 
65 	if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index,
66 	    flags, esmp)) != 0)
67 		goto fail2;
68 
69 	/*
70 	 * A previous user of this TX queue may have written a descriptor to the
71 	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
72 	 * The next doorbell write would then push the stale descriptor.
73 	 *
74 	 * Ensure the (per network port) TX push collector is cleared by writing
75 	 * a no-op TX option descriptor. See bug29981 for details.
76 	 */
77 	*addedp = 1;
78 	ef10_tx_qdesc_checksum_create(etp, flags, &desc);
79 
80 	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
81 	ef10_tx_qpush(etp, *addedp, 0);
82 
83 	return (0);
84 
85 fail2:
86 	EFSYS_PROBE(fail2);
87 fail1:
88 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
89 
90 	return (rc);
91 }
92 
93 		void
ef10_tx_qdestroy(__in efx_txq_t * etp)94 ef10_tx_qdestroy(
95 	__in	efx_txq_t *etp)
96 {
97 	/* FIXME */
98 	_NOTE(ARGUNUSED(etp))
99 	/* FIXME */
100 }
101 
102 	__checkReturn	efx_rc_t
ef10_tx_qpio_enable(__in efx_txq_t * etp)103 ef10_tx_qpio_enable(
104 	__in		efx_txq_t *etp)
105 {
106 	efx_nic_t *enp = etp->et_enp;
107 	efx_piobuf_handle_t handle;
108 	efx_rc_t rc;
109 
110 	if (etp->et_pio_size != 0) {
111 		rc = EALREADY;
112 		goto fail1;
113 	}
114 
115 	/* Sub-allocate a PIO block from a piobuf */
116 	if ((rc = ef10_nic_pio_alloc(enp,
117 		    &etp->et_pio_bufnum,
118 		    &handle,
119 		    &etp->et_pio_blknum,
120 		    &etp->et_pio_offset,
121 		    &etp->et_pio_size)) != 0) {
122 		goto fail2;
123 	}
124 	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
125 
126 	/* Link the piobuf to this TXQ */
127 	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
128 		goto fail3;
129 	}
130 
131 	/*
132 	 * et_pio_offset is the offset of the sub-allocated block within the
133 	 * hardware PIO buffer. It is used as the buffer address in the PIO
134 	 * option descriptor.
135 	 *
136 	 * et_pio_write_offset is the offset of the sub-allocated block from the
137 	 * start of the write-combined memory mapping, and is used for writing
138 	 * data into the PIO buffer.
139 	 */
140 	etp->et_pio_write_offset =
141 	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
142 	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
143 
144 	return (0);
145 
146 fail3:
147 	EFSYS_PROBE(fail3);
148 	(void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
149 fail2:
150 	EFSYS_PROBE(fail2);
151 	etp->et_pio_size = 0;
152 fail1:
153 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
154 
155 	return (rc);
156 }
157 
158 			void
ef10_tx_qpio_disable(__in efx_txq_t * etp)159 ef10_tx_qpio_disable(
160 	__in		efx_txq_t *etp)
161 {
162 	efx_nic_t *enp = etp->et_enp;
163 
164 	if (etp->et_pio_size != 0) {
165 		/* Unlink the piobuf from this TXQ */
166 		if (ef10_nic_pio_unlink(enp, etp->et_index) != 0)
167 			return;
168 
169 		/* Free the sub-allocated PIO block */
170 		(void) ef10_nic_pio_free(enp, etp->et_pio_bufnum,
171 		    etp->et_pio_blknum);
172 		etp->et_pio_size = 0;
173 		etp->et_pio_write_offset = 0;
174 	}
175 }
176 
177 	__checkReturn	efx_rc_t
ef10_tx_qpio_write(__in efx_txq_t * etp,__in_ecount (length)uint8_t * buffer,__in size_t length,__in size_t offset)178 ef10_tx_qpio_write(
179 	__in			efx_txq_t *etp,
180 	__in_ecount(length)	uint8_t *buffer,
181 	__in			size_t length,
182 	__in			size_t offset)
183 {
184 	efx_nic_t *enp = etp->et_enp;
185 	efsys_bar_t *esbp = enp->en_esbp;
186 	uint32_t write_offset;
187 	uint32_t write_offset_limit;
188 	efx_qword_t *eqp;
189 	efx_rc_t rc;
190 
191 	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
192 
193 	if (etp->et_pio_size == 0) {
194 		rc = ENOENT;
195 		goto fail1;
196 	}
197 	if (offset + length > etp->et_pio_size)	{
198 		rc = ENOSPC;
199 		goto fail2;
200 	}
201 
202 	/*
203 	 * Writes to PIO buffers must be 64 bit aligned, and multiples of
204 	 * 64 bits.
205 	 */
206 	write_offset = etp->et_pio_write_offset + offset;
207 	write_offset_limit = write_offset + length;
208 	eqp = (efx_qword_t *)buffer;
209 	while (write_offset < write_offset_limit) {
210 		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
211 		eqp++;
212 		write_offset += sizeof (efx_qword_t);
213 	}
214 
215 	return (0);
216 
217 fail2:
218 	EFSYS_PROBE(fail2);
219 fail1:
220 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
221 
222 	return (rc);
223 }
224 
225 	__checkReturn	efx_rc_t
ef10_tx_qpio_post(__in efx_txq_t * etp,__in size_t pkt_length,__in unsigned int completed,__inout unsigned int * addedp)226 ef10_tx_qpio_post(
227 	__in			efx_txq_t *etp,
228 	__in			size_t pkt_length,
229 	__in			unsigned int completed,
230 	__inout			unsigned int *addedp)
231 {
232 	efx_qword_t pio_desc;
233 	unsigned int id;
234 	size_t offset;
235 	unsigned int added = *addedp;
236 	efx_rc_t rc;
237 
238 
239 	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
240 		rc = ENOSPC;
241 		goto fail1;
242 	}
243 
244 	if (etp->et_pio_size == 0) {
245 		rc = ENOENT;
246 		goto fail2;
247 	}
248 
249 	id = added++ & etp->et_mask;
250 	offset = id * sizeof (efx_qword_t);
251 
252 	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
253 		    unsigned int, id, uint32_t, etp->et_pio_offset,
254 		    size_t, pkt_length);
255 
256 	EFX_POPULATE_QWORD_5(pio_desc,
257 			ESF_DZ_TX_DESC_IS_OPT, 1,
258 			ESF_DZ_TX_OPTION_TYPE, 1,
259 			ESF_DZ_TX_PIO_CONT, 0,
260 			ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
261 			ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
262 
263 	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
264 
265 	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
266 
267 	*addedp = added;
268 	return (0);
269 
270 fail2:
271 	EFSYS_PROBE(fail2);
272 fail1:
273 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
274 
275 	return (rc);
276 }
277 
278 	__checkReturn		efx_rc_t
ef10_tx_qpost(__in efx_txq_t * etp,__in_ecount (ndescs)efx_buffer_t * eb,__in unsigned int ndescs,__in unsigned int completed,__inout unsigned int * addedp)279 ef10_tx_qpost(
280 	__in			efx_txq_t *etp,
281 	__in_ecount(ndescs)	efx_buffer_t *eb,
282 	__in			unsigned int ndescs,
283 	__in			unsigned int completed,
284 	__inout			unsigned int *addedp)
285 {
286 	unsigned int added = *addedp;
287 	unsigned int i;
288 	efx_rc_t rc;
289 
290 	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
291 		rc = ENOSPC;
292 		goto fail1;
293 	}
294 
295 	for (i = 0; i < ndescs; i++) {
296 		efx_buffer_t *ebp = &eb[i];
297 		efsys_dma_addr_t addr = ebp->eb_addr;
298 		size_t size = ebp->eb_size;
299 		boolean_t eop = ebp->eb_eop;
300 		unsigned int id;
301 		size_t offset;
302 		efx_qword_t qword;
303 
304 		/* No limitations on boundary crossing */
305 		EFSYS_ASSERT(size <=
306 		    etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
307 
308 		id = added++ & etp->et_mask;
309 		offset = id * sizeof (efx_qword_t);
310 
311 		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
312 		    unsigned int, id, efsys_dma_addr_t, addr,
313 		    size_t, size, boolean_t, eop);
314 
315 		EFX_POPULATE_QWORD_5(qword,
316 		    ESF_DZ_TX_KER_TYPE, 0,
317 		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
318 		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
319 		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
320 		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
321 
322 		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
323 	}
324 
325 	EFX_TX_QSTAT_INCR(etp, TX_POST);
326 
327 	*addedp = added;
328 	return (0);
329 
330 fail1:
331 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
332 
333 	return (rc);
334 }
335 
336 /*
337  * This improves performance by, when possible, pushing a TX descriptor at the
338  * same time as the doorbell. The descriptor must be added to the TXQ, so that
339  * can be used if the hardware decides not to use the pushed descriptor.
340  */
341 			void
ef10_tx_qpush(__in efx_txq_t * etp,__in unsigned int added,__in unsigned int pushed)342 ef10_tx_qpush(
343 	__in		efx_txq_t *etp,
344 	__in		unsigned int added,
345 	__in		unsigned int pushed)
346 {
347 	efx_nic_t *enp = etp->et_enp;
348 	unsigned int wptr;
349 	unsigned int id;
350 	size_t offset;
351 	efx_qword_t desc;
352 	efx_oword_t oword;
353 
354 	wptr = added & etp->et_mask;
355 	id = pushed & etp->et_mask;
356 	offset = id * sizeof (efx_qword_t);
357 
358 	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
359 
360 	/*
361 	 * Bug 65776: TSO option descriptors cannot be pushed if pacer bypass is
362 	 * enabled on the event queue this transmit queue is attached to.
363 	 *
364 	 * To ensure the code is safe, it is easiest to simply test the type of
365 	 * the descriptor to push, and only push it is if it not a TSO option
366 	 * descriptor.
367 	 */
368 	if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
369 	    (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
370 	    ESE_DZ_TX_OPTION_DESC_TSO)) {
371 		/* Push the descriptor and update the wptr. */
372 		EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
373 		    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
374 		    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
375 
376 		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
377 		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
378 		    EF10_TXQ_DESC_SIZE, wptr, id);
379 		EFSYS_PIO_WRITE_BARRIER();
380 		EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
381 		    etp->et_index, &oword);
382 	} else {
383 		efx_dword_t dword;
384 
385 		/*
386 		 * Only update the wptr. This is signalled to the hardware by
387 		 * only writing one DWORD of the doorbell register.
388 		 */
389 		EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
390 		dword = oword.eo_dword[2];
391 
392 		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
393 		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
394 		    EF10_TXQ_DESC_SIZE, wptr, id);
395 		EFSYS_PIO_WRITE_BARRIER();
396 		EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
397 		    etp->et_index, &dword, B_FALSE);
398 	}
399 }
400 
401 	__checkReturn		efx_rc_t
ef10_tx_qdesc_post(__in efx_txq_t * etp,__in_ecount (ndescs)efx_desc_t * ed,__in unsigned int ndescs,__in unsigned int completed,__inout unsigned int * addedp)402 ef10_tx_qdesc_post(
403 	__in			efx_txq_t *etp,
404 	__in_ecount(ndescs)	efx_desc_t *ed,
405 	__in			unsigned int ndescs,
406 	__in			unsigned int completed,
407 	__inout			unsigned int *addedp)
408 {
409 	unsigned int added = *addedp;
410 	unsigned int i;
411 
412 	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1))
413 		return (ENOSPC);
414 
415 	for (i = 0; i < ndescs; i++) {
416 		efx_desc_t *edp = &ed[i];
417 		unsigned int id;
418 		size_t offset;
419 
420 		id = added++ & etp->et_mask;
421 		offset = id * sizeof (efx_desc_t);
422 
423 		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
424 	}
425 
426 	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
427 		    unsigned int, added, unsigned int, ndescs);
428 
429 	EFX_TX_QSTAT_INCR(etp, TX_POST);
430 
431 	*addedp = added;
432 	return (0);
433 }
434 
435 	void
ef10_tx_qdesc_dma_create(__in efx_txq_t * etp,__in efsys_dma_addr_t addr,__in size_t size,__in boolean_t eop,__out efx_desc_t * edp)436 ef10_tx_qdesc_dma_create(
437 	__in	efx_txq_t *etp,
438 	__in	efsys_dma_addr_t addr,
439 	__in	size_t size,
440 	__in	boolean_t eop,
441 	__out	efx_desc_t *edp)
442 {
443 	_NOTE(ARGUNUSED(etp))
444 
445 	/* No limitations on boundary crossing */
446 	EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
447 
448 	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
449 		    efsys_dma_addr_t, addr,
450 		    size_t, size, boolean_t, eop);
451 
452 	EFX_POPULATE_QWORD_5(edp->ed_eq,
453 		    ESF_DZ_TX_KER_TYPE, 0,
454 		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
455 		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
456 		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
457 		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
458 }
459 
460 	void
ef10_tx_qdesc_tso_create(__in efx_txq_t * etp,__in uint16_t ipv4_id,__in uint32_t tcp_seq,__in uint8_t tcp_flags,__out efx_desc_t * edp)461 ef10_tx_qdesc_tso_create(
462 	__in	efx_txq_t *etp,
463 	__in	uint16_t ipv4_id,
464 	__in	uint32_t tcp_seq,
465 	__in	uint8_t  tcp_flags,
466 	__out	efx_desc_t *edp)
467 {
468 	_NOTE(ARGUNUSED(etp))
469 
470 	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
471 		    uint16_t, ipv4_id, uint32_t, tcp_seq,
472 		    uint8_t, tcp_flags);
473 
474 	EFX_POPULATE_QWORD_5(edp->ed_eq,
475 			    ESF_DZ_TX_DESC_IS_OPT, 1,
476 			    ESF_DZ_TX_OPTION_TYPE,
477 			    ESE_DZ_TX_OPTION_DESC_TSO,
478 			    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
479 			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
480 			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
481 }
482 
483 	void
ef10_tx_qdesc_tso2_create(__in efx_txq_t * etp,__in uint16_t ipv4_id,__in uint16_t outer_ipv4_id,__in uint32_t tcp_seq,__in uint16_t tcp_mss,__out_ecount (count)efx_desc_t * edp,__in int count)484 ef10_tx_qdesc_tso2_create(
485 	__in			efx_txq_t *etp,
486 	__in			uint16_t ipv4_id,
487 	__in			uint16_t outer_ipv4_id,
488 	__in			uint32_t tcp_seq,
489 	__in			uint16_t tcp_mss,
490 	__out_ecount(count)	efx_desc_t *edp,
491 	__in			int count)
492 {
493 	_NOTE(ARGUNUSED(etp, count))
494 
495 	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
496 		    uint16_t, ipv4_id, uint32_t, tcp_seq,
497 		    uint16_t, tcp_mss);
498 
499 	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
500 
501 	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
502 			    ESF_DZ_TX_DESC_IS_OPT, 1,
503 			    ESF_DZ_TX_OPTION_TYPE,
504 			    ESE_DZ_TX_OPTION_DESC_TSO,
505 			    ESF_DZ_TX_TSO_OPTION_TYPE,
506 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
507 			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
508 			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
509 	EFX_POPULATE_QWORD_5(edp[1].ed_eq,
510 			    ESF_DZ_TX_DESC_IS_OPT, 1,
511 			    ESF_DZ_TX_OPTION_TYPE,
512 			    ESE_DZ_TX_OPTION_DESC_TSO,
513 			    ESF_DZ_TX_TSO_OPTION_TYPE,
514 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
515 			    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
516 			    ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
517 }
518 
519 	void
ef10_tx_qdesc_vlantci_create(__in efx_txq_t * etp,__in uint16_t tci,__out efx_desc_t * edp)520 ef10_tx_qdesc_vlantci_create(
521 	__in	efx_txq_t *etp,
522 	__in	uint16_t  tci,
523 	__out	efx_desc_t *edp)
524 {
525 	_NOTE(ARGUNUSED(etp))
526 
527 	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
528 		    uint16_t, tci);
529 
530 	EFX_POPULATE_QWORD_4(edp->ed_eq,
531 			    ESF_DZ_TX_DESC_IS_OPT, 1,
532 			    ESF_DZ_TX_OPTION_TYPE,
533 			    ESE_DZ_TX_OPTION_DESC_VLAN,
534 			    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
535 			    ESF_DZ_TX_VLAN_TAG1, tci);
536 }
537 
538 	void
ef10_tx_qdesc_checksum_create(__in efx_txq_t * etp,__in uint16_t flags,__out efx_desc_t * edp)539 ef10_tx_qdesc_checksum_create(
540 	__in	efx_txq_t *etp,
541 	__in	uint16_t flags,
542 	__out	efx_desc_t *edp)
543 {
544 	_NOTE(ARGUNUSED(etp));
545 
546 	EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index,
547 		    uint32_t, flags);
548 
549 	EFX_POPULATE_QWORD_6(edp->ed_eq,
550 	    ESF_DZ_TX_DESC_IS_OPT, 1,
551 	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
552 	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
553 	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
554 	    ESF_DZ_TX_OPTION_IP_CSUM,
555 	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
556 	    ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
557 	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
558 	    ESF_DZ_TX_OPTION_INNER_IP_CSUM,
559 	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
560 }
561 
562 
563 	__checkReturn	efx_rc_t
ef10_tx_qpace(__in efx_txq_t * etp,__in unsigned int ns)564 ef10_tx_qpace(
565 	__in		efx_txq_t *etp,
566 	__in		unsigned int ns)
567 {
568 	efx_rc_t rc;
569 
570 	/* FIXME */
571 	_NOTE(ARGUNUSED(etp, ns))
572 	_NOTE(CONSTANTCONDITION)
573 	if (B_FALSE) {
574 		rc = ENOTSUP;
575 		goto fail1;
576 	}
577 	/* FIXME */
578 
579 	return (0);
580 
581 fail1:
582 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
583 
584 	return (rc);
585 }
586 
587 	__checkReturn	efx_rc_t
ef10_tx_qflush(__in efx_txq_t * etp)588 ef10_tx_qflush(
589 	__in		efx_txq_t *etp)
590 {
591 	efx_nic_t *enp = etp->et_enp;
592 	efx_rc_t rc;
593 
594 	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
595 		goto fail1;
596 
597 	return (0);
598 
599 fail1:
600 	/*
601 	 * EALREADY is not an error, but indicates that the MC has rebooted and
602 	 * that the TXQ has already been destroyed. Callers need to know that
603 	 * the TXQ flush has completed to avoid waiting until timeout for a
604 	 * flush done event that will not be delivered.
605 	 */
606 	if (rc != EALREADY)
607 		EFSYS_PROBE1(fail1, efx_rc_t, rc);
608 
609 	return (rc);
610 }
611 
612 			void
ef10_tx_qenable(__in efx_txq_t * etp)613 ef10_tx_qenable(
614 	__in		efx_txq_t *etp)
615 {
616 	/* FIXME */
617 	_NOTE(ARGUNUSED(etp))
618 	/* FIXME */
619 }
620 
621 #if EFSYS_OPT_QSTATS
622 			void
ef10_tx_qstats_update(__in efx_txq_t * etp,__inout_ecount (TX_NQSTATS)efsys_stat_t * stat)623 ef10_tx_qstats_update(
624 	__in				efx_txq_t *etp,
625 	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
626 {
627 	unsigned int id;
628 
629 	for (id = 0; id < TX_NQSTATS; id++) {
630 		efsys_stat_t *essp = &stat[id];
631 
632 		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
633 		etp->et_stat[id] = 0;
634 	}
635 }
636 
637 #endif /* EFSYS_OPT_QSTATS */
638 
639 #endif /* EFX_OPTS_EF10() */
640