xref: /dpdk/drivers/net/sfc/sfc_ef10.h (revision ba6a168a06581b5b3d523f984722a3e5f65bbb82)
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_EF10_H
#define _SFC_EF10_H

#include "sfc_debug.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Number of events in one cache line */
#define SFC_EF10_EV_PER_CACHE_LINE \
	(RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t))

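/*
 * Mask to round an event queue pointer (counted in events) down to a
 * cache line boundary; sfc_ef10_ev_qclear() below clears entries a
 * whole cache line at a time.
 */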
#define SFC_EF10_EV_QCLEAR_MASK		(~(SFC_EF10_EV_PER_CACHE_LINE - 1))

/*
 * Use the simple libefx-based implementation of
 * sfc_ef10_ev_qclear_cache_line() if SSE2 is not available,
 * since the optimized implementation uses __m128i intrinsics.
 */
#ifndef __SSE2__
#define SFC_EF10_EV_QCLEAR_USE_EFX
#endif

#if defined(SFC_EF10_EV_QCLEAR_USE_EFX)
static inline void
sfc_ef10_ev_qclear_cache_line(void *ptr)
{
	efx_qword_t *entry = ptr;
	unsigned int i;

	for (i = 0; i < SFC_EF10_EV_PER_CACHE_LINE; ++i)
		EFX_SET_QWORD(entry[i]);
}
#else
/*
 * It is possible to do this using AVX2 and AVX512F, but it shows
 * lower performance.
 */
static inline void
sfc_ef10_ev_qclear_cache_line(void *ptr)
{
	const efsys_uint128_t val = _mm_set1_epi64x(UINT64_MAX);
	efsys_uint128_t *addr = ptr;
	unsigned int i;

	RTE_BUILD_BUG_ON(sizeof(val) > RTE_CACHE_LINE_SIZE);
	RTE_BUILD_BUG_ON(RTE_CACHE_LINE_SIZE % sizeof(val) != 0);

	for (i = 0; i < RTE_CACHE_LINE_SIZE / sizeof(val); ++i)
		_mm_store_si128(&addr[i], val);
}
#endif

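/*
 * Reset to the "cleared" (all-ones) state all complete cache lines of
 * event queue entries between the old and new read pointers, so that
 * sfc_ef10_ev_present() can detect when the NIC writes a new event at
 * these positions on the next ring wrap.
 *
 * Illustrative call from an event queue poll routine (a sketch only,
 * the evq structure and field names here are hypothetical):
 *
 *	sfc_ef10_ev_qclear(evq->hw_ring, evq->ptr_mask,
 *			   old_read_ptr, evq->read_ptr);
 */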
static inline void
sfc_ef10_ev_qclear(efx_qword_t *hw_ring, unsigned int ptr_mask,
		   unsigned int old_read_ptr, unsigned int read_ptr)
{
	const unsigned int clear_ptr = read_ptr & SFC_EF10_EV_QCLEAR_MASK;
	unsigned int old_clear_ptr = old_read_ptr & SFC_EF10_EV_QCLEAR_MASK;

	while (old_clear_ptr != clear_ptr) {
		sfc_ef10_ev_qclear_cache_line(
			&hw_ring[old_clear_ptr & ptr_mask]);
		old_clear_ptr += SFC_EF10_EV_PER_CACHE_LINE;
	}

	/*
	 * No barriers here.
	 * Functions which push the doorbell should take care of correct
	 * ordering: store instructions which fill in the EvQ ring must be
	 * retired from the CPU and DMA-synced before the doorbell write
	 * which allows the NIC to reuse these event entries.
	 */
}

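/*
 * An entry holds an event if it is not all-ones, i.e. it has left the
 * state written by sfc_ef10_ev_qclear_cache_line() above.
 */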
static inline bool
sfc_ef10_ev_present(const efx_qword_t ev)
{
	return ~EFX_QWORD_FIELD(ev, EFX_DWORD_0) |
	       ~EFX_QWORD_FIELD(ev, EFX_DWORD_1);
}


/**
 * Alignment requirement for value written to RX WPTR:
 * the WPTR must be aligned to an 8 descriptor boundary.
 */
#define SFC_EF10_RX_WPTR_ALIGN	8u

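/*
 * Push the Rx doorbell to advertise added descriptors to the NIC.
 * @added must be a multiple of SFC_EF10_RX_WPTR_ALIGN; only its low
 * bits (added & ptr_mask) are written to the WPTR register.
 * @dbell_counter is a statistics counter bumped once per doorbell write.
 */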
static inline void
sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added,
		  unsigned int ptr_mask, uint32_t *dbell_counter)
{
	efx_dword_t dword;

	/* Hardware has alignment restriction for WPTR */
	RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
	SFC_ASSERT(RTE_ALIGN(added, SFC_EF10_RX_WPTR_ALIGN) == added);

	EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, added & ptr_mask);

	/* DMA sync to device is not required */

	/*
	 * rte_write32() has rte_io_wmb() which guarantees that the STORE
	 * operations (i.e. Rx and event descriptor updates) that precede
	 * the rte_io_wmb() call are visible to the NIC before the STORE
	 * operations that follow it (i.e. the doorbell write).
	 */
	rte_write32(dword.ed_u32[0], doorbell);
	(*dbell_counter)++;
}

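/*
 * Write the masked event queue read pointer to the EVQ RPTR (prime)
 * register. The relaxed MMIO write is followed by rte_wmb() so that
 * the prime is ordered before any subsequent stores.
 */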
static inline void
sfc_ef10_ev_qprime(volatile void *qprime, unsigned int read_ptr,
		   unsigned int ptr_mask)
{
	efx_dword_t dword;

	EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, read_ptr & ptr_mask);

	rte_write32_relaxed(dword.ed_u32[0], qprime);
	rte_wmb();
}


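/*
 * Get the array of packet types supported by the EF10 datapaths for
 * the given set of supported tunnel encapsulations; the number of
 * array elements is returned via @no_of_elements.
 */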
const uint32_t *sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps,
					      size_t *no_of_elements);


#ifdef __cplusplus
}
#endif
#endif /* _SFC_EF10_H */