/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_EV_H_
#define _SFC_EV_H_

#include <ethdev_driver.h>

#include "efx.h"

#include "sfc.h"

#ifdef __cplusplus
extern "C" {
#endif

struct sfc_adapter;
struct sfc_dp_rxq;
struct sfc_dp_txq;

enum sfc_evq_state {
	SFC_EVQ_UNINITIALIZED = 0,
	SFC_EVQ_INITIALIZED,
	SFC_EVQ_STARTING,
	SFC_EVQ_STARTED,

	SFC_EVQ_NSTATES
};

enum sfc_evq_type {
	SFC_EVQ_TYPE_MGMT = 0,
	SFC_EVQ_TYPE_RX,
	SFC_EVQ_TYPE_TX,

	SFC_EVQ_NTYPES
};

struct sfc_evq {
	/* Used on datapath */
	efx_evq_t *common;
	const efx_ev_callbacks_t *callbacks;
	unsigned int read_ptr;
	unsigned int read_ptr_primed;
	boolean_t exception;
	efsys_mem_t mem;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_dp_txq *dp_txq;

	/* Not used on datapath */
	struct sfc_adapter *sa;
	unsigned int evq_index;
	enum sfc_evq_state init_state;
	enum sfc_evq_type type;
	unsigned int entries;
};

static inline sfc_sw_index_t
sfc_mgmt_evq_sw_index(__rte_unused const struct sfc_adapter_shared *sas)
{
	return 0;
}

/* Return the number of Rx queues reserved for driver's internal use */
static inline unsigned int
sfc_nb_reserved_rxq(const struct sfc_adapter_shared *sas)
{
	return sfc_nb_counter_rxq(sas) + sfc_repr_nb_rxq(sas);
}

/* Return the number of Tx queues reserved for driver's internal use */
static inline unsigned int
sfc_nb_txq_reserved(const struct sfc_adapter_shared *sas)
{
	return sfc_repr_nb_txq(sas);
}

static inline unsigned int
sfc_nb_reserved_evq(const struct sfc_adapter_shared *sas)
{
	/* An EvQ is required for each reserved Rx/Tx queue */
	return 1 + sfc_nb_reserved_rxq(sas) + sfc_nb_txq_reserved(sas);
}

/*
 * The mapping functions that return the SW index of a specific reserved
 * queue rely on the relative order of reserved queues. Some reserved
 * queues are optional, and if they are disabled or not supported, then
 * the function for that specific reserved queue returns the previous
 * valid index of a reserved queue in the dependency chain, or
 * SFC_SW_INDEX_INVALID if it is the first reserved queue in the chain.
 * If at least one of the reserved queues in the chain is enabled, then
 * the corresponding function gives a valid SW index, even if previous
 * functions in the chain returned SFC_SW_INDEX_INVALID, since this value
 * is one less than the first valid SW index.
 *
 * The dependency mechanism is used to avoid rigid defines for SW indices
 * of reserved queues and to allow these indices to shrink and make space
 * for ethdev queue indices when some of the reserved queues are disabled.
 */
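
/*
 * Illustrative example for the functions below (not normative; it assumes
 * SFC_SW_INDEX_INVALID == -1 and a single reserved representor RxQ, i.e.
 * sfc_repr_nb_rxq() == 1):
 * - counters RxQ allocated:
 *       sfc_counters_rxq_sw_index() == 0,
 *       sfc_repr_rxq_sw_index(sas, 0) == 0 + 1 + 0 == 1;
 * - counters RxQ not allocated:
 *       sfc_counters_rxq_sw_index() == SFC_SW_INDEX_INVALID (-1),
 *       sfc_repr_rxq_sw_index(sas, 0) == -1 + 1 + 0 == 0,
 *   i.e. the representor RxQ shifts down to fill the gap.
 */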

static inline sfc_sw_index_t
sfc_counters_rxq_sw_index(const struct sfc_adapter_shared *sas)
{
	return sas->counters_rxq_allocated ? 0 : SFC_SW_INDEX_INVALID;
}

static inline sfc_sw_index_t
sfc_repr_rxq_sw_index(const struct sfc_adapter_shared *sas,
		      unsigned int repr_queue_id)
{
	return sfc_counters_rxq_sw_index(sas) + sfc_repr_nb_rxq(sas) +
	       repr_queue_id;
}

static inline sfc_sw_index_t
sfc_repr_txq_sw_index(const struct sfc_adapter_shared *sas,
		      unsigned int repr_queue_id)
{
	/* The reserved TxQ for representors is the first reserved TxQ */
	return sfc_repr_available(sas) ? repr_queue_id : SFC_SW_INDEX_INVALID;
}

/*
 * The functions below define the mapping from event queue to
 * transmit/receive queue and vice versa.
 * SFC_ETHDEV_QID_INVALID is returned when sw_index is converted to
 * ethdev_qid, but sw_index represents a queue reserved for the driver's
 * internal use.
 * A dedicated event queue is allocated for management and for each Rx
 * and Tx queue.
 * Event queue 0 is used for management events.
 * When counters are supported, one Rx event queue is reserved.
 * When representors are supported, Rx and Tx event queues are reserved.
 * Ethdev Rx event queues follow the reserved event queues.
 * Ethdev Tx event queues follow the ethdev Rx event queues.
 */
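
/*
 * Example event queue layout (illustrative only; assumes the counters RxQ
 * is allocated, representors are available with one reserved RxQ and one
 * reserved TxQ, and the application configures two ethdev Rx queues and
 * one ethdev Tx queue):
 *   EvQ 0 - management events
 *   EvQ 1 - counters RxQ (rxq_sw_index 0)
 *   EvQ 2 - representor RxQ (rxq_sw_index 1)
 *   EvQ 3 - representor TxQ (txq_sw_index 0)
 *   EvQ 4 - ethdev Rx queue 0 (rxq_sw_index 2)
 *   EvQ 5 - ethdev Rx queue 1 (rxq_sw_index 3)
 *   EvQ 6 - ethdev Tx queue 0 (txq_sw_index 1)
 */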

static inline sfc_ethdev_qid_t
sfc_ethdev_rx_qid_by_rxq_sw_index(struct sfc_adapter_shared *sas,
				  sfc_sw_index_t rxq_sw_index)
{
	if (rxq_sw_index < sfc_nb_reserved_rxq(sas))
		return SFC_ETHDEV_QID_INVALID;

	return rxq_sw_index - sfc_nb_reserved_rxq(sas);
}

static inline sfc_sw_index_t
sfc_rxq_sw_index_by_ethdev_rx_qid(struct sfc_adapter_shared *sas,
				  sfc_ethdev_qid_t ethdev_qid)
{
	return sfc_nb_reserved_rxq(sas) + ethdev_qid;
}

static inline sfc_sw_index_t
sfc_evq_sw_index_by_rxq_sw_index(struct sfc_adapter *sa,
				 sfc_sw_index_t rxq_sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;

	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, rxq_sw_index);
	if (ethdev_qid == SFC_ETHDEV_QID_INVALID) {
		/* One EvQ is reserved for management */
		return 1 + rxq_sw_index;
	}

	return sfc_nb_reserved_evq(sas) + ethdev_qid;
}

static inline sfc_ethdev_qid_t
sfc_ethdev_tx_qid_by_txq_sw_index(struct sfc_adapter_shared *sas,
				  sfc_sw_index_t txq_sw_index)
{
	if (txq_sw_index < sfc_nb_txq_reserved(sas))
		return SFC_ETHDEV_QID_INVALID;

	return txq_sw_index - sfc_nb_txq_reserved(sas);
}

static inline sfc_sw_index_t
sfc_txq_sw_index_by_ethdev_tx_qid(struct sfc_adapter_shared *sas,
				  sfc_ethdev_qid_t ethdev_qid)
{
	return sfc_nb_txq_reserved(sas) + ethdev_qid;
}

static inline sfc_sw_index_t
sfc_evq_sw_index_by_txq_sw_index(struct sfc_adapter *sa,
				 sfc_sw_index_t txq_sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;

	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, txq_sw_index);
	if (ethdev_qid == SFC_ETHDEV_QID_INVALID) {
		return sfc_nb_reserved_evq(sas) - sfc_nb_txq_reserved(sas) +
		       txq_sw_index;
	}

	return sfc_nb_reserved_evq(sas) + sa->eth_dev->data->nb_rx_queues +
	       ethdev_qid;
}
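
/*
 * Worked example for the Tx mapping above, using the same illustrative
 * setup as the layout example (sfc_nb_reserved_evq() == 4,
 * sfc_nb_txq_reserved() == 1, two ethdev Rx queues):
 * - txq_sw_index 0 is reserved, so ethdev_qid is invalid and the EvQ SW
 *   index is 4 - 1 + 0 == 3;
 * - txq_sw_index 1 maps to ethdev_qid 0, giving 4 + 2 + 0 == 6.
 */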

int sfc_ev_attach(struct sfc_adapter *sa);
void sfc_ev_detach(struct sfc_adapter *sa);
int sfc_ev_start(struct sfc_adapter *sa);
void sfc_ev_stop(struct sfc_adapter *sa);

int sfc_ev_qinit(struct sfc_adapter *sa,
		 enum sfc_evq_type type, unsigned int type_index,
		 unsigned int entries, int socket_id, struct sfc_evq **evqp);
void sfc_ev_qfini(struct sfc_evq *evq);
int sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index);
void sfc_ev_qstop(struct sfc_evq *evq);

int sfc_ev_qprime(struct sfc_evq *evq);
void sfc_ev_qpoll(struct sfc_evq *evq);

void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);

#ifdef __cplusplus
}
#endif
#endif /* _SFC_EV_H_ */