/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017-2022 NXP
 *
 */
#ifndef BUS_DPAA_DRIVER_H
#define BUS_DPAA_DRIVER_H

#include <rte_compat.h>
#include <dev_driver.h>
#include <rte_mbuf_dyn.h>
#include <rte_mempool.h>

#include <dpaax_iova_table.h>

#include <dpaa_of.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <netcfg.h>

#ifdef __cplusplus
extern "C" {
#endif

/* This sequence number field is used to store the event entry index for
 * driver-specific usage. For parallel mode queues an invalid index is
 * set, while for atomic mode queues a valid value in the range 1 to 16
 * is set.
 */
#define DPAA_INVALID_MBUF_SEQN  0

typedef uint32_t dpaa_seqn_t;
extern int dpaa_seqn_dynfield_offset;

/**
 * Read dpaa sequence number from mbuf.
 *
 * @param mbuf Structure to read from.
 * @return pointer to dpaa sequence number.
 */
__rte_internal
static inline dpaa_seqn_t *
dpaa_seqn(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf, dpaa_seqn_dynfield_offset,
		dpaa_seqn_t *);
}
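
/* Illustrative usage sketch (hypothetical variable names): a datapath can
 * check whether the driver stored a valid DQRR entry index for an mbuf
 * dequeued from an atomic mode queue, presumably mapping the 1..16 range
 * back to a 0-based index:
 *
 *	dpaa_seqn_t seqn = *dpaa_seqn(mbuf);
 *
 *	if (seqn != DPAA_INVALID_MBUF_SEQN) {
 *		uint32_t dqrr_index = seqn - 1;
 *		... consume or acknowledge the held DQRR entry ...
 *	}
 */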

#define DPAA_MEMPOOL_OPS_NAME	"dpaa"

#define DEV_TO_DPAA_DEVICE(ptr)	\
		container_of(ptr, struct rte_dpaa_device, device)

/* DPAA SoC identifier; if this file is not available, it can be concluded
 * that the board is non-DPAA. A single slot is currently supported.
 */
#define DPAA_SOC_ID_FILE	"/sys/devices/soc0/soc_id"

#define SVR_LS1043A_FAMILY	0x87920000
#define SVR_LS1046A_FAMILY	0x87070000
#define SVR_MASK		0xffff0000

/** Device driver supports link state interrupt */
#define RTE_DPAA_DRV_INTR_LSC  0x0008

/** Number of supported QDMA devices */
#define RTE_DPAA_QDMA_DEVICES  1

#define RTE_DEV_TO_DPAA_CONST(ptr) \
	container_of(ptr, const struct rte_dpaa_device, device)

extern unsigned int dpaa_svr_family;

struct rte_dpaa_device;
struct rte_dpaa_driver;

enum rte_dpaa_type {
	FSL_DPAA_ETH = 1,
	FSL_DPAA_CRYPTO,
	FSL_DPAA_QDMA
};

struct dpaa_device_id {
	uint8_t fman_id; /**< Fman interface ID, for ETH type device */
	uint8_t mac_id; /**< Fman MAC interface ID, for ETH type device */
	uint16_t dev_id; /**< Device Identifier from DPDK */
};

struct rte_dpaa_device {
	TAILQ_ENTRY(rte_dpaa_device) next;
	struct rte_device device;
	union {
		struct rte_eth_dev *eth_dev;
		struct rte_cryptodev *crypto_dev;
		struct rte_dma_dev *dmadev;
	};
	struct rte_dpaa_driver *driver;
	struct dpaa_device_id id;
	struct rte_intr_handle *intr_handle;
	enum rte_dpaa_type device_type; /**< Ethernet, crypto or qdma type device */
	char name[RTE_ETH_NAME_MAX_LEN];
};

typedef int (*rte_dpaa_probe_t)(struct rte_dpaa_driver *dpaa_drv,
				struct rte_dpaa_device *dpaa_dev);
typedef int (*rte_dpaa_remove_t)(struct rte_dpaa_device *dpaa_dev);

struct rte_dpaa_driver {
	TAILQ_ENTRY(rte_dpaa_driver) next;
	struct rte_driver driver;
	enum rte_dpaa_type drv_type;
	rte_dpaa_probe_t probe;
	rte_dpaa_remove_t remove;
	uint32_t drv_flags;                 /**< Flags for controlling device. */
};

/* Create storage for DQRR entries per lcore */
#define DPAA_PORTAL_DEQUEUE_DEPTH	16
struct dpaa_portal_dqrr {
	void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
	uint64_t dqrr_held;
	uint8_t dqrr_size;
};

struct dpaa_portal {
	uint32_t bman_idx; /**< BMAN Portal ID */
	uint32_t qman_idx; /**< QMAN Portal ID */
	struct dpaa_portal_dqrr dpaa_held_bufs;
	uint64_t tid; /**< Parent thread ID for this portal */
};

RTE_DECLARE_PER_LCORE(struct dpaa_portal *, dpaa_io);

#define DPAA_PER_LCORE_PORTAL \
	RTE_PER_LCORE(dpaa_io)
#define DPAA_PER_LCORE_DQRR_SIZE \
	RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_size
#define DPAA_PER_LCORE_DQRR_HELD \
	RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_held
#define DPAA_PER_LCORE_DQRR_MBUF(i) \
	RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.mbuf[i]
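
/* Illustrative sketch (hypothetical variable names): a dequeue path may
 * park a DQRR entry and its mbuf in the per-lcore storage above so that
 * it can be consumed or acknowledged later:
 *
 *	DPAA_PER_LCORE_DQRR_MBUF(dqrr_index) = mbuf;
 *	DPAA_PER_LCORE_DQRR_HELD |= 1ULL << dqrr_index;
 *	DPAA_PER_LCORE_DQRR_SIZE++;
 */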

/* Various structures representing contiguous memory maps */
struct dpaa_memseg {
	TAILQ_ENTRY(dpaa_memseg) next;
	char *vaddr;
	rte_iova_t iova;
	size_t len;
};

TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
extern struct dpaa_memseg_list rte_dpaa_memsegs;

/* Either iterate over the list of internal memseg references or fall back
 * to the EAL memseg-based iova-to-virtual translation.
 */
__rte_internal
static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
{
	struct dpaa_memseg *ms;
	void *va;

	va = dpaax_iova_table_get_va(paddr);
	if (likely(va != NULL))
		return va;

	/* Check if the address is already part of the memseg list internally
	 * maintained by the dpaa driver.
	 */
	TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
		if (paddr >= ms->iova && paddr <
			ms->iova + ms->len)
			return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
	}

	/* If not found, fall back to searching the full EAL memseg list */
	va = rte_mem_iova2virt(paddr);

	dpaax_iova_table_update(paddr, va, RTE_CACHE_LINE_SIZE);

	return va;
}
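
/* Illustrative usage sketch: translate the physical buffer address carried
 * in a hardware descriptor back to a virtual address before accessing it
 * from software. 'fd_addr' is a hypothetical physical address read from a
 * frame descriptor.
 *
 *	void *buf = rte_dpaa_mem_ptov(fd_addr);
 */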

__rte_internal
static inline rte_iova_t
rte_dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms)
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);

	return (rte_iova_t)0;
}
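
/* Illustrative usage sketch: convert a virtual buffer address to the I/O
 * address expected by the hardware. 'mbuf' is a hypothetical packet buffer
 * whose data area is being handed to the DPAA block.
 *
 *	rte_iova_t io_addr = rte_dpaa_mem_vtop(rte_pktmbuf_mtod(mbuf, void *));
 */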

/**
 * Register a DPAA driver.
 *
 * @param driver
 *   A pointer to a rte_dpaa_driver structure describing the driver
 *   to be registered.
 */
__rte_internal
void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);

/**
 * Unregister a DPAA driver.
 *
 * @param driver
 *	A pointer to a rte_dpaa_driver structure describing the driver
 *	to be unregistered.
 */
__rte_internal
void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);

/**
 * Initialize a DPAA portal.
 *
 * @param arg
 *	Per thread ID
 *
 * @return
 *	0 in case of success, error otherwise
 */
__rte_internal
int rte_dpaa_portal_init(void *arg);
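
/* Illustrative sketch: a datapath thread is expected to attach to a portal
 * before its first enqueue/dequeue; on failure nothing can be processed.
 * The argument value shown here is only for illustration.
 *
 *	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
 *		if (rte_dpaa_portal_init((void *)0))
 *			return 0;
 *	}
 */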

__rte_internal
int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);

__rte_internal
int rte_dpaa_portal_fq_close(struct qman_fq *fq);

/**
 * Clean up a DPAA portal.
 */
void dpaa_portal_finish(void *arg);

/** Helper for DPAA device registration from a driver (eth, crypto, qdma) instance */
#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
RTE_INIT(dpaainitfn_ ##nm) \
{\
	(dpaa_drv).driver.name = RTE_STR(nm);\
	rte_dpaa_driver_register(&dpaa_drv); \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
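
/* Illustrative sketch of how a PMD may use the macro above. The driver
 * structure, probe/remove callbacks and the "net_dpaa_x" name are
 * hypothetical placeholders, not definitions from this header:
 *
 *	static struct rte_dpaa_driver rte_dpaa_x_pmd = {
 *		.drv_type = FSL_DPAA_ETH,
 *		.probe = dpaa_x_probe,
 *		.remove = dpaa_x_remove,
 *	};
 *	RTE_PMD_REGISTER_DPAA(net_dpaa_x, rte_dpaa_x_pmd);
 */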

__rte_internal
struct fm_eth_port_cfg *dpaa_get_eth_port_cfg(int dev_id);

#ifdef __cplusplus
}
#endif

#endif /* BUS_DPAA_DRIVER_H */