xref: /dpdk/drivers/net/sfc/sfc_nic_dma.c (revision 3037e6cf3ddec72a4091b5f023301152a0640900)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Xilinx, Inc.
 */

#include <rte_mempool.h>
#include <rte_memzone.h>

#include "efx.h"

#include "sfc_log.h"
#include "sfc.h"
#include "sfc_nic_dma.h"

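/*
 * Remember a NIC base / target base address pair in the adapter's NIC DMA
 * region cache consulted by sfc_nic_dma_map(). Returns ENOMEM if the
 * fixed-size regions array is already full.
 */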
static int
sfc_nic_dma_add_region(struct sfc_nic_dma_info *nic_dma_info,
		       rte_iova_t nic_base, rte_iova_t trgt_base,
		       size_t map_len)
{
	struct sfc_nic_dma_region *region;

	if (nic_dma_info->nb_regions >= RTE_DIM(nic_dma_info->regions))
		return ENOMEM;

	region = &nic_dma_info->regions[nic_dma_info->nb_regions];
	region->nic_base = nic_base;
	region->trgt_base = trgt_base;
	region->trgt_end = trgt_base + map_len;

	nic_dma_info->nb_regions++;
	return 0;
}

/*
 * Register a mapping for every IOVA mempool at creation time so that
 * every mbuf has a mapping.
 */

struct sfc_nic_dma_register_mempool_data {
	struct sfc_adapter		*sa;
	int				rc;
};

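/*
 * rte_mempool_mem_iter() callback: register a NIC DMA mapping for one
 * memory chunk of a mempool. Errors are recorded in the opaque
 * sfc_nic_dma_register_mempool_data so that iteration can continue.
 */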
static void
sfc_nic_dma_register_mempool_chunk(struct rte_mempool *mp __rte_unused,
				   void *opaque,
				   struct rte_mempool_memhdr *memhdr,
				   unsigned mem_idx __rte_unused)
{
	struct sfc_nic_dma_register_mempool_data *register_data = opaque;
	struct sfc_adapter *sa = register_data->sa;
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	efsys_dma_addr_t nic_base;
	efsys_dma_addr_t trgt_base;
	size_t map_len;
	int rc;

	if (memhdr->iova == RTE_BAD_IOVA)
		return;

	/*
	 * Check if the memory chunk is mapped already. In that case, there's
	 * nothing left to do.
	 */
	nic_base = sfc_nic_dma_map(&sas->nic_dma_info, memhdr->iova,
				   memhdr->len);
	if (nic_base != RTE_BAD_IOVA)
		return;

	rc = efx_nic_dma_config_add(sa->nic, memhdr->iova, memhdr->len,
				    &nic_base, &trgt_base, &map_len);
	if (rc != 0) {
		sfc_err(sa,
			"cannot handle memory buffer VA=%p IOVA=%" PRIx64 " length=0x%" PRIx64 ": %s",
			memhdr->addr, (uint64_t)memhdr->iova, memhdr->len,
			rte_strerror(rc));
		register_data->rc = rc;
		return;
	}

	sfc_info(sa,
		 "registered memory buffer VA=%p IOVA=%" PRIx64 " length=0x%" PRIx64 " -> NIC_BASE=%" PRIx64 " TRGT_BASE=%" PRIx64 " MAP_LEN=%" PRIx64,
		 memhdr->addr, (uint64_t)memhdr->iova, memhdr->len,
		 (uint64_t)nic_base, (uint64_t)trgt_base, (uint64_t)map_len);

	rc = sfc_nic_dma_add_region(&sas->nic_dma_info, nic_base, trgt_base,
				    map_len);
	if (rc != 0) {
		sfc_err(sa, "failed to add regioned NIC DMA mapping: %s",
			rte_strerror(rc));
		register_data->rc = rc;
	}
}

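/*
 * Register NIC DMA mappings for all memory chunks of the given mempool and,
 * if the adapter is already started, push the updated configuration to the
 * NIC. Mempools marked RTE_MEMPOOL_F_NON_IO are skipped.
 */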
static int
sfc_nic_dma_register_mempool(struct sfc_adapter *sa, struct rte_mempool *mp)
{
	struct sfc_nic_dma_register_mempool_data register_data = {
		.sa = sa,
	};
	uint32_t iters;
	int result = 0;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
		return 0;

	iters = rte_mempool_mem_iter(mp, sfc_nic_dma_register_mempool_chunk,
				     &register_data);
	if (iters != mp->nb_mem_chunks) {
		sfc_err(sa,
			"failed to iterate over memory chunks, some mbufs may be unusable");
		result = EFAULT;
		/*
		 * Return an error, but try to continue: the failure is
		 * asynchronous and cannot be handled properly here.
		 */
	}

	if (register_data.rc != 0) {
		sfc_err(sa,
			"failed to map some memory chunks (%s), some mbufs may be unusable",
			rte_strerror(register_data.rc));
		result = register_data.rc;
		/* Try to continue */
	}

	/*
	 * There is no point in applying mapping changes triggered by mempool
	 * registration before the adapter is started: the configuration is
	 * propagated on start, and the mbuf mapping is required in the
	 * started state only.
	 */
	if (sa->state == SFC_ETHDEV_STARTED) {
		/*
		 * It's safe to reconfigure the DMA mapping even if no changes
		 * have been made during memory chunks iteration. In that case,
		 * this operation will not change anything either.
		 */
		rc = efx_nic_dma_reconfigure(sa->nic);
		if (rc != 0) {
			sfc_err(sa, "cannot reconfigure NIC DMA: %s",
				rte_strerror(rc));
			result = rc;
		}
	}

	return result;
}

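/*
 * Mempool lifecycle event callback: register mappings for a mempool as soon
 * as it becomes ready so that its mbufs are usable for DMA.
 */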
static void
sfc_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
		     void *user_data)
{
	struct sfc_adapter *sa = user_data;

	if (event != RTE_MEMPOOL_EVENT_READY)
		return;

	sfc_adapter_lock(sa);

	(void)sfc_nic_dma_register_mempool(sa, mp);

	sfc_adapter_unlock(sa);
}

struct sfc_mempool_walk_data {
	struct sfc_adapter		*sa;
	int				rc;
};

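/*
 * rte_mempool_walk() callback: register mappings for an existing mempool
 * and remember the last failure, if any.
 */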
static void
sfc_mempool_walk_cb(struct rte_mempool *mp, void *arg)
{
	struct sfc_mempool_walk_data *walk_data = arg;
	int rc;

	rc = sfc_nic_dma_register_mempool(walk_data->sa, mp);
	if (rc != 0)
		walk_data->rc = rc;
}

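/*
 * Attach in the regioned DMA mapping case: subscribe to mempool events to
 * catch pools created later and register mappings for all existing mempools.
 */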
static int
sfc_nic_dma_attach_regioned(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	struct sfc_mempool_walk_data walk_data = {
		.sa = sa,
	};
	int rc;

	rc = rte_mempool_event_callback_register(sfc_mempool_event_cb, sa);
	if (rc != 0) {
		sfc_err(sa, "failed to register mempool event callback");
		rc = EFAULT;
		goto fail_mempool_event_callback_register;
	}

	rte_mempool_walk(sfc_mempool_walk_cb, &walk_data);
	if (walk_data.rc != 0) {
		rc = walk_data.rc;
		goto fail_mempool_walk;
	}

	return 0;

fail_mempool_walk:
	rte_mempool_event_callback_unregister(sfc_mempool_event_cb, sa);
	sas->nic_dma_info.nb_regions = 0;

fail_mempool_event_callback_register:
	return rc;
}

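/*
 * Undo sfc_nic_dma_attach_regioned(): unsubscribe from mempool events and
 * drop all cached DMA regions.
 */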
static void
sfc_nic_dma_detach_regioned(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);

	rte_mempool_event_callback_unregister(sfc_mempool_event_cb, sa);
	sas->nic_dma_info.nb_regions = 0;
}

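/*
 * Set up NIC DMA mapping support according to the mapping type reported by
 * the NIC: flat mapping needs no extra work, regioned mapping requires
 * tracking of mempool registrations.
 */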
int
sfc_nic_dma_attach(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;

	sfc_log_init(sa, "dma_mapping_type=%u", encp->enc_dma_mapping);

	switch (encp->enc_dma_mapping) {
	case EFX_NIC_DMA_MAPPING_FLAT:
		/* No mapping required */
		rc = 0;
		break;
	case EFX_NIC_DMA_MAPPING_REGIONED:
		rc = sfc_nic_dma_attach_regioned(sa);
		break;
	default:
		rc = ENOTSUP;
		break;
	}

	sfc_log_init(sa, "done: %s", rte_strerror(rc));
	return rc;
}

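/* Tear down whatever sfc_nic_dma_attach() set up for the mapping type. */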
void
sfc_nic_dma_detach(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	sfc_log_init(sa, "dma_mapping_type=%u", encp->enc_dma_mapping);

	switch (encp->enc_dma_mapping) {
	case EFX_NIC_DMA_MAPPING_FLAT:
		/* Nothing to do here */
		break;
	case EFX_NIC_DMA_MAPPING_REGIONED:
		sfc_nic_dma_detach_regioned(sa);
		break;
	default:
		break;
	}

	sfc_log_init(sa, "done");
}

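/*
 * Map a memzone for NIC DMA. Try the existing DMA configuration first and,
 * if the memzone is not covered yet, add a new region, reconfigure the NIC
 * and retry the mapping.
 */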
int
sfc_nic_dma_mz_map(struct sfc_adapter *sa, const struct rte_memzone *mz,
		   efx_nic_dma_addr_type_t addr_type,
		   efsys_dma_addr_t *dma_addr)
{
	efsys_dma_addr_t nic_base;
	efsys_dma_addr_t trgt_base;
	size_t map_len;
	int rc;

	/*
	 * Check if the memzone can already be mapped without changing the
	 * DMA configuration.
	 * libefx is used instead of the driver's cache since it can take the
	 * buffer type into account and make a better decision for buffers
	 * that are mapped by the FW itself.
	 */
	rc = efx_nic_dma_map(sa->nic, addr_type, mz->iova, mz->len, dma_addr);
	if (rc == 0)
		return 0;

	if (rc != ENOENT) {
		sfc_err(sa,
			"failed to map memory buffer VA=%p IOVA=%" PRIx64 " length=0x%" PRIx64 ": %s",
			mz->addr, (uint64_t)mz->iova, mz->len,
			rte_strerror(rc));
		return rc;
	}

	rc = efx_nic_dma_config_add(sa->nic, mz->iova, mz->len,
				    &nic_base, &trgt_base, &map_len);
	if (rc != 0) {
		sfc_err(sa,
			"cannot handle memory buffer VA=%p IOVA=%" PRIx64 " length=0x%" PRIx64 ": %s",
			mz->addr, (uint64_t)mz->iova, mz->len,
			rte_strerror(rc));
		return EFAULT;
	}

	rc = sfc_nic_dma_add_region(&sfc_sa2shared(sa)->nic_dma_info,
				    nic_base, trgt_base, map_len);
	if (rc != 0) {
		sfc_err(sa,
			"failed to add DMA region VA=%p IOVA=%" PRIx64 " length=0x%" PRIx64 ": %s",
			mz->addr, (uint64_t)mz->iova, mz->len,
			rte_strerror(rc));
		return rc;
	}

	rc = efx_nic_dma_reconfigure(sa->nic);
	if (rc != 0) {
		sfc_err(sa, "failed to reconfigure DMA");
		return rc;
	}

	rc = efx_nic_dma_map(sa->nic, addr_type, mz->iova, mz->len, dma_addr);
	if (rc != 0) {
		sfc_err(sa,
			"failed to map memory buffer VA=%p IOVA=%" PRIx64 " length=0x%" PRIx64 ": %s",
			mz->addr, (uint64_t)mz->iova, mz->len,
			rte_strerror(rc));
		return rc;
	}

	return 0;
}
336