xref: /spdk/lib/dma/dma.c (revision d4d015a572e1af7b2818e44218c1e661a61545ec)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  */
4 
5 #include "spdk/dma.h"
6 #include "spdk/log.h"
7 #include "spdk/util.h"
8 #include "spdk/likely.h"
9 
/* Serializes all insertions, removals and lookups on g_dma_memory_domains. */
static pthread_mutex_t g_dma_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Global registry of memory domains; the static system domain is added to it
 * by a constructor, so the list is never empty at runtime. */
static TAILQ_HEAD(, spdk_memory_domain) g_dma_memory_domains = TAILQ_HEAD_INITIALIZER(
			g_dma_memory_domains);
13 
/* Internal representation of a memory domain: the device type, the set of
 * optional data-movement callbacks, and copies of the creation-time contexts. */
struct spdk_memory_domain {
	enum spdk_dma_device_type type;
	/* Each callback may be NULL; the corresponding spdk_memory_domain_*()
	 * wrapper then returns -ENOTSUP (or is a no-op for invalidate). */
	spdk_memory_domain_pull_data_cb pull_cb;
	spdk_memory_domain_push_data_cb push_cb;
	spdk_memory_domain_transfer_data_cb transfer_cb;
	spdk_memory_domain_translate_memory_cb translate_cb;
	spdk_memory_domain_invalidate_data_cb invalidate_cb;
	spdk_memory_domain_memzero_cb memzero_cb;
	/* Entry in the global g_dma_memory_domains list. */
	TAILQ_ENTRY(spdk_memory_domain) link;
	/* Heap-allocated copy of the caller's context, owned by this domain. */
	struct spdk_memory_domain_ctx *ctx;
	/* Identifier string (strdup'ed at create time; may be NULL). */
	char *id;
	/* Number of valid bytes in user_ctx below; 0 when no user context. */
	size_t user_ctx_size;
	/* Flexible array member: inline copy of the caller's user context. */
	uint8_t user_ctx[];
};
28 
/* Built-in domain describing plain, DMA-capable host memory. Statically
 * allocated (its id is a string literal, not strdup'ed), so it must never be
 * passed to spdk_memory_domain_destroy(). */
static struct spdk_memory_domain g_system_domain = {
	.type = SPDK_DMA_DEVICE_TYPE_DMA,
	.id = "system",
};
33 
34 static void
35 __attribute__((constructor))
36 _memory_domain_register(void)
37 {
38 	TAILQ_INSERT_TAIL(&g_dma_memory_domains, &g_system_domain, link);
39 }
40 
/* Return the always-available built-in domain for regular host memory. */
struct spdk_memory_domain *
spdk_memory_domain_get_system_domain(void)
{
	return &g_system_domain;
}
46 
47 int
48 spdk_memory_domain_create(struct spdk_memory_domain **_domain, enum spdk_dma_device_type type,
49 			  struct spdk_memory_domain_ctx *ctx, const char *id)
50 {
51 	struct spdk_memory_domain *domain;
52 	size_t ctx_size, user_ctx_size = 0;
53 
54 	if (!_domain) {
55 		return -EINVAL;
56 	}
57 
58 	if (ctx) {
59 		if (ctx->size == 0) {
60 			SPDK_ERRLOG("Context size can't be 0\n");
61 			return -EINVAL;
62 		}
63 		if (ctx->user_ctx &&
64 		    offsetof(struct spdk_memory_domain_ctx, user_ctx_size) + sizeof(ctx->user_ctx_size) <= ctx->size) {
65 			user_ctx_size = ctx->user_ctx_size;
66 		}
67 	}
68 
69 	domain = calloc(1, sizeof(*domain) + user_ctx_size);
70 	if (!domain) {
71 		SPDK_ERRLOG("Failed to allocate memory");
72 		return -ENOMEM;
73 	}
74 
75 	if (id) {
76 		domain->id = strdup(id);
77 		if (!domain->id) {
78 			SPDK_ERRLOG("Failed to allocate memory");
79 			free(domain);
80 			return -ENOMEM;
81 		}
82 	}
83 
84 	if (ctx) {
85 		domain->ctx = calloc(1, sizeof(*domain->ctx));
86 		if (!domain->ctx) {
87 			SPDK_ERRLOG("Failed to allocate memory");
88 			free(domain->id);
89 			free(domain);
90 			return -ENOMEM;
91 		}
92 
93 		ctx_size = spdk_min(sizeof(*domain->ctx), ctx->size);
94 		memcpy(domain->ctx, ctx, ctx_size);
95 		domain->ctx->size = ctx_size;
96 	}
97 
98 	if (user_ctx_size) {
99 		assert(ctx);
100 		memcpy(domain->user_ctx, ctx->user_ctx, user_ctx_size);
101 		domain->user_ctx_size = user_ctx_size;
102 	}
103 
104 	domain->type = type;
105 
106 	pthread_mutex_lock(&g_dma_mutex);
107 	TAILQ_INSERT_TAIL(&g_dma_memory_domains, domain, link);
108 	pthread_mutex_unlock(&g_dma_mutex);
109 
110 	*_domain = domain;
111 
112 	return 0;
113 }
114 
115 void
116 spdk_memory_domain_set_translation(struct spdk_memory_domain *domain,
117 				   spdk_memory_domain_translate_memory_cb translate_cb)
118 {
119 	assert(domain);
120 
121 	domain->translate_cb = translate_cb;
122 }
123 
124 void
125 spdk_memory_domain_set_invalidate(struct spdk_memory_domain *domain,
126 				  spdk_memory_domain_invalidate_data_cb invalidate_cb)
127 {
128 	assert(domain);
129 
130 	domain->invalidate_cb = invalidate_cb;
131 }
132 
133 void
134 spdk_memory_domain_set_pull(struct spdk_memory_domain *domain,
135 			    spdk_memory_domain_pull_data_cb pull_cb)
136 {
137 	assert(domain);
138 
139 	domain->pull_cb = pull_cb;
140 }
141 
142 void
143 spdk_memory_domain_set_push(struct spdk_memory_domain *domain,
144 			    spdk_memory_domain_push_data_cb push_cb)
145 {
146 	assert(domain);
147 
148 	domain->push_cb = push_cb;
149 }
150 
151 void
152 spdk_memory_domain_set_data_transfer(struct spdk_memory_domain *domain,
153 				     spdk_memory_domain_transfer_data_cb transfer_cb)
154 {
155 	assert(domain);
156 
157 	domain->transfer_cb = transfer_cb;
158 }
159 
160 void
161 spdk_memory_domain_set_memzero(struct spdk_memory_domain *domain,
162 			       spdk_memory_domain_memzero_cb memzero_cb)
163 {
164 	assert(domain);
165 
166 	domain->memzero_cb = memzero_cb;
167 }
168 
169 struct spdk_memory_domain_ctx *
170 spdk_memory_domain_get_context(struct spdk_memory_domain *domain)
171 {
172 	assert(domain);
173 
174 	return domain->ctx;
175 }
176 
177 void *
178 spdk_memory_domain_get_user_context(struct spdk_memory_domain *domain, size_t *ctx_size)
179 {
180 	assert(domain);
181 
182 	if (!domain->user_ctx_size) {
183 		return NULL;
184 	}
185 
186 	*ctx_size = domain->user_ctx_size;
187 	return domain->user_ctx;
188 }
189 
/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_dma_device_type spdk_dma_device_type_t;

/* Return the device type the domain was created with. */
spdk_dma_device_type_t
spdk_memory_domain_get_dma_device_type(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->type;
}
200 
201 const char *
202 spdk_memory_domain_get_dma_device_id(struct spdk_memory_domain *domain)
203 {
204 	assert(domain);
205 
206 	return domain->id;
207 }
208 
209 void
210 spdk_memory_domain_destroy(struct spdk_memory_domain *domain)
211 {
212 	if (!domain) {
213 		return;
214 	}
215 
216 	assert(domain != &g_system_domain);
217 
218 	pthread_mutex_lock(&g_dma_mutex);
219 	TAILQ_REMOVE(&g_dma_memory_domains, domain, link);
220 	pthread_mutex_unlock(&g_dma_mutex);
221 
222 	free(domain->ctx);
223 	free(domain->id);
224 	free(domain);
225 }
226 
227 int
228 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
229 			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
230 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
231 {
232 	assert(src_domain);
233 	assert(src_iov);
234 	assert(dst_iov);
235 
236 	if (spdk_unlikely(!src_domain->pull_cb)) {
237 		return -ENOTSUP;
238 	}
239 
240 	return src_domain->pull_cb(src_domain, src_domain_ctx, src_iov, src_iov_cnt, dst_iov, dst_iov_cnt,
241 				   cpl_cb, cpl_cb_arg);
242 }
243 
244 int
245 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
246 			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
247 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
248 {
249 	assert(dst_domain);
250 	assert(dst_iov);
251 	assert(src_iov);
252 
253 	if (spdk_unlikely(!dst_domain->push_cb)) {
254 		return -ENOTSUP;
255 	}
256 
257 	return dst_domain->push_cb(dst_domain, dst_domain_ctx, dst_iov, dst_iovcnt, src_iov, src_iovcnt,
258 				   cpl_cb, cpl_cb_arg);
259 }
260 
261 int
262 spdk_memory_domain_transfer_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
263 				 struct iovec *dst_iov, uint32_t dst_iovcnt,
264 				 struct spdk_memory_domain *src_domain, void *src_domain_ctx,
265 				 struct iovec *src_iov, uint32_t src_iovcnt,
266 				 struct spdk_memory_domain_translation_result *src_translation,
267 				 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
268 {
269 	assert(dst_domain);
270 	assert(dst_iov);
271 	assert(src_iov);
272 
273 	if (spdk_unlikely(!dst_domain->transfer_cb)) {
274 		return -ENOTSUP;
275 	}
276 
277 	return dst_domain->transfer_cb(dst_domain, dst_domain_ctx, dst_iov, dst_iovcnt, src_domain,
278 				       src_domain_ctx, src_iov, src_iovcnt,
279 				       src_translation, cpl_cb, cpl_cb_arg);
280 }
281 
282 int
283 spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
284 				  struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
285 				  void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
286 {
287 	assert(src_domain);
288 	assert(dst_domain);
289 	assert(result);
290 
291 	if (spdk_unlikely(!src_domain->translate_cb)) {
292 		return -ENOTSUP;
293 	}
294 
295 	return src_domain->translate_cb(src_domain, src_domain_ctx, dst_domain, dst_domain_ctx, addr, len,
296 					result);
297 }
298 
299 void
300 spdk_memory_domain_invalidate_data(struct spdk_memory_domain *domain, void *domain_ctx,
301 				   struct iovec *iov, uint32_t iovcnt)
302 {
303 	assert(domain);
304 
305 	if (spdk_unlikely(!domain->invalidate_cb)) {
306 		return;
307 	}
308 
309 	domain->invalidate_cb(domain, domain_ctx, iov, iovcnt);
310 }
311 
312 int
313 spdk_memory_domain_memzero(struct spdk_memory_domain *domain, void *domain_ctx, struct iovec *iov,
314 			   uint32_t iovcnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
315 {
316 	assert(domain);
317 	assert(iov);
318 	assert(iovcnt);
319 
320 	if (spdk_unlikely(!domain->memzero_cb)) {
321 		return -ENOTSUP;
322 	}
323 
324 	return domain->memzero_cb(domain, domain_ctx, iov, iovcnt, cpl_cb, cpl_cb_arg);
325 }
326 
327 struct spdk_memory_domain *
328 spdk_memory_domain_get_first(const char *id)
329 {
330 	struct spdk_memory_domain *domain;
331 
332 	if (!id) {
333 		pthread_mutex_lock(&g_dma_mutex);
334 		domain = TAILQ_FIRST(&g_dma_memory_domains);
335 		pthread_mutex_unlock(&g_dma_mutex);
336 
337 		return domain;
338 	}
339 
340 	pthread_mutex_lock(&g_dma_mutex);
341 	TAILQ_FOREACH(domain, &g_dma_memory_domains, link) {
342 		if (!strcmp(domain->id, id)) {
343 			break;
344 		}
345 	}
346 	pthread_mutex_unlock(&g_dma_mutex);
347 
348 	return domain;
349 }
350 
351 struct spdk_memory_domain *
352 spdk_memory_domain_get_next(struct spdk_memory_domain *prev, const char *id)
353 {
354 	struct spdk_memory_domain *domain;
355 
356 	if (!prev) {
357 		return NULL;
358 	}
359 
360 	pthread_mutex_lock(&g_dma_mutex);
361 	domain = TAILQ_NEXT(prev, link);
362 	pthread_mutex_unlock(&g_dma_mutex);
363 
364 	if (!id || !domain) {
365 		return domain;
366 	}
367 
368 	pthread_mutex_lock(&g_dma_mutex);
369 	TAILQ_FOREACH_FROM(domain, &g_dma_memory_domains, link) {
370 		if (!strcmp(domain->id, id)) {
371 			break;
372 		}
373 	}
374 	pthread_mutex_unlock(&g_dma_mutex);
375 
376 	return domain;
377 }
378