/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/dma.h"
#include "spdk/log.h"
#include "spdk/util.h"
#include "spdk/likely.h"

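/* All registered memory domains, including the built-in system domain, live in
 * this global list. The list itself is protected by g_dma_mutex; the domains
 * are not, so a domain must not be destroyed while other threads still use it.
 */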
static pthread_mutex_t g_dma_mutex = PTHREAD_MUTEX_INITIALIZER;
static TAILQ_HEAD(, spdk_memory_domain) g_dma_memory_domains = TAILQ_HEAD_INITIALIZER(
			g_dma_memory_domains);

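/* A memory domain describes memory that may not be directly accessible by the
 * local CPU. All of the pull/push/translate/invalidate/memzero callbacks are
 * optional and are installed by the domain's creator via the setters below.
 */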
struct spdk_memory_domain {
	enum spdk_dma_device_type type;
	spdk_memory_domain_pull_data_cb pull_cb;
	spdk_memory_domain_push_data_cb push_cb;
	spdk_memory_domain_translate_memory_cb translate_cb;
	spdk_memory_domain_invalidate_data_cb invalidate_cb;
	spdk_memory_domain_memzero_cb memzero_cb;
	TAILQ_ENTRY(spdk_memory_domain) link;
	struct spdk_memory_domain_ctx *ctx;
	char *id;
};

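/* The system domain represents plain, locally accessible process memory. It is
 * defined statically and registered before main() runs via a constructor, so
 * it is the first entry in the list.
 */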
static struct spdk_memory_domain g_system_domain = {
	.type = SPDK_DMA_DEVICE_TYPE_DMA,
	.id = "system",
};

static void
__attribute__((constructor))
_memory_domain_register(void)
{
	TAILQ_INSERT_TAIL(&g_dma_memory_domains, &g_system_domain, link);
}

struct spdk_memory_domain *
spdk_memory_domain_get_system_domain(void)
{
	return &g_system_domain;
}

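/* Create a new memory domain and append it to the global list. The caller's
 * ctx is copied by value, truncated to the structure size this library was
 * built with, so the domain keeps no reference to the caller's structure.
 */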
int
spdk_memory_domain_create(struct spdk_memory_domain **_domain, enum spdk_dma_device_type type,
			  struct spdk_memory_domain_ctx *ctx, const char *id)
{
	struct spdk_memory_domain *domain;
	size_t ctx_size;

	if (!_domain) {
		return -EINVAL;
	}

	if (ctx && ctx->size == 0) {
		SPDK_ERRLOG("Context size can't be 0\n");
		return -EINVAL;
	}

	domain = calloc(1, sizeof(*domain));
	if (!domain) {
		SPDK_ERRLOG("Failed to allocate memory\n");
		return -ENOMEM;
	}

	if (id) {
		domain->id = strdup(id);
		if (!domain->id) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain);
			return -ENOMEM;
		}
	}

	if (ctx) {
		domain->ctx = calloc(1, sizeof(*domain->ctx));
		if (!domain->ctx) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain->id);
			free(domain);
			return -ENOMEM;
		}

		/* Copy no more than the size this library knows about, and
		 * record the number of bytes actually copied.
		 */
		ctx_size = spdk_min(sizeof(*domain->ctx), ctx->size);
		memcpy(domain->ctx, ctx, ctx_size);
		domain->ctx->size = ctx_size;
	}

	domain->type = type;

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_INSERT_TAIL(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	*_domain = domain;

	return 0;
}

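/* The setters below install the optional callbacks one at a time. They are
 * meant to be called by the domain's creator right after
 * spdk_memory_domain_create(); note that the assignments are not locked.
 */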
void
spdk_memory_domain_set_translation(struct spdk_memory_domain *domain,
				   spdk_memory_domain_translate_memory_cb translate_cb)
{
	assert(domain);

	domain->translate_cb = translate_cb;
}

void
spdk_memory_domain_set_invalidate(struct spdk_memory_domain *domain,
				  spdk_memory_domain_invalidate_data_cb invalidate_cb)
{
	assert(domain);

	domain->invalidate_cb = invalidate_cb;
}

void
spdk_memory_domain_set_pull(struct spdk_memory_domain *domain,
			    spdk_memory_domain_pull_data_cb pull_cb)
{
	assert(domain);

	domain->pull_cb = pull_cb;
}

void
spdk_memory_domain_set_push(struct spdk_memory_domain *domain,
			    spdk_memory_domain_push_data_cb push_cb)
{
	assert(domain);

	domain->push_cb = push_cb;
}

void
spdk_memory_domain_set_memzero(struct spdk_memory_domain *domain,
			       spdk_memory_domain_memzero_cb memzero_cb)
{
	assert(domain);

	domain->memzero_cb = memzero_cb;
}

struct spdk_memory_domain_ctx *
spdk_memory_domain_get_context(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->ctx;
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_dma_device_type spdk_dma_device_type_t;

spdk_dma_device_type_t
spdk_memory_domain_get_dma_device_type(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->type;
}

const char *
spdk_memory_domain_get_dma_device_id(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->id;
}

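/* Remove the domain from the global list and free it. The statically allocated
 * system domain must never be passed here; this is only checked by an assert.
 */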
void
spdk_memory_domain_destroy(struct spdk_memory_domain *domain)
{
	if (!domain) {
		return;
	}

	assert(domain != &g_system_domain);

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_REMOVE(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	free(domain->ctx);
	free(domain->id);
	free(domain);
}

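/* Asynchronously copy data out of src_domain into the local buffers described
 * by dst_iov. Completion is signaled through cpl_cb; returns -ENOTSUP if the
 * domain has no pull callback installed.
 */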
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(src_domain);
	assert(src_iov);
	assert(dst_iov);

	if (spdk_unlikely(!src_domain->pull_cb)) {
		return -ENOTSUP;
	}

	return src_domain->pull_cb(src_domain, src_domain_ctx, src_iov, src_iov_cnt, dst_iov, dst_iov_cnt,
				   cpl_cb, cpl_cb_arg);
}

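/* Asynchronously copy data from the local buffers described by src_iov into
 * dst_domain. Completion is signaled through cpl_cb; returns -ENOTSUP if the
 * domain has no push callback installed.
 */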
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(dst_domain);
	assert(dst_iov);
	assert(src_iov);

	if (spdk_unlikely(!dst_domain->push_cb)) {
		return -ENOTSUP;
	}

	return dst_domain->push_cb(dst_domain, dst_domain_ctx, dst_iov, dst_iovcnt, src_iov, src_iovcnt,
				   cpl_cb, cpl_cb_arg);
}

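/* Synchronously translate an address in src_domain into a form that dst_domain
 * can access, storing the outcome in result. Returns -ENOTSUP if the source
 * domain has no translation callback installed.
 */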
int
spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
				  struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
				  void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	assert(src_domain);
	assert(dst_domain);
	assert(result);

	if (spdk_unlikely(!src_domain->translate_cb)) {
		return -ENOTSUP;
	}

	return src_domain->translate_cb(src_domain, src_domain_ctx, dst_domain, dst_domain_ctx, addr, len,
					result);
}

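/* Notify the domain that the given buffers are no longer valid. This is a
 * best-effort notification: if no invalidate callback is installed, the call
 * is silently a no-op.
 */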
void
spdk_memory_domain_invalidate_data(struct spdk_memory_domain *domain, void *domain_ctx,
				   struct iovec *iov, uint32_t iovcnt)
{
	assert(domain);

	if (spdk_unlikely(!domain->invalidate_cb)) {
		return;
	}

	domain->invalidate_cb(domain, domain_ctx, iov, iovcnt);
}

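/* Asynchronously zero the memory described by iov within the domain.
 * Completion is signaled through cpl_cb; returns -ENOTSUP if the domain has no
 * memzero callback installed.
 */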
int
spdk_memory_domain_memzero(struct spdk_memory_domain *domain, void *domain_ctx, struct iovec *iov,
			   uint32_t iovcnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(domain);
	assert(iov);
	assert(iovcnt);

	if (spdk_unlikely(!domain->memzero_cb)) {
		return -ENOTSUP;
	}

	return domain->memzero_cb(domain, domain_ctx, iov, iovcnt, cpl_cb, cpl_cb_arg);
}

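/* Return the first registered domain or, when id is given, the first domain
 * whose id matches it. The mutex is held only while walking the list, so the
 * caller is responsible for ensuring the returned domain is not destroyed
 * concurrently.
 */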
struct spdk_memory_domain *
spdk_memory_domain_get_first(const char *id)
{
	struct spdk_memory_domain *domain;

	if (!id) {
		pthread_mutex_lock(&g_dma_mutex);
		domain = TAILQ_FIRST(&g_dma_memory_domains);
		pthread_mutex_unlock(&g_dma_mutex);

		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH(domain, &g_dma_memory_domains, link) {
		/* Domains may be created without an id, so guard the strcmp(). */
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}

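/* Return the domain following prev or, when id is given, the next domain whose
 * id matches it. Intended to be used together with
 * spdk_memory_domain_get_first() to iterate over the registered domains.
 */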
struct spdk_memory_domain *
spdk_memory_domain_get_next(struct spdk_memory_domain *prev, const char *id)
{
	struct spdk_memory_domain *domain;

	if (!prev) {
		return NULL;
	}

	pthread_mutex_lock(&g_dma_mutex);
	domain = TAILQ_NEXT(prev, link);
	pthread_mutex_unlock(&g_dma_mutex);

	if (!id || !domain) {
		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH_FROM(domain, &g_dma_memory_domains, link) {
		/* Domains may be created without an id, so guard the strcmp(). */
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}