xref: /spdk/lib/dma/dma.c (revision 927f1fd57bd004df581518466ec4c1b8083e5d23)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Nvidia Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/dma.h"
#include "spdk/log.h"
#include "spdk/util.h"
#include "spdk/likely.h"

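/* Global registry of created memory domains, protected by g_dma_mutex. */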
pthread_mutex_t g_dma_mutex = PTHREAD_MUTEX_INITIALIZER;
TAILQ_HEAD(, spdk_memory_domain) g_dma_memory_domains = TAILQ_HEAD_INITIALIZER(
			g_dma_memory_domains);

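/*
 * A memory domain represents memory owned by a DMA-capable device or another
 * address space. The owner may register callbacks for moving data in and out
 * of the domain (pull/push), translating addresses into another domain, and
 * zeroing memory. The ctx structure and id string are copies of what the
 * creator passed to spdk_memory_domain_create().
 */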
struct spdk_memory_domain {
	enum spdk_dma_device_type type;
	spdk_memory_domain_pull_data_cb pull_cb;
	spdk_memory_domain_push_data_cb push_cb;
	spdk_memory_domain_translate_memory_cb translate_cb;
	spdk_memory_domain_memzero_cb memzero_cb;
	TAILQ_ENTRY(spdk_memory_domain) link;
	struct spdk_memory_domain_ctx *ctx;
	char *id;
};

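/*
 * Allocate a new memory domain, copy the optional context and id, and add it
 * to the global registry. A minimal usage sketch (assumes the
 * SPDK_DMA_DEVICE_TYPE_RDMA value from spdk/dma.h; the id string is an
 * arbitrary example):
 *
 *	struct spdk_memory_domain *domain;
 *	int rc = spdk_memory_domain_create(&domain, SPDK_DMA_DEVICE_TYPE_RDMA,
 *					   NULL, "mlx5_0");
 *	if (rc == 0) {
 *		...
 *		spdk_memory_domain_destroy(domain);
 *	}
 */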
int
spdk_memory_domain_create(struct spdk_memory_domain **_domain, enum spdk_dma_device_type type,
			  struct spdk_memory_domain_ctx *ctx, const char *id)
{
	struct spdk_memory_domain *domain;
	size_t ctx_size;

	if (!_domain) {
		return -EINVAL;
	}

	if (ctx && ctx->size == 0) {
		SPDK_ERRLOG("Context size can't be 0\n");
		return -EINVAL;
	}

	domain = calloc(1, sizeof(*domain));
	if (!domain) {
		SPDK_ERRLOG("Failed to allocate memory\n");
		return -ENOMEM;
	}

	if (id) {
		domain->id = strdup(id);
		if (!domain->id) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain);
			return -ENOMEM;
		}
	}

	if (ctx) {
		domain->ctx = calloc(1, sizeof(*domain->ctx));
		if (!domain->ctx) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain->id);
			free(domain);
			return -ENOMEM;
		}

		/* Copy no more of the caller's context than this library knows about. */
		ctx_size = spdk_min(sizeof(*domain->ctx), ctx->size);
		memcpy(domain->ctx, ctx, ctx_size);
		domain->ctx->size = ctx_size;
	}

	domain->type = type;

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_INSERT_TAIL(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	*_domain = domain;

	return 0;
}

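/*
 * Callback setters. The domain owner may register any subset of the
 * callbacks; operations whose callback is left NULL fail with -ENOTSUP.
 */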
void
spdk_memory_domain_set_translation(struct spdk_memory_domain *domain,
				   spdk_memory_domain_translate_memory_cb translate_cb)
{
	if (!domain) {
		return;
	}

	domain->translate_cb = translate_cb;
}

void
spdk_memory_domain_set_pull(struct spdk_memory_domain *domain,
			    spdk_memory_domain_pull_data_cb pull_cb)
{
	if (!domain) {
		return;
	}

	domain->pull_cb = pull_cb;
}

void
spdk_memory_domain_set_push(struct spdk_memory_domain *domain,
			    spdk_memory_domain_push_data_cb push_cb)
{
	if (!domain) {
		return;
	}

	domain->push_cb = push_cb;
}

void
spdk_memory_domain_set_memzero(struct spdk_memory_domain *domain,
			       spdk_memory_domain_memzero_cb memzero_cb)
{
	if (!domain) {
		return;
	}

	domain->memzero_cb = memzero_cb;
}

struct spdk_memory_domain_ctx *
spdk_memory_domain_get_context(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->ctx;
}

enum spdk_dma_device_type
spdk_memory_domain_get_dma_device_type(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->type;
}

const char *
spdk_memory_domain_get_dma_device_id(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->id;
}

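/*
 * Remove the domain from the global registry and free everything it owns.
 * Passing NULL is a no-op.
 */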
void
spdk_memory_domain_destroy(struct spdk_memory_domain *domain)
{
	if (!domain) {
		return;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_REMOVE(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	free(domain->ctx);
	free(domain->id);
	free(domain);
}

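/*
 * Data movers. "Pull" copies data out of src_domain's memory into the local
 * dst_iov buffers; "push" copies the local src_iov buffers into dst_domain's
 * memory. Both delegate to the callback registered by the domain's owner and
 * report completion through cpl_cb.
 */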
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(src_domain);
	assert(src_iov);
	assert(dst_iov);

	if (spdk_unlikely(!src_domain->pull_cb)) {
		return -ENOTSUP;
	}

	return src_domain->pull_cb(src_domain, src_domain_ctx, src_iov, src_iov_cnt, dst_iov, dst_iov_cnt,
				   cpl_cb, cpl_cb_arg);
}

int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(dst_domain);
	assert(dst_iov);
	assert(src_iov);

	if (spdk_unlikely(!dst_domain->push_cb)) {
		return -ENOTSUP;
	}

	return dst_domain->push_cb(dst_domain, dst_domain_ctx, dst_iov, dst_iovcnt, src_iov, src_iovcnt,
				   cpl_cb, cpl_cb_arg);
}

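/*
 * Ask src_domain's translation callback for an address usable in dst_domain
 * that corresponds to [addr, addr + len) in src_domain. The translated
 * description is returned through *result.
 */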
int
spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
				  struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
				  void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	assert(src_domain);
	assert(dst_domain);
	assert(result);

	if (spdk_unlikely(!src_domain->translate_cb)) {
		return -ENOTSUP;
	}

	return src_domain->translate_cb(src_domain, src_domain_ctx, dst_domain, dst_domain_ctx, addr, len,
					result);
}

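/*
 * Zero the memory described by iov within the given domain, using the
 * owner-registered memzero callback.
 */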
int
spdk_memory_domain_memzero(struct spdk_memory_domain *domain, void *domain_ctx, struct iovec *iov,
			   uint32_t iovcnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(domain);
	assert(iov);
	assert(iovcnt);

	if (spdk_unlikely(!domain->memzero_cb)) {
		return -ENOTSUP;
	}

	return domain->memzero_cb(domain, domain_ctx, iov, iovcnt, cpl_cb, cpl_cb_arg);
}

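/*
 * Registry iteration. spdk_memory_domain_get_first()/get_next() walk the
 * global list, optionally filtering by id. A typical loop over all domains
 * might look like:
 *
 *	struct spdk_memory_domain *d;
 *
 *	for (d = spdk_memory_domain_get_first(NULL); d != NULL;
 *	     d = spdk_memory_domain_get_next(d, NULL)) {
 *		... inspect d, e.g. spdk_memory_domain_get_dma_device_type(d) ...
 *	}
 */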
struct spdk_memory_domain *
spdk_memory_domain_get_first(const char *id)
{
	struct spdk_memory_domain *domain;

	if (!id) {
		pthread_mutex_lock(&g_dma_mutex);
		domain = TAILQ_FIRST(&g_dma_memory_domains);
		pthread_mutex_unlock(&g_dma_mutex);

		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH(domain, &g_dma_memory_domains, link) {
		/* Domains created without an id have a NULL id and can't match. */
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}

struct spdk_memory_domain *
spdk_memory_domain_get_next(struct spdk_memory_domain *prev, const char *id)
{
	struct spdk_memory_domain *domain;

	if (!prev) {
		return NULL;
	}

	pthread_mutex_lock(&g_dma_mutex);
	domain = TAILQ_NEXT(prev, link);
	pthread_mutex_unlock(&g_dma_mutex);

	if (!id || !domain) {
		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH_FROM(domain, &g_dma_memory_domains, link) {
		/* Domains created without an id have a NULL id and can't match. */
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}
309