xref: /spdk/lib/dma/dma.c (revision 8e9bf1815df2455d994df622f5b43078193b4a84)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Nvidia Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/dma.h"
#include "spdk/log.h"
#include "spdk/util.h"
#include "spdk/likely.h"

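/* Global registry of memory domains, protected by g_dma_mutex. */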
pthread_mutex_t g_dma_mutex = PTHREAD_MUTEX_INITIALIZER;
TAILQ_HEAD(, spdk_memory_domain) g_dma_memory_domains = TAILQ_HEAD_INITIALIZER(
			g_dma_memory_domains);

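/*
 * A memory domain describes a memory address space together with optional
 * callbacks for moving data out of it (pull), into it (push), or for
 * translating addresses in it for use by another domain.
 */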
struct spdk_memory_domain {
	enum spdk_dma_device_type type;
	spdk_memory_domain_pull_data_cb pull_cb;
	spdk_memory_domain_push_data_cb push_cb;
	spdk_memory_domain_translate_memory_cb translate_cb;
	TAILQ_ENTRY(spdk_memory_domain) link;
	struct spdk_memory_domain_ctx *ctx;
	char *id;
};

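/*
 * Create a memory domain and register it in the global list. Both id and
 * ctx are copied (ctx up to the smaller of the two structure sizes), so the
 * caller may release its own copies once this returns.
 */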
int
spdk_memory_domain_create(struct spdk_memory_domain **_domain, enum spdk_dma_device_type type,
			  struct spdk_memory_domain_ctx *ctx, const char *id)
{
	struct spdk_memory_domain *domain;
	size_t ctx_size;

	if (!_domain) {
		return -EINVAL;
	}

	if (ctx && ctx->size == 0) {
		SPDK_ERRLOG("Context size can't be 0\n");
		return -EINVAL;
	}

	domain = calloc(1, sizeof(*domain));
	if (!domain) {
		SPDK_ERRLOG("Failed to allocate memory\n");
		return -ENOMEM;
	}

	if (id) {
		domain->id = strdup(id);
		if (!domain->id) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain);
			return -ENOMEM;
		}
	}

	if (ctx) {
		domain->ctx = calloc(1, sizeof(*domain->ctx));
		if (!domain->ctx) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain->id);
			free(domain);
			return -ENOMEM;
		}

		ctx_size = spdk_min(sizeof(*domain->ctx), ctx->size);
		memcpy(domain->ctx, ctx, ctx_size);
		domain->ctx->size = ctx_size;
	}

	domain->type = type;

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_INSERT_TAIL(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	*_domain = domain;

	return 0;
}

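/*
 * Usage sketch (illustrative, not part of this file): create a domain,
 * install callbacks, and tear it down. The callback names are hypothetical
 * and error handling is elided.
 *
 *	struct spdk_memory_domain *domain;
 *
 *	if (spdk_memory_domain_create(&domain, SPDK_DMA_DEVICE_TYPE_RDMA, NULL, "rdma_0") == 0) {
 *		spdk_memory_domain_set_translation(domain, my_translate_cb);
 *		spdk_memory_domain_set_pull(domain, my_pull_cb);
 *		spdk_memory_domain_set_push(domain, my_push_cb);
 *		...
 *		spdk_memory_domain_destroy(domain);
 *	}
 */

/*
 * Setters for the optional per-domain callbacks. A NULL domain is a no-op;
 * passing a NULL callback clears the corresponding operation, after which
 * that operation on this domain returns -ENOTSUP.
 */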
void
spdk_memory_domain_set_translation(struct spdk_memory_domain *domain,
				   spdk_memory_domain_translate_memory_cb translate_cb)
{
	if (!domain) {
		return;
	}

	domain->translate_cb = translate_cb;
}

void
spdk_memory_domain_set_pull(struct spdk_memory_domain *domain,
			    spdk_memory_domain_pull_data_cb pull_cb)
{
	if (!domain) {
		return;
	}

	domain->pull_cb = pull_cb;
}

void
spdk_memory_domain_set_push(struct spdk_memory_domain *domain,
			    spdk_memory_domain_push_data_cb push_cb)
{
	if (!domain) {
		return;
	}

	domain->push_cb = push_cb;
}

struct spdk_memory_domain_ctx *
spdk_memory_domain_get_context(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->ctx;
}

enum spdk_dma_device_type
spdk_memory_domain_get_dma_device_type(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->type;
}

const char *
spdk_memory_domain_get_dma_device_id(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->id;
}

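/*
 * Unregister the domain and free the id and ctx copies made at creation
 * time. A NULL domain is a no-op.
 */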
void
spdk_memory_domain_destroy(struct spdk_memory_domain *domain)
{
	if (!domain) {
		return;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_REMOVE(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	free(domain->ctx);
	free(domain->id);
	free(domain);
}

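/*
 * Asynchronously copy the data described by src_iov out of src_domain into
 * plain local buffers in dst_iov, by delegating to the domain's registered
 * pull callback; returns -ENOTSUP if none was set. cpl_cb fires once the
 * transfer completes.
 */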
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(src_domain);
	assert(src_iov);
	assert(dst_iov);

	if (spdk_unlikely(!src_domain->pull_cb)) {
		return -ENOTSUP;
	}

	return src_domain->pull_cb(src_domain, src_domain_ctx, src_iov, src_iov_cnt, dst_iov, dst_iov_cnt,
				   cpl_cb, cpl_cb_arg);
}

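/*
 * Caller-side sketch (illustrative): pull one remote buffer into local
 * memory and get notified on completion. remote_iov, local_buf, len and
 * pull_done are hypothetical.
 *
 *	static void
 *	pull_done(void *ctx, int rc)
 *	{
 *		// rc is 0 on success, negative errno on failure
 *	}
 *
 *	struct iovec dst = { .iov_base = local_buf, .iov_len = len };
 *
 *	rc = spdk_memory_domain_pull_data(domain, domain_ctx, &remote_iov, 1,
 *					  &dst, 1, pull_done, NULL);
 *	// rc == -ENOTSUP means no pull callback is installed
 */

/*
 * Mirror image of pull: move data from local src_iov buffers into
 * dst_domain via its push callback, or fail with -ENOTSUP if none is set.
 */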
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(dst_domain);
	assert(dst_iov);
	assert(src_iov);

	if (spdk_unlikely(!dst_domain->push_cb)) {
		return -ENOTSUP;
	}

	return dst_domain->push_cb(dst_domain, dst_domain_ctx, dst_iov, dst_iovcnt, src_iov, src_iovcnt,
				   cpl_cb, cpl_cb_arg);
}

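/*
 * Synchronously translate an address valid in src_domain into a form usable
 * by dst_domain (for example a memory key rather than a raw pointer),
 * delegating to the source domain's translation callback. The outcome is
 * written to *result.
 */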
int
spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
				  struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
				  void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	assert(src_domain);
	assert(dst_domain);
	assert(result);

	if (spdk_unlikely(!src_domain->translate_cb)) {
		return -ENOTSUP;
	}

	return src_domain->translate_cb(src_domain, src_domain_ctx, dst_domain, dst_domain_ctx, addr, len,
					result);
}

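/*
 * Lookup/iteration helpers. With a NULL id they walk the whole registry; with
 * an id they return only matching domains. The mutex is held per call, not
 * across an iteration, so concurrent create/destroy can race with a walk.
 */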
struct spdk_memory_domain *
spdk_memory_domain_get_first(const char *id)
{
	struct spdk_memory_domain *domain;

	if (!id) {
		pthread_mutex_lock(&g_dma_mutex);
		domain = TAILQ_FIRST(&g_dma_memory_domains);
		pthread_mutex_unlock(&g_dma_mutex);

		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH(domain, &g_dma_memory_domains, link) {
		/* Domains may be created without an id; skip those. */
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}

struct spdk_memory_domain *
spdk_memory_domain_get_next(struct spdk_memory_domain *prev, const char *id)
{
	struct spdk_memory_domain *domain;

	if (!prev) {
		return NULL;
	}

	pthread_mutex_lock(&g_dma_mutex);
	domain = TAILQ_NEXT(prev, link);
	pthread_mutex_unlock(&g_dma_mutex);

	if (!id || !domain) {
		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH_FROM(domain, &g_dma_memory_domains, link) {
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}

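/*
 * Iteration sketch (illustrative): enumerate every registered domain.
 *
 *	struct spdk_memory_domain *d;
 *	const char *id;
 *
 *	for (d = spdk_memory_domain_get_first(NULL); d != NULL;
 *	     d = spdk_memory_domain_get_next(d, NULL)) {
 *		id = spdk_memory_domain_get_dma_device_id(d);
 *		printf("domain: %s\n", id ? id : "(unnamed)");
 *	}
 */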