/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Nvidia Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/dma.h"
#include "spdk/log.h"
#include "spdk/util.h"
#include "spdk/likely.h"

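/* Global registry of memory domains, protected by g_dma_mutex. */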
pthread_mutex_t g_dma_mutex = PTHREAD_MUTEX_INITIALIZER;
TAILQ_HEAD(, spdk_memory_domain) g_dma_memory_domains = TAILQ_HEAD_INITIALIZER(
			g_dma_memory_domains);

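/*
 * A memory domain groups the DMA device type, the optional user context and
 * id, and the data access callbacks (pull, translate) registered for it.
 */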
struct spdk_memory_domain {
	enum spdk_dma_device_type type;
	spdk_memory_domain_pull_data_cb pull_cb;
	spdk_memory_domain_translate_memory_cb translate_cb;
	TAILQ_ENTRY(spdk_memory_domain) link;
	struct spdk_memory_domain_ctx *ctx;
	char *id;
};

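/*
 * Allocate a new memory domain and add it to the global registry. The
 * optional id string is duplicated and the optional ctx is copied (at most
 * sizeof(*domain->ctx) bytes, bounded by ctx->size), so the caller's copies
 * do not need to outlive this call.
 */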
int
spdk_memory_domain_create(struct spdk_memory_domain **_domain, enum spdk_dma_device_type type,
			  struct spdk_memory_domain_ctx *ctx, const char *id)
{
	struct spdk_memory_domain *domain;
	size_t ctx_size;

	if (!_domain) {
		return -EINVAL;
	}

	if (ctx && ctx->size == 0) {
		SPDK_ERRLOG("Context size can't be 0\n");
		return -EINVAL;
	}

	domain = calloc(1, sizeof(*domain));
	if (!domain) {
		SPDK_ERRLOG("Failed to allocate memory\n");
		return -ENOMEM;
	}

	if (id) {
		domain->id = strdup(id);
		if (!domain->id) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain);
			return -ENOMEM;
		}
	}

	if (ctx) {
		domain->ctx = calloc(1, sizeof(*domain->ctx));
		if (!domain->ctx) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain->id);
			free(domain);
			return -ENOMEM;
		}

		/* Copy no more than the smaller of the two context sizes. */
		ctx_size = spdk_min(sizeof(*domain->ctx), ctx->size);
		memcpy(domain->ctx, ctx, ctx_size);
		domain->ctx->size = ctx_size;
	}

	domain->type = type;

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_INSERT_TAIL(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	*_domain = domain;

	return 0;
}

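/*
 * Illustrative usage sketch. The callback name my_pull_cb and the device
 * type value are placeholders chosen for the example, not part of this
 * library; see spdk/dma.h for the callback typedefs and the
 * spdk_dma_device_type values.
 *
 *	struct spdk_memory_domain *domain;
 *	int rc;
 *
 *	rc = spdk_memory_domain_create(&domain, SPDK_DMA_DEVICE_TYPE_RDMA, NULL, "my_domain");
 *	if (rc != 0) {
 *		return rc;
 *	}
 *	spdk_memory_domain_set_pull(domain, my_pull_cb);
 *	...
 *	spdk_memory_domain_destroy(domain);
 */

/* Register the address translation callback for this domain. */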
void
spdk_memory_domain_set_translation(struct spdk_memory_domain *domain,
				   spdk_memory_domain_translate_memory_cb translate_cb)
{
	if (!domain) {
		return;
	}

	domain->translate_cb = translate_cb;
}

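/* Register the data pull callback for this domain. */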
void
spdk_memory_domain_set_pull(struct spdk_memory_domain *domain,
			    spdk_memory_domain_pull_data_cb pull_cb)
{
	if (!domain) {
		return;
	}

	domain->pull_cb = pull_cb;
}

struct spdk_memory_domain_ctx *
spdk_memory_domain_get_context(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->ctx;
}

enum spdk_dma_device_type spdk_memory_domain_get_dma_device_type(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->type;
}

const char *
spdk_memory_domain_get_dma_device_id(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->id;
}

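/*
 * Remove the domain from the global registry and free it along with its
 * copied context and id. Passing NULL is a no-op.
 */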
void
spdk_memory_domain_destroy(struct spdk_memory_domain *domain)
{
	if (!domain) {
		return;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_REMOVE(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	free(domain->ctx);
	free(domain->id);
	free(domain);
}

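/*
 * Ask src_domain to copy the data described by src_iov into the local
 * buffers in dst_iov. Completion is reported through cpl_cb; returns
 * -ENOTSUP if the domain has no pull callback registered.
 */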
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_pull_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(src_domain);
	assert(src_iov);
	assert(dst_iov);

	if (spdk_unlikely(!src_domain->pull_cb)) {
		return -ENOTSUP;
	}

	return src_domain->pull_cb(src_domain, src_domain_ctx, src_iov, src_iov_cnt, dst_iov, dst_iov_cnt,
				   cpl_cb, cpl_cb_arg);
}

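/*
 * Translate the address range addr/len owned by src_domain into a form
 * usable by dst_domain, filling in result via the registered translation
 * callback. Returns -ENOTSUP if no translation callback is registered.
 */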
int
spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
				  struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
				  void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	assert(src_domain);
	assert(dst_domain);
	assert(result);

	if (spdk_unlikely(!src_domain->translate_cb)) {
		return -ENOTSUP;
	}

	return src_domain->translate_cb(src_domain, src_domain_ctx, dst_domain, dst_domain_ctx, addr, len,
					result);
}

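/*
 * Return the first registered memory domain, or, if id is not NULL, the
 * first domain whose id matches. The registry is locked only for the
 * duration of the call.
 */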
struct spdk_memory_domain *
spdk_memory_domain_get_first(const char *id)
{
	struct spdk_memory_domain *domain;

	if (!id) {
		pthread_mutex_lock(&g_dma_mutex);
		domain = TAILQ_FIRST(&g_dma_memory_domains);
		pthread_mutex_unlock(&g_dma_mutex);

		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH(domain, &g_dma_memory_domains, link) {
		/* Domains may be created without an id; skip them when searching by id. */
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}

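/*
 * Return the domain that follows prev in the registry, or, if id is not
 * NULL, the next domain after prev whose id matches.
 */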
struct spdk_memory_domain *
spdk_memory_domain_get_next(struct spdk_memory_domain *prev, const char *id)
{
	struct spdk_memory_domain *domain;

	if (!prev) {
		return NULL;
	}

	pthread_mutex_lock(&g_dma_mutex);
	domain = TAILQ_NEXT(prev, link);
	pthread_mutex_unlock(&g_dma_mutex);

	if (!id || !domain) {
		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH_FROM(domain, &g_dma_memory_domains, link) {
		/* Domains may be created without an id; skip them when searching by id. */
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}