xref: /spdk/lib/dma/dma.c (revision 307b8c112ffd90a26d53dd15fad67bd9038ef526)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/dma.h"
#include "spdk/log.h"
#include "spdk/util.h"
#include "spdk/likely.h"

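/* All registered memory domains are kept on this global list. Additions,
 * removals and lookups are serialized by g_dma_mutex. */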
pthread_mutex_t g_dma_mutex = PTHREAD_MUTEX_INITIALIZER;
TAILQ_HEAD(, spdk_memory_domain) g_dma_memory_domains = TAILQ_HEAD_INITIALIZER(
			g_dma_memory_domains);

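/* A memory domain describes memory that may not be directly accessible by the
 * CPU, e.g. memory local to a DMA-capable device. Data movement and address
 * translation are delegated to callbacks registered by the domain's owner. */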
struct spdk_memory_domain {
	enum spdk_dma_device_type type;
	spdk_memory_domain_pull_data_cb pull_cb;
	spdk_memory_domain_push_data_cb push_cb;
	spdk_memory_domain_translate_memory_cb translate_cb;
	spdk_memory_domain_memzero_cb memzero_cb;
	TAILQ_ENTRY(spdk_memory_domain) link;
	struct spdk_memory_domain_ctx *ctx;
	char *id;
};

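/* Allocate and register a new memory domain. The caller's context, if any, is
 * copied, so it does not have to remain valid after this call returns. */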
int
spdk_memory_domain_create(struct spdk_memory_domain **_domain, enum spdk_dma_device_type type,
			  struct spdk_memory_domain_ctx *ctx, const char *id)
{
	struct spdk_memory_domain *domain;
	size_t ctx_size;

	if (!_domain) {
		return -EINVAL;
	}

	if (ctx && ctx->size == 0) {
		SPDK_ERRLOG("Context size can't be 0\n");
		return -EINVAL;
	}

	domain = calloc(1, sizeof(*domain));
	if (!domain) {
		SPDK_ERRLOG("Failed to allocate memory\n");
		return -ENOMEM;
	}

	if (id) {
		domain->id = strdup(id);
		if (!domain->id) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain);
			return -ENOMEM;
		}
	}

	if (ctx) {
		domain->ctx = calloc(1, sizeof(*domain->ctx));
		if (!domain->ctx) {
			SPDK_ERRLOG("Failed to allocate memory\n");
			free(domain->id);
			free(domain);
			return -ENOMEM;
		}

		/* Copy at most as many bytes as both sides know about, in case
		 * the caller was built against a different version of the
		 * context structure. */
		ctx_size = spdk_min(sizeof(*domain->ctx), ctx->size);
		memcpy(domain->ctx, ctx, ctx_size);
		domain->ctx->size = ctx_size;
	}

	domain->type = type;

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_INSERT_TAIL(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	*_domain = domain;

	return 0;
}

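/* The setters below install the optional per-domain operation callbacks.
 * Operations whose callback is left unset fail with -ENOTSUP. */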
void
spdk_memory_domain_set_translation(struct spdk_memory_domain *domain,
				   spdk_memory_domain_translate_memory_cb translate_cb)
{
	if (!domain) {
		return;
	}

	domain->translate_cb = translate_cb;
}

void
spdk_memory_domain_set_pull(struct spdk_memory_domain *domain,
			    spdk_memory_domain_pull_data_cb pull_cb)
{
	if (!domain) {
		return;
	}

	domain->pull_cb = pull_cb;
}

void
spdk_memory_domain_set_push(struct spdk_memory_domain *domain,
			    spdk_memory_domain_push_data_cb push_cb)
{
	if (!domain) {
		return;
	}

	domain->push_cb = push_cb;
}

void
spdk_memory_domain_set_memzero(struct spdk_memory_domain *domain,
			       spdk_memory_domain_memzero_cb memzero_cb)
{
	if (!domain) {
		return;
	}

	domain->memzero_cb = memzero_cb;
}

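/* Accessors for the properties assigned at creation time. */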
struct spdk_memory_domain_ctx *
spdk_memory_domain_get_context(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->ctx;
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_dma_device_type spdk_dma_device_type_t;

spdk_dma_device_type_t
spdk_memory_domain_get_dma_device_type(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->type;
}

const char *
spdk_memory_domain_get_dma_device_id(struct spdk_memory_domain *domain)
{
	assert(domain);

	return domain->id;
}

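/* Unregister the domain and free it along with its copied context and id. The
 * caller must ensure no other thread is still using the domain. */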
void
spdk_memory_domain_destroy(struct spdk_memory_domain *domain)
{
	if (!domain) {
		return;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_REMOVE(&g_dma_memory_domains, domain, link);
	pthread_mutex_unlock(&g_dma_mutex);

	free(domain->ctx);
	free(domain->id);
	free(domain);
}

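/* Start an asynchronous transfer from memory belonging to src_domain into
 * local buffers described by dst_iov. Completion is reported via cpl_cb. */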
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(src_domain);
	assert(src_iov);
	assert(dst_iov);

	if (spdk_unlikely(!src_domain->pull_cb)) {
		return -ENOTSUP;
	}

	return src_domain->pull_cb(src_domain, src_domain_ctx, src_iov, src_iov_cnt, dst_iov, dst_iov_cnt,
				   cpl_cb, cpl_cb_arg);
}

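/* Start an asynchronous transfer of local data in src_iov into memory
 * belonging to dst_domain. Completion is reported via cpl_cb. */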
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(dst_domain);
	assert(dst_iov);
	assert(src_iov);

	if (spdk_unlikely(!dst_domain->push_cb)) {
		return -ENOTSUP;
	}

	return dst_domain->push_cb(dst_domain, dst_domain_ctx, dst_iov, dst_iovcnt, src_iov, src_iovcnt,
				   cpl_cb, cpl_cb_arg);
}

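/* Synchronously translate a memory region belonging to src_domain into a form
 * that dst_domain can use for data transfers; the translation is stored in
 * *result. */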
int
spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
				  struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
				  void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	assert(src_domain);
	assert(dst_domain);
	assert(result);

	if (spdk_unlikely(!src_domain->translate_cb)) {
		return -ENOTSUP;
	}

	return src_domain->translate_cb(src_domain, src_domain_ctx, dst_domain, dst_domain_ctx, addr, len,
					result);
}

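/* Start an asynchronous zero-fill of the memory described by iov within the
 * given domain. Completion is reported via cpl_cb. */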
int
spdk_memory_domain_memzero(struct spdk_memory_domain *domain, void *domain_ctx, struct iovec *iov,
			   uint32_t iovcnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	assert(domain);
	assert(iov);
	assert(iovcnt);

	if (spdk_unlikely(!domain->memzero_cb)) {
		return -ENOTSUP;
	}

	return domain->memzero_cb(domain, domain_ctx, iov, iovcnt, cpl_cb, cpl_cb_arg);
}

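/* get_first()/get_next() walk the global registry, optionally filtering by
 * id. The list is only locked for the duration of each call, so it may change
 * between calls. */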
struct spdk_memory_domain *
spdk_memory_domain_get_first(const char *id)
{
	struct spdk_memory_domain *domain;

	if (!id) {
		pthread_mutex_lock(&g_dma_mutex);
		domain = TAILQ_FIRST(&g_dma_memory_domains);
		pthread_mutex_unlock(&g_dma_mutex);

		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH(domain, &g_dma_memory_domains, link) {
		/* Domains may be created without an id, skip those. */
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}

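/* Return the domain that follows prev, or the next one whose id matches. */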
struct spdk_memory_domain *
spdk_memory_domain_get_next(struct spdk_memory_domain *prev, const char *id)
{
	struct spdk_memory_domain *domain;

	if (!prev) {
		return NULL;
	}

	pthread_mutex_lock(&g_dma_mutex);
	domain = TAILQ_NEXT(prev, link);
	pthread_mutex_unlock(&g_dma_mutex);

	if (!id || !domain) {
		return domain;
	}

	pthread_mutex_lock(&g_dma_mutex);
	TAILQ_FOREACH_FROM(domain, &g_dma_memory_domains, link) {
		/* Domains may be created without an id, skip those. */
		if (domain->id && !strcmp(domain->id, id)) {
			break;
		}
	}
	pthread_mutex_unlock(&g_dma_mutex);

	return domain;
}
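
/*
 * Example usage (an illustrative sketch, not part of this file): register a
 * domain and install a translation callback. The "my_vendor_dma" id and the
 * my_translate() identity translation are hypothetical placeholders.
 *
 *	static int
 *	my_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
 *		     struct spdk_memory_domain *dst_domain,
 *		     struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
 *		     void *addr, size_t len,
 *		     struct spdk_memory_domain_translation_result *result)
 *	{
 *		result->iov.iov_base = addr;
 *		result->iov.iov_len = len;
 *		result->iov_count = 1;
 *		return 0;
 *	}
 *
 *	void
 *	register_my_domain(void)
 *	{
 *		struct spdk_memory_domain *domain;
 *
 *		if (spdk_memory_domain_create(&domain, SPDK_DMA_DEVICE_TYPE_DMA,
 *					      NULL, "my_vendor_dma") == 0) {
 *			spdk_memory_domain_set_translation(domain, my_translate);
 *		}
 *	}
 */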