/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Red Hat Inc.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"

struct vhost_vdpa_data {
	int vhostfd;
	uint64_t protocol_features;
};

#define VHOST_VDPA_SUPPORTED_BACKEND_FEATURES		\
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2	|	\
	1ULL << VHOST_BACKEND_F_IOTLB_BATCH)
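
/*
 * Both supported backend features relate to IOTLB handling:
 * - VHOST_BACKEND_F_IOTLB_MSG_V2: the backend accepts v2 IOTLB messages
 *   (struct vhost_msg) written directly to the vhost-vdpa fd.
 * - VHOST_BACKEND_F_IOTLB_BATCH: IOTLB updates may be grouped between
 *   VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END messages.
 */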

/*
 * vhost kernel & vhost-vdpa ioctls, mirroring the Linux <linux/vhost.h>
 * UAPI. They are duplicated here so the build does not depend on kernel
 * headers recent enough to include vDPA support.
 */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
#define VHOST_VDPA_GET_CONFIG _IOR(VHOST_VIRTIO, 0x73, struct vhost_vdpa_config)
#define VHOST_VDPA_SET_CONFIG _IOW(VHOST_VIRTIO, 0x74, struct vhost_vdpa_config)
#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, struct vhost_vring_state)

/* no alignment requirement */
struct vhost_iotlb_msg {
	uint64_t iova;
	uint64_t size;
	uint64_t uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	uint8_t perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
#define VHOST_IOTLB_BATCH_BEGIN    5
#define VHOST_IOTLB_BATCH_END      6
	uint8_t type;
};

#define VHOST_IOTLB_MSG_V2 0x2

struct vhost_vdpa_config {
	uint32_t off;
	uint32_t len;
	uint8_t buf[];
};

struct vhost_msg {
	uint32_t type;
	uint32_t reserved;
	union {
		struct vhost_iotlb_msg iotlb;
		uint8_t padding[64];
	};
};
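
/*
 * Example (a sketch, with placeholder values): mapping 'len' bytes of
 * process memory at 'addr' to IOVA 'iova' is a single v2 IOTLB message
 * written to the vhost-vdpa fd:
 *
 *	struct vhost_msg msg = {
 *		.type = VHOST_IOTLB_MSG_V2,
 *		.iotlb = {
 *			.type = VHOST_IOTLB_UPDATE,
 *			.iova = iova,
 *			.uaddr = (uintptr_t)addr,
 *			.size = len,
 *			.perm = VHOST_ACCESS_RW,
 *		},
 *	};
 *	write(vhostfd, &msg, sizeof(msg));
 *
 * vhost_vdpa_dma_map() below implements exactly this.
 */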

static int
vhost_vdpa_ioctl(int fd, uint64_t request, void *arg)
{
	int ret;

	ret = ioctl(fd, request, arg);
	if (ret) {
		PMD_DRV_LOG(ERR, "Vhost-vDPA ioctl %"PRIu64" failed (%s)",
				request, strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_set_owner(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_OWNER, NULL);
}

static int
vhost_vdpa_get_protocol_features(struct virtio_user_dev *dev, uint64_t *features)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_BACKEND_FEATURES, features);
}

static int
vhost_vdpa_set_protocol_features(struct virtio_user_dev *dev, uint64_t features)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_BACKEND_FEATURES, &features);
}

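/*
 * Fetches the virtio device features and, as a side effect, records
 * whether the device offers a control virtqueue (hw_cvq) and negotiates
 * the vDPA backend (protocol) features used for IOTLB updates.
 */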
static int
vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	int ret;

	ret = vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_FEATURES, features);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get features");
		return -1;
	}

	if (*features & (1ULL << VIRTIO_NET_F_CTRL_VQ))
		dev->hw_cvq = true;

	/* Negotiate vDPA backend features */
	ret = vhost_vdpa_get_protocol_features(dev, &data->protocol_features);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to get backend features");
		return -1;
	}

	data->protocol_features &= VHOST_VDPA_SUPPORTED_BACKEND_FEATURES;

	ret = vhost_vdpa_set_protocol_features(dev, data->protocol_features);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set backend features");
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_set_features(struct virtio_user_dev *dev, uint64_t features)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	/*
	 * WORKAROUND: force VIRTIO_F_IOMMU_PLATFORM (a.k.a. ACCESS_PLATFORM),
	 * which vhost-vDPA backends require since all DMA goes through the
	 * IOTLB mappings set up below.
	 */
	features |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_FEATURES, &features);
}

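/*
 * IOTLB batching: when VHOST_BACKEND_F_IOTLB_BATCH was negotiated,
 * callers bracket a series of IOTLB updates between a BATCH_BEGIN and a
 * BATCH_END message so the backend can commit them in one go, e.g.:
 *
 *	vhost_vdpa_iotlb_batch_begin(dev);
 *	vhost_vdpa_dma_map(dev, addr0, iova0, len0);
 *	vhost_vdpa_dma_map(dev, addr1, iova1, len1);
 *	vhost_vdpa_iotlb_batch_end(dev);
 *
 * Without the batch feature both helpers are no-ops and each update is
 * applied individually.
 */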
static int
vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_END;

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
				  uint64_t iova, size_t len)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.uaddr = (uint64_t)(uintptr_t)addr;
	msg.iotlb.size = len;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", addr: %p, len: 0x%zx",
			__func__, iova, addr, len);

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB update (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
				  uint64_t iova, size_t len)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.size = len;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", len: 0x%zx",
			__func__, iova, len);

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB invalidate (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
				  uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_map(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
				  uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtio_user_dev *dev = arg;

	if (msl->external)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, len);
}

static int
vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* skip external memory that isn't a heap */
	if (msl->external && !msl->heap)
		return 0;

	/* skip any segments with invalid IOVA addresses */
	if (ms->iova == RTE_BAD_IOVA)
		return 0;

	/* if IOVA mode is VA, we've already mapped the internal segments */
	if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, ms->len);
}

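/*
 * (Re)installs the DMA mappings for all EAL memory: any previous IOTLB
 * entries are invalidated first, then every memory segment is mapped,
 * all within a single batch when the backend supports batching.
 */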
static int
vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
		/* with IOVA as VA mode, we can get away with mapping contiguous
		 * chunks rather than going page-by-page.
		 */
		ret = rte_memseg_contig_walk_thread_unsafe(
				vhost_vdpa_map_contig, dev);
		if (ret)
			goto batch_end;
		/* we have to continue the walk because we've skipped the
		 * external segments during the contig walk.
		 */
	}
	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);

batch_end:
	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_SET_VRING_ENABLE, state);
}

static int
vhost_vdpa_set_vring_num(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_NUM, state);
}

static int
vhost_vdpa_set_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_BASE, state);
}

static int
vhost_vdpa_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_VRING_BASE, state);
}

static int
vhost_vdpa_set_vring_call(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_CALL, file);
}

static int
vhost_vdpa_set_vring_kick(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_KICK, file);
}

static int
vhost_vdpa_set_vring_addr(struct virtio_user_dev *dev, struct vhost_vring_addr *addr)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_ADDR, addr);
}

static int
vhost_vdpa_get_status(struct virtio_user_dev *dev, uint8_t *status)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_GET_STATUS, status);
}

static int
vhost_vdpa_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_SET_STATUS, &status);
}

static int
vhost_vdpa_get_config(struct virtio_user_dev *dev, uint8_t *data, uint32_t off, uint32_t len)
{
	struct vhost_vdpa_data *vdpa_data = dev->backend_data;
	struct vhost_vdpa_config *config;
	int ret = 0;

	config = malloc(sizeof(*config) + len);
	if (!config) {
		PMD_DRV_LOG(ERR, "Failed to allocate vDPA config data");
		return -1;
	}

	config->off = off;
	config->len = len;

	ret = vhost_vdpa_ioctl(vdpa_data->vhostfd, VHOST_VDPA_GET_CONFIG, config);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get vDPA config (offset 0x%x, len 0x%x)", off, len);
		ret = -1;
		goto out;
	}

	memcpy(data, config->buf, len);
out:
	free(config);

	return ret;
}

static int
vhost_vdpa_set_config(struct virtio_user_dev *dev, const uint8_t *data, uint32_t off, uint32_t len)
{
	struct vhost_vdpa_data *vdpa_data = dev->backend_data;
	struct vhost_vdpa_config *config;
	int ret = 0;

	config = malloc(sizeof(*config) + len);
	if (!config) {
		PMD_DRV_LOG(ERR, "Failed to allocate vDPA config data");
		return -1;
	}

	config->off = off;
	config->len = len;

	memcpy(config->buf, data, len);

	ret = vhost_vdpa_ioctl(vdpa_data->vhostfd, VHOST_VDPA_SET_CONFIG, config);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to set vDPA config (offset 0x%x, len 0x%x)", off, len);
		ret = -1;
	}

	free(config);

	return ret;
}
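
/*
 * For example (a hypothetical use, not taken from this file), reading
 * the device MAC address from config space amounts to:
 *
 *	uint8_t mac[RTE_ETHER_ADDR_LEN];
 *
 *	vhost_vdpa_get_config(dev, mac,
 *			offsetof(struct virtio_net_config, mac), sizeof(mac));
 */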

/**
 * Set up environment to talk with a vhost-vDPA backend.
 *
 * @return
 *   - (-1) if fail to set up;
 *   - (0) if successful.
 */
static int
vhost_vdpa_setup(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data;
	uint32_t did = (uint32_t)-1;

	data = malloc(sizeof(*data));
	if (!data) {
		PMD_DRV_LOG(ERR, "(%s) Failed to allocate backend data", dev->path);
		return -1;
	}

	data->vhostfd = open(dev->path, O_RDWR);
	if (data->vhostfd < 0) {
		PMD_DRV_LOG(ERR, "Failed to open %s: %s",
				dev->path, strerror(errno));
		free(data);
		return -1;
	}

	if (ioctl(data->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
			did != VIRTIO_ID_NETWORK) {
		PMD_DRV_LOG(ERR, "Invalid vDPA device ID: %u", did);
		close(data->vhostfd);
		free(data);
		return -1;
	}

	dev->backend_data = data;

	return 0;
}
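
/*
 * The vhost-vDPA backend is selected by pointing virtio-user at a
 * vhost-vdpa character device, e.g. (illustrative vdev arguments):
 *
 *	--vdev=net_virtio_user0,path=/dev/vhost-vdpa-0
 *
 * setup() above opens that device node and checks that it exposes a
 * network device before generic virtio-user initialization continues.
 */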

static int
vhost_vdpa_destroy(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	if (!data)
		return 0;

	close(data->vhostfd);

	free(data);
	dev->backend_data = NULL;

	return 0;
}

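/*
 * Per the virtio-net vring layout, ring 2*i is queue pair i's RX ring
 * and ring 2*i + 1 its TX ring; when a control virtqueue is present it
 * is the last one, at index max_queue_pairs * 2.
 */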
static int
vhost_vdpa_cvq_enable(struct virtio_user_dev *dev, int enable)
{
	struct vhost_vring_state state = {
		.index = dev->max_queue_pairs * 2,
		.num   = enable,
	};

	return vhost_vdpa_set_vring_enable(dev, &state);
}

static int
vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
			       uint16_t pair_idx,
			       int enable)
{
	int i;

	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	for (i = 0; i < 2; ++i) {
		struct vhost_vring_state state = {
			.index = pair_idx * 2 + i,
			.num   = enable,
		};

		if (vhost_vdpa_set_vring_enable(dev, &state))
			return -1;
	}

	dev->qp_enabled[pair_idx] = enable;

	return 0;
}

static int
vhost_vdpa_get_backend_features(uint64_t *features)
{
	*features = 0;

	return 0;
}

static int
vhost_vdpa_update_link_state(struct virtio_user_dev *dev __rte_unused)
{
	/* Nothing to update (for now?) */
	return 0;
}

static int
vhost_vdpa_get_intr_fd(struct virtio_user_dev *dev __rte_unused)
{
	/* No link state interrupt with Vhost-vDPA */
	return -1;
}

static int
vhost_vdpa_get_nr_vrings(struct virtio_user_dev *dev)
{
	int nr_vrings = dev->max_queue_pairs * 2;

	if (dev->device_features & (1ULL << VIRTIO_NET_F_CTRL_VQ))
		nr_vrings += 1;

	return nr_vrings;
}

static int
vhost_vdpa_unmap_notification_area(struct virtio_user_dev *dev)
{
	int i, nr_vrings;

	nr_vrings = vhost_vdpa_get_nr_vrings(dev);

	for (i = 0; i < nr_vrings; i++) {
		if (dev->notify_area[i])
			munmap(dev->notify_area[i], getpagesize());
	}
	free(dev->notify_area);
	dev->notify_area = NULL;

	return 0;
}

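/*
 * Each vring may expose a device notification (doorbell) area that the
 * vhost-vdpa fd lets us mmap() page by page, the mmap offset encoding
 * the vring index. Kicking a queue then becomes a plain store to that
 * page instead of a write to the kick eventfd.
 */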
static int
vhost_vdpa_map_notification_area(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	int nr_vrings, i, page_size = getpagesize();
	uint16_t **notify_area;

	nr_vrings = vhost_vdpa_get_nr_vrings(dev);

	notify_area = malloc(nr_vrings * sizeof(*notify_area));
	if (!notify_area) {
		PMD_DRV_LOG(ERR, "(%s) Failed to allocate notify area array", dev->path);
		return -1;
	}

	for (i = 0; i < nr_vrings; i++) {
		notify_area[i] = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED | MAP_FILE,
				      data->vhostfd, i * page_size);
		if (notify_area[i] == MAP_FAILED) {
			PMD_DRV_LOG(ERR, "(%s) Map failed for notify address of queue %d",
				    dev->path, i);
			i--;
			goto map_err;
		}
	}
	dev->notify_area = notify_area;

	return 0;

map_err:
	/* unwind only the pages mapped so far */
	for (; i >= 0; i--)
		munmap(notify_area[i], page_size);
	free(notify_area);

	return -1;
}

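/*
 * Ops table wired into the generic virtio-user device code; each
 * backend (vhost-user, vhost-kernel, vhost-vdpa) provides one.
 */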
struct virtio_user_backend_ops virtio_ops_vdpa = {
	.setup = vhost_vdpa_setup,
	.destroy = vhost_vdpa_destroy,
	.get_backend_features = vhost_vdpa_get_backend_features,
	.set_owner = vhost_vdpa_set_owner,
	.get_features = vhost_vdpa_get_features,
	.set_features = vhost_vdpa_set_features,
	.set_memory_table = vhost_vdpa_set_memory_table,
	.set_vring_num = vhost_vdpa_set_vring_num,
	.set_vring_base = vhost_vdpa_set_vring_base,
	.get_vring_base = vhost_vdpa_get_vring_base,
	.set_vring_call = vhost_vdpa_set_vring_call,
	.set_vring_kick = vhost_vdpa_set_vring_kick,
	.set_vring_addr = vhost_vdpa_set_vring_addr,
	.get_status = vhost_vdpa_get_status,
	.set_status = vhost_vdpa_set_status,
	.get_config = vhost_vdpa_get_config,
	.set_config = vhost_vdpa_set_config,
	.cvq_enable = vhost_vdpa_cvq_enable,
	.enable_qp = vhost_vdpa_enable_queue_pair,
	.dma_map = vhost_vdpa_dma_map_batch,
	.dma_unmap = vhost_vdpa_dma_unmap_batch,
	.update_link_state = vhost_vdpa_update_link_state,
	.get_intr_fd = vhost_vdpa_get_intr_fd,
	.map_notification_area = vhost_vdpa_map_notification_area,
	.unmap_notification_area = vhost_vdpa_unmap_notification_area,
};
718