/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Red Hat, Inc.
 */

#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_tailq.h>

#include "iotlb.h"
#include "vhost.h"

struct vhost_iotlb_entry {
	TAILQ_ENTRY(vhost_iotlb_entry) next;
	SLIST_ENTRY(vhost_iotlb_entry) next_free;

	uint64_t iova;
	uint64_t uaddr;
	uint64_t uoffset;
	uint64_t size;
	uint8_t page_shift;
	uint8_t perm;
};

#define IOTLB_CACHE_SIZE 2048

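/* Notify the backend, if it registered a callback, that an IOTLB entry is being removed. */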
static void
vhost_user_iotlb_remove_notify(struct virtio_net *dev, struct vhost_iotlb_entry *entry)
{
	if (dev->backend_ops->iotlb_remove_notify == NULL)
		return;

	dev->backend_ops->iotlb_remove_notify(entry->uaddr, entry->uoffset, entry->size);
}

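/*
 * Return true if the page-aligned userspace ranges of entries 'a' and 'b'
 * overlap, i.e. the two entries share at least one host page.
 * Entry 'a' is assumed to start at a lower userspace address than entry 'b'.
 */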
static bool
vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct vhost_iotlb_entry *b)
{
	uint64_t a_start, a_end, b_start;

	if (a == NULL || b == NULL)
		return false;

	a_start = a->uaddr + a->uoffset;
	b_start = b->uaddr + b->uoffset;

	/* Assumes entry a lower than entry b */
	RTE_ASSERT(a_start < b_start);
	a_end = RTE_ALIGN_CEIL(a_start + a->size, RTE_BIT64(a->page_shift));
	b_start = RTE_ALIGN_FLOOR(b_start, RTE_BIT64(b->page_shift));

	return a_end > b_start;
}

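/* Mark the userspace range backing this entry as dumpable via mem_set_dump(). */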
static void
vhost_user_iotlb_set_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node)
{
	uint64_t start;

	start = node->uaddr + node->uoffset;
	mem_set_dump(dev, (void *)(uintptr_t)start, node->size, true, RTE_BIT64(node->page_shift));
}

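/*
 * Clear the dump flag on the userspace range backing this entry, skipping the
 * first and/or last page when it is shared with the previous or next entry.
 */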
static void
vhost_user_iotlb_clear_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node,
		struct vhost_iotlb_entry *prev, struct vhost_iotlb_entry *next)
{
	uint64_t start, end;

	start = node->uaddr + node->uoffset;
	end = start + node->size;

	/* Skip first page if shared with previous entry. */
	if (vhost_user_iotlb_share_page(prev, node))
		start = RTE_ALIGN_CEIL(start, RTE_BIT64(node->page_shift));

	/* Skip last page if shared with next entry. */
	if (vhost_user_iotlb_share_page(node, next))
		end = RTE_ALIGN_FLOOR(end, RTE_BIT64(node->page_shift));

	if (end > start)
		mem_set_dump(dev, (void *)(uintptr_t)start, end - start, false,
				RTE_BIT64(node->page_shift));
}

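/* Pop a free IOTLB entry from the preallocated pool, or return NULL if the pool is empty. */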
static struct vhost_iotlb_entry *
vhost_user_iotlb_pool_get(struct virtio_net *dev)
{
	struct vhost_iotlb_entry *node;

	rte_spinlock_lock(&dev->iotlb_free_lock);
	node = SLIST_FIRST(&dev->iotlb_free_list);
	if (node != NULL)
		SLIST_REMOVE_HEAD(&dev->iotlb_free_list, next_free);
	rte_spinlock_unlock(&dev->iotlb_free_lock);
	return node;
}

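/* Return an IOTLB entry to the free pool. */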
static void
vhost_user_iotlb_pool_put(struct virtio_net *dev, struct vhost_iotlb_entry *node)
{
	rte_spinlock_lock(&dev->iotlb_free_lock);
	SLIST_INSERT_HEAD(&dev->iotlb_free_list, node, next_free);
	rte_spinlock_unlock(&dev->iotlb_free_lock);
}

static void
vhost_user_iotlb_cache_random_evict(struct virtio_net *dev);

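/* Drop all entries from the pending-miss list and return them to the pool. */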
static void
vhost_user_iotlb_pending_remove_all(struct virtio_net *dev)
{
	struct vhost_iotlb_entry *node, *temp_node;

	rte_rwlock_write_lock(&dev->iotlb_pending_lock);

	RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_pending_list, next, temp_node) {
		TAILQ_REMOVE(&dev->iotlb_pending_list, node, next);
		vhost_user_iotlb_pool_put(dev, node);
	}

	rte_rwlock_write_unlock(&dev->iotlb_pending_lock);
}

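/* Return true if an IOTLB miss for this IOVA and permission is already pending. */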
bool
vhost_user_iotlb_pending_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	bool found = false;

	rte_rwlock_read_lock(&dev->iotlb_pending_lock);

	TAILQ_FOREACH(node, &dev->iotlb_pending_list, next) {
		if ((node->iova == iova) && (node->perm == perm)) {
			found = true;
			break;
		}
	}

	rte_rwlock_read_unlock(&dev->iotlb_pending_lock);

	return found;
}

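/*
 * Record a pending IOTLB miss for the given IOVA and permission.
 * If the entry pool is exhausted, pending entries (or, failing that, a random
 * cached entry) are evicted to make room; the insertion is dropped if the pool
 * is still empty afterwards.
 */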
void
vhost_user_iotlb_pending_insert(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
	struct vhost_iotlb_entry *node;

	node = vhost_user_iotlb_pool_get(dev);
	if (node == NULL) {
		VHOST_CONFIG_LOG(dev->ifname, DEBUG,
			"IOTLB pool empty, clear entries for pending insertion");
		if (!TAILQ_EMPTY(&dev->iotlb_pending_list))
			vhost_user_iotlb_pending_remove_all(dev);
		else
			vhost_user_iotlb_cache_random_evict(dev);
		node = vhost_user_iotlb_pool_get(dev);
		if (node == NULL) {
			VHOST_CONFIG_LOG(dev->ifname, ERR,
				"IOTLB pool still empty, pending insertion failure");
			return;
		}
	}

	node->iova = iova;
	node->perm = perm;

	rte_rwlock_write_lock(&dev->iotlb_pending_lock);

	TAILQ_INSERT_TAIL(&dev->iotlb_pending_list, node, next);

	rte_rwlock_write_unlock(&dev->iotlb_pending_lock);
}

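/* Remove pending-miss entries within [iova, iova + size) whose permissions are satisfied by 'perm'. */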
void
vhost_user_iotlb_pending_remove(struct virtio_net *dev, uint64_t iova, uint64_t size, uint8_t perm)
{
	struct vhost_iotlb_entry *node, *temp_node;

	rte_rwlock_write_lock(&dev->iotlb_pending_lock);

	RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_pending_list, next,
				temp_node) {
		if (node->iova < iova)
			continue;
		if (node->iova >= iova + size)
			continue;
		if ((node->perm & perm) != node->perm)
			continue;
		TAILQ_REMOVE(&dev->iotlb_pending_list, node, next);
		vhost_user_iotlb_pool_put(dev, node);
	}

	rte_rwlock_write_unlock(&dev->iotlb_pending_lock);
}

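/* Flush the whole IOTLB cache, notifying the backend and clearing dump flags for each entry. */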
static void
vhost_user_iotlb_cache_remove_all(struct virtio_net *dev)
{
	struct vhost_iotlb_entry *node, *temp_node;

	vhost_user_iotlb_wr_lock_all(dev);

	RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_list, next, temp_node) {
		vhost_user_iotlb_clear_dump(dev, node, NULL, NULL);

		TAILQ_REMOVE(&dev->iotlb_list, node, next);
		vhost_user_iotlb_remove_notify(dev, node);
		vhost_user_iotlb_pool_put(dev, node);
	}

	dev->iotlb_cache_nr = 0;

	vhost_user_iotlb_wr_unlock_all(dev);
}

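/* Evict one randomly chosen entry from the IOTLB cache to free a pool entry. */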
static void
vhost_user_iotlb_cache_random_evict(struct virtio_net *dev)
{
	struct vhost_iotlb_entry *node, *temp_node, *prev_node = NULL;
	int entry_idx;

	vhost_user_iotlb_wr_lock_all(dev);

	entry_idx = rte_rand() % dev->iotlb_cache_nr;

	RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_list, next, temp_node) {
		if (!entry_idx) {
			struct vhost_iotlb_entry *next_node = RTE_TAILQ_NEXT(node, next);

			vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);

			TAILQ_REMOVE(&dev->iotlb_list, node, next);
			vhost_user_iotlb_remove_notify(dev, node);
			vhost_user_iotlb_pool_put(dev, node);
			dev->iotlb_cache_nr--;
			break;
		}
		prev_node = node;
		entry_idx--;
	}

	vhost_user_iotlb_wr_unlock_all(dev);
}

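/*
 * Insert a new translation into the IOTLB cache, keeping the list sorted by IOVA,
 * and remove any pending miss it satisfies. If an entry with the same IOVA already
 * exists it is assumed identical, since entries must be invalidated before update.
 */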
void
vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t uaddr,
	uint64_t uoffset, uint64_t size, uint64_t page_size, uint8_t perm)
{
	struct vhost_iotlb_entry *node, *new_node;

	new_node = vhost_user_iotlb_pool_get(dev);
	if (new_node == NULL) {
		VHOST_CONFIG_LOG(dev->ifname, DEBUG,
			"IOTLB pool empty, clear entries for cache insertion");
		if (!TAILQ_EMPTY(&dev->iotlb_list))
			vhost_user_iotlb_cache_random_evict(dev);
		else
			vhost_user_iotlb_pending_remove_all(dev);
		new_node = vhost_user_iotlb_pool_get(dev);
		if (new_node == NULL) {
			VHOST_CONFIG_LOG(dev->ifname, ERR,
				"IOTLB pool still empty, cache insertion failed");
			return;
		}
	}

	new_node->iova = iova;
	new_node->uaddr = uaddr;
	new_node->uoffset = uoffset;
	new_node->size = size;
	new_node->page_shift = rte_ctz64(page_size);
	new_node->perm = perm;

	vhost_user_iotlb_wr_lock_all(dev);

	TAILQ_FOREACH(node, &dev->iotlb_list, next) {
		/*
		 * Entries must be invalidated before being updated.
		 * So if iova already in list, assume identical.
		 */
		if (node->iova == new_node->iova) {
			vhost_user_iotlb_pool_put(dev, new_node);
			goto unlock;
		} else if (node->iova > new_node->iova) {
			vhost_user_iotlb_set_dump(dev, new_node);

			TAILQ_INSERT_BEFORE(node, new_node, next);
			dev->iotlb_cache_nr++;
			goto unlock;
		}
	}

	vhost_user_iotlb_set_dump(dev, new_node);

	TAILQ_INSERT_TAIL(&dev->iotlb_list, new_node, next);
	dev->iotlb_cache_nr++;

unlock:
	vhost_user_iotlb_pending_remove(dev, iova, size, perm);

	vhost_user_iotlb_wr_unlock_all(dev);
}

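/* Invalidate all cached translations overlapping [iova, iova + size). */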
void
vhost_user_iotlb_cache_remove(struct virtio_net *dev, uint64_t iova, uint64_t size)
{
	struct vhost_iotlb_entry *node, *temp_node, *prev_node = NULL;

	if (unlikely(!size))
		return;

	vhost_user_iotlb_wr_lock_all(dev);

	RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_list, next, temp_node) {
		/* Sorted list */
		if (unlikely(iova + size < node->iova))
			break;

		if (iova < node->iova + node->size) {
			struct vhost_iotlb_entry *next_node = RTE_TAILQ_NEXT(node, next);

			vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);

			TAILQ_REMOVE(&dev->iotlb_list, node, next);
			vhost_user_iotlb_remove_notify(dev, node);
			vhost_user_iotlb_pool_put(dev, node);
			dev->iotlb_cache_nr--;
		} else {
			prev_node = node;
		}
	}

	vhost_user_iotlb_wr_unlock_all(dev);
}

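/*
 * Translate an IOVA range to a host virtual address using the cache.
 * On return, *size is reduced to the contiguously mapped length if the
 * requested range is only partially covered; 0 is returned on a miss or
 * permission mismatch.
 */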
uint64_t
vhost_user_iotlb_cache_find(struct virtio_net *dev, uint64_t iova, uint64_t *size, uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	uint64_t offset, vva = 0, mapped = 0;

	if (unlikely(!*size))
		goto out;

	TAILQ_FOREACH(node, &dev->iotlb_list, next) {
		/* List sorted by iova */
		if (unlikely(iova < node->iova))
			break;

		if (iova >= node->iova + node->size)
			continue;

		if (unlikely((perm & node->perm) != perm)) {
			vva = 0;
			break;
		}

		offset = iova - node->iova;
		if (!vva)
			vva = node->uaddr + node->uoffset + offset;

		mapped += node->size - offset;
		iova = node->iova + node->size;

		if (mapped >= *size)
			break;
	}

out:
	/* Only part of the requested chunk is mapped */
	if (unlikely(mapped < *size))
		*size = mapped;

	return vva;
}

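/* Flush both the IOTLB cache and the pending-miss list. */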
void
vhost_user_iotlb_flush_all(struct virtio_net *dev)
{
	vhost_user_iotlb_cache_remove_all(dev);
	vhost_user_iotlb_pending_remove_all(dev);
}

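/*
 * (Re)initialize the device IOTLB: reset the cache, pending and free lists, and,
 * when the device supports IOMMU, allocate the entry pool on the NUMA node
 * hosting the device structure.
 */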
int
vhost_user_iotlb_init(struct virtio_net *dev)
{
	unsigned int i;
	int socket = 0;

	if (dev->iotlb_pool) {
		/*
		 * The cache has already been initialized,
		 * just drop all cached and pending entries.
		 */
		vhost_user_iotlb_flush_all(dev);
		rte_free(dev->iotlb_pool);
	}

#ifdef RTE_LIBRTE_VHOST_NUMA
	if (get_mempolicy(&socket, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR) != 0)
		socket = 0;
#endif

	rte_spinlock_init(&dev->iotlb_free_lock);
	rte_rwlock_init(&dev->iotlb_pending_lock);

	SLIST_INIT(&dev->iotlb_free_list);
	TAILQ_INIT(&dev->iotlb_list);
	TAILQ_INIT(&dev->iotlb_pending_list);

	if (dev->flags & VIRTIO_DEV_SUPPORT_IOMMU) {
		dev->iotlb_pool = rte_calloc_socket("iotlb", IOTLB_CACHE_SIZE,
			sizeof(struct vhost_iotlb_entry), 0, socket);
		if (!dev->iotlb_pool) {
			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to create IOTLB cache pool");
			return -1;
		}
		for (i = 0; i < IOTLB_CACHE_SIZE; i++)
			vhost_user_iotlb_pool_put(dev, &dev->iotlb_pool[i]);
	}

	dev->iotlb_cache_nr = 0;

	return 0;
}

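/* Free the IOTLB entry pool. */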
void
vhost_user_iotlb_destroy(struct virtio_net *dev)
{
	rte_free(dev->iotlb_pool);
}