/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <rte_malloc.h>
#include <rte_errno.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

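/*
 * Enable or disable dirty-bitmap dumping on all configured virtqs by
 * issuing a DevX VIRTQ modify command with the dirty-bitmap dump
 * enable field selected.
 */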
int
mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable)
{
	struct mlx5_devx_virtq_attr attr = {
		.mod_fields_bitmap =
			MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,
		.dirty_bitmap_dump_enable = enable,
	};
	struct mlx5_vdpa_virtq *virtq;
	int i;

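	/* Apply the modify command to every configured virtq. */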
	for (i = 0; i < priv->nr_virtqs; ++i) {
		attr.queue_index = i;
		virtq = &priv->virtqs[i];
		if (!virtq->configured) {
			DRV_LOG(DEBUG,
				"virtq %d is invalid for dirty bitmap enabling.",
				i);
			continue;
		}
		pthread_mutex_lock(&virtq->virtq_lock);
		if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
			pthread_mutex_unlock(&virtq->virtq_lock);
			DRV_LOG(ERR,
				"Failed to modify virtq %d for dirty bitmap enabling.",
				i);
			return -1;
		}
		pthread_mutex_unlock(&virtq->virtq_lock);
	}
	return 0;
}

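/*
 * Register the vhost dirty log area with the device through a wrapped
 * mkey, then program the bitmap address, size and mkey into every
 * configured virtq.
 */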
int
mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
			   uint64_t log_size)
{
	struct mlx5_devx_virtq_attr attr = {
		.mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS,
		.dirty_bitmap_addr = log_base,
		.dirty_bitmap_size = log_size,
	};
	struct mlx5_vdpa_virtq *virtq;
	int i;
	int ret = mlx5_os_wrapped_mkey_create(priv->cdev->ctx, priv->cdev->pd,
					      priv->cdev->pdn,
					      (void *)(uintptr_t)log_base,
					      log_size, &priv->lm_mr);

	if (ret) {
		DRV_LOG(ERR, "Failed to allocate wrapped MR for lm.");
		return -1;
	}
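	/* Let the device write the dirty bitmap through the new mkey. */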
	attr.dirty_bitmap_mkey = priv->lm_mr.lkey;
	for (i = 0; i < priv->nr_virtqs; ++i) {
		attr.queue_index = i;
		virtq = &priv->virtqs[i];
		if (!virtq->configured) {
			DRV_LOG(DEBUG, "virtq %d is invalid for LM.", i);
			continue;
		}
		pthread_mutex_lock(&virtq->virtq_lock);
		if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
			pthread_mutex_unlock(&virtq->virtq_lock);
			DRV_LOG(ERR, "Failed to modify virtq %d for LM.", i);
			goto err;
		}
		pthread_mutex_unlock(&virtq->virtq_lock);
	}
	return 0;
err:
	mlx5_os_wrapped_mkey_destroy(&priv->lm_mr);
	return -1;
}

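/*
 * Freeze the device state for live migration: stop all configured
 * virtqs and, when dirty logging was negotiated, log their whole used
 * rings so vhost marks the pages dirty.
 */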
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
	RTE_ATOMIC(uint32_t) err_cnt = 0;
	uint32_t task_num = 0;
	uint32_t i, thrd_idx, data[1];
	struct mlx5_vdpa_virtq *virtq;
	uint64_t features;
	int ret;

	ret = rte_vhost_get_negotiated_features(priv->vid, &features);
	if (ret) {
		DRV_LOG(ERR, "Failed to get negotiated features.");
		return -1;
	}
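	/*
	 * With configuration threads available, spread the stop-virtq
	 * tasks across them; every (max_thrds + 1)-th queue is kept for
	 * the main thread.
	 */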
	if (priv->use_c_thread && priv->nr_virtqs) {
		uint32_t main_task_idx[priv->nr_virtqs];

		for (i = 0; i < priv->nr_virtqs; i++) {
			virtq = &priv->virtqs[i];
			if (!virtq->configured)
				continue;
			thrd_idx = i % (conf_thread_mng.max_thrds + 1);
			if (!thrd_idx) {
				main_task_idx[task_num] = i;
				task_num++;
				continue;
			}
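			/* Round-robin the remaining queues over the worker threads. */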
			thrd_idx = priv->last_c_thrd_idx + 1;
			if (thrd_idx >= conf_thread_mng.max_thrds)
				thrd_idx = 0;
			priv->last_c_thrd_idx = thrd_idx;
			data[0] = i;
			if (mlx5_vdpa_task_add(priv, thrd_idx,
					       MLX5_VDPA_TASK_STOP_VIRTQ,
					       &remaining_cnt, &err_cnt,
					       (void **)&data, 1)) {
				DRV_LOG(ERR,
					"Failed to add stop-virtq task (%d), falling back to the main thread.",
					i);
				main_task_idx[task_num] = i;
				task_num++;
			}
		}
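		/* Stop the queues reserved for the main thread inline. */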
		for (i = 0; i < task_num; i++) {
			virtq = &priv->virtqs[main_task_idx[i]];
			pthread_mutex_lock(&virtq->virtq_lock);
			ret = mlx5_vdpa_virtq_stop(priv, main_task_idx[i]);
			if (ret) {
				pthread_mutex_unlock(&virtq->virtq_lock);
				DRV_LOG(ERR, "Failed to stop virtq %d.",
					main_task_idx[i]);
				return -1;
			}
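			/*
			 * Log the whole used ring so vhost marks its pages
			 * dirty for the migration copy.
			 */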
			if (RTE_VHOST_NEED_LOG(features))
				rte_vhost_log_used_vring(priv->vid,
					main_task_idx[i], 0,
					MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
			pthread_mutex_unlock(&virtq->virtq_lock);
		}
		if (mlx5_vdpa_c_thread_wait_bulk_tasks_done(&remaining_cnt,
							    &err_cnt, 2000)) {
			DRV_LOG(ERR,
				"Failed to wait for stop-virtq tasks to complete.");
			return -1;
		}
	} else {
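		/* No configuration threads: stop every virtq inline. */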
		for (i = 0; i < priv->nr_virtqs; i++) {
			virtq = &priv->virtqs[i];
			pthread_mutex_lock(&virtq->virtq_lock);
			if (!virtq->configured) {
				pthread_mutex_unlock(&virtq->virtq_lock);
				continue;
			}
			ret = mlx5_vdpa_virtq_stop(priv, i);
			if (ret) {
				pthread_mutex_unlock(&virtq->virtq_lock);
				DRV_LOG(ERR,
					"Failed to stop virtq %d for LM log.", i);
				return -1;
			}
			if (RTE_VHOST_NEED_LOG(features))
				rte_vhost_log_used_vring(priv->vid, i, 0,
					MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
			pthread_mutex_unlock(&virtq->virtq_lock);
		}
	}
	return 0;
}