xref: /freebsd-src/sys/dev/mlx5/mlx5_core/mlx5_qp.c (revision bb1f0779b0e99e96522fa5f9090e5c9c6d9d8057)
1 /*-
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 
29 #include <linux/gfp.h>
30 #include <dev/mlx5/qp.h>
31 #include <dev/mlx5/driver.h>
32 
33 #include "mlx5_core.h"
34 
35 static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
36 						 u32 rsn)
37 {
38 	struct mlx5_qp_table *table = &dev->priv.qp_table;
39 	struct mlx5_core_rsc_common *common;
40 
41 	spin_lock(&table->lock);
42 
43 	common = radix_tree_lookup(&table->tree, rsn);
44 	if (common)
45 		atomic_inc(&common->refcount);
46 
47 	spin_unlock(&table->lock);
48 
49 	if (!common) {
50 		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
51 			       rsn);
52 		return NULL;
53 	}
54 	return common;
55 }
56 
57 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
58 {
59 	if (atomic_dec_and_test(&common->refcount))
60 		complete(&common->free);
61 }
62 
63 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
64 {
65 	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
66 	struct mlx5_core_qp *qp;
67 
68 	if (!common)
69 		return;
70 
71 	switch (common->res) {
72 	case MLX5_RES_QP:
73 		qp = (struct mlx5_core_qp *)common;
74 		qp->event(qp, event_type);
75 		break;
76 
77 	default:
78 		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
79 	}
80 
81 	mlx5_core_put_rsc(common);
82 }
83 
84 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
85 			struct mlx5_core_qp *qp,
86 			struct mlx5_create_qp_mbox_in *in,
87 			int inlen)
88 {
89 	struct mlx5_qp_table *table = &dev->priv.qp_table;
90 	struct mlx5_create_qp_mbox_out out;
91 	struct mlx5_destroy_qp_mbox_in din;
92 	struct mlx5_destroy_qp_mbox_out dout;
93 	int err;
94 	void *qpc;
95 
96 	memset(&out, 0, sizeof(out));
97 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
98 	if (dev->issi) {
99 		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
100 		/* 0xffffff means we ask to work with cqe version 0 */
101 		MLX5_SET(qpc, qpc, user_index, 0xffffff);
102 	}
103 
104 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
105 	if (err) {
106 		mlx5_core_warn(dev, "ret %d\n", err);
107 		return err;
108 	}
109 
110 	if (out.hdr.status) {
111 		mlx5_core_warn(dev, "current num of QPs 0x%x\n",
112 			       atomic_read(&dev->num_qps));
113 		return mlx5_cmd_status_to_err(&out.hdr);
114 	}
115 
116 	qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
117 	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
118 
119 	qp->common.res = MLX5_RES_QP;
120 	spin_lock_irq(&table->lock);
121 	err = radix_tree_insert(&table->tree, qp->qpn, qp);
122 	spin_unlock_irq(&table->lock);
123 	if (err) {
124 		mlx5_core_warn(dev, "err %d\n", err);
125 		goto err_cmd;
126 	}
127 
128 	qp->pid = curthread->td_proc->p_pid;
129 	atomic_set(&qp->common.refcount, 1);
130 	atomic_inc(&dev->num_qps);
131 	init_completion(&qp->common.free);
132 
133 	return 0;
134 
135 err_cmd:
136 	memset(&din, 0, sizeof(din));
137 	memset(&dout, 0, sizeof(dout));
138 	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
139 	din.qpn = cpu_to_be32(qp->qpn);
140 	mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
141 
142 	return err;
143 }
144 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
145 
146 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
147 			 struct mlx5_core_qp *qp)
148 {
149 	struct mlx5_destroy_qp_mbox_in in;
150 	struct mlx5_destroy_qp_mbox_out out;
151 	struct mlx5_qp_table *table = &dev->priv.qp_table;
152 	unsigned long flags;
153 	int err;
154 
155 
156 	spin_lock_irqsave(&table->lock, flags);
157 	radix_tree_delete(&table->tree, qp->qpn);
158 	spin_unlock_irqrestore(&table->lock, flags);
159 
160 	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
161 	wait_for_completion(&qp->common.free);
162 
163 	memset(&in, 0, sizeof(in));
164 	memset(&out, 0, sizeof(out));
165 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
166 	in.qpn = cpu_to_be32(qp->qpn);
167 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
168 	if (err)
169 		return err;
170 
171 	if (out.hdr.status)
172 		return mlx5_cmd_status_to_err(&out.hdr);
173 
174 	atomic_dec(&dev->num_qps);
175 	return 0;
176 }
177 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
178 
/*
 * Transition a QP from cur_state to new_state by issuing the matching
 * firmware MODIFY command.  Returns -EINVAL for a transition the
 * opcode table does not allow, a negative errno on command-execution
 * failure, or the translated firmware status otherwise.
 *
 * NOTE(review): the sqd_event parameter is accepted but never used in
 * this function — confirm whether callers rely on it elsewhere.
 */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
			enum mlx5_qp_state new_state,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp)
{
	/* Opcode lookup table indexed [current state][new state].  A zero
	 * entry (no initializer) marks an illegal transition. */
	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
		[MLX5_QP_STATE_RST] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
		},
		[MLX5_QP_STATE_INIT]  = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
		},
		[MLX5_QP_STATE_RTR]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
		},
		[MLX5_QP_STATE_RTS]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
		},
		[MLX5_QP_STATE_SQD] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
		[MLX5_QP_STATE_SQER] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
		},
		[MLX5_QP_STATE_ERR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		}
	};

	struct mlx5_modify_qp_mbox_out out;
	int err = 0;
	u16 op;

	/* Reject out-of-range states and transitions with no opcode. */
	if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
	    !optab[cur_state][new_state])
		return -EINVAL;

	memset(&out, 0, sizeof(out));
	op = optab[cur_state][new_state];
	in->hdr.opcode = cpu_to_be16(op);
	in->qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (err)
		return err;

	/* Translate any firmware status into an errno (0 on success). */
	return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
240 
241 void mlx5_init_qp_table(struct mlx5_core_dev *dev)
242 {
243 	struct mlx5_qp_table *table = &dev->priv.qp_table;
244 
245 	spin_lock_init(&table->lock);
246 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
247 }
248 
/*
 * Counterpart of mlx5_init_qp_table().  Intentionally empty: by the
 * time this runs all QPs are expected to have been destroyed, leaving
 * nothing to tear down here.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
}
252 
253 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
254 		       struct mlx5_query_qp_mbox_out *out, int outlen)
255 {
256 	struct mlx5_query_qp_mbox_in in;
257 	int err;
258 
259 	memset(&in, 0, sizeof(in));
260 	memset(out, 0, outlen);
261 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
262 	in.qpn = cpu_to_be32(qp->qpn);
263 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
264 	if (err)
265 		return err;
266 
267 	if (out->hdr.status)
268 		return mlx5_cmd_status_to_err(&out->hdr);
269 
270 	return err;
271 }
272 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
273 
274 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
275 {
276 	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)];
277 	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)];
278 	int err;
279 
280 	memset(in, 0, sizeof(in));
281 
282 	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
283 
284 	memset(out, 0, sizeof(out));
285 	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
286 	if (err)
287 		return err;
288 
289 	*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
290 	return 0;
291 }
292 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
293 
294 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
295 {
296 	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)];
297 	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)];
298 
299 	memset(in, 0, sizeof(in));
300 
301 	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
302 	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
303 
304 	memset(out, 0, sizeof(out));
305 	return mlx5_cmd_exec_check_status(dev, in,  sizeof(in),
306 					       out, sizeof(out));
307 }
308 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
309