xref: /dpdk/drivers/crypto/ionic/ionic_crypto_cmds.c (revision 2c1662bb53cab994552d82814ce81ad183947f62)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2024 Advanced Micro Devices, Inc.
 */

#include <stdbool.h>

#include <rte_malloc.h>
#include <rte_memzone.h>

#include "ionic_crypto.h"

/* queuetype support level */
static const uint8_t iocpt_qtype_vers[IOCPT_QTYPE_MAX] = {
	[IOCPT_QTYPE_ADMINQ]  = 0,   /* 0 = Base version */
	[IOCPT_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IOCPT_QTYPE_CRYPTOQ] = 0,   /* 0 = Base version */
};

static const char *
iocpt_error_to_str(enum iocpt_status_code code)
{
	switch (code) {
	case IOCPT_RC_SUCCESS:
		return "IOCPT_RC_SUCCESS";
	case IOCPT_RC_EVERSION:
		return "IOCPT_RC_EVERSION";
	case IOCPT_RC_EOPCODE:
		return "IOCPT_RC_EOPCODE";
	case IOCPT_RC_EIO:
		return "IOCPT_RC_EIO";
	case IOCPT_RC_EPERM:
		return "IOCPT_RC_EPERM";
	case IOCPT_RC_EQID:
		return "IOCPT_RC_EQID";
	case IOCPT_RC_EQTYPE:
		return "IOCPT_RC_EQTYPE";
	case IOCPT_RC_ENOENT:
		return "IOCPT_RC_ENOENT";
	case IOCPT_RC_EINTR:
		return "IOCPT_RC_EINTR";
	case IOCPT_RC_EAGAIN:
		return "IOCPT_RC_EAGAIN";
	case IOCPT_RC_ENOMEM:
		return "IOCPT_RC_ENOMEM";
	case IOCPT_RC_EFAULT:
		return "IOCPT_RC_EFAULT";
	case IOCPT_RC_EBUSY:
		return "IOCPT_RC_EBUSY";
	case IOCPT_RC_EEXIST:
		return "IOCPT_RC_EEXIST";
	case IOCPT_RC_EINVAL:
		return "IOCPT_RC_EINVAL";
	case IOCPT_RC_ENOSPC:
		return "IOCPT_RC_ENOSPC";
	case IOCPT_RC_ERANGE:
		return "IOCPT_RC_ERANGE";
	case IOCPT_RC_BAD_ADDR:
		return "IOCPT_RC_BAD_ADDR";
	case IOCPT_RC_DEV_CMD:
		return "IOCPT_RC_DEV_CMD";
	case IOCPT_RC_ERROR:
		return "IOCPT_RC_ERROR";
	default:
		return "IOCPT_RC_UNKNOWN";
	}
}

static const char *
iocpt_opcode_to_str(enum iocpt_cmd_opcode opcode)
{
	switch (opcode) {
	case IOCPT_CMD_NOP:
		return "IOCPT_CMD_NOP";
	case IOCPT_CMD_IDENTIFY:
		return "IOCPT_CMD_IDENTIFY";
	case IOCPT_CMD_RESET:
		return "IOCPT_CMD_RESET";
	case IOCPT_CMD_LIF_IDENTIFY:
		return "IOCPT_CMD_LIF_IDENTIFY";
	case IOCPT_CMD_LIF_INIT:
		return "IOCPT_CMD_LIF_INIT";
	case IOCPT_CMD_LIF_RESET:
		return "IOCPT_CMD_LIF_RESET";
	case IOCPT_CMD_LIF_GETATTR:
		return "IOCPT_CMD_LIF_GETATTR";
	case IOCPT_CMD_LIF_SETATTR:
		return "IOCPT_CMD_LIF_SETATTR";
	case IOCPT_CMD_Q_IDENTIFY:
		return "IOCPT_CMD_Q_IDENTIFY";
	case IOCPT_CMD_Q_INIT:
		return "IOCPT_CMD_Q_INIT";
	case IOCPT_CMD_Q_CONTROL:
		return "IOCPT_CMD_Q_CONTROL";
	case IOCPT_CMD_SESS_CONTROL:
		return "IOCPT_CMD_SESS_CONTROL";
	default:
		return "DEVCMD_UNKNOWN";
	}
}

/* Dev_cmd Interface */

static void
iocpt_dev_cmd_go(struct iocpt_dev *dev, union iocpt_dev_cmd *cmd)
{
	uint32_t cmd_size = RTE_DIM(cmd->words);
	uint32_t i;

	IOCPT_PRINT(DEBUG, "Sending %s (%d) via dev_cmd",
		iocpt_opcode_to_str(cmd->cmd.opcode), cmd->cmd.opcode);

	for (i = 0; i < cmd_size; i++)
		iowrite32(cmd->words[i], &dev->dev_cmd->cmd.words[i]);

	iowrite32(0, &dev->dev_cmd->done);
	iowrite32(1, &dev->dev_cmd->doorbell);
}
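
/*
 * Note on the dev_cmd flow above: the command words are copied into the
 * device's dev_cmd register block, the 'done' flag is cleared, and writing
 * 1 to the doorbell asks the firmware to execute the command. Completion
 * is then polled in iocpt_dev_cmd_wait() and any response payload is read
 * back via iocpt_dev_cmd_comp() or the dev_cmd->data words.
 */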

static int
iocpt_dev_cmd_wait(struct iocpt_dev *dev, unsigned long max_wait)
{
	unsigned long step_usec = IONIC_DEVCMD_CHECK_PERIOD_US;
	unsigned long max_wait_usec = max_wait * 1000000L;
	unsigned long elapsed_usec = 0;
	int done;

	/* Wait for the dev cmd to complete, but no more than max_wait seconds */

	do {
		done = ioread32(&dev->dev_cmd->done) & IONIC_DEV_CMD_DONE;
		if (done != 0) {
			IOCPT_PRINT(DEBUG, "DEVCMD %d done took %lu usecs",
				ioread8(&dev->dev_cmd->cmd.cmd.opcode),
				elapsed_usec);
			return 0;
		}

		rte_delay_us_block(step_usec);

		elapsed_usec += step_usec;
	} while (elapsed_usec < max_wait_usec);

	IOCPT_PRINT(ERR, "DEVCMD %d timeout after %lu usecs",
		ioread8(&dev->dev_cmd->cmd.cmd.opcode), elapsed_usec);

	return -ETIMEDOUT;
}
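
/*
 * Polling-budget sketch (illustrative numbers, not taken from the headers):
 * with max_wait = 2 the budget is max_wait_usec = 2,000,000 us, so if
 * IONIC_DEVCMD_CHECK_PERIOD_US were 100 us the command would be polled at
 * most 20,000 times before -ETIMEDOUT is returned.
 */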

static void
iocpt_dev_cmd_comp(struct iocpt_dev *dev, void *mem)
{
	union iocpt_dev_cmd_comp *comp = mem;
	uint32_t comp_size = RTE_DIM(comp->words);
	uint32_t i;

	for (i = 0; i < comp_size; i++)
		comp->words[i] = ioread32(&dev->dev_cmd->comp.words[i]);
}

static int
iocpt_dev_cmd_wait_check(struct iocpt_dev *dev, unsigned long max_wait)
{
	uint8_t status;
	int err;

	err = iocpt_dev_cmd_wait(dev, max_wait);
	if (err == 0) {
		status = ioread8(&dev->dev_cmd->comp.comp.status);
		if (status == IOCPT_RC_EAGAIN)
			err = -EAGAIN;
		else if (status != 0)
			err = -EIO;
	}

	IOCPT_PRINT(DEBUG, "dev_cmd returned %d", err);
	return err;
}
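
/*
 * The dev_cmd consumers below follow the same three-step pattern; a minimal
 * sketch, mirroring iocpt_dev_adminq_init() further down:
 *
 *	iocpt_dev_cmd_adminq_init(dev);
 *	err = iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
 *	if (err == 0)
 *		iocpt_dev_cmd_comp(dev, &comp);  (only when a payload is needed)
 */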

/* Dev_cmds */

static void
iocpt_dev_cmd_reset(struct iocpt_dev *dev)
{
	union iocpt_dev_cmd cmd = {
		.reset.opcode = IOCPT_CMD_RESET,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

static void
iocpt_dev_cmd_lif_identify(struct iocpt_dev *dev, uint8_t ver)
{
	union iocpt_dev_cmd cmd = {
		.lif_identify.opcode = IOCPT_CMD_LIF_IDENTIFY,
		.lif_identify.type = IOCPT_LIF_TYPE_DEFAULT,
		.lif_identify.ver = ver,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

static void
iocpt_dev_cmd_lif_init(struct iocpt_dev *dev, rte_iova_t info_pa)
{
	union iocpt_dev_cmd cmd = {
		.lif_init.opcode = IOCPT_CMD_LIF_INIT,
		.lif_init.type = IOCPT_LIF_TYPE_DEFAULT,
		.lif_init.info_pa = info_pa,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

static void
iocpt_dev_cmd_lif_reset(struct iocpt_dev *dev)
{
	union iocpt_dev_cmd cmd = {
		.lif_reset.opcode = IOCPT_CMD_LIF_RESET,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

static void
iocpt_dev_cmd_queue_identify(struct iocpt_dev *dev,
		uint8_t qtype, uint8_t qver)
{
	union iocpt_dev_cmd cmd = {
		.q_identify.opcode = IOCPT_CMD_Q_IDENTIFY,
		.q_identify.type = qtype,
		.q_identify.ver = qver,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

static void
iocpt_dev_cmd_adminq_init(struct iocpt_dev *dev)
{
	struct iocpt_queue *q = &dev->adminq->q;
	struct iocpt_cq *cq = &dev->adminq->cq;

	union iocpt_dev_cmd cmd = {
		.q_init.opcode = IOCPT_CMD_Q_INIT,
		.q_init.type = q->type,
		.q_init.ver = dev->qtype_info[q->type].version,
		.q_init.index = rte_cpu_to_le_32(q->index),
		.q_init.flags = rte_cpu_to_le_16(IOCPT_QINIT_F_ENA),
		.q_init.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
		.q_init.ring_size = rte_log2_u32(q->num_descs),
		.q_init.ring_base = rte_cpu_to_le_64(q->base_pa),
		.q_init.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
	};

	IOCPT_PRINT(DEBUG, "adminq.q_init.ver %u", cmd.q_init.ver);

	iocpt_dev_cmd_go(dev, &cmd);
}
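
/*
 * Note: q_init.ring_size carries the log2 of the descriptor count
 * (rte_log2_u32(q->num_descs)), so a 16-entry admin queue, for example,
 * would be encoded as ring_size = 4. The ring and completion-ring base
 * addresses are passed as little-endian IOVAs.
 */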

/* Dev_cmd consumers */

static void
iocpt_queue_identify(struct iocpt_dev *dev)
{
	union iocpt_q_identity *q_ident = &dev->ident.q;
	uint32_t q_words = RTE_DIM(q_ident->words);
	uint32_t cmd_words = RTE_DIM(dev->dev_cmd->data);
	uint32_t i, nwords, qtype;
	int err;

	for (qtype = 0; qtype < RTE_DIM(iocpt_qtype_vers); qtype++) {
		struct iocpt_qtype_info *qti = &dev->qtype_info[qtype];

		/* Filter out the types this driver knows about */
		switch (qtype) {
		case IOCPT_QTYPE_ADMINQ:
		case IOCPT_QTYPE_NOTIFYQ:
		case IOCPT_QTYPE_CRYPTOQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		if (iocpt_is_embedded()) {
			/* When embedded, FW will always match the driver */
			qti->version = iocpt_qtype_vers[qtype];
			continue;
		}

		/* On the host, query the FW for info */
		iocpt_dev_cmd_queue_identify(dev,
			qtype, iocpt_qtype_vers[qtype]);
		err = iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
		if (err == -EINVAL) {
			IOCPT_PRINT(ERR, "qtype %d not supported", qtype);
			continue;
		} else if (err == -EIO) {
			IOCPT_PRINT(ERR, "q_ident failed, older FW");
			return;
		} else if (err != 0) {
			IOCPT_PRINT(ERR, "q_ident failed, qtype %d: %d",
				qtype, err);
			return;
		}

		nwords = RTE_MIN(q_words, cmd_words);
		for (i = 0; i < nwords; i++)
			q_ident->words[i] = ioread32(&dev->dev_cmd->data[i]);

		qti->version   = q_ident->version;
		qti->supported = q_ident->supported;
		qti->features  = rte_le_to_cpu_64(q_ident->features);
		qti->desc_sz   = rte_le_to_cpu_16(q_ident->desc_sz);
		qti->comp_sz   = rte_le_to_cpu_16(q_ident->comp_sz);
		qti->sg_desc_sz = rte_le_to_cpu_16(q_ident->sg_desc_sz);
		qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
		qti->sg_desc_stride =
			rte_le_to_cpu_16(q_ident->sg_desc_stride);

		IOCPT_PRINT(DEBUG, " qtype[%d].version = %d",
			qtype, qti->version);
		IOCPT_PRINT(DEBUG, " qtype[%d].supported = %#x",
			qtype, qti->supported);
		IOCPT_PRINT(DEBUG, " qtype[%d].features = %#jx",
			qtype, qti->features);
		IOCPT_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
			qtype, qti->desc_sz);
		IOCPT_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
			qtype, qti->comp_sz);
		IOCPT_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
			qtype, qti->sg_desc_sz);
		IOCPT_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
			qtype, qti->max_sg_elems);
		IOCPT_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
			qtype, qti->sg_desc_stride);
	}
}

int
iocpt_dev_identify(struct iocpt_dev *dev)
{
	union iocpt_lif_identity *ident = &dev->ident.lif;
	union iocpt_lif_config *cfg = &ident->config;
	uint64_t features;
	uint32_t cmd_size = RTE_DIM(dev->dev_cmd->data);
	uint32_t dev_size = RTE_DIM(ident->words);
	uint32_t i, nwords;
	int err;

	memset(ident, 0, sizeof(*ident));

	iocpt_dev_cmd_lif_identify(dev, IOCPT_IDENTITY_VERSION_1);
	err = iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
	if (err != 0)
		return err;

	nwords = RTE_MIN(dev_size, cmd_size);
	for (i = 0; i < nwords; i++)
		ident->words[i] = ioread32(&dev->dev_cmd->data[i]);

	dev->max_qps =
		rte_le_to_cpu_32(cfg->queue_count[IOCPT_QTYPE_CRYPTOQ]);
	dev->max_sessions =
		rte_le_to_cpu_32(ident->max_nb_sessions);

	features = rte_le_to_cpu_64(ident->features);
	dev->features = RTE_CRYPTODEV_FF_HW_ACCELERATED;
	if (features & IOCPT_HW_SYM)
		dev->features |= RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO;
	if (features & IOCPT_HW_ASYM)
		dev->features |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
	if (features & IOCPT_HW_CHAIN)
		dev->features |= RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
	if (features & IOCPT_HW_IP)
		dev->features |= RTE_CRYPTODEV_FF_IN_PLACE_SGL;
	if (features & IOCPT_HW_OOP) {
		dev->features |= RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
		dev->features |= RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT;
		dev->features |= RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
		dev->features |= RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT;
	}

	IOCPT_PRINT(INFO, "crypto.features %#jx",
		rte_le_to_cpu_64(ident->features));
	IOCPT_PRINT(INFO, "crypto.features_active %#jx",
		rte_le_to_cpu_64(cfg->features));
	IOCPT_PRINT(INFO, "crypto.queue_count[IOCPT_QTYPE_ADMINQ] %#x",
		rte_le_to_cpu_32(cfg->queue_count[IOCPT_QTYPE_ADMINQ]));
	IOCPT_PRINT(INFO, "crypto.queue_count[IOCPT_QTYPE_NOTIFYQ] %#x",
		rte_le_to_cpu_32(cfg->queue_count[IOCPT_QTYPE_NOTIFYQ]));
	IOCPT_PRINT(INFO, "crypto.queue_count[IOCPT_QTYPE_CRYPTOQ] %#x",
		rte_le_to_cpu_32(cfg->queue_count[IOCPT_QTYPE_CRYPTOQ]));
	IOCPT_PRINT(INFO, "crypto.max_sessions %u",
		rte_le_to_cpu_32(ident->max_nb_sessions));

	iocpt_queue_identify(dev);

	return 0;
}

int
iocpt_dev_init(struct iocpt_dev *dev, rte_iova_t info_pa)
{
	uint32_t retries = 5;
	int err;

retry_lif_init:
	iocpt_dev_cmd_lif_init(dev, info_pa);

	err = iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
	if (err == -EAGAIN && retries > 0) {
		retries--;
		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
		goto retry_lif_init;
	}

	return err;
}

void
iocpt_dev_reset(struct iocpt_dev *dev)
{
	iocpt_dev_cmd_lif_reset(dev);
	(void)iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);

	iocpt_dev_cmd_reset(dev);
	(void)iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
}

int
iocpt_dev_adminq_init(struct iocpt_dev *dev)
{
	struct iocpt_queue *q = &dev->adminq->q;
	struct iocpt_q_init_comp comp;
	uint32_t retries = 5;
	int err;

retry_adminq_init:
	iocpt_dev_cmd_adminq_init(dev);

	err = iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
	if (err == -EAGAIN && retries > 0) {
		retries--;
		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
		goto retry_adminq_init;
	}
	if (err != 0)
		return err;

	iocpt_dev_cmd_comp(dev, &comp);

	q->hw_type = comp.hw_type;
	q->hw_index = rte_le_to_cpu_32(comp.hw_index);
	q->db = iocpt_db_map(dev, q);

	IOCPT_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type);
	IOCPT_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index);
	IOCPT_PRINT(DEBUG, "adminq->db %p", q->db);

	dev->adminq->flags |= IOCPT_Q_F_INITED;

	return 0;
}

/* Admin_cmd interface */

static bool
iocpt_adminq_service(struct iocpt_cq *cq, uint16_t cq_desc_index,
		void *cb_arg __rte_unused)
{
	struct iocpt_admin_comp *cq_desc_base = cq->base;
	struct iocpt_admin_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct iocpt_admin_q *adminq =
		container_of(cq, struct iocpt_admin_q, cq);
	struct iocpt_queue *q = &adminq->q;
	struct iocpt_admin_ctx *ctx;
	uint16_t curr_q_tail_idx;
	uint16_t stop_index;
	void **info;

	if (!iocpt_color_match(cq_desc->color, cq->done_color))
		return false;

	stop_index = rte_le_to_cpu_16(cq_desc->comp_index);

	do {
		info = IOCPT_INFO_PTR(q, q->tail_idx);

		ctx = info[0];
		if (ctx != NULL) {
			memcpy(&ctx->comp, cq_desc, sizeof(*cq_desc));
			ctx->pending_work = false; /* done */
		}

		curr_q_tail_idx = q->tail_idx;
		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
	} while (curr_q_tail_idx != stop_index);

	return true;
}
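
/*
 * A single admin completion can retire several posted descriptors: the
 * loop above advances q->tail_idx until it has consumed the slot named by
 * comp_index, copying the completion into each waiter's ctx and clearing
 * its pending_work flag. For instance, if tail_idx is 2 and comp_index is
 * 4, descriptors 2, 3 and 4 are all retired here.
 */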

/** iocpt_adminq_post - Post an admin command.
 * @dev:		Handle to dev.
 * @ctx:		API admin command context.
 *
 * Return: zero or negative error status.
 */
static int
iocpt_adminq_post(struct iocpt_dev *dev, struct iocpt_admin_ctx *ctx)
{
	struct iocpt_queue *q = &dev->adminq->q;
	struct iocpt_admin_cmd *q_desc_base = q->base;
	struct iocpt_admin_cmd *q_desc;
	void **info;
	int err = 0;

	rte_spinlock_lock(&dev->adminq_lock);

	if (iocpt_q_space_avail(q) < 1) {
		err = -ENOSPC;
		goto err_out;
	}

	q_desc = &q_desc_base[q->head_idx];

	memcpy(q_desc, &ctx->cmd, sizeof(ctx->cmd));

	info = IOCPT_INFO_PTR(q, q->head_idx);
	info[0] = ctx;

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	/* Ring doorbell */
	iocpt_q_flush(q);

err_out:
	rte_spinlock_unlock(&dev->adminq_lock);

	return err;
}
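
/*
 * Note: iocpt_adminq_post() only copies the command into the ring and rings
 * the doorbell under adminq_lock; it does not wait. The completion is
 * reaped later by iocpt_adminq_wait_for_completion(), which polls the
 * admin CQ on the caller's behalf.
 */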

static int
iocpt_adminq_wait_for_completion(struct iocpt_dev *dev,
		struct iocpt_admin_ctx *ctx, unsigned long max_wait)
{
	struct iocpt_queue *q = &dev->adminq->q;
	unsigned long step_usec = IONIC_DEVCMD_CHECK_PERIOD_US;
	unsigned long step_deadline;
	unsigned long max_wait_usec = max_wait * 1000000L;
	unsigned long elapsed_usec = 0;
	int budget = 8;
	uint16_t idx;
	void **info;

	step_deadline = IONIC_ADMINQ_WDOG_MS * 1000 / step_usec;

	while (ctx->pending_work && elapsed_usec < max_wait_usec) {
		/*
		 * Lock here because the adminq is serviced inline and this
		 * path can be reached from multiple places.
		 */
		rte_spinlock_lock(&dev->adminq_service_lock);

		iocpt_cq_service(&dev->adminq->cq, budget,
			iocpt_adminq_service, NULL);

		/*
		 * Ring the doorbell again if work is still pending when
		 * the watchdog deadline expires.
		 */
		if (ctx->pending_work && !step_deadline) {
			step_deadline = IONIC_ADMINQ_WDOG_MS *
				1000 / step_usec;

			rte_spinlock_lock(&dev->adminq_lock);
			idx = Q_NEXT_TO_POST(q, -1);
			info = IOCPT_INFO_PTR(q, idx);
			if (info[0] == ctx)
				iocpt_q_flush(q);
			rte_spinlock_unlock(&dev->adminq_lock);
		}

		rte_spinlock_unlock(&dev->adminq_service_lock);

		rte_delay_us_block(step_usec);
		elapsed_usec += step_usec;
		step_deadline--;
	}

	return (!ctx->pending_work);
}
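
/*
 * Watchdog arithmetic sketch (illustrative values only): step_deadline
 * counts polling iterations, so if IONIC_ADMINQ_WDOG_MS were 10 and
 * IONIC_DEVCMD_CHECK_PERIOD_US were 100, the deadline would be
 * 10 * 1000 / 100 = 100 steps, i.e. the doorbell is re-rung roughly every
 * 10 ms while this ctx is still pending and is the most recently posted
 * descriptor. The real constants live in the shared ionic headers.
 */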

static int
iocpt_adminq_check_err(struct iocpt_admin_ctx *ctx, bool timeout)
{
	const char *name;
	const char *status;

	name = iocpt_opcode_to_str(ctx->cmd.cmd.opcode);

	if (ctx->comp.comp.status == IOCPT_RC_EAGAIN) {
		IOCPT_PRINT(DEBUG, "%s (%d) returned EAGAIN (%d)",
			name, ctx->cmd.cmd.opcode,
			ctx->comp.comp.status);
		return -EAGAIN;
	}
	if (ctx->comp.comp.status != 0 || timeout) {
		status = iocpt_error_to_str(ctx->comp.comp.status);
		IOCPT_PRINT(ERR, "%s (%d) failed: %s (%d)",
			name,
			ctx->cmd.cmd.opcode,
			timeout ? "TIMEOUT" : status,
			timeout ? -1 : ctx->comp.comp.status);
		return -EIO;
	}

	if (ctx->cmd.cmd.opcode != IOCPT_CMD_SESS_CONTROL) {
		IOCPT_PRINT(DEBUG, "%s (%d) succeeded",
			name, ctx->cmd.cmd.opcode);
	}

	return 0;
}

int
iocpt_adminq_post_wait(struct iocpt_dev *dev, struct iocpt_admin_ctx *ctx)
{
	bool done;
	int err;

	if (ctx->cmd.cmd.opcode != IOCPT_CMD_SESS_CONTROL) {
		IOCPT_PRINT(DEBUG, "Sending %s (%d) via the admin queue",
			iocpt_opcode_to_str(ctx->cmd.cmd.opcode),
			ctx->cmd.cmd.opcode);
	}

	err = iocpt_adminq_post(dev, ctx);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Failure posting %d to the admin queue (%d)",
			ctx->cmd.cmd.opcode, err);
		return err;
	}

	done = iocpt_adminq_wait_for_completion(dev, ctx,
		IONIC_DEVCMD_TIMEOUT);

	return iocpt_adminq_check_err(ctx, !done /* timed out */);
}
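
/*
 * Caller sketch (hypothetical, for illustration only; the admin ctx layout
 * comes from ionic_crypto.h and the real callers live elsewhere in the
 * driver):
 *
 *	struct iocpt_admin_ctx ctx = {
 *		.pending_work = true,
 *		.cmd.cmd.opcode = IOCPT_CMD_NOP,
 *	};
 *
 *	err = iocpt_adminq_post_wait(dev, &ctx);
 *	if (err == 0)
 *		(inspect ctx.comp for the completion payload)
 */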
651