xref: /spdk/lib/nvme/nvme_pcie_common.c (revision 8dd1cd2104ea4001e4a0da2a4851ccd62c82f8e8)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation. All rights reserved.
3  *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 /*
8  * NVMe over PCIe common library
9  */
10 
11 #include "spdk/stdinc.h"
12 #include "spdk/likely.h"
13 #include "spdk/string.h"
14 #include "nvme_internal.h"
15 #include "nvme_pcie_internal.h"
16 #include "spdk/trace.h"
17 
18 #include "spdk_internal/trace_defs.h"
19 
20 __thread struct nvme_pcie_ctrlr *g_thread_mmio_ctrlr = NULL;
21 
22 static struct spdk_nvme_pcie_stat g_dummy_stat = {};
23 
24 static void nvme_pcie_fail_request_bad_vtophys(struct spdk_nvme_qpair *qpair,
25 		struct nvme_tracker *tr);
26 
27 static inline uint64_t
28 nvme_pcie_vtophys(struct spdk_nvme_ctrlr *ctrlr, const void *buf, uint64_t *size)
29 {
30 	if (spdk_likely(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE)) {
31 		return spdk_vtophys(buf, size);
32 	} else {
33 		/* vfio-user address translation with IOVA=VA mode */
34 		return (uint64_t)(uintptr_t)buf;
35 	}
36 }
37 
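/*
 * Minimal illustrative sketch of how a caller checks the translation result
 * (the helper name and buffer are hypothetical).  The translated address is
 * only safe to hand to the device if it is not SPDK_VTOPHYS_ERROR.
 */
#if 0
static int
example_check_translation(struct spdk_nvme_ctrlr *ctrlr, void *buf)
{
	uint64_t phys = nvme_pcie_vtophys(ctrlr, buf, NULL);

	if (phys == SPDK_VTOPHYS_ERROR) {
		/* The buffer is not registered with the DMA/vtophys subsystem. */
		return -EFAULT;
	}

	/* phys may now be placed in a PRP entry or SGL descriptor. */
	return 0;
}
#endif
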
38 int
39 nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
40 {
41 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
42 	uint32_t i;
43 
44 	/* all head/tail vals are set to 0 */
45 	pqpair->last_sq_tail = pqpair->sq_tail = pqpair->sq_head = pqpair->cq_head = 0;
46 
47 	/*
48 	 * The first time through the completion queue, HW will set the phase
49 	 *  bit on completions to 1.  So set this to 1 here, indicating
50 	 *  we're looking for a 1 to know which entries have completed.
51 	 *  We'll toggle the bit each time the completion queue
52 	 *  rolls over.
53 	 */
54 	pqpair->flags.phase = 1;
55 	for (i = 0; i < pqpair->num_entries; i++) {
56 		pqpair->cpl[i].status.p = 0;
57 	}
58 
59 	return 0;
60 }
61 
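/*
 * Worked example of the phase bit, assuming a small 4-entry completion queue:
 * after reset, flags.phase == 1 and every cpl[].status.p == 0, so no entry looks
 * valid.  The controller posts first-pass completions with p == 1, which the
 * driver consumes.  When cq_head wraps from 3 back to 0, flags.phase flips to 0,
 * so the driver now treats p == 0 as "new" and the stale p == 1 entries from the
 * previous pass as not yet completed.
 */
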
62 static void
63 nvme_qpair_construct_tracker(struct nvme_tracker *tr, uint16_t cid, uint64_t phys_addr)
64 {
65 	tr->prp_sgl_bus_addr = phys_addr + offsetof(struct nvme_tracker, u.prp);
66 	tr->cid = cid;
67 	tr->req = NULL;
68 }
69 
70 static void *
71 nvme_pcie_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t size, uint64_t alignment,
72 			  uint64_t *phys_addr)
73 {
74 	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
75 	uintptr_t addr;
76 
77 	if (pctrlr->cmb.mem_register_addr != NULL) {
78 		/* BAR is mapped for data */
79 		return NULL;
80 	}
81 
82 	addr = (uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.current_offset;
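	/*
	 * Round addr up to the requested alignment; this idiom assumes `alignment`
	 * is a power of two.  For example, with alignment = 0x1000 and
	 * addr = 0x...1234, the result is 0x...2000, the next 4 KiB boundary.
	 */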
83 	addr = (addr + (alignment - 1)) & ~(alignment - 1);
84 
85 	/* CMB may only consume part of the BAR, calculate accordingly */
86 	if (addr + size > ((uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.size)) {
87 		SPDK_ERRLOG("Tried to allocate past valid CMB range!\n");
88 		return NULL;
89 	}
90 	*phys_addr = pctrlr->cmb.bar_pa + addr - (uintptr_t)pctrlr->cmb.bar_va;
91 
92 	pctrlr->cmb.current_offset = (addr + size) - (uintptr_t)pctrlr->cmb.bar_va;
93 
94 	return (void *)addr;
95 }
96 
97 int
98 nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
99 			  const struct spdk_nvme_io_qpair_opts *opts)
100 {
101 	struct spdk_nvme_ctrlr	*ctrlr = qpair->ctrlr;
102 	struct nvme_pcie_ctrlr	*pctrlr = nvme_pcie_ctrlr(ctrlr);
103 	struct nvme_pcie_qpair	*pqpair = nvme_pcie_qpair(qpair);
104 	struct nvme_tracker	*tr;
105 	uint16_t		i;
106 	uint16_t		num_trackers;
107 	size_t			page_align = sysconf(_SC_PAGESIZE);
108 	size_t			queue_align, queue_len;
109 	uint32_t                flags = SPDK_MALLOC_DMA;
110 	uint64_t		sq_paddr = 0;
111 	uint64_t		cq_paddr = 0;
112 
113 	if (opts) {
114 		pqpair->sq_vaddr = opts->sq.vaddr;
115 		pqpair->cq_vaddr = opts->cq.vaddr;
116 		sq_paddr = opts->sq.paddr;
117 		cq_paddr = opts->cq.paddr;
118 	}
119 
120 	pqpair->retry_count = ctrlr->opts.transport_retry_count;
121 
122 	/*
123 	 * Limit the maximum number of completions to return per call to prevent wraparound,
124 	 * and calculate how many trackers can be submitted at once without overflowing the
125 	 * completion queue.
126 	 */
127 	pqpair->max_completions_cap = pqpair->num_entries / 4;
128 	pqpair->max_completions_cap = spdk_max(pqpair->max_completions_cap, NVME_MIN_COMPLETIONS);
129 	pqpair->max_completions_cap = spdk_min(pqpair->max_completions_cap, NVME_MAX_COMPLETIONS);
130 	num_trackers = pqpair->num_entries - pqpair->max_completions_cap;
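	/*
	 * Worked example, assuming the NVME_MIN/MAX_COMPLETIONS clamps do not bind:
	 * with num_entries = 256, max_completions_cap = 64 and num_trackers = 192,
	 * so up to 192 commands may be outstanding while 64 CQ entries of headroom
	 * remain, preventing a single completion batch from wrapping the queue.
	 */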
131 
132 	SPDK_INFOLOG(nvme, "max_completions_cap = %" PRIu16 " num_trackers = %" PRIu16 "\n",
133 		     pqpair->max_completions_cap, num_trackers);
134 
135 	assert(num_trackers != 0);
136 
137 	pqpair->sq_in_cmb = false;
138 
139 	if (nvme_qpair_is_admin_queue(&pqpair->qpair)) {
140 		flags |= SPDK_MALLOC_SHARE;
141 	}
142 
143 	/* cmd and cpl rings must be aligned on page size boundaries. */
144 	if (ctrlr->opts.use_cmb_sqs) {
145 		pqpair->cmd = nvme_pcie_ctrlr_alloc_cmb(ctrlr, pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
146 							page_align, &pqpair->cmd_bus_addr);
147 		if (pqpair->cmd != NULL) {
148 			pqpair->sq_in_cmb = true;
149 		}
150 	}
151 
152 	if (pqpair->sq_in_cmb == false) {
153 		if (pqpair->sq_vaddr) {
154 			pqpair->cmd = pqpair->sq_vaddr;
155 		} else {
156 			/* To ensure physical address contiguity we make each ring occupy
157 			 * a single hugepage only. See MAX_IO_QUEUE_ENTRIES.
158 			 */
159 			queue_len = pqpair->num_entries * sizeof(struct spdk_nvme_cmd);
160 			queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
161 			pqpair->cmd = spdk_zmalloc(queue_len, queue_align, NULL, SPDK_ENV_SOCKET_ID_ANY, flags);
162 			if (pqpair->cmd == NULL) {
163 				SPDK_ERRLOG("alloc qpair_cmd failed\n");
164 				return -ENOMEM;
165 			}
166 		}
167 		if (sq_paddr) {
168 			assert(pqpair->sq_vaddr != NULL);
169 			pqpair->cmd_bus_addr = sq_paddr;
170 		} else {
171 			pqpair->cmd_bus_addr = nvme_pcie_vtophys(ctrlr, pqpair->cmd, NULL);
172 			if (pqpair->cmd_bus_addr == SPDK_VTOPHYS_ERROR) {
173 				SPDK_ERRLOG("spdk_vtophys(pqpair->cmd) failed\n");
174 				return -EFAULT;
175 			}
176 		}
177 	}
178 
179 	if (pqpair->cq_vaddr) {
180 		pqpair->cpl = pqpair->cq_vaddr;
181 	} else {
182 		queue_len = pqpair->num_entries * sizeof(struct spdk_nvme_cpl);
183 		queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
184 		pqpair->cpl = spdk_zmalloc(queue_len, queue_align, NULL, SPDK_ENV_SOCKET_ID_ANY, flags);
185 		if (pqpair->cpl == NULL) {
186 			SPDK_ERRLOG("alloc qpair_cpl failed\n");
187 			return -ENOMEM;
188 		}
189 	}
190 	if (cq_paddr) {
191 		assert(pqpair->cq_vaddr != NULL);
192 		pqpair->cpl_bus_addr = cq_paddr;
193 	} else {
194 		pqpair->cpl_bus_addr =  nvme_pcie_vtophys(ctrlr, pqpair->cpl, NULL);
195 		if (pqpair->cpl_bus_addr == SPDK_VTOPHYS_ERROR) {
196 			SPDK_ERRLOG("spdk_vtophys(pqpair->cpl) failed\n");
197 			return -EFAULT;
198 		}
199 	}
200 
201 	pqpair->sq_tdbl = pctrlr->doorbell_base + (2 * qpair->id + 0) * pctrlr->doorbell_stride_u32;
202 	pqpair->cq_hdbl = pctrlr->doorbell_base + (2 * qpair->id + 1) * pctrlr->doorbell_stride_u32;
203 
204 	/*
205 	 * Reserve space for all of the trackers in a single allocation.
206 	 *   struct nvme_tracker must be padded so that its size is already a power of 2.
207 	 *   This ensures the PRP list embedded in the nvme_tracker object will not span a
208 	 *   4KB boundary, while allowing access to trackers in tr[] via normal array indexing.
209 	 */
210 	pqpair->tr = spdk_zmalloc(num_trackers * sizeof(*tr), sizeof(*tr), NULL,
211 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
212 	if (pqpair->tr == NULL) {
213 		SPDK_ERRLOG("nvme_tr failed\n");
214 		return -ENOMEM;
215 	}
216 
217 	TAILQ_INIT(&pqpair->free_tr);
218 	TAILQ_INIT(&pqpair->outstanding_tr);
219 
220 	for (i = 0; i < num_trackers; i++) {
221 		tr = &pqpair->tr[i];
222 		nvme_qpair_construct_tracker(tr, i, nvme_pcie_vtophys(ctrlr, tr, NULL));
223 		TAILQ_INSERT_HEAD(&pqpair->free_tr, tr, tq_list);
224 	}
225 
226 	nvme_pcie_qpair_reset(qpair);
227 
228 	return 0;
229 }
230 
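/*
 * Hedged usage sketch (names such as example_alloc_qpair_with_user_rings, sq_buf
 * and cq_buf are hypothetical, and error handling is abbreviated): an application
 * may supply its own physically contiguous, page-aligned SQ/CQ memory through the
 * spdk_nvme_io_qpair_opts sq/cq fields consumed by the constructor above.
 */
#if 0
static struct spdk_nvme_qpair *
example_alloc_qpair_with_user_rings(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_io_qpair_opts opts;
	void *sq_buf, *cq_buf;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));

	sq_buf = spdk_zmalloc(opts.io_queue_size * sizeof(struct spdk_nvme_cmd),
			      sysconf(_SC_PAGESIZE), NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	cq_buf = spdk_zmalloc(opts.io_queue_size * sizeof(struct spdk_nvme_cpl),
			      sysconf(_SC_PAGESIZE), NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!sq_buf || !cq_buf) {
		spdk_free(sq_buf);
		spdk_free(cq_buf);
		return NULL;
	}

	opts.sq.vaddr = sq_buf;
	opts.sq.paddr = spdk_vtophys(sq_buf, NULL);
	opts.cq.vaddr = cq_buf;
	opts.cq.paddr = spdk_vtophys(cq_buf, NULL);

	return spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
}
#endif
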
231 int
232 nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t num_entries)
233 {
234 	struct nvme_pcie_qpair *pqpair;
235 	int rc;
236 
237 	pqpair = spdk_zmalloc(sizeof(*pqpair), 64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
238 	if (pqpair == NULL) {
239 		return -ENOMEM;
240 	}
241 
242 	pqpair->num_entries = num_entries;
243 	pqpair->flags.delay_cmd_submit = 0;
244 	pqpair->pcie_state = NVME_PCIE_QPAIR_READY;
245 
246 	ctrlr->adminq = &pqpair->qpair;
247 
248 	rc = nvme_qpair_init(ctrlr->adminq,
249 			     0, /* qpair ID */
250 			     ctrlr,
251 			     SPDK_NVME_QPRIO_URGENT,
252 			     num_entries,
253 			     false);
254 	if (rc != 0) {
255 		return rc;
256 	}
257 
258 	pqpair->stat = spdk_zmalloc(sizeof(*pqpair->stat), 64, NULL, SPDK_ENV_SOCKET_ID_ANY,
259 				    SPDK_MALLOC_SHARE);
260 	if (!pqpair->stat) {
261 		SPDK_ERRLOG("Failed to allocate admin qpair statistics\n");
262 		return -ENOMEM;
263 	}
264 
265 	return nvme_pcie_qpair_construct(ctrlr->adminq, NULL);
266 }
267 
268 /**
269  * Note: the ctrlr_lock must be held when calling this function.
270  */
271 void
272 nvme_pcie_qpair_insert_pending_admin_request(struct spdk_nvme_qpair *qpair,
273 		struct nvme_request *req, struct spdk_nvme_cpl *cpl)
274 {
275 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
276 	struct nvme_request		*active_req = req;
277 	struct spdk_nvme_ctrlr_process	*active_proc;
278 
279 	/*
280 	 * The admin request is from another process. Move it to the per-process
281 	 *  list for that process to handle later.
282 	 */
283 	assert(nvme_qpair_is_admin_queue(qpair));
284 	assert(active_req->pid != getpid());
285 
286 	active_proc = nvme_ctrlr_get_process(ctrlr, active_req->pid);
287 	if (active_proc) {
288 		/* Save the original completion information */
289 		memcpy(&active_req->cpl, cpl, sizeof(*cpl));
290 		STAILQ_INSERT_TAIL(&active_proc->active_reqs, active_req, stailq);
291 	} else {
292 		SPDK_ERRLOG("The owning process (pid %d) is not found. Dropping the request.\n",
293 			    active_req->pid);
294 
295 		nvme_free_request(active_req);
296 	}
297 }
298 
299 /**
300  * Note: the ctrlr_lock must be held when calling this function.
301  */
302 void
303 nvme_pcie_qpair_complete_pending_admin_request(struct spdk_nvme_qpair *qpair)
304 {
305 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
306 	struct nvme_request		*req, *tmp_req;
307 	pid_t				pid = getpid();
308 	struct spdk_nvme_ctrlr_process	*proc;
309 
310 	/*
311 	 * Check whether there is any pending admin request from
312 	 * other active processes.
313 	 */
314 	assert(nvme_qpair_is_admin_queue(qpair));
315 
316 	proc = nvme_ctrlr_get_current_process(ctrlr);
317 	if (!proc) {
318 		SPDK_ERRLOG("the active process (pid %d) is not found for this controller.\n", pid);
319 		assert(proc);
320 		return;
321 	}
322 
323 	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
324 		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
325 
326 		assert(req->pid == pid);
327 
328 		nvme_complete_request(req->cb_fn, req->cb_arg, qpair, req, &req->cpl);
329 		nvme_free_request(req);
330 	}
331 }
332 
333 int
334 nvme_pcie_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
335 				 struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn,
336 				 void *cb_arg)
337 {
338 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(io_que);
339 	struct nvme_request *req;
340 	struct spdk_nvme_cmd *cmd;
341 
342 	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
343 	if (req == NULL) {
344 		return -ENOMEM;
345 	}
346 
347 	cmd = &req->cmd;
348 	cmd->opc = SPDK_NVME_OPC_CREATE_IO_CQ;
349 
350 	cmd->cdw10_bits.create_io_q.qid = io_que->id;
351 	cmd->cdw10_bits.create_io_q.qsize = pqpair->num_entries - 1;
352 
353 	cmd->cdw11_bits.create_io_cq.pc = 1;
354 	cmd->dptr.prp.prp1 = pqpair->cpl_bus_addr;
355 
356 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
357 }
358 
359 int
360 nvme_pcie_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
361 				 struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
362 {
363 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(io_que);
364 	struct nvme_request *req;
365 	struct spdk_nvme_cmd *cmd;
366 
367 	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
368 	if (req == NULL) {
369 		return -ENOMEM;
370 	}
371 
372 	cmd = &req->cmd;
373 	cmd->opc = SPDK_NVME_OPC_CREATE_IO_SQ;
374 
375 	cmd->cdw10_bits.create_io_q.qid = io_que->id;
376 	cmd->cdw10_bits.create_io_q.qsize = pqpair->num_entries - 1;
377 	cmd->cdw11_bits.create_io_sq.pc = 1;
378 	cmd->cdw11_bits.create_io_sq.qprio = io_que->qprio;
379 	cmd->cdw11_bits.create_io_sq.cqid = io_que->id;
380 	cmd->dptr.prp.prp1 = pqpair->cmd_bus_addr;
381 
382 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
383 }
384 
385 int
386 nvme_pcie_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
387 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
388 {
389 	struct nvme_request *req;
390 	struct spdk_nvme_cmd *cmd;
391 
392 	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
393 	if (req == NULL) {
394 		return -ENOMEM;
395 	}
396 
397 	cmd = &req->cmd;
398 	cmd->opc = SPDK_NVME_OPC_DELETE_IO_CQ;
399 	cmd->cdw10_bits.delete_io_q.qid = qpair->id;
400 
401 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
402 }
403 
404 int
405 nvme_pcie_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
406 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
407 {
408 	struct nvme_request *req;
409 	struct spdk_nvme_cmd *cmd;
410 
411 	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
412 	if (req == NULL) {
413 		return -ENOMEM;
414 	}
415 
416 	cmd = &req->cmd;
417 	cmd->opc = SPDK_NVME_OPC_DELETE_IO_SQ;
418 	cmd->cdw10_bits.delete_io_q.qid = qpair->id;
419 
420 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
421 }
422 
423 static void
424 nvme_completion_sq_error_delete_cq_cb(void *arg, const struct spdk_nvme_cpl *cpl)
425 {
426 	struct spdk_nvme_qpair *qpair = arg;
427 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
428 
429 	if (spdk_nvme_cpl_is_error(cpl)) {
430 		SPDK_ERRLOG("delete_io_cq failed!\n");
431 	}
432 
433 	pqpair->pcie_state = NVME_PCIE_QPAIR_FAILED;
434 }
435 
436 static void
437 nvme_completion_create_sq_cb(void *arg, const struct spdk_nvme_cpl *cpl)
438 {
439 	struct spdk_nvme_qpair *qpair = arg;
440 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
441 	struct spdk_nvme_ctrlr	*ctrlr = qpair->ctrlr;
442 	struct nvme_pcie_ctrlr	*pctrlr = nvme_pcie_ctrlr(ctrlr);
443 	int rc;
444 
445 	if (pqpair->flags.defer_destruction) {
446 		/* This qpair was deleted by the application while the
447 		 * connection was still in progress.  We had to wait
448 		 * to free the qpair resources until this outstanding
449 		 * command was completed.  Now that we have the completion,
450 		 * free the qpair.
451 		 */
452 		nvme_pcie_qpair_destroy(qpair);
453 		return;
454 	}
455 
456 	if (spdk_nvme_cpl_is_error(cpl)) {
457 		SPDK_ERRLOG("nvme_create_io_sq failed, deleting cq!\n");
458 		rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_sq_error_delete_cq_cb,
459 						      qpair);
460 		if (rc != 0) {
461 			SPDK_ERRLOG("Failed to send request to delete_io_cq with rc=%d\n", rc);
462 			pqpair->pcie_state = NVME_PCIE_QPAIR_FAILED;
463 		}
464 		return;
465 	}
466 	pqpair->pcie_state = NVME_PCIE_QPAIR_READY;
467 	if (ctrlr->shadow_doorbell) {
468 		pqpair->shadow_doorbell.sq_tdbl = ctrlr->shadow_doorbell + (2 * qpair->id + 0) *
469 						  pctrlr->doorbell_stride_u32;
470 		pqpair->shadow_doorbell.cq_hdbl = ctrlr->shadow_doorbell + (2 * qpair->id + 1) *
471 						  pctrlr->doorbell_stride_u32;
472 		pqpair->shadow_doorbell.sq_eventidx = ctrlr->eventidx + (2 * qpair->id + 0) *
473 						      pctrlr->doorbell_stride_u32;
474 		pqpair->shadow_doorbell.cq_eventidx = ctrlr->eventidx + (2 * qpair->id + 1) *
475 						      pctrlr->doorbell_stride_u32;
476 		pqpair->flags.has_shadow_doorbell = 1;
477 	} else {
478 		pqpair->flags.has_shadow_doorbell = 0;
479 	}
480 	nvme_pcie_qpair_reset(qpair);
481 
482 }
483 
484 static void
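/*
 * Sketch of the shadow-doorbell protocol set up above (the helper name is
 * hypothetical; the check follows the one defined for the Doorbell Buffer Config
 * feature): the driver stores the new tail/head in the shadow doorbell and only
 * issues the MMIO doorbell write when the new value passes the controller's
 * event index.
 */
#if 0
static bool
example_need_mmio_doorbell(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
	/* True when new_idx moved past event_idx since the last MMIO write. */
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}
#endif
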
485 nvme_completion_create_cq_cb(void *arg, const struct spdk_nvme_cpl *cpl)
486 {
487 	struct spdk_nvme_qpair *qpair = arg;
488 	struct nvme_pcie_qpair	*pqpair = nvme_pcie_qpair(qpair);
489 	int rc;
490 
491 	if (pqpair->flags.defer_destruction) {
492 		/* This qpair was deleted by the application while the
493 		 * connection was still in progress.  We had to wait
494 		 * to free the qpair resources until this outstanding
495 		 * command was completed.  Now that we have the completion,
496 		 * free the qpair.
497 		 */
498 		nvme_pcie_qpair_destroy(qpair);
499 		return;
500 	}
501 
502 	if (spdk_nvme_cpl_is_error(cpl)) {
503 		pqpair->pcie_state = NVME_PCIE_QPAIR_FAILED;
504 		SPDK_ERRLOG("nvme_create_io_cq failed!\n");
505 		return;
506 	}
507 
508 	rc = nvme_pcie_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_create_sq_cb, qpair);
509 
510 	if (rc != 0) {
511 		SPDK_ERRLOG("Failed to send request to create_io_sq, deleting cq!\n");
512 		rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_sq_error_delete_cq_cb,
513 						      qpair);
514 		if (rc != 0) {
515 			SPDK_ERRLOG("Failed to send request to delete_io_cq with rc=%d\n", rc);
516 			pqpair->pcie_state = NVME_PCIE_QPAIR_FAILED;
517 		}
518 		return;
519 	}
520 	pqpair->pcie_state = NVME_PCIE_QPAIR_WAIT_FOR_SQ;
521 }
522 
523 static int
524 _nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
525 				 uint16_t qid)
526 {
527 	struct nvme_pcie_qpair	*pqpair = nvme_pcie_qpair(qpair);
528 	int	rc;
529 
530 	/* Statistics may already be allocated in the case of controller reset */
531 	if (!pqpair->stat) {
532 		if (qpair->poll_group) {
533 			struct nvme_pcie_poll_group *group = SPDK_CONTAINEROF(qpair->poll_group,
534 							     struct nvme_pcie_poll_group, group);
535 
536 			pqpair->stat = &group->stats;
537 			pqpair->shared_stats = true;
538 		} else {
539 			pqpair->stat = calloc(1, sizeof(*pqpair->stat));
540 			if (!pqpair->stat) {
541 				SPDK_ERRLOG("Failed to allocate qpair statistics\n");
542 				nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
543 				return -ENOMEM;
544 			}
545 		}
546 	}
547 
548 
549 	rc = nvme_pcie_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_create_cq_cb, qpair);
550 
551 	if (rc != 0) {
552 		SPDK_ERRLOG("Failed to send request to create_io_cq\n");
553 		nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
554 		return rc;
555 	}
556 	pqpair->pcie_state = NVME_PCIE_QPAIR_WAIT_FOR_CQ;
557 	return 0;
558 }
559 
560 int
561 nvme_pcie_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
562 {
563 	int rc = 0;
564 
565 	if (!nvme_qpair_is_admin_queue(qpair)) {
566 		rc = _nvme_pcie_ctrlr_create_io_qpair(ctrlr, qpair, qpair->id);
567 	} else {
568 		nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
569 	}
570 
571 	return rc;
572 }
573 
574 void
575 nvme_pcie_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
576 {
577 	if (!nvme_qpair_is_admin_queue(qpair) || !ctrlr->is_disconnecting) {
578 		nvme_transport_ctrlr_disconnect_qpair_done(qpair);
579 	} else {
580 		/* If this function is called for the admin qpair via spdk_nvme_ctrlr_reset()
581 		 * or spdk_nvme_ctrlr_disconnect(), initiate a Controller Level Reset.
582 		 * Then we can abort trackers safely because the Controller Level Reset deletes
583 		 * all I/O SQ/CQs.
584 		 */
585 		nvme_ctrlr_disable(ctrlr);
586 	}
587 }
588 
589 /* Used when dst points to MMIO (i.e. CMB) in a virtual machine - in these cases we must
590  * not use wide instructions because QEMU will not emulate such instructions to MMIO space.
591  * So this function ensures we only copy 8 bytes at a time.
592  */
593 static inline void
594 nvme_pcie_copy_command_mmio(struct spdk_nvme_cmd *dst, const struct spdk_nvme_cmd *src)
595 {
596 	uint64_t *dst64 = (uint64_t *)dst;
597 	const uint64_t *src64 = (const uint64_t *)src;
598 	uint32_t i;
599 
600 	for (i = 0; i < sizeof(*dst) / 8; i++) {
601 		dst64[i] = src64[i];
602 	}
603 }
604 
605 static inline void
606 nvme_pcie_copy_command(struct spdk_nvme_cmd *dst, const struct spdk_nvme_cmd *src)
607 {
608 	/* dst and src are known to be non-overlapping and 64-byte aligned. */
609 #if defined(__SSE2__)
610 	__m128i *d128 = (__m128i *)dst;
611 	const __m128i *s128 = (const __m128i *)src;
612 
613 	_mm_stream_si128(&d128[0], _mm_load_si128(&s128[0]));
614 	_mm_stream_si128(&d128[1], _mm_load_si128(&s128[1]));
615 	_mm_stream_si128(&d128[2], _mm_load_si128(&s128[2]));
616 	_mm_stream_si128(&d128[3], _mm_load_si128(&s128[3]));
617 #else
618 	*dst = *src;
619 #endif
620 }
621 
622 void
623 nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr)
624 {
625 	struct nvme_request	*req;
626 	struct nvme_pcie_qpair	*pqpair = nvme_pcie_qpair(qpair);
627 	struct spdk_nvme_ctrlr	*ctrlr = qpair->ctrlr;
628 
629 	req = tr->req;
630 	assert(req != NULL);
631 
632 	spdk_trace_record(TRACE_NVME_PCIE_SUBMIT, qpair->id, 0, (uintptr_t)req,
633 			  req->cmd.cid, req->cmd.opc, req->cmd.cdw10, req->cmd.cdw11, req->cmd.cdw12);
634 
635 	if (req->cmd.fuse) {
636 		/*
637 		 * Keep track of the fuse operation sequence so that we ring the doorbell only
638 		 * after the second fuse is submitted.
639 		 */
640 		qpair->last_fuse = req->cmd.fuse;
641 	}
642 
643 	/* Don't use wide instructions to copy the NVMe command; this is a limitation of
644 	 * the QEMU virtual NVMe controller, where the maximum access width is 8 bytes at a time.
645 	 */
646 	if (spdk_unlikely((ctrlr->quirks & NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH) && pqpair->sq_in_cmb)) {
647 		nvme_pcie_copy_command_mmio(&pqpair->cmd[pqpair->sq_tail], &req->cmd);
648 	} else {
649 		/* Copy the command from the tracker to the submission queue. */
650 		nvme_pcie_copy_command(&pqpair->cmd[pqpair->sq_tail], &req->cmd);
651 	}
652 
653 	if (spdk_unlikely(++pqpair->sq_tail == pqpair->num_entries)) {
654 		pqpair->sq_tail = 0;
655 	}
656 
657 	if (spdk_unlikely(pqpair->sq_tail == pqpair->sq_head)) {
658 		SPDK_ERRLOG("sq_tail is passing sq_head!\n");
659 	}
660 
661 	if (!pqpair->flags.delay_cmd_submit) {
662 		nvme_pcie_qpair_ring_sq_doorbell(qpair);
663 	}
664 }
665 
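/*
 * Hedged sketch of the fused-command sequence tracked above (the callback, buffer
 * and function names are hypothetical): the compare is tagged as the first fused
 * command and the write as the second, and the transport records last_fuse so it
 * can avoid ringing the doorbell between the two halves.
 */
#if 0
static int
example_submit_compare_and_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				 void *cmp_buf, void *write_buf, uint64_t lba, uint32_t lba_count,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = spdk_nvme_ns_cmd_compare(ns, qpair, cmp_buf, lba, lba_count, cb_fn, cb_arg,
				      SPDK_NVME_IO_FLAGS_FUSE_FIRST);
	if (rc != 0) {
		return rc;
	}

	return spdk_nvme_ns_cmd_write(ns, qpair, write_buf, lba, lba_count, cb_fn, cb_arg,
				      SPDK_NVME_IO_FLAGS_FUSE_SECOND);
}
#endif
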
666 void
667 nvme_pcie_qpair_complete_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr,
668 				 struct spdk_nvme_cpl *cpl, bool print_on_error)
669 {
670 	struct nvme_pcie_qpair		*pqpair = nvme_pcie_qpair(qpair);
671 	struct nvme_request		*req;
672 	bool				retry, error;
673 	bool				req_from_current_proc = true;
674 	bool				print_error;
675 
676 	req = tr->req;
677 
678 	spdk_trace_record(TRACE_NVME_PCIE_COMPLETE, qpair->id, 0, (uintptr_t)req, req->cmd.cid);
679 
680 	assert(req != NULL);
681 
682 	error = spdk_nvme_cpl_is_error(cpl);
683 	retry = error && nvme_completion_is_retry(cpl) &&
684 		req->retries < pqpair->retry_count;
685 	print_error = error && print_on_error && !qpair->ctrlr->opts.disable_error_logging;
686 
687 	if (print_error) {
688 		spdk_nvme_qpair_print_command(qpair, &req->cmd);
689 	}
690 
691 	if (print_error || SPDK_DEBUGLOG_FLAG_ENABLED("nvme")) {
692 		spdk_nvme_qpair_print_completion(qpair, cpl);
693 	}
694 
695 	assert(cpl->cid == req->cmd.cid);
696 
697 	if (retry) {
698 		req->retries++;
699 		nvme_pcie_qpair_submit_tracker(qpair, tr);
700 	} else {
701 		TAILQ_REMOVE(&pqpair->outstanding_tr, tr, tq_list);
702 
703 		/* Only check admin requests from different processes. */
704 		if (nvme_qpair_is_admin_queue(qpair) && req->pid != getpid()) {
705 			req_from_current_proc = false;
706 			nvme_pcie_qpair_insert_pending_admin_request(qpair, req, cpl);
707 		} else {
708 			nvme_complete_request(tr->cb_fn, tr->cb_arg, qpair, req, cpl);
709 		}
710 
711 		if (req_from_current_proc == true) {
712 			nvme_qpair_free_request(qpair, req);
713 		}
714 
715 		tr->req = NULL;
716 
717 		TAILQ_INSERT_HEAD(&pqpair->free_tr, tr, tq_list);
718 	}
719 }
720 
721 void
722 nvme_pcie_qpair_manual_complete_tracker(struct spdk_nvme_qpair *qpair,
723 					struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
724 					bool print_on_error)
725 {
726 	struct spdk_nvme_cpl	cpl;
727 
728 	memset(&cpl, 0, sizeof(cpl));
729 	cpl.sqid = qpair->id;
730 	cpl.cid = tr->cid;
731 	cpl.status.sct = sct;
732 	cpl.status.sc = sc;
733 	cpl.status.dnr = dnr;
734 	nvme_pcie_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
735 }
736 
737 void
738 nvme_pcie_qpair_abort_trackers(struct spdk_nvme_qpair *qpair, uint32_t dnr)
739 {
740 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
741 	struct nvme_tracker *tr, *temp, *last;
742 
743 	last = TAILQ_LAST(&pqpair->outstanding_tr, nvme_outstanding_tr_head);
744 
745 	/* Abort previously submitted (outstanding) trs */
746 	TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, temp) {
747 		if (!qpair->ctrlr->opts.disable_error_logging) {
748 			SPDK_ERRLOG("aborting outstanding command\n");
749 		}
750 		nvme_pcie_qpair_manual_complete_tracker(qpair, tr, SPDK_NVME_SCT_GENERIC,
751 							SPDK_NVME_SC_ABORTED_BY_REQUEST, dnr, true);
752 
753 		if (tr == last) {
754 			break;
755 		}
756 	}
757 }
758 
759 void
760 nvme_pcie_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
761 {
762 	struct nvme_pcie_qpair	*pqpair = nvme_pcie_qpair(qpair);
763 	struct nvme_tracker	*tr;
764 
765 	tr = TAILQ_FIRST(&pqpair->outstanding_tr);
766 	while (tr != NULL) {
767 		assert(tr->req != NULL);
768 		if (tr->req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
769 			nvme_pcie_qpair_manual_complete_tracker(qpair, tr,
770 								SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_ABORTED_SQ_DELETION, 0,
771 								false);
772 			tr = TAILQ_FIRST(&pqpair->outstanding_tr);
773 		} else {
774 			tr = TAILQ_NEXT(tr, tq_list);
775 		}
776 	}
777 }
778 
779 void
780 nvme_pcie_admin_qpair_destroy(struct spdk_nvme_qpair *qpair)
781 {
782 	nvme_pcie_admin_qpair_abort_aers(qpair);
783 }
784 
785 void
786 nvme_pcie_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
787 {
788 	nvme_pcie_qpair_abort_trackers(qpair, dnr);
789 }
790 
791 static void
792 nvme_pcie_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
793 {
794 	uint64_t t02;
795 	struct nvme_tracker *tr, *tmp;
796 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
797 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
798 	struct spdk_nvme_ctrlr_process *active_proc;
799 
800 	/* Don't check timeouts during controller initialization. */
801 	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
802 		return;
803 	}
804 
805 	if (nvme_qpair_is_admin_queue(qpair)) {
806 		active_proc = nvme_ctrlr_get_current_process(ctrlr);
807 	} else {
808 		active_proc = qpair->active_proc;
809 	}
810 
811 	/* Only check timeouts if the current process has a timeout callback. */
812 	if (active_proc == NULL || active_proc->timeout_cb_fn == NULL) {
813 		return;
814 	}
815 
816 	t02 = spdk_get_ticks();
817 	TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, tmp) {
818 		assert(tr->req != NULL);
819 
820 		if (nvme_request_check_timeout(tr->req, tr->cid, active_proc, t02)) {
821 			/*
822 			 * The requests are in order, so as soon as one has not timed out,
823 			 * stop iterating.
824 			 */
825 			break;
826 		}
827 	}
828 }
829 
830 int32_t
831 nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
832 {
833 	struct nvme_pcie_qpair	*pqpair = nvme_pcie_qpair(qpair);
834 	struct nvme_tracker	*tr;
835 	struct spdk_nvme_cpl	*cpl, *next_cpl;
836 	uint32_t		 num_completions = 0;
837 	struct spdk_nvme_ctrlr	*ctrlr = qpair->ctrlr;
838 	uint16_t		 next_cq_head;
839 	uint8_t			 next_phase;
840 	bool			 next_is_valid = false;
841 	int			 rc;
842 
843 	if (spdk_unlikely(pqpair->pcie_state == NVME_PCIE_QPAIR_FAILED)) {
844 		return -ENXIO;
845 	}
846 
847 	if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING)) {
848 		if (pqpair->pcie_state == NVME_PCIE_QPAIR_READY) {
849 			/* It is possible that another thread set the pcie_state to
850 			 * QPAIR_READY, if it polled the adminq and processed the SQ
851 			 * completion for this qpair.  So check for that condition
852 			 * here and then update the qpair's state to CONNECTED, since
853 			 * we can only set the qpair state from the qpair's thread.
854 			 * (Note: this fixed issue #2157.)
855 			 */
856 			nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
857 		} else if (pqpair->pcie_state == NVME_PCIE_QPAIR_FAILED) {
858 			nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
859 			return -ENXIO;
860 		} else {
861 			rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
862 			if (rc < 0) {
863 				return rc;
864 			} else if (pqpair->pcie_state == NVME_PCIE_QPAIR_FAILED) {
865 				nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
866 				return -ENXIO;
867 			}
868 		}
869 		return 0;
870 	}
871 
872 	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair))) {
873 		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
874 	}
875 
876 	if (max_completions == 0 || max_completions > pqpair->max_completions_cap) {
877 		/*
878 		 * max_completions == 0 means unlimited, but complete at most a batch of
879 		 * max_completions_cap I/Os at a time so that the completion
880 		 * queue doorbells don't wrap around.
881 		 */
882 		max_completions = pqpair->max_completions_cap;
883 	}
884 
885 	pqpair->stat->polls++;
886 
887 	while (1) {
888 		cpl = &pqpair->cpl[pqpair->cq_head];
889 
890 		if (!next_is_valid && cpl->status.p != pqpair->flags.phase) {
891 			break;
892 		}
893 
894 		if (spdk_likely(pqpair->cq_head + 1 != pqpair->num_entries)) {
895 			next_cq_head = pqpair->cq_head + 1;
896 			next_phase = pqpair->flags.phase;
897 		} else {
898 			next_cq_head = 0;
899 			next_phase = !pqpair->flags.phase;
900 		}
901 		next_cpl = &pqpair->cpl[next_cq_head];
902 		next_is_valid = (next_cpl->status.p == next_phase);
903 		if (next_is_valid) {
904 			__builtin_prefetch(&pqpair->tr[next_cpl->cid]);
905 		}
906 
907 #if defined(__PPC64__) || defined(__riscv)
908 		/*
909 		 * This memory barrier prevents reordering of:
910 		 * - load after store from/to tr
911 		 * - load after load cpl phase and cpl cid
912 		 */
913 		spdk_mb();
914 #elif defined(__aarch64__)
915 		__asm volatile("dmb oshld" ::: "memory");
916 #endif
917 
918 		if (spdk_unlikely(++pqpair->cq_head == pqpair->num_entries)) {
919 			pqpair->cq_head = 0;
920 			pqpair->flags.phase = !pqpair->flags.phase;
921 		}
922 
923 		tr = &pqpair->tr[cpl->cid];
924 		/* Prefetch the req's STAILQ_ENTRY since we'll need to access it
925 		 * as part of putting the req back on the qpair's free list.
926 		 */
927 		__builtin_prefetch(&tr->req->stailq);
928 		pqpair->sq_head = cpl->sqhd;
929 
930 		if (tr->req) {
931 			nvme_pcie_qpair_complete_tracker(qpair, tr, cpl, true);
932 		} else {
933 			SPDK_ERRLOG("cpl does not map to outstanding cmd\n");
934 			spdk_nvme_qpair_print_completion(qpair, cpl);
935 			assert(0);
936 		}
937 
938 		if (++num_completions == max_completions) {
939 			break;
940 		}
941 	}
942 
943 	if (num_completions > 0) {
944 		pqpair->stat->completions += num_completions;
945 		nvme_pcie_qpair_ring_cq_doorbell(qpair);
946 	} else {
947 		pqpair->stat->idle_polls++;
948 	}
949 
950 	if (pqpair->flags.delay_cmd_submit) {
951 		if (pqpair->last_sq_tail != pqpair->sq_tail) {
952 			nvme_pcie_qpair_ring_sq_doorbell(qpair);
953 			pqpair->last_sq_tail = pqpair->sq_tail;
954 		}
955 	}
956 
957 	if (spdk_unlikely(ctrlr->timeout_enabled)) {
958 		/*
959 		 * User registered for timeout callback
960 		 */
961 		nvme_pcie_qpair_check_timeout(qpair);
962 	}
963 
964 	/* Before returning, complete any pending admin request or
965 	 * process the admin qpair disconnection.
966 	 */
967 	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair))) {
968 		nvme_pcie_qpair_complete_pending_admin_request(qpair);
969 
970 		if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
971 			rc = nvme_ctrlr_disable_poll(qpair->ctrlr);
972 			if (rc == 0) {
973 				nvme_transport_ctrlr_disconnect_qpair_done(qpair);
974 			}
975 		}
976 
977 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
978 	}
979 
980 	if (spdk_unlikely(pqpair->flags.has_pending_vtophys_failures)) {
981 		struct nvme_tracker *tr, *tmp;
982 
983 		TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, tmp) {
984 			if (tr->bad_vtophys) {
985 				tr->bad_vtophys = 0;
986 				nvme_pcie_fail_request_bad_vtophys(qpair, tr);
987 			}
988 		}
989 		pqpair->flags.has_pending_vtophys_failures = 0;
990 	}
991 
992 	return num_completions;
993 }
994 
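/*
 * Hedged usage sketch (loop structure hypothetical): an application thread drives
 * this transport routine through the public spdk_nvme_qpair_process_completions()
 * API; passing 0 lets the transport apply its own max_completions_cap batch limit.
 */
#if 0
static void
example_poll_until_quiesced(struct spdk_nvme_qpair *qpair, volatile bool *io_outstanding)
{
	while (*io_outstanding) {
		int32_t rc = spdk_nvme_qpair_process_completions(qpair, 0);

		if (rc < 0) {
			/* e.g. -ENXIO: the qpair or controller has failed. */
			break;
		}
	}
}
#endif
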
995 int
996 nvme_pcie_qpair_destroy(struct spdk_nvme_qpair *qpair)
997 {
998 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
999 
1000 	if (nvme_qpair_is_admin_queue(qpair)) {
1001 		nvme_pcie_admin_qpair_destroy(qpair);
1002 	}
1003 	/*
1004 	 * We check sq_vaddr and cq_vaddr to see if the user specified the memory
1005 	 * buffers when creating the I/O queue.
1006 	 * If the user specified them, we cannot free that memory.
1007 	 * Nor do we free it if it's in the CMB.
1008 	 */
1009 	if (!pqpair->sq_vaddr && pqpair->cmd && !pqpair->sq_in_cmb) {
1010 		spdk_free(pqpair->cmd);
1011 	}
1012 	if (!pqpair->cq_vaddr && pqpair->cpl) {
1013 		spdk_free(pqpair->cpl);
1014 	}
1015 	if (pqpair->tr) {
1016 		spdk_free(pqpair->tr);
1017 	}
1018 
1019 	nvme_qpair_deinit(qpair);
1020 
1021 	if (!pqpair->shared_stats) {
1022 		if (qpair->id) {
1023 			free(pqpair->stat);
1024 		} else {
1025 			/* Statistics of the admin qpair are allocated from huge pages because
1026 			 * the admin qpair is shared across processes. */
1027 			spdk_free(pqpair->stat);
1028 		}
1029 
1030 	}
1031 
1032 	spdk_free(pqpair);
1033 
1034 	return 0;
1035 }
1036 
1037 struct spdk_nvme_qpair *
1038 nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
1039 				const struct spdk_nvme_io_qpair_opts *opts)
1040 {
1041 	struct nvme_pcie_qpair *pqpair;
1042 	struct spdk_nvme_qpair *qpair;
1043 	int rc;
1044 
1045 	assert(ctrlr != NULL);
1046 
1047 	pqpair = spdk_zmalloc(sizeof(*pqpair), 64, NULL,
1048 			      SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
1049 	if (pqpair == NULL) {
1050 		return NULL;
1051 	}
1052 
1053 	pqpair->num_entries = opts->io_queue_size;
1054 	pqpair->flags.delay_cmd_submit = opts->delay_cmd_submit;
1055 
1056 	qpair = &pqpair->qpair;
1057 
1058 	rc = nvme_qpair_init(qpair, qid, ctrlr, opts->qprio, opts->io_queue_requests, opts->async_mode);
1059 	if (rc != 0) {
1060 		nvme_pcie_qpair_destroy(qpair);
1061 		return NULL;
1062 	}
1063 
1064 	rc = nvme_pcie_qpair_construct(qpair, opts);
1065 
1066 	if (rc != 0) {
1067 		nvme_pcie_qpair_destroy(qpair);
1068 		return NULL;
1069 	}
1070 
1071 	return qpair;
1072 }
1073 
1074 int
1075 nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1076 {
1077 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
1078 	struct nvme_completion_poll_status *status;
1079 	int rc;
1080 
1081 	assert(ctrlr != NULL);
1082 
1083 	if (ctrlr->is_removed) {
1084 		goto free;
1085 	}
1086 
1087 	if (ctrlr->prepare_for_reset) {
1088 		if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
1089 			pqpair->flags.defer_destruction = true;
1090 		}
1091 		goto clear_shadow_doorbells;
1092 	}
1093 
1094 	/* If attempting to delete a qpair that's still being connected, we have to wait until it's
1095 	 * finished, so that we don't free it while it's waiting for the create cq/sq callbacks.
1096 	 */
1097 	while (pqpair->pcie_state == NVME_PCIE_QPAIR_WAIT_FOR_CQ ||
1098 	       pqpair->pcie_state == NVME_PCIE_QPAIR_WAIT_FOR_SQ) {
1099 		rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
1100 		if (rc < 0) {
1101 			break;
1102 		}
1103 	}
1104 
1105 	status = calloc(1, sizeof(*status));
1106 	if (!status) {
1107 		SPDK_ERRLOG("Failed to allocate status tracker\n");
1108 		goto free;
1109 	}
1110 
1111 	/* Delete the I/O submission queue */
1112 	rc = nvme_pcie_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_completion_poll_cb, status);
1113 	if (rc != 0) {
1114 		SPDK_ERRLOG("Failed to send request to delete_io_sq with rc=%d\n", rc);
1115 		free(status);
1116 		goto free;
1117 	}
1118 	if (nvme_wait_for_completion(ctrlr->adminq, status)) {
1119 		if (!status->timed_out) {
1120 			free(status);
1121 		}
1122 		goto free;
1123 	}
1124 
1125 	/* Now that the submission queue is deleted, the device is supposed to have
1126 	 * completed any outstanding I/O. Try to complete them. If they don't complete,
1127 	 * they'll be marked as aborted and completed below. */
1128 	nvme_pcie_qpair_process_completions(qpair, 0);
1129 
1130 	memset(status, 0, sizeof(*status));
1131 	/* Delete the completion queue */
1132 	rc = nvme_pcie_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_completion_poll_cb, status);
1133 	if (rc != 0) {
1134 		SPDK_ERRLOG("Failed to send request to delete_io_cq with rc=%d\n", rc);
1135 		free(status);
1136 		goto free;
1137 	}
1138 	if (nvme_wait_for_completion(ctrlr->adminq, status)) {
1139 		if (!status->timed_out) {
1140 			free(status);
1141 		}
1142 		goto free;
1143 	}
1144 	free(status);
1145 
1146 clear_shadow_doorbells:
1147 	if (pqpair->flags.has_shadow_doorbell) {
1148 		*pqpair->shadow_doorbell.sq_tdbl = 0;
1149 		*pqpair->shadow_doorbell.cq_hdbl = 0;
1150 		*pqpair->shadow_doorbell.sq_eventidx = 0;
1151 		*pqpair->shadow_doorbell.cq_eventidx = 0;
1152 	}
1153 free:
1154 	if (qpair->no_deletion_notification_needed == 0) {
1155 		/* Abort the rest of the I/O */
1156 		nvme_pcie_qpair_abort_trackers(qpair, 1);
1157 	}
1158 
1159 	if (!pqpair->flags.defer_destruction) {
1160 		nvme_pcie_qpair_destroy(qpair);
1161 	}
1162 	return 0;
1163 }
1164 
1165 static void
1166 nvme_pcie_fail_request_bad_vtophys(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr)
1167 {
1168 	if (!qpair->in_completion_context) {
1169 		struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
1170 
1171 		tr->bad_vtophys = 1;
1172 		pqpair->flags.has_pending_vtophys_failures = 1;
1173 		return;
1174 	}
1175 
1176 	/*
1177 	 * Bad vtophys translation, so abort this request and return
1178 	 *  immediately.
1179 	 */
1180 	SPDK_ERRLOG("vtophys or other payload buffer related error\n");
1181 	nvme_pcie_qpair_manual_complete_tracker(qpair, tr, SPDK_NVME_SCT_GENERIC,
1182 						SPDK_NVME_SC_INVALID_FIELD,
1183 						1 /* do not retry */, true);
1184 }
1185 
1186 /*
1187  * Append PRP list entries to describe a virtually contiguous buffer starting at virt_addr of len bytes.
1188  *
1189  * *prp_index will be updated to account for the number of PRP entries used.
1190  */
1191 static inline int
1192 nvme_pcie_prp_list_append(struct spdk_nvme_ctrlr *ctrlr, struct nvme_tracker *tr,
1193 			  uint32_t *prp_index, void *virt_addr, size_t len,
1194 			  uint32_t page_size)
1195 {
1196 	struct spdk_nvme_cmd *cmd = &tr->req->cmd;
1197 	uintptr_t page_mask = page_size - 1;
1198 	uint64_t phys_addr;
1199 	uint32_t i;
1200 
1201 	SPDK_DEBUGLOG(nvme, "prp_index:%u virt_addr:%p len:%u\n",
1202 		      *prp_index, virt_addr, (uint32_t)len);
1203 
1204 	if (spdk_unlikely(((uintptr_t)virt_addr & 3) != 0)) {
1205 		SPDK_ERRLOG("virt_addr %p not dword aligned\n", virt_addr);
1206 		return -EFAULT;
1207 	}
1208 
1209 	i = *prp_index;
1210 	while (len) {
1211 		uint32_t seg_len;
1212 
1213 		/*
1214 		 * prp_index 0 is stored in prp1, and the rest are stored in the prp[] array,
1215 		 * so prp_index == SPDK_COUNTOF(tr->u.prp) is still valid.
1216 		 */
1217 		if (spdk_unlikely(i > SPDK_COUNTOF(tr->u.prp))) {
1218 			SPDK_ERRLOG("out of PRP entries\n");
1219 			return -EFAULT;
1220 		}
1221 
1222 		phys_addr = nvme_pcie_vtophys(ctrlr, virt_addr, NULL);
1223 		if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) {
1224 			SPDK_ERRLOG("vtophys(%p) failed\n", virt_addr);
1225 			return -EFAULT;
1226 		}
1227 
1228 		if (i == 0) {
1229 			SPDK_DEBUGLOG(nvme, "prp1 = %p\n", (void *)phys_addr);
1230 			cmd->dptr.prp.prp1 = phys_addr;
1231 			seg_len = page_size - ((uintptr_t)virt_addr & page_mask);
1232 		} else {
1233 			if ((phys_addr & page_mask) != 0) {
1234 				SPDK_ERRLOG("PRP %u not page aligned (%p)\n", i, virt_addr);
1235 				return -EFAULT;
1236 			}
1237 
1238 			SPDK_DEBUGLOG(nvme, "prp[%u] = %p\n", i - 1, (void *)phys_addr);
1239 			tr->u.prp[i - 1] = phys_addr;
1240 			seg_len = page_size;
1241 		}
1242 
1243 		seg_len = spdk_min(seg_len, len);
1244 		virt_addr += seg_len;
1245 		len -= seg_len;
1246 		i++;
1247 	}
1248 
1249 	cmd->psdt = SPDK_NVME_PSDT_PRP;
1250 	if (i <= 1) {
1251 		cmd->dptr.prp.prp2 = 0;
1252 	} else if (i == 2) {
1253 		cmd->dptr.prp.prp2 = tr->u.prp[0];
1254 		SPDK_DEBUGLOG(nvme, "prp2 = %p\n", (void *)cmd->dptr.prp.prp2);
1255 	} else {
1256 		cmd->dptr.prp.prp2 = tr->prp_sgl_bus_addr;
1257 		SPDK_DEBUGLOG(nvme, "prp2 = %p (PRP list)\n", (void *)cmd->dptr.prp.prp2);
1258 	}
1259 
1260 	*prp_index = i;
1261 	return 0;
1262 }
1263 
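/*
 * Worked example, assuming a 4 KiB page size: appending an 8 KiB buffer whose
 * virtual address sits 0x10 bytes into a page consumes three PRP entries.
 * prp1 covers the first 4080 bytes, tr->u.prp[0] and tr->u.prp[1] cover the two
 * following page-aligned chunks (4096 + 16 bytes), and because more than two
 * entries were used, prp2 is set to tr->prp_sgl_bus_addr, i.e. it points at the
 * PRP list embedded in the tracker.
 */
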
1264 static int
1265 nvme_pcie_qpair_build_request_invalid(struct spdk_nvme_qpair *qpair,
1266 				      struct nvme_request *req, struct nvme_tracker *tr, bool dword_aligned)
1267 {
1268 	assert(0);
1269 	nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1270 	return -EINVAL;
1271 }
1272 
1273 /**
1274  * Build PRP list describing physically contiguous payload buffer.
1275  */
1276 static int
1277 nvme_pcie_qpair_build_contig_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
1278 				     struct nvme_tracker *tr, bool dword_aligned)
1279 {
1280 	uint32_t prp_index = 0;
1281 	int rc;
1282 
1283 	rc = nvme_pcie_prp_list_append(qpair->ctrlr, tr, &prp_index,
1284 				       req->payload.contig_or_cb_arg + req->payload_offset,
1285 				       req->payload_size, qpair->ctrlr->page_size);
1286 	if (rc) {
1287 		nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1288 	}
1289 
1290 	return rc;
1291 }
1292 
1293 /**
1294  * Build an SGL describing a physically contiguous payload buffer.
1295  *
1296  * This is more efficient than using PRP because large buffers can be
1297  * described this way.
1298  */
1299 static int
1300 nvme_pcie_qpair_build_contig_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
1301 		struct nvme_tracker *tr, bool dword_aligned)
1302 {
1303 	void *virt_addr;
1304 	uint64_t phys_addr, mapping_length;
1305 	uint32_t length;
1306 	struct spdk_nvme_sgl_descriptor *sgl;
1307 	uint32_t nseg = 0;
1308 
1309 	assert(req->payload_size != 0);
1310 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
1311 
1312 	sgl = tr->u.sgl;
1313 	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
1314 	req->cmd.dptr.sgl1.unkeyed.subtype = 0;
1315 
1316 	length = req->payload_size;
1317 	virt_addr = req->payload.contig_or_cb_arg + req->payload_offset;
1318 
1319 	while (length > 0) {
1320 		if (nseg >= NVME_MAX_SGL_DESCRIPTORS) {
1321 			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1322 			return -EFAULT;
1323 		}
1324 
1325 		if (dword_aligned && ((uintptr_t)virt_addr & 3)) {
1326 			SPDK_ERRLOG("virt_addr %p not dword aligned\n", virt_addr);
1327 			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1328 			return -EFAULT;
1329 		}
1330 
1331 		mapping_length = length;
1332 		phys_addr = nvme_pcie_vtophys(qpair->ctrlr, virt_addr, &mapping_length);
1333 		if (phys_addr == SPDK_VTOPHYS_ERROR) {
1334 			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1335 			return -EFAULT;
1336 		}
1337 
1338 		mapping_length = spdk_min(length, mapping_length);
1339 
1340 		length -= mapping_length;
1341 		virt_addr += mapping_length;
1342 
1343 		sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
1344 		sgl->unkeyed.length = mapping_length;
1345 		sgl->address = phys_addr;
1346 		sgl->unkeyed.subtype = 0;
1347 
1348 		sgl++;
1349 		nseg++;
1350 	}
1351 
1352 	if (nseg == 1) {
1353 		/*
1354 		 * The whole transfer can be described by a single SGL descriptor.
1355 		 *  Use the special case described by the spec where SGL1's type is Data Block.
1356 		 *  This means the SGL in the tracker is not used at all, so copy the first (and only)
1357 		 *  SGL element into SGL1.
1358 		 */
1359 		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
1360 		req->cmd.dptr.sgl1.address = tr->u.sgl[0].address;
1361 		req->cmd.dptr.sgl1.unkeyed.length = tr->u.sgl[0].unkeyed.length;
1362 	} else {
1363 		/* The SPDK NVMe driver supports only 1 SGL segment for now; this is enough because
1364 		 *  NVME_MAX_SGL_DESCRIPTORS * 16 is less than one page.
1365 		 */
1366 		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
1367 		req->cmd.dptr.sgl1.address = tr->prp_sgl_bus_addr;
1368 		req->cmd.dptr.sgl1.unkeyed.length = nseg * sizeof(struct spdk_nvme_sgl_descriptor);
1369 	}
1370 
1371 	return 0;
1372 }
1373 
1374 /**
1375  * Build SGL list describing scattered payload buffer.
1376  */
1377 static int
1378 nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
1379 				     struct nvme_tracker *tr, bool dword_aligned)
1380 {
1381 	int rc;
1382 	void *virt_addr;
1383 	uint64_t phys_addr, mapping_length;
1384 	uint32_t remaining_transfer_len, remaining_user_sge_len, length;
1385 	struct spdk_nvme_sgl_descriptor *sgl;
1386 	uint32_t nseg = 0;
1387 
1388 	/*
1389 	 * Build scattered payloads.
1390 	 */
1391 	assert(req->payload_size != 0);
1392 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
1393 	assert(req->payload.reset_sgl_fn != NULL);
1394 	assert(req->payload.next_sge_fn != NULL);
1395 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
1396 
1397 	sgl = tr->u.sgl;
1398 	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
1399 	req->cmd.dptr.sgl1.unkeyed.subtype = 0;
1400 
1401 	remaining_transfer_len = req->payload_size;
1402 
1403 	while (remaining_transfer_len > 0) {
1404 		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg,
1405 					      &virt_addr, &remaining_user_sge_len);
1406 		if (rc) {
1407 			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1408 			return -EFAULT;
1409 		}
1410 
1411 		/* Bit Bucket SGL descriptor */
1412 		if ((uint64_t)virt_addr == UINT64_MAX) {
1413 			/* TODO: enable WRITE and COMPARE when necessary */
1414 			if (req->cmd.opc != SPDK_NVME_OPC_READ) {
1415 				SPDK_ERRLOG("Only READ command can be supported\n");
1416 				goto exit;
1417 			}
1418 			if (nseg >= NVME_MAX_SGL_DESCRIPTORS) {
1419 				SPDK_ERRLOG("Too many SGL entries\n");
1420 				goto exit;
1421 			}
1422 
1423 			sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_BIT_BUCKET;
1424 			/* If the SGL describes a destination data buffer, the data shall be
1425 			 * discarded by the controller, and its length is included in the
1426 			 * Number of Logical Blocks (NLB) parameter. Otherwise, the length
1427 			 * is not included in the NLB parameter.
1428 			 */
1429 			remaining_user_sge_len = spdk_min(remaining_user_sge_len, remaining_transfer_len);
1430 			remaining_transfer_len -= remaining_user_sge_len;
1431 
1432 			sgl->unkeyed.length = remaining_user_sge_len;
1433 			sgl->address = 0;
1434 			sgl->unkeyed.subtype = 0;
1435 
1436 			sgl++;
1437 			nseg++;
1438 
1439 			continue;
1440 		}
1441 
1442 		remaining_user_sge_len = spdk_min(remaining_user_sge_len, remaining_transfer_len);
1443 		remaining_transfer_len -= remaining_user_sge_len;
1444 		while (remaining_user_sge_len > 0) {
1445 			if (nseg >= NVME_MAX_SGL_DESCRIPTORS) {
1446 				SPDK_ERRLOG("Too many SGL entries\n");
1447 				goto exit;
1448 			}
1449 
1450 			if (dword_aligned && ((uintptr_t)virt_addr & 3)) {
1451 				SPDK_ERRLOG("virt_addr %p not dword aligned\n", virt_addr);
1452 				goto exit;
1453 			}
1454 
1455 			mapping_length = remaining_user_sge_len;
1456 			phys_addr = nvme_pcie_vtophys(qpair->ctrlr, virt_addr, &mapping_length);
1457 			if (phys_addr == SPDK_VTOPHYS_ERROR) {
1458 				goto exit;
1459 			}
1460 
1461 			length = spdk_min(remaining_user_sge_len, mapping_length);
1462 			remaining_user_sge_len -= length;
1463 			virt_addr += length;
1464 
1465 			if (nseg > 0 && phys_addr ==
1466 			    (*(sgl - 1)).address + (*(sgl - 1)).unkeyed.length) {
1467 				/* extend previous entry */
1468 				(*(sgl - 1)).unkeyed.length += length;
1469 				continue;
1470 			}
1471 
1472 			sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
1473 			sgl->unkeyed.length = length;
1474 			sgl->address = phys_addr;
1475 			sgl->unkeyed.subtype = 0;
1476 
1477 			sgl++;
1478 			nseg++;
1479 		}
1480 	}
1481 
1482 	if (nseg == 1) {
1483 		/*
1484 		 * The whole transfer can be described by a single SGL descriptor.
1485 		 *  Use the special case described by the spec where SGL1's type is Data Block.
1486 		 *  This means the SGL in the tracker is not used at all, so copy the first (and only)
1487 		 *  SGL element into SGL1.
1488 		 */
1489 		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
1490 		req->cmd.dptr.sgl1.address = tr->u.sgl[0].address;
1491 		req->cmd.dptr.sgl1.unkeyed.length = tr->u.sgl[0].unkeyed.length;
1492 	} else {
1493 		/* The SPDK NVMe driver supports only 1 SGL segment for now; this is enough because
1494 		 *  NVME_MAX_SGL_DESCRIPTORS * 16 is less than one page.
1495 		 */
1496 		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
1497 		req->cmd.dptr.sgl1.address = tr->prp_sgl_bus_addr;
1498 		req->cmd.dptr.sgl1.unkeyed.length = nseg * sizeof(struct spdk_nvme_sgl_descriptor);
1499 	}
1500 
1501 	return 0;
1502 
1503 exit:
1504 	nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1505 	return -EFAULT;
1506 }
1507 
1508 /**
1509  * Build PRP list describing scattered payload buffer.
1510  */
1511 static int
1512 nvme_pcie_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
1513 				       struct nvme_tracker *tr, bool dword_aligned)
1514 {
1515 	int rc;
1516 	void *virt_addr;
1517 	uint32_t remaining_transfer_len, length;
1518 	uint32_t prp_index = 0;
1519 	uint32_t page_size = qpair->ctrlr->page_size;
1520 
1521 	/*
1522 	 * Build scattered payloads.
1523 	 */
1524 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
1525 	assert(req->payload.reset_sgl_fn != NULL);
1526 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
1527 
1528 	remaining_transfer_len = req->payload_size;
1529 	while (remaining_transfer_len > 0) {
1530 		assert(req->payload.next_sge_fn != NULL);
1531 		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
1532 		if (rc) {
1533 			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1534 			return -EFAULT;
1535 		}
1536 
1537 		length = spdk_min(remaining_transfer_len, length);
1538 
1539 		/*
1540 		 * Any incompatible sges should have been handled up in the splitting routine,
1541 		 *  but assert here as an additional check.
1542 		 *
1543 		 * All SGEs except last must end on a page boundary.
1544 		 */
1545 		assert((length == remaining_transfer_len) ||
1546 		       _is_page_aligned((uintptr_t)virt_addr + length, page_size));
1547 
1548 		rc = nvme_pcie_prp_list_append(qpair->ctrlr, tr, &prp_index, virt_addr, length, page_size);
1549 		if (rc) {
1550 			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1551 			return rc;
1552 		}
1553 
1554 		remaining_transfer_len -= length;
1555 	}
1556 
1557 	return 0;
1558 }
1559 
1560 typedef int(*build_req_fn)(struct spdk_nvme_qpair *, struct nvme_request *, struct nvme_tracker *,
1561 			   bool);
1562 
1563 static build_req_fn const g_nvme_pcie_build_req_table[][2] = {
1564 	[NVME_PAYLOAD_TYPE_INVALID] = {
1565 		nvme_pcie_qpair_build_request_invalid,			/* PRP */
1566 		nvme_pcie_qpair_build_request_invalid			/* SGL */
1567 	},
1568 	[NVME_PAYLOAD_TYPE_CONTIG] = {
1569 		nvme_pcie_qpair_build_contig_request,			/* PRP */
1570 		nvme_pcie_qpair_build_contig_hw_sgl_request		/* SGL */
1571 	},
1572 	[NVME_PAYLOAD_TYPE_SGL] = {
1573 		nvme_pcie_qpair_build_prps_sgl_request,			/* PRP */
1574 		nvme_pcie_qpair_build_hw_sgl_request			/* SGL */
1575 	}
1576 };
1577 
1578 static int
1579 nvme_pcie_qpair_build_metadata(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr,
1580 			       bool sgl_supported, bool dword_aligned)
1581 {
1582 	void *md_payload;
1583 	struct nvme_request *req = tr->req;
1584 
1585 	if (req->payload.md) {
1586 		md_payload = req->payload.md + req->md_offset;
1587 		if (dword_aligned && ((uintptr_t)md_payload & 3)) {
1588 			SPDK_ERRLOG("virt_addr %p not dword aligned\n", md_payload);
1589 			goto exit;
1590 		}
1591 
1592 		if (sgl_supported && dword_aligned) {
1593 			assert(req->cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
1594 			req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_SGL;
1595 			tr->meta_sgl.address = nvme_pcie_vtophys(qpair->ctrlr, md_payload, NULL);
1596 			if (tr->meta_sgl.address == SPDK_VTOPHYS_ERROR) {
1597 				goto exit;
1598 			}
1599 			tr->meta_sgl.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
1600 			tr->meta_sgl.unkeyed.length = req->md_size;
1601 			tr->meta_sgl.unkeyed.subtype = 0;
1602 			req->cmd.mptr = tr->prp_sgl_bus_addr - sizeof(struct spdk_nvme_sgl_descriptor);
1603 		} else {
1604 			req->cmd.mptr = nvme_pcie_vtophys(qpair->ctrlr, md_payload, NULL);
1605 			if (req->cmd.mptr == SPDK_VTOPHYS_ERROR) {
1606 				goto exit;
1607 			}
1608 		}
1609 	}
1610 
1611 	return 0;
1612 
1613 exit:
1614 	nvme_pcie_fail_request_bad_vtophys(qpair, tr);
1615 	return -EINVAL;
1616 }
1617 
1618 int
1619 nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
1620 {
1621 	struct nvme_tracker	*tr;
1622 	int			rc = 0;
1623 	struct spdk_nvme_ctrlr	*ctrlr = qpair->ctrlr;
1624 	struct nvme_pcie_qpair	*pqpair = nvme_pcie_qpair(qpair);
1625 	enum nvme_payload_type	payload_type;
1626 	bool			sgl_supported;
1627 	bool			dword_aligned = true;
1628 
1629 	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair))) {
1630 		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1631 	}
1632 
1633 	tr = TAILQ_FIRST(&pqpair->free_tr);
1634 
1635 	if (tr == NULL) {
1636 		pqpair->stat->queued_requests++;
1637 		/* Inform the upper layer to try again later. */
1638 		rc = -EAGAIN;
1639 		goto exit;
1640 	}
1641 
1642 	pqpair->stat->submitted_requests++;
1643 	TAILQ_REMOVE(&pqpair->free_tr, tr, tq_list); /* remove tr from free_tr */
1644 	TAILQ_INSERT_TAIL(&pqpair->outstanding_tr, tr, tq_list);
1645 	tr->req = req;
1646 	tr->cb_fn = req->cb_fn;
1647 	tr->cb_arg = req->cb_arg;
1648 	req->cmd.cid = tr->cid;
1649 
1650 	if (req->payload_size != 0) {
1651 		payload_type = nvme_payload_type(&req->payload);
1652 		/* According to the specification, PRPs shall be used for all
1653 		 *  Admin commands for NVMe over PCIe implementations.
1654 		 */
1655 		sgl_supported = (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) != 0 &&
1656 				!nvme_qpair_is_admin_queue(qpair);
1657 
1658 		if (sgl_supported) {
1659 			/* Don't use SGL for DSM command */
1660 			if (spdk_unlikely((ctrlr->quirks & NVME_QUIRK_NO_SGL_FOR_DSM) &&
1661 					  (req->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT))) {
1662 				sgl_supported = false;
1663 			}
1664 		}
1665 
1666 		if (sgl_supported && !(ctrlr->flags & SPDK_NVME_CTRLR_SGL_REQUIRES_DWORD_ALIGNMENT)) {
1667 			dword_aligned = false;
1668 		}
1669 
1670 		/* If we fail to build the request or the metadata, do not return the -EFAULT back up
1671 		 * the stack.  This ensures that we always fail these types of requests via a
1672 		 * completion callback, and never in the context of the submission.
1673 		 */
1674 		rc = g_nvme_pcie_build_req_table[payload_type][sgl_supported](qpair, req, tr, dword_aligned);
1675 		if (rc < 0) {
1676 			assert(rc == -EFAULT);
1677 			rc = 0;
1678 			goto exit;
1679 		}
1680 
1681 		rc = nvme_pcie_qpair_build_metadata(qpair, tr, sgl_supported, dword_aligned);
1682 		if (rc < 0) {
1683 			assert(rc == -EFAULT);
1684 			rc = 0;
1685 			goto exit;
1686 		}
1687 	}
1688 
1689 	nvme_pcie_qpair_submit_tracker(qpair, tr);
1690 
1691 exit:
1692 	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair))) {
1693 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1694 	}
1695 
1696 	return rc;
1697 }
1698 
1699 struct spdk_nvme_transport_poll_group *
1700 nvme_pcie_poll_group_create(void)
1701 {
1702 	struct nvme_pcie_poll_group *group = calloc(1, sizeof(*group));
1703 
1704 	if (group == NULL) {
1705 		SPDK_ERRLOG("Unable to allocate poll group.\n");
1706 		return NULL;
1707 	}
1708 
1709 	return &group->group;
1710 }
1711 
1712 int
1713 nvme_pcie_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
1714 {
1715 	return 0;
1716 }
1717 
1718 int
1719 nvme_pcie_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
1720 {
1721 	return 0;
1722 }
1723 
1724 int
1725 nvme_pcie_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
1726 			 struct spdk_nvme_qpair *qpair)
1727 {
1728 	return 0;
1729 }
1730 
1731 int
1732 nvme_pcie_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
1733 			    struct spdk_nvme_qpair *qpair)
1734 {
1735 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
1736 
1737 	pqpair->stat = &g_dummy_stat;
1738 	return 0;
1739 }
1740 
1741 int64_t
1742 nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
1743 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
1744 {
1745 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
1746 	int32_t local_completions = 0;
1747 	int64_t total_completions = 0;
1748 
1749 	STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
1750 		disconnected_qpair_cb(qpair, tgroup->group->ctx);
1751 	}
1752 
1753 	STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
1754 		local_completions = spdk_nvme_qpair_process_completions(qpair, completions_per_qpair);
1755 		if (spdk_unlikely(local_completions < 0)) {
1756 			disconnected_qpair_cb(qpair, tgroup->group->ctx);
1757 			total_completions = -ENXIO;
1758 		} else if (spdk_likely(total_completions >= 0)) {
1759 			total_completions += local_completions;
1760 		}
1761 	}
1762 
1763 	return total_completions;
1764 }
1765 
1766 int
1767 nvme_pcie_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
1768 {
1769 	if (!STAILQ_EMPTY(&tgroup->connected_qpairs) || !STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
1770 		return -EBUSY;
1771 	}
1772 
1773 	free(tgroup);
1774 
1775 	return 0;
1776 }
1777 
1778 SPDK_TRACE_REGISTER_FN(nvme_pcie, "nvme_pcie", TRACE_GROUP_NVME_PCIE)
1779 {
1780 	struct spdk_trace_tpoint_opts opts[] = {
1781 		{
1782 			"NVME_PCIE_SUBMIT", TRACE_NVME_PCIE_SUBMIT,
1783 			OWNER_NVME_PCIE_QP, OBJECT_NVME_PCIE_TR, 1,
1784 			{	{ "cid", SPDK_TRACE_ARG_TYPE_INT, 8 },
1785 				{ "opc", SPDK_TRACE_ARG_TYPE_INT, 8 },
1786 				{ "dw10", SPDK_TRACE_ARG_TYPE_PTR, 8 },
1787 				{ "dw11", SPDK_TRACE_ARG_TYPE_PTR, 8 },
1788 				{ "dw12", SPDK_TRACE_ARG_TYPE_PTR, 8 }
1789 			}
1790 		},
1791 		{
1792 			"NVME_PCIE_COMPLETE", TRACE_NVME_PCIE_COMPLETE,
1793 			OWNER_NVME_PCIE_QP, OBJECT_NVME_PCIE_TR, 0,
1794 			{{ "cid", SPDK_TRACE_ARG_TYPE_INT, 8 }}
1795 		},
1796 	};
1797 
1798 	spdk_trace_register_object(OBJECT_NVME_PCIE_TR, 'p');
1799 	spdk_trace_register_owner(OWNER_NVME_PCIE_QP, 'q');
1800 	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
1801 }
1802