/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation. All rights reserved.
 *   Copyright (c) 2017, IBM Corporation. All rights reserved.
 *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
 */

/*
 * NVMe over PCIe transport
 */

#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "nvme_internal.h"
#include "nvme_pcie_internal.h"

struct nvme_pcie_enum_ctx {
	struct spdk_nvme_probe_ctx *probe_ctx;
	struct spdk_pci_addr pci_addr;
	bool has_pci_addr;
};

static uint16_t g_signal_lock;
static bool g_sigset = false;
static spdk_nvme_pcie_hotplug_filter_cb g_hotplug_filter_cb;

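/*
 * SIGBUS handler registered with the env layer. If a controller is
 * physically hot-removed, its MMIO region becomes invalid and an access to
 * pctrlr->regs raises SIGBUS. The handler remaps the register window to
 * anonymous memory filled with 0xFF so that in-flight register accesses
 * complete; the resulting all-ones reads are then caught by the get_reg
 * helpers below. g_signal_lock serializes concurrent faults.
 */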
static void
nvme_sigbus_fault_sighandler(const void *failure_addr, void *ctx)
{
	void *map_address;
	uint16_t flag = 0;

	if (!__atomic_compare_exchange_n(&g_signal_lock, &flag, 1, false, __ATOMIC_ACQUIRE,
					 __ATOMIC_RELAXED)) {
		SPDK_DEBUGLOG(nvme, "failed to acquire g_signal_lock\n");
		return;
	}

	if (g_thread_mmio_ctrlr == NULL) {
		return;
	}

	if (!g_thread_mmio_ctrlr->is_remapped) {
		map_address = mmap((void *)g_thread_mmio_ctrlr->regs, g_thread_mmio_ctrlr->regs_size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
		if (map_address == MAP_FAILED) {
			SPDK_ERRLOG("mmap failed\n");
			__atomic_store_n(&g_signal_lock, 0, __ATOMIC_RELEASE);
			return;
		}
		memset(map_address, 0xFF, sizeof(struct spdk_nvme_registers));
		g_thread_mmio_ctrlr->regs = (volatile struct spdk_nvme_registers *)map_address;
		g_thread_mmio_ctrlr->is_remapped = true;
	}
	__atomic_store_n(&g_signal_lock, 0, __ATOMIC_RELEASE);
}

static void
_nvme_pcie_event_process(struct spdk_pci_event *event, void *cb_ctx)
{
	struct spdk_nvme_transport_id trid;
	struct spdk_nvme_ctrlr *ctrlr;

	if (event->action == SPDK_UEVENT_ADD) {
		if (spdk_process_is_primary()) {
			if (g_hotplug_filter_cb == NULL || g_hotplug_filter_cb(&event->traddr)) {
				/* The enumerate interface implements the add operation. */
				spdk_pci_device_allow(&event->traddr);
			}
		}
	} else if (event->action == SPDK_UEVENT_REMOVE) {
		memset(&trid, 0, sizeof(trid));
		spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_PCIE);

		if (spdk_pci_addr_fmt(trid.traddr, sizeof(trid.traddr), &event->traddr) < 0) {
			SPDK_ERRLOG("Failed to format pci address\n");
			return;
		}

		ctrlr = nvme_get_ctrlr_by_trid_unsafe(&trid, NULL);
		if (ctrlr == NULL) {
			return;
		}
		SPDK_DEBUGLOG(nvme, "remove nvme address: %s\n", trid.traddr);

		nvme_ctrlr_lock(ctrlr);
		nvme_ctrlr_fail(ctrlr, true);
		nvme_ctrlr_unlock(ctrlr);

		/* get the user app to clean up and stop I/O */
		if (ctrlr->remove_cb) {
			nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
			ctrlr->remove_cb(ctrlr->cb_ctx, ctrlr);
			nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
		}
	}
}

static int
_nvme_pcie_hotplug_monitor(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;
	struct spdk_pci_event event;
	int rc = 0;

	if (g_spdk_nvme_driver->hotplug_fd >= 0) {
		while (spdk_pci_get_event(g_spdk_nvme_driver->hotplug_fd, &event) > 0) {
			_nvme_pcie_event_process(&event, probe_ctx->cb_ctx);
		}
	}

	/* Initiate removal of physically hot-removed PCI controllers. Even after
	 * they are hot-removed from the system, SPDK might still report them via RPC.
	 */
	TAILQ_FOREACH_SAFE(ctrlr, &g_spdk_nvme_driver->shared_attached_ctrlrs, tailq, tmp) {
		bool do_remove = false;
		struct nvme_pcie_ctrlr *pctrlr;

		if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
			continue;
		}

		pctrlr = nvme_pcie_ctrlr(ctrlr);
		if (spdk_pci_device_is_removed(pctrlr->devhandle)) {
			do_remove = true;
			rc = 1;
		}

		if (do_remove) {
			nvme_ctrlr_lock(ctrlr);
			nvme_ctrlr_fail(ctrlr, true);
			nvme_ctrlr_unlock(ctrlr);
			if (ctrlr->remove_cb) {
				nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
				ctrlr->remove_cb(ctrlr->cb_ctx, ctrlr);
				nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
			}
		}
	}
	return rc;
}

static volatile void *
nvme_pcie_reg_addr(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	return (volatile void *)((uintptr_t)pctrlr->regs + offset);
}

static volatile struct spdk_nvme_registers *
nvme_pcie_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	return pctrlr->regs;
}

static int
nvme_pcie_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	assert(offset <= sizeof(struct spdk_nvme_registers) - 4);
	g_thread_mmio_ctrlr = pctrlr;
	spdk_mmio_write_4(nvme_pcie_reg_addr(ctrlr, offset), value);
	g_thread_mmio_ctrlr = NULL;
	return 0;
}

static int
nvme_pcie_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	assert(offset <= sizeof(struct spdk_nvme_registers) - 8);
	g_thread_mmio_ctrlr = pctrlr;
	spdk_mmio_write_8(nvme_pcie_reg_addr(ctrlr, offset), value);
	g_thread_mmio_ctrlr = NULL;
	return 0;
}

static int
nvme_pcie_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	assert(offset <= sizeof(struct spdk_nvme_registers) - 4);
	assert(value != NULL);
	g_thread_mmio_ctrlr = pctrlr;
	*value = spdk_mmio_read_4(nvme_pcie_reg_addr(ctrlr, offset));
	g_thread_mmio_ctrlr = NULL;
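	/* A value of all 1s means the MMIO read failed, typically because the
	 * device was hot-removed (see the SIGBUS handler above). */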
	if (~(*value) == 0) {
		return -1;
	}

	return 0;
}

static int
nvme_pcie_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	assert(offset <= sizeof(struct spdk_nvme_registers) - 8);
	assert(value != NULL);
	g_thread_mmio_ctrlr = pctrlr;
	*value = spdk_mmio_read_8(nvme_pcie_reg_addr(ctrlr, offset));
	g_thread_mmio_ctrlr = NULL;
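	/* As above, all 1s indicates a failed MMIO read. */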
	if (~(*value) == 0) {
		return -1;
	}

	return 0;
}

static int
nvme_pcie_ctrlr_set_asq(struct nvme_pcie_ctrlr *pctrlr, uint64_t value)
{
	return nvme_pcie_ctrlr_set_reg_8(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, asq),
					 value);
}

static int
nvme_pcie_ctrlr_set_acq(struct nvme_pcie_ctrlr *pctrlr, uint64_t value)
{
	return nvme_pcie_ctrlr_set_reg_8(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, acq),
					 value);
}

static int
nvme_pcie_ctrlr_set_aqa(struct nvme_pcie_ctrlr *pctrlr, const union spdk_nvme_aqa_register *aqa)
{
	return nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, aqa.raw),
					 aqa->raw);
}

static int
nvme_pcie_ctrlr_get_cmbloc(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_cmbloc_register *cmbloc)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
					 &cmbloc->raw);
}

static int
nvme_pcie_ctrlr_get_cmbsz(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_cmbsz_register *cmbsz)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
					 &cmbsz->raw);
}

static int
nvme_pcie_ctrlr_get_pmrcap(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrcap_register *pmrcap)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
					 &pmrcap->raw);
}

static int
nvme_pcie_ctrlr_set_pmrctl(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrctl_register *pmrctl)
{
	return nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrctl.raw),
					 pmrctl->raw);
}

static int
nvme_pcie_ctrlr_get_pmrctl(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrctl_register *pmrctl)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrctl.raw),
					 &pmrctl->raw);
}

static int
nvme_pcie_ctrlr_get_pmrsts(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrsts_register *pmrsts)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
					 &pmrsts->raw);
}

static int
nvme_pcie_ctrlr_set_pmrmscl(struct nvme_pcie_ctrlr *pctrlr, uint32_t value)
{
	return nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrmscl.raw),
					 value);
}

static int
nvme_pcie_ctrlr_set_pmrmscu(struct nvme_pcie_ctrlr *pctrlr, uint32_t value)
{
	return nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrmscu),
					 value);
}

static uint32_t
nvme_pcie_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	/*
	 * For commands requiring more than 2 PRP entries, one PRP will be
	 *  embedded in the command (prp1), and the rest of the PRP entries
	 *  will be in a list pointed to by the command (prp2).  The number
	 *  of PRP entries in the list is defined by
	 *  NVME_MAX_PRP_LIST_ENTRIES.
	 *
	 *  Note that the max xfer size is not (MAX_ENTRIES + 1) * page_size
	 *  because the first PRP entry may not be aligned on a 4KiB
	 *  boundary.
	 */
	return NVME_MAX_PRP_LIST_ENTRIES * ctrlr->page_size;
}

static uint16_t
nvme_pcie_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return NVME_MAX_SGL_DESCRIPTORS;
}

static void
nvme_pcie_ctrlr_map_cmb(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc;
	void *addr = NULL;
	uint32_t bir;
	union spdk_nvme_cmbsz_register cmbsz;
	union spdk_nvme_cmbloc_register cmbloc;
	uint64_t size, unit_size, offset, bar_size = 0, bar_phys_addr = 0;

	if (nvme_pcie_ctrlr_get_cmbsz(pctrlr, &cmbsz) ||
	    nvme_pcie_ctrlr_get_cmbloc(pctrlr, &cmbloc)) {
		SPDK_ERRLOG("get registers failed\n");
		goto exit;
	}

	if (!cmbsz.bits.sz) {
		goto exit;
	}

	bir = cmbloc.bits.bir;
	/* Values 0 and 2 through 5 are valid for the BAR indicator. */
	if (bir > 5 || bir == 1) {
		goto exit;
	}

	/* CMBSZ.SZU selects the size unit: 4KiB/64KiB/1MiB/16MiB/256MiB/4GiB/64GiB */
	unit_size = (uint64_t)1 << (12 + 4 * cmbsz.bits.szu);
	/* Controller memory buffer size in bytes */
	size = unit_size * cmbsz.bits.sz;
	/* Controller memory buffer offset from the BAR in bytes */
	offset = unit_size * cmbloc.bits.ofst;

	rc = spdk_pci_device_map_bar(pctrlr->devhandle, bir, &addr,
				     &bar_phys_addr, &bar_size);
	if ((rc != 0) || addr == NULL) {
		goto exit;
	}

	if (offset > bar_size) {
		goto exit;
	}

	if (size > bar_size - offset) {
		goto exit;
	}

	pctrlr->cmb.bar_va = addr;
	pctrlr->cmb.bar_pa = bar_phys_addr;
	pctrlr->cmb.size = size;
	pctrlr->cmb.current_offset = offset;

	if (!cmbsz.bits.sqs) {
		pctrlr->ctrlr.opts.use_cmb_sqs = false;
	}

	return;
exit:
	pctrlr->ctrlr.opts.use_cmb_sqs = false;
	return;
}

static int
nvme_pcie_ctrlr_unmap_cmb(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc = 0;
	union spdk_nvme_cmbloc_register cmbloc;
	void *addr = pctrlr->cmb.bar_va;

	if (addr) {
		if (pctrlr->cmb.mem_register_addr) {
			spdk_mem_unregister(pctrlr->cmb.mem_register_addr, pctrlr->cmb.mem_register_size);
		}

		if (nvme_pcie_ctrlr_get_cmbloc(pctrlr, &cmbloc)) {
			SPDK_ERRLOG("get_cmbloc() failed\n");
			return -EIO;
		}
		rc = spdk_pci_device_unmap_bar(pctrlr->devhandle, cmbloc.bits.bir, addr);
	}
	return rc;
}

static int
nvme_pcie_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	if (pctrlr->cmb.bar_va == NULL) {
		SPDK_DEBUGLOG(nvme, "CMB not available\n");
		return -ENOTSUP;
	}

	if (ctrlr->opts.use_cmb_sqs) {
		SPDK_ERRLOG("CMB is already in use for submission queues.\n");
		return -ENOTSUP;
	}

	return 0;
}

static void *
nvme_pcie_ctrlr_map_io_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	union spdk_nvme_cmbsz_register cmbsz;
	union spdk_nvme_cmbloc_register cmbloc;
	uint64_t mem_register_start, mem_register_end;
	int rc;

	if (pctrlr->cmb.mem_register_addr != NULL) {
		*size = pctrlr->cmb.mem_register_size;
		return pctrlr->cmb.mem_register_addr;
	}

	*size = 0;

	if (pctrlr->cmb.bar_va == NULL) {
		SPDK_DEBUGLOG(nvme, "CMB not available\n");
		return NULL;
	}

	if (ctrlr->opts.use_cmb_sqs) {
		SPDK_ERRLOG("CMB is already in use for submission queues.\n");
		return NULL;
	}

	if (nvme_pcie_ctrlr_get_cmbsz(pctrlr, &cmbsz) ||
	    nvme_pcie_ctrlr_get_cmbloc(pctrlr, &cmbloc)) {
		SPDK_ERRLOG("get registers failed\n");
		return NULL;
	}

	/* If the CMB supports neither write (WDS) nor read (RDS) data transfers,
	 * it can only hold submission queues and cannot be mapped for I/O. */
	if (!(cmbsz.bits.wds || cmbsz.bits.rds)) {
		return NULL;
	}

	/* If CMB is less than 4MiB in size then abort CMB mapping */
	if (pctrlr->cmb.size < (1ULL << 22)) {
		return NULL;
	}

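	/* spdk_mem_register() operates on 2MiB-aligned regions, so round the
	 * start of the remaining CMB up and its end down to 2MiB boundaries. */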
	mem_register_start = _2MB_PAGE((uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.current_offset +
				       VALUE_2MB - 1);
	mem_register_end = _2MB_PAGE((uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.current_offset +
				     pctrlr->cmb.size);

	rc = spdk_mem_register((void *)mem_register_start, mem_register_end - mem_register_start);
	if (rc) {
		SPDK_ERRLOG("spdk_mem_register() failed\n");
		return NULL;
	}

	pctrlr->cmb.mem_register_addr = (void *)mem_register_start;
	pctrlr->cmb.mem_register_size = mem_register_end - mem_register_start;

	*size = pctrlr->cmb.mem_register_size;
	return pctrlr->cmb.mem_register_addr;
}

static int
nvme_pcie_ctrlr_unmap_io_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	int rc;

	if (pctrlr->cmb.mem_register_addr == NULL) {
		return 0;
	}

	rc = spdk_mem_unregister(pctrlr->cmb.mem_register_addr, pctrlr->cmb.mem_register_size);

	if (rc == 0) {
		pctrlr->cmb.mem_register_addr = NULL;
		pctrlr->cmb.mem_register_size = 0;
	}

	return rc;
}

static void
nvme_pcie_ctrlr_map_pmr(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc;
	void *addr = NULL;
	uint32_t bir;
	union spdk_nvme_pmrcap_register pmrcap;
	uint64_t bar_size = 0, bar_phys_addr = 0;

	if (!pctrlr->regs->cap.bits.pmrs) {
		return;
	}

	if (nvme_pcie_ctrlr_get_pmrcap(pctrlr, &pmrcap)) {
		SPDK_ERRLOG("get registers failed\n");
		return;
	}

	bir = pmrcap.bits.bir;
	/* Values 2 through 5 are valid for the BAR indicator. */
	if (bir > 5 || bir < 2) {
		SPDK_ERRLOG("invalid base indicator register value\n");
		return;
	}

	rc = spdk_pci_device_map_bar(pctrlr->devhandle, bir, &addr, &bar_phys_addr, &bar_size);
	if ((rc != 0) || addr == NULL) {
		SPDK_ERRLOG("could not map the bar %d\n", bir);
		return;
	}

	if (pmrcap.bits.cmss) {
		uint32_t pmrmscl, pmrmscu, cmse = 1;
		union spdk_nvme_pmrsts_register pmrsts;

		/* Enable Controller Memory Space */
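		/* PMRMSCL carries bits 31:12 of the controller base address plus
		 * the CMSE enable bit (bit 1); PMRMSCU holds the upper 32 bits. */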
		pmrmscl = (uint32_t)((bar_phys_addr & 0xFFFFF000ULL) | (cmse << 1));
		pmrmscu = (uint32_t)((bar_phys_addr >> 32ULL) & 0xFFFFFFFFULL);

		if (nvme_pcie_ctrlr_set_pmrmscu(pctrlr, pmrmscu)) {
			SPDK_ERRLOG("set_pmrmscu() failed\n");
			spdk_pci_device_unmap_bar(pctrlr->devhandle, bir, addr);
			return;
		}

		if (nvme_pcie_ctrlr_set_pmrmscl(pctrlr, pmrmscl)) {
			SPDK_ERRLOG("set_pmrmscl() failed\n");
			spdk_pci_device_unmap_bar(pctrlr->devhandle, bir, addr);
			return;
		}

		if (nvme_pcie_ctrlr_get_pmrsts(pctrlr, &pmrsts)) {
			SPDK_ERRLOG("get pmrsts failed\n");
			spdk_pci_device_unmap_bar(pctrlr->devhandle, bir, addr);
			return;
		}

		if (pmrsts.bits.cbai) {
			SPDK_ERRLOG("Controller Memory Space Enable Failure\n");
			SPDK_ERRLOG("CBA Invalid - Host Addresses cannot reference PMR\n");
		} else {
			SPDK_DEBUGLOG(nvme, "Controller Memory Space Enable Success\n");
			SPDK_DEBUGLOG(nvme, "Host Addresses can reference PMR\n");
		}
	}

	pctrlr->pmr.bar_va = addr;
	pctrlr->pmr.bar_pa = bar_phys_addr;
	pctrlr->pmr.size = pctrlr->ctrlr.pmr_size = bar_size;
}

static int
nvme_pcie_ctrlr_unmap_pmr(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc = 0;
	union spdk_nvme_pmrcap_register pmrcap;
	void *addr = pctrlr->pmr.bar_va;

	if (addr == NULL) {
		return rc;
	}

	if (pctrlr->pmr.mem_register_addr) {
		spdk_mem_unregister(pctrlr->pmr.mem_register_addr, pctrlr->pmr.mem_register_size);
	}

	if (nvme_pcie_ctrlr_get_pmrcap(pctrlr, &pmrcap)) {
		SPDK_ERRLOG("get_pmrcap() failed\n");
		return -EIO;
	}

	if (pmrcap.bits.cmss) {
		if (nvme_pcie_ctrlr_set_pmrmscu(pctrlr, 0)) {
			SPDK_ERRLOG("set_pmrmscu() failed\n");
		}

		if (nvme_pcie_ctrlr_set_pmrmscl(pctrlr, 0)) {
			SPDK_ERRLOG("set_pmrmscl() failed\n");
		}
	}

	rc = spdk_pci_device_unmap_bar(pctrlr->devhandle, pmrcap.bits.bir, addr);

	return rc;
}

static int
nvme_pcie_ctrlr_config_pmr(struct spdk_nvme_ctrlr *ctrlr, bool enable)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	union spdk_nvme_pmrcap_register pmrcap;
	union spdk_nvme_pmrctl_register pmrctl;
	union spdk_nvme_pmrsts_register pmrsts;
	uint8_t pmrto, pmrtu;
	uint64_t timeout_in_ms, ticks_per_ms, timeout_in_ticks, now_ticks;

	if (!pctrlr->regs->cap.bits.pmrs) {
		SPDK_ERRLOG("PMR is not supported by the controller\n");
		return -ENOTSUP;
	}

	if (nvme_pcie_ctrlr_get_pmrcap(pctrlr, &pmrcap)) {
		SPDK_ERRLOG("get registers failed\n");
		return -EIO;
	}

	pmrto = pmrcap.bits.pmrto;
	pmrtu = pmrcap.bits.pmrtu;

	if (pmrtu > 1) {
		SPDK_ERRLOG("PMR Time Units Invalid\n");
		return -EINVAL;
	}

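	/* PMRCAP.PMRTO is expressed in the units selected by PMRCAP.PMRTU:
	 * 0 means 500 milliseconds, 1 means minutes. */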
	ticks_per_ms = spdk_get_ticks_hz() / 1000;
	timeout_in_ms = pmrto * (pmrtu ? (60 * 1000) : 500);
	timeout_in_ticks = timeout_in_ms * ticks_per_ms;

	if (nvme_pcie_ctrlr_get_pmrctl(pctrlr, &pmrctl)) {
		SPDK_ERRLOG("get pmrctl failed\n");
		return -EIO;
	}

	if (enable && pmrctl.bits.en != 0) {
		SPDK_ERRLOG("PMR is already enabled\n");
		return -EINVAL;
	} else if (!enable && pmrctl.bits.en != 1) {
		SPDK_ERRLOG("PMR is already disabled\n");
		return -EINVAL;
	}

	pmrctl.bits.en = enable;

	if (nvme_pcie_ctrlr_set_pmrctl(pctrlr, &pmrctl)) {
		SPDK_ERRLOG("set pmrctl failed\n");
		return -EIO;
	}

	now_ticks = spdk_get_ticks();

	do {
		if (nvme_pcie_ctrlr_get_pmrsts(pctrlr, &pmrsts)) {
			SPDK_ERRLOG("get pmrsts failed\n");
			return -EIO;
		}

		if (pmrsts.bits.nrdy == enable &&
		    spdk_get_ticks() > now_ticks + timeout_in_ticks) {
			SPDK_ERRLOG("PMR Enable - Timed Out\n");
			return -ETIMEDOUT;
		}
	} while (pmrsts.bits.nrdy == enable);

	SPDK_DEBUGLOG(nvme, "PMR %s\n", enable ? "Enabled" : "Disabled");

	return 0;
}

static int
nvme_pcie_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return nvme_pcie_ctrlr_config_pmr(ctrlr, true);
}

static int
nvme_pcie_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return nvme_pcie_ctrlr_config_pmr(ctrlr, false);
}

static void *
nvme_pcie_ctrlr_map_io_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	union spdk_nvme_pmrcap_register pmrcap;
	uint64_t mem_register_start, mem_register_end;
	int rc;

	if (!pctrlr->regs->cap.bits.pmrs) {
		SPDK_ERRLOG("PMR is not supported by the controller\n");
		return NULL;
	}

	if (pctrlr->pmr.mem_register_addr != NULL) {
		*size = pctrlr->pmr.mem_register_size;
		return pctrlr->pmr.mem_register_addr;
	}

	*size = 0;

	if (pctrlr->pmr.bar_va == NULL) {
		SPDK_DEBUGLOG(nvme, "PMR not available\n");
		return NULL;
	}

	if (nvme_pcie_ctrlr_get_pmrcap(pctrlr, &pmrcap)) {
		SPDK_ERRLOG("get registers failed\n");
		return NULL;
	}

	/* Check if WDS / RDS is supported */
	if (!(pmrcap.bits.wds || pmrcap.bits.rds)) {
		return NULL;
	}

	/* If PMR is less than 4MiB in size then abort PMR mapping */
	if (pctrlr->pmr.size < (1ULL << 22)) {
		return NULL;
	}

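	/* As with the CMB, round to 2MiB boundaries for spdk_mem_register(). */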
	mem_register_start = _2MB_PAGE((uintptr_t)pctrlr->pmr.bar_va + VALUE_2MB - 1);
	mem_register_end = _2MB_PAGE((uintptr_t)pctrlr->pmr.bar_va + pctrlr->pmr.size);

	rc = spdk_mem_register((void *)mem_register_start, mem_register_end - mem_register_start);
	if (rc) {
		SPDK_ERRLOG("spdk_mem_register() failed\n");
		return NULL;
	}

	pctrlr->pmr.mem_register_addr = (void *)mem_register_start;
	pctrlr->pmr.mem_register_size = mem_register_end - mem_register_start;

	*size = pctrlr->pmr.mem_register_size;
	return pctrlr->pmr.mem_register_addr;
}

static int
nvme_pcie_ctrlr_unmap_io_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	int rc;

	if (pctrlr->pmr.mem_register_addr == NULL) {
		return -ENXIO;
	}

	rc = spdk_mem_unregister(pctrlr->pmr.mem_register_addr, pctrlr->pmr.mem_register_size);

	if (rc == 0) {
		pctrlr->pmr.mem_register_addr = NULL;
		pctrlr->pmr.mem_register_size = 0;
	}

	return rc;
}

static int
nvme_pcie_ctrlr_allocate_bars(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc;
	void *addr = NULL;
	uint64_t phys_addr = 0, size = 0;

	rc = spdk_pci_device_map_bar(pctrlr->devhandle, 0, &addr,
				     &phys_addr, &size);

	if ((addr == NULL) || (rc != 0)) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with rc %d or bar %p\n",
			    rc, addr);
		return -1;
	}

	pctrlr->regs = (volatile struct spdk_nvme_registers *)addr;
	pctrlr->regs_size = size;
	pctrlr->doorbell_base = (volatile uint32_t *)&pctrlr->regs->doorbell[0].sq_tdbl;
	nvme_pcie_ctrlr_map_cmb(pctrlr);
	nvme_pcie_ctrlr_map_pmr(pctrlr);

	return 0;
}

static int
nvme_pcie_ctrlr_free_bars(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc = 0;
	void *addr = (void *)pctrlr->regs;

	if (pctrlr->ctrlr.is_removed) {
		return rc;
	}

	rc = nvme_pcie_ctrlr_unmap_pmr(pctrlr);
	if (rc != 0) {
		SPDK_ERRLOG("nvme_ctrlr_unmap_pmr failed with error code %d\n", rc);
		return -1;
	}

	rc = nvme_pcie_ctrlr_unmap_cmb(pctrlr);
	if (rc != 0) {
		SPDK_ERRLOG("nvme_ctrlr_unmap_cmb failed with error code %d\n", rc);
		return -1;
	}

	if (addr && spdk_process_is_primary()) {
		/* NOTE: addr may have been remapped here. We're relying on DPDK to call
		 * munmap internally.
		 */
		rc = spdk_pci_device_unmap_bar(pctrlr->devhandle, 0, addr);
	}
	return rc;
}

/* This function must only be called while holding g_spdk_nvme_driver->lock */
static int
pcie_nvme_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_nvme_transport_id trid = {};
	struct nvme_pcie_enum_ctx *enum_ctx = ctx;
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_pci_addr pci_addr;

	pci_addr = spdk_pci_device_get_addr(pci_dev);

	spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_PCIE);
	spdk_pci_addr_fmt(trid.traddr, sizeof(trid.traddr), &pci_addr);

	ctrlr = nvme_get_ctrlr_by_trid_unsafe(&trid, NULL);
	if (!spdk_process_is_primary()) {
		if (!ctrlr) {
			SPDK_ERRLOG("Controller must be constructed in the primary process first.\n");
			return -1;
		}

		if (ctrlr->opts.enable_interrupts) {
			SPDK_ERRLOG("Secondary processes are not supported in interrupt mode.\n");
			return -1;
		}

		return nvme_ctrlr_add_process(ctrlr, pci_dev);
	}

	/* Check whether the user passed a specific PCI address. */
	if (enum_ctx->has_pci_addr &&
	    (spdk_pci_addr_compare(&pci_addr, &enum_ctx->pci_addr) != 0)) {
		return 1;
	}

	return nvme_ctrlr_probe(&trid, enum_ctx->probe_ctx, pci_dev);
}

static int
nvme_pci_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx)
{
	/* Only the primary process can monitor hotplug. */
	if (spdk_process_is_primary()) {
		return _nvme_pcie_hotplug_monitor(probe_ctx);
	}
	return 0;
}

static int
nvme_pcie_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
		     bool direct_connect)
{
	struct nvme_pcie_enum_ctx enum_ctx = {};

	enum_ctx.probe_ctx = probe_ctx;

	if (strlen(probe_ctx->trid.traddr) != 0) {
		if (spdk_pci_addr_parse(&enum_ctx.pci_addr, probe_ctx->trid.traddr)) {
			return -1;
		}
		enum_ctx.has_pci_addr = true;
	}

	/* Only the primary process can monitor hotplug. */
	if (nvme_pci_ctrlr_scan_attached(probe_ctx) > 0) {
		/* Some removal events were received. Return immediately, avoiding
		 * an spdk_pci_enumerate() which could trigger issue #3205. */
		return 0;
	}

	if (enum_ctx.has_pci_addr == false) {
		return spdk_pci_enumerate(spdk_pci_nvme_get_driver(),
					  pcie_nvme_enum_cb, &enum_ctx);
	} else {
		return spdk_pci_device_attach(spdk_pci_nvme_get_driver(),
					      pcie_nvme_enum_cb, &enum_ctx, &enum_ctx.pci_addr);
	}
}

static struct spdk_nvme_ctrlr *
nvme_pcie_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			  const struct spdk_nvme_ctrlr_opts *opts,
			  void *devhandle)
{
	struct spdk_pci_device *pci_dev = devhandle;
	struct nvme_pcie_ctrlr *pctrlr;
	union spdk_nvme_cap_register cap;
	uint16_t cmd_reg;
	int rc;
	struct spdk_pci_id pci_id;

	rc = spdk_pci_device_claim(pci_dev);
	if (rc < 0) {
		SPDK_ERRLOG("could not claim device %s (%s)\n",
			    trid->traddr, spdk_strerror(-rc));
		return NULL;
	}

	pctrlr = spdk_zmalloc(sizeof(struct nvme_pcie_ctrlr), 64, NULL,
			      SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
	if (pctrlr == NULL) {
		spdk_pci_device_unclaim(pci_dev);
		SPDK_ERRLOG("could not allocate ctrlr\n");
		return NULL;
	}

	pctrlr->is_remapped = false;
	pctrlr->ctrlr.is_removed = false;
	pctrlr->devhandle = devhandle;
	pctrlr->ctrlr.opts = *opts;
	pctrlr->ctrlr.trid = *trid;
	pctrlr->ctrlr.opts.admin_queue_size = spdk_max(pctrlr->ctrlr.opts.admin_queue_size,
					      NVME_PCIE_MIN_ADMIN_QUEUE_SIZE);
	pci_id = spdk_pci_device_get_id(pci_dev);
	pctrlr->ctrlr.quirks = nvme_get_quirks(&pci_id);
	if (pci_dev->numa_id != SPDK_ENV_NUMA_ID_ANY) {
		pctrlr->ctrlr.numa.id_valid = 1;
		pctrlr->ctrlr.numa.id = pci_dev->numa_id;
	}

	rc = nvme_ctrlr_construct(&pctrlr->ctrlr);
	if (rc != 0) {
		spdk_pci_device_unclaim(pci_dev);
		spdk_free(pctrlr);
		return NULL;
	}

	rc = nvme_pcie_ctrlr_allocate_bars(pctrlr);
	if (rc != 0) {
		spdk_pci_device_unclaim(pci_dev);
		spdk_free(pctrlr);
		return NULL;
	}

	/* Enable PCI busmaster and disable INTx */
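	/* In the command register (config space offset 4), bit 2 is Bus Master
	 * Enable and bit 10 is Interrupt Disable; 0x404 sets both. */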
	spdk_pci_device_cfg_read16(pci_dev, &cmd_reg, 4);
	cmd_reg |= 0x404;
	spdk_pci_device_cfg_write16(pci_dev, cmd_reg, 4);

	if (nvme_ctrlr_get_cap(&pctrlr->ctrlr, &cap)) {
		SPDK_ERRLOG("get_cap() failed\n");
		spdk_pci_device_unclaim(pci_dev);
		spdk_free(pctrlr);
		return NULL;
	}

	/* The doorbell stride is 2 ^ (2 + CAP.DSTRD) bytes. doorbell_base is a
	 * uint32_t pointer, so express the stride in 4-byte units by dropping the + 2. */
	pctrlr->doorbell_stride_u32 = 1 << cap.bits.dstrd;

	rc = nvme_pcie_ctrlr_construct_admin_qpair(&pctrlr->ctrlr, pctrlr->ctrlr.opts.admin_queue_size);
	if (rc != 0) {
		nvme_ctrlr_destruct(&pctrlr->ctrlr);
		return NULL;
	}

	/* Construct the primary process properties */
	rc = nvme_ctrlr_add_process(&pctrlr->ctrlr, pci_dev);
	if (rc != 0) {
		nvme_ctrlr_destruct(&pctrlr->ctrlr);
		return NULL;
	}

	if (!g_sigset) {
		spdk_pci_register_error_handler(nvme_sigbus_fault_sighandler,
						NULL);
		g_sigset = true;
	}

	return &pctrlr->ctrlr;
}

static int
nvme_pcie_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	struct nvme_pcie_qpair *padminq = nvme_pcie_qpair(ctrlr->adminq);
	union spdk_nvme_aqa_register aqa;

	if (nvme_pcie_ctrlr_set_asq(pctrlr, padminq->cmd_bus_addr)) {
		SPDK_ERRLOG("set_asq() failed\n");
		return -EIO;
	}

	if (nvme_pcie_ctrlr_set_acq(pctrlr, padminq->cpl_bus_addr)) {
		SPDK_ERRLOG("set_acq() failed\n");
		return -EIO;
	}

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = padminq->num_entries - 1;
	aqa.bits.asqs = padminq->num_entries - 1;

	if (nvme_pcie_ctrlr_set_aqa(pctrlr, &aqa)) {
		SPDK_ERRLOG("set_aqa() failed\n");
		return -EIO;
	}

	return 0;
}

static int
nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	struct spdk_pci_device *devhandle = nvme_ctrlr_proc_get_devhandle(ctrlr);

	if (ctrlr->adminq) {
		nvme_pcie_qpair_destroy(ctrlr->adminq);
	}

	nvme_ctrlr_destruct_finish(ctrlr);

	nvme_pcie_ctrlr_free_bars(pctrlr);

	if (devhandle) {
		if (ctrlr->opts.enable_interrupts) {
			spdk_pci_device_disable_interrupts(devhandle);
		}
		spdk_pci_device_unclaim(devhandle);
		spdk_pci_device_detach(devhandle);
	}

	spdk_free(pctrlr);

	return 0;
}

static int
nvme_pcie_ctrlr_enable_interrupts(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_pci_device *devhandle = nvme_ctrlr_proc_get_devhandle(ctrlr);
	int rc;

	assert(devhandle != NULL);
	rc = spdk_pci_device_enable_interrupts(devhandle, ctrlr->opts.num_io_queues);
	if (rc) {
		SPDK_ERRLOG("enable_interrupts() failed\n");
		return -EIO;
	}

	return 0;
}

static int
nvme_pcie_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				 int (*iter_fn)(struct nvme_request *req, void *arg),
				 void *arg)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_tracker *tr, *tmp;
	int rc;

	assert(iter_fn != NULL);

	TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, tmp) {
		assert(tr->req != NULL);

		rc = iter_fn(tr->req, arg);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

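/*
 * Set the callback used to filter hot-inserted NVMe devices. For each
 * SPDK_UEVENT_ADD event, the device is allowed to attach only if the
 * callback returns true (or if no filter is registered). A minimal usage
 * sketch, with a hypothetical filter that accepts a single slot:
 *
 *     static bool
 *     my_filter(const struct spdk_pci_addr *addr)
 *     {
 *             struct spdk_pci_addr allowed;
 *
 *             spdk_pci_addr_parse(&allowed, "0000:81:00.0");
 *             return spdk_pci_addr_compare(addr, &allowed) == 0;
 *     }
 *
 *     spdk_nvme_pcie_set_hotplug_filter(my_filter);
 */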
void
spdk_nvme_pcie_set_hotplug_filter(spdk_nvme_pcie_hotplug_filter_cb filter_cb)
{
	g_hotplug_filter_cb = filter_cb;
}

static struct spdk_pci_id nvme_pci_driver_id[] = {
	{
		.class_id = SPDK_PCI_CLASS_NVME,
		.vendor_id = SPDK_PCI_ANY_ID,
		.device_id = SPDK_PCI_ANY_ID,
		.subvendor_id = SPDK_PCI_ANY_ID,
		.subdevice_id = SPDK_PCI_ANY_ID,
	},
	{ .vendor_id = 0, /* sentinel */ },
};

SPDK_PCI_DRIVER_REGISTER(nvme, nvme_pci_driver_id,
			 SPDK_PCI_DRIVER_NEED_MAPPING | SPDK_PCI_DRIVER_WC_ACTIVATE);

const struct spdk_nvme_transport_ops pcie_ops = {
	.name = "PCIE",
	.type = SPDK_NVME_TRANSPORT_PCIE,
	.ctrlr_construct = nvme_pcie_ctrlr_construct,
	.ctrlr_scan = nvme_pcie_ctrlr_scan,
	.ctrlr_scan_attached = nvme_pci_ctrlr_scan_attached,
	.ctrlr_destruct = nvme_pcie_ctrlr_destruct,
	.ctrlr_enable = nvme_pcie_ctrlr_enable,
	.ctrlr_enable_interrupts = nvme_pcie_ctrlr_enable_interrupts,

	.ctrlr_get_registers = nvme_pcie_ctrlr_get_registers,
	.ctrlr_set_reg_4 = nvme_pcie_ctrlr_set_reg_4,
	.ctrlr_set_reg_8 = nvme_pcie_ctrlr_set_reg_8,
	.ctrlr_get_reg_4 = nvme_pcie_ctrlr_get_reg_4,
	.ctrlr_get_reg_8 = nvme_pcie_ctrlr_get_reg_8,

	.ctrlr_get_max_xfer_size = nvme_pcie_ctrlr_get_max_xfer_size,
	.ctrlr_get_max_sges = nvme_pcie_ctrlr_get_max_sges,

	.ctrlr_reserve_cmb = nvme_pcie_ctrlr_reserve_cmb,
	.ctrlr_map_cmb = nvme_pcie_ctrlr_map_io_cmb,
	.ctrlr_unmap_cmb = nvme_pcie_ctrlr_unmap_io_cmb,

	.ctrlr_enable_pmr = nvme_pcie_ctrlr_enable_pmr,
	.ctrlr_disable_pmr = nvme_pcie_ctrlr_disable_pmr,
	.ctrlr_map_pmr = nvme_pcie_ctrlr_map_io_pmr,
	.ctrlr_unmap_pmr = nvme_pcie_ctrlr_unmap_io_pmr,

	.ctrlr_create_io_qpair = nvme_pcie_ctrlr_create_io_qpair,
	.ctrlr_delete_io_qpair = nvme_pcie_ctrlr_delete_io_qpair,
	.ctrlr_connect_qpair = nvme_pcie_ctrlr_connect_qpair,
	.ctrlr_disconnect_qpair = nvme_pcie_ctrlr_disconnect_qpair,

	.qpair_abort_reqs = nvme_pcie_qpair_abort_reqs,
	.qpair_reset = nvme_pcie_qpair_reset,
	.qpair_submit_request = nvme_pcie_qpair_submit_request,
	.qpair_process_completions = nvme_pcie_qpair_process_completions,
	.qpair_iterate_requests = nvme_pcie_qpair_iterate_requests,
	.qpair_get_fd = nvme_pcie_qpair_get_fd,
	.admin_qpair_abort_aers = nvme_pcie_admin_qpair_abort_aers,

	.poll_group_create = nvme_pcie_poll_group_create,
	.poll_group_connect_qpair = nvme_pcie_poll_group_connect_qpair,
	.poll_group_disconnect_qpair = nvme_pcie_poll_group_disconnect_qpair,
	.poll_group_add = nvme_pcie_poll_group_add,
	.poll_group_remove = nvme_pcie_poll_group_remove,
	.poll_group_process_completions = nvme_pcie_poll_group_process_completions,
	.poll_group_check_disconnected_qpairs = nvme_pcie_poll_group_check_disconnected_qpairs,
	.poll_group_destroy = nvme_pcie_poll_group_destroy,
	.poll_group_get_stats = nvme_pcie_poll_group_get_stats,
	.poll_group_free_stats = nvme_pcie_poll_group_free_stats
};

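/* Register the PCIe transport ops with the generic NVMe transport layer. */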
SPDK_NVME_TRANSPORT_REGISTER(pcie, &pcie_ops);