/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/mmio.h"
#include "spdk/nvme_spec.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/stdinc.h"
#include "spdk/util.h"
#include "nvme.h"

typedef void (*nvme_cmd_cb)(void *ctx, const struct spdk_nvme_cpl *cpl);

struct nvme_request {
	/* Command identifier and position within qpair's requests array */
	uint16_t			cid;
	/* NVMe command */
	struct spdk_nvme_cmd		cmd;
	/* Completion callback */
	nvme_cmd_cb			cb_fn;
	/* Completion callback's argument */
	void				*cb_arg;
	TAILQ_ENTRY(nvme_request)	tailq;
};

struct nvme_qpair {
	/* Submission queue */
	struct spdk_nvme_cmd		*cmd;
	/* Completion queue */
	struct spdk_nvme_cpl		*cpl;
	/* Physical address of the submission queue */
	uint64_t			sq_paddr;
	/* Physical address of the completion queue */
	uint64_t			cq_paddr;
	/* Submission queue tail doorbell */
	volatile uint32_t		*sq_tdbl;
	/* Completion queue head doorbell */
	volatile uint32_t		*cq_hdbl;
	/* Submission/completion queue head/tail indices */
	uint16_t			sq_head;
	uint16_t			sq_tail;
	uint16_t			cq_head;
	/* Current phase tag value */
	uint8_t				phase;
	/* Queue of free NVMe requests */
	TAILQ_HEAD(, nvme_request)	free_requests;
	struct nvme_request		*requests;
	/* Size of both queues */
	uint32_t			num_entries;
};

enum nvme_ctrlr_state {
	/* Controller has not been initialized yet */
	NVME_CTRLR_STATE_INIT,
	/* Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0 */
	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
	/* Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1 */
	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
	/* Enable the controller by writing CC.EN to 1 */
	NVME_CTRLR_STATE_ENABLE,
	/* Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller */
	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
	/* Identify Controller command will be sent to the controller */
	NVME_CTRLR_STATE_IDENTIFY,
	/* Waiting for Identify Controller command to be completed */
	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
	/* Controller initialization has completed and the controller is ready */
	NVME_CTRLR_STATE_READY,
	/* Controller initialization error */
	NVME_CTRLR_STATE_ERROR,
};

struct nvme_ctrlr {
	/* Underlying PCI device */
	struct spdk_pci_device			*pci_device;
	/* Pointer to the MMIO register space */
	volatile struct spdk_nvme_registers	*regs;
	/* Stride in uint32_t units between doorbells */
	uint32_t				doorbell_stride_u32;
	/* Controller's memory page size */
	uint32_t				page_size;
	/* Admin queue pair */
	struct nvme_qpair			*admin_qpair;
	/* Controller's identify data */
	struct spdk_nvme_ctrlr_data		*cdata;
	/* State of the controller */
	enum nvme_ctrlr_state			state;
	TAILQ_ENTRY(nvme_ctrlr)			tailq;
};

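/* Match any PCI device with the NVMe class code; the table is terminated by a zeroed sentinel. */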
static struct spdk_pci_id nvme_pci_driver_id[] = {
	{
		.class_id = SPDK_PCI_CLASS_NVME,
		.vendor_id = SPDK_PCI_ANY_ID,
		.device_id = SPDK_PCI_ANY_ID,
		.subvendor_id = SPDK_PCI_ANY_ID,
		.subdevice_id = SPDK_PCI_ANY_ID,
	},
	{ .vendor_id = 0, /* sentinel */ },
};

SPDK_PCI_DRIVER_REGISTER(nvme_external, nvme_pci_driver_id, SPDK_PCI_DRIVER_NEED_MAPPING);
static TAILQ_HEAD(, nvme_ctrlr) g_nvme_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_ctrlrs);

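/* Find an already-attached controller by its PCI address; returns NULL if there is no match. */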
static struct nvme_ctrlr *
find_ctrlr_by_addr(struct spdk_pci_addr *addr)
{
	struct spdk_pci_addr ctrlr_addr;
	struct nvme_ctrlr *ctrlr;

	TAILQ_FOREACH(ctrlr, &g_nvme_ctrlrs, tailq) {
		ctrlr_addr = spdk_pci_device_get_addr(ctrlr->pci_device);
		if (spdk_pci_addr_compare(addr, &ctrlr_addr) == 0) {
			return ctrlr;
		}
	}

	return NULL;
}

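/*
 * MMIO register access helpers. The offset is relative to the beginning of the
 * controller's BAR0 register space (struct spdk_nvme_registers).
 */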
static volatile void *
get_pcie_reg_addr(struct nvme_ctrlr *ctrlr, uint32_t offset)
{
	return (volatile void *)((uintptr_t)ctrlr->regs + offset);
}

static void
get_pcie_reg_4(struct nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	assert(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = spdk_mmio_read_4(get_pcie_reg_addr(ctrlr, offset));
}

static void
get_pcie_reg_8(struct nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	assert(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = spdk_mmio_read_8(get_pcie_reg_addr(ctrlr, offset));
}

static void
set_pcie_reg_4(struct nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	assert(offset <= sizeof(struct spdk_nvme_registers) - 4);
	spdk_mmio_write_4(get_pcie_reg_addr(ctrlr, offset), value);
}

static void
set_pcie_reg_8(struct nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	assert(offset <= sizeof(struct spdk_nvme_registers) - 8);
	spdk_mmio_write_8(get_pcie_reg_addr(ctrlr, offset), value);
}

static void
nvme_ctrlr_get_cap(struct nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
{
	get_pcie_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap), &cap->raw);
}

static void
nvme_ctrlr_get_cc(struct nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
{
	get_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc), &cc->raw);
}

static void
nvme_ctrlr_get_csts(struct nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
{
	get_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts), &csts->raw);
}

static void
nvme_ctrlr_set_cc(struct nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
{
	set_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw), cc->raw);
}

static void
nvme_ctrlr_set_asq(struct nvme_ctrlr *ctrlr, uint64_t value)
{
	set_pcie_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, asq), value);
}

static void
nvme_ctrlr_set_acq(struct nvme_ctrlr *ctrlr, uint64_t value)
{
	set_pcie_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, acq), value);
}

static void
nvme_ctrlr_set_aqa(struct nvme_ctrlr *ctrlr, const union spdk_nvme_aqa_register *aqa)
{
	set_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, aqa.raw), aqa->raw);
}

static void
free_qpair(struct nvme_qpair *qpair)
{
	spdk_free(qpair->cmd);
	spdk_free(qpair->cpl);
	free(qpair->requests);
	free(qpair);
}

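/*
 * Allocate a queue pair: DMA-able submission/completion queue buffers, a pool of
 * num_entries - 1 request descriptors (one SQ slot is always left empty to
 * distinguish a full queue from an empty one), and the doorbell addresses derived
 * from the queue id and CAP.DSTRD.
 */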
static struct nvme_qpair *
init_qpair(struct nvme_ctrlr *ctrlr, uint16_t id, uint16_t num_entries)
{
	struct nvme_qpair *qpair;
	size_t page_align = sysconf(_SC_PAGESIZE);
	size_t queue_align, queue_len;
	volatile uint32_t *doorbell_base;
	uint16_t i;

	qpair = calloc(1, sizeof(*qpair));
	if (!qpair) {
		SPDK_ERRLOG("Failed to allocate queue pair\n");
		return NULL;
	}

	qpair->phase = 1;
	qpair->num_entries = num_entries;
	queue_len = num_entries * sizeof(struct spdk_nvme_cmd);
	queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
	qpair->cmd = spdk_zmalloc(queue_len, queue_align, NULL,
				  SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
	if (!qpair->cmd) {
		SPDK_ERRLOG("Failed to allocate submission queue buffer\n");
		free_qpair(qpair);
		return NULL;
	}

	queue_len = num_entries * sizeof(struct spdk_nvme_cpl);
	queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
	qpair->cpl = spdk_zmalloc(queue_len, queue_align, NULL,
				  SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
	if (!qpair->cpl) {
		SPDK_ERRLOG("Failed to allocate completion queue buffer\n");
		free_qpair(qpair);
		return NULL;
	}

	qpair->requests = calloc(num_entries - 1, sizeof(*qpair->requests));
	if (!qpair->requests) {
		SPDK_ERRLOG("Failed to allocate NVMe request descriptors\n");
		free_qpair(qpair);
		return NULL;
	}

	TAILQ_INIT(&qpair->free_requests);
	for (i = 0; i < num_entries - 1; ++i) {
		qpair->requests[i].cid = i;
		TAILQ_INSERT_TAIL(&qpair->free_requests, &qpair->requests[i], tailq);
	}

	qpair->sq_paddr = spdk_vtophys(qpair->cmd, NULL);
	qpair->cq_paddr = spdk_vtophys(qpair->cpl, NULL);
	if (qpair->sq_paddr == SPDK_VTOPHYS_ERROR || qpair->cq_paddr == SPDK_VTOPHYS_ERROR) {
		SPDK_ERRLOG("Failed to translate the sq/cq virtual address\n");
		free_qpair(qpair);
		return NULL;
	}

	doorbell_base = (volatile uint32_t *)&ctrlr->regs->doorbell[0];
	qpair->sq_tdbl = doorbell_base + (2 * id + 0) * ctrlr->doorbell_stride_u32;
	qpair->cq_hdbl = doorbell_base + (2 * id + 1) * ctrlr->doorbell_stride_u32;

	return qpair;
}

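/*
 * Called once for each enumerated/attached PCI device: claims the device, maps
 * BAR0, enables bus mastering, reads CAP to derive the page size and doorbell
 * stride, and sets up the identify data buffer and admin queue pair.
 */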
static int
pcie_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct nvme_ctrlr *ctrlr;
	TAILQ_HEAD(, nvme_ctrlr) *ctrlrs = ctx;
	union spdk_nvme_cap_register cap;
	char addr[32] = {};
	uint64_t phys_addr, size;
	uint16_t cmd_reg;
	void *reg_addr;

	spdk_pci_addr_fmt(addr, sizeof(addr), &pci_dev->addr);

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (!ctrlr) {
		SPDK_ERRLOG("Failed to allocate NVMe controller: %s\n", addr);
		return -1;
	}

	if (spdk_pci_device_claim(pci_dev)) {
		SPDK_ERRLOG("Failed to claim PCI device: %s\n", addr);
		free(ctrlr);
		return -1;
	}

	if (spdk_pci_device_map_bar(pci_dev, 0, &reg_addr, &phys_addr, &size)) {
		SPDK_ERRLOG("Failed to map BAR0 for NVMe controller: %s\n", addr);
		spdk_pci_device_unclaim(pci_dev);
		free(ctrlr);
		return -1;
	}

	ctrlr->pci_device = pci_dev;
	ctrlr->regs = (volatile struct spdk_nvme_registers *)reg_addr;

	/* Enable PCI busmaster and disable INTx */
	spdk_pci_device_cfg_read16(pci_dev, &cmd_reg, 4);
	cmd_reg |= 0x404;
	spdk_pci_device_cfg_write16(pci_dev, cmd_reg, 4);

	nvme_ctrlr_get_cap(ctrlr, &cap);
	ctrlr->page_size = 1 << (12 + cap.bits.mpsmin);
	ctrlr->doorbell_stride_u32 = 1 << cap.bits.dstrd;

	ctrlr->cdata = spdk_zmalloc(sizeof(*ctrlr->cdata), ctrlr->page_size, NULL,
				    SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctrlr->cdata) {
		SPDK_ERRLOG("Failed to allocate identify data for NVMe controller: %s\n", addr);
		spdk_pci_device_unmap_bar(pci_dev, 0, reg_addr);
		spdk_pci_device_unclaim(pci_dev);
		free(ctrlr);
		return -1;
	}

	/* Initialize admin queue pair with minimum number of entries (2) */
	ctrlr->admin_qpair = init_qpair(ctrlr, 0, SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES);
	if (!ctrlr->admin_qpair) {
		SPDK_ERRLOG("Failed to initialize admin queue pair for controller: %s\n", addr);
		spdk_pci_device_unmap_bar(pci_dev, 0, reg_addr);
		spdk_pci_device_unclaim(pci_dev);
		spdk_free(ctrlr->cdata);
		free(ctrlr);
		return -1;
	}

	TAILQ_INSERT_TAIL(ctrlrs, ctrlr, tailq);

	return 0;
}

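/* Reserve a free request descriptor, or return NULL if the submission queue is full. */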
static struct nvme_request *
allocate_request(struct nvme_qpair *qpair)
{
	struct nvme_request *request;

	if ((qpair->sq_tail + 1) % qpair->num_entries == qpair->sq_head) {
		return NULL;
	}

	request = TAILQ_FIRST(&qpair->free_requests);
	assert(request != NULL);
	TAILQ_REMOVE(&qpair->free_requests, request, tailq);
	memset(&request->cmd, 0, sizeof(request->cmd));

	return request;
}

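/* Copy the command into the next SQ slot and ring the SQ tail doorbell. */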
static void
submit_request(struct nvme_qpair *qpair, struct nvme_request *request)
{
	qpair->cmd[qpair->sq_tail] = request->cmd;

	if (spdk_unlikely(++qpair->sq_tail == qpair->num_entries)) {
		qpair->sq_tail = 0;
	}

	spdk_wmb();
	spdk_mmio_write_4(qpair->sq_tdbl, qpair->sq_tail);
}

static void
identify_ctrlr_done(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_ctrlr *ctrlr = ctx;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("Identify Controller command failed\n");
		ctrlr->state = NVME_CTRLR_STATE_ERROR;
		return;
	}

	ctrlr->state = NVME_CTRLR_STATE_READY;
}

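/* Send an Identify Controller command (CNS 01h), using the cdata buffer as PRP1. */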
static int
identify_ctrlr(struct nvme_ctrlr *ctrlr)
{
	struct nvme_request *request;
	struct spdk_nvme_cmd *cmd;
	uint64_t prp1;

	/* We're only filling a single PRP entry, so the address needs to be page aligned */
	assert(((uintptr_t)ctrlr->cdata & (ctrlr->page_size - 1)) == 0);
	prp1 = spdk_vtophys(ctrlr->cdata, NULL);
	if (prp1 == SPDK_VTOPHYS_ERROR) {
		return -EFAULT;
	}

	request = allocate_request(ctrlr->admin_qpair);
	if (!request) {
		return -EAGAIN;
	}

	request->cb_fn = identify_ctrlr_done;
	request->cb_arg = ctrlr;

	cmd = &request->cmd;
	cmd->cid = request->cid;
	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
	cmd->dptr.prp.prp1 = prp1;
	cmd->cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;
	cmd->cdw10_bits.identify.cntid = 0;
	cmd->cdw11_bits.identify.csi = 0;
	cmd->nsid = 0;

	submit_request(ctrlr->admin_qpair, request);

	return 0;
}

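/*
 * Poll the completion queue, using the phase tag to detect new entries. Each
 * completion's callback is invoked and its request descriptor is returned to the
 * free list; the CQ head doorbell is rung once at the end.
 */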
static int32_t
process_completions(struct nvme_qpair *qpair)
{
	struct spdk_nvme_cpl *cpl;
	struct nvme_request *request;
	int32_t max_completions, num_completions = 0;

	max_completions = qpair->num_entries - 1;
	while (1) {
		cpl = &qpair->cpl[qpair->cq_head];

		if (cpl->status.p != qpair->phase) {
			break;
		}

		if (spdk_unlikely(++qpair->cq_head == qpair->num_entries)) {
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		}

		qpair->sq_head = cpl->sqhd;
		request = &qpair->requests[cpl->cid];
		request->cb_fn(request->cb_arg, cpl);
		TAILQ_INSERT_TAIL(&qpair->free_requests, request, tailq);

		if (++num_completions == max_completions) {
			break;
		}
	}

	if (num_completions > 0) {
		spdk_mmio_write_4(qpair->cq_hdbl, qpair->cq_head);
	}

	return num_completions;
}

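/*
 * Execute a single step of the controller initialization state machine (see the
 * Controller Initialization section of the NVMe specification): disable the
 * controller, program AQA/ASQ/ACQ, re-enable it with CC.IOSQES/IOCQES set to the
 * log2 of the SQ (64 B) and CQ (16 B) entry sizes, then identify it.
 */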
static int
process_ctrlr_init(struct nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register cc;
	union spdk_nvme_csts_register csts;
	union spdk_nvme_aqa_register aqa;
	int rc = 0;

	if (ctrlr->state == NVME_CTRLR_STATE_READY) {
		return 0;
	}

	nvme_ctrlr_get_cc(ctrlr, &cc);
	nvme_ctrlr_get_csts(ctrlr, &csts);

	switch (ctrlr->state) {
	case NVME_CTRLR_STATE_INIT:
		if (cc.bits.en) {
			if (csts.bits.rdy == 0) {
				ctrlr->state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1;
				break;
			}

			cc.bits.en = 0;
			nvme_ctrlr_set_cc(ctrlr, &cc);
		}
		ctrlr->state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0;
		break;
	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
		if (csts.bits.rdy || csts.bits.cfs) {
			cc.bits.en = 0;
			nvme_ctrlr_set_cc(ctrlr, &cc);
			ctrlr->state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0;
		}
		break;
	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
		if (csts.bits.rdy == 0) {
			ctrlr->state = NVME_CTRLR_STATE_ENABLE;
		}
		break;
	case NVME_CTRLR_STATE_ENABLE:
		nvme_ctrlr_set_asq(ctrlr, ctrlr->admin_qpair->sq_paddr);
		nvme_ctrlr_set_acq(ctrlr, ctrlr->admin_qpair->cq_paddr);

		aqa.raw = 0;
		aqa.bits.asqs = ctrlr->admin_qpair->num_entries - 1;
		aqa.bits.acqs = ctrlr->admin_qpair->num_entries - 1;
		nvme_ctrlr_set_aqa(ctrlr, &aqa);

		cc.bits.en = 1;
		cc.bits.iocqes = 4;
		cc.bits.iosqes = 6;
		nvme_ctrlr_set_cc(ctrlr, &cc);
		ctrlr->state = NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1;
		break;
	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
		if (csts.bits.rdy) {
			ctrlr->state = NVME_CTRLR_STATE_IDENTIFY;
		}
		break;
	case NVME_CTRLR_STATE_IDENTIFY:
		ctrlr->state = NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY;
		rc = identify_ctrlr(ctrlr);
		break;
	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
		process_completions(ctrlr->admin_qpair);
		break;
	case NVME_CTRLR_STATE_ERROR:
		rc = -1;
		break;
	default:
		assert(0 && "should never get here");
		return -1;
	}

	return rc;
}

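/* Release all controller resources: BAR0 mapping, PCI claim, admin qpair, identify data. */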
static void
free_ctrlr(struct nvme_ctrlr *ctrlr)
{
	spdk_pci_device_unmap_bar(ctrlr->pci_device, 0, (void *)ctrlr->regs);
	spdk_pci_device_unclaim(ctrlr->pci_device);
	spdk_pci_device_detach(ctrlr->pci_device);
	free_qpair(ctrlr->admin_qpair);
	spdk_free(ctrlr->cdata);
	free(ctrlr);
}

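/*
 * Attach a single device (addr != NULL) or enumerate all devices bound to the
 * nvme_external driver, then poll each controller's init state machine until it
 * either reaches the READY state or fails.
 */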
static int
probe_internal(struct spdk_pci_addr *addr, nvme_attach_cb attach_cb, void *cb_ctx)
{
	struct nvme_ctrlr *ctrlr, *tmp;
	TAILQ_HEAD(, nvme_ctrlr) ctrlrs = TAILQ_HEAD_INITIALIZER(ctrlrs);
	int rc;

	if (addr == NULL) {
		rc = spdk_pci_enumerate(spdk_pci_get_driver("nvme_external"),
					pcie_enum_cb, &ctrlrs);
	} else {
		rc = spdk_pci_device_attach(spdk_pci_get_driver("nvme_external"),
					    pcie_enum_cb, &ctrlrs, addr);
	}

	if (rc != 0) {
		SPDK_ERRLOG("Failed to enumerate PCI devices\n");
		while (!TAILQ_EMPTY(&ctrlrs)) {
			ctrlr = TAILQ_FIRST(&ctrlrs);
			TAILQ_REMOVE(&ctrlrs, ctrlr, tailq);
			free_ctrlr(ctrlr);
		}

		return rc;
	}

	while (!TAILQ_EMPTY(&ctrlrs)) {
		TAILQ_FOREACH_SAFE(ctrlr, &ctrlrs, tailq, tmp) {
			rc = process_ctrlr_init(ctrlr);
			if (rc != 0) {
				SPDK_ERRLOG("NVMe controller initialization failed\n");
				TAILQ_REMOVE(&ctrlrs, ctrlr, tailq);
				free_ctrlr(ctrlr);
				continue;
			}

			if (ctrlr->state == NVME_CTRLR_STATE_READY) {
				TAILQ_REMOVE(&ctrlrs, ctrlr, tailq);
				TAILQ_INSERT_TAIL(&g_nvme_ctrlrs, ctrlr, tailq);

				if (attach_cb != NULL) {
					attach_cb(cb_ctx, &ctrlr->pci_device->addr, ctrlr);
				}
			}
		}
	}

	return 0;
}

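/*
 * Example usage (a minimal sketch; assumes spdk_env_init() has already been
 * called and that the callback matches the nvme_attach_cb signature declared in
 * nvme.h):
 *
 *	static void
 *	attach_cb(void *cb_ctx, const struct spdk_pci_addr *addr, struct nvme_ctrlr *ctrlr)
 *	{
 *		const struct spdk_nvme_ctrlr_data *cdata = nvme_ctrlr_get_data(ctrlr);
 *		printf("Attached %.40s\n", cdata->mn);
 *	}
 *
 *	nvme_probe(attach_cb, NULL);
 */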
int
nvme_probe(nvme_attach_cb attach_cb, void *cb_ctx)
{
	return probe_internal(NULL, attach_cb, cb_ctx);
}

struct nvme_ctrlr *
nvme_connect(struct spdk_pci_addr *addr)
{
	int rc;

	rc = probe_internal(addr, NULL, NULL);
	if (rc != 0) {
		return NULL;
	}

	return find_ctrlr_by_addr(addr);
}

void
nvme_detach(struct nvme_ctrlr *ctrlr)
{
	TAILQ_REMOVE(&g_nvme_ctrlrs, ctrlr, tailq);
	free_ctrlr(ctrlr);
}

const struct spdk_nvme_ctrlr_data *
nvme_ctrlr_get_data(struct nvme_ctrlr *ctrlr)
{
	return ctrlr->cdata;
}

SPDK_LOG_REGISTER_COMPONENT(nvme_external)