xref: /openbsd-src/sys/dev/ic/nvme.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: nvme.c,v 1.6 2014/07/13 23:10:23 deraadt Exp $ */
2 
3 /*
4  * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/buf.h>
22 #include <sys/kernel.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/queue.h>
26 #include <sys/mutex.h>
27 #include <sys/pool.h>
28 
29 #include <machine/bus.h>
30 
31 #include <scsi/scsi_all.h>
32 #include <scsi/scsiconf.h>
33 
34 #include <dev/ic/nvmereg.h>
35 #include <dev/ic/nvmevar.h>
36 
37 struct cfdriver nvme_cd = {
38 	NULL,
39 	"nvme",
40 	DV_DULL
41 };
42 
43 int			nvme_ready(struct nvme_softc *, u_int32_t);
44 int			nvme_enable(struct nvme_softc *, u_int);
45 int			nvme_disable(struct nvme_softc *);
46 
47 void			nvme_version(struct nvme_softc *, u_int32_t);
48 void			nvme_dumpregs(struct nvme_softc *);
49 int			nvme_identify(struct nvme_softc *, u_int);
50 void			nvme_fill_identify(struct nvme_softc *,
51 			    struct nvme_ccb *, void *);
52 
53 int			nvme_ccbs_alloc(struct nvme_softc *, u_int);
54 void			nvme_ccbs_free(struct nvme_softc *);
55 
56 void *			nvme_ccb_get(void *);
57 void			nvme_ccb_put(void *, void *);
58 
59 int			nvme_poll(struct nvme_softc *, struct nvme_queue *,
60 			    struct nvme_ccb *,
61 			    void (*fill)(struct nvme_softc *,
62 			     struct nvme_ccb *, void *));
63 void			nvme_poll_fill(struct nvme_softc *,
64 			    struct nvme_ccb *, void *);
65 void			nvme_poll_done(struct nvme_softc *,
66 			    struct nvme_ccb *, struct nvme_cqe *);
67 void			nvme_empty_done(struct nvme_softc *,
68 			    struct nvme_ccb *, struct nvme_cqe *);
69 
70 struct nvme_queue *	nvme_q_alloc(struct nvme_softc *,
71 			    u_int, u_int, u_int);
72 void			nvme_q_submit(struct nvme_softc *,
73 			    struct nvme_queue *, struct nvme_ccb *,
74 			    void (*)(struct nvme_softc *,
75 			     struct nvme_ccb *, void *));
76 int			nvme_q_complete(struct nvme_softc *,
77 			    struct nvme_queue *q);
78 void			nvme_q_free(struct nvme_softc *,
79 			    struct nvme_queue *);
80 
81 struct nvme_dmamem *	nvme_dmamem_alloc(struct nvme_softc *, size_t);
82 void			nvme_dmamem_free(struct nvme_softc *,
83 			    struct nvme_dmamem *);
84 
85 #define nvme_read4(_s, _r) \
86 	bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
87 #define nvme_write4(_s, _r, _v) \
88 	bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
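/*
 * CAP, ASQ and ACQ are 64-bit registers.  Where the platform has a 64-bit
 * bus_space op they are accessed in one go; on 32-bit platforms they are
 * split into two 32-bit accesses, low dword at the lower offset, which
 * NVMe controllers are expected to accept.
 */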
89 #ifdef __LP64__
90 #define nvme_read8(_s, _r) \
91 	bus_space_read_8((_s)->sc_iot, (_s)->sc_ioh, (_r))
92 #define nvme_write8(_s, _r, _v) \
93 	bus_space_write_8((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
94 #else /* __LP64__ */
95 static inline u_int64_t
96 nvme_read8(struct nvme_softc *sc, bus_size_t r)
97 {
98 	u_int64_t v;
99 	u_int32_t *a = (u_int32_t *)&v;
100 
101 #if _BYTE_ORDER == _LITTLE_ENDIAN
102 	a[0] = nvme_read4(sc, r);
103 	a[1] = nvme_read4(sc, r + 4);
104 #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
105 	a[1] = nvme_read4(sc, r);
106 	a[0] = nvme_read4(sc, r + 4);
107 #endif
108 
109 	return (v);
110 }
111 
112 static inline void
113 nvme_write8(struct nvme_softc *sc, bus_size_t r, u_int64_t v)
114 {
115 	u_int32_t *a = (u_int32_t *)&v;
116 
117 #if _BYTE_ORDER == _LITTLE_ENDIAN
118 	nvme_write4(sc, r, a[0]);
119 	nvme_write4(sc, r + 4, a[1]);
120 #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
121 	nvme_write4(sc, r, a[1]);
122 	nvme_write4(sc, r + 4, a[0]);
123 #endif
124 }
125 #endif /* __LP64__ */
126 #define nvme_barrier(_s, _r, _l, _f) \
127 	bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))
128 
129 void
130 nvme_version(struct nvme_softc *sc, u_int32_t version)
131 {
132 	u_int16_t minor;
133 
134 	minor = NVME_VS_MNR(version);
135 	minor = ((minor >> 8) * 10) + (minor & 0xff);
136 	printf(", NVME %d.%d", NVME_VS_MJR(version), minor);
137 }
138 
139 void
140 nvme_dumpregs(struct nvme_softc *sc)
141 {
142 	u_int64_t r8;
143 	u_int32_t r4;
144 
145 	r8 = nvme_read8(sc, NVME_CAP);
146 	printf("%s: cap  0x%016llx\n", DEVNAME(sc), r8);
147 	printf("%s:  mpsmax %u (%u)\n", DEVNAME(sc),
148 	    (u_int)NVME_CAP_MPSMAX(r8), (1 << NVME_CAP_MPSMAX(r8)));
149 	printf("%s:  mpsmin %u (%u)\n", DEVNAME(sc),
150 	    (u_int)NVME_CAP_MPSMIN(r8), (1 << NVME_CAP_MPSMIN(r8)));
151 	printf("%s:  css %llu\n", DEVNAME(sc), NVME_CAP_CSS(r8));
152 	printf("%s:  nssrs %llu\n", DEVNAME(sc), NVME_CAP_NSSRS(r8));
153 	printf("%s:  dstrd %llu\n", DEVNAME(sc), NVME_CAP_DSTRD(r8));
154 	printf("%s:  to %llu msec\n", DEVNAME(sc), NVME_CAP_TO(r8));
155 	printf("%s:  ams %llu\n", DEVNAME(sc), NVME_CAP_AMS(r8));
156 	printf("%s:  cqr %llu\n", DEVNAME(sc), NVME_CAP_CQR(r8));
157 	printf("%s:  mqes %llu\n", DEVNAME(sc), NVME_CAP_MQES(r8));
158 
159 	printf("%s: vs   0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_VS));
160 
161 	r4 = nvme_read4(sc, NVME_CC);
162 	printf("%s: cc   0x%08x\n", DEVNAME(sc), r4);
163 	printf("%s:  iocqes %u\n", DEVNAME(sc), NVME_CC_IOCQES_R(r4));
164 	printf("%s:  iosqes %u\n", DEVNAME(sc), NVME_CC_IOSQES_R(r4));
165 	printf("%s:  shn %u\n", DEVNAME(sc), NVME_CC_SHN_R(r4));
166 	printf("%s:  ams %u\n", DEVNAME(sc), NVME_CC_AMS_R(r4));
167 	printf("%s:  mps %u\n", DEVNAME(sc), NVME_CC_MPS_R(r4));
168 	printf("%s:  css %u\n", DEVNAME(sc), NVME_CC_CSS_R(r4));
169 	printf("%s:  en %u\n", DEVNAME(sc), ISSET(r4, NVME_CC_EN));
170 
171 	printf("%s: csts 0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_CSTS));
172 	printf("%s: aqa  0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_AQA));
173 	printf("%s: asq  0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ASQ));
174 	printf("%s: acq  0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ACQ));
175 }
176 
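/*
 * Wait for CSTS.RDY to reach the requested state.  The loop busy-waits in
 * roughly one millisecond steps and gives up once sc_rdy_to (derived from
 * CAP.TO at attach time) iterations have passed.
 */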
177 int
178 nvme_ready(struct nvme_softc *sc, u_int32_t rdy)
179 {
180 	u_int i = 0;
181 
182 	while ((nvme_read4(sc, NVME_CSTS) & NVME_CSTS_RDY) != rdy) {
183 		if (i++ > sc->sc_rdy_to)
184 			return (1);
185 
186 		delay(1000);
187 		nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
188 	}
189 
190 	return (0);
191 }
192 
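/*
 * Bring the controller up.  If CC.EN is already set, just wait for RDY.
 * Otherwise program the admin queue base addresses (ASQ/ACQ) and sizes
 * (AQA), then write CC selecting 64 byte submission and 16 byte completion
 * queue entries, round-robin arbitration, the NVM command set, the chosen
 * memory page size, and finally the enable bit.
 */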
193 int
194 nvme_enable(struct nvme_softc *sc, u_int mps)
195 {
196 	u_int32_t cc;
197 
198 	cc = nvme_read4(sc, NVME_CC);
199 	if (ISSET(cc, NVME_CC_EN))
200 		return (nvme_ready(sc, NVME_CSTS_RDY));
201 
202 	nvme_write8(sc, NVME_ASQ, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem));
203 	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
204 	nvme_write8(sc, NVME_ACQ, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem));
205 	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
206 
207 	nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |
208 	    NVME_AQA_ASQS(sc->sc_admin_q->q_entries));
209 	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
210 
211 	CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
212 	    NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
213 	SET(cc, NVME_CC_IOSQES(ffs(64) - 1) | NVME_CC_IOCQES(ffs(16) - 1));
214 	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE));
215 	SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM));
216 	SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR));
217 	SET(cc, NVME_CC_MPS(mps));
218 	SET(cc, NVME_CC_EN);
219 
220 	nvme_write4(sc, NVME_CC, cc);
221 	nvme_barrier(sc, 0, sc->sc_ios,
222 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
223 
224 	return (0);
225 }
226 
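/*
 * Cleanly shut the controller down: if it is enabled and not reporting a
 * fatal status, wait for it to become ready, then clear CC.EN and wait for
 * RDY to drop back to 0.
 */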
227 int
228 nvme_disable(struct nvme_softc *sc)
229 {
230 	u_int32_t cc, csts;
231 
232 	cc = nvme_read4(sc, NVME_CC);
233 	if (ISSET(cc, NVME_CC_EN)) {
234 		csts = nvme_read4(sc, NVME_CSTS);
235 		if (!ISSET(csts, NVME_CSTS_CFS) &&
236 		    nvme_ready(sc, NVME_CSTS_RDY) != 0)
237 			return (1);
238 	}
239 
240 	CLR(cc, NVME_CC_EN);
241 
242 	nvme_write4(sc, NVME_CC, cc);
243 	nvme_barrier(sc, 0, sc->sc_ios,
244 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
245 
246 	return (nvme_ready(sc, 0));
247 }
248 
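/*
 * Common attach path, called by the bus-specific front-end once sc_iot,
 * sc_ioh and sc_dmat are set up.  The controller is reset via
 * nvme_disable(), CAP is read to pick the memory page size and doorbell
 * stride, the admin queue and an initial batch of ccbs are allocated, and
 * the controller is then enabled and identified.
 */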
249 int
250 nvme_attach(struct nvme_softc *sc)
251 {
252 	u_int64_t cap;
253 	u_int32_t reg;
254 	u_int dstrd;
255 	u_int mps = PAGE_SHIFT;
256 
257 	reg = nvme_read4(sc, NVME_VS);
258 	if (reg == 0xffffffff) {
259 		printf(", invalid mapping\n");
260 		return (1);
261 	}
262 
263 	nvme_version(sc, reg);
264 	printf("\n");
265 
266 	if (nvme_disable(sc) != 0) {
267 		printf("%s: unable to disable controller\n", DEVNAME(sc));
268 		return (1);
269 	}
270 
271 	cap = nvme_read8(sc, NVME_CAP);
272 	dstrd = NVME_CAP_DSTRD(cap);
273 	if (NVME_CAP_MPSMIN(cap) > mps)
274 		mps = NVME_CAP_MPSMIN(cap);
275 	else if (NVME_CAP_MPSMAX(cap) < mps)
276 		mps = NVME_CAP_MPSMAX(cap);
277 
278 	sc->sc_rdy_to = NVME_CAP_TO(cap);
279 	sc->sc_mps = 1 << mps;
280 	sc->sc_mdts = MAXPHYS;
281 	sc->sc_max_sgl = 2;
282 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
283 	SIMPLEQ_INIT(&sc->sc_ccb_list);
284 	scsi_iopool_init(&sc->sc_iopool, sc, nvme_ccb_get, nvme_ccb_put);
285 
286 	sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, 128, dstrd);
287 	if (sc->sc_admin_q == NULL) {
288 		printf("%s: unable to allocate admin queue\n", DEVNAME(sc));
289 		return (1);
290 	}
291 
292 	if (nvme_ccbs_alloc(sc, 16) != 0) {
293 		printf("%s: unable to allocate initial ccbs\n", DEVNAME(sc));
294 		goto free_admin_q;
295 	}
296 
297 	if (nvme_enable(sc, mps) != 0) {
298 		printf("%s: unable to enable controller\n", DEVNAME(sc));
299 		goto free_ccbs;
300 	}
301 
302 	if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)) != 0) {
303 		printf("%s: unable to identify controller\n", DEVNAME(sc));
304 		goto disable;
305 	}
306 
307 	return (0);
308 
309 disable:
310 	nvme_disable(sc);
311 free_ccbs:
312 	nvme_ccbs_free(sc);
313 free_admin_q:
314 	nvme_q_free(sc, sc->sc_admin_q);
315 
316 	return (1);
317 }
318 
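/*
 * Post one command: claim the next submission queue slot under q_sq_mtx,
 * let the caller's fill routine build the SQE in place (the command id is
 * the ccb index, so the completion can be matched back to its ccb), and
 * ring the submission queue tail doorbell to hand it to the controller.
 */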
319 void
320 nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
321     void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *))
322 {
323 	struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
324 	u_int32_t tail;
325 
326 	mtx_enter(&q->q_sq_mtx);
327 	tail = q->q_sq_tail;
328 	if (++q->q_sq_tail >= q->q_entries)
329 		q->q_sq_tail = 0;
330 
331 	sqe += tail;
332 
333 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
334 	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
335 	memset(sqe, 0, sizeof(*sqe));
336 	sqe->cid = ccb->ccb_id;
337 	(*fill)(sc, ccb, sqe);
338 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
339 	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);
340 
341 	nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
342 	mtx_leave(&q->q_sq_mtx);
343 }
344 
345 struct nvme_poll_state {
346 	struct nvme_sqe s;
347 	struct nvme_cqe c;
348 };
349 
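/*
 * Synchronous command execution, used at attach time.  The caller's SQE is
 * staged in a nvme_poll_state on the stack and the ccb's done/cookie hooks
 * are temporarily redirected so nvme_poll_done() can copy the CQE back.
 * The loop spins on the phase bit of that private CQE copy, calling
 * nvme_intr() itself rather than relying on the interrupt being wired up.
 * The return value packs the status code type and status code from the
 * CQE, so 0 means success.  Typical use, as in nvme_identify():
 *
 *	ccb->ccb_done = nvme_empty_done;
 *	ccb->ccb_cookie = mem;
 *	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify);
 */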
350 int
351 nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
352     void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *))
353 {
354 	struct nvme_poll_state state;
355 	void (*done)(struct nvme_softc *, struct nvme_ccb *, struct nvme_cqe *);
356 	void *cookie;
357 	u_int16_t flags;
358 
359 	memset(&state, 0, sizeof(state));
360 	state.s.cid = ccb->ccb_id;
361 	(*fill)(sc, ccb, &state.s);
362 
363 	done = ccb->ccb_done;
364 	cookie = ccb->ccb_cookie;
365 
366 	ccb->ccb_done = nvme_poll_done;
367 	ccb->ccb_cookie = &state;
368 
369 	nvme_q_submit(sc, q, ccb, nvme_poll_fill);
370 	while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
371 		if (nvme_intr(sc) == 0)
372 			delay(10);
373 
374 		/* XXX no timeout? */
375 	}
376 
377 	ccb->ccb_cookie = cookie;
378 	done(sc, ccb, &state.c);
379 
380 	flags = lemtoh16(&state.c.flags);
381 
382 	return (NVME_CQE_SCT(flags) | NVME_CQE_SC(flags));
383 }
384 
385 void
386 nvme_poll_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
387 {
388 	struct nvme_sqe *sqe = slot;
389 	struct nvme_poll_state *state = ccb->ccb_cookie;
390 
391 	*sqe = state->s;
392 }
393 
394 void
395 nvme_poll_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
396     struct nvme_cqe *cqe)
397 {
398 	struct nvme_poll_state *state = ccb->ccb_cookie;
399 
400 	SET(cqe->flags, htole16(NVME_CQE_PHASE));
401 	state->c = *cqe;
402 }
403 
404 void
405 nvme_empty_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
406     struct nvme_cqe *cqe)
407 {
408 }
409 
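/*
 * Drain the completion queue.  A CQE belongs to the host while its phase
 * tag matches q_cq_phase; the controller inverts the bit it writes on each
 * pass through the ring, so the expected phase is flipped whenever the
 * head index wraps.  Each valid entry is handed to the owning ccb's done
 * routine, and the new head is written to the completion queue head
 * doorbell once something has been consumed.
 */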
410 int
411 nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q)
412 {
413 	struct nvme_ccb *ccb;
414 	struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe;
415 	u_int32_t head;
416 	u_int16_t flags;
417 	int rv = 0;
418 
419 	if (!mtx_enter_try(&q->q_cq_mtx))
420 		return (-1);
421 
422 	head = q->q_cq_head;
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_cq_dmamem),
	    0, NVME_DMA_LEN(q->q_cq_dmamem), BUS_DMASYNC_POSTREAD);
423 	for (;;) {
424 		cqe = &ring[head];
425 		flags = lemtoh16(&cqe->flags);
426 		if ((flags & NVME_CQE_PHASE) != q->q_cq_phase)
427 			break;
428 
429 		ccb = &sc->sc_ccbs[cqe->cid];
430 		ccb->ccb_done(sc, ccb, cqe);
431 
432 		if (++head >= q->q_entries) {
433 			head = 0;
434 			q->q_cq_phase ^= NVME_CQE_PHASE;
435 		}
436 
437 		rv = 1;
438 	}
439 
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_cq_dmamem),
	    0, NVME_DMA_LEN(q->q_cq_dmamem), BUS_DMASYNC_PREREAD);
440 	if (rv)
441 		nvme_write4(sc, q->q_cqhdbl, q->q_cq_head = head);
442 	mtx_leave(&q->q_cq_mtx);
443 
444 	return (rv);
445 }
446 
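/*
 * Issue IDENTIFY (CNS=1) to fetch the 4KB controller data structure into a
 * temporary DMA buffer, print the model, firmware and serial strings, and
 * clamp sc_mdts to the advertised maximum data transfer size.  MDTS is a
 * power of two in units of the minimum memory page size, with 0 meaning no
 * limit, which is why CAP.MPSMIN is passed in as mps.
 */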
447 int
448 nvme_identify(struct nvme_softc *sc, u_int mps)
449 {
450 	char sn[41], mn[81], fr[17];
451 	struct nvm_identify_controller *identify;
452 	struct nvme_dmamem *mem;
453 	struct nvme_ccb *ccb;
454 	u_int mdts;
455 	int rv = 1;
456 
457 	ccb = nvme_ccb_get(sc);
458 	if (ccb == NULL)
459 		panic("nvme_identify: nvme_ccb_get returned NULL");
460 
461 	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
462 	if (mem == NULL) {
		nvme_ccb_put(sc, ccb);
463 		return (1);
	}
464 
465 	identify = NVME_DMA_KVA(mem);
466 
467 	ccb->ccb_done = nvme_empty_done;
468 	ccb->ccb_cookie = mem;
469 
470 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
471 	    0, sizeof(*identify), BUS_DMASYNC_PREREAD);
472 	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify);
473 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
474 	    0, sizeof(*identify), BUS_DMASYNC_POSTREAD);
475 
476 	if (rv != 0)
477 		goto done;
478 
479 	scsi_strvis(sn, identify->sn, sizeof(identify->sn));
480 	scsi_strvis(mn, identify->mn, sizeof(identify->mn));
481 	scsi_strvis(fr, identify->fr, sizeof(identify->fr));
482 
483 	printf("%s: %s, firmware %s, serial %s\n", DEVNAME(sc), mn, fr, sn);
484 
485 	if (identify->mdts > 0) {
486 		mdts = (1 << identify->mdts) * (1 << mps);
487 		if (mdts < sc->sc_mdts)
488 			sc->sc_mdts = mdts;
489 	}
490 
491 done:
492 	nvme_dmamem_free(sc, mem);
	nvme_ccb_put(sc, ccb);
493 
494 	return (rv);
495 }
496 
497 void
498 nvme_fill_identify(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
499 {
500 	struct nvme_sqe *sqe = slot;
501 	struct nvme_dmamem *mem = ccb->ccb_cookie;
502 
503 	sqe->opcode = NVM_ADMIN_IDENTIFY;
504 	htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
505 	htolem32(&sqe->cdw10, 1);
506 }
507 
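/*
 * ccbs carry per-command state.  Each one owns a DMA map sized for sc_mdts
 * with at most sc_max_sgl segments (presumably the two PRP slots in the
 * SQE at this stage), and its index doubles as the NVMe command identifier
 * that nvme_q_complete() uses to find it again.  They are handed out via
 * sc_iopool and sc_ccb_list.
 */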
508 int
509 nvme_ccbs_alloc(struct nvme_softc *sc, u_int nccbs)
510 {
511 	struct nvme_ccb *ccb;
512 	u_int i;
513 
514 	sc->sc_ccbs = mallocarray(nccbs, sizeof(*ccb), M_DEVBUF,
515 	    M_WAITOK | M_CANFAIL);
516 	if (sc->sc_ccbs == NULL)
517 		return (1);
518 
519 	for (i = 0; i < nccbs; i++) {
520 		ccb = &sc->sc_ccbs[i];
521 
522 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts, sc->sc_max_sgl,
523 		    sc->sc_mdts, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
524 		    &ccb->ccb_dmamap) != 0)
525 			goto free_maps;
526 
527 		ccb->ccb_id = i;
528 		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_list, ccb, ccb_entry);
529 	}
530 
531 	return (0);
532 
533 free_maps:
534 	nvme_ccbs_free(sc);
535 	return (1);
536 }
537 
538 void *
539 nvme_ccb_get(void *cookie)
540 {
541 	struct nvme_softc *sc = cookie;
542 	struct nvme_ccb *ccb;
543 
544 	mtx_enter(&sc->sc_ccb_mtx);
545 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list);
546 	if (ccb != NULL)
547 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
548 	mtx_leave(&sc->sc_ccb_mtx);
549 
550 	return (ccb);
551 }
552 
553 void
554 nvme_ccb_put(void *cookie, void *io)
555 {
556 	struct nvme_softc *sc = cookie;
557 	struct nvme_ccb *ccb = io;
558 
559 	mtx_enter(&sc->sc_ccb_mtx);
560 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_list, ccb, ccb_entry);
561 	mtx_leave(&sc->sc_ccb_mtx);
562 }
563 
564 void
565 nvme_ccbs_free(struct nvme_softc *sc)
566 {
567 	struct nvme_ccb *ccb;
568 
569 	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list)) != NULL) {
570 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
571 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
572 	}
573 
574 	free(sc->sc_ccbs, M_DEVBUF, 0);
575 }
576 
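/*
 * Allocate a submission/completion queue pair: DMA memory for each ring
 * plus the doorbell offsets, which NVME_SQTDBL()/NVME_CQHDBL() compute
 * from the queue index and the doorbell stride taken from CAP.DSTRD.
 * Only the admin queue (index NVME_ADMIN_Q) is created so far, e.g.:
 *
 *	sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, 128, dstrd);
 */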
577 struct nvme_queue *
578 nvme_q_alloc(struct nvme_softc *sc, u_int idx, u_int entries, u_int dstrd)
579 {
580 	struct nvme_queue *q;
581 
582 	q = malloc(sizeof(*q), M_DEVBUF, M_WAITOK | M_CANFAIL);
583 	if (q == NULL)
584 		return (NULL);
585 
586 	q->q_sq_dmamem = nvme_dmamem_alloc(sc,
587 	    sizeof(struct nvme_sqe) * entries);
588 	if (q->q_sq_dmamem == NULL)
589 		goto free;
590 
591 	q->q_cq_dmamem = nvme_dmamem_alloc(sc,
592 	    sizeof(struct nvme_cqe) * entries);
593 	if (q->q_cq_dmamem == NULL)
594 		goto free_sq;
595 
596 	memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
597 	memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
598 
599 	mtx_init(&q->q_sq_mtx, IPL_BIO);
600 	mtx_init(&q->q_cq_mtx, IPL_BIO);
601 	q->q_sqtdbl = NVME_SQTDBL(idx, dstrd);
602 	q->q_cqhdbl = NVME_CQHDBL(idx, dstrd);
603 	q->q_entries = entries;
604 	q->q_sq_tail = 0;
605 	q->q_cq_head = 0;
606 	q->q_cq_phase = NVME_CQE_PHASE;
607 
608 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
609 	    0, NVME_DMA_LEN(q->q_sq_dmamem), BUS_DMASYNC_PREWRITE);
610 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_cq_dmamem),
611 	    0, NVME_DMA_LEN(q->q_cq_dmamem), BUS_DMASYNC_PREREAD);
612 
613 	return (q);
614 
615 free_sq:
616 	nvme_dmamem_free(sc, q->q_sq_dmamem);
617 free:
618 	free(q, M_DEVBUF, 0);
619 
620 	return (NULL);
621 }
622 
623 void
624 nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
625 {
626 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_cq_dmamem),
627 	    0, NVME_DMA_LEN(q->q_cq_dmamem), BUS_DMASYNC_POSTREAD);
628 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
629 	    0, NVME_DMA_LEN(q->q_sq_dmamem), BUS_DMASYNC_POSTWRITE);
630 	nvme_dmamem_free(sc, q->q_cq_dmamem);
631 	nvme_dmamem_free(sc, q->q_sq_dmamem);
632 	free(q, M_DEVBUF, 0);
633 }
634 
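/*
 * Interrupt handler.  Only the admin queue exists at this stage of the
 * driver, so it is the only queue serviced here.
 */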
635 int
636 nvme_intr(void *xsc)
637 {
638 	struct nvme_softc *sc = xsc;
639 
640 	return (nvme_q_complete(sc, sc->sc_admin_q));
641 }
642 
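/*
 * Allocate a single physically contiguous, zeroed chunk of DMA memory
 * aligned to the controller page size, keeping the map, segment and KVA
 * together so nvme_dmamem_free() can tear it all down again.
 */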
643 struct nvme_dmamem *
644 nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
645 {
646 	struct nvme_dmamem *ndm;
647 	int nsegs;
648 
649 	ndm = malloc(sizeof(*ndm), M_DEVBUF, M_NOWAIT | M_ZERO);
650 	if (ndm == NULL)
651 		return (NULL);
652 
653 	ndm->ndm_size = size;
654 
655 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
656 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
657 		goto ndmfree;
658 
659 	if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
660 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
661 		goto destroy;
662 
663 	if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
664 	    &ndm->ndm_kva, BUS_DMA_NOWAIT) != 0)
665 		goto free;
666 
667 	if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
668 	    NULL, BUS_DMA_NOWAIT) != 0)
669 		goto unmap;
670 
671 	return (ndm);
672 
673 unmap:
674 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
675 free:
676 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
677 destroy:
678 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
679 ndmfree:
680 	free(ndm, M_DEVBUF, 0);
681 
682 	return (NULL);
683 }
684 
685 void
686 nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
687 {
688 	bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
689 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
690 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
691 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
692 	free(ndm, M_DEVBUF, 0);
693 }
694 
695