/*	$OpenBSD: aplns.c,v 1.12 2022/06/12 16:00:12 kettenis Exp $ */
/*
 * Copyright (c) 2014, 2021 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
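
/*
 * Glue for the NVMe controller found on Apple Silicon SoCs.  The
 * controller sits behind the ANS2 coprocessor, which is booted over
 * RTKit and reaches shared memory through a SART address filter.
 * Commands are issued on linear submission queues and shadowed by
 * NVMMU transfer control blocks.
 */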

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/queue.h>
#include <sys/mutex.h>
#include <sys/pool.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_power.h>
#include <dev/ofw/fdt.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/nvmereg.h>
#include <dev/ic/nvmevar.h>

#include <arm64/dev/rtkit.h>

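/*
 * Register offsets used to start the ANS coprocessor and to drive
 * the NVMMU and the linear submission queues in front of the NVMe
 * controller.
 */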
#define ANS_CPU_CTRL		0x0044
#define ANS_CPU_CTRL_RUN	(1 << 4)

#define ANS_MAX_PEND_CMDS_CTRL	0x01210
#define  ANS_MAX_QUEUE_DEPTH	64
#define ANS_BOOT_STATUS		0x01300
#define  ANS_BOOT_STATUS_OK	0xde71ce55
#define ANS_MODESEL_REG		0x01304
#define ANS_UNKNOWN_CTRL	0x24008
#define  ANS_PRP_NULL_CHECK	(1 << 11)
#define ANS_LINEAR_SQ_CTRL	0x24908
#define  ANS_LINEAR_SQ_CTRL_EN	(1 << 0)
#define ANS_LINEAR_ASQ_DB	0x2490c
#define ANS_LINEAR_IOSQ_DB	0x24910

#define ANS_NVMMU_NUM		0x28100
#define ANS_NVMMU_BASE_ASQ	0x28108
#define ANS_NVMMU_BASE_IOSQ	0x28110
#define ANS_NVMMU_TCB_INVAL	0x28118
#define ANS_NVMMU_TCB_STAT	0x28120

#define ANS_NVMMU_TCB_SIZE	0x4000
#define ANS_NVMMU_TCB_PITCH	0x80

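/*
 * NVMMU transfer control block (TCB).  One per command slot, spaced
 * ANS_NVMMU_TCB_PITCH bytes apart, shadowing the corresponding
 * submission queue entry.
 */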
struct ans_nvmmu_tcb {
	uint8_t		tcb_opcode;
	uint8_t		tcb_flags;
#define ANS_NVMMU_TCB_WRITE		(1 << 0)
#define ANS_NVMMU_TCB_READ		(1 << 1)
	uint8_t		tcb_cid;
	uint8_t		tcb_pad0[1];

	uint32_t	tcb_prpl_len;
	uint8_t		tcb_pad1[16];

	uint64_t	tcb_prp[2];
};

int	aplns_match(struct device *, void *, void *);
void	aplns_attach(struct device *, struct device *, void *);

const struct cfattach	aplns_ca = {
	sizeof(struct device),
	aplns_match,
	aplns_attach
};

struct cfdriver aplns_cd = {
	NULL, "aplns", DV_DULL
};

int	nvme_ans_sart_map(void *, bus_addr_t, bus_size_t);

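/*
 * The aplns node only anchors the controller; attach simply hands
 * the fdt attach args down to the nvme_ans child via config_found().
 */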
int
aplns_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "apple,nvme-m1") ||
	    OF_is_compatible(faa->fa_node, "apple,nvme-ans2"));
}

void
aplns_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;

	printf("\n");

	config_found(self, faa, NULL);
}

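/*
 * Per-controller state: the generic nvme(4) softc plus the ANS
 * coprocessor register window, the "apple,sart" reference and the
 * RTKit mailbox state.
 */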
struct nvme_ans_softc {
	struct nvme_softc	 asc_nvme;
	bus_space_tag_t		 asc_iot;
	bus_space_handle_t	 asc_ioh;

	uint32_t		 asc_sart;
	struct rtkit		 asc_rtkit;
	struct rtkit_state	*asc_rtkit_state;
	struct nvme_dmamem	*asc_nvmmu;
};

int	nvme_ans_match(struct device *, void *, void *);
void	nvme_ans_attach(struct device *, struct device *, void *);

const struct cfattach nvme_ans_ca = {
	sizeof(struct nvme_ans_softc),
	nvme_ans_match,
	nvme_ans_attach,
};

void		nvme_ans_enable(struct nvme_softc *);

int		nvme_ans_q_alloc(struct nvme_softc *,
		    struct nvme_queue *);
void		nvme_ans_q_free(struct nvme_softc *,
		    struct nvme_queue *);

uint32_t	nvme_ans_sq_enter(struct nvme_softc *,
		    struct nvme_queue *, struct nvme_ccb *);
void		nvme_ans_sq_leave(struct nvme_softc *,
		    struct nvme_queue *, struct nvme_ccb *);

void		nvme_ans_cq_done(struct nvme_softc *,
		    struct nvme_queue *, struct nvme_ccb *);

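/*
 * ANS-specific hooks into the generic nvme(4) driver: each queue
 * carries an NVMMU TCB table and submissions go through the linear
 * SQ doorbells instead of the standard NVMe doorbells.
 */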
static const struct nvme_ops nvme_ans_ops = {
	.op_enable		= nvme_ans_enable,

	.op_q_alloc		= nvme_ans_q_alloc,
	.op_q_free		= nvme_ans_q_free,

	.op_sq_enter		= nvme_ans_sq_enter,
	.op_sq_leave		= nvme_ans_sq_leave,
	.op_sq_enter_locked	= nvme_ans_sq_enter,
	.op_sq_leave_locked	= nvme_ans_sq_leave,

	.op_cq_done		= nvme_ans_cq_done,
};

int
nvme_ans_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "apple,nvme-m1") ||
	    OF_is_compatible(faa->fa_node, "apple,nvme-ans2"));
}

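/*
 * Map both register windows, power up the ANS coprocessor, boot its
 * firmware over RTKit if it is not already running, switch it into
 * linear submission queue mode and hand over to the generic nvme(4)
 * attachment.
 */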
void
nvme_ans_attach(struct device *parent, struct device *self, void *aux)
{
	struct nvme_ans_softc *asc = (struct nvme_ans_softc *)self;
	struct nvme_softc *sc = &asc->asc_nvme;
	struct fdt_attach_args *faa = aux;
	uint32_t ctrl, status;

	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh) != 0) {
		printf(": can't map registers\n");
		return;
	}

	asc->asc_iot = faa->fa_iot;
	if (bus_space_map(asc->asc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &asc->asc_ioh)) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
		printf(": can't map registers\n");
		return;
	}

	power_domain_enable(faa->fa_node);

	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_BIO,
	    nvme_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt\n");
		goto unmap;
	}

	asc->asc_sart = OF_getpropint(faa->fa_node, "apple,sart", 0);
	asc->asc_rtkit.rk_cookie = asc;
	asc->asc_rtkit.rk_dmat = faa->fa_dmat;
	asc->asc_rtkit.rk_map = nvme_ans_sart_map;

	asc->asc_rtkit_state = rtkit_init(faa->fa_node, NULL, &asc->asc_rtkit);
	if (asc->asc_rtkit_state == NULL) {
		printf(": can't map mailbox channel\n");
		goto disestablish;
	}

	ctrl = bus_space_read_4(asc->asc_iot, asc->asc_ioh, ANS_CPU_CTRL);
	bus_space_write_4(asc->asc_iot, asc->asc_ioh, ANS_CPU_CTRL,
	    ctrl | ANS_CPU_CTRL_RUN);

	status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ANS_BOOT_STATUS);
	if (status != ANS_BOOT_STATUS_OK)
		rtkit_boot(asc->asc_rtkit_state);

	status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ANS_BOOT_STATUS);
	if (status != ANS_BOOT_STATUS_OK) {
		printf(": firmware not ready\n");
		goto disestablish;
	}

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ANS_LINEAR_SQ_CTRL,
	    ANS_LINEAR_SQ_CTRL_EN);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ANS_MAX_PEND_CMDS_CTRL,
	    (ANS_MAX_QUEUE_DEPTH << 16) | ANS_MAX_QUEUE_DEPTH);

	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ANS_UNKNOWN_CTRL);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ANS_UNKNOWN_CTRL,
	    ctrl & ~ANS_PRP_NULL_CHECK);

	printf(": ");

	sc->sc_dmat = faa->fa_dmat;
	sc->sc_ios = faa->fa_reg[0].size;
	sc->sc_ops = &nvme_ans_ops;
	sc->sc_openings = 1;

	if (nvme_attach(sc) != 0) {
		/* error printed by nvme_attach() */
		goto disestablish;
	}

	return;

disestablish:
	fdt_intr_disestablish(sc->sc_ih);
	sc->sc_ih = NULL;

unmap:
	bus_space_unmap(asc->asc_iot, asc->asc_ioh, faa->fa_reg[1].size);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
	sc->sc_ios = 0;
}

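/*
 * RTKit shared memory callback: allow the ANS coprocessor to reach
 * the given physical range through the SART address filter.
 */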
int
nvme_ans_sart_map(void *cookie, bus_addr_t addr, bus_size_t size)
{
	struct nvme_ans_softc *asc = cookie;

	return aplsart_map(asc->asc_sart, addr, size);
}

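/*
 * Allocate the NVMMU TCB table for a queue and tell the hardware
 * where it lives; the admin and I/O queues use separate base
 * registers and linear SQ doorbells.
 */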
int
nvme_ans_q_alloc(struct nvme_softc *sc,
    struct nvme_queue *q)
{
	bus_size_t db, base;

	KASSERT(q->q_entries <= (ANS_NVMMU_TCB_SIZE / ANS_NVMMU_TCB_PITCH));

	q->q_nvmmu_dmamem = nvme_dmamem_alloc(sc, ANS_NVMMU_TCB_SIZE);
	if (q->q_nvmmu_dmamem == NULL)
		return (-1);

	memset(NVME_DMA_KVA(q->q_nvmmu_dmamem),
	    0, NVME_DMA_LEN(q->q_nvmmu_dmamem));

	switch (q->q_id) {
	case NVME_IO_Q:
		db = ANS_LINEAR_IOSQ_DB;
		base = ANS_NVMMU_BASE_IOSQ;
		break;
	case NVME_ADMIN_Q:
		db = ANS_LINEAR_ASQ_DB;
		base = ANS_NVMMU_BASE_ASQ;
		break;
	default:
		panic("unsupported queue id %u", q->q_id);
		/* NOTREACHED */
	}

	q->q_sqtdbl = db;

	nvme_dmamem_sync(sc, q->q_nvmmu_dmamem, BUS_DMASYNC_PREWRITE);
	nvme_write8(sc, base, NVME_DMA_DVA(q->q_nvmmu_dmamem));

	return (0);
}

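/*
 * Controller enable hook: tell the NVMMU how many TCB slots there
 * are and clear the mode select register.
 */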
void
nvme_ans_enable(struct nvme_softc *sc)
{
	nvme_write4(sc, ANS_NVMMU_NUM,
	    (ANS_NVMMU_TCB_SIZE / ANS_NVMMU_TCB_PITCH) - 1);
	nvme_write4(sc, ANS_MODESEL_REG, 0);
}

void
nvme_ans_q_free(struct nvme_softc *sc,
    struct nvme_queue *q)
{
	nvme_dmamem_sync(sc, q->q_nvmmu_dmamem, BUS_DMASYNC_POSTWRITE);
	nvme_dmamem_free(sc, q->q_nvmmu_dmamem);
}

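/*
 * With the linear SQ there is no head/tail arithmetic: the slot is
 * simply the ccb id.
 */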
uint32_t
nvme_ans_sq_enter(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	return (ccb->ccb_id);
}

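/* Locate the NVMMU TCB that corresponds to a command id. */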
static inline struct ans_nvmmu_tcb *
nvme_ans_tcb(struct nvme_queue *q, unsigned int qid)
{
	caddr_t ptr = NVME_DMA_KVA(q->q_nvmmu_dmamem);

	ptr += qid * ANS_NVMMU_TCB_PITCH;
	return ((struct ans_nvmmu_tcb *)ptr);
}

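/*
 * Mirror the freshly built submission queue entry into its NVMMU
 * TCB and ring the linear SQ doorbell with the command id.
 */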
void
nvme_ans_sq_leave(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	unsigned int id = ccb->ccb_id;
	struct nvme_sqe_io *sqe;
	struct ans_nvmmu_tcb *tcb = nvme_ans_tcb(q, id);

	sqe = NVME_DMA_KVA(q->q_sq_dmamem);
	sqe += id;

	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_nvmmu_dmamem),
	    ANS_NVMMU_TCB_PITCH * id, sizeof(*tcb), BUS_DMASYNC_POSTWRITE);

	memset(tcb, 0, sizeof(*tcb));
	tcb->tcb_opcode = sqe->opcode;
	tcb->tcb_flags = ANS_NVMMU_TCB_WRITE | ANS_NVMMU_TCB_READ;
	tcb->tcb_cid = id;
	tcb->tcb_prpl_len = sqe->nlb;
	tcb->tcb_prp[0] = sqe->entry.prp[0];
	tcb->tcb_prp[1] = sqe->entry.prp[1];

	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_nvmmu_dmamem),
	    ANS_NVMMU_TCB_PITCH * id, sizeof(*tcb), BUS_DMASYNC_PREWRITE);

	nvme_write4(sc, q->q_sqtdbl, id);
}

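/*
 * On completion, clear the command's TCB and ask the NVMMU to
 * invalidate it, checking that the invalidation took.
 */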
void
nvme_ans_cq_done(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	unsigned int id = ccb->ccb_id;
	struct ans_nvmmu_tcb *tcb = nvme_ans_tcb(q, id);
	uint32_t stat;

	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_nvmmu_dmamem),
	    ANS_NVMMU_TCB_PITCH * id, sizeof(*tcb), BUS_DMASYNC_POSTWRITE);
	memset(tcb, 0, sizeof(*tcb));
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_nvmmu_dmamem),
	    ANS_NVMMU_TCB_PITCH * id, sizeof(*tcb), BUS_DMASYNC_PREWRITE);

	nvme_write4(sc, ANS_NVMMU_TCB_INVAL, id);
	stat = nvme_read4(sc, ANS_NVMMU_TCB_STAT);
	if (stat != 0) {
		printf("%s: nvmmu tcb stat is non-zero: 0x%08x\n",
		    DEVNAME(sc), stat);
	}
}
395