1 /* $OpenBSD: aplns.c,v 1.15 2022/11/11 11:45:10 kettenis Exp $ */
2 /*
3 * Copyright (c) 2014, 2021 David Gwynne <dlg@openbsd.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/buf.h>
21 #include <sys/kernel.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/timeout.h>
25 #include <sys/queue.h>
26 #include <sys/mutex.h>
27 #include <sys/pool.h>
28
29 #include <machine/bus.h>
30 #include <machine/fdt.h>
31
32 #include <dev/ofw/openfirm.h>
33 #include <dev/ofw/ofw_misc.h>
34 #include <dev/ofw/ofw_power.h>
35 #include <dev/ofw/ofw_clock.h>
36 #include <dev/ofw/fdt.h>
37
38 #include <scsi/scsi_all.h>
39 #include <scsi/scsiconf.h>
40
41 #include <dev/ic/nvmereg.h>
42 #include <dev/ic/nvmevar.h>
43
44 #include <arm64/dev/rtkit.h>
45
46 #define ANS_CPU_CTRL 0x0044
47 #define ANS_CPU_CTRL_RUN (1 << 4)
48
49 #define ANS_MAX_PEND_CMDS_CTRL 0x01210
50 #define ANS_MAX_QUEUE_DEPTH 64
51 #define ANS_BOOT_STATUS 0x01300
52 #define ANS_BOOT_STATUS_OK 0xde71ce55
53 #define ANS_MODESEL_REG 0x01304
54 #define ANS_UNKNOWN_CTRL 0x24008
55 #define ANS_PRP_NULL_CHECK (1 << 11)
56 #define ANS_LINEAR_SQ_CTRL 0x24908
57 #define ANS_LINEAR_SQ_CTRL_EN (1 << 0)
58 #define ANS_LINEAR_ASQ_DB 0x2490c
59 #define ANS_LINEAR_IOSQ_DB 0x24910
60
61 #define ANS_NVMMU_NUM 0x28100
62 #define ANS_NVMMU_BASE_ASQ 0x28108
63 #define ANS_NVMMU_BASE_IOSQ 0x28110
64 #define ANS_NVMMU_TCB_INVAL 0x28118
65 #define ANS_NVMMU_TCB_STAT 0x28120
66
67 #define ANS_NVMMU_TCB_SIZE 0x4000
68 #define ANS_NVMMU_TCB_PITCH 0x80
69
/*
 * Task control block for the ANS NVMMU.  There is one slot per command
 * id, ANS_NVMMU_TCB_PITCH bytes apart, mirroring key fields of the SQE
 * the firmware should act on (filled in by nvme_ans_sq_leave()).
 */
struct ans_nvmmu_tcb {
	uint8_t		tcb_opcode;	/* NVMe opcode copied from the SQE */
	uint8_t		tcb_flags;	/* direction hints; both are set here */
#define ANS_NVMMU_TCB_WRITE	(1 << 0)
#define ANS_NVMMU_TCB_READ	(1 << 1)
	uint8_t		tcb_cid;	/* command id == TCB slot index */
	uint8_t		tcb_pad0[1];

	/* loaded from the SQE nlb field; NOTE(review): presumably the
	 * firmware derives the PRP list length from the block count */
	uint32_t	tcb_prpl_len;
	uint8_t		tcb_pad1[16];

	uint64_t	tcb_prp[2];	/* PRP entries copied from the SQE */
};
83
int	aplns_match(struct device *, void *, void *);
void	aplns_attach(struct device *, struct device *, void *);

/* autoconf glue for the "aplns" node; nvme_ans attaches underneath it */
const struct cfattach aplns_ca = {
	sizeof(struct device), aplns_match, aplns_attach
};

struct cfdriver aplns_cd = {
	NULL, "aplns", DV_DULL
};

/* SART address-filter callbacks handed to RTKit (see nvme_ans_attach) */
int	nvme_ans_sart_map(void *, bus_addr_t, bus_size_t);
int	nvme_ans_sart_unmap(void *, bus_addr_t, bus_size_t);
97
98 int
aplns_match(struct device * parent,void * match,void * aux)99 aplns_match(struct device *parent, void *match, void *aux)
100 {
101 struct fdt_attach_args *faa = aux;
102
103 return (OF_is_compatible(faa->fa_node, "apple,nvme-m1") ||
104 OF_is_compatible(faa->fa_node, "apple,nvme-ans2"));
105 }
106
/*
 * Attach the glue device: finish the attach line and let the nvme_ans
 * driver attach to the same FDT node underneath us.
 */
void
aplns_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;

	printf("\n");

	/* pass the FDT attach args straight down to the child */
	config_found(self, faa, NULL);
}
116
struct nvme_ans_softc {
	struct nvme_softc	asc_nvme;	/* generic nvme(4) state; must be first */
	bus_space_tag_t		asc_iot;	/* ANS coprocessor register block */
	bus_space_handle_t	asc_ioh;
	int			asc_node;	/* our FDT node */

	uint32_t		asc_sart;	/* "apple,sart" property value */
	struct rtkit		asc_rtkit;	/* RTKit callbacks + cookie */
	struct rtkit_state	*asc_rtkit_state;
	/* NOTE(review): asc_nvmmu is never referenced in this file --
	 * confirm whether it is still needed */
	struct nvme_dmamem	*asc_nvmmu;
};
128
int	nvme_ans_match(struct device *, void *, void *);
void	nvme_ans_attach(struct device *, struct device *, void *);
int	nvme_ans_activate(struct device *, int act);

/* autoconf glue; nvme_ans_activate handles powerdown and resume */
const struct cfattach nvme_ans_ca = {
	sizeof(struct nvme_ans_softc), nvme_ans_match, nvme_ans_attach, NULL,
	nvme_ans_activate
};

/* coprocessor bringup/teardown */
int	nvme_ans_init(struct nvme_ans_softc *sc);
void	nvme_ans_shutdown(struct nvme_ans_softc *sc);

/* nvme(4) backend hooks, wired up in nvme_ans_ops below */
void	nvme_ans_enable(struct nvme_softc *);

int	nvme_ans_q_alloc(struct nvme_softc *,
	    struct nvme_queue *);
void	nvme_ans_q_free(struct nvme_softc *,
	    struct nvme_queue *);

uint32_t nvme_ans_sq_enter(struct nvme_softc *,
	    struct nvme_queue *, struct nvme_ccb *);
void	nvme_ans_sq_leave(struct nvme_softc *,
	    struct nvme_queue *, struct nvme_ccb *);

void	nvme_ans_cq_done(struct nvme_softc *,
	    struct nvme_queue *, struct nvme_ccb *);
154
/*
 * nvme(4) backend hooks for the Apple ANS controller.  The same
 * sq_enter/sq_leave handlers serve both the locked and unlocked paths.
 */
static const struct nvme_ops nvme_ans_ops = {
	.op_enable		= nvme_ans_enable,

	.op_q_alloc		= nvme_ans_q_alloc,
	.op_q_free		= nvme_ans_q_free,

	.op_sq_enter		= nvme_ans_sq_enter,
	.op_sq_leave		= nvme_ans_sq_leave,
	.op_sq_enter_locked	= nvme_ans_sq_enter,
	.op_sq_leave_locked	= nvme_ans_sq_leave,

	.op_cq_done		= nvme_ans_cq_done,
};
168
169 int
nvme_ans_match(struct device * parent,void * match,void * aux)170 nvme_ans_match(struct device *parent, void *match, void *aux)
171 {
172 struct fdt_attach_args *faa = aux;
173
174 return (OF_is_compatible(faa->fa_node, "apple,nvme-m1") ||
175 OF_is_compatible(faa->fa_node, "apple,nvme-ans2"));
176 }
177
178 void
nvme_ans_attach(struct device * parent,struct device * self,void * aux)179 nvme_ans_attach(struct device *parent, struct device *self, void *aux)
180 {
181 struct nvme_ans_softc *asc = (struct nvme_ans_softc *)self;
182 struct nvme_softc *sc = &asc->asc_nvme;
183 struct fdt_attach_args *faa = aux;
184
185 if (faa->fa_nreg < 2) {
186 printf(": no registers\n");
187 return;
188 }
189
190 sc->sc_iot = faa->fa_iot;
191 if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
192 faa->fa_reg[0].size, 0, &sc->sc_ioh) != 0) {
193 printf(": can't map registers\n");
194 return;
195 }
196
197 asc->asc_iot = faa->fa_iot;
198 if (bus_space_map(asc->asc_iot, faa->fa_reg[1].addr,
199 faa->fa_reg[1].size, 0, &asc->asc_ioh)) {
200 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
201 printf(": can't map registers\n");
202 return;
203 }
204
205 sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_BIO,
206 nvme_intr, sc, sc->sc_dev.dv_xname);
207 if (sc->sc_ih == NULL) {
208 printf(": can't establish interrupt\n");
209 goto unmap;
210 }
211
212 asc->asc_node = faa->fa_node;
213 asc->asc_sart = OF_getpropint(faa->fa_node, "apple,sart", 0);
214 asc->asc_rtkit.rk_cookie = asc;
215 asc->asc_rtkit.rk_dmat = faa->fa_dmat;
216 asc->asc_rtkit.rk_map = nvme_ans_sart_map;
217 asc->asc_rtkit.rk_unmap = nvme_ans_sart_unmap;
218
219 asc->asc_rtkit_state =
220 rtkit_init(faa->fa_node, NULL, 0, &asc->asc_rtkit);
221 if (asc->asc_rtkit_state == NULL) {
222 printf(": can't map mailbox channel\n");
223 goto disestablish;
224 }
225
226 if (nvme_ans_init(asc)) {
227 printf(": firmware not ready\n");
228 goto disestablish;
229 }
230
231 printf(": ");
232
233 sc->sc_dmat = faa->fa_dmat;
234 sc->sc_ios = faa->fa_reg[0].size;
235 sc->sc_ops = &nvme_ans_ops;
236 sc->sc_openings = 1;
237
238 if (nvme_attach(sc) != 0) {
239 /* error printed by nvme_attach() */
240 goto disestablish;
241 }
242
243 return;
244
245 disestablish:
246 fdt_intr_disestablish(sc->sc_ih);
247 sc->sc_ih = NULL;
248
249 unmap:
250 bus_space_unmap(asc->asc_iot, asc->asc_ioh, faa->fa_reg[1].size);
251 bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
252 sc->sc_ios = 0;
253 }
254
255 int
nvme_ans_activate(struct device * self,int act)256 nvme_ans_activate(struct device *self, int act)
257 {
258 struct nvme_ans_softc *asc = (struct nvme_ans_softc *)self;
259 struct nvme_softc *sc = &asc->asc_nvme;
260 int rv;
261
262 switch (act) {
263 case DVACT_POWERDOWN:
264 rv = nvme_activate(&asc->asc_nvme, act);
265 nvme_ans_shutdown(asc);
266 break;
267 case DVACT_RESUME:
268 rv = nvme_ans_init(asc);
269 if (rv) {
270 printf("%s: firmware not ready\n", DEVNAME(sc));
271 goto fail;
272 }
273 rv = nvme_activate(&asc->asc_nvme, act);
274 break;
275 default:
276 rv = nvme_activate(&asc->asc_nvme, act);
277 break;
278 }
279
280 fail:
281 return rv;
282 }
283
/*
 * Bring up the ANS coprocessor and configure linear submission queue
 * mode.  Returns 0 on success, ENXIO if the firmware does not report a
 * successful boot.  Called at attach time and again on resume.
 */
int
nvme_ans_init(struct nvme_ans_softc *asc)
{
	struct nvme_softc *sc = &asc->asc_nvme;
	uint32_t ctrl, status;

	power_domain_enable_all(asc->asc_node);

	/* Let the ANS CPU run. */
	ctrl = bus_space_read_4(asc->asc_iot, asc->asc_ioh, ANS_CPU_CTRL);
	bus_space_write_4(asc->asc_iot, asc->asc_ioh, ANS_CPU_CTRL,
	    ctrl | ANS_CPU_CTRL_RUN);

	/* Only boot the firmware over RTKit if it isn't already up. */
	status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ANS_BOOT_STATUS);
	if (status != ANS_BOOT_STATUS_OK)
		rtkit_boot(asc->asc_rtkit_state);

	status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ANS_BOOT_STATUS);
	if (status != ANS_BOOT_STATUS_OK)
		return ENXIO;

	/* Enable the linear SQ and cap pending commands at queue depth. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ANS_LINEAR_SQ_CTRL,
	    ANS_LINEAR_SQ_CTRL_EN);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ANS_MAX_PEND_CMDS_CTRL,
	    (ANS_MAX_QUEUE_DEPTH << 16) | ANS_MAX_QUEUE_DEPTH);

	/* Clear the PRP null check bit in the (otherwise unknown) reg. */
	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ANS_UNKNOWN_CTRL);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ANS_UNKNOWN_CTRL,
	    ctrl & ~ANS_PRP_NULL_CHECK);

	return 0;
}
315
/*
 * Stop the ANS coprocessor: tear down the RTKit session, halt the ANS
 * CPU, pulse the resets and cut power.  Counterpart of nvme_ans_init(),
 * used on powerdown.
 */
void
nvme_ans_shutdown(struct nvme_ans_softc *asc)
{
	uint32_t ctrl;

	rtkit_shutdown(asc->asc_rtkit_state);

	/* Stop the ANS CPU. */
	ctrl = bus_space_read_4(asc->asc_iot, asc->asc_ioh, ANS_CPU_CTRL);
	bus_space_write_4(asc->asc_iot, asc->asc_ioh, ANS_CPU_CTRL,
	    ctrl & ~ANS_CPU_CTRL_RUN);

	/* Put the coprocessor through a full reset cycle. */
	reset_assert_all(asc->asc_node);
	reset_deassert_all(asc->asc_node);

	power_domain_disable_all(asc->asc_node);
}
332
333 int
nvme_ans_sart_map(void * cookie,bus_addr_t addr,bus_size_t size)334 nvme_ans_sart_map(void *cookie, bus_addr_t addr, bus_size_t size)
335 {
336 struct nvme_ans_softc *asc = cookie;
337
338 return aplsart_map(asc->asc_sart, addr, size);
339 }
340
341 int
nvme_ans_sart_unmap(void * cookie,bus_addr_t addr,bus_size_t size)342 nvme_ans_sart_unmap(void *cookie, bus_addr_t addr, bus_size_t size)
343 {
344 struct nvme_ans_softc *asc = cookie;
345
346 return aplsart_unmap(asc->asc_sart, addr, size);
347 }
348
349 int
nvme_ans_q_alloc(struct nvme_softc * sc,struct nvme_queue * q)350 nvme_ans_q_alloc(struct nvme_softc *sc,
351 struct nvme_queue *q)
352 {
353 bus_size_t db, base;
354
355 KASSERT(q->q_entries <= (ANS_NVMMU_TCB_SIZE / ANS_NVMMU_TCB_PITCH));
356
357 q->q_nvmmu_dmamem = nvme_dmamem_alloc(sc, ANS_NVMMU_TCB_SIZE);
358 if (q->q_nvmmu_dmamem == NULL)
359 return (-1);
360
361 memset(NVME_DMA_KVA(q->q_nvmmu_dmamem),
362 0, NVME_DMA_LEN(q->q_nvmmu_dmamem));
363
364 switch (q->q_id) {
365 case NVME_IO_Q:
366 db = ANS_LINEAR_IOSQ_DB;
367 base = ANS_NVMMU_BASE_IOSQ;
368 break;
369 case NVME_ADMIN_Q:
370 db = ANS_LINEAR_ASQ_DB;
371 base = ANS_NVMMU_BASE_ASQ;
372 break;
373 default:
374 panic("unsupported queue id %u", q->q_id);
375 /* NOTREACHED */
376 }
377
378 q->q_sqtdbl = db;
379
380 nvme_dmamem_sync(sc, q->q_nvmmu_dmamem, BUS_DMASYNC_PREWRITE);
381 nvme_write8(sc, base, NVME_DMA_DVA(q->q_nvmmu_dmamem));
382
383 return (0);
384 }
385
/*
 * op_enable hook: program the number of NVMMU TCB slots and select
 * mode 0.  NOTE(review): presumably ANS_NVMMU_NUM takes the slot count
 * minus one -- confirm against the hardware docs.
 */
void
nvme_ans_enable(struct nvme_softc *sc)
{
	nvme_write4(sc, ANS_NVMMU_NUM,
	    (ANS_NVMMU_TCB_SIZE / ANS_NVMMU_TCB_PITCH) - 1);
	nvme_write4(sc, ANS_MODESEL_REG, 0);
}
393
/*
 * op_q_free hook: release the per-queue NVMMU TCB table allocated in
 * nvme_ans_q_alloc().
 */
void
nvme_ans_q_free(struct nvme_softc *sc,
    struct nvme_queue *q)
{
	nvme_dmamem_sync(sc, q->q_nvmmu_dmamem, BUS_DMASYNC_POSTWRITE);
	nvme_dmamem_free(sc, q->q_nvmmu_dmamem);
}
401
/*
 * op_sq_enter hook: the ANS linear submission queue is indexed by
 * command id rather than a rolling tail (see nvme_ans_sq_leave()), so
 * return the ccb id as the SQ slot.
 */
uint32_t
nvme_ans_sq_enter(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	return (ccb->ccb_id);
}
408
409 static inline struct ans_nvmmu_tcb *
nvme_ans_tcb(struct nvme_queue * q,unsigned int qid)410 nvme_ans_tcb(struct nvme_queue *q, unsigned int qid)
411 {
412 caddr_t ptr = NVME_DMA_KVA(q->q_nvmmu_dmamem);
413 ptr += qid * ANS_NVMMU_TCB_PITCH;
414 return ((struct ans_nvmmu_tcb *)ptr);
415 }
416
/*
 * op_sq_leave hook: publish the command.  Build the NVMMU TCB for this
 * command id from the SQE that nvme(4) already wrote into the linear
 * SQ, then ring the queue's doorbell with the command id.
 */
void
nvme_ans_sq_leave(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	unsigned int id = ccb->ccb_id;
	struct nvme_sqe_io *sqe;
	struct ans_nvmmu_tcb *tcb = nvme_ans_tcb(q, id);

	/* the linear SQ is indexed by command id */
	sqe = NVME_DMA_KVA(q->q_sq_dmamem);
	sqe += id;

	/* claim the TCB slot back from the device before writing it */
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_nvmmu_dmamem),
	    ANS_NVMMU_TCB_PITCH * id, sizeof(*tcb), BUS_DMASYNC_POSTWRITE);

	memset(tcb, 0, sizeof(*tcb));
	tcb->tcb_opcode = sqe->opcode;
	tcb->tcb_flags = ANS_NVMMU_TCB_WRITE | ANS_NVMMU_TCB_READ;
	tcb->tcb_cid = id;
	/* NOTE(review): prpl_len is fed from the SQE block count (nlb) */
	tcb->tcb_prpl_len = sqe->nlb;
	tcb->tcb_prp[0] = sqe->entry.prp[0];
	tcb->tcb_prp[1] = sqe->entry.prp[1];

	/* make the TCB visible to the device before ringing the doorbell */
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_nvmmu_dmamem),
	    ANS_NVMMU_TCB_PITCH * id, sizeof(*tcb), BUS_DMASYNC_PREWRITE);

	nvme_write4(sc, q->q_sqtdbl, id);
}
444
445 void
nvme_ans_cq_done(struct nvme_softc * sc,struct nvme_queue * q,struct nvme_ccb * ccb)446 nvme_ans_cq_done(struct nvme_softc *sc,
447 struct nvme_queue *q, struct nvme_ccb *ccb)
448 {
449 unsigned int id = ccb->ccb_id;
450 struct ans_nvmmu_tcb *tcb = nvme_ans_tcb(q, id);
451 uint32_t stat;
452
453 bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_nvmmu_dmamem),
454 ANS_NVMMU_TCB_PITCH * id, sizeof(*tcb), BUS_DMASYNC_POSTWRITE);
455 memset(tcb, 0, sizeof(*tcb));
456 bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_nvmmu_dmamem),
457 ANS_NVMMU_TCB_PITCH * id, sizeof(*tcb), BUS_DMASYNC_PREWRITE);
458
459 nvme_write4(sc, ANS_NVMMU_TCB_INVAL, id);
460 stat = nvme_read4(sc, ANS_NVMMU_TCB_STAT);
461 if (stat != 0) {
462 printf("%s: nvmmu tcp stat is non-zero: 0x%08x\n",
463 DEVNAME(sc), stat);
464 }
465 }
466