1 /* $NetBSD: mpii.c,v 1.31 2024/02/04 20:50:30 andvar Exp $ */
2 /* $OpenBSD: mpii.c,v 1.115 2018/08/14 05:22:21 jmatthew Exp $ */
3 /*
4 * Copyright (c) 2010, 2012 Mike Belopuhov
5 * Copyright (c) 2009 James Giannoules
6 * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
7 * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: mpii.c,v 1.31 2024/02/04 20:50:30 andvar Exp $");
24
25 #include "bio.h"
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/device.h>
31 #include <sys/ioctl.h>
32 #include <sys/malloc.h>
33 #include <sys/kernel.h>
34 #include <sys/mutex.h>
35 #include <sys/condvar.h>
36 #include <sys/dkio.h>
37 #include <sys/tree.h>
38
39 #include <dev/pci/pcireg.h>
40 #include <dev/pci/pcivar.h>
41 #include <dev/pci/pcidevs.h>
42
43 #include <dev/scsipi/scsipi_all.h>
44 #include <dev/scsipi/scsi_all.h>
45 #include <dev/scsipi/scsiconf.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #include <dev/sysmon/sysmonvar.h>
50 #include <sys/envsys.h>
51 #endif
52
53 #include <dev/pci/mpiireg.h>
54
55 // #define MPII_DEBUG
56 #ifdef MPII_DEBUG
57 #define DPRINTF(x...) do { if (mpii_debug) printf(x); } while(0)
58 #define DNPRINTF(n,x...) do { if (mpii_debug & (n)) printf(x); } while(0)
59 #define MPII_D_CMD (0x0001)
60 #define MPII_D_INTR (0x0002)
61 #define MPII_D_MISC (0x0004)
62 #define MPII_D_DMA (0x0008)
63 #define MPII_D_IOCTL (0x0010)
64 #define MPII_D_RW (0x0020)
65 #define MPII_D_MEM (0x0040)
66 #define MPII_D_CCB (0x0080)
67 #define MPII_D_PPR (0x0100)
68 #define MPII_D_RAID (0x0200)
69 #define MPII_D_EVT (0x0400)
70 #define MPII_D_CFG (0x0800)
71 #define MPII_D_MAP (0x1000)
72
73 u_int32_t mpii_debug = 0
74 // | MPII_D_CMD
75 // | MPII_D_INTR
76 // | MPII_D_MISC
77 // | MPII_D_DMA
78 // | MPII_D_IOCTL
79 // | MPII_D_RW
80 // | MPII_D_MEM
81 // | MPII_D_CCB
82 // | MPII_D_PPR
83 // | MPII_D_RAID
84 // | MPII_D_EVT
85 // | MPII_D_CFG
86 // | MPII_D_MAP
87 ;
88 #else
89 #define DPRINTF(x...)
90 #define DNPRINTF(n,x...)
91 #endif
92
93 #define MPII_REQUEST_SIZE (512)
94 #define MPII_REQUEST_CREDIT (128)
95
/*
 * A single DMA-safe memory allocation: the dmamap, its (single)
 * backing segment, the allocation size and the kernel virtual mapping.
 * Used for the request frames, reply frames and both reply queues.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded dmamap */
	bus_dma_segment_t	mdm_seg;	/* single backing segment */
	size_t			mdm_size;	/* size of the allocation */
	void			*mdm_kva;	/* kernel virtual address */
};
/* convenience accessors: map handle, device (bus) address of seg 0, kva */
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((uint64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((_mdm)->mdm_kva)
105
struct mpii_softc;

/*
 * Reply control block: tracks one reply frame by both its kernel
 * virtual address and its 32-bit device address (the form the IOC
 * posts on the reply queues).
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;
	void			*rcb_reply;	/* kva of the reply frame */
	u_int32_t		rcb_reply_dva;	/* device address of same */
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
115
/*
 * One target known to the controller; slots in sc_devs are indexed by
 * the SCSI target id.
 */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)
#define MPII_DF_UNUSED		(0x0008)
#define MPII_DF_VOLUME		(0x0010)	/* RAID volume */
#define MPII_DF_VOLUME_DISK	(0x0020)	/* disk hidden in a volume */
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;		/* SCSI target / sc_devs index */
	short			percent;	/* presumably resync progress
						 * for volumes -- confirm with
						 * the bio ioctl code */
	u_int16_t		dev_handle;	/* IOC device handle */
	u_int16_t		enclosure;	/* enclosure handle */
	u_int16_t		expander;	/* expander handle */
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};
133
/*
 * Per-command control block.  One ccb per SMID; the ccb owns a slice of
 * the request frame DMA area (ccb_cmd/ccb_cmd_dva at ccb_offset) and a
 * dmamap for the data transfer.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;	/* owning controller */

	void *			ccb_cookie;	/* caller context (e.g. xs) */
	kmutex_t		ccb_mtx;	/* protects ccb_cv wakeups */
	kcondvar_t		ccb_cv;		/* waited on by mpii_wait() */

	bus_dmamap_t		ccb_dmamap;	/* data transfer map */

	bus_addr_t		ccb_offset;	/* offset into request area */
	void			*ccb_cmd;	/* kva of request frame */
	bus_addr_t		ccb_cmd_dva;	/* device addr of same */
	u_int16_t		ccb_dev_handle;
	u_int16_t		ccb_smid;	/* 1-based system message id */

	/* command life cycle; TIMEOUT marks a ccb under abort handling */
	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *);
	struct mpii_rcb		*ccb_rcb;	/* reply, if any */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
163
/*
 * Per-controller state.
 */
struct mpii_softc {
	device_t		sc_dev;

	/* PCI glue */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handler */
	pci_intr_handle_t	*sc_pihp;

	/* scsipi attachment */
	struct scsipi_adapter	sc_adapt;
	struct scsipi_channel	sc_chan;
	device_t		sc_child;	/* our scsibus */

	int			sc_flags;
#define MPII_F_RAID		(1<<1)		/* IR firmware */
#define MPII_F_SAS3		(1<<2)		/* SAS3: IEEE SGEs */

	/* target table, indexed by slot; protected by sc_devs_mtx */
	struct mpii_device	**sc_devs;
	kmutex_t		sc_devs_mtx;

	/* register window and DMA tag */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	kmutex_t		sc_req_mtx;	/* request queue register */
	kmutex_t		sc_rep_mtx;	/* reply post queue */

	/* sizes/limits negotiated via IOCFacts */
	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	/* SGL layout within a request frame */
	ushort			sc_chain_sge;	/* index of the chain SGE */
	ushort			sc_max_sgl;

	u_int8_t		sc_ioc_event_replay;

	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	/* ccb pool; free list protected by sc_ccb_free_mtx */
	struct mpii_ccb		*sc_ccbs;
	struct mpii_ccb_list	sc_ccb_free;
	kmutex_t		sc_ccb_free_mtx;
	kcondvar_t		sc_ccb_free_cv;

	/* timed-out commands, handed to the abort workqueue */
	struct mpii_ccb_list	sc_ccb_tmos;
	kmutex_t		sc_ssb_tmomtx;
	struct workqueue	*sc_ssb_tmowk;
	struct work		sc_ssb_tmowork;

	struct mpii_dmamem	*sc_requests;	/* request frames */

	struct mpii_dmamem	*sc_replies;	/* reply frames */
	struct mpii_rcb		*sc_rcbs;

	/* reply post queue: IOC -> host completions */
	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	/* reply free queue: host returns reply frames to the IOC */
	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;
	kmutex_t		sc_reply_free_mtx;

	/* deferred SAS topology events */
	struct mpii_rcb_list	sc_evt_sas_queue;
	kmutex_t		sc_evt_sas_mtx;
	struct workqueue	*sc_evt_sas_wq;
	struct work		sc_evt_sas_work;

	/* events awaiting acknowledgement to the IOC */
	struct mpii_rcb_list	sc_evt_ack_queue;
	kmutex_t		sc_evt_ack_mtx;
	struct workqueue	*sc_evt_ack_wq;
	struct work		sc_evt_ack_work;

#if NBIO > 0
	/* envsys sensors, one per RAID volume */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
#endif
};
252
253 static int mpii_match(device_t, cfdata_t, void *);
254 static void mpii_attach(device_t, device_t, void *);
255 static int mpii_detach(device_t, int);
256 static void mpii_childdetached(device_t, device_t);
257 static int mpii_rescan(device_t, const char *, const int *);
258
259 static int mpii_intr(void *);
260
261 CFATTACH_DECL3_NEW(mpii, sizeof(struct mpii_softc),
262 mpii_match, mpii_attach, mpii_detach, NULL, mpii_rescan,
263 mpii_childdetached, DVF_DETACH_SHUTDOWN);
264
265 static void mpii_scsipi_request(struct scsipi_channel *,
266 scsipi_adapter_req_t, void *);
267 static void mpii_scsi_cmd_done(struct mpii_ccb *);
268
269 static struct mpii_dmamem *
270 mpii_dmamem_alloc(struct mpii_softc *, size_t);
271 static void mpii_dmamem_free(struct mpii_softc *,
272 struct mpii_dmamem *);
273 static int mpii_alloc_ccbs(struct mpii_softc *);
274 static struct mpii_ccb *mpii_get_ccb(struct mpii_softc *);
275 static void mpii_put_ccb(struct mpii_softc *, struct mpii_ccb *);
276 static int mpii_alloc_replies(struct mpii_softc *);
277 static int mpii_alloc_queues(struct mpii_softc *);
278 static void mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
279 static void mpii_push_replies(struct mpii_softc *);
280
281 static void mpii_scsi_cmd_tmo(void *);
282 static void mpii_scsi_cmd_tmo_handler(struct work *, void *);
283 static void mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
284
285 static int mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
286 static int mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
287 static struct mpii_device *
288 mpii_find_dev(struct mpii_softc *, u_int16_t);
289
290 static void mpii_start(struct mpii_softc *, struct mpii_ccb *);
291 static int mpii_poll(struct mpii_softc *, struct mpii_ccb *);
292 static void mpii_poll_done(struct mpii_ccb *);
293 static struct mpii_rcb *
294 mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
295
296 static void mpii_wait(struct mpii_softc *, struct mpii_ccb *);
297 static void mpii_wait_done(struct mpii_ccb *);
298
299 static void mpii_init_queues(struct mpii_softc *);
300
301 static int mpii_load_xs(struct mpii_ccb *);
302 static int mpii_load_xs_sas3(struct mpii_ccb *);
303
304 static u_int32_t mpii_read(struct mpii_softc *, bus_size_t);
305 static void mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
306 static int mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
307 u_int32_t);
308 static int mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
309 u_int32_t);
310
311 static int mpii_init(struct mpii_softc *);
312 static int mpii_reset_soft(struct mpii_softc *);
313 static int mpii_reset_hard(struct mpii_softc *);
314
315 static int mpii_handshake_send(struct mpii_softc *, void *, size_t);
316 static int mpii_handshake_recv_dword(struct mpii_softc *,
317 u_int32_t *);
318 static int mpii_handshake_recv(struct mpii_softc *, void *, size_t);
319
320 static void mpii_empty_done(struct mpii_ccb *);
321
322 static int mpii_iocinit(struct mpii_softc *);
323 static int mpii_iocfacts(struct mpii_softc *);
324 static int mpii_portfacts(struct mpii_softc *);
325 static int mpii_portenable(struct mpii_softc *);
326 static int mpii_cfg_coalescing(struct mpii_softc *);
327 static int mpii_board_info(struct mpii_softc *);
328 static int mpii_target_map(struct mpii_softc *);
329
330 static int mpii_eventnotify(struct mpii_softc *);
331 static void mpii_eventnotify_done(struct mpii_ccb *);
332 static void mpii_eventack(struct work *, void *);
333 static void mpii_eventack_done(struct mpii_ccb *);
334 static void mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
335 static void mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
336 static void mpii_event_sas(struct mpii_softc *, struct mpii_rcb *);
337 static void mpii_event_sas_work(struct work *, void *);
338 static void mpii_event_raid(struct mpii_softc *,
339 struct mpii_msg_event_reply *);
340 static void mpii_event_discovery(struct mpii_softc *,
341 struct mpii_msg_event_reply *);
342
343 static void mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
344
345 static int mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
346 u_int8_t, u_int32_t, int, void *);
347 static int mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
348 void *, int, void *, size_t);
349
350 #if 0
351 int mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
352 #endif
353
354 #if NBIO > 0
355 static int mpii_ioctl(device_t, u_long, void *);
356 static int mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
357 static int mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
358 static int mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
359 static int mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
360 int, int *);
361 static int mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
362 u_int8_t);
363 static struct mpii_device *
364 mpii_find_vol(struct mpii_softc *, int);
365 #ifndef SMALL_KERNEL
366 static int mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
367 static int mpii_create_sensors(struct mpii_softc *);
368 static void mpii_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
369 static int mpii_destroy_sensors(struct mpii_softc *);
370 #endif /* SMALL_KERNEL */
371 #endif /* NBIO > 0 */
372
373 #define DEVNAME(s) (device_xname((s)->sc_dev))
374
375 #define dwordsof(s) (sizeof(s) / sizeof(u_int32_t))
376
377 #define mpii_read_db(s) mpii_read((s), MPII_DOORBELL)
378 #define mpii_write_db(s, v) mpii_write((s), MPII_DOORBELL, (v))
379 #define mpii_read_intr(s) mpii_read((s), MPII_INTR_STATUS)
380 #define mpii_write_intr(s, v) mpii_write((s), MPII_INTR_STATUS, (v))
381 #define mpii_reply_waiting(s) ((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
382 == MPII_INTR_STATUS_REPLY)
383
384 #define mpii_write_reply_free(s, v) \
385 bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
386 MPII_REPLY_FREE_HOST_INDEX, (v))
387 #define mpii_write_reply_post(s, v) \
388 bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
389 MPII_REPLY_POST_HOST_INDEX, (v))
390
391 #define mpii_wait_db_int(s) mpii_wait_ne((s), MPII_INTR_STATUS, \
392 MPII_INTR_STATUS_IOC2SYSDB, 0)
393 #define mpii_wait_db_ack(s) mpii_wait_eq((s), MPII_INTR_STATUS, \
394 MPII_INTR_STATUS_SYS2IOCDB, 0)
395
396 static inline void
mpii_dvatosge(struct mpii_sge * sge,u_int64_t dva)397 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
398 {
399 sge->sg_addr_lo = htole32(dva);
400 sge->sg_addr_hi = htole32(dva >> 32);
401 }
402
/* config-page request flags, printed with MPII_PG_FMT (%b format) */
#define MPII_PG_EXTENDED	(1<<0)
#define MPII_PG_POLL		(1<<1)
#define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"

/*
 * PCI ids of supported controllers (SAS2xxx and SAS3xxx families).
 * The table is terminated by a zero vendor id.
 */
static const struct mpii_pci_product {
	pci_vendor_id_t		mpii_vendor;
	pci_product_id_t	mpii_product;
} mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3408 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3416 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516_1 },
	{ 0,	0}
};
441
442 static int
mpii_match(device_t parent,cfdata_t match,void * aux)443 mpii_match(device_t parent, cfdata_t match, void *aux)
444 {
445 struct pci_attach_args *pa = aux;
446 const struct mpii_pci_product *mpii;
447
448 for (mpii = mpii_devices; mpii->mpii_vendor != 0; mpii++) {
449 if (PCI_VENDOR(pa->pa_id) == mpii->mpii_vendor &&
450 PCI_PRODUCT(pa->pa_id) == mpii->mpii_product)
451 return (1);
452 }
453 return (0);
454 }
455
456 static void
mpii_attach(device_t parent,device_t self,void * aux)457 mpii_attach(device_t parent, device_t self, void *aux)
458 {
459 struct mpii_softc *sc = device_private(self);
460 struct pci_attach_args *pa = aux;
461 pcireg_t memtype;
462 int r;
463 struct mpii_ccb *ccb;
464 struct scsipi_adapter *adapt = &sc->sc_adapt;
465 struct scsipi_channel *chan = &sc->sc_chan;
466 char intrbuf[PCI_INTRSTR_LEN];
467 const char *intrstr;
468
469 pci_aprint_devinfo(pa, NULL);
470
471 sc->sc_pc = pa->pa_pc;
472 sc->sc_tag = pa->pa_tag;
473 sc->sc_dmat = pa->pa_dmat;
474 sc->sc_dev = self;
475
476 mutex_init(&sc->sc_req_mtx, MUTEX_DEFAULT, IPL_BIO);
477 mutex_init(&sc->sc_rep_mtx, MUTEX_DEFAULT, IPL_BIO);
478
479 /* find the appropriate memory base */
480 for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
481 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
482 if (PCI_MAPREG_TYPE(memtype) == PCI_MAPREG_TYPE_MEM)
483 break;
484 }
485 if (r >= PCI_MAPREG_END) {
486 aprint_error_dev(self,
487 "unable to locate system interface registers\n");
488 return;
489 }
490
491 if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
492 NULL, &sc->sc_ios) != 0) {
493 aprint_error_dev(self,
494 "unable to map system interface registers\n");
495 return;
496 }
497
498 /* disable the expansion rom */
499 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_MAPREG_ROM,
500 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_MAPREG_ROM) &
501 ~PCI_MAPREG_ROM_ENABLE);
502
503 /* disable interrupts */
504 mpii_write(sc, MPII_INTR_MASK,
505 MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
506 MPII_INTR_MASK_DOORBELL);
507
508 /* hook up the interrupt */
509 if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0)) {
510 aprint_error_dev(self, "unable to map interrupt\n");
511 goto unmap;
512 }
513 intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0],
514 intrbuf, sizeof(intrbuf));
515 pci_intr_setattr(pa->pa_pc, &sc->sc_pihp[0], PCI_INTR_MPSAFE, true);
516 sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0], IPL_BIO,
517 mpii_intr, sc, device_xname(self));
518 if (sc->sc_ih == NULL) {
519 aprint_error_dev(self, "couldn't establish interrupt");
520 if (intrstr != NULL)
521 aprint_error(" at %s", intrstr);
522 aprint_error("\n");
523 return;
524 }
525 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
526 aprint_naive("\n");
527
528 if (mpii_iocfacts(sc) != 0) {
529 aprint_error_dev(self, "unable to get iocfacts\n");
530 goto unmap;
531 }
532
533 if (mpii_init(sc) != 0) {
534 aprint_error_dev(self, "unable to initialize ioc\n");
535 goto unmap;
536 }
537
538 if (mpii_alloc_ccbs(sc) != 0) {
539 /* error already printed */
540 goto unmap;
541 }
542
543 if (mpii_alloc_replies(sc) != 0) {
544 aprint_error_dev(self, "unable to allocated reply space\n");
545 goto free_ccbs;
546 }
547
548 if (mpii_alloc_queues(sc) != 0) {
549 aprint_error_dev(self, "unable to allocate reply queues\n");
550 goto free_replies;
551 }
552
553 if (mpii_iocinit(sc) != 0) {
554 aprint_error_dev(self, "unable to send iocinit\n");
555 goto free_queues;
556 }
557
558 if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
559 MPII_DOORBELL_STATE_OPER) != 0) {
560 aprint_error_dev(self, "state: 0x%08x\n",
561 mpii_read_db(sc) & MPII_DOORBELL_STATE);
562 aprint_error_dev(self, "operational state timeout\n");
563 goto free_queues;
564 }
565
566 mpii_push_replies(sc);
567 mpii_init_queues(sc);
568
569 if (mpii_board_info(sc) != 0) {
570 aprint_error_dev(self, "unable to get manufacturing page 0\n");
571 goto free_queues;
572 }
573
574 if (mpii_portfacts(sc) != 0) {
575 aprint_error_dev(self, "unable to get portfacts\n");
576 goto free_queues;
577 }
578
579 if (mpii_target_map(sc) != 0) {
580 aprint_error_dev(self, "unable to setup target mappings\n");
581 goto free_queues;
582 }
583
584 if (mpii_cfg_coalescing(sc) != 0) {
585 aprint_error_dev(self, "unable to configure coalescing\n");
586 goto free_queues;
587 }
588
589 /* XXX bail on unsupported porttype? */
590 if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
591 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) ||
592 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) {
593 if (mpii_eventnotify(sc) != 0) {
594 aprint_error_dev(self, "unable to enable events\n");
595 goto free_queues;
596 }
597 }
598
599 mutex_init(&sc->sc_devs_mtx, MUTEX_DEFAULT, IPL_BIO);
600 sc->sc_devs = malloc(sc->sc_max_devices * sizeof(struct mpii_device *),
601 M_DEVBUF, M_WAITOK | M_ZERO);
602
603 if (mpii_portenable(sc) != 0) {
604 aprint_error_dev(self, "unable to enable port\n");
605 goto free_devs;
606 }
607
608 /* we should be good to go now, attach scsibus */
609 memset(adapt, 0, sizeof(*adapt));
610 adapt->adapt_dev = sc->sc_dev;
611 adapt->adapt_nchannels = 1;
612 adapt->adapt_openings = sc->sc_max_cmds - 4;
613 adapt->adapt_max_periph = adapt->adapt_openings;
614 adapt->adapt_request = mpii_scsipi_request;
615 adapt->adapt_minphys = minphys;
616 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
617
618 memset(chan, 0, sizeof(*chan));
619 chan->chan_adapter = adapt;
620 chan->chan_bustype = &scsi_sas_bustype;
621 chan->chan_channel = 0;
622 chan->chan_flags = 0;
623 chan->chan_nluns = 8;
624 chan->chan_ntargets = sc->sc_max_devices;
625 chan->chan_id = -1;
626
627 mpii_rescan(self, NULL, NULL);
628
629 /* enable interrupts */
630 mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
631 | MPII_INTR_MASK_RESET);
632
633 #if NBIO > 0
634 if (ISSET(sc->sc_flags, MPII_F_RAID)) {
635 if (bio_register(sc->sc_dev, mpii_ioctl) != 0)
636 panic("%s: controller registration failed",
637 DEVNAME(sc));
638 if (mpii_create_sensors(sc) != 0)
639 aprint_error_dev(self, "unable to create sensors\n");
640 }
641 #endif
642
643 return;
644
645 free_devs:
646 free(sc->sc_devs, M_DEVBUF);
647 sc->sc_devs = NULL;
648
649 free_queues:
650 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
651 0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
652 mpii_dmamem_free(sc, sc->sc_reply_freeq);
653
654 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
655 0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
656 mpii_dmamem_free(sc, sc->sc_reply_postq);
657
658 free_replies:
659 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
660 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
661 mpii_dmamem_free(sc, sc->sc_replies);
662
663 free_ccbs:
664 while ((ccb = mpii_get_ccb(sc)) != NULL)
665 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
666 mpii_dmamem_free(sc, sc->sc_requests);
667 free(sc->sc_ccbs, M_DEVBUF);
668
669 unmap:
670 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
671 sc->sc_ios = 0;
672 }
673
/*
 * Detach: tear down children first, then unregister bio/sensors,
 * disestablish the interrupt and release the DMA resources in the
 * reverse order of their allocation in mpii_attach().  sc_ios != 0 is
 * used as "attach completed far enough to have resources to free".
 *
 * NOTE(review): mpii_destroy_sensors() is called here whenever
 * NBIO > 0, but it is only prototyped under !SMALL_KERNEL above --
 * confirm SMALL_KERNEL builds with NBIO > 0 still link.
 */
static int
mpii_detach(device_t self, int flags)
{
	struct mpii_softc *sc = device_private(self);
	int error;
	struct mpii_ccb *ccb;

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	mpii_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_ios != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		free(sc->sc_devs, M_DEVBUF);
		sc->sc_devs = NULL;

		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
		    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_freeq);

		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
		    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_postq);

		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
		    0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_replies);

		/* drain the free list, destroying each ccb's dmamap */
		while ((ccb = mpii_get_ccb(sc)) != NULL)
			bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
		mpii_dmamem_free(sc, sc->sc_requests);
		free(sc->sc_ccbs, M_DEVBUF);

		sc->sc_ios = 0;
	}

	return (0);
}
720
721 static int
mpii_rescan(device_t self,const char * ifattr,const int * locators)722 mpii_rescan(device_t self, const char *ifattr, const int *locators)
723 {
724 struct mpii_softc *sc = device_private(self);
725
726 if (sc->sc_child != NULL)
727 return 0;
728
729 sc->sc_child = config_found(self, &sc->sc_chan, scsiprint, CFARGS_NONE);
730
731 return 0;
732 }
733
734 static void
mpii_childdetached(device_t self,device_t child)735 mpii_childdetached(device_t self, device_t child)
736 {
737 struct mpii_softc *sc = device_private(self);
738
739 KASSERT(self == sc->sc_dev);
740 KASSERT(child == sc->sc_child);
741
742 if (child == sc->sc_child)
743 sc->sc_child = NULL;
744 }
745
746
/*
 * Interrupt handler: drain the reply post queue.
 *
 * Completions (descriptors with a non-zero SMID) and async events
 * (SMID 0) are collected on local lists while holding sc_rep_mtx and
 * are only processed after the lock is dropped, so done/event handlers
 * are free to issue new commands.
 *
 * Returns 1 if any reply descriptor was consumed, 0 otherwise.
 */
static int
mpii_intr(void *arg)
{
	struct mpii_rcb_list evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc *sc = arg;
	struct mpii_reply_descr *postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb *ccb;
	struct mpii_rcb *rcb;
	int smid;
	u_int idx;
	int rv = 0;

	mutex_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an unused descriptor means we have caught up */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = le16toh(rdp->smid);
		rcb = mpii_reply(sc, rdp);

		if (smid) {
			/* SMIDs are 1-based indexes into sc_ccbs */
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		/* the post queue index wraps around */
		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the ioc how far we got, if we consumed anything */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mutex_exit(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events with the reply lock dropped */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
821
/*
 * Build the IEEE scatter/gather list (SAS3 controllers) for a SCSI
 * command inside the request frame and load/sync the data dmamap.
 *
 * The first sc_chain_sge entries fit in the request frame; if more
 * segments are needed, the slot at csge becomes a chain element
 * pointing at the remaining in-frame SGEs that follow it.
 *
 * Returns 0 on success, 1 if the dmamap could not be loaded.
 */
static int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
	struct mpii_ieee_sge *csge, *nsge, *sge;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;	/* reserved chain-element slot */

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	sge = nsge;
	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* skip over the chain slot itself */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((uintptr_t)csge - (uintptr_t)io) / 4;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;
			/* address of the next sge */
			csge->sg_addr = htole64(ccb->ccb_cmd_dva +
			    ((uintptr_t)nsge - (uintptr_t)io));
			csge->sg_len = htole32((dmap->dm_nsegs - i) *
			    sizeof(*sge));
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
879
/*
 * Build the classic MPI scatter/gather list (SAS2 controllers) for a
 * SCSI command inside the request frame and load/sync the data dmamap.
 *
 * Same layout logic as mpii_load_xs_sas3(): the slot at csge becomes a
 * 64-bit chain SGE when there are more segments than fit before it.
 *
 * Returns 0 on success, 1 if the dmamap could not be loaded.
 */
static int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
	struct mpii_sge *csge, *nsge, *sge;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int32_t flags;
	u_int16_t len;
	int i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;	/* reserved chain-element slot */

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	sge = nsge;
	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* skip over the chain slot itself */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((uintptr_t)csge - (uintptr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			csge->sg_hdr = htole32(MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((uintptr_t)nsge - (uintptr_t)io));
		}

		sge = nsge;
		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
945
946 static u_int32_t
mpii_read(struct mpii_softc * sc,bus_size_t r)947 mpii_read(struct mpii_softc *sc, bus_size_t r)
948 {
949 u_int32_t rv;
950
951 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
952 BUS_SPACE_BARRIER_READ);
953 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
954
955 DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);
956
957 return (rv);
958 }
959
960 static void
mpii_write(struct mpii_softc * sc,bus_size_t r,u_int32_t v)961 mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
962 {
963 DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);
964
965 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
966 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
967 BUS_SPACE_BARRIER_WRITE);
968 }
969
970
971 static int
mpii_wait_eq(struct mpii_softc * sc,bus_size_t r,u_int32_t mask,u_int32_t target)972 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
973 u_int32_t target)
974 {
975 int i;
976
977 DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
978 mask, target);
979
980 for (i = 0; i < 15000; i++) {
981 if ((mpii_read(sc, r) & mask) == target)
982 return (0);
983 delay(1000);
984 }
985
986 return (1);
987 }
988
989 static int
mpii_wait_ne(struct mpii_softc * sc,bus_size_t r,u_int32_t mask,u_int32_t target)990 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
991 u_int32_t target)
992 {
993 int i;
994
995 DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
996 mask, target);
997
998 for (i = 0; i < 15000; i++) {
999 if ((mpii_read(sc, r) & mask) != target)
1000 return (0);
1001 delay(1000);
1002 }
1003
1004 return (1);
1005 }
1006
/*
 * Bring the IOC to a usable (READY or peer-owned) state by stepping
 * its doorbell state machine, resetting it where necessary.  Up to 5
 * attempts are made.  Returns 0 on success, 1 on failure/timeout.
 */
static int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t db;
	int i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/* soft reset preserves event replay, when supported */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		/* re-sample the doorbell for the next iteration */
		db = mpii_read_db(sc);
	}

	return (1);
}
1064
/*
 * Request a message-unit (soft) reset through the doorbell and wait
 * for the IOC to acknowledge and return to the READY state.
 * Returns 0 on success, 1 if the doorbell is busy or the IOC does not
 * respond in time.
 */
static int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1090
/*
 * Perform a host diagnostic (hard) reset: unlock the diagnostic
 * register with the magic write sequence, pulse the reset-adapter bit,
 * then poll until the bit clears and re-lock the register.
 * Returns 0 on success, 1 if the diagnostic register could not be
 * write-enabled.
 */
static int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX read the host diagnostic reg until reset adapter bit clears ? */
	/* up to 30000 * 10ms = 5 minutes of polling */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1143
/*
 * Send a request to the IOC through the doorbell register, one dword at
 * a time.  Used before the request/reply queues are operational (e.g.
 * IOCFacts, IOCInit).  'buf' holds 'dwords' 32-bit words in host order;
 * each word is converted to little-endian on the wire.
 * Returns 0 on success, 1 if the doorbell is busy or the IOC fails to
 * acknowledge a step.
 */
static int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t *query = buf;
	int i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1186
/*
 * Read one dword of a handshake reply.  The doorbell delivers data
 * 16 bits at a time; two reads are assembled into *dword, stored in
 * reply (little-endian) order.  Each read is preceded by waiting for
 * the doorbell interrupt and followed by acking it.
 * Returns 0 on success, 1 on interrupt timeout.
 */
static int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t *words = (u_int16_t *)dword;
	int i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = le16toh(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1202
/*
 * Receive a handshake reply into 'buf' (capacity 'dwords' 32-bit
 * words).  The first dword carries the reply header, whose msg_length
 * field gives the true reply size in dwords; any excess beyond the
 * caller's buffer is read and discarded.  Finishes by waiting for the
 * doorbell to be released and clearing the interrupt.
 * Returns 0 on success, 1 on any timeout.
 */
static int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply *reply = buf;
	u_int32_t *dbuf = buf, dummy;
	int i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1245
/*
 * Completion callback for commands whose reply needs no asynchronous
 * processing; the issuer (e.g. mpii_poll() callers) inspects the ccb
 * and its reply itself.
 */
static void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1251
/*
 * Issue an IOCFacts request over the doorbell handshake and use the
 * reply to size the driver's resources: command credit, reply free and
 * reply post queue depths, request/reply frame sizes and the
 * scatter-gather layout.  Returns 0 on success, 1 on handshake failure
 * or if the IOC's queue limits are unusable.
 */
static int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request ifq;
	struct mpii_msg_iocfacts_reply ifp;
	int irs;
	int sge_size;
	u_int qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + le16toh(ifp.max_targets);

	if (ISSET(le32toh(ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(le32toh(ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	/* never use more commands than the IOC extends credit for */
	sc->sc_max_cmds = MIN(le16toh(ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to the IOC's advertised limit and rescale if needed */
	qdepth = le16toh(ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = le16toh(ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element.
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;

	return (0);
}
1388
/*
 * Issue an IOCInit request over the doorbell handshake, telling the
 * IOC the host's request frame size, queue depths and the DMA
 * addresses of the request, reply, reply-post and reply-free areas.
 * Returns 0 on success, 1 on handshake failure or a non-success
 * ioc_status/ioc_loginfo in the reply.
 */
static int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request iiq;
	struct mpii_msg_iocinit_reply iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is expressed in dwords */
	iiq.system_request_frame_size = htole16(sc->sc_request_size / 4);

	iiq.reply_descriptor_post_queue_depth =
	    htole16(sc->sc_reply_post_qdepth);

	iiq.reply_free_queue_depth = htole16(sc->sc_reply_free_qdepth);

	/* sense buffers live inside the request frames */
	iiq.sense_buffer_address_high =
	    htole32(MPII_DMA_DVA(sc->sc_requests) >> 32);

	iiq.system_reply_address_high =
	    htole32(MPII_DMA_DVA(sc->sc_replies) >> 32);

	iiq.system_request_frame_base_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_requests));
	iiq.system_request_frame_base_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_requests) >> 32);

	iiq.reply_descriptor_post_queue_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_reply_postq));
	iiq.reply_descriptor_post_queue_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	iiq.reply_free_queue_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_reply_freeq));
	iiq.reply_free_queue_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s: function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s: msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s: vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    le16toh(iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    le32toh(iip.ioc_loginfo));

	if (le16toh(iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    le32toh(iip.ioc_loginfo))
		return (1);

	return (0);
}
1471
1472 static void
mpii_push_reply(struct mpii_softc * sc,struct mpii_rcb * rcb)1473 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1474 {
1475 u_int32_t *rfp;
1476 u_int idx;
1477
1478 if (rcb == NULL)
1479 return;
1480
1481 mutex_enter(&sc->sc_reply_free_mtx);
1482 idx = sc->sc_reply_free_host_index;
1483
1484 rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1485 rfp[idx] = htole32(rcb->rcb_reply_dva);
1486
1487 if (++idx >= sc->sc_reply_free_qdepth)
1488 idx = 0;
1489
1490 mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
1491 mutex_exit(&sc->sc_reply_free_mtx);
1492 }
1493
/*
 * Issue a PortFacts request for port 0 using a regular ccb (queues are
 * operational by now) and record the reported port type.
 * Returns 0 on success, 1 on failure; the ccb is always released.
 */
static int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request *pfq;
	struct mpii_msg_portfacts_reply *pfp;
	struct mpii_ccb *ccb;
	int rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the IOC */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	mpii_put_ccb(sc, ccb);

	return (rv);
}
1545
/*
 * Workqueue handler: drain the queued event replies that the IOC
 * flagged as requiring acknowledgement, sending an EventAck request
 * for each.  The reply frame is returned to the IOC before the ack is
 * started; completion is handled by mpii_eventack_done().
 */
static void
mpii_eventack(struct work *wk, void * cookie)
{
	struct mpii_softc *sc = cookie;
	struct mpii_ccb *ccb;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_msg_eventack_request *eaq;

	/* take the whole pending list in one go under the lock */
	mutex_enter(&sc->sc_evt_ack_mtx);
	next = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mutex_exit(&sc->sc_evt_ack_mtx);

	while (next != NULL) {
		rcb = next;
		next = SIMPLEQ_NEXT(rcb, rcb_link);

		enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

		/*
		 * NOTE(review): mpii_get_ccb() result is not checked for
		 * NULL here, unlike other callers — confirm it cannot fail
		 * in this context.
		 */
		ccb = mpii_get_ccb(sc);
		ccb->ccb_done = mpii_eventack_done;
		eaq = ccb->ccb_cmd;

		eaq->function = MPII_FUNCTION_EVENT_ACK;

		/* echo back the event and context being acknowledged */
		eaq->event = enp->event;
		eaq->event_context = enp->event_context;

		mpii_push_reply(sc, rcb);

		mpii_start(sc, ccb);
	}
}
1580
/*
 * Completion callback for EventAck requests: recycle the reply frame
 * (if any) and release the ccb.
 */
static void
mpii_eventack_done(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;

	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));

	mpii_push_reply(sc, ccb->ccb_rcb);
	mpii_put_ccb(sc, ccb);
}
1591
1592 static int
mpii_portenable(struct mpii_softc * sc)1593 mpii_portenable(struct mpii_softc *sc)
1594 {
1595 struct mpii_msg_portenable_request *peq;
1596 struct mpii_ccb *ccb;
1597
1598 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1599
1600 ccb = mpii_get_ccb(sc);
1601 if (ccb == NULL) {
1602 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1603 DEVNAME(sc));
1604 return (1);
1605 }
1606
1607 ccb->ccb_done = mpii_empty_done;
1608 peq = ccb->ccb_cmd;
1609
1610 peq->function = MPII_FUNCTION_PORT_ENABLE;
1611 peq->vf_id = sc->sc_vf_id;
1612
1613 if (mpii_poll(sc, ccb) != 0) {
1614 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1615 DEVNAME(sc));
1616 return (1);
1617 }
1618
1619 if (ccb->ccb_rcb == NULL) {
1620 DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1621 DEVNAME(sc));
1622 return (1);
1623 }
1624
1625 mpii_push_reply(sc, ccb->ccb_rcb);
1626 mpii_put_ccb(sc, ccb);
1627
1628 return (0);
1629 }
1630
1631 static int
mpii_cfg_coalescing(struct mpii_softc * sc)1632 mpii_cfg_coalescing(struct mpii_softc *sc)
1633 {
1634 struct mpii_cfg_hdr hdr;
1635 struct mpii_cfg_ioc_pg1 ipg;
1636
1637 hdr.page_version = 0;
1638 hdr.page_length = sizeof(ipg) / 4;
1639 hdr.page_number = 1;
1640 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1641 memset(&ipg, 0, sizeof(ipg));
1642 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1643 sizeof(ipg)) != 0) {
1644 DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1645 "page 1\n", DEVNAME(sc));
1646 return (1);
1647 }
1648
1649 if (!ISSET(le32toh(ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1650 return (0);
1651
1652 /* Disable coalescing */
1653 CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1654 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1655 sizeof(ipg)) != 0) {
1656 DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1657 DEVNAME(sc));
1658 return (1);
1659 }
1660
1661 return (0);
1662 }
1663
/*
 * Mask (disable) delivery of all events in an event notification
 * request.  All-ones needs no endian conversion.
 */
#define MPII_EVENT_MASKALL(enq)	do {				\
		(enq)->event_masks[0] = 0xffffffff;		\
		(enq)->event_masks[1] = 0xffffffff;		\
		(enq)->event_masks[2] = 0xffffffff;		\
		(enq)->event_masks[3] = 0xffffffff;		\
	} while (0)

/*
 * Unmask (enable) delivery of a single event.  Arguments are
 * parenthesized, and the shift uses an unsigned constant so that
 * bit 31 does not shift into the sign bit (undefined behaviour for
 * a signed 1 << 31).
 */
#define MPII_EVENT_UNMASK(enq, evt)	do {			\
		(enq)->event_masks[(evt) / 32] &=		\
		    htole32(~(1U << ((evt) % 32)));		\
	} while (0)
1675
/*
 * Set up asynchronous event handling: create the two workqueues and
 * their pending-reply queues (one for deferred SAS topology work, one
 * for event acknowledgements), then submit an EventNotification
 * request with only the events this driver handles unmasked.
 * Returns 0 on success, 1 on failure.
 */
static int
mpii_eventnotify(struct mpii_softc *sc)
{
	struct mpii_msg_event_request *enq;
	struct mpii_ccb *ccb;
	char wkname[15];

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mutex_init(&sc->sc_evt_sas_mtx, MUTEX_DEFAULT, IPL_BIO);
	snprintf(wkname, sizeof(wkname), "%ssas", DEVNAME(sc));
	if (workqueue_create(&sc->sc_evt_sas_wq, wkname,
	    mpii_event_sas_work, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
		mpii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev,
		    "can't create %s workqueue\n", wkname);
		return 1;
	}

	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mutex_init(&sc->sc_evt_ack_mtx, MUTEX_DEFAULT, IPL_BIO);
	snprintf(wkname, sizeof(wkname), "%sevt", DEVNAME(sc));
	if (workqueue_create(&sc->sc_evt_ack_wq, wkname,
	    mpii_eventack, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
		/*
		 * NOTE(review): sc_evt_sas_wq (created above) is not
		 * destroyed on this error path — confirm whether attach
		 * failure cleans it up elsewhere.
		 */
		mpii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev,
		    "can't create %s workqueue\n", wkname);
		return 1;
	}

	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}
1744
/*
 * Completion callback for the EventNotification request: release the
 * ccb and hand the event reply to the common event dispatcher, which
 * is responsible for eventually recycling the reply frame.
 */
static void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct mpii_rcb *rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	mpii_put_ccb(sc, ccb);
	mpii_event_process(sc, rcb);
}
1756
/*
 * Handle an IR (integrated RAID) configuration change list event:
 * walk the change elements and add/remove volume entries in the
 * device table, and flag physical disks that become volume members or
 * hot spares as hidden.  Foreign configurations are ignored.
 */
static void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list *ccl;
	struct mpii_evt_ir_cfg_element *ce;
	struct mpii_device *dev;
	u_int16_t type;
	int i;

	/* the change list immediately follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(le32toh(ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (le16toh(ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				/* allocate before taking the mutex */
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				mutex_enter(&sc->sc_devs_mtx);
				if (mpii_find_dev(sc,
				    le16toh(ce->vol_dev_handle))) {
					mutex_exit(&sc->sc_devs_mtx);
					free(dev, M_DEVBUF);
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    le16toh(ce->vol_dev_handle));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = le16toh(ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					mutex_exit(&sc->sc_devs_mtx);
					free(dev, M_DEVBUF);
					break;
				}
				sc->sc_vd_count++;
				mutex_exit(&sc->sc_devs_mtx);
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->vol_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				mutex_exit(&sc->sc_devs_mtx);
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->phys_disk_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
				mutex_exit(&sc->sc_devs_mtx);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->phys_disk_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
				mutex_exit(&sc->sc_devs_mtx);
			}
			break;
		}
	}
}
1860
/*
 * Handle a SAS topology change list event.  Device additions are
 * processed inline (a new entry goes into the device table); removals
 * are deferred to the sas workqueue, which also owns returning the
 * reply frame in that case.  If nothing was deferred, the reply is
 * completed here via mpii_event_done().
 */
static void
mpii_event_sas(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl *tcl;
	struct mpii_evt_phy_entry *pe;
	struct mpii_device *dev;
	int i;
	u_int16_t handle;
	int need_queue = 0;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	DNPRINTF(MPII_D_EVT, "%s: mpii_event_sas 0x%x\n",
	    DEVNAME(sc), le16toh(enp->event));
	KASSERT(le16toh(enp->event) == MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);

	/* the change list and its phy entries follow the reply header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		DNPRINTF(MPII_D_EVT, "%s: sas change %d stat %d h %d slot %d phy %d enc %d expand %d\n",
		    DEVNAME(sc), i, pe->phy_status,
		    le16toh(pe->dev_handle),
		    sc->sc_pd_id_start + tcl->start_phy_num + i,
		    tcl->start_phy_num + i, le16toh(tcl->enclosure_handle), le16toh(tcl->expander_handle));

		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = le16toh(pe->dev_handle);
			DNPRINTF(MPII_D_EVT, "%s: sas add handle %d\n",
			    DEVNAME(sc), handle);
			/* allocate before taking the mutex */
			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			mutex_enter(&sc->sc_devs_mtx);
			if (mpii_find_dev(sc, handle)) {
				mutex_exit(&sc->sc_devs_mtx);
				free(dev, M_DEVBUF);
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = le16toh(tcl->enclosure_handle);
			dev->expander = le16toh(tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				mutex_exit(&sc->sc_devs_mtx);
				free(dev, M_DEVBUF);
				break;
			}
			printf("%s: physical device inserted in slot %d\n",
			    DEVNAME(sc), dev->slot);
			mutex_exit(&sc->sc_devs_mtx);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			/* defer to workqueue thread */
			need_queue++;
			break;
		}
	}

	if (need_queue) {
		bool start_wk;
		mutex_enter(&sc->sc_evt_sas_mtx);
		/* only enqueue work if the queue was previously empty */
		start_wk = (SIMPLEQ_FIRST(&sc->sc_evt_sas_queue) == 0);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		if (start_wk) {
			workqueue_enqueue(sc->sc_evt_sas_wq,
			    &sc->sc_evt_sas_work, NULL);
		}
		mutex_exit(&sc->sc_evt_sas_mtx);
	} else
		mpii_event_done(sc, rcb);
}
1940
/*
 * Workqueue handler for deferred SAS topology changes: process the
 * device-removal entries queued by mpii_event_sas(), detaching the
 * scsipi target where visible and freeing the device entry, then
 * complete each reply via mpii_event_done().
 */
static void
mpii_event_sas_work(struct work *wq, void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl *tcl;
	struct mpii_evt_phy_entry *pe;
	struct mpii_device *dev;
	int i;

	/* take the whole pending list in one go under the lock */
	mutex_enter(&sc->sc_evt_sas_mtx);
	next = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mutex_exit(&sc->sc_evt_sas_mtx);

	while (next != NULL) {
		rcb = next;
		next = SIMPLEQ_NEXT(rcb, rcb_link);

		enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
		DNPRINTF(MPII_D_EVT, "%s: mpii_event_sas_work 0x%x\n",
		    DEVNAME(sc), le16toh(enp->event));
		KASSERT(le16toh(enp->event) == MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
		tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
		pe = (struct mpii_evt_phy_entry *)(tcl + 1);

		for (i = 0; i < tcl->num_entries; i++, pe++) {
			DNPRINTF(MPII_D_EVT, "%s: sas change %d stat %d h %d slot %d phy %d enc %d expand %d\n",
			    DEVNAME(sc), i, pe->phy_status,
			    le16toh(pe->dev_handle),
			    sc->sc_pd_id_start + tcl->start_phy_num + i,
			    tcl->start_phy_num + i, le16toh(tcl->enclosure_handle), le16toh(tcl->expander_handle));

			switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
			case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
				/* already handled */
				break;

			case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
				mutex_enter(&sc->sc_devs_mtx);
				dev = mpii_find_dev(sc, le16toh(pe->dev_handle));
				if (dev == NULL) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}

				printf(
				    "%s: physical device removed from slot %d\n",
				    DEVNAME(sc), dev->slot);
				/* unlink under the lock, then tear down */
				mpii_remove_dev(sc, dev);
				mutex_exit(&sc->sc_devs_mtx);
				mpii_sas_remove_device(sc, dev->dev_handle);
				if (!ISSET(dev->flags, MPII_DF_HIDDEN)) {
					scsipi_target_detach(&sc->sc_chan,
					    dev->slot, 0, DETACH_FORCE);
				}

				free(dev, M_DEVBUF);
				break;
			}
		}
		mpii_event_done(sc, rcb);
	}
}
2006
2007 static void
mpii_event_discovery(struct mpii_softc * sc,struct mpii_msg_event_reply * enp)2008 mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
2009 {
2010 struct mpii_evt_sas_discovery *esd =
2011 (struct mpii_evt_sas_discovery *)(enp + 1);
2012
2013 if (esd->reason_code == MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED) {
2014 if (esd->discovery_status != 0) {
2015 printf("%s: sas discovery completed with status %#x\n",
2016 DEVNAME(sc), esd->discovery_status);
2017 }
2018
2019 }
2020 }
2021
/*
 * Dispatch an asynchronous event reply to its handler.  Most events
 * finish with mpii_event_done() at the bottom; SAS topology changes
 * return early because mpii_event_sas() takes over ownership of the
 * reply (it may defer completion to a workqueue).
 */
static void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    le16toh(enp->event));

	switch (le16toh(enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		break;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		mpii_event_sas(sc, rcb);
		/* mpii_event_sas() owns the rcb from here on */
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume *evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device *dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		if (cold)
			break;
		mutex_enter(&sc->sc_devs_mtx);
		dev = mpii_find_dev(sc, le16toh(evd->vol_dev_handle));
		if (dev == NULL) {
			mutex_exit(&sc->sc_devs_mtx);
			break;
		}
#if NBIO > 0
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
		}
		/*
		 * The brace above closes the case's declaration scope;
		 * the mutex taken inside is released here, still within
		 * the same case before its break.
		 */
		mutex_exit(&sc->sc_devs_mtx);
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status *evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device *dev;

		mutex_enter(&sc->sc_devs_mtx);
		dev = mpii_find_dev(sc, le16toh(evs->vol_dev_handle));
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		mutex_exit(&sc->sc_devs_mtx);
		break;
	}
	default:
		DNPRINTF(MPII_D_EVT, "%s: unhandled event 0x%02x\n",
		    DEVNAME(sc), le16toh(enp->event));
	}

	mpii_event_done(sc, rcb);
}
2109
2110 static void
mpii_event_done(struct mpii_softc * sc,struct mpii_rcb * rcb)2111 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
2112 {
2113 struct mpii_msg_event_reply *enp = rcb->rcb_reply;
2114 bool need_start;
2115
2116 if (enp->ack_required) {
2117 mutex_enter(&sc->sc_evt_ack_mtx);
2118 need_start = (SIMPLEQ_FIRST(&sc->sc_evt_ack_queue) == 0);
2119 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2120 if (need_start)
2121 workqueue_enqueue(sc->sc_evt_ack_wq,
2122 &sc->sc_evt_ack_work, NULL);
2123 mutex_exit(&sc->sc_evt_ack_mtx);
2124 } else
2125 mpii_push_reply(sc, rcb);
2126 }
2127
/*
 * Detach a device from the IOC: first reset the target so that any
 * outstanding commands are aborted, then tell the SAS IO unit to drop
 * the device handle.  Both requests are issued synchronously on the
 * same ccb.
 */
static void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request *stq;
	struct mpii_msg_sas_oper_request *soq;
	struct mpii_ccb *ccb;

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL)
		return;

	/* step 1: target reset (ccb_cmd is zeroed by mpii_put_ccb) */
	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	stq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	/* step 2: ask the IO unit to remove the device handle */
	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	soq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	mpii_put_ccb(sc, ccb);
}
2167
2168 static int
mpii_board_info(struct mpii_softc * sc)2169 mpii_board_info(struct mpii_softc *sc)
2170 {
2171 struct mpii_msg_iocfacts_request ifq;
2172 struct mpii_msg_iocfacts_reply ifp;
2173 struct mpii_cfg_manufacturing_pg0 mpg;
2174 struct mpii_cfg_hdr hdr;
2175
2176 memset(&ifq, 0, sizeof(ifq));
2177 memset(&ifp, 0, sizeof(ifp));
2178
2179 ifq.function = MPII_FUNCTION_IOC_FACTS;
2180
2181 if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
2182 DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
2183 DEVNAME(sc));
2184 return (1);
2185 }
2186
2187 if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
2188 DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
2189 DEVNAME(sc));
2190 return (1);
2191 }
2192
2193 hdr.page_version = 0;
2194 hdr.page_length = sizeof(mpg) / 4;
2195 hdr.page_number = 0;
2196 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
2197 memset(&mpg, 0, sizeof(mpg));
2198 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
2199 sizeof(mpg)) != 0) {
2200 printf("%s: unable to fetch manufacturing page 0\n",
2201 DEVNAME(sc));
2202 return (EINVAL);
2203 }
2204
2205 printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
2206 mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
2207 ifp.fw_version_unit, ifp.fw_version_dev,
2208 ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
2209 ifp.msg_version_maj, ifp.msg_version_min);
2210
2211 return (0);
2212 }
2213
2214 static int
mpii_target_map(struct mpii_softc * sc)2215 mpii_target_map(struct mpii_softc *sc)
2216 {
2217 struct mpii_cfg_hdr hdr;
2218 struct mpii_cfg_ioc_pg8 ipg;
2219 int flags, pad = 0;
2220
2221 hdr.page_version = 0;
2222 hdr.page_length = sizeof(ipg) / 4;
2223 hdr.page_number = 8;
2224 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
2225 memset(&ipg, 0, sizeof(ipg));
2226 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
2227 sizeof(ipg)) != 0) {
2228 printf("%s: unable to fetch ioc page 8\n",
2229 DEVNAME(sc));
2230 return (EINVAL);
2231 }
2232
2233 if (le16toh(ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
2234 pad = 1;
2235
2236 flags = le16toh(ipg.ir_volume_mapping_flags) &
2237 MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
2238 if (ISSET(sc->sc_flags, MPII_F_RAID)) {
2239 if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
2240 sc->sc_vd_id_low += pad;
2241 pad = sc->sc_max_volumes; /* for sc_pd_id_start */
2242 } else
2243 sc->sc_vd_id_low = sc->sc_max_devices -
2244 sc->sc_max_volumes;
2245 }
2246
2247 sc->sc_pd_id_start += pad;
2248
2249 return (0);
2250 }
2251
2252 static int
mpii_req_cfg_header(struct mpii_softc * sc,u_int8_t type,u_int8_t number,u_int32_t address,int flags,void * p)2253 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2254 u_int32_t address, int flags, void *p)
2255 {
2256 struct mpii_msg_config_request *cq;
2257 struct mpii_msg_config_reply *cp;
2258 struct mpii_ccb *ccb;
2259 struct mpii_cfg_hdr *hdr = p;
2260 struct mpii_ecfg_hdr *ehdr = p;
2261 int etype = 0;
2262 int rv = 0;
2263
2264 DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2265 "address: 0x%08x flags: 0x%x\n", DEVNAME(sc), type, number,
2266 address, flags);
2267
2268 ccb = mpii_get_ccb(sc);
2269 if (ccb == NULL) {
2270 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2271 DEVNAME(sc));
2272 return (1);
2273 }
2274
2275 if (ISSET(flags, MPII_PG_EXTENDED)) {
2276 etype = type;
2277 type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2278 }
2279
2280 cq = ccb->ccb_cmd;
2281
2282 cq->function = MPII_FUNCTION_CONFIG;
2283
2284 cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2285
2286 cq->config_header.page_number = number;
2287 cq->config_header.page_type = type;
2288 cq->ext_page_type = etype;
2289 cq->page_address = htole32(address);
2290 cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2291 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2292
2293 ccb->ccb_done = mpii_empty_done;
2294 if (ISSET(flags, MPII_PG_POLL)) {
2295 if (mpii_poll(sc, ccb) != 0) {
2296 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2297 DEVNAME(sc));
2298 return (1);
2299 }
2300 } else
2301 mpii_wait(sc, ccb);
2302
2303 if (ccb->ccb_rcb == NULL) {
2304 mpii_put_ccb(sc, ccb);
2305 return (1);
2306 }
2307 cp = ccb->ccb_rcb->rcb_reply;
2308
2309 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x sgl_flags: 0x%02x "
2310 "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2311 cp->sgl_flags, cp->msg_length, cp->function);
2312 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2313 "msg_flags: 0x%02x\n", DEVNAME(sc),
2314 le16toh(cp->ext_page_length), cp->ext_page_type,
2315 cp->msg_flags);
2316 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2317 cp->vp_id, cp->vf_id);
2318 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2319 le16toh(cp->ioc_status));
2320 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2321 le32toh(cp->ioc_loginfo));
2322 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2323 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2324 cp->config_header.page_version,
2325 cp->config_header.page_length,
2326 cp->config_header.page_number,
2327 cp->config_header.page_type);
2328
2329 if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2330 rv = 1;
2331 else if (ISSET(flags, MPII_PG_EXTENDED)) {
2332 memset(ehdr, 0, sizeof(*ehdr));
2333 ehdr->page_version = cp->config_header.page_version;
2334 ehdr->page_number = cp->config_header.page_number;
2335 ehdr->page_type = cp->config_header.page_type;
2336 ehdr->ext_page_length = cp->ext_page_length;
2337 ehdr->ext_page_type = cp->ext_page_type;
2338 } else
2339 *hdr = cp->config_header;
2340
2341 mpii_push_reply(sc, ccb->ccb_rcb);
2342 mpii_put_ccb(sc, ccb);
2343
2344 return (rv);
2345 }
2346
2347 static int
mpii_req_cfg_page(struct mpii_softc * sc,u_int32_t address,int flags,void * p,int read,void * page,size_t len)2348 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2349 void *p, int read, void *page, size_t len)
2350 {
2351 struct mpii_msg_config_request *cq;
2352 struct mpii_msg_config_reply *cp;
2353 struct mpii_ccb *ccb;
2354 struct mpii_cfg_hdr *hdr = p;
2355 struct mpii_ecfg_hdr *ehdr = p;
2356 uintptr_t kva;
2357 int page_length;
2358 int rv = 0;
2359
2360 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2361 "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2362
2363 page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2364 le16toh(ehdr->ext_page_length) : hdr->page_length;
2365
2366 if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2367 return (1);
2368
2369 ccb = mpii_get_ccb(sc);
2370 if (ccb == NULL) {
2371 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2372 DEVNAME(sc));
2373 return (1);
2374 }
2375
2376 cq = ccb->ccb_cmd;
2377
2378 cq->function = MPII_FUNCTION_CONFIG;
2379
2380 cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2381 MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2382
2383 if (ISSET(flags, MPII_PG_EXTENDED)) {
2384 cq->config_header.page_version = ehdr->page_version;
2385 cq->config_header.page_number = ehdr->page_number;
2386 cq->config_header.page_type = ehdr->page_type;
2387 cq->ext_page_len = ehdr->ext_page_length;
2388 cq->ext_page_type = ehdr->ext_page_type;
2389 } else
2390 cq->config_header = *hdr;
2391 cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2392 cq->page_address = htole32(address);
2393 cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2394 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2395 MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2396 (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2397
2398 /* bounce the page via the request space to avoid more bus_dma games */
2399 mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2400 sizeof(struct mpii_msg_config_request));
2401
2402 kva = (uintptr_t)ccb->ccb_cmd;
2403 kva += sizeof(struct mpii_msg_config_request);
2404
2405 if (!read)
2406 memcpy((void *)kva, page, len);
2407
2408 ccb->ccb_done = mpii_empty_done;
2409 if (ISSET(flags, MPII_PG_POLL)) {
2410 if (mpii_poll(sc, ccb) != 0) {
2411 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2412 DEVNAME(sc));
2413 return (1);
2414 }
2415 } else
2416 mpii_wait(sc, ccb);
2417
2418 if (ccb->ccb_rcb == NULL) {
2419 mpii_put_ccb(sc, ccb);
2420 return (1);
2421 }
2422 cp = ccb->ccb_rcb->rcb_reply;
2423
2424 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x msg_length: %d "
2425 "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2426 cp->function);
2427 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2428 "msg_flags: 0x%02x\n", DEVNAME(sc),
2429 le16toh(cp->ext_page_length), cp->ext_page_type,
2430 cp->msg_flags);
2431 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2432 cp->vp_id, cp->vf_id);
2433 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2434 le16toh(cp->ioc_status));
2435 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2436 le32toh(cp->ioc_loginfo));
2437 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2438 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2439 cp->config_header.page_version,
2440 cp->config_header.page_length,
2441 cp->config_header.page_number,
2442 cp->config_header.page_type);
2443
2444 if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2445 rv = 1;
2446 else if (read)
2447 memcpy(page, (void *)kva, len);
2448
2449 mpii_push_reply(sc, ccb->ccb_rcb);
2450 mpii_put_ccb(sc, ccb);
2451
2452 return (rv);
2453 }
2454
/*
 * Convert a reply post queue descriptor into the rcb it refers to (or
 * NULL for non-address replies) and mark the descriptor slot free
 * again.  Called with sc_rep_mtx held.
 */
static struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb *rcb = NULL;
	u_int32_t rfid;

	KASSERT(mutex_owned(&sc->sc_rep_mtx));
	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	/*
	 * Address replies carry the dva of a reply frame; turn it back
	 * into a frame index to locate the matching rcb.
	 */
	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		rfid = (le32toh(rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* all-ones marks the descriptor slot as unused */
	memset(rdp, 0xff, sizeof(*rdp));

	/* NOTE(review): the constant 8 here is presumably
	 * sizeof(struct mpii_reply_descr) — confirm against mpiireg.h */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2485
2486 static struct mpii_dmamem *
mpii_dmamem_alloc(struct mpii_softc * sc,size_t size)2487 mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
2488 {
2489 struct mpii_dmamem *mdm;
2490 int nsegs;
2491
2492 mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
2493 mdm->mdm_size = size;
2494
2495 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2496 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
2497 goto mdmfree;
2498
2499 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
2500 1, &nsegs, BUS_DMA_NOWAIT) != 0)
2501 goto destroy;
2502
2503 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
2504 &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
2505 goto free;
2506
2507 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
2508 NULL, BUS_DMA_NOWAIT) != 0)
2509 goto unmap;
2510
2511 memset(mdm->mdm_kva, 0, size);
2512
2513 return (mdm);
2514
2515 unmap:
2516 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
2517 free:
2518 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2519 destroy:
2520 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2521 mdmfree:
2522 free(mdm, M_DEVBUF);
2523
2524 return (NULL);
2525 }
2526
2527 static void
mpii_dmamem_free(struct mpii_softc * sc,struct mpii_dmamem * mdm)2528 mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
2529 {
2530 DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);
2531
2532 bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
2533 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
2534 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2535 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2536 free(mdm, M_DEVBUF);
2537 }
2538
2539 static int
mpii_insert_dev(struct mpii_softc * sc,struct mpii_device * dev)2540 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2541 {
2542 int slot; /* initial hint */
2543
2544 KASSERT(mutex_owned(&sc->sc_devs_mtx));
2545 DNPRINTF(MPII_D_EVT, "%s: mpii_insert_dev wants slot %d\n",
2546 DEVNAME(sc), dev->slot);
2547 if (dev == NULL || dev->slot < 0)
2548 return (1);
2549 slot = dev->slot;
2550
2551 while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2552 slot++;
2553
2554 if (slot >= sc->sc_max_devices)
2555 return (1);
2556
2557 DNPRINTF(MPII_D_EVT, "%s: mpii_insert_dev alloc slot %d\n",
2558 DEVNAME(sc), slot);
2559
2560 dev->slot = slot;
2561 sc->sc_devs[slot] = dev;
2562
2563 return (0);
2564 }
2565
2566 static int
mpii_remove_dev(struct mpii_softc * sc,struct mpii_device * dev)2567 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2568 {
2569 int i;
2570
2571 KASSERT(mutex_owned(&sc->sc_devs_mtx));
2572 if (dev == NULL)
2573 return (1);
2574
2575 for (i = 0; i < sc->sc_max_devices; i++) {
2576 if (sc->sc_devs[i] == NULL)
2577 continue;
2578
2579 if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2580 sc->sc_devs[i] = NULL;
2581 return (0);
2582 }
2583 }
2584
2585 return (1);
2586 }
2587
2588 static struct mpii_device *
mpii_find_dev(struct mpii_softc * sc,u_int16_t handle)2589 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2590 {
2591 int i;
2592 KASSERT(mutex_owned(&sc->sc_devs_mtx));
2593
2594 for (i = 0; i < sc->sc_max_devices; i++) {
2595 if (sc->sc_devs[i] == NULL)
2596 continue;
2597
2598 if (sc->sc_devs[i]->dev_handle == handle)
2599 return (sc->sc_devs[i]);
2600 }
2601
2602 return (NULL);
2603 }
2604
/*
 * Allocate the ccb array and the DMA space for the request frames,
 * wire each ccb to its frame, and populate the free list.  Returns 0
 * on success, 1 on failure.
 */
static int
mpii_alloc_ccbs(struct mpii_softc *sc)
{
	struct mpii_ccb *ccb;
	u_int8_t *cmd;
	int i;
	char wqname[16];

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
	mutex_init(&sc->sc_ccb_free_mtx, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_ccb_free_cv, "mpii_ccbs");
	mutex_init(&sc->sc_ssb_tmomtx, MUTEX_DEFAULT, IPL_BIO);
	snprintf(wqname, sizeof(wqname) - 1, "%sabrt", DEVNAME(sc));
	workqueue_create(&sc->sc_ssb_tmowk, wqname, mpii_scsi_cmd_tmo_handler,
	    sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
	/* NOTE(review): the mutexes/cv initialized above are not torn
	 * down on this or the later failure paths — confirm whether
	 * the caller unwinds them */
	if (sc->sc_ssb_tmowk == NULL)
		return 1;

	sc->sc_ccbs = malloc((sc->sc_max_cmds-1) * sizeof(*ccb),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->sc_requests = mpii_dmamem_alloc(sc,
	    sc->sc_request_size * sc->sc_max_cmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPII_DMA_KVA(sc->sc_requests);

	/*
	 * we have sc->sc_max_cmds system request message
	 * frames, but smid zero cannot be used. so we then
	 * have (sc->sc_max_cmds - 1) number of ccbs
	 */
	for (i = 1; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccbs[i - 1];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
		cv_init(&ccb->ccb_cv, "mpiiexec");

		/* smid i selects request frame i in the DMA area */
		ccb->ccb_smid = htole16(i);
		ccb->ccb_offset = sc->sc_request_size * i;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
		    "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpii_put_ccb(sc, ccb);
	}

	return (0);

free_maps:
	/* drain the free list, destroying each ccb's dma map */
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpii_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF);

	return (1);
}
2681
2682 static void
mpii_put_ccb(struct mpii_softc * sc,struct mpii_ccb * ccb)2683 mpii_put_ccb(struct mpii_softc *sc, struct mpii_ccb *ccb)
2684 {
2685 DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);
2686
2687 ccb->ccb_state = MPII_CCB_FREE;
2688 ccb->ccb_cookie = NULL;
2689 ccb->ccb_done = NULL;
2690 ccb->ccb_rcb = NULL;
2691 memset(ccb->ccb_cmd, 0, sc->sc_request_size);
2692
2693 mutex_enter(&sc->sc_ccb_free_mtx);
2694 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
2695 mutex_exit(&sc->sc_ccb_free_mtx);
2696 }
2697
2698 static struct mpii_ccb *
mpii_get_ccb(struct mpii_softc * sc)2699 mpii_get_ccb(struct mpii_softc *sc)
2700 {
2701 struct mpii_ccb *ccb;
2702
2703 mutex_enter(&sc->sc_ccb_free_mtx);
2704 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
2705 if (ccb != NULL) {
2706 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
2707 ccb->ccb_state = MPII_CCB_READY;
2708 KASSERT(ccb->ccb_sc == sc);
2709 }
2710 mutex_exit(&sc->sc_ccb_free_mtx);
2711
2712 DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);
2713
2714 return (ccb);
2715 }
2716
2717 static int
mpii_alloc_replies(struct mpii_softc * sc)2718 mpii_alloc_replies(struct mpii_softc *sc)
2719 {
2720 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2721
2722 sc->sc_rcbs = malloc(sc->sc_num_reply_frames * sizeof(struct mpii_rcb),
2723 M_DEVBUF, M_WAITOK);
2724
2725 sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2726 sc->sc_num_reply_frames);
2727 if (sc->sc_replies == NULL) {
2728 free(sc->sc_rcbs, M_DEVBUF);
2729 return (1);
2730 }
2731
2732 return (0);
2733 }
2734
2735 static void
mpii_push_replies(struct mpii_softc * sc)2736 mpii_push_replies(struct mpii_softc *sc)
2737 {
2738 struct mpii_rcb *rcb;
2739 uintptr_t kva = (uintptr_t)MPII_DMA_KVA(sc->sc_replies);
2740 int i;
2741
2742 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
2743 0, sc->sc_reply_size * sc->sc_num_reply_frames,
2744 BUS_DMASYNC_PREREAD);
2745
2746 for (i = 0; i < sc->sc_num_reply_frames; i++) {
2747 rcb = &sc->sc_rcbs[i];
2748
2749 rcb->rcb_reply = (void *)(kva + sc->sc_reply_size * i);
2750 rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2751 sc->sc_reply_size * i;
2752 mpii_push_reply(sc, rcb);
2753 }
2754 }
2755
/*
 * Post a request descriptor for ccb to the IOC.  The descriptor names
 * the smid of the request frame the IOC should fetch via DMA.
 */
static void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header *rhp;
	struct mpii_request_descr descr;
#if defined(__LP64__) && 0
	u_long *rdp = (u_long *)&descr;
#else
	u_int32_t *rdp = (u_int32_t *)&descr;
#endif

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	/* make the request frame visible to the device before posting */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	/* descriptor type depends on the request's function code */
	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

#if defined(__LP64__) && 0
	/* disabled: single 64-bit write of the whole descriptor */
	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%04x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%04x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

	/*
	 * The descriptor is posted as two ordered 32-bit writes; the
	 * mutex keeps the low/high halves of concurrent posts from
	 * interleaving.
	 */
	mutex_enter(&sc->sc_req_mtx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mutex_exit(&sc->sc_req_mtx);
#endif
}
2820
2821 static int
mpii_poll(struct mpii_softc * sc,struct mpii_ccb * ccb)2822 mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
2823 {
2824 void (*done)(struct mpii_ccb *);
2825 void *cookie;
2826 int rv = 1;
2827
2828 DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));
2829
2830 done = ccb->ccb_done;
2831 cookie = ccb->ccb_cookie;
2832
2833 ccb->ccb_done = mpii_poll_done;
2834 ccb->ccb_cookie = &rv;
2835
2836 mpii_start(sc, ccb);
2837
2838 while (rv == 1) {
2839 /* avoid excessive polling */
2840 if (mpii_reply_waiting(sc))
2841 mpii_intr(sc);
2842 else
2843 delay(10);
2844 }
2845
2846 ccb->ccb_cookie = cookie;
2847 done(ccb);
2848
2849 return (0);
2850 }
2851
2852 static void
mpii_poll_done(struct mpii_ccb * ccb)2853 mpii_poll_done(struct mpii_ccb *ccb)
2854 {
2855 int *rv = ccb->ccb_cookie;
2856
2857 *rv = 0;
2858 }
2859
2860 static int
mpii_alloc_queues(struct mpii_softc * sc)2861 mpii_alloc_queues(struct mpii_softc *sc)
2862 {
2863 u_int32_t *rfp;
2864 int i;
2865
2866 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));
2867
2868 mutex_init(&sc->sc_reply_free_mtx, MUTEX_DEFAULT, IPL_BIO);
2869 sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
2870 sc->sc_reply_free_qdepth * sizeof(*rfp));
2871 if (sc->sc_reply_freeq == NULL)
2872 return (1);
2873 rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
2874 for (i = 0; i < sc->sc_num_reply_frames; i++) {
2875 rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2876 sc->sc_reply_size * i;
2877 }
2878
2879 sc->sc_reply_postq = mpii_dmamem_alloc(sc,
2880 sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
2881 if (sc->sc_reply_postq == NULL)
2882 goto free_reply_freeq;
2883 sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
2884 memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
2885 sizeof(struct mpii_reply_descr));
2886
2887 return (0);
2888
2889 free_reply_freeq:
2890 mpii_dmamem_free(sc, sc->sc_reply_freeq);
2891 return (1);
2892 }
2893
/*
 * Program the initial host indices of the reply free and reply post
 * queues into the controller.
 */
static void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_init_queues\n", DEVNAME(sc));

	/* free queue index starts one short of the queue depth —
	 * presumably so the firmware can tell full from empty; the
	 * post queue starts at slot 0 */
	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2904
/*
 * Issue ccb and sleep until it completes.  The completion hook is
 * temporarily replaced by mpii_wait_done(), which clears ccb_cookie
 * under ccb_mtx and signals ccb_cv; the caller's original handler is
 * then restored and invoked.
 */
static void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void (*done)(struct mpii_ccb *);
	void *cookie;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	/* non-NULL cookie doubles as the "still in flight" marker */
	ccb->ccb_cookie = ccb;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2929
2930 static void
mpii_wait_done(struct mpii_ccb * ccb)2931 mpii_wait_done(struct mpii_ccb *ccb)
2932 {
2933 mutex_enter(&ccb->ccb_mtx);
2934 ccb->ccb_cookie = NULL;
2935 cv_signal(&ccb->ccb_cv);
2936 mutex_exit(&ccb->ccb_mtx);
2937 }
2938
/*
 * scsipi adapter entry point: translate a scsipi request into an MPI
 * SCSI IO request and hand it to the controller.
 */
static void
mpii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mpii_softc *sc = device_private(adapt->adapt_dev);
	struct mpii_ccb *ccb;
	struct mpii_msg_scsi_io *io;
	struct mpii_device *dev;
	int target, timeout, ret;
	u_int16_t dev_handle;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsipi_request\n", DEVNAME(sc));

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		/* report tagged queueing; no sync/wide parameters */
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* fail oversized CDBs with ILLEGAL REQUEST / invalid opcode */
	if (xs->cmdlen > MPII_CDB_LEN) {
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code =
		    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		scsipi_done(xs);
		return;
	}

	/* snapshot the dev_handle under the lock; the device may be
	 * removed while the command is in flight */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = sc->sc_devs[target]) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}
	dev_handle = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->cmd->opcode: 0x%02x xs->xs_control: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->cmd->opcode, xs->xs_control);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev_handle;

	/* build the SCSI IO request in the ccb's request frame */
	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	io->io_flags = htole16(xs->cmdlen);
	io->dev_handle = htole16(ccb->ccb_dev_handle);
	/* NOTE(review): LUN placed big-endian in the first level of
	 * the LUN field — confirm against the SAM LUN encoding */
	io->lun[0] = htobe16(periph->periph_lun);

	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	io->data_length = htole32(xs->datalen);

	/* sense data is at the end of a request */
	io->sense_buffer_low_address = htole32(ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* SAS3 IOCs use the IEEE SGL format */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
		return;
	}
	/* arm the timeout (at least one tick) before posting */
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;
	callout_reset(&xs->xs_callout, timeout, mpii_scsi_cmd_tmo, ccb);
	mpii_start(sc, ccb);
	return;
done:
	mpii_put_ccb(sc, ccb);
	scsipi_done(xs);
}
3071
3072 static void
mpii_scsi_cmd_tmo(void * xccb)3073 mpii_scsi_cmd_tmo(void *xccb)
3074 {
3075 struct mpii_ccb *ccb = xccb;
3076 struct mpii_softc *sc = ccb->ccb_sc;
3077 bool start_work;
3078
3079 printf("%s: mpii_scsi_cmd_tmo\n", DEVNAME(sc));
3080
3081 if (ccb->ccb_state == MPII_CCB_QUEUED) {
3082 mutex_enter(&sc->sc_ssb_tmomtx);
3083 start_work = (SIMPLEQ_FIRST(&sc->sc_ccb_tmos) == 0);
3084 ccb->ccb_state = MPII_CCB_TIMEOUT;
3085 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
3086 if (start_work) {
3087 workqueue_enqueue(sc->sc_ssb_tmowk,
3088 &sc->sc_ssb_tmowork, NULL);
3089 }
3090 mutex_exit(&sc->sc_ssb_tmomtx);
3091 }
3092 }
3093
static void
mpii_scsi_cmd_tmo_handler(struct work *wk, void *cookie)
{
	/*
	 * Workqueue handler for command timeouts: for every ccb that
	 * timed out, issue a SCSI task-management "target reset" for
	 * the device the command was addressed to and wait for it to
	 * complete.
	 */
	struct mpii_softc *sc = cookie;
	struct mpii_ccb *next;
	struct mpii_ccb *ccb;
	struct mpii_ccb *tccb;
	struct mpii_msg_scsi_task_request *stq;

	/*
	 * Detach the whole timeout list under the lock; a later timeout
	 * sees an empty list and re-arms the work (see mpii_scsi_cmd_tmo).
	 * The detached entries are then walked via their (still intact)
	 * ccb_link next pointers without holding the mutex.
	 */
	mutex_enter(&sc->sc_ssb_tmomtx);
	next = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
	mutex_exit(&sc->sc_ssb_tmomtx);

	while (next != NULL) {
		ccb = next;
		next = SIMPLEQ_NEXT(ccb, ccb_link);
		/* Skip commands that completed after being queued here. */
		if (ccb->ccb_state != MPII_CCB_TIMEOUT)
			continue;
		/* Build a target-reset request on a fresh ccb. */
		tccb = mpii_get_ccb(sc);
		stq = tccb->ccb_cmd;
		stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
		stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
		stq->dev_handle = htole16(ccb->ccb_dev_handle);

		tccb->ccb_done = mpii_scsi_cmd_tmo_done;
		mpii_wait(sc, tccb);
	}
}
3123
3124 static void
mpii_scsi_cmd_tmo_done(struct mpii_ccb * tccb)3125 mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
3126 {
3127 mpii_put_ccb(tccb->ccb_sc, tccb);
3128 }
3129
3130 static u_int8_t
map_scsi_status(u_int8_t mpii_scsi_status)3131 map_scsi_status(u_int8_t mpii_scsi_status)
3132 {
3133 u_int8_t scsi_status;
3134
3135 switch (mpii_scsi_status)
3136 {
3137 case MPII_SCSIIO_STATUS_GOOD:
3138 scsi_status = SCSI_OK;
3139 break;
3140
3141 case MPII_SCSIIO_STATUS_CHECK_COND:
3142 scsi_status = SCSI_CHECK;
3143 break;
3144
3145 case MPII_SCSIIO_STATUS_BUSY:
3146 scsi_status = SCSI_BUSY;
3147 break;
3148
3149 case MPII_SCSIIO_STATUS_INTERMEDIATE:
3150 scsi_status = SCSI_INTERM;
3151 break;
3152
3153 case MPII_SCSIIO_STATUS_INTERMEDIATE_CONDMET:
3154 scsi_status = SCSI_INTERM;
3155 break;
3156
3157 case MPII_SCSIIO_STATUS_RESERVATION_CONFLICT:
3158 scsi_status = SCSI_RESV_CONFLICT;
3159 break;
3160
3161 case MPII_SCSIIO_STATUS_CMD_TERM:
3162 case MPII_SCSIIO_STATUS_TASK_ABORTED:
3163 scsi_status = SCSI_TERMINATED;
3164 break;
3165
3166 case MPII_SCSIIO_STATUS_TASK_SET_FULL:
3167 scsi_status = SCSI_QUEUE_FULL;
3168 break;
3169
3170 case MPII_SCSIIO_STATUS_ACA_ACTIVE:
3171 scsi_status = SCSI_ACA_ACTIVE;
3172 break;
3173
3174 default:
3175 /* XXX: for the lack of anything better and other than OK */
3176 scsi_status = 0xFF;
3177 break;
3178 }
3179
3180 return scsi_status;
3181 }
3182
3183 static void
mpii_scsi_cmd_done(struct mpii_ccb * ccb)3184 mpii_scsi_cmd_done(struct mpii_ccb *ccb)
3185 {
3186 struct mpii_msg_scsi_io_error *sie;
3187 struct mpii_softc *sc = ccb->ccb_sc;
3188 struct scsipi_xfer *xs = ccb->ccb_cookie;
3189 struct scsi_sense_data *sense;
3190 bus_dmamap_t dmap = ccb->ccb_dmamap;
3191 bool timeout = 1;
3192
3193 callout_stop(&xs->xs_callout);
3194 if (ccb->ccb_state == MPII_CCB_TIMEOUT)
3195 timeout = 1;
3196 ccb->ccb_state = MPII_CCB_READY;
3197
3198 if (xs->datalen != 0) {
3199 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3200 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
3201 BUS_DMASYNC_POSTWRITE);
3202
3203 bus_dmamap_unload(sc->sc_dmat, dmap);
3204 }
3205
3206 KASSERT(xs->error == XS_NOERROR);
3207 KASSERT(xs->status == SCSI_OK);
3208
3209 if (ccb->ccb_rcb == NULL) {
3210 /* no scsi error, we're ok so drop out early */
3211 xs->resid = 0;
3212 goto done;
3213 }
3214
3215 sie = ccb->ccb_rcb->rcb_reply;
3216
3217 DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
3218 "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
3219 xs->xs_control);
3220 DNPRINTF(MPII_D_CMD, "%s: dev_handle: %d msg_length: %d "
3221 "function: 0x%02x\n", DEVNAME(sc), le16toh(sie->dev_handle),
3222 sie->msg_length, sie->function);
3223 DNPRINTF(MPII_D_CMD, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
3224 sie->vp_id, sie->vf_id);
3225 DNPRINTF(MPII_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
3226 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
3227 sie->scsi_state, le16toh(sie->ioc_status));
3228 DNPRINTF(MPII_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
3229 le32toh(sie->ioc_loginfo));
3230 DNPRINTF(MPII_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
3231 le32toh(sie->transfer_count));
3232 DNPRINTF(MPII_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
3233 le32toh(sie->sense_count));
3234 DNPRINTF(MPII_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
3235 le32toh(sie->response_info));
3236 DNPRINTF(MPII_D_CMD, "%s: task_tag: 0x%04x\n", DEVNAME(sc),
3237 le16toh(sie->task_tag));
3238 DNPRINTF(MPII_D_CMD, "%s: bidirectional_transfer_count: 0x%08x\n",
3239 DEVNAME(sc), le32toh(sie->bidirectional_transfer_count));
3240
3241 xs->status = map_scsi_status(sie->scsi_status);
3242
3243 switch (le16toh(sie->ioc_status) & MPII_IOCSTATUS_MASK) {
3244 case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
3245 switch(sie->scsi_status) {
3246 case MPII_SCSIIO_STATUS_CHECK_COND:
3247 xs->error = XS_SENSE;
3248 /* FALLTHROUGH */
3249 case MPII_SCSIIO_STATUS_GOOD:
3250 xs->resid = xs->datalen - le32toh(sie->transfer_count);
3251 break;
3252 default:
3253 xs->error = XS_DRIVER_STUFFUP;
3254 break;
3255 }
3256 break;
3257
3258 case MPII_IOCSTATUS_SUCCESS:
3259 case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
3260 switch (sie->scsi_status) {
3261 case MPII_SCSIIO_STATUS_GOOD:
3262 xs->resid = 0;
3263 break;
3264
3265 case MPII_SCSIIO_STATUS_CHECK_COND:
3266 xs->error = XS_SENSE;
3267 break;
3268
3269 case MPII_SCSIIO_STATUS_BUSY:
3270 case MPII_SCSIIO_STATUS_TASK_SET_FULL:
3271 xs->error = XS_BUSY;
3272 break;
3273
3274 default:
3275 xs->error = XS_DRIVER_STUFFUP;
3276 }
3277 break;
3278
3279 case MPII_IOCSTATUS_BUSY:
3280 case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
3281 xs->error = XS_BUSY;
3282 break;
3283
3284 case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
3285 case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
3286 xs->error = timeout ? XS_TIMEOUT : XS_RESET;
3287 break;
3288
3289 case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
3290 case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3291 xs->error = XS_SELTIMEOUT;
3292 break;
3293
3294 default:
3295 xs->error = XS_DRIVER_STUFFUP;
3296 break;
3297 }
3298
3299 sense = (struct scsi_sense_data *)((uintptr_t)ccb->ccb_cmd +
3300 sc->sc_request_size - sizeof(*sense));
3301 if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
3302 memcpy(&xs->sense, sense, sizeof(xs->sense));
3303
3304 mpii_push_reply(sc, ccb->ccb_rcb);
3305
3306 done:
3307 mpii_put_ccb(sc, ccb);
3308
3309 DNPRINTF(MPII_D_CMD, "%s: xs err: %d status: %#x len: %d resid: %d\n",
3310 DEVNAME(sc), xs->error, xs->status, xs->datalen, xs->resid);
3311
3312 scsipi_done(xs);
3313 }
3314
3315 #if 0
/*
 * Disabled OpenBSD-style ioctl entry point (struct scsi_link does not
 * exist in the NetBSD port); kept inside #if 0 for reference only.
 * Dispatches disk cache ioctls to mpii_ioctl_cache() for RAID volumes
 * and everything else to the controller's sc_ioctl hook.
 */
int
mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, void *addr, int flag)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));

	switch (cmd) {
	case DIOCGCACHE:
	case DIOCSCACHE:
		/* Cache control is only meaningful for RAID volumes. */
		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
			return (mpii_ioctl_cache(link, cmd,
			    (struct dk_cache *)addr));
		}
		break;

	default:
		if (sc->sc_ioctl)
			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));

		break;
	}

	return (ENOTTY);
}
3342
/*
 * Disabled OpenBSD-style DIOCGCACHE/DIOCSCACHE implementation (uses
 * struct scsi_link and the scsi_io_get() iopool API, which the NetBSD
 * port does not have); kept inside #if 0 for reference only.
 * Reads the volume's write-cache setting from RAID volume page 0 and,
 * for DIOCSCACHE, toggles it via a RAID_ACTION request.
 */
int
mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_msg_raid_action_request *req;
	struct mpii_msg_raid_action_reply *rep;
	struct mpii_cfg_hdr hdr;
	struct mpii_ccb *ccb;
	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
	size_t pagelen;
	int rv = 0;
	int enabled;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    addr, MPII_PG_POLL, &hdr) != 0)
		return (EINVAL);

	/* page_length is in 32-bit words. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL)
		return (ENOMEM);

	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
	    vpg, pagelen) != 0) {
		rv = EINVAL;
		goto done;
	}

	/* Current write-cache state as reported by the volume page. */
	enabled = ((le16toh(vpg->volume_settings) &
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;

	if (cmd == DIOCGCACHE) {
		dc->wrcache = enabled;
		dc->rdcache = 0;
		goto done;
	} /* else DIOCSCACHE */

	/* Read caching cannot be controlled through this interface. */
	if (dc->rdcache) {
		rv = EOPNOTSUPP;
		goto done;
	}

	/* Nothing to do when the requested state already matches. */
	if (((dc->wrcache) ? 1 : 0) == enabled)
		goto done;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
	if (ccb == NULL) {
		rv = ENOMEM;
		goto done;
	}

	ccb->ccb_done = mpii_empty_done;

	req = ccb->ccb_cmd;
	memset(req, 0, sizeof(*req));
	req->function = MPII_FUNCTION_RAID_ACTION;
	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
	req->vol_dev_handle = htole16(dev->dev_handle);
	req->action_data = htole32(dc->wrcache ?
	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);

	if (mpii_poll(sc, ccb) != 0) {
		rv = EIO;
		goto done;
	}

	/* Verify the controller actually applied the new setting. */
	if (ccb->ccb_rcb != NULL) {
		rep = ccb->ccb_rcb->rcb_reply;
		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
		    ((rep->action_data[0] &
		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
			rv = EINVAL;
		mpii_push_reply(sc, ccb->ccb_rcb);
	}

	scsi_io_put(&sc->sc_iopool, ccb);

done:
	free(vpg, M_TEMP);
	return (rv);
}
3430 #endif /* 0 */
3431
3432 #if NBIO > 0
3433 static int
mpii_ioctl(device_t dev,u_long cmd,void * addr)3434 mpii_ioctl(device_t dev, u_long cmd, void *addr)
3435 {
3436 struct mpii_softc *sc = device_private(dev);
3437 int error = 0;
3438
3439 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3440
3441 switch (cmd) {
3442 case BIOCINQ:
3443 DNPRINTF(MPII_D_IOCTL, "inq\n");
3444 error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3445 break;
3446 case BIOCVOL:
3447 DNPRINTF(MPII_D_IOCTL, "vol\n");
3448 error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3449 break;
3450 case BIOCDISK:
3451 DNPRINTF(MPII_D_IOCTL, "disk\n");
3452 error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3453 break;
3454 default:
3455 DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3456 error = ENOTTY;
3457 }
3458
3459 return (error);
3460 }
3461
3462 static int
mpii_ioctl_inq(struct mpii_softc * sc,struct bioc_inq * bi)3463 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3464 {
3465 int i;
3466
3467 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3468
3469 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3470 mutex_enter(&sc->sc_devs_mtx);
3471 for (i = 0; i < sc->sc_max_devices; i++)
3472 if (sc->sc_devs[i] &&
3473 ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3474 bi->bi_novol++;
3475 mutex_exit(&sc->sc_devs_mtx);
3476 return (0);
3477 }
3478
static int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	/*
	 * BIOCVOL handler: fill in status, RAID level, disk count and
	 * size for the volume identified by bv->bv_volid, from RAID
	 * volume page 0.  May sleep (config requests, M_WAITOK).
	 */
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	size_t pagelen;
	u_int16_t volh;
	int rv, hcnt = 0;
	int percent;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	/* Snapshot the handle and resync progress under the lock. */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	percent = dev->percent;
	mutex_exit(&sc->sc_devs_mtx);

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit words. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* Map the IOC volume state onto bio(4) status codes. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		/* Degraded + resync in progress means a rebuild. */
		if (ISSET(le32toh(vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* Map the IOC volume type onto a numeric RAID level. */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* Hot spares in this volume's pool count toward bv_nodisk. */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = le64toh(vpg->max_lba) * le16toh(vpg->block_size);

	free(vpg, M_TEMP);
	return (0);
}
3577
static int
mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
{
	/*
	 * BIOCDISK handler: resolve bio(4) disk id bd->bd_diskid within
	 * volume bd->bd_volid.  Ids below the member count index into
	 * the physical disk list of RAID volume page 0; ids beyond it
	 * refer to hot spares and are resolved by mpii_bio_hs().
	 */
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_raid_vol_pg0_physdisk *pd;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	size_t pagelen;
	u_int16_t volh;
	u_int8_t dn;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);

	/* Snapshot the volume's device handle under the lock. */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit words. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* Ids past the volume members are hot spares. */
	if (bd->bd_diskid >= vpg->num_phys_disks) {
		int nvdsk = vpg->num_phys_disks;
		int hsmap = vpg->hot_spare_pool;

		free(vpg, M_TEMP);
		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
	}

	/* Physical disk records immediately follow the page header. */
	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
	    bd->bd_diskid;
	dn = pd->phys_disk_num;

	free(vpg, M_TEMP);
	return (mpii_bio_disk(sc, bd, dn));
}
3638
static int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
    int hsmap, int *hscnt)
{
	/*
	 * Walk the active RAID configuration page looking for hot
	 * spares in pool "hsmap".  Dual purpose:
	 *  - bd != NULL: resolve the hot spare whose bio(4) disk id is
	 *    bd->bd_diskid (counted after the nvdsk volume members) and
	 *    fill it in via mpii_bio_disk();
	 *  - hscnt != NULL: store the number of matching hot spares.
	 */
	struct mpii_cfg_raid_config_pg0 *cpg;
	struct mpii_raid_config_element *el;
	struct mpii_ecfg_hdr ehdr;
	size_t pagelen;
	int i, nhs = 0;

	if (bd) {
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	} else {
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));
	}

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is in 32-bit words. */
	pagelen = le16toh(ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP);
		return (EINVAL);
	}

	/* Config elements immediately follow the page header. */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(le16toh(el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP);
	return (0);
}
3708
3709 static int
mpii_bio_disk(struct mpii_softc * sc,struct bioc_disk * bd,u_int8_t dn)3710 mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
3711 {
3712 struct mpii_cfg_raid_physdisk_pg0 *ppg;
3713 struct mpii_cfg_hdr hdr;
3714 struct mpii_device *dev;
3715 int len;
3716
3717 DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
3718 bd->bd_diskid);
3719
3720 ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_ZERO);
3721 if (ppg == NULL) {
3722 printf("%s: unable to allocate space for raid physical disk "
3723 "page 0\n", DEVNAME(sc));
3724 return (ENOMEM);
3725 }
3726
3727 hdr.page_version = 0;
3728 hdr.page_length = sizeof(*ppg) / 4;
3729 hdr.page_number = 0;
3730 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;
3731
3732 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
3733 &hdr, 1, ppg, sizeof(*ppg)) != 0) {
3734 printf("%s: unable to fetch raid drive page 0\n",
3735 DEVNAME(sc));
3736 free(ppg, M_TEMP);
3737 return (EINVAL);
3738 }
3739
3740 bd->bd_target = ppg->phys_disk_num;
3741
3742 mutex_enter(&sc->sc_devs_mtx);
3743 if ((dev = mpii_find_dev(sc, le16toh(ppg->dev_handle))) == NULL) {
3744 mutex_exit(&sc->sc_devs_mtx);
3745 bd->bd_status = BIOC_SDINVALID;
3746 free(ppg, M_TEMP);
3747 return (0);
3748 }
3749 mutex_exit(&sc->sc_devs_mtx);
3750
3751 switch (ppg->phys_disk_state) {
3752 case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3753 case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
3754 bd->bd_status = BIOC_SDONLINE;
3755 break;
3756 case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3757 if (ppg->offline_reason ==
3758 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
3759 ppg->offline_reason ==
3760 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
3761 bd->bd_status = BIOC_SDFAILED;
3762 else
3763 bd->bd_status = BIOC_SDOFFLINE;
3764 break;
3765 case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
3766 bd->bd_status = BIOC_SDFAILED;
3767 break;
3768 case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
3769 bd->bd_status = BIOC_SDREBUILD;
3770 break;
3771 case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
3772 bd->bd_status = BIOC_SDHOTSPARE;
3773 break;
3774 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
3775 bd->bd_status = BIOC_SDUNUSED;
3776 break;
3777 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
3778 default:
3779 bd->bd_status = BIOC_SDINVALID;
3780 break;
3781 }
3782
3783 bd->bd_size = le64toh(ppg->dev_max_lba) * le16toh(ppg->block_size);
3784
3785 strnvisx(bd->bd_vendor, sizeof(bd->bd_vendor),
3786 ppg->vendor_id, sizeof(ppg->vendor_id),
3787 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3788 len = strlen(bd->bd_vendor);
3789 bd->bd_vendor[len] = ' ';
3790 strnvisx(&bd->bd_vendor[len + 1], sizeof(ppg->vendor_id) - len - 1,
3791 ppg->product_id, sizeof(ppg->product_id),
3792 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3793 strnvisx(bd->bd_serial, sizeof(bd->bd_serial),
3794 ppg->serial, sizeof(ppg->serial), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3795
3796 free(ppg, M_TEMP);
3797 return (0);
3798 }
3799
3800 static struct mpii_device *
mpii_find_vol(struct mpii_softc * sc,int volid)3801 mpii_find_vol(struct mpii_softc *sc, int volid)
3802 {
3803 struct mpii_device *dev = NULL;
3804
3805 KASSERT(mutex_owned(&sc->sc_devs_mtx));
3806 if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3807 return (NULL);
3808 dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3809 if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3810 return (dev);
3811 return (NULL);
3812 }
3813
3814 /*
3815 * Non-sleeping lightweight version of the mpii_ioctl_vol
3816 */
static int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	/*
	 * Report only bv_status for the volume bv->bv_volid, using
	 * polled (MPII_PG_POLL) config requests so it is usable from
	 * the sensor-refresh path.
	 * NOTE(review): the malloc below uses M_WAITOK, which may
	 * sleep despite the "non-sleeping" label on this function --
	 * confirm the refresh context tolerates this.
	 */
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev = NULL;
	size_t pagelen;
	u_int16_t volh;

	/* Snapshot the volume's device handle under the lock. */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit words; M_WAITOK cannot return NULL. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* Map the IOC volume state onto bio(4) status codes. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(le32toh(vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP);
	return (0);
}
3878
static int
mpii_create_sensors(struct mpii_softc *sc)
{
	/*
	 * Create one ENVSYS_DRIVE sensor per possible RAID volume and
	 * register the set with sysmon.  Returns 0 on success, 1 on
	 * failure (attach or register error) after undoing the partial
	 * setup.
	 */
	int i, rv;

	DNPRINTF(MPII_D_MISC, "%s: mpii_create_sensors(%d)\n",
	    DEVNAME(sc), sc->sc_max_volumes);
	sc->sc_sme = sysmon_envsys_create();
	sc->sc_sensors = malloc(sizeof(envsys_data_t) * sc->sc_max_volumes,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_max_volumes; i++) {
		sc->sc_sensors[i].units = ENVSYS_DRIVE;
		sc->sc_sensors[i].state = ENVSYS_SINVALID;
		sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_EMPTY;
		/* Have sysmon report drive state transitions. */
		sc->sc_sensors[i].flags |= ENVSYS_FMONSTCHANGED;

		/* logical drives */
		snprintf(sc->sc_sensors[i].desc,
		    sizeof(sc->sc_sensors[i].desc), "%s:%d",
		    DEVNAME(sc), i);
		if ((rv = sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_sensors[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to attach sensor (rv = %d)\n", rv);
			goto out;
		}
	}
	sc->sc_sme->sme_name = DEVNAME(sc);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = mpii_refresh_sensors;

	rv = sysmon_envsys_register(sc->sc_sme);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to register with sysmon (rv = %d)\n", rv);
		goto out;
	}
	return 0;

out:
	/* Unwind: free the sensor array and destroy the envsys frame. */
	free(sc->sc_sensors, M_DEVBUF);
	sysmon_envsys_destroy(sc->sc_sme);
	sc->sc_sme = NULL;
	return 1;
}
3925
3926 static int
mpii_destroy_sensors(struct mpii_softc * sc)3927 mpii_destroy_sensors(struct mpii_softc *sc)
3928 {
3929 if (sc->sc_sme == NULL)
3930 return 0;
3931 sysmon_envsys_unregister(sc->sc_sme);
3932 sc->sc_sme = NULL;
3933 free(sc->sc_sensors, M_DEVBUF);
3934 return 0;
3935
3936 }
3937
3938 static void
mpii_refresh_sensors(struct sysmon_envsys * sme,envsys_data_t * edata)3939 mpii_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
3940 {
3941 struct mpii_softc *sc = sme->sme_cookie;
3942 struct bioc_vol bv;
3943
3944 memset(&bv, 0, sizeof(bv));
3945 bv.bv_volid = edata->sensor;
3946 if (mpii_bio_volstate(sc, &bv))
3947 bv.bv_status = BIOC_SVINVALID;
3948 bio_vol_to_envsys(edata, &bv);
3949 }
3950 #endif /* NBIO > 0 */
3951