1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26
27 #include <sys/types.h>
28 #include <sys/sysmacros.h>
29 #include <sys/buf.h>
30 #include <sys/errno.h>
31 #include <sys/modctl.h>
32 #include <sys/conf.h>
33 #include <sys/stat.h>
34 #include <sys/kmem.h>
35 #include <sys/proc.h>
36 #include <sys/cpuvar.h>
37 #include <sys/ddi_impldefs.h>
38 #include <sys/ddi.h>
39 #include <sys/fm/protocol.h>
40 #include <sys/fm/util.h>
41 #include <sys/fm/io/ddi.h>
42 #include <sys/sysevent/eventdefs.h>
43 #include <sys/sunddi.h>
44 #include <sys/sunndi.h>
45 #include <sys/debug.h>
46 #include <sys/bofi.h>
47 #include <sys/dvma.h>
48 #include <sys/bofi_impl.h>
49
50 /*
51 * Testing the resilience of a hardened device driver requires a suitably wide
52 * range of different types of "typical" hardware faults to be injected,
53 * preferably in a controlled and repeatable fashion. This is not in general
54 * possible via hardware, so the "fault injection test harness" is provided.
55 * This works by intercepting calls from the driver to various DDI routines,
56 * and then corrupting the result of those DDI routine calls as if the
57 * hardware had caused the corruption.
58 *
59 * Conceptually, the bofi driver consists of two parts:
60 *
61 * A driver interface that supports a number of ioctls which allow error
62 * definitions ("errdefs") to be defined and subsequently managed. The
63 * driver is a clone driver, so each open will create a separate
64 * invocation. Any errdefs created by using ioctls to that invocation
65 * will automatically be deleted when that invocation is closed.
66 *
67 * Intercept routines: When the bofi driver is attached, it edits the
68 * bus_ops structure of the bus nexus specified by the "bofi-nexus"
69 * field in the "bofi.conf" file, thus allowing the
70 * bofi driver to intercept various ddi functions. These intercept
71 * routines primarily carry out fault injections based on the errdefs
72 * created for that device.
73 *
74 * Faults can be injected into:
75 *
76 * DMA (corrupting data for DMA to/from memory areas defined by
77 * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
78 *
79 * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
80 * etc),
81 *
82 * Interrupts (generating spurious interrupts, losing interrupts,
83 * delaying interrupts).
84 *
85 * By default, ddi routines called from all drivers will be intercepted
86 * and faults potentially injected. However, the "bofi-to-test" field in
87 * the "bofi.conf" file can be set to a space-separated list of drivers to
88 * test (or by preceding each driver name in the list with an "!", a list
89 * of drivers not to test).
90 *
91 * In addition to fault injection, the bofi driver does a number of static
92 * checks which are controlled by properties in the "bofi.conf" file.
93 *
94 * "bofi-ddi-check" - if set will validate that there are no PIO access
95 * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
96 *
97 * "bofi-range-check" - if set to values 1 (warning) or 2 (panic), will
98 * validate that calls to ddi_get8(), ddi_put8(), etc are not made
99 * specifying addresses outside the range of the access_handle.
100 *
101 * "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
102 * are being made correctly.
103 */
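
/*
 * By way of illustration, a bofi.conf making use of the properties
 * described above might look as follows (the nexus and driver names,
 * and the property values chosen, are purely illustrative):
 *
 *	name="bofi" parent="pseudo" instance=0;
 *	bofi-nexus="pci";
 *	bofi-to-test="xyzdrv";
 *	bofi-range-check="warn";
 *	bofi-ddi-check="on";
 *	bofi-sync-check="on";
 */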
104
105 extern void *bp_mapin_common(struct buf *, int);
106
107 static int bofi_ddi_check;
108 static int bofi_sync_check;
109 static int bofi_range_check;
110
111 static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;
112
113 #define LLSZMASK (sizeof (uint64_t)-1)
114
115 #define HDL_HASH_TBL_SIZE 64
116 static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
117 static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
118 #define HDL_DHASH(x) \
119 (&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
120 #define HDL_HHASH(x) \
121 (&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
122
123 static struct bofi_shadow shadow_list;
124 static struct bofi_errent *errent_listp;
125
126 static char driver_list[NAMESIZE];
127 static int driver_list_size;
128 static int driver_list_neg;
129 static char nexus_name[NAMESIZE];
130
131 static int initialized = 0;
132
133 #define NCLONES 2560
134 static int clone_tab[NCLONES];
135
136 static dev_info_t *our_dip;
137
138 static kmutex_t bofi_mutex;
139 static kmutex_t clone_tab_mutex;
140 static kmutex_t bofi_low_mutex;
141 static ddi_iblock_cookie_t bofi_low_cookie;
142 static uint_t bofi_signal(caddr_t arg);
143 static int bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
144 static int bofi_attach(dev_info_t *, ddi_attach_cmd_t);
145 static int bofi_detach(dev_info_t *, ddi_detach_cmd_t);
146 static int bofi_open(dev_t *, int, int, cred_t *);
147 static int bofi_close(dev_t, int, int, cred_t *);
148 static int bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
149 static int bofi_errdef_alloc(struct bofi_errdef *, char *,
150 struct bofi_errent *);
151 static int bofi_errdef_free(struct bofi_errent *);
152 static void bofi_start(struct bofi_errctl *, char *);
153 static void bofi_stop(struct bofi_errctl *, char *);
154 static void bofi_broadcast(struct bofi_errctl *, char *);
155 static void bofi_clear_acc_chk(struct bofi_errctl *, char *);
156 static void bofi_clear_errors(struct bofi_errctl *, char *);
157 static void bofi_clear_errdefs(struct bofi_errctl *, char *);
158 static int bofi_errdef_check(struct bofi_errstate *,
159 struct acc_log_elem **);
160 static int bofi_errdef_check_w(struct bofi_errstate *,
161 struct acc_log_elem **);
162 static int bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
163 off_t, off_t, caddr_t *);
164 static int bofi_dma_map(dev_info_t *, dev_info_t *,
165 struct ddi_dma_req *, ddi_dma_handle_t *);
166 static int bofi_dma_allochdl(dev_info_t *, dev_info_t *,
167 ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
168 ddi_dma_handle_t *);
169 static int bofi_dma_freehdl(dev_info_t *, dev_info_t *,
170 ddi_dma_handle_t);
171 static int bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
172 ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
173 uint_t *);
174 static int bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
175 ddi_dma_handle_t);
176 static int bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
177 off_t, size_t, uint_t);
178 static int bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
179 enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
180 static int bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
181 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
182 static int bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
183 ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
184 void *result);
185 static int bofi_fm_ereport_callback(sysevent_t *ev, void *cookie);
186
187 evchan_t *bofi_error_chan;
188
189 #define FM_SIMULATED_DMA "simulated.dma"
190 #define FM_SIMULATED_PIO "simulated.pio"
191
192 #if defined(__sparc)
193 static void bofi_dvma_kaddr_load(ddi_dma_handle_t, caddr_t, uint_t,
194 uint_t, ddi_dma_cookie_t *);
195 static void bofi_dvma_unload(ddi_dma_handle_t, uint_t, uint_t);
196 static void bofi_dvma_sync(ddi_dma_handle_t, uint_t, uint_t);
197 static void bofi_dvma_reserve(dev_info_t *, ddi_dma_handle_t);
198 #endif
199 static int driver_under_test(dev_info_t *);
200 static int bofi_check_acc_hdl(ddi_acc_impl_t *);
201 static int bofi_check_dma_hdl(ddi_dma_impl_t *);
202 static int bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
203 ddi_eventcookie_t eventhdl, void *impl_data);
204
205 static struct bus_ops bofi_bus_ops = {
206 BUSO_REV,
207 bofi_map,
208 NULL,
209 NULL,
210 NULL,
211 i_ddi_map_fault,
212 bofi_dma_map,
213 bofi_dma_allochdl,
214 bofi_dma_freehdl,
215 bofi_dma_bindhdl,
216 bofi_dma_unbindhdl,
217 bofi_dma_flush,
218 bofi_dma_win,
219 bofi_dma_ctl,
220 NULL,
221 ddi_bus_prop_op,
222 ndi_busop_get_eventcookie,
223 ndi_busop_add_eventcall,
224 ndi_busop_remove_eventcall,
225 bofi_post_event,
226 NULL,
227 0,
228 0,
229 0,
230 0,
231 0,
232 0,
233 0,
234 bofi_intr_ops
235 };
236
237 static struct cb_ops bofi_cb_ops = {
238 bofi_open, /* open */
239 bofi_close, /* close */
240 nodev, /* strategy */
241 nodev, /* print */
242 nodev, /* dump */
243 nodev, /* read */
244 nodev, /* write */
245 bofi_ioctl, /* ioctl */
246 nodev, /* devmap */
247 nodev, /* mmap */
248 nodev, /* segmap */
249 nochpoll, /* chpoll */
250 ddi_prop_op, /* prop_op */
251 NULL, /* for STREAMS drivers */
252 D_MP, /* driver compatibility flag */
253 CB_REV, /* cb_ops revision */
254 nodev, /* aread */
255 nodev /* awrite */
256 };
257
258 static struct dev_ops bofi_ops = {
259 DEVO_REV, /* driver build version */
260 0, /* device reference count */
261 bofi_getinfo,
262 nulldev,
263 nulldev, /* probe */
264 bofi_attach,
265 bofi_detach,
266 nulldev, /* reset */
267 &bofi_cb_ops,
268 (struct bus_ops *)NULL,
269 nulldev, /* power */
270 ddi_quiesce_not_needed, /* quiesce */
271 };
272
273 /* module configuration stuff */
274 static void *statep;
275
276 static struct modldrv modldrv = {
277 &mod_driverops,
278 "bofi driver",
279 &bofi_ops
280 };
281
282 static struct modlinkage modlinkage = {
283 MODREV_1,
284 &modldrv,
285 0
286 };
287
288 static struct bus_ops save_bus_ops;
289
290 #if defined(__sparc)
291 static struct dvma_ops bofi_dvma_ops = {
292 DVMAO_REV,
293 bofi_dvma_kaddr_load,
294 bofi_dvma_unload,
295 bofi_dvma_sync
296 };
297 #endif
298
299 /*
300 * support routine - map user page into kernel virtual
301 */
302 static caddr_t
303 dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
304 {
305 struct buf buf;
306 struct proc proc;
307
308 /*
309 * mock up a buf structure so we can call bp_mapin_common()
310 */
311 buf.b_flags = B_PHYS;
312 buf.b_un.b_addr = (caddr_t)addr;
313 buf.b_bcount = (size_t)len;
314 proc.p_as = as;
315 buf.b_proc = &proc;
316 return (bp_mapin_common(&buf, flag));
317 }
318
319
320 /*
321 * support routine - map page chain into kernel virtual
322 */
323 static caddr_t
324 dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
325 {
326 struct buf buf;
327
328 /*
329 * mock up a buf structure so we can call bp_mapin_common()
330 */
331 buf.b_flags = B_PAGEIO;
332 buf.b_un.b_addr = (caddr_t)(uintptr_t)offset;
333 buf.b_bcount = (size_t)len;
334 buf.b_pages = pp;
335 return (bp_mapin_common(&buf, flag));
336 }
337
338
339 /*
340 * support routine - map page array into kernel virtual
341 */
342 static caddr_t
343 dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
344 int flag)
345 {
346 struct buf buf;
347 struct proc proc;
348
349 /*
350 * mock up a buf structure so we can call bp_mapin_common()
351 */
352 buf.b_flags = B_PHYS|B_SHADOW;
353 buf.b_un.b_addr = addr;
354 buf.b_bcount = len;
355 buf.b_shadow = pplist;
356 proc.p_as = as;
357 buf.b_proc = &proc;
358 return (bp_mapin_common(&buf, flag));
359 }
360
361
362 /*
363 * support routine - map dmareq into kernel virtual if not already mapped
364 * fills in *lenp with the length
365 * *mapaddrp will be the new kernel virtual address - or NULL if no mapping needed
366 */
367 static caddr_t
368 ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
369 offset_t *lenp)
370 {
371 int sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP: VM_NOSLEEP;
372
373 *lenp = dmareqp->dmar_object.dmao_size;
374 if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
375 *mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
376 dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
377 dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, sleep);
378 return (*mapaddrp);
379 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
380 *mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
381 dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
382 dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
383 dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
384 return (*mapaddrp);
385 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas) {
386 *mapaddrp = NULL;
387 return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
388 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
389 *mapaddrp = NULL;
390 return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
391 } else {
392 *mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
393 dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
394 dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
395 return (*mapaddrp);
396 }
397 }
398
399
400 /*
401 * support routine - free off kernel virtual mapping as allocated by
402 * ddi_dmareq_mapin()
403 */
404 static void
405 ddi_dmareq_mapout(caddr_t addr, offset_t len, int map_flags, page_t *pp,
406 page_t **pplist)
407 {
408 struct buf buf;
409
410 if (addr == NULL)
411 return;
412 /*
413 * mock up a buf structure
414 */
415 buf.b_flags = B_REMAPPED | map_flags;
416 buf.b_un.b_addr = addr;
417 buf.b_bcount = (size_t)len;
418 buf.b_pages = pp;
419 buf.b_shadow = pplist;
420 bp_mapout(&buf);
421 }
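
/*
 * The two routines above are intended to be used as a pair; a sketch of
 * the expected calling sequence (error handling omitted):
 *
 *	offset_t len;
 *	caddr_t mapaddr;
 *	caddr_t addr = ddi_dmareq_mapin(dmareqp, &mapaddr, &len);
 *	...access the buffer via addr...
 *	ddi_dmareq_mapout(mapaddr, len, map_flags, pp, pplist);
 *
 * where map_flags/pp/pplist describe how the mapping was set up, and
 * ddi_dmareq_mapout() is a no-op when mapaddr came back NULL, i.e. when
 * the object was already mapped into kernel virtual.
 */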
422
423 static time_t
424 bofi_gettime()
425 {
426 timestruc_t ts;
427
428 gethrestime(&ts);
429 return (ts.tv_sec);
430 }
431
432 /*
433 * reset the bus_ops structure of the specified nexus to point to
434 * the original values in the save_bus_ops structure.
435 *
436 * Note that both this routine and modify_bus_ops() rely on the current
437 * behavior of the framework in that nexus drivers are not unloadable
438 *
439 */
440
441 static int
442 reset_bus_ops(char *name, struct bus_ops *bop)
443 {
444 struct modctl *modp;
445 struct modldrv *mp;
446 struct bus_ops *bp;
447 struct dev_ops *ops;
448
449 mutex_enter(&mod_lock);
450 /*
451 * find specified module
452 */
453 modp = &modules;
454 do {
455 if (strcmp(name, modp->mod_modname) == 0) {
456 if (!modp->mod_linkage) {
457 mutex_exit(&mod_lock);
458 return (0);
459 }
460 mp = modp->mod_linkage->ml_linkage[0];
461 if (!mp || !mp->drv_dev_ops) {
462 mutex_exit(&mod_lock);
463 return (0);
464 }
465 ops = mp->drv_dev_ops;
466 bp = ops->devo_bus_ops;
467 if (!bp) {
468 mutex_exit(&mod_lock);
469 return (0);
470 }
471 if (ops->devo_refcnt > 0) {
472 /*
473 * As long as devices are active with modified
474 * bus ops bofi must not go away. There may be
475 * drivers with modified access or dma handles.
476 */
477 mutex_exit(&mod_lock);
478 return (0);
479 }
480 cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
481 mp->drv_linkinfo);
482 bp->bus_intr_op = bop->bus_intr_op;
483 bp->bus_post_event = bop->bus_post_event;
484 bp->bus_map = bop->bus_map;
485 bp->bus_dma_map = bop->bus_dma_map;
486 bp->bus_dma_allochdl = bop->bus_dma_allochdl;
487 bp->bus_dma_freehdl = bop->bus_dma_freehdl;
488 bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
489 bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
490 bp->bus_dma_flush = bop->bus_dma_flush;
491 bp->bus_dma_win = bop->bus_dma_win;
492 bp->bus_dma_ctl = bop->bus_dma_ctl;
493 mutex_exit(&mod_lock);
494 return (1);
495 }
496 } while ((modp = modp->mod_next) != &modules);
497 mutex_exit(&mod_lock);
498 return (0);
499 }
500
501 /*
502 * modify the bus_ops structure of the specified nexus to point to bofi
503 * routines, saving the original values in the save_bus_ops structure
504 */
505
506 static int
507 modify_bus_ops(char *name, struct bus_ops *bop)
508 {
509 struct modctl *modp;
510 struct modldrv *mp;
511 struct bus_ops *bp;
512 struct dev_ops *ops;
513
514 if (ddi_name_to_major(name) == -1)
515 return (0);
516
517 mutex_enter(&mod_lock);
518 /*
519 * find specified module
520 */
521 modp = &modules;
522 do {
523 if (strcmp(name, modp->mod_modname) == 0) {
524 if (!modp->mod_linkage) {
525 mutex_exit(&mod_lock);
526 return (0);
527 }
528 mp = modp->mod_linkage->ml_linkage[0];
529 if (!mp || !mp->drv_dev_ops) {
530 mutex_exit(&mod_lock);
531 return (0);
532 }
533 ops = mp->drv_dev_ops;
534 bp = ops->devo_bus_ops;
535 if (!bp) {
536 mutex_exit(&mod_lock);
537 return (0);
538 }
539 if (ops->devo_refcnt == 0) {
540 /*
541 * If there is no device active for this
542 * module then there is nothing to do for bofi.
543 */
544 mutex_exit(&mod_lock);
545 return (0);
546 }
547 cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
548 mp->drv_linkinfo);
549 save_bus_ops = *bp;
550 bp->bus_intr_op = bop->bus_intr_op;
551 bp->bus_post_event = bop->bus_post_event;
552 bp->bus_map = bop->bus_map;
553 bp->bus_dma_map = bop->bus_dma_map;
554 bp->bus_dma_allochdl = bop->bus_dma_allochdl;
555 bp->bus_dma_freehdl = bop->bus_dma_freehdl;
556 bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
557 bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
558 bp->bus_dma_flush = bop->bus_dma_flush;
559 bp->bus_dma_win = bop->bus_dma_win;
560 bp->bus_dma_ctl = bop->bus_dma_ctl;
561 mutex_exit(&mod_lock);
562 return (1);
563 }
564 } while ((modp = modp->mod_next) != &modules);
565 mutex_exit(&mod_lock);
566 return (0);
567 }
568
569
570 int
571 _init(void)
572 {
573 int e;
574
575 e = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
576 if (e != 0)
577 return (e);
578 if ((e = mod_install(&modlinkage)) != 0)
579 ddi_soft_state_fini(&statep);
580 return (e);
581 }
582
583
584 int
585 _fini(void)
586 {
587 int e;
588
589 if ((e = mod_remove(&modlinkage)) != 0)
590 return (e);
591 ddi_soft_state_fini(&statep);
592 return (e);
593 }
594
595
596 int
597 _info(struct modinfo *modinfop)
598 {
599 return (mod_info(&modlinkage, modinfop));
600 }
601
602
603 static int
604 bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
605 {
606 char *name;
607 char buf[80];
608 int i;
609 int s, ss;
610 int size = NAMESIZE;
611 int new_string;
612 char *ptr;
613
614 if (cmd != DDI_ATTACH)
615 return (DDI_FAILURE);
616 /*
617 * only one instance - but we clone using the open routine
618 */
619 if (ddi_get_instance(dip) > 0)
620 return (DDI_FAILURE);
621
622 if (!initialized) {
623 if ((name = ddi_get_name(dip)) == NULL)
624 return (DDI_FAILURE);
625 (void) snprintf(buf, sizeof (buf), "%s,ctl", name);
626 if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
627 DDI_PSEUDO, NULL) == DDI_FAILURE)
628 return (DDI_FAILURE);
629
630 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
631 &bofi_low_cookie) != DDI_SUCCESS) {
632 ddi_remove_minor_node(dip, buf);
633 return (DDI_FAILURE); /* fail attach */
634 }
635 /*
636 * get nexus name (from conf file)
637 */
638 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
639 "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
640 ddi_remove_minor_node(dip, buf);
641 return (DDI_FAILURE);
642 }
643 /*
644 * get range-checking mode for DDI register accesses (from conf file)
645 */
646 if ((bofi_range_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
647 dip, 0, "bofi-range-check", &ptr)) != DDI_PROP_SUCCESS)
648 bofi_range_check = 0;
649 else if (strcmp(ptr, "panic") == 0)
650 bofi_range_check = 2;
651 else if (strcmp(ptr, "warn") == 0)
652 bofi_range_check = 1;
653 else
654 bofi_range_check = 0;
655 ddi_prop_free(ptr);
656
657 /*
658 * get whether to prevent direct access to registers
659 */
660 if ((bofi_ddi_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
661 dip, 0, "bofi-ddi-check", &ptr)) != DDI_PROP_SUCCESS)
662 bofi_ddi_check = 0;
663 else if (strcmp(ptr, "on") == 0)
664 bofi_ddi_check = 1;
665 else
666 bofi_ddi_check = 0;
667 ddi_prop_free(ptr);
668
669 /*
670 * get whether to do copy on ddi_dma_sync
671 */
672 if ((bofi_sync_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
673 dip, 0, "bofi-sync-check", &ptr)) != DDI_PROP_SUCCESS)
674 bofi_sync_check = 0;
675 else if (strcmp(ptr, "on") == 0)
676 bofi_sync_check = 1;
677 else
678 bofi_sync_check = 0;
679 ddi_prop_free(ptr);
680
681 /*
682 * get driver-under-test names (from conf file)
683 */
684 size = NAMESIZE;
685 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
686 "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
687 driver_list[0] = 0;
688 /*
689 * and convert into a sequence of strings
690 */
691 driver_list_neg = 1;
692 new_string = 1;
693 driver_list_size = strlen(driver_list);
694 for (i = 0; i < driver_list_size; i++) {
695 if (driver_list[i] == ' ') {
696 driver_list[i] = '\0';
697 new_string = 1;
698 } else if (new_string) {
699 if (driver_list[i] != '!')
700 driver_list_neg = 0;
701 new_string = 0;
702 }
703 }
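		/*
		 * e.g. a "bofi-to-test" value of "foo bar" now sits in
		 * driver_list as the NUL-separated names "foo\0bar", and
		 * driver_list_neg is still 1 only if every name began
		 * with '!'.
		 */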
704 /*
705 * initialize mutex, lists
706 */
707 mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
708 NULL);
709 /*
710 * fake up iblock cookie - need to protect ourselves
711 * against drivers that use hilevel interrupts
712 */
713 ss = spl8();
714 s = spl8();
715 splx(ss);
716 mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
717 mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
718 (void *)bofi_low_cookie);
719 shadow_list.next = &shadow_list;
720 shadow_list.prev = &shadow_list;
721 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
722 hhash_table[i].hnext = &hhash_table[i];
723 hhash_table[i].hprev = &hhash_table[i];
724 dhash_table[i].dnext = &dhash_table[i];
725 dhash_table[i].dprev = &dhash_table[i];
726 }
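		/*
		 * thread the link array into a freelist - each entry
		 * points at its predecessor, and the head of the
		 * freelist is the final entry
		 */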
727 for (i = 1; i < BOFI_NLINKS; i++)
728 bofi_link_array[i].link = &bofi_link_array[i-1];
729 bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
730 /*
731 * overlay bus_ops structure
732 */
733 if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
734 ddi_remove_minor_node(dip, buf);
735 mutex_destroy(&clone_tab_mutex);
736 mutex_destroy(&bofi_mutex);
737 mutex_destroy(&bofi_low_mutex);
738 return (DDI_FAILURE);
739 }
740 if (sysevent_evc_bind(FM_ERROR_CHAN, &bofi_error_chan, 0) == 0)
741 (void) sysevent_evc_subscribe(bofi_error_chan, "bofi",
742 EC_FM, bofi_fm_ereport_callback, NULL, 0);
743
744 /*
745 * save dip for getinfo
746 */
747 our_dip = dip;
748 ddi_report_dev(dip);
749 initialized = 1;
750 }
751 return (DDI_SUCCESS);
752 }
753
754
755 static int
756 bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
757 {
758 char *name;
759 char buf[80];
760
761 if (cmd != DDI_DETACH)
762 return (DDI_FAILURE);
763 if (ddi_get_instance(dip) > 0)
764 return (DDI_FAILURE);
765 if ((name = ddi_get_name(dip)) == NULL)
766 return (DDI_FAILURE);
767 (void) snprintf(buf, sizeof (buf), "%s,ctl", name);
768 mutex_enter(&bofi_low_mutex);
769 mutex_enter(&bofi_mutex);
770 /*
771 * make sure the bofi test harness is no longer in use
772 */
773 if (shadow_list.next != &shadow_list || errent_listp != NULL) {
774 mutex_exit(&bofi_mutex);
775 mutex_exit(&bofi_low_mutex);
776 return (DDI_FAILURE);
777 }
778 mutex_exit(&bofi_mutex);
779 mutex_exit(&bofi_low_mutex);
780
781 /*
782 * restore bus_ops structure
783 */
784 if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
785 return (DDI_FAILURE);
786
787 (void) sysevent_evc_unbind(bofi_error_chan);
788
789 mutex_destroy(&clone_tab_mutex);
790 mutex_destroy(&bofi_mutex);
791 mutex_destroy(&bofi_low_mutex);
792 ddi_remove_minor_node(dip, buf);
793 our_dip = NULL;
794 initialized = 0;
795 return (DDI_SUCCESS);
796 }
797
798
799 /* ARGSUSED */
800 static int
801 bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
802 {
803 dev_t dev = (dev_t)arg;
804 int minor = (int)getminor(dev);
805 int retval;
806
807 switch (cmd) {
808 case DDI_INFO_DEVT2DEVINFO:
809 if (minor != 0 || our_dip == NULL) {
810 *result = (void *)NULL;
811 retval = DDI_FAILURE;
812 } else {
813 *result = (void *)our_dip;
814 retval = DDI_SUCCESS;
815 }
816 break;
817 case DDI_INFO_DEVT2INSTANCE:
818 *result = (void *)0;
819 retval = DDI_SUCCESS;
820 break;
821 default:
822 retval = DDI_FAILURE;
823 }
824 return (retval);
825 }
826
827
828 /* ARGSUSED */
829 static int
830 bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
831 {
832 int minor = (int)getminor(*devp);
833 struct bofi_errent *softc;
834
835 /*
836 * only allow open on minor=0 - the clone device
837 */
838 if (minor != 0)
839 return (ENXIO);
840 /*
841 * fail if not attached
842 */
843 if (!initialized)
844 return (ENXIO);
845 /*
846 * find a free slot and grab it
847 */
848 mutex_enter(&clone_tab_mutex);
849 for (minor = 1; minor < NCLONES; minor++) {
850 if (clone_tab[minor] == 0) {
851 clone_tab[minor] = 1;
852 break;
853 }
854 }
855 mutex_exit(&clone_tab_mutex);
856 if (minor == NCLONES)
857 return (EAGAIN);
858 /*
859 * soft state structure for this clone is used to maintain a list
860 * of allocated errdefs so they can be freed on close
861 */
862 if (ddi_soft_state_zalloc(statep, minor) != DDI_SUCCESS) {
863 mutex_enter(&clone_tab_mutex);
864 clone_tab[minor] = 0;
865 mutex_exit(&clone_tab_mutex);
866 return (EAGAIN);
867 }
868 softc = ddi_get_soft_state(statep, minor);
869 softc->cnext = softc;
870 softc->cprev = softc;
871
872 *devp = makedevice(getmajor(*devp), minor);
873 return (0);
874 }
875
876
877 /* ARGSUSED */
878 static int
879 bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
880 {
881 int minor = (int)getminor(dev);
882 struct bofi_errent *softc;
883 struct bofi_errent *ep, *next_ep;
884
885 softc = ddi_get_soft_state(statep, minor);
886 if (softc == NULL)
887 return (ENXIO);
888 /*
889 * find list of errdefs and free them off
890 */
891 for (ep = softc->cnext; ep != softc; ) {
892 next_ep = ep->cnext;
893 (void) bofi_errdef_free(ep);
894 ep = next_ep;
895 }
896 /*
897 * free clone tab slot
898 */
899 mutex_enter(&clone_tab_mutex);
900 clone_tab[minor] = 0;
901 mutex_exit(&clone_tab_mutex);
902
903 ddi_soft_state_free(statep, minor);
904 return (0);
905 }
906
907
908 /* ARGSUSED */
909 static int
910 bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
911 int *rvalp)
912 {
913 struct bofi_errent *softc;
914 int minor = (int)getminor(dev);
915 struct bofi_errdef errdef;
916 struct bofi_errctl errctl;
917 struct bofi_errstate errstate;
918 void *ed_handle;
919 struct bofi_get_handles get_handles;
920 struct bofi_get_hdl_info hdl_info;
921 struct handle_info *hdlip;
922 struct handle_info *hib;
923
924 char *buffer;
925 char *bufptr;
926 char *endbuf;
927 int req_count, count, err;
928 char *namep;
929 struct bofi_shadow *hp;
930 int retval;
931 struct bofi_shadow *hhashp;
932 int i;
933
934 switch (cmd) {
935 case BOFI_ADD_DEF:
936 /*
937 * add a new error definition
938 */
939 #ifdef _MULTI_DATAMODEL
940 switch (ddi_model_convert_from(mode & FMODELS)) {
941 case DDI_MODEL_ILP32:
942 {
943 /*
944 * For use when a 32 bit app makes a call into a
945 * 64 bit ioctl
946 */
947 struct bofi_errdef32 errdef_32;
948
949 if (ddi_copyin((void *)arg, &errdef_32,
950 sizeof (struct bofi_errdef32), mode)) {
951 return (EFAULT);
952 }
953 errdef.namesize = errdef_32.namesize;
954 (void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
955 errdef.instance = errdef_32.instance;
956 errdef.rnumber = errdef_32.rnumber;
957 errdef.offset = errdef_32.offset;
958 errdef.len = errdef_32.len;
959 errdef.access_type = errdef_32.access_type;
960 errdef.access_count = errdef_32.access_count;
961 errdef.fail_count = errdef_32.fail_count;
962 errdef.acc_chk = errdef_32.acc_chk;
963 errdef.optype = errdef_32.optype;
964 errdef.operand = errdef_32.operand;
965 errdef.log.logsize = errdef_32.log.logsize;
966 errdef.log.entries = errdef_32.log.entries;
967 errdef.log.flags = errdef_32.log.flags;
968 errdef.log.wrapcnt = errdef_32.log.wrapcnt;
969 errdef.log.start_time = errdef_32.log.start_time;
970 errdef.log.stop_time = errdef_32.log.stop_time;
971 errdef.log.logbase =
972 (caddr_t)(uintptr_t)errdef_32.log.logbase;
973 errdef.errdef_handle = errdef_32.errdef_handle;
974 break;
975 }
976 case DDI_MODEL_NONE:
977 if (ddi_copyin((void *)arg, &errdef,
978 sizeof (struct bofi_errdef), mode))
979 return (EFAULT);
980 break;
981 }
982 #else /* ! _MULTI_DATAMODEL */
983 if (ddi_copyin((void *)arg, &errdef,
984 sizeof (struct bofi_errdef), mode) != 0)
985 return (EFAULT);
986 #endif /* _MULTI_DATAMODEL */
987 /*
988 * do some validation
989 */
990 if (errdef.fail_count == 0)
991 errdef.optype = 0;
992 if (errdef.optype != 0) {
993 if (errdef.access_type & BOFI_INTR &&
994 errdef.optype != BOFI_DELAY_INTR &&
995 errdef.optype != BOFI_LOSE_INTR &&
996 errdef.optype != BOFI_EXTRA_INTR)
997 return (EINVAL);
998 if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
999 errdef.optype == BOFI_NO_TRANSFER)
1000 return (EINVAL);
1001 if ((errdef.access_type & (BOFI_PIO_RW)) &&
1002 errdef.optype != BOFI_EQUAL &&
1003 errdef.optype != BOFI_OR &&
1004 errdef.optype != BOFI_XOR &&
1005 errdef.optype != BOFI_AND &&
1006 errdef.optype != BOFI_NO_TRANSFER)
1007 return (EINVAL);
1008 }
1009 /*
1010 * find softstate for this clone, so we can tag
1011 * new errdef on to it
1012 */
1013 softc = ddi_get_soft_state(statep, minor);
1014 if (softc == NULL)
1015 return (ENXIO);
1016 /*
1017 * read in name
1018 */
1019 if (errdef.namesize > NAMESIZE)
1020 return (EINVAL);
1021 namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
1022 (void) strncpy(namep, errdef.name, errdef.namesize);
1023
1024 if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
1025 (void) bofi_errdef_free((struct bofi_errent *)
1026 (uintptr_t)errdef.errdef_handle);
1027 kmem_free(namep, errdef.namesize+1);
1028 return (EINVAL);
1029 }
1030 /*
1031 * copy out errdef again, including filled in errdef_handle
1032 */
1033 #ifdef _MULTI_DATAMODEL
1034 switch (ddi_model_convert_from(mode & FMODELS)) {
1035 case DDI_MODEL_ILP32:
1036 {
1037 /*
1038 * For use when a 32 bit app makes a call into a
1039 * 64 bit ioctl
1040 */
1041 struct bofi_errdef32 errdef_32;
1042
1043 errdef_32.namesize = errdef.namesize;
1044 (void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
1045 errdef_32.instance = errdef.instance;
1046 errdef_32.rnumber = errdef.rnumber;
1047 errdef_32.offset = errdef.offset;
1048 errdef_32.len = errdef.len;
1049 errdef_32.access_type = errdef.access_type;
1050 errdef_32.access_count = errdef.access_count;
1051 errdef_32.fail_count = errdef.fail_count;
1052 errdef_32.acc_chk = errdef.acc_chk;
1053 errdef_32.optype = errdef.optype;
1054 errdef_32.operand = errdef.operand;
1055 errdef_32.log.logsize = errdef.log.logsize;
1056 errdef_32.log.entries = errdef.log.entries;
1057 errdef_32.log.flags = errdef.log.flags;
1058 errdef_32.log.wrapcnt = errdef.log.wrapcnt;
1059 errdef_32.log.start_time = errdef.log.start_time;
1060 errdef_32.log.stop_time = errdef.log.stop_time;
1061 errdef_32.log.logbase =
1062 (caddr32_t)(uintptr_t)errdef.log.logbase;
1063 errdef_32.errdef_handle = errdef.errdef_handle;
1064 if (ddi_copyout(&errdef_32, (void *)arg,
1065 sizeof (struct bofi_errdef32), mode) != 0) {
1066 (void) bofi_errdef_free((struct bofi_errent *)
1067 errdef.errdef_handle);
1068 kmem_free(namep, errdef.namesize+1);
1069 return (EFAULT);
1070 }
1071 break;
1072 }
1073 case DDI_MODEL_NONE:
1074 if (ddi_copyout(&errdef, (void *)arg,
1075 sizeof (struct bofi_errdef), mode) != 0) {
1076 (void) bofi_errdef_free((struct bofi_errent *)
1077 errdef.errdef_handle);
1078 kmem_free(namep, errdef.namesize+1);
1079 return (EFAULT);
1080 }
1081 break;
1082 }
1083 #else /* ! _MULTI_DATAMODEL */
1084 if (ddi_copyout(&errdef, (void *)arg,
1085 sizeof (struct bofi_errdef), mode) != 0) {
1086 (void) bofi_errdef_free((struct bofi_errent *)
1087 (uintptr_t)errdef.errdef_handle);
1088 kmem_free(namep, errdef.namesize+1);
1089 return (EFAULT);
1090 }
1091 #endif /* _MULTI_DATAMODEL */
1092 return (0);
1093 case BOFI_DEL_DEF:
1094 /*
1095 * delete existing errdef
1096 */
1097 if (ddi_copyin((void *)arg, &ed_handle,
1098 sizeof (void *), mode) != 0)
1099 return (EFAULT);
1100 return (bofi_errdef_free((struct bofi_errent *)ed_handle));
1101 case BOFI_START:
1102 /*
1103 * start all errdefs corresponding to
1104 * this name and instance
1105 */
1106 if (ddi_copyin((void *)arg, &errctl,
1107 sizeof (struct bofi_errctl), mode) != 0)
1108 return (EFAULT);
1109 /*
1110 * copy in name
1111 */
1112 if (errctl.namesize > NAMESIZE)
1113 return (EINVAL);
1114 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1115 (void) strncpy(namep, errctl.name, errctl.namesize);
1116 bofi_start(&errctl, namep);
1117 kmem_free(namep, errctl.namesize+1);
1118 return (0);
1119 case BOFI_STOP:
1120 /*
1121 * stop all errdefs corresponding to
1122 * this name and instance
1123 */
1124 if (ddi_copyin((void *)arg, &errctl,
1125 sizeof (struct bofi_errctl), mode) != 0)
1126 return (EFAULT);
1127 /*
1128 * copy in name
1129 */
1130 if (errctl.namesize > NAMESIZE)
1131 return (EINVAL);
1132 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1133 (void) strncpy(namep, errctl.name, errctl.namesize);
1134 bofi_stop(&errctl, namep);
1135 kmem_free(namep, errctl.namesize+1);
1136 return (0);
1137 case BOFI_BROADCAST:
1138 /*
1139 * wakeup all errdefs corresponding to
1140 * this name and instance
1141 */
1142 if (ddi_copyin((void *)arg, &errctl,
1143 sizeof (struct bofi_errctl), mode) != 0)
1144 return (EFAULT);
1145 /*
1146 * copy in name
1147 */
1148 if (errctl.namesize > NAMESIZE)
1149 return (EINVAL);
1150 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1151 (void) strncpy(namep, errctl.name, errctl.namesize);
1152 bofi_broadcast(&errctl, namep);
1153 kmem_free(namep, errctl.namesize+1);
1154 return (0);
1155 case BOFI_CLEAR_ACC_CHK:
1156 /*
1157 * clear "acc_chk" for all errdefs corresponding to
1158 * this name and instance
1159 */
1160 if (ddi_copyin((void *)arg, &errctl,
1161 sizeof (struct bofi_errctl), mode) != 0)
1162 return (EFAULT);
1163 /*
1164 * copy in name
1165 */
1166 if (errctl.namesize > NAMESIZE)
1167 return (EINVAL);
1168 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1169 (void) strncpy(namep, errctl.name, errctl.namesize);
1170 bofi_clear_acc_chk(&errctl, namep);
1171 kmem_free(namep, errctl.namesize+1);
1172 return (0);
1173 case BOFI_CLEAR_ERRORS:
1174 /*
1175 * set "fail_count" to 0 for all errdefs corresponding to
1176 * this name and instance whose "access_count"
1177 * has expired.
1178 */
1179 if (ddi_copyin((void *)arg, &errctl,
1180 sizeof (struct bofi_errctl), mode) != 0)
1181 return (EFAULT);
1182 /*
1183 * copy in name
1184 */
1185 if (errctl.namesize > NAMESIZE)
1186 return (EINVAL);
1187 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1188 (void) strncpy(namep, errctl.name, errctl.namesize);
1189 bofi_clear_errors(&errctl, namep);
1190 kmem_free(namep, errctl.namesize+1);
1191 return (0);
1192 case BOFI_CLEAR_ERRDEFS:
1193 /*
1194 * set "access_count" and "fail_count" to 0 for all errdefs
1195 * corresponding to this name and instance
1196 */
1197 if (ddi_copyin((void *)arg, &errctl,
1198 sizeof (struct bofi_errctl), mode) != 0)
1199 return (EFAULT);
1200 /*
1201 * copy in name
1202 */
1203 if (errctl.namesize > NAMESIZE)
1204 return (EINVAL);
1205 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1206 (void) strncpy(namep, errctl.name, errctl.namesize);
1207 bofi_clear_errdefs(&errctl, namep);
1208 kmem_free(namep, errctl.namesize+1);
1209 return (0);
1210 case BOFI_CHK_STATE:
1211 {
1212 struct acc_log_elem *klg;
1213 size_t uls;
1214 /*
1215 * get state for this errdef - read in dummy errstate
1216 * with just the errdef_handle filled in
1217 */
1218 #ifdef _MULTI_DATAMODEL
1219 switch (ddi_model_convert_from(mode & FMODELS)) {
1220 case DDI_MODEL_ILP32:
1221 {
1222 /*
1223 * For use when a 32 bit app makes a call into a
1224 * 64 bit ioctl
1225 */
1226 struct bofi_errstate32 errstate_32;
1227
1228 if (ddi_copyin((void *)arg, &errstate_32,
1229 sizeof (struct bofi_errstate32), mode) != 0) {
1230 return (EFAULT);
1231 }
1232 errstate.fail_time = errstate_32.fail_time;
1233 errstate.msg_time = errstate_32.msg_time;
1234 errstate.access_count = errstate_32.access_count;
1235 errstate.fail_count = errstate_32.fail_count;
1236 errstate.acc_chk = errstate_32.acc_chk;
1237 errstate.errmsg_count = errstate_32.errmsg_count;
1238 (void) strncpy(errstate.buffer, errstate_32.buffer,
1239 ERRMSGSIZE);
1240 errstate.severity = errstate_32.severity;
1241 errstate.log.logsize = errstate_32.log.logsize;
1242 errstate.log.entries = errstate_32.log.entries;
1243 errstate.log.flags = errstate_32.log.flags;
1244 errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1245 errstate.log.start_time = errstate_32.log.start_time;
1246 errstate.log.stop_time = errstate_32.log.stop_time;
1247 errstate.log.logbase =
1248 (caddr_t)(uintptr_t)errstate_32.log.logbase;
1249 errstate.errdef_handle = errstate_32.errdef_handle;
1250 break;
1251 }
1252 case DDI_MODEL_NONE:
1253 if (ddi_copyin((void *)arg, &errstate,
1254 sizeof (struct bofi_errstate), mode) != 0)
1255 return (EFAULT);
1256 break;
1257 }
1258 #else /* ! _MULTI_DATAMODEL */
1259 if (ddi_copyin((void *)arg, &errstate,
1260 sizeof (struct bofi_errstate), mode) != 0)
1261 return (EFAULT);
1262 #endif /* _MULTI_DATAMODEL */
1263 if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
1264 return (EINVAL);
1265 /*
1266 * copy out real errstate structure
1267 */
1268 uls = errstate.log.logsize;
1269 if (errstate.log.entries > uls && uls)
1270 /* insufficient user memory */
1271 errstate.log.entries = uls;
1272 /* always pass back a time */
1273 if (errstate.log.stop_time == 0ul)
1274 (void) drv_getparm(TIME, &(errstate.log.stop_time));
1275
1276 #ifdef _MULTI_DATAMODEL
1277 switch (ddi_model_convert_from(mode & FMODELS)) {
1278 case DDI_MODEL_ILP32:
1279 {
1280 /*
1281 * For use when a 32 bit app makes a call into a
1282 * 64 bit ioctl
1283 */
1284 struct bofi_errstate32 errstate_32;
1285
1286 errstate_32.fail_time = errstate.fail_time;
1287 errstate_32.msg_time = errstate.msg_time;
1288 errstate_32.access_count = errstate.access_count;
1289 errstate_32.fail_count = errstate.fail_count;
1290 errstate_32.acc_chk = errstate.acc_chk;
1291 errstate_32.errmsg_count = errstate.errmsg_count;
1292 (void) strncpy(errstate_32.buffer, errstate.buffer,
1293 ERRMSGSIZE);
1294 errstate_32.severity = errstate.severity;
1295 errstate_32.log.logsize = errstate.log.logsize;
1296 errstate_32.log.entries = errstate.log.entries;
1297 errstate_32.log.flags = errstate.log.flags;
1298 errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1299 errstate_32.log.start_time = errstate.log.start_time;
1300 errstate_32.log.stop_time = errstate.log.stop_time;
1301 errstate_32.log.logbase =
1302 (caddr32_t)(uintptr_t)errstate.log.logbase;
1303 errstate_32.errdef_handle = errstate.errdef_handle;
1304 if (ddi_copyout(&errstate_32, (void *)arg,
1305 sizeof (struct bofi_errstate32), mode) != 0)
1306 return (EFAULT);
1307 break;
1308 }
1309 case DDI_MODEL_NONE:
1310 if (ddi_copyout(&errstate, (void *)arg,
1311 sizeof (struct bofi_errstate), mode) != 0)
1312 return (EFAULT);
1313 break;
1314 }
1315 #else /* ! _MULTI_DATAMODEL */
1316 if (ddi_copyout(&errstate, (void *)arg,
1317 sizeof (struct bofi_errstate), mode) != 0)
1318 return (EFAULT);
1319 #endif /* _MULTI_DATAMODEL */
1320 if (uls && errstate.log.entries &&
1321 ddi_copyout(klg, errstate.log.logbase,
1322 errstate.log.entries * sizeof (struct acc_log_elem),
1323 mode) != 0) {
1324 return (EFAULT);
1325 }
1326 return (retval);
1327 }
1328 case BOFI_CHK_STATE_W:
1329 {
1330 struct acc_log_elem *klg;
1331 size_t uls;
1332 /*
1333 * get state for this errdef - read in dummy errstate
1334 * with just the errdef_handle filled in. Then wait for
1335 * a ddi_report_fault message to come back
1336 */
1337 #ifdef _MULTI_DATAMODEL
1338 switch (ddi_model_convert_from(mode & FMODELS)) {
1339 case DDI_MODEL_ILP32:
1340 {
1341 /*
1342 * For use when a 32 bit app makes a call into a
1343 * 64 bit ioctl
1344 */
1345 struct bofi_errstate32 errstate_32;
1346
1347 if (ddi_copyin((void *)arg, &errstate_32,
1348 sizeof (struct bofi_errstate32), mode) != 0) {
1349 return (EFAULT);
1350 }
1351 errstate.fail_time = errstate_32.fail_time;
1352 errstate.msg_time = errstate_32.msg_time;
1353 errstate.access_count = errstate_32.access_count;
1354 errstate.fail_count = errstate_32.fail_count;
1355 errstate.acc_chk = errstate_32.acc_chk;
1356 errstate.errmsg_count = errstate_32.errmsg_count;
1357 (void) strncpy(errstate.buffer, errstate_32.buffer,
1358 ERRMSGSIZE);
1359 errstate.severity = errstate_32.severity;
1360 errstate.log.logsize = errstate_32.log.logsize;
1361 errstate.log.entries = errstate_32.log.entries;
1362 errstate.log.flags = errstate_32.log.flags;
1363 errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1364 errstate.log.start_time = errstate_32.log.start_time;
1365 errstate.log.stop_time = errstate_32.log.stop_time;
1366 errstate.log.logbase =
1367 (caddr_t)(uintptr_t)errstate_32.log.logbase;
1368 errstate.errdef_handle = errstate_32.errdef_handle;
1369 break;
1370 }
1371 case DDI_MODEL_NONE:
1372 if (ddi_copyin((void *)arg, &errstate,
1373 sizeof (struct bofi_errstate), mode) != 0)
1374 return (EFAULT);
1375 break;
1376 }
1377 #else /* ! _MULTI_DATAMODEL */
1378 if (ddi_copyin((void *)arg, &errstate,
1379 sizeof (struct bofi_errstate), mode) != 0)
1380 return (EFAULT);
1381 #endif /* _MULTI_DATAMODEL */
1382 if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
1383 return (EINVAL);
1384 /*
1385 * copy out real errstate structure
1386 */
1387 uls = errstate.log.logsize;
1389 if (errstate.log.entries > uls && uls)
1390 /* insufficient user memory */
1391 errstate.log.entries = uls;
1392 /* always pass back a time */
1393 if (errstate.log.stop_time == 0ul)
1394 (void) drv_getparm(TIME, &(errstate.log.stop_time));
1395
1396 #ifdef _MULTI_DATAMODEL
1397 switch (ddi_model_convert_from(mode & FMODELS)) {
1398 case DDI_MODEL_ILP32:
1399 {
1400 /*
1401 * For use when a 32 bit app makes a call into a
1402 * 64 bit ioctl
1403 */
1404 struct bofi_errstate32 errstate_32;
1405
1406 errstate_32.fail_time = errstate.fail_time;
1407 errstate_32.msg_time = errstate.msg_time;
1408 errstate_32.access_count = errstate.access_count;
1409 errstate_32.fail_count = errstate.fail_count;
1410 errstate_32.acc_chk = errstate.acc_chk;
1411 errstate_32.errmsg_count = errstate.errmsg_count;
1412 (void) strncpy(errstate_32.buffer, errstate.buffer,
1413 ERRMSGSIZE);
1414 errstate_32.severity = errstate.severity;
1415 errstate_32.log.logsize = errstate.log.logsize;
1416 errstate_32.log.entries = errstate.log.entries;
1417 errstate_32.log.flags = errstate.log.flags;
1418 errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1419 errstate_32.log.start_time = errstate.log.start_time;
1420 errstate_32.log.stop_time = errstate.log.stop_time;
1421 errstate_32.log.logbase =
1422 (caddr32_t)(uintptr_t)errstate.log.logbase;
1423 errstate_32.errdef_handle = errstate.errdef_handle;
1424 if (ddi_copyout(&errstate_32, (void *)arg,
1425 sizeof (struct bofi_errstate32), mode) != 0)
1426 return (EFAULT);
1427 break;
1428 }
1429 case DDI_MODEL_NONE:
1430 if (ddi_copyout(&errstate, (void *)arg,
1431 sizeof (struct bofi_errstate), mode) != 0)
1432 return (EFAULT);
1433 break;
1434 }
1435 #else /* ! _MULTI_DATAMODEL */
1436 if (ddi_copyout(&errstate, (void *)arg,
1437 sizeof (struct bofi_errstate), mode) != 0)
1438 return (EFAULT);
1439 #endif /* _MULTI_DATAMODEL */
1440
1441 if (uls && errstate.log.entries &&
1442 ddi_copyout(klg, errstate.log.logbase,
1443 errstate.log.entries * sizeof (struct acc_log_elem),
1444 mode) != 0) {
1445 return (EFAULT);
1446 }
1447 return (retval);
1448 }
1449 case BOFI_GET_HANDLES:
1450 /*
1451 * display existing handles
1452 */
1453 #ifdef _MULTI_DATAMODEL
1454 switch (ddi_model_convert_from(mode & FMODELS)) {
1455 case DDI_MODEL_ILP32:
1456 {
1457 /*
1458 * For use when a 32 bit app makes a call into a
1459 * 64 bit ioctl
1460 */
1461 struct bofi_get_handles32 get_handles_32;
1462
1463 if (ddi_copyin((void *)arg, &get_handles_32,
1464 sizeof (get_handles_32), mode) != 0) {
1465 return (EFAULT);
1466 }
1467 get_handles.namesize = get_handles_32.namesize;
1468 (void) strncpy(get_handles.name, get_handles_32.name,
1469 NAMESIZE);
1470 get_handles.instance = get_handles_32.instance;
1471 get_handles.count = get_handles_32.count;
1472 get_handles.buffer =
1473 (caddr_t)(uintptr_t)get_handles_32.buffer;
1474 break;
1475 }
1476 case DDI_MODEL_NONE:
1477 if (ddi_copyin((void *)arg, &get_handles,
1478 sizeof (get_handles), mode) != 0)
1479 return (EFAULT);
1480 break;
1481 }
1482 #else /* ! _MULTI_DATAMODEL */
1483 if (ddi_copyin((void *)arg, &get_handles,
1484 sizeof (get_handles), mode) != 0)
1485 return (EFAULT);
1486 #endif /* _MULTI_DATAMODEL */
1487 /*
1488 * read in name
1489 */
1490 if (get_handles.namesize > NAMESIZE)
1491 return (EINVAL);
1492 namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
1493 (void) strncpy(namep, get_handles.name, get_handles.namesize);
1494 req_count = get_handles.count;
1495 bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
1496 endbuf = bufptr + req_count;
1497 /*
1498 * display existing handles
1499 */
1500 mutex_enter(&bofi_low_mutex);
1501 mutex_enter(&bofi_mutex);
1502 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1503 hhashp = &hhash_table[i];
1504 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1505 if (!driver_under_test(hp->dip))
1506 continue;
1507 if (ddi_name_to_major(ddi_get_name(hp->dip)) !=
1508 ddi_name_to_major(namep))
1509 continue;
1510 if (hp->instance != get_handles.instance)
1511 continue;
1512 /*
1513 * print information per handle - note that
1514 * DMA* means an unbound DMA handle
1515 */
1516 (void) snprintf(bufptr, (size_t)(endbuf-bufptr),
1517 " %s %d %s ", hp->name, hp->instance,
1518 (hp->type == BOFI_INT_HDL) ? "INTR" :
1519 (hp->type == BOFI_ACC_HDL) ? "PIO" :
1520 (hp->type == BOFI_DMA_HDL) ? "DMA" :
1521 (hp->hparrayp != NULL) ? "DVMA" : "DMA*");
1522 bufptr += strlen(bufptr);
1523 if (hp->type == BOFI_ACC_HDL) {
1524 if (hp->len == INT_MAX - hp->offset)
1525 (void) snprintf(bufptr,
1526 (size_t)(endbuf-bufptr),
1527 "reg set %d off 0x%llx\n",
1528 hp->rnumber, hp->offset);
1529 else
1530 (void) snprintf(bufptr,
1531 (size_t)(endbuf-bufptr),
1532 "reg set %d off 0x%llx"
1533 " len 0x%llx\n",
1534 hp->rnumber, hp->offset,
1535 hp->len);
1536 } else if (hp->type == BOFI_DMA_HDL)
1537 (void) snprintf(bufptr,
1538 (size_t)(endbuf-bufptr),
1539 "handle no %d len 0x%llx"
1540 " addr 0x%p\n", hp->rnumber,
1541 hp->len, (void *)hp->addr);
1542 else if (hp->type == BOFI_NULL &&
1543 hp->hparrayp == NULL)
1544 (void) snprintf(bufptr,
1545 (size_t)(endbuf-bufptr),
1546 "handle no %d\n", hp->rnumber);
1547 else
1548 (void) snprintf(bufptr,
1549 (size_t)(endbuf-bufptr), "\n");
1550 bufptr += strlen(bufptr);
1551 }
1552 }
1553 mutex_exit(&bofi_mutex);
1554 mutex_exit(&bofi_low_mutex);
1555 err = ddi_copyout(buffer, get_handles.buffer, req_count, mode);
1556 kmem_free(namep, get_handles.namesize+1);
1557 kmem_free(buffer, req_count);
1558 if (err != 0)
1559 return (EFAULT);
1560 else
1561 return (0);
1562 case BOFI_GET_HANDLE_INFO:
1563 /*
1564 * display existing handles
1565 */
1566 #ifdef _MULTI_DATAMODEL
1567 switch (ddi_model_convert_from(mode & FMODELS)) {
1568 case DDI_MODEL_ILP32:
1569 {
1570 /*
1571 * For use when a 32 bit app makes a call into a
1572 * 64 bit ioctl
1573 */
1574 struct bofi_get_hdl_info32 hdl_info_32;
1575
1576 if (ddi_copyin((void *)arg, &hdl_info_32,
1577 sizeof (hdl_info_32), mode)) {
1578 return (EFAULT);
1579 }
1580 hdl_info.namesize = hdl_info_32.namesize;
1581 (void) strncpy(hdl_info.name, hdl_info_32.name,
1582 NAMESIZE);
1583 hdl_info.count = hdl_info_32.count;
1584 hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli;
1585 break;
1586 }
1587 case DDI_MODEL_NONE:
1588 if (ddi_copyin((void *)arg, &hdl_info,
1589 sizeof (hdl_info), mode))
1590 return (EFAULT);
1591 break;
1592 }
1593 #else /* ! _MULTI_DATAMODEL */
1594 if (ddi_copyin((void *)arg, &hdl_info,
1595 sizeof (hdl_info), mode))
1596 return (EFAULT);
1597 #endif /* _MULTI_DATAMODEL */
1598 if (hdl_info.namesize > NAMESIZE)
1599 return (EINVAL);
1600 namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
1601 (void) strncpy(namep, hdl_info.name, hdl_info.namesize);
1602 req_count = hdl_info.count;
1603 count = hdl_info.count = 0; /* the actual no of handles */
1604 if (req_count > 0) {
1605 hib = hdlip =
1606 kmem_zalloc(req_count * sizeof (struct handle_info),
1607 KM_SLEEP);
1608 } else {
1609 hib = hdlip = 0;
1610 req_count = hdl_info.count = 0;
1611 }
1612
1613 /*
1614 * display existing handles
1615 */
1616 mutex_enter(&bofi_low_mutex);
1617 mutex_enter(&bofi_mutex);
1618 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1619 hhashp = &hhash_table[i];
1620 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1621 if (!driver_under_test(hp->dip) ||
1622 ddi_name_to_major(ddi_get_name(hp->dip)) !=
1623 ddi_name_to_major(namep) ||
1624 ++(hdl_info.count) > req_count ||
1625 count == req_count)
1626 continue;
1627
1628 hdlip->instance = hp->instance;
1629 hdlip->rnumber = hp->rnumber;
1630 switch (hp->type) {
1631 case BOFI_ACC_HDL:
1632 hdlip->access_type = BOFI_PIO_RW;
1633 hdlip->offset = hp->offset;
1634 hdlip->len = hp->len;
1635 break;
1636 case BOFI_DMA_HDL:
1637 hdlip->access_type = 0;
1638 if (hp->flags & DDI_DMA_WRITE)
1639 hdlip->access_type |=
1640 BOFI_DMA_W;
1641 if (hp->flags & DDI_DMA_READ)
1642 hdlip->access_type |=
1643 BOFI_DMA_R;
1644 hdlip->len = hp->len;
1645 hdlip->addr_cookie =
1646 (uint64_t)(uintptr_t)hp->addr;
1647 break;
1648 case BOFI_INT_HDL:
1649 hdlip->access_type = BOFI_INTR;
1650 break;
1651 default:
1652 hdlip->access_type = 0;
1653 break;
1654 }
1655 hdlip++;
1656 count++;
1657 }
1658 }
1659 mutex_exit(&bofi_mutex);
1660 mutex_exit(&bofi_low_mutex);
1661 err = 0;
1662 #ifdef _MULTI_DATAMODEL
1663 switch (ddi_model_convert_from(mode & FMODELS)) {
1664 case DDI_MODEL_ILP32:
1665 {
1666 /*
1667 * For use when a 32 bit app makes a call into a
1668 * 64 bit ioctl
1669 */
1670 struct bofi_get_hdl_info32 hdl_info_32;
1671
1672 hdl_info_32.namesize = hdl_info.namesize;
1673 (void) strncpy(hdl_info_32.name, hdl_info.name,
1674 NAMESIZE);
1675 hdl_info_32.count = hdl_info.count;
1676 hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
1677 if (ddi_copyout(&hdl_info_32, (void *)arg,
1678 sizeof (hdl_info_32), mode) != 0) {
1679 kmem_free(namep, hdl_info.namesize+1);
1680 if (req_count > 0)
1681 kmem_free(hib,
1682 req_count * sizeof (*hib));
1683 return (EFAULT);
1684 }
1685 break;
1686 }
1687 case DDI_MODEL_NONE:
1688 if (ddi_copyout(&hdl_info, (void *)arg,
1689 sizeof (hdl_info), mode) != 0) {
1690 kmem_free(namep, hdl_info.namesize+1);
1691 if (req_count > 0)
1692 kmem_free(hib,
1693 req_count * sizeof (*hib));
1694 return (EFAULT);
1695 }
1696 break;
1697 }
1698 #else /* ! _MULTI_DATAMODEL */
1699 if (ddi_copyout(&hdl_info, (void *)arg,
1700 sizeof (hdl_info), mode) != 0) {
1701 kmem_free(namep, hdl_info.namesize+1);
1702 if (req_count > 0)
1703 kmem_free(hib, req_count * sizeof (*hib));
1704 return (EFAULT);
1705 }
1706 #endif /* ! _MULTI_DATAMODEL */
1707 if (count > 0) {
1708 if (ddi_copyout(hib, hdl_info.hdli,
1709 count * sizeof (*hib), mode) != 0) {
1710 kmem_free(namep, hdl_info.namesize+1);
1711 if (req_count > 0)
1712 kmem_free(hib,
1713 req_count * sizeof (*hib));
1714 return (EFAULT);
1715 }
1716 }
1717 kmem_free(namep, hdl_info.namesize+1);
1718 if (req_count > 0)
1719 kmem_free(hib, req_count * sizeof (*hib));
1720 return (err);
1721 default:
1722 return (ENOTTY);
1723 }
1724 }
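
/*
 * For reference, a user-level client drives the above ioctls roughly as
 * follows - a sketch only, with error handling and most errdef fields
 * omitted, and with an illustrative device path:
 *
 *	int fd = open("/devices/pseudo/bofi@0:bofi,ctl", O_RDWR);
 *	struct bofi_errdef ed;
 *	...fill in ed.name, ed.instance, ed.access_type, ed.optype etc...
 *	ioctl(fd, BOFI_ADD_DEF, &ed);	(ed.errdef_handle is filled in)
 *	ioctl(fd, BOFI_START, &errctl);	(activate matching errdefs)
 *	...exercise the driver under test...
 *	close(fd);	(remaining errdefs for this clone freed here)
 */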
1725
1726
1727 /*
1728 * add a new error definition
1729 */
1730 static int
1731 bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
1732 struct bofi_errent *softc)
1733 {
1734 struct bofi_errent *ep;
1735 struct bofi_shadow *hp;
1736 struct bofi_link *lp;
1737
1738 /*
1739 * allocate errdef structure
1740 */
1741 ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
1742 ep->errdef = *errdefp;
1743 ep->name = namep;
1744 ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
1745 ep->errstate.severity = DDI_SERVICE_RESTORED;
1746 ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
1747 cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
1748 /*
1749 * allocate space for logging
1750 */
1751 ep->errdef.log.entries = 0;
1752 ep->errdef.log.wrapcnt = 0;
1753 if (ep->errdef.access_type & BOFI_LOG)
1754 ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
1755 ep->errdef.log.logsize, KM_SLEEP);
1756 else
1757 ep->logbase = NULL;
1758 /*
1759 * put on in-use list
1760 */
1761 mutex_enter(&bofi_low_mutex);
1762 mutex_enter(&bofi_mutex);
1763 ep->next = errent_listp;
1764 errent_listp = ep;
1765 /*
1766 * and add it to the per-clone list
1767 */
1768 ep->cnext = softc->cnext;
1769 softc->cnext->cprev = ep;
1770 ep->cprev = softc;
1771 softc->cnext = ep;
1772
1773 /*
1774 * look for corresponding shadow handle structures and if we find any
1775 * tag this errdef structure on to their link lists.
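 * For DMA errdefs the arithmetic below additionally checks that the
 * errdef window (offset, offset + len) within the handle covers at
 * least one complete 64-bit aligned word - rounding the window end down
 * and its start up to a uint64_t boundary - since DMA corruption is
 * applied to whole uint64_t words.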
1776 */
1777 for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1778 if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
1779 hp->instance == errdefp->instance &&
1780 (((errdefp->access_type & BOFI_DMA_RW) &&
1781 (ep->errdef.rnumber == -1 ||
1782 hp->rnumber == ep->errdef.rnumber) &&
1783 hp->type == BOFI_DMA_HDL &&
1784 (((uintptr_t)(hp->addr + ep->errdef.offset +
1785 ep->errdef.len) & ~LLSZMASK) >
1786 ((uintptr_t)((hp->addr + ep->errdef.offset) +
1787 LLSZMASK) & ~LLSZMASK))) ||
1788 ((errdefp->access_type & BOFI_INTR) &&
1789 hp->type == BOFI_INT_HDL) ||
1790 ((errdefp->access_type & BOFI_PIO_RW) &&
1791 hp->type == BOFI_ACC_HDL &&
1792 (errdefp->rnumber == -1 ||
1793 hp->rnumber == errdefp->rnumber) &&
1794 (errdefp->len == 0 ||
1795 hp->offset < errdefp->offset + errdefp->len) &&
1796 hp->offset + hp->len > errdefp->offset))) {
1797 lp = bofi_link_freelist;
1798 if (lp != NULL) {
1799 bofi_link_freelist = lp->link;
1800 lp->errentp = ep;
1801 lp->link = hp->link;
1802 hp->link = lp;
1803 }
1804 }
1805 }
1806 errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
1807 mutex_exit(&bofi_mutex);
1808 mutex_exit(&bofi_low_mutex);
1809 ep->softintr_id = NULL;
1810 return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
1811 NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
1812 }
1813
1814
1815 /*
1816 * delete existing errdef
1817 */
1818 static int
1819 bofi_errdef_free(struct bofi_errent *ep)
1820 {
1821 struct bofi_errent *hep, *prev_hep;
1822 struct bofi_link *lp, *prev_lp, *next_lp;
1823 struct bofi_shadow *hp;
1824
1825 mutex_enter(&bofi_low_mutex);
1826 mutex_enter(&bofi_mutex);
1827 /*
1828 * don't just assume it's a valid ep - check that it's on the
1829 * in-use list
1830 */
1831 prev_hep = NULL;
1832 for (hep = errent_listp; hep != NULL; ) {
1833 if (hep == ep)
1834 break;
1835 prev_hep = hep;
1836 hep = hep->next;
1837 }
1838 if (hep == NULL) {
1839 mutex_exit(&bofi_mutex);
1840 mutex_exit(&bofi_low_mutex);
1841 return (EINVAL);
1842 }
1843 /*
1844 * found it - delete from in-use list
1845 */
1846
1847 if (prev_hep)
1848 prev_hep->next = hep->next;
1849 else
1850 errent_listp = hep->next;
1851 /*
1852 * and take it off the per-clone list
1853 */
1854 hep->cnext->cprev = hep->cprev;
1855 hep->cprev->cnext = hep->cnext;
1856 /*
1857 * see if we are on any shadow handle link lists - and if we
1858 * are then take us off
1859 */
1860 for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1861 prev_lp = NULL;
1862 for (lp = hp->link; lp != NULL; ) {
1863 if (lp->errentp == ep) {
1864 if (prev_lp)
1865 prev_lp->link = lp->link;
1866 else
1867 hp->link = lp->link;
1868 next_lp = lp->link;
1869 lp->link = bofi_link_freelist;
1870 bofi_link_freelist = lp;
1871 lp = next_lp;
1872 } else {
1873 prev_lp = lp;
1874 lp = lp->link;
1875 }
1876 }
1877 }
1878 mutex_exit(&bofi_mutex);
1879 mutex_exit(&bofi_low_mutex);
1880
1881 cv_destroy(&ep->cv);
1882 kmem_free(ep->name, ep->errdef.namesize+1);
1883 if ((ep->errdef.access_type & BOFI_LOG) &&
1884 ep->errdef.log.logsize && ep->logbase) /* double check */
1885 kmem_free(ep->logbase,
1886 sizeof (struct acc_log_elem) * ep->errdef.log.logsize);
1887
1888 if (ep->softintr_id)
1889 ddi_remove_softintr(ep->softintr_id);
1890 kmem_free(ep, sizeof (struct bofi_errent));
1891 return (0);
1892 }
1893
1894
1895 /*
1896 * start all errdefs corresponding to this name and instance
1897 */
1898 static void
1899 bofi_start(struct bofi_errctl *errctlp, char *namep)
1900 {
1901 struct bofi_errent *ep;
1902
1903 /*
1904 * look for any errdefs with matching name and instance
1905 */
1906 mutex_enter(&bofi_low_mutex);
1907 for (ep = errent_listp; ep != NULL; ep = ep->next)
1908 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1909 errctlp->instance == ep->errdef.instance) {
1910 ep->state |= BOFI_DEV_ACTIVE;
1911 (void) drv_getparm(TIME, &(ep->errdef.log.start_time));
1912 ep->errdef.log.stop_time = 0ul;
1913 }
1914 mutex_exit(&bofi_low_mutex);
1915 }
1916
1917
1918 /*
1919 * stop all errdefs corresponding to this name and instance
1920 */
1921 static void
1922 bofi_stop(struct bofi_errctl *errctlp, char *namep)
1923 {
1924 struct bofi_errent *ep;
1925
1926 /*
1927 * look for any errdefs with matching name and instance
1928 */
1929 mutex_enter(&bofi_low_mutex);
1930 for (ep = errent_listp; ep != NULL; ep = ep->next)
1931 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1932 errctlp->instance == ep->errdef.instance) {
1933 ep->state &= ~BOFI_DEV_ACTIVE;
1934 if (ep->errdef.log.stop_time == 0ul)
1935 (void) drv_getparm(TIME,
1936 &(ep->errdef.log.stop_time));
1937 }
1938 mutex_exit(&bofi_low_mutex);
1939 }
1940
1941
1942 /*
1943  * wake up any thread waiting on this errdef
1944 */
1945 static uint_t
1946 bofi_signal(caddr_t arg)
1947 {
1948 struct bofi_errdef *edp = (struct bofi_errdef *)arg;
1949 struct bofi_errent *hep;
1950 struct bofi_errent *ep =
1951 (struct bofi_errent *)(uintptr_t)edp->errdef_handle;
1952
1953 mutex_enter(&bofi_low_mutex);
1954 for (hep = errent_listp; hep != NULL; ) {
1955 if (hep == ep)
1956 break;
1957 hep = hep->next;
1958 }
1959 if (hep == NULL) {
1960 mutex_exit(&bofi_low_mutex);
1961 return (DDI_INTR_UNCLAIMED);
1962 }
1963 if ((ep->errdef.access_type & BOFI_LOG) &&
1964 (edp->log.flags & BOFI_LOG_FULL)) {
1965 edp->log.stop_time = bofi_gettime();
1966 ep->state |= BOFI_NEW_MESSAGE;
1967 if (ep->state & BOFI_MESSAGE_WAIT)
1968 cv_broadcast(&ep->cv);
1969 ep->state &= ~BOFI_MESSAGE_WAIT;
1970 }
1971 if (ep->errstate.msg_time != 0) {
1972 ep->state |= BOFI_NEW_MESSAGE;
1973 if (ep->state & BOFI_MESSAGE_WAIT)
1974 cv_broadcast(&ep->cv);
1975 ep->state &= ~BOFI_MESSAGE_WAIT;
1976 }
1977 mutex_exit(&bofi_low_mutex);
1978 return (DDI_INTR_CLAIMED);
1979 }
1980
1981
1982 /*
1983 * wake up all errdefs corresponding to this name and instance
1984 */
1985 static void
1986 bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
1987 {
1988 struct bofi_errent *ep;
1989
1990 /*
1991 * look for any errdefs with matching name and instance
1992 */
1993 mutex_enter(&bofi_low_mutex);
1994 for (ep = errent_listp; ep != NULL; ep = ep->next)
1995 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1996 errctlp->instance == ep->errdef.instance) {
1997 /*
1998 * wake up sleepers
1999 */
2000 ep->state |= BOFI_NEW_MESSAGE;
2001 if (ep->state & BOFI_MESSAGE_WAIT)
2002 cv_broadcast(&ep->cv);
2003 ep->state &= ~BOFI_MESSAGE_WAIT;
2004 }
2005 mutex_exit(&bofi_low_mutex);
2006 }
2007
2008
2009 /*
2010 * clear "acc_chk" for all errdefs corresponding to this name and instance
2011 * and wake them up.
2012 */
2013 static void
2014 bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep)
2015 {
2016 struct bofi_errent *ep;
2017
2018 /*
2019 * look for any errdefs with matching name and instance
2020 */
2021 mutex_enter(&bofi_low_mutex);
2022 for (ep = errent_listp; ep != NULL; ep = ep->next)
2023 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2024 errctlp->instance == ep->errdef.instance) {
2025 mutex_enter(&bofi_mutex);
2026 if (ep->errdef.access_count == 0 &&
2027 ep->errdef.fail_count == 0)
2028 ep->errdef.acc_chk = 0;
2029 mutex_exit(&bofi_mutex);
2030 /*
2031 * wake up sleepers
2032 */
2033 ep->state |= BOFI_NEW_MESSAGE;
2034 if (ep->state & BOFI_MESSAGE_WAIT)
2035 cv_broadcast(&ep->cv);
2036 ep->state &= ~BOFI_MESSAGE_WAIT;
2037 }
2038 mutex_exit(&bofi_low_mutex);
2039 }
2040
2041
2042 /*
2043  * for all errdefs corresponding to this name and instance whose "access_count"
2044  * has expired, set "fail_count" and "acc_chk" to 0, and wake them up.
2045 */
2046 static void
2047 bofi_clear_errors(struct bofi_errctl *errctlp, char *namep)
2048 {
2049 struct bofi_errent *ep;
2050
2051 /*
2052 * look for any errdefs with matching name and instance
2053 */
2054 mutex_enter(&bofi_low_mutex);
2055 for (ep = errent_listp; ep != NULL; ep = ep->next)
2056 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2057 errctlp->instance == ep->errdef.instance) {
2058 mutex_enter(&bofi_mutex);
2059 if (ep->errdef.access_count == 0) {
2060 ep->errdef.acc_chk = 0;
2061 ep->errdef.fail_count = 0;
2062 mutex_exit(&bofi_mutex);
2063 if (ep->errdef.log.stop_time == 0ul)
2064 (void) drv_getparm(TIME,
2065 &(ep->errdef.log.stop_time));
2066 } else
2067 mutex_exit(&bofi_mutex);
2068 /*
2069 * wake up sleepers
2070 */
2071 ep->state |= BOFI_NEW_MESSAGE;
2072 if (ep->state & BOFI_MESSAGE_WAIT)
2073 cv_broadcast(&ep->cv);
2074 ep->state &= ~BOFI_MESSAGE_WAIT;
2075 }
2076 mutex_exit(&bofi_low_mutex);
2077 }
2078
2079
2080 /*
2081 * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2082 * this name and instance, set "acc_chk" to 0, and wake them up.
2083 */
2084 static void
2085 bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
2086 {
2087 struct bofi_errent *ep;
2088
2089 /*
2090 * look for any errdefs with matching name and instance
2091 */
2092 mutex_enter(&bofi_low_mutex);
2093 for (ep = errent_listp; ep != NULL; ep = ep->next)
2094 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2095 errctlp->instance == ep->errdef.instance) {
2096 mutex_enter(&bofi_mutex);
2097 ep->errdef.acc_chk = 0;
2098 ep->errdef.access_count = 0;
2099 ep->errdef.fail_count = 0;
2100 mutex_exit(&bofi_mutex);
2101 if (ep->errdef.log.stop_time == 0ul)
2102 (void) drv_getparm(TIME,
2103 &(ep->errdef.log.stop_time));
2104 /*
2105 * wake up sleepers
2106 */
2107 ep->state |= BOFI_NEW_MESSAGE;
2108 if (ep->state & BOFI_MESSAGE_WAIT)
2109 cv_broadcast(&ep->cv);
2110 ep->state &= ~BOFI_MESSAGE_WAIT;
2111 }
2112 mutex_exit(&bofi_low_mutex);
2113 }
2114
2115
2116 /*
2117 * get state for this errdef
2118 */
2119 static int
2120 bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
2121 {
2122 struct bofi_errent *hep;
2123 struct bofi_errent *ep;
2124
2125 ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2126 mutex_enter(&bofi_low_mutex);
2127 /*
2128  * don't just assume it's a valid ep - check that it's on the
2129 * in-use list
2130 */
2131 for (hep = errent_listp; hep != NULL; hep = hep->next)
2132 if (hep == ep)
2133 break;
2134 if (hep == NULL) {
2135 mutex_exit(&bofi_low_mutex);
2136 return (EINVAL);
2137 }
2138 mutex_enter(&bofi_mutex);
2139 ep->errstate.access_count = ep->errdef.access_count;
2140 ep->errstate.fail_count = ep->errdef.fail_count;
2141 ep->errstate.acc_chk = ep->errdef.acc_chk;
2142 ep->errstate.log = ep->errdef.log;
2143 *logpp = ep->logbase;
2144 *errstatep = ep->errstate;
2145 mutex_exit(&bofi_mutex);
2146 mutex_exit(&bofi_low_mutex);
2147 return (0);
2148 }
2149
2150
2151 /*
2152  * Wait for a ddi_report_fault message to come back for this errdef,
2153  * then return state for this errdef.
2154  * The fault report is intercepted by bofi_post_event, which triggers
2155  * bofi_signal via a softint, which will wake up this routine if
2156  * we are waiting.
2157 */
2158 static int
2159 bofi_errdef_check_w(struct bofi_errstate *errstatep,
2160 struct acc_log_elem **logpp)
2161 {
2162 struct bofi_errent *hep;
2163 struct bofi_errent *ep;
2164 int rval = 0;
2165
2166 ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2167 mutex_enter(&bofi_low_mutex);
2168 retry:
2169 /*
2170  * don't just assume it's a valid ep - check that it's on the
2171 * in-use list
2172 */
2173 for (hep = errent_listp; hep != NULL; hep = hep->next)
2174 if (hep == ep)
2175 break;
2176 if (hep == NULL) {
2177 mutex_exit(&bofi_low_mutex);
2178 return (EINVAL);
2179 }
2180 /*
2181 * wait for ddi_report_fault for the devinfo corresponding
2182 * to this errdef
2183 */
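/*
 * cv_wait_sig() returns 0 when interrupted by a signal; report EINTR
 * in that case unless a message arrived anyway.  The retry also
 * re-checks that the errdef is still on the in-use list, since it may
 * have been deleted while we slept.
 */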
2184 if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
2185 ep->state |= BOFI_MESSAGE_WAIT;
2186 if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0) {
2187 if (!(ep->state & BOFI_NEW_MESSAGE))
2188 rval = EINTR;
2189 }
2190 goto retry;
2191 }
2192 ep->state &= ~BOFI_NEW_MESSAGE;
2193 /*
2194  * we either didn't need to sleep, we've been woken up, or we've been
2195 * signaled - either way return state now
2196 */
2197 mutex_enter(&bofi_mutex);
2198 ep->errstate.access_count = ep->errdef.access_count;
2199 ep->errstate.fail_count = ep->errdef.fail_count;
2200 ep->errstate.acc_chk = ep->errdef.acc_chk;
2201 ep->errstate.log = ep->errdef.log;
2202 *logpp = ep->logbase;
2203 *errstatep = ep->errstate;
2204 mutex_exit(&bofi_mutex);
2205 mutex_exit(&bofi_low_mutex);
2206 return (rval);
2207 }
2208
2209
2210 /*
2211 * support routine - check if requested driver is defined as under test in the
2212 * conf file.
2213 */
2214 static int
2215 driver_under_test(dev_info_t *rdip)
2216 {
2217 int i;
2218 char *rname;
2219 major_t rmaj;
2220
2221 rname = ddi_get_name(rdip);
2222 rmaj = ddi_name_to_major(rname);
2223
2224 /*
2225  * Only drivers that the user has specifically requested are under test.
2226 */
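/*
 * Example (hypothetical list): with driver_list holding "foo" and
 * "bar", only those two drivers are intercepted; with a negated list
 * ("!foo !bar", so driver_list_neg is set), every driver except foo
 * and bar is intercepted.
 */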
2227 for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
2228 if (driver_list_neg == 0) {
2229 if (rmaj == ddi_name_to_major(&driver_list[i]))
2230 return (1);
2231 } else {
2232 if (rmaj == ddi_name_to_major(&driver_list[i+1]))
2233 return (0);
2234 }
2235 }
2236 if (driver_list_neg == 0)
2237 return (0);
2238 else
2239 return (1);
2240
2241 }
2242
2243
2244 static void
2245 log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
2246 size_t repcount, uint64_t *valuep)
2247 {
2248 struct bofi_errdef *edp = &(ep->errdef);
2249 struct acc_log *log = &edp->log;
2250
2251 ASSERT(log != NULL);
2252 ASSERT(MUTEX_HELD(&bofi_mutex));
2253
2254 if (log->flags & BOFI_LOG_REPIO)
2255 repcount = 1;
2256 else if (repcount == 0 && edp->access_count > 0 &&
2257 (log->flags & BOFI_LOG_FULL) == 0)
2258 edp->access_count += 1;
2259
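/*
 * Record one log entry per access - or a single entry per rep access
 * when BOFI_LOG_REPIO is set - and raise a soft interrupt when the
 * log fills so bofi_signal() can wake any thread waiting in
 * bofi_errdef_check_w().
 */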
2260 if (repcount && log->entries < log->logsize) {
2261 struct acc_log_elem *elem = ep->logbase + log->entries;
2262
2263 if (log->flags & BOFI_LOG_TIMESTAMP)
2264 elem->access_time = bofi_gettime();
2265 elem->access_type = at;
2266 elem->offset = offset;
2267 elem->value = valuep ? *valuep : 0ll;
2268 elem->size = len;
2269 elem->repcount = repcount;
2270 ++log->entries;
2271 if (log->entries == log->logsize) {
2272 log->flags |= BOFI_LOG_FULL;
2273 ddi_trigger_softintr(((struct bofi_errent *)
2274 (uintptr_t)edp->errdef_handle)->softintr_id);
2275 }
2276 }
2277 if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
2278 log->wrapcnt++;
2279 edp->access_count = log->logsize;
2280 log->entries = 0; /* wrap back to the start */
2281 }
2282 }
2283
2284
2285 /*
2286 * got a condition match on dma read/write - check counts and corrupt
2287 * data if necessary
2288 *
2289 * bofi_mutex always held when this is called.
2290 */
2291 static void
2292 do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
2293 uint_t synctype, off_t off, off_t length)
2294 {
2295 uint64_t operand;
2296 int i;
2297 off_t len;
2298 caddr_t logaddr;
2299 uint64_t *addr;
2300 uint64_t *endaddr;
2301 ddi_dma_impl_t *hdlp;
2302 ndi_err_t *errp;
2303
2304 ASSERT(MUTEX_HELD(&bofi_mutex));
2305 if ((ep->errdef.access_count ||
2306 ep->errdef.fail_count) &&
2307 (ep->errdef.access_type & BOFI_LOG)) {
2308 uint_t atype;
2309
2310 if (synctype == DDI_DMA_SYNC_FORDEV)
2311 atype = BOFI_DMA_W;
2312 else if (synctype == DDI_DMA_SYNC_FORCPU ||
2313 synctype == DDI_DMA_SYNC_FORKERNEL)
2314 atype = BOFI_DMA_R;
2315 else
2316 atype = 0;
2317 if ((off <= ep->errdef.offset &&
2318 off + length > ep->errdef.offset) ||
2319 (off > ep->errdef.offset &&
2320 off < ep->errdef.offset + ep->errdef.len)) {
2321 logaddr = (caddr_t)((uintptr_t)(hp->addr +
2322 off + LLSZMASK) & ~LLSZMASK);
2323
2324 log_acc_event(ep, atype, logaddr - hp->addr,
2325 length, 1, 0);
2326 }
2327 }
2328 if (ep->errdef.access_count > 1) {
2329 ep->errdef.access_count--;
2330 } else if (ep->errdef.fail_count > 0) {
2331 ep->errdef.fail_count--;
2332 ep->errdef.access_count = 0;
2333 /*
2334 * OK do the corruption
2335 */
2336 if (ep->errstate.fail_time == 0)
2337 ep->errstate.fail_time = bofi_gettime();
2338 /*
2339 * work out how much to corrupt
2340 *
2341 * Make sure endaddr isn't greater than hp->addr + hp->len.
2342  * If endaddr becomes less than addr, len becomes negative
2343 * and the following loop isn't entered.
2344 */
2345 addr = (uint64_t *)((uintptr_t)((hp->addr +
2346 ep->errdef.offset) + LLSZMASK) & ~LLSZMASK);
2347 endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len,
2348 ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK);
2349 len = endaddr - addr;
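/* addr and endaddr are uint64_t pointers, so len counts 64-bit words */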
2350 operand = ep->errdef.operand;
2351 hdlp = (ddi_dma_impl_t *)(hp->hdl.dma_handle);
2352 errp = &hdlp->dmai_error;
2353 if (ep->errdef.acc_chk & 2) {
2354 uint64_t ena;
2355 char buf[FM_MAX_CLASS];
2356
2357 errp->err_status = DDI_FM_NONFATAL;
2358 (void) snprintf(buf, FM_MAX_CLASS, FM_SIMULATED_DMA);
2359 ena = fm_ena_generate(0, FM_ENA_FMT1);
2360 ddi_fm_ereport_post(hp->dip, buf, ena,
2361 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2362 FM_EREPORT_VERS0, NULL);
2363 }
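/*
 * Apply the errdef operator to each aligned 64-bit word in range.
 * Worked example (hypothetical errdef): optype BOFI_XOR with operand
 * 0xffffffffffffffff inverts every bit of the affected words,
 * mimicking inverted data lines.
 */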
2364 switch (ep->errdef.optype) {
2365 case BOFI_EQUAL :
2366 for (i = 0; i < len; i++)
2367 *(addr + i) = operand;
2368 break;
2369 case BOFI_AND :
2370 for (i = 0; i < len; i++)
2371 *(addr + i) &= operand;
2372 break;
2373 case BOFI_OR :
2374 for (i = 0; i < len; i++)
2375 *(addr + i) |= operand;
2376 break;
2377 case BOFI_XOR :
2378 for (i = 0; i < len; i++)
2379 *(addr + i) ^= operand;
2380 break;
2381 default:
2382 /* do nothing */
2383 break;
2384 }
2385 }
2386 }
2387
2388
2389 static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t);
2390 static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t);
2391 static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t);
2392 static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t);
2393
2394
2395 /*
2396 * check all errdefs linked to this shadow handle. If we've got a condition
2397  * match, check counts and corrupt data if necessary
2398 *
2399 * bofi_mutex always held when this is called.
2400 *
2401  * because of the possibility of BOFI_NO_TRANSFER, we can't get data
2402  * from io-space before calling this, so we pass in the function that
2403  * does the transfer as a parameter.
2404 */
2405 static uint64_t
2406 do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr,
2407 uint64_t (*func)(), size_t repcount, size_t accsize)
2408 {
2409 struct bofi_errent *ep;
2410 struct bofi_link *lp;
2411 uint64_t operand;
2412 uintptr_t minlen;
2413 intptr_t base;
2414 int done_get = 0;
2415 uint64_t get_val, gv;
2416 ddi_acc_impl_t *hdlp;
2417 ndi_err_t *errp;
2418
2419 ASSERT(MUTEX_HELD(&bofi_mutex));
2420 /*
2421 * check through all errdefs associated with this shadow handle
2422 */
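/*
 * done_get ensures the real access routine (func) is called at most
 * once even when several errdefs match; get_val then accumulates each
 * matching errdef's corruption in turn before being returned.
 */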
2423 for (lp = hp->link; lp != NULL; lp = lp->link) {
2424 ep = lp->errentp;
2425 if (ep->errdef.len == 0)
2426 minlen = hp->len;
2427 else
2428 minlen = min(hp->len, ep->errdef.len);
2429 base = addr - hp->addr - ep->errdef.offset + hp->offset;
2430 if ((ep->errdef.access_type & BOFI_PIO_R) &&
2431 (ep->state & BOFI_DEV_ACTIVE) &&
2432 base >= 0 && base < minlen) {
2433 /*
2434 * condition match for pio read
2435 */
2436 if (ep->errdef.access_count > 1) {
2437 ep->errdef.access_count--;
2438 if (done_get == 0) {
2439 done_get = 1;
2440 gv = get_val = func(hp, addr);
2441 }
2442 if (ep->errdef.access_type & BOFI_LOG) {
2443 log_acc_event(ep, BOFI_PIO_R,
2444 addr - hp->addr,
2445 accsize, repcount, &gv);
2446 }
2447 } else if (ep->errdef.fail_count > 0) {
2448 ep->errdef.fail_count--;
2449 ep->errdef.access_count = 0;
2450 /*
2451 * OK do corruption
2452 */
2453 if (ep->errstate.fail_time == 0)
2454 ep->errstate.fail_time = bofi_gettime();
2455 operand = ep->errdef.operand;
2456 if (done_get == 0) {
2457 if (ep->errdef.optype ==
2458 BOFI_NO_TRANSFER)
2459 /*
2460 * no transfer - bomb out
2461 */
2462 return (operand);
2463 done_get = 1;
2464 gv = get_val = func(hp, addr);
2465
2466 }
2467 if (ep->errdef.access_type & BOFI_LOG) {
2468 log_acc_event(ep, BOFI_PIO_R,
2469 addr - hp->addr,
2470 accsize, repcount, &gv);
2471 }
2472 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2473 errp = hdlp->ahi_err;
2474 if (ep->errdef.acc_chk & 1) {
2475 uint64_t ena;
2476 char buf[FM_MAX_CLASS];
2477
2478 errp->err_status = DDI_FM_NONFATAL;
2479 (void) snprintf(buf, FM_MAX_CLASS,
2480 FM_SIMULATED_PIO);
2481 ena = fm_ena_generate(0, FM_ENA_FMT1);
2482 ddi_fm_ereport_post(hp->dip, buf, ena,
2483 DDI_NOSLEEP, FM_VERSION,
2484 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2485 NULL);
2486 }
2487 switch (ep->errdef.optype) {
2488 case BOFI_EQUAL :
2489 get_val = operand;
2490 break;
2491 case BOFI_AND :
2492 get_val &= operand;
2493 break;
2494 case BOFI_OR :
2495 get_val |= operand;
2496 break;
2497 case BOFI_XOR :
2498 get_val ^= operand;
2499 break;
2500 default:
2501 /* do nothing */
2502 break;
2503 }
2504 }
2505 }
2506 }
2507 if (done_get == 0)
2508 return (func(hp, addr));
2509 else
2510 return (get_val);
2511 }
2512
2513
2514 /*
2515 * check all errdefs linked to this shadow handle. If we've got a condition
2516  * match, check counts and corrupt data if necessary
2517 *
2518 * bofi_mutex always held when this is called.
2519 *
2520  * because of the possibility of BOFI_NO_TRANSFER, we return 0 if no data
2521 * is to be written out to io-space, 1 otherwise
2522 */
2523 static int
2524 do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
2525 size_t size, size_t repcount)
2526 {
2527 struct bofi_errent *ep;
2528 struct bofi_link *lp;
2529 uintptr_t minlen;
2530 intptr_t base;
2531 uint64_t v = *valuep;
2532 ddi_acc_impl_t *hdlp;
2533 ndi_err_t *errp;
2534
2535 ASSERT(MUTEX_HELD(&bofi_mutex));
2536 /*
2537 * check through all errdefs associated with this shadow handle
2538 */
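/*
 * Each matching errdef corrupts *valuep in place; a return of 0
 * (BOFI_NO_TRANSFER) tells the caller to discard the write rather
 * than pass it to the real access routine.
 */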
2539 for (lp = hp->link; lp != NULL; lp = lp->link) {
2540 ep = lp->errentp;
2541 if (ep->errdef.len == 0)
2542 minlen = hp->len;
2543 else
2544 minlen = min(hp->len, ep->errdef.len);
2545 base = addr - hp->addr - ep->errdef.offset + hp->offset;
2546 if ((ep->errdef.access_type & BOFI_PIO_W) &&
2547 (ep->state & BOFI_DEV_ACTIVE) &&
2548 base >= 0 && base < minlen) {
2549 /*
2550 * condition match for pio write
2551 */
2552
2553 if (ep->errdef.access_count > 1) {
2554 ep->errdef.access_count--;
2555 if (ep->errdef.access_type & BOFI_LOG)
2556 log_acc_event(ep, BOFI_PIO_W,
2557 addr - hp->addr, size,
2558 repcount, &v);
2559 } else if (ep->errdef.fail_count > 0) {
2560 ep->errdef.fail_count--;
2561 ep->errdef.access_count = 0;
2562 if (ep->errdef.access_type & BOFI_LOG)
2563 log_acc_event(ep, BOFI_PIO_W,
2564 addr - hp->addr, size,
2565 repcount, &v);
2566 /*
2567 * OK do corruption
2568 */
2569 if (ep->errstate.fail_time == 0)
2570 ep->errstate.fail_time = bofi_gettime();
2571 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2572 errp = hdlp->ahi_err;
2573 if (ep->errdef.acc_chk & 1) {
2574 uint64_t ena;
2575 char buf[FM_MAX_CLASS];
2576
2577 errp->err_status = DDI_FM_NONFATAL;
2578 (void) snprintf(buf, FM_MAX_CLASS,
2579 FM_SIMULATED_PIO);
2580 ena = fm_ena_generate(0, FM_ENA_FMT1);
2581 ddi_fm_ereport_post(hp->dip, buf, ena,
2582 DDI_NOSLEEP, FM_VERSION,
2583 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2584 NULL);
2585 }
2586 switch (ep->errdef.optype) {
2587 case BOFI_EQUAL :
2588 *valuep = ep->errdef.operand;
2589 break;
2590 case BOFI_AND :
2591 *valuep &= ep->errdef.operand;
2592 break;
2593 case BOFI_OR :
2594 *valuep |= ep->errdef.operand;
2595 break;
2596 case BOFI_XOR :
2597 *valuep ^= ep->errdef.operand;
2598 break;
2599 case BOFI_NO_TRANSFER :
2600 /*
2601 * no transfer - bomb out
2602 */
2603 return (0);
2604 default:
2605 /* do nothing */
2606 break;
2607 }
2608 }
2609 }
2610 }
2611 return (1);
2612 }
2613
2614
2615 static uint64_t
2616 do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
2617 {
2618 return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr));
2619 }
2620
2621 #define BOFI_READ_CHECKS(type) \
2622 if (bofi_ddi_check) \
2623 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2624 if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2625 (caddr_t)addr - hp->addr >= hp->len)) { \
2626 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2627 "ddi_get() out of range addr %p not in %p/%llx", \
2628 (void *)addr, (void *)hp->addr, hp->len); \
2629 return (0); \
2630 }
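/*
 * bofi_ddi_check undoes the spurious (caddr_t)64 handed out by
 * bofi_map(), translating the address back into the real mapping;
 * bofi_range_check traps accesses outside the handle (warn if 1,
 * panic if 2).  The access routines below use mutex_tryenter() and
 * fall back to an uncorrupted access if bofi_mutex is contended,
 * presumably so they stay safe at interrupt level.
 */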
2631
2632 /*
2633 * our getb() routine - use tryenter
2634 */
2635 static uint8_t
2636 bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
2637 {
2638 struct bofi_shadow *hp;
2639 uint8_t retval;
2640
2641 hp = handle->ahi_common.ah_bus_private;
2642 BOFI_READ_CHECKS(uint8_t)
2643 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2644 return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
2645 retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1,
2646 1);
2647 mutex_exit(&bofi_mutex);
2648 return (retval);
2649 }
2650
2651
2652 static uint64_t
2653 do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
2654 {
2655 return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr));
2656 }
2657
2658
2659 /*
2660 * our getw() routine - use tryenter
2661 */
2662 static uint16_t
2663 bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
2664 {
2665 struct bofi_shadow *hp;
2666 uint16_t retval;
2667
2668 hp = handle->ahi_common.ah_bus_private;
2669 BOFI_READ_CHECKS(uint16_t)
2670 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2671 return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
2672 retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1,
2673 2);
2674 mutex_exit(&bofi_mutex);
2675 return (retval);
2676 }
2677
2678
2679 static uint64_t
2680 do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr)
2681 {
2682 return (hp->save.acc.ahi_get32(&hp->save.acc, (uint32_t *)addr));
2683 }
2684
2685
2686 /*
2687 * our getl() routine - use tryenter
2688 */
2689 static uint32_t
2690 bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
2691 {
2692 struct bofi_shadow *hp;
2693 uint32_t retval;
2694
2695 hp = handle->ahi_common.ah_bus_private;
2696 BOFI_READ_CHECKS(uint32_t)
2697 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2698 return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
2699 retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1,
2700 4);
2701 mutex_exit(&bofi_mutex);
2702 return (retval);
2703 }
2704
2705
2706 static uint64_t
2707 do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
2708 {
2709 return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr));
2710 }
2711
2712
2713 /*
2714 * our getll() routine - use tryenter
2715 */
2716 static uint64_t
2717 bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
2718 {
2719 struct bofi_shadow *hp;
2720 uint64_t retval;
2721
2722 hp = handle->ahi_common.ah_bus_private;
2723 BOFI_READ_CHECKS(uint64_t)
2724 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2725 return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
2726 retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1,
2727 8);
2728 mutex_exit(&bofi_mutex);
2729 return (retval);
2730 }
2731
2732 #define BOFI_WRITE_TESTS(type) \
2733 if (bofi_ddi_check) \
2734 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2735 if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2736 (caddr_t)addr - hp->addr >= hp->len)) { \
2737 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2738 "ddi_put() out of range addr %p not in %p/%llx\n", \
2739 (void *)addr, (void *)hp->addr, hp->len); \
2740 return; \
2741 }
2742
2743 /*
2744 * our putb() routine - use tryenter
2745 */
2746 static void
2747 bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
2748 {
2749 struct bofi_shadow *hp;
2750 uint64_t llvalue = value;
2751
2752 hp = handle->ahi_common.ah_bus_private;
2753 BOFI_WRITE_TESTS(uint8_t)
2754 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2755 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2756 return;
2757 }
2758 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1))
2759 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2760 mutex_exit(&bofi_mutex);
2761 }
2762
2763
2764 /*
2765 * our putw() routine - use tryenter
2766 */
2767 static void
2768 bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
2769 {
2770 struct bofi_shadow *hp;
2771 uint64_t llvalue = value;
2772
2773 hp = handle->ahi_common.ah_bus_private;
2774 BOFI_WRITE_TESTS(uint16_t)
2775 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2776 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2777 return;
2778 }
2779 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1))
2780 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2781 mutex_exit(&bofi_mutex);
2782 }
2783
2784
2785 /*
2786 * our putl() routine - use tryenter
2787 */
2788 static void
2789 bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
2790 {
2791 struct bofi_shadow *hp;
2792 uint64_t llvalue = value;
2793
2794 hp = handle->ahi_common.ah_bus_private;
2795 BOFI_WRITE_TESTS(uint32_t)
2796 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2797 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2798 return;
2799 }
2800 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1))
2801 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2802 mutex_exit(&bofi_mutex);
2803 }
2804
2805
2806 /*
2807 * our putll() routine - use tryenter
2808 */
2809 static void
2810 bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
2811 {
2812 struct bofi_shadow *hp;
2813 uint64_t llvalue = value;
2814
2815 hp = handle->ahi_common.ah_bus_private;
2816 BOFI_WRITE_TESTS(uint64_t)
2817 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2818 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2819 return;
2820 }
2821 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1))
2822 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2823 mutex_exit(&bofi_mutex);
2824 }
2825
2826 #define BOFI_REP_READ_TESTS(type) \
2827 if (bofi_ddi_check) \
2828 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2829 if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2830 (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2831 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2832 "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
2833 (void *)dev_addr, (void *)hp->addr, hp->len); \
2834 if ((caddr_t)dev_addr < hp->addr || \
2835 (caddr_t)dev_addr - hp->addr >= hp->len) \
2836 return; \
2837 repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2838 }
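/*
 * For rep transfers that start inside the handle but run off its end,
 * repcount is clamped so the transfer stops at hp->len rather than
 * being rejected outright.
 */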
2839
2840 /*
2841 * our rep_getb() routine - use tryenter
2842 */
2843 static void
2844 bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2845 size_t repcount, uint_t flags)
2846 {
2847 struct bofi_shadow *hp;
2848 int i;
2849 uint8_t *addr;
2850
2851 hp = handle->ahi_common.ah_bus_private;
2852 BOFI_REP_READ_TESTS(uint8_t)
2853 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2854 hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
2855 repcount, flags);
2856 return;
2857 }
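/*
 * The full repcount is passed on the first access only (i == 0);
 * passing 0 thereafter lets log_acc_event() treat the remaining
 * accesses as a continuation of the same rep transfer.
 */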
2858 for (i = 0; i < repcount; i++) {
2859 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2860 *(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
2861 do_bofi_rd8, i ? 0 : repcount, 1);
2862 }
2863 mutex_exit(&bofi_mutex);
2864 }
2865
2866
2867 /*
2868 * our rep_getw() routine - use tryenter
2869 */
2870 static void
2871 bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2872 uint16_t *dev_addr, size_t repcount, uint_t flags)
2873 {
2874 struct bofi_shadow *hp;
2875 int i;
2876 uint16_t *addr;
2877
2878 hp = handle->ahi_common.ah_bus_private;
2879 BOFI_REP_READ_TESTS(uint16_t)
2880 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2881 hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
2882 repcount, flags);
2883 return;
2884 }
2885 for (i = 0; i < repcount; i++) {
2886 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2887 *(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
2888 do_bofi_rd16, i ? 0 : repcount, 2);
2889 }
2890 mutex_exit(&bofi_mutex);
2891 }
2892
2893
2894 /*
2895 * our rep_getl() routine - use tryenter
2896 */
2897 static void
2898 bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
2899 uint32_t *dev_addr, size_t repcount, uint_t flags)
2900 {
2901 struct bofi_shadow *hp;
2902 int i;
2903 uint32_t *addr;
2904
2905 hp = handle->ahi_common.ah_bus_private;
2906 BOFI_REP_READ_TESTS(uint32_t)
2907 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2908 hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
2909 repcount, flags);
2910 return;
2911 }
2912 for (i = 0; i < repcount; i++) {
2913 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2914 *(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
2915 do_bofi_rd32, i ? 0 : repcount, 4);
2916 }
2917 mutex_exit(&bofi_mutex);
2918 }
2919
2920
2921 /*
2922 * our rep_getll() routine - use tryenter
2923 */
2924 static void
2925 bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
2926 uint64_t *dev_addr, size_t repcount, uint_t flags)
2927 {
2928 struct bofi_shadow *hp;
2929 int i;
2930 uint64_t *addr;
2931
2932 hp = handle->ahi_common.ah_bus_private;
2933 BOFI_REP_READ_TESTS(uint64_t)
2934 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2935 hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
2936 repcount, flags);
2937 return;
2938 }
2939 for (i = 0; i < repcount; i++) {
2940 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2941 *(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
2942 do_bofi_rd64, i ? 0 : repcount, 8);
2943 }
2944 mutex_exit(&bofi_mutex);
2945 }
2946
2947 #define BOFI_REP_WRITE_TESTS(type) \
2948 if (bofi_ddi_check) \
2949 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2950 if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2951 (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2952 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2953 "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
2954 (void *)dev_addr, (void *)hp->addr, hp->len); \
2955 if ((caddr_t)dev_addr < hp->addr || \
2956 (caddr_t)dev_addr - hp->addr >= hp->len) \
2957 return; \
2958 repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2959 }
2960
2961 /*
2962 * our rep_putb() routine - use tryenter
2963 */
2964 static void
2965 bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2966 size_t repcount, uint_t flags)
2967 {
2968 struct bofi_shadow *hp;
2969 int i;
2970 uint64_t llvalue;
2971 uint8_t *addr;
2972
2973 hp = handle->ahi_common.ah_bus_private;
2974 BOFI_REP_WRITE_TESTS(uint8_t)
2975 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2976 hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
2977 repcount, flags);
2978 return;
2979 }
2980 for (i = 0; i < repcount; i++) {
2981 llvalue = *(host_addr + i);
2982 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2983 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 :
2984 repcount))
2985 hp->save.acc.ahi_put8(&hp->save.acc, addr,
2986 (uint8_t)llvalue);
2987 }
2988 mutex_exit(&bofi_mutex);
2989 }
2990
2991
2992 /*
2993 * our rep_putw() routine - use tryenter
2994 */
2995 static void
2996 bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2997 uint16_t *dev_addr, size_t repcount, uint_t flags)
2998 {
2999 struct bofi_shadow *hp;
3000 int i;
3001 uint64_t llvalue;
3002 uint16_t *addr;
3003
3004 hp = handle->ahi_common.ah_bus_private;
3005 BOFI_REP_WRITE_TESTS(uint16_t)
3006 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3007 hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
3008 repcount, flags);
3009 return;
3010 }
3011 for (i = 0; i < repcount; i++) {
3012 llvalue = *(host_addr + i);
3013 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3014 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 :
3015 repcount))
3016 hp->save.acc.ahi_put16(&hp->save.acc, addr,
3017 (uint16_t)llvalue);
3018 }
3019 mutex_exit(&bofi_mutex);
3020 }
3021
3022
3023 /*
3024 * our rep_putl() routine - use tryenter
3025 */
3026 static void
3027 bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
3028 uint32_t *dev_addr, size_t repcount, uint_t flags)
3029 {
3030 struct bofi_shadow *hp;
3031 int i;
3032 uint64_t llvalue;
3033 uint32_t *addr;
3034
3035 hp = handle->ahi_common.ah_bus_private;
3036 BOFI_REP_WRITE_TESTS(uint32_t)
3037 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3038 hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
3039 repcount, flags);
3040 return;
3041 }
3042 for (i = 0; i < repcount; i++) {
3043 llvalue = *(host_addr + i);
3044 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3045 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 0 :
3046 repcount))
3047 hp->save.acc.ahi_put32(&hp->save.acc, addr,
3048 (uint32_t)llvalue);
3049 }
3050 mutex_exit(&bofi_mutex);
3051 }
3052
3053
3054 /*
3055 * our rep_putll() routine - use tryenter
3056 */
3057 static void
3058 bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
3059 uint64_t *dev_addr, size_t repcount, uint_t flags)
3060 {
3061 struct bofi_shadow *hp;
3062 int i;
3063 uint64_t llvalue;
3064 uint64_t *addr;
3065
3066 hp = handle->ahi_common.ah_bus_private;
3067 BOFI_REP_WRITE_TESTS(uint64_t)
3068 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3069 hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
3070 repcount, flags);
3071 return;
3072 }
3073 for (i = 0; i < repcount; i++) {
3074 llvalue = *(host_addr + i);
3075 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3076 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 :
3077 repcount))
3078 hp->save.acc.ahi_put64(&hp->save.acc, addr,
3079 (uint64_t)llvalue);
3080 }
3081 mutex_exit(&bofi_mutex);
3082 }
3083
3084
3085 /*
3086 * our ddi_map routine
3087 */
3088 static int
3089 bofi_map(dev_info_t *dip, dev_info_t *rdip,
3090 ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
3091 {
3092 ddi_acc_impl_t *ap;
3093 struct bofi_shadow *hp;
3094 struct bofi_errent *ep;
3095 struct bofi_link *lp, *next_lp;
3096 int retval;
3097 struct bofi_shadow *dhashp;
3098 struct bofi_shadow *hhashp;
3099
3100 switch (reqp->map_op) {
3101 case DDI_MO_MAP_LOCKED:
3102 /*
3103 * for this case get nexus to do real work first
3104 */
3105 retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
3106 vaddrp);
3107 if (retval != DDI_SUCCESS)
3108 return (retval);
3109
3110 ap = (ddi_acc_impl_t *)reqp->map_handlep;
3111 if (ap == NULL)
3112 return (DDI_SUCCESS);
3113 /*
3114 * if driver_list is set, only intercept those drivers
3115 */
3116 if (!driver_under_test(ap->ahi_common.ah_dip))
3117 return (DDI_SUCCESS);
3118
3119 /*
3120 * support for ddi_regs_map_setup()
3121 * - allocate shadow handle structure and fill it in
3122 */
3123 hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
3124 (void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
3125 NAMESIZE);
3126 hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
3127 hp->dip = ap->ahi_common.ah_dip;
3128 hp->addr = *vaddrp;
3129 /*
3130 * return spurious value to catch direct access to registers
3131 */
3132 if (bofi_ddi_check)
3133 *vaddrp = (caddr_t)64;
3134 hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
3135 hp->offset = offset;
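/*
 * a len of 0 in the map request means "map the whole register set",
 * so treat the shadow as extending up to INT_MAX
 */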
3136 if (len == 0)
3137 hp->len = INT_MAX - offset;
3138 else
3139 hp->len = min(len, INT_MAX - offset);
3140 hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
3141 hp->link = NULL;
3142 hp->type = BOFI_ACC_HDL;
3143 /*
3144 * save existing function pointers and plug in our own
3145 */
3146 hp->save.acc = *ap;
3147 ap->ahi_get8 = bofi_rd8;
3148 ap->ahi_get16 = bofi_rd16;
3149 ap->ahi_get32 = bofi_rd32;
3150 ap->ahi_get64 = bofi_rd64;
3151 ap->ahi_put8 = bofi_wr8;
3152 ap->ahi_put16 = bofi_wr16;
3153 ap->ahi_put32 = bofi_wr32;
3154 ap->ahi_put64 = bofi_wr64;
3155 ap->ahi_rep_get8 = bofi_rep_rd8;
3156 ap->ahi_rep_get16 = bofi_rep_rd16;
3157 ap->ahi_rep_get32 = bofi_rep_rd32;
3158 ap->ahi_rep_get64 = bofi_rep_rd64;
3159 ap->ahi_rep_put8 = bofi_rep_wr8;
3160 ap->ahi_rep_put16 = bofi_rep_wr16;
3161 ap->ahi_rep_put32 = bofi_rep_wr32;
3162 ap->ahi_rep_put64 = bofi_rep_wr64;
3163 ap->ahi_fault_check = bofi_check_acc_hdl;
3164 #if defined(__sparc)
3165 #else
3166 ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
3167 #endif
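/*
 * On non-sparc platforms, clearing DDI_ACCATTR_DIRECT stops callers
 * from bypassing the access-function vector with direct loads and
 * stores, so the intercepts above are actually used.
 */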
3168 /*
3169 * stick in a pointer to our shadow handle
3170 */
3171 ap->ahi_common.ah_bus_private = hp;
3172 /*
3173 * add to dhash, hhash and inuse lists
3174 */
3175 mutex_enter(&bofi_low_mutex);
3176 mutex_enter(&bofi_mutex);
3177 hp->next = shadow_list.next;
3178 shadow_list.next->prev = hp;
3179 hp->prev = &shadow_list;
3180 shadow_list.next = hp;
3181 hhashp = HDL_HHASH(ap);
3182 hp->hnext = hhashp->hnext;
3183 hhashp->hnext->hprev = hp;
3184 hp->hprev = hhashp;
3185 hhashp->hnext = hp;
3186 dhashp = HDL_DHASH(hp->dip);
3187 hp->dnext = dhashp->dnext;
3188 dhashp->dnext->dprev = hp;
3189 hp->dprev = dhashp;
3190 dhashp->dnext = hp;
3191 /*
3192 * chain on any pre-existing errdefs that apply to this
3193 * acc_handle
3194 */
3195 for (ep = errent_listp; ep != NULL; ep = ep->next) {
3196 if (ddi_name_to_major(hp->name) ==
3197 ddi_name_to_major(ep->name) &&
3198 hp->instance == ep->errdef.instance &&
3199 (ep->errdef.access_type & BOFI_PIO_RW) &&
3200 (ep->errdef.rnumber == -1 ||
3201 hp->rnumber == ep->errdef.rnumber) &&
3202 (ep->errdef.len == 0 ||
3203 offset < ep->errdef.offset + ep->errdef.len) &&
3204 offset + hp->len > ep->errdef.offset) {
3205 lp = bofi_link_freelist;
3206 if (lp != NULL) {
3207 bofi_link_freelist = lp->link;
3208 lp->errentp = ep;
3209 lp->link = hp->link;
3210 hp->link = lp;
3211 }
3212 }
3213 }
3214 mutex_exit(&bofi_mutex);
3215 mutex_exit(&bofi_low_mutex);
3216 return (DDI_SUCCESS);
3217 case DDI_MO_UNMAP:
3218
3219 ap = (ddi_acc_impl_t *)reqp->map_handlep;
3220 if (ap == NULL)
3221 break;
3222 /*
3223 * support for ddi_regs_map_free()
3224 * - check we really have a shadow handle for this one
3225 */
3226 mutex_enter(&bofi_low_mutex);
3227 mutex_enter(&bofi_mutex);
3228 hhashp = HDL_HHASH(ap);
3229 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3230 if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
3231 break;
3232 if (hp == hhashp) {
3233 mutex_exit(&bofi_mutex);
3234 mutex_exit(&bofi_low_mutex);
3235 break;
3236 }
3237 /*
3238 * got a shadow handle - restore original pointers
3239 */
3240 *ap = hp->save.acc;
3241 *vaddrp = hp->addr;
3242 /*
3243 * remove from dhash, hhash and inuse lists
3244 */
3245 hp->hnext->hprev = hp->hprev;
3246 hp->hprev->hnext = hp->hnext;
3247 hp->dnext->dprev = hp->dprev;
3248 hp->dprev->dnext = hp->dnext;
3249 hp->next->prev = hp->prev;
3250 hp->prev->next = hp->next;
3251 /*
3252 * free any errdef link structures tagged onto the shadow handle
3253 */
3254 for (lp = hp->link; lp != NULL; ) {
3255 next_lp = lp->link;
3256 lp->link = bofi_link_freelist;
3257 bofi_link_freelist = lp;
3258 lp = next_lp;
3259 }
3260 hp->link = NULL;
3261 mutex_exit(&bofi_mutex);
3262 mutex_exit(&bofi_low_mutex);
3263 /*
3264 * finally delete shadow handle
3265 */
3266 kmem_free(hp, sizeof (struct bofi_shadow));
3267 break;
3268 default:
3269 break;
3270 }
3271 return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
3272 }
3273
3274
3275 /*
3276 * chain any pre-existing errdefs on to newly created dma handle
3277 * if required call do_dma_corrupt() to corrupt data
3278 */
3279 static void
3280 chain_on_errdefs(struct bofi_shadow *hp)
3281 {
3282 struct bofi_errent *ep;
3283 struct bofi_link *lp;
3284
3285 ASSERT(MUTEX_HELD(&bofi_mutex));
3286 /*
3287 * chain on any pre-existing errdefs that apply to this dma_handle
3288 */
3289 for (ep = errent_listp; ep != NULL; ep = ep->next) {
3290 if (ddi_name_to_major(hp->name) ==
3291 ddi_name_to_major(ep->name) &&
3292 hp->instance == ep->errdef.instance &&
3293 (ep->errdef.rnumber == -1 ||
3294 hp->rnumber == ep->errdef.rnumber) &&
3295 ((ep->errdef.access_type & BOFI_DMA_RW) &&
3296 (((uintptr_t)(hp->addr + ep->errdef.offset +
3297 ep->errdef.len) & ~LLSZMASK) >
3298 ((uintptr_t)((hp->addr + ep->errdef.offset) +
3299 LLSZMASK) & ~LLSZMASK)))) {
3300 /*
3301 * got a match - link it on
3302 */
3303 lp = bofi_link_freelist;
3304 if (lp != NULL) {
3305 bofi_link_freelist = lp->link;
3306 lp->errentp = ep;
3307 lp->link = hp->link;
3308 hp->link = lp;
3309 if ((ep->errdef.access_type & BOFI_DMA_W) &&
3310 (hp->flags & DDI_DMA_WRITE) &&
3311 (ep->state & BOFI_DEV_ACTIVE)) {
3312 do_dma_corrupt(hp, ep,
3313 DDI_DMA_SYNC_FORDEV,
3314 0, hp->len);
3315 }
3316 }
3317 }
3318 }
3319 }
3320
3321
3322 /*
3323  * need to do the copy byte-by-byte in case one of the pages is little-endian
3324 */
3325 static void
3326 xbcopy(void *from, void *to, u_longlong_t len)
3327 {
3328 uchar_t *f = from;
3329 uchar_t *t = to;
3330
3331 while (len--)
3332 *t++ = *f++;
3333 }
3334
3335
3336 /*
3337 * our ddi_dma_map routine
3338 */
3339 static int
3340 bofi_dma_map(dev_info_t *dip, dev_info_t *rdip,
3341 struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
3342 {
3343 struct bofi_shadow *hp, *xhp;
3344 int maxrnumber = 0;
3345 int retval = DDI_DMA_NORESOURCES;
3346 auto struct ddi_dma_req dmareq;
3347 int sleep;
3348 struct bofi_shadow *dhashp;
3349 struct bofi_shadow *hhashp;
3350 ddi_dma_impl_t *mp;
3351 unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3352
3353 /*
3354 * if driver_list is set, only intercept those drivers
3355 */
3356 if (handlep == NULL || !driver_under_test(rdip))
3357 return (save_bus_ops.bus_dma_map(dip, rdip, dmareqp, handlep));
3358
3359 sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
3360 /*
3361 * allocate shadow handle structure and fill it in
3362 */
3363 hp = kmem_zalloc(sizeof (struct bofi_shadow), sleep);
3364 if (hp == NULL)
3365 goto error;
3366 (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3367 hp->instance = ddi_get_instance(rdip);
3368 hp->dip = rdip;
3369 hp->flags = dmareqp->dmar_flags;
3370 if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
3371 hp->map_flags = B_PAGEIO;
3372 hp->map_pp = dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp;
3373 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
3374 hp->map_flags = B_SHADOW;
3375 hp->map_pplist = dmareqp->dmar_object.dmao_obj.virt_obj.v_priv;
3376 } else {
3377 hp->map_flags = 0;
3378 }
3379 hp->link = NULL;
3380 hp->type = BOFI_DMA_HDL;
3381 /*
3382 * get a kernel virtual mapping
3383 */
3384 hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3385 if (hp->addr == NULL)
3386 goto error;
3387 if (bofi_sync_check) {
3388 /*
3389 * Take a copy and pass pointers to this up to nexus instead.
3390 * Data will be copied from the original on explicit
3391 * and implicit ddi_dma_sync()
3392 *
3393 * - maintain page alignment because some devices assume it.
3394 */
3395 hp->origaddr = hp->addr;
3396 hp->allocaddr = ddi_umem_alloc(
3397 ((uintptr_t)hp->addr & pagemask) + hp->len, sleep,
3398 &hp->umem_cookie);
3399 if (hp->allocaddr == NULL)
3400 goto error;
3401 hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3402 if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3403 xbcopy(hp->origaddr, hp->addr, hp->len);
3404 dmareq = *dmareqp;
3405 dmareq.dmar_object.dmao_size = hp->len;
3406 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3407 dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3408 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3409 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3410 dmareqp = &dmareq;
3411 }
3412 /*
3413 * call nexus to do the real work
3414 */
3415 retval = save_bus_ops.bus_dma_map(dip, rdip, dmareqp, handlep);
3416 if (retval != DDI_SUCCESS)
3417 goto error2;
3418 /*
3419 * now set dma_handle to point to real handle
3420 */
3421 hp->hdl.dma_handle = *handlep;
3422 /*
3423 * unset DMP_NOSYNC
3424 */
3425 mp = (ddi_dma_impl_t *)*handlep;
3426 mp->dmai_rflags &= ~DMP_NOSYNC;
3427 mp->dmai_fault_check = bofi_check_dma_hdl;
3428 /*
3429 * bind and unbind are cached in devinfo - must overwrite them
3430 * - note that our bind and unbind are quite happy dealing with
3431 * any handles for this devinfo that were previously allocated
3432 */
3433 if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3434 DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3435 if (save_bus_ops.bus_dma_unbindhdl ==
3436 DEVI(rdip)->devi_bus_dma_unbindfunc)
3437 DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3438 mutex_enter(&bofi_low_mutex);
3439 mutex_enter(&bofi_mutex);
3440 /*
3441 * get an "rnumber" for this handle - really just seeking to
3442  * get a unique number - generally only care about early allocated
3443  * handles - so if we ever get as far as INT_MAX, just stay there
3444 */
3445 dhashp = HDL_DHASH(hp->dip);
3446 for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3447 if (ddi_name_to_major(xhp->name) ==
3448 ddi_name_to_major(hp->name) &&
3449 xhp->instance == hp->instance &&
3450 xhp->type == BOFI_DMA_HDL)
3451 if (xhp->rnumber >= maxrnumber) {
3452 if (xhp->rnumber == INT_MAX)
3453 maxrnumber = INT_MAX;
3454 else
3455 maxrnumber = xhp->rnumber + 1;
3456 }
3457 hp->rnumber = maxrnumber;
3458 /*
3459 * add to dhash, hhash and inuse lists
3460 */
3461 hp->next = shadow_list.next;
3462 shadow_list.next->prev = hp;
3463 hp->prev = &shadow_list;
3464 shadow_list.next = hp;
3465 hhashp = HDL_HHASH(*handlep);
3466 hp->hnext = hhashp->hnext;
3467 hhashp->hnext->hprev = hp;
3468 hp->hprev = hhashp;
3469 hhashp->hnext = hp;
3470 dhashp = HDL_DHASH(hp->dip);
3471 hp->dnext = dhashp->dnext;
3472 dhashp->dnext->dprev = hp;
3473 hp->dprev = dhashp;
3474 dhashp->dnext = hp;
3475 /*
3476 * chain on any pre-existing errdefs that apply to this
3477 * acc_handle and corrupt if required (as there is an implicit
3478 * ddi_dma_sync() in this call)
3479 */
3480 chain_on_errdefs(hp);
3481 mutex_exit(&bofi_mutex);
3482 mutex_exit(&bofi_low_mutex);
3483 return (retval);
3484 error:
3485 if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3486 /*
3487 * what to do here? Wait a bit and try again
3488 */
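/*
 * a dmar_fp other than DDI_DMA_DONTWAIT is a callback the caller
 * wants invoked when resources may be available again - just
 * reschedule it after a short delay (10 ticks)
 */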
3489 (void) timeout((void (*)())dmareqp->dmar_fp,
3490 dmareqp->dmar_arg, 10);
3491 }
3492 error2:
3493 if (hp) {
3494 ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
3495 hp->map_pp, hp->map_pplist);
3496 if (bofi_sync_check && hp->allocaddr)
3497 ddi_umem_free(hp->umem_cookie);
3498 kmem_free(hp, sizeof (struct bofi_shadow));
3499 }
3500 return (retval);
3501 }
3502
3503
3504 /*
3505 * our ddi_dma_allochdl routine
3506 */
3507 static int
3508 bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
3509 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
3510 {
3511 int retval = DDI_DMA_NORESOURCES;
3512 struct bofi_shadow *hp, *xhp;
3513 int maxrnumber = 0;
3514 struct bofi_shadow *dhashp;
3515 struct bofi_shadow *hhashp;
3516 ddi_dma_impl_t *mp;
3517
3518 /*
3519 * if driver_list is set, only intercept those drivers
3520 */
3521 if (!driver_under_test(rdip))
3522 return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
3523 waitfp, arg, handlep));
3524
3525 /*
3526 * allocate shadow handle structure and fill it in
3527 */
3528 hp = kmem_zalloc(sizeof (struct bofi_shadow),
3529 ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
3530 if (hp == NULL) {
3531 /*
3532 * what to do here? Wait a bit and try again
3533 */
3534 if (waitfp != DDI_DMA_DONTWAIT)
3535 (void) timeout((void (*)())waitfp, arg, 10);
3536 return (retval);
3537 }
3538 (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3539 hp->instance = ddi_get_instance(rdip);
3540 hp->dip = rdip;
3541 hp->link = NULL;
3542 hp->type = BOFI_NULL;
3543 /*
3544 * call nexus to do the real work
3545 */
3546 retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
3547 handlep);
3548 if (retval != DDI_SUCCESS) {
3549 kmem_free(hp, sizeof (struct bofi_shadow));
3550 return (retval);
3551 }
3552 /*
3553  * now set dma_handle to point to the real handle
3554 */
3555 hp->hdl.dma_handle = *handlep;
3556 mp = (ddi_dma_impl_t *)*handlep;
3557 mp->dmai_fault_check = bofi_check_dma_hdl;
3558 /*
3559 * bind and unbind are cached in devinfo - must overwrite them
3560 * - note that our bind and unbind are quite happy dealing with
3561 * any handles for this devinfo that were previously allocated
3562 */
3563 if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3564 DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3565 if (save_bus_ops.bus_dma_unbindhdl ==
3566 DEVI(rdip)->devi_bus_dma_unbindfunc)
3567 DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3568 mutex_enter(&bofi_low_mutex);
3569 mutex_enter(&bofi_mutex);
3570 /*
3571 * get an "rnumber" for this handle - really just seeking to
3572  * get a unique number - generally only care about early allocated
3573  * handles - so if we ever get as far as INT_MAX, just stay there
3574 */
3575 dhashp = HDL_DHASH(hp->dip);
3576 for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3577 if (ddi_name_to_major(xhp->name) ==
3578 ddi_name_to_major(hp->name) &&
3579 xhp->instance == hp->instance &&
3580 (xhp->type == BOFI_DMA_HDL ||
3581 xhp->type == BOFI_NULL))
3582 if (xhp->rnumber >= maxrnumber) {
3583 if (xhp->rnumber == INT_MAX)
3584 maxrnumber = INT_MAX;
3585 else
3586 maxrnumber = xhp->rnumber + 1;
3587 }
3588 hp->rnumber = maxrnumber;
3589 /*
3590 * add to dhash, hhash and inuse lists
3591 */
3592 hp->next = shadow_list.next;
3593 shadow_list.next->prev = hp;
3594 hp->prev = &shadow_list;
3595 shadow_list.next = hp;
3596 hhashp = HDL_HHASH(*handlep);
3597 hp->hnext = hhashp->hnext;
3598 hhashp->hnext->hprev = hp;
3599 hp->hprev = hhashp;
3600 hhashp->hnext = hp;
3601 dhashp = HDL_DHASH(hp->dip);
3602 hp->dnext = dhashp->dnext;
3603 dhashp->dnext->dprev = hp;
3604 hp->dprev = dhashp;
3605 dhashp->dnext = hp;
3606 mutex_exit(&bofi_mutex);
3607 mutex_exit(&bofi_low_mutex);
3608 return (retval);
3609 }
3610
3611
3612 /*
3613 * our ddi_dma_freehdl routine
3614 */
3615 static int
3616 bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3617 {
3618 int retval;
3619 struct bofi_shadow *hp;
3620 struct bofi_shadow *hhashp;
3621
3622 /*
3623 * find shadow for this handle
3624 */
3625 mutex_enter(&bofi_low_mutex);
3626 mutex_enter(&bofi_mutex);
3627 hhashp = HDL_HHASH(handle);
3628 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3629 if (hp->hdl.dma_handle == handle)
3630 break;
3631 mutex_exit(&bofi_mutex);
3632 mutex_exit(&bofi_low_mutex);
3633 /*
3634 * call nexus to do the real work
3635 */
3636 retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
3637 if (retval != DDI_SUCCESS) {
3638 return (retval);
3639 }
3640 /*
3641 * did we really have a shadow for this handle
3642 */
3643 if (hp == hhashp)
3644 return (retval);
3645 /*
3646 * yes we have - see if it's still bound
3647 */
3648 mutex_enter(&bofi_low_mutex);
3649 mutex_enter(&bofi_mutex);
3650 if (hp->type != BOFI_NULL)
3651 panic("driver freeing bound dma_handle");
3652 /*
3653 * remove from dhash, hhash and inuse lists
3654 */
3655 hp->hnext->hprev = hp->hprev;
3656 hp->hprev->hnext = hp->hnext;
3657 hp->dnext->dprev = hp->dprev;
3658 hp->dprev->dnext = hp->dnext;
3659 hp->next->prev = hp->prev;
3660 hp->prev->next = hp->next;
3661 mutex_exit(&bofi_mutex);
3662 mutex_exit(&bofi_low_mutex);
3663
3664 kmem_free(hp, sizeof (struct bofi_shadow));
3665 return (retval);
3666 }
3667
3668
3669 /*
3670 * our ddi_dma_bindhdl routine
3671 */
3672 static int
3673 bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
3674 ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
3675 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3676 {
3677 int retval = DDI_DMA_NORESOURCES;
3678 auto struct ddi_dma_req dmareq;
3679 struct bofi_shadow *hp;
3680 struct bofi_shadow *hhashp;
3681 ddi_dma_impl_t *mp;
3682 unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3683
3684 /*
3685 * check we really have a shadow for this handle
3686 */
3687 mutex_enter(&bofi_low_mutex);
3688 mutex_enter(&bofi_mutex);
3689 hhashp = HDL_HHASH(handle);
3690 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3691 if (hp->hdl.dma_handle == handle)
3692 break;
3693 mutex_exit(&bofi_mutex);
3694 mutex_exit(&bofi_low_mutex);
3695 if (hp == hhashp) {
3696 /*
3697 * no we don't - just call nexus to do the real work
3698 */
3699 return (save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3700 cookiep, ccountp));
3701 }
3702 /*
3703 * yes we have - see if it's already bound
3704 */
3705 if (hp->type != BOFI_NULL)
3706 return (DDI_DMA_INUSE);
3707
3708 hp->flags = dmareqp->dmar_flags;
3709 if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
3710 hp->map_flags = B_PAGEIO;
3711 hp->map_pp = dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp;
3712 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
3713 hp->map_flags = B_SHADOW;
3714 hp->map_pplist = dmareqp->dmar_object.dmao_obj.virt_obj.v_priv;
3715 } else {
3716 hp->map_flags = 0;
3717 }
3718 /*
3719 * get a kernel virtual mapping
3720 */
3721 hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3722 if (hp->addr == NULL)
3723 goto error;
3724 if (bofi_sync_check) {
3725 /*
3726 * Take a copy and pass pointers to this up to nexus instead.
3727 * Data will be copied from the original on explicit
3728 * and implicit ddi_dma_sync()
3729 *
3730 * - maintain page alignment because some devices assume it.
3731 */
3732 hp->origaddr = hp->addr;
3733 hp->allocaddr = ddi_umem_alloc(
3734 ((uintptr_t)hp->addr & pagemask) + hp->len,
3735 (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
3736 &hp->umem_cookie);
3737 if (hp->allocaddr == NULL)
3738 goto error;
3739 hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3740 if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3741 xbcopy(hp->origaddr, hp->addr, hp->len);
3742 dmareq = *dmareqp;
3743 dmareq.dmar_object.dmao_size = hp->len;
3744 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3745 dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3746 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3747 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3748 dmareqp = &dmareq;
3749 }
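	/*
	 * At this point, if bofi_sync_check is set, dmareqp has been
	 * redirected at a private DMA_OTYP_VADDR request describing the
	 * kernel-resident copy, so the nexus binds the copy rather than
	 * the caller's original memory object.
	 */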
	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
	    cookiep, ccountp);
	if (retval != DDI_SUCCESS)
		goto error2;
	/*
	 * unset DMP_NOSYNC - bofi needs the handle to be synced so that
	 * bofi_dma_flush() gets a chance to inject faults
	 */
	mp = (ddi_dma_impl_t *)handle;
	mp->dmai_rflags &= ~DMP_NOSYNC;
	/*
	 * chain on any pre-existing errdefs that apply to this
	 * dma_handle and corrupt if required (as there is an implicit
	 * ddi_dma_sync() in this call)
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hp->type = BOFI_DMA_HDL;
	chain_on_errdefs(hp);
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);

error:
	if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
		/*
		 * no resources - schedule the caller's callback to be
		 * retried after a short delay
		 */
		(void) timeout((void (*)())dmareqp->dmar_fp,
		    dmareqp->dmar_arg, 10);
	}
error2:
	if (hp) {
		ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
		    hp->map_pp, hp->map_pplist);
		if (bofi_sync_check && hp->allocaddr)
			ddi_umem_free(hp->umem_cookie);
		hp->mapaddr = NULL;
		hp->allocaddr = NULL;
		hp->origaddr = NULL;
	}
	return (retval);
}


/*
 * our ddi_dma_unbindhdl routine
 */
static int
bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct bofi_link *lp, *next_lp;
	struct bofi_errent *ep;
	int retval;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;

	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
	if (retval != DDI_SUCCESS)
		return (retval);
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;
	if (hp == hhashp) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	}
	/*
	 * yes we have - see if it's already unbound
	 */
	if (hp->type == BOFI_NULL)
		panic("driver unbinding unbound dma_handle");
	/*
	 * free any errdef link structures tagged on to this
	 * shadow handle
	 */
	for (lp = hp->link; lp != NULL; ) {
		next_lp = lp->link;
		/*
		 * there is an implicit sync_for_cpu on free -
		 * may need to corrupt
		 */
		ep = lp->errentp;
		if ((ep->errdef.access_type & BOFI_DMA_R) &&
		    (hp->flags & DDI_DMA_READ) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
		}
		lp->link = bofi_link_freelist;
		bofi_link_freelist = lp;
		lp = next_lp;
	}
	hp->link = NULL;
	hp->type = BOFI_NULL;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
		/*
		 * implicit sync_for_cpu - copy data back
		 */
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
	ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
	    hp->map_pp, hp->map_pplist);
	if (bofi_sync_check && hp->allocaddr)
		ddi_umem_free(hp->umem_cookie);
	hp->mapaddr = NULL;
	hp->allocaddr = NULL;
	hp->origaddr = NULL;
	return (retval);
}


/*
 * our ddi_dma_sync routine
 */
static int
bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
{
	struct bofi_link *lp;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	int retval;

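	/*
	 * Ordering matters here: for SYNC_FORCPU/SYNC_FORKERNEL the real
	 * sync is done first, so corruption (and any copy back to the
	 * original buffer) operates on the data the device just delivered;
	 * for SYNC_FORDEV the copy-in and corruption are done first and
	 * the real sync last, so the device sees the corrupted data.
	 */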
	if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
		/*
		 * in this case get nexus driver to do sync first
		 */
		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
		    len, flags);
		if (retval != DDI_SUCCESS)
			return (retval);
	}
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle &&
		    hp->type == BOFI_DMA_HDL)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (hp != hhashp) {
		/*
		 * yes - do we need to copy data from the original?
		 */
		if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
			if (hp->allocaddr)
				xbcopy(hp->origaddr+off, hp->addr+off,
				    len ? len : (hp->len - off));
		/*
		 * yes - check if we need to corrupt the data
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		for (lp = hp->link; lp != NULL; lp = lp->link) {
			ep = lp->errentp;
			if ((((ep->errdef.access_type & BOFI_DMA_R) &&
			    (flags == DDI_DMA_SYNC_FORCPU ||
			    flags == DDI_DMA_SYNC_FORKERNEL)) ||
			    ((ep->errdef.access_type & BOFI_DMA_W) &&
			    (flags == DDI_DMA_SYNC_FORDEV))) &&
			    (ep->state & BOFI_DEV_ACTIVE)) {
				do_dma_corrupt(hp, ep, flags, off,
				    len ? len : (hp->len - off));
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		/*
		 * do we need to copy data back to the original?
		 */
		if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
		    flags == DDI_DMA_SYNC_FORKERNEL))
			if (hp->allocaddr)
				xbcopy(hp->addr+off, hp->origaddr+off,
				    len ? len : (hp->len - off));
	}
	if (flags == DDI_DMA_SYNC_FORDEV)
		/*
		 * in this case get nexus driver to do sync last
		 */
		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
		    len, flags);
	return (retval);
}


/*
 * our dma_win routine
 */
static int
bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	int retval;
	ddi_dma_impl_t *mp;

	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp);
	if (retval != DDI_SUCCESS)
		return (retval);
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;
	if (hp != hhashp) {
		/*
		 * yes - make sure DMP_NOSYNC is unset
		 */
		mp = (ddi_dma_impl_t *)handle;
		mp->dmai_rflags &= ~DMP_NOSYNC;
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);
}


/*
 * our dma_ctl routine
 */
static int
bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	struct bofi_link *lp, *next_lp;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	int retval;
	int i;
	struct bofi_shadow *dummyhp;
	ddi_dma_impl_t *mp;

	/*
	 * get nexus to do real work
	 */
	retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
	    lenp, objp, flags);
	if (retval != DDI_SUCCESS)
		return (retval);
	/*
	 * if driver_list is set, only intercept those drivers
	 */
	if (!driver_under_test(rdip))
		return (DDI_SUCCESS);

#if defined(__sparc)
	/*
	 * check if this is a dvma_reserve - that one's like a
	 * dma_allochdl and needs to be handled separately
	 */
	if (request == DDI_DMA_RESERVE) {
		bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
		return (DDI_SUCCESS);
	}
#endif
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;
	if (hp == hhashp) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	}
	/*
	 * yes we have - see what kind of command this is
	 */
	switch (request) {
	case DDI_DMA_RELEASE:
		/*
		 * dvma release - release dummy handle and all the index
		 * handles
		 */
		dummyhp = hp;
		dummyhp->hnext->hprev = dummyhp->hprev;
		dummyhp->hprev->hnext = dummyhp->hnext;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		for (i = 0; i < dummyhp->len; i++) {
			hp = dummyhp->hparrayp[i];
			/*
			 * check none of the index handles are still loaded
			 */
			if (hp->type != BOFI_NULL)
				panic("driver releasing loaded dvma");
			/*
			 * remove from dhash and inuse lists
			 */
			mutex_enter(&bofi_low_mutex);
			mutex_enter(&bofi_mutex);
			hp->dnext->dprev = hp->dprev;
			hp->dprev->dnext = hp->dnext;
			hp->next->prev = hp->prev;
			hp->prev->next = hp->next;
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);

			if (bofi_sync_check && hp->allocaddr)
				ddi_umem_free(hp->umem_cookie);
			kmem_free(hp, sizeof (struct bofi_shadow));
		}
		kmem_free(dummyhp->hparrayp, dummyhp->len *
		    sizeof (struct bofi_shadow *));
		kmem_free(dummyhp, sizeof (struct bofi_shadow));
		return (retval);
	case DDI_DMA_FREE:
		/*
		 * ddi_dma_free case - remove from dhash, hhash and inuse lists
		 */
		hp->hnext->hprev = hp->hprev;
		hp->hprev->hnext = hp->hnext;
		hp->dnext->dprev = hp->dprev;
		hp->dprev->dnext = hp->dnext;
		hp->next->prev = hp->prev;
		hp->prev->next = hp->next;
		/*
		 * free any errdef link structures tagged on to this
		 * shadow handle
		 */
		for (lp = hp->link; lp != NULL; ) {
			next_lp = lp->link;
			/*
			 * there is an implicit sync_for_cpu on free -
			 * may need to corrupt
			 */
			ep = lp->errentp;
			if ((ep->errdef.access_type & BOFI_DMA_R) &&
			    (hp->flags & DDI_DMA_READ) &&
			    (ep->state & BOFI_DEV_ACTIVE)) {
				do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU,
				    0, hp->len);
			}
			lp->link = bofi_link_freelist;
			bofi_link_freelist = lp;
			lp = next_lp;
		}
		hp->link = NULL;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);

		if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
			if (hp->allocaddr)
				xbcopy(hp->addr, hp->origaddr, hp->len);
		ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
		    hp->map_pp, hp->map_pplist);
		if (bofi_sync_check && hp->allocaddr)
			ddi_umem_free(hp->umem_cookie);
		kmem_free(hp, sizeof (struct bofi_shadow));
		return (retval);
	case DDI_DMA_MOVWIN:
	case DDI_DMA_NEXTWIN:
		mp = (ddi_dma_impl_t *)handle;
		mp->dmai_rflags &= ~DMP_NOSYNC;
		break;
	default:
		break;
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);
}

#if defined(__sparc)
/*
 * dvma reserve case from bofi_dma_ctl()
 */
static void
bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;
	ddi_dma_impl_t *mp;
	struct fast_dvma *nexus_private;
	int i, count;

	mp = (ddi_dma_impl_t *)handle;
	count = mp->dmai_ndvmapages;
	/*
	 * allocate dummy shadow handle structure
	 */
	dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP);
	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		/*
		 * overlay our routines over the nexus's dvma routines
		 */
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		dummyhp->save.dvma_ops = *(nexus_private->ops);
		nexus_private->ops = &bofi_dvma_ops;
	}
	/*
	 * now fill in the dummy handle. This just gets put on the hhash
	 * queue so our dvma routines can find it and index into the real
	 * handles they want.
	 */
	(void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE);
	dummyhp->instance = ddi_get_instance(rdip);
	dummyhp->rnumber = -1;
	dummyhp->dip = rdip;
	dummyhp->len = count;
	dummyhp->hdl.dma_handle = handle;
	dummyhp->link = NULL;
	dummyhp->type = BOFI_NULL;
	/*
	 * allocate space for the real handles
	 */
	dummyhp->hparrayp = kmem_alloc(count *
	    sizeof (struct bofi_shadow *), KM_SLEEP);
	for (i = 0; i < count; i++) {
		/*
		 * allocate shadow handle structures and fill them in
		 */
		hp = kmem_zalloc(sizeof (*hp), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
		hp->instance = ddi_get_instance(rdip);
		hp->rnumber = -1;
		hp->dip = rdip;
		hp->hdl.dma_handle = 0;
		hp->link = NULL;
		hp->type = BOFI_NULL;
		if (bofi_sync_check) {
			unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
			/*
			 * Take a copy and set this to be hp->addr.
			 * Data will be copied to and from the original on
			 * explicit and implicit ddi_dma_sync().
			 *
			 * - maintain page alignment because some devices
			 * assume it.
			 */
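			/*
			 * hp was kmem_zalloc'd above, so hp->addr is NULL
			 * at this point and the offset term below is 0:
			 * each index handle gets exactly one page
			 * (pagemask + 1 bytes).
			 */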
			hp->allocaddr = ddi_umem_alloc(
			    ((int)(uintptr_t)hp->addr & pagemask)
			    + pagemask + 1,
			    KM_SLEEP, &hp->umem_cookie);
			hp->addr = hp->allocaddr +
			    ((int)(uintptr_t)hp->addr & pagemask);
		}
		/*
		 * add to dhash and inuse lists.
		 * these don't go on the hhash queue.
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;
		dhashp->dnext = hp;
		dummyhp->hparrayp[i] = hp;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
	}
	/*
	 * add the dummy handle to the hhash list only
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	dummyhp->hnext = hhashp->hnext;
	hhashp->hnext->hprev = dummyhp;
	dummyhp->hprev = hhashp;
	hhashp->hnext = dummyhp;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
}

/*
 * our dvma_kaddr_load()
 */
static void
bofi_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
    ddi_dma_cookie_t *cp)
{
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	struct bofi_errent *ep;
	struct bofi_link *lp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_kaddr_load with no reserve");
	}

	/*
	 * find the real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check it's not already loaded
	 */
	if (hp->type != BOFI_NULL)
		panic("driver loading loaded dvma");
	/*
	 * if we're doing copying, we just need to change origaddr and get
	 * the nexus to map hp->addr again;
	 * if not, set hp->addr to the new address.
	 * - note these are always kernel virtual addresses - no need to map
	 */
	if (bofi_sync_check && hp->allocaddr) {
		hp->origaddr = a;
		a = hp->addr;
	} else
		hp->addr = a;
	hp->len = len;
	/*
	 * get nexus to do the real work
	 */
	dummyhp->save.dvma_ops.dvma_kaddr_load(h, a, len, index, cp);
	/*
	 * chain on any pre-existing errdefs that apply to this dma_handle
	 * no need to corrupt - there's no implicit dma_sync on this one
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hp->type = BOFI_DMA_HDL;
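	/*
	 * An errdef matches if driver name and instance agree, the rnumber
	 * matches (or the errdef applies to all rnumbers), and, for DMA
	 * read/write errdefs, the errdef's [offset, offset + len) range
	 * covers at least one whole LLSZMASK-aligned word of this mapping.
	 */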
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (ddi_name_to_major(hp->name) ==
		    ddi_name_to_major(ep->name) &&
		    hp->instance == ep->errdef.instance &&
		    (ep->errdef.rnumber == -1 ||
		    hp->rnumber == ep->errdef.rnumber) &&
		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
		    (((uintptr_t)(hp->addr + ep->errdef.offset +
		    ep->errdef.len) & ~LLSZMASK) >
		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
		    LLSZMASK) & ~LLSZMASK)))) {
			lp = bofi_link_freelist;
			if (lp != NULL) {
				bofi_link_freelist = lp->link;
				lp->errentp = ep;
				lp->link = hp->link;
				hp->link = lp;
			}
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
}

/*
 * our dvma_unload()
 */
static void
bofi_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	struct bofi_link *lp, *next_lp;
	struct bofi_errent *ep;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_unload with no reserve");
	}
	dummyhp->save.dvma_ops.dvma_unload(h, index, view);
	/*
	 * find the real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check it's not already unloaded
	 */
	if (hp->type == BOFI_NULL)
		panic("driver unloading unloaded dvma");
	/*
	 * free any errdef link structures tagged on to this
	 * shadow handle - do corruption if necessary
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	for (lp = hp->link; lp != NULL; ) {
		next_lp = lp->link;
		ep = lp->errentp;
		if ((ep->errdef.access_type & BOFI_DMA_R) &&
		    (view == DDI_DMA_SYNC_FORCPU ||
		    view == DDI_DMA_SYNC_FORKERNEL) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, view, 0, hp->len);
		}
		lp->link = bofi_link_freelist;
		bofi_link_freelist = lp;
		lp = next_lp;
	}
	hp->link = NULL;
	hp->type = BOFI_NULL;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * if there is an explicit sync_for_cpu, then copy back to the
	 * original
	 */
	if (bofi_sync_check &&
	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL))
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
}

/*
 * our dvma_sync()
 */
static void
bofi_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	struct bofi_link *lp;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hhashp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_sync with no reserve");
	}
	/*
	 * find the real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check it's loaded
	 */
	if (hp->type == BOFI_NULL)
		panic("driver syncing unloaded dvma");
	if (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)
		/*
		 * in this case do the sync first
		 */
		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
	/*
	 * if there is an explicit sync_for_dev, then copy from the original
	 */
	if (bofi_sync_check && view == DDI_DMA_SYNC_FORDEV) {
		if (hp->allocaddr)
			xbcopy(hp->origaddr, hp->addr, hp->len);
	}
	/*
	 * do corruption if necessary
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if ((((ep->errdef.access_type & BOFI_DMA_R) &&
		    (view == DDI_DMA_SYNC_FORCPU ||
		    view == DDI_DMA_SYNC_FORKERNEL)) ||
		    ((ep->errdef.access_type & BOFI_DMA_W) &&
		    (view == DDI_DMA_SYNC_FORDEV))) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, view, 0, hp->len);
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * if there is an explicit sync_for_cpu, then copy back to the
	 * original
	 */
	if (bofi_sync_check &&
	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)) {
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
	}
	if (view == DDI_DMA_SYNC_FORDEV)
		/*
		 * in this case do the sync last
		 */
		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
}
#endif

/*
 * bofi intercept routine - gets called instead of the user's interrupt
 * routine
 */
static uint_t
bofi_intercept_intr(caddr_t xp, caddr_t arg2)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;
	struct bofi_shadow *hp;
	int intr_count = 1;
	int i;
	uint_t retval = DDI_INTR_UNCLAIMED;
	uint_t result;
	int unclaimed_counter = 0;
	int jabber_detected = 0;

	hp = (struct bofi_shadow *)xp;
	/*
	 * check if there is nothing to do
	 */
	if (hp->link == NULL)
		return (hp->save.intr.int_handler
		    (hp->save.intr.int_handler_arg1, arg2));
	mutex_enter(&bofi_mutex);
	/*
	 * look for any errdefs
	 */
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if (ep->state & BOFI_DEV_ACTIVE) {
			/*
			 * got one
			 */
			if ((ep->errdef.access_count ||
			    ep->errdef.fail_count) &&
			    (ep->errdef.access_type & BOFI_LOG))
				log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
			if (ep->errdef.access_count > 1) {
				ep->errdef.access_count--;
			} else if (ep->errdef.fail_count > 0) {
				ep->errdef.fail_count--;
				ep->errdef.access_count = 0;
				/*
				 * OK do "corruption"
				 */
				if (ep->errstate.fail_time == 0)
					ep->errstate.fail_time = bofi_gettime();
				switch (ep->errdef.optype) {
				case BOFI_DELAY_INTR:
					if (!hp->hilevel) {
						drv_usecwait
						    (ep->errdef.operand);
					}
					break;
				case BOFI_LOSE_INTR:
					intr_count = 0;
					break;
				case BOFI_EXTRA_INTR:
					intr_count += ep->errdef.operand;
					break;
				default:
					break;
				}
			}
		}
	}
	mutex_exit(&bofi_mutex);
	/*
	 * send extra or fewer interrupts as requested
	 */
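	/*
	 * Jabber detection below: each claimed interrupt halves
	 * unclaimed_counter and each unclaimed one increments it, so only
	 * a sustained run of unclaimed interrupts (reaching 20) counts as
	 * the driver having detected the jabber.
	 */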
	for (i = 0; i < intr_count; i++) {
		result = hp->save.intr.int_handler
		    (hp->save.intr.int_handler_arg1, arg2);
		if (result == DDI_INTR_CLAIMED)
			unclaimed_counter >>= 1;
		else if (++unclaimed_counter >= 20)
			jabber_detected = 1;
		if (i == 0)
			retval = result;
	}
	/*
	 * if more than 1000 spurious interrupts were requested and
	 * jabber was not detected - panic
	 */
	if (intr_count > 1000 && !jabber_detected)
		panic("undetected interrupt jabber: %s%d",
		    hp->name, hp->instance);
	/*
	 * return the first response - or "unclaimed" if none
	 */
	return (retval);
}


/*
 * our ddi_check_acc_hdl
 */
/* ARGSUSED */
static int
bofi_check_acc_hdl(ddi_acc_impl_t *handle)
{
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	uint_t result = 0;

	hp = handle->ahi_common.ah_bus_private;
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		return (0);
	}
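	/*
	 * acc_chk bit 0 marks errdefs that should make fault checks on
	 * this access handle report failure once the errdef's access_count
	 * is exhausted; mutex_tryenter() avoids blocking in this check
	 * path - if the mutex is busy we just report no fault.
	 */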
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		/*
		 * OR in error state from all associated
		 * errdef structures
		 */
		if (lp->errentp->errdef.access_count == 0 &&
		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
			result = (lp->errentp->errdef.acc_chk & 1);
		}
	}
	mutex_exit(&bofi_mutex);
	return (result);
}

/*
 * our ddi_check_dma_hdl
 */
/* ARGSUSED */
static int
bofi_check_dma_hdl(ddi_dma_impl_t *handle)
{
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	struct bofi_shadow *hhashp;
	uint_t result = 0;

	if (!mutex_tryenter(&bofi_mutex)) {
		return (0);
	}
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
			break;
	if (hp == hhashp) {
		mutex_exit(&bofi_mutex);
		return (0);
	}
	if (!hp->link) {
		mutex_exit(&bofi_mutex);
		return (0);
	}
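	/*
	 * acc_chk bit 1 is the DMA-handle analogue of bit 0 above: when
	 * set, fault checks on this DMA handle report failure once the
	 * errdef's access_count is exhausted.
	 */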
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		/*
		 * OR in error state from all associated
		 * errdef structures
		 */
		if (lp->errentp->errdef.access_count == 0 &&
		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
			result = ((lp->errentp->errdef.acc_chk & 2) ? 1 : 0);
		}
	}
	mutex_exit(&bofi_mutex);
	return (result);
}


/* ARGSUSED */
static int
bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventhdl, void *impl_data)
{
	ddi_eventcookie_t ec;
	struct ddi_fault_event_data *arg;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *dhashp;
	struct bofi_link *lp;

	ASSERT(eventhdl);
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (ec != eventhdl)
		return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
		    impl_data));

	arg = (struct ddi_fault_event_data *)impl_data;
	mutex_enter(&bofi_mutex);
	/*
	 * find shadow handles with the appropriate dev_info
	 * and set error reported on all associated errdef structures
	 */
	dhashp = HDL_DHASH(arg->f_dip);
	for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
		if (hp->dip == arg->f_dip) {
			for (lp = hp->link; lp != NULL; lp = lp->link) {
				ep = lp->errentp;
				ep->errstate.errmsg_count++;
				if ((ep->errstate.msg_time == NULL ||
				    ep->errstate.severity > arg->f_impact) &&
				    (ep->state & BOFI_DEV_ACTIVE)) {
					ep->errstate.msg_time = bofi_gettime();
					ep->errstate.severity = arg->f_impact;
					(void) strncpy(ep->errstate.buffer,
					    arg->f_message, ERRMSGSIZE);
					ddi_trigger_softintr(ep->softintr_id);
				}
			}
		}
	}
	mutex_exit(&bofi_mutex);
	return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
}

/*ARGSUSED*/
static int
bofi_fm_ereport_callback(sysevent_t *ev, void *cookie)
{
	char *class = "";
	char *path = "";
	char *ptr;
	nvlist_t *nvlist;
	nvlist_t *detector;
	ddi_fault_impact_t impact;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	char service_class[FM_MAX_CLASS];
	char hppath[MAXPATHLEN];
	int service_ereport = 0;

	(void) sysevent_get_attr_list(ev, &nvlist);
	(void) nvlist_lookup_string(nvlist, FM_CLASS, &class);
	if (nvlist_lookup_nvlist(nvlist, FM_EREPORT_DETECTOR, &detector) == 0)
		(void) nvlist_lookup_string(detector, FM_FMRI_DEV_PATH, &path);

	(void) snprintf(service_class, FM_MAX_CLASS, "%s.%s.%s.",
	    FM_EREPORT_CLASS, DDI_IO_CLASS, DDI_FM_SERVICE_IMPACT);
	if (strncmp(class, service_class, strlen(service_class) - 1) == 0)
		service_ereport = 1;
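	/*
	 * service_class built above is the service-impact ereport class
	 * prefix (with a trailing '.'); for matching classes, ptr below is
	 * pointed at the tail of the class string, which names the specific
	 * impact (lost, degraded, restored, ...).
	 */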

	mutex_enter(&bofi_mutex);
	/*
	 * find shadow handles with the appropriate dev_info
	 * and set error reported on all associated errdef structures
	 */
	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
		(void) ddi_pathname(hp->dip, hppath);
		if (strcmp(path, hppath) != 0)
			continue;
		for (lp = hp->link; lp != NULL; lp = lp->link) {
			ep = lp->errentp;
			ep->errstate.errmsg_count++;
			if (!(ep->state & BOFI_DEV_ACTIVE))
				continue;
			if (ep->errstate.msg_time != NULL)
				continue;
			if (service_ereport) {
				ptr = class + strlen(service_class);
				if (strcmp(ptr, DDI_FM_SERVICE_LOST) == 0)
					impact = DDI_SERVICE_LOST;
				else if (strcmp(ptr,
				    DDI_FM_SERVICE_DEGRADED) == 0)
					impact = DDI_SERVICE_DEGRADED;
				else if (strcmp(ptr,
				    DDI_FM_SERVICE_RESTORED) == 0)
					impact = DDI_SERVICE_RESTORED;
				else
					impact = DDI_SERVICE_UNAFFECTED;
				if (ep->errstate.severity > impact)
					ep->errstate.severity = impact;
			} else if (ep->errstate.buffer[0] == '\0') {
				(void) strncpy(ep->errstate.buffer, class,
				    ERRMSGSIZE);
			}
			if (ep->errstate.buffer[0] != '\0' &&
			    ep->errstate.severity < DDI_SERVICE_RESTORED) {
				ep->errstate.msg_time = bofi_gettime();
				ddi_trigger_softintr(ep->softintr_id);
			}
		}
	}
	nvlist_free(nvlist);
	mutex_exit(&bofi_mutex);
	return (0);
}

/*
 * our intr_ops routine
 */
static int
bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int retval;
	struct bofi_shadow *hp;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;
	struct bofi_errent *ep;
	struct bofi_link *lp, *next_lp;

	switch (intr_op) {
	case DDI_INTROP_ADDISR:
		/*
		 * if driver_list is set, only intercept those drivers
		 */
		if (!driver_under_test(rdip))
			return (save_bus_ops.bus_intr_op(dip, rdip,
			    intr_op, hdlp, result));
		/*
		 * allocate shadow handle structure and fill it in
		 */
		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
		hp->instance = ddi_get_instance(rdip);
		hp->save.intr.int_handler = hdlp->ih_cb_func;
		hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
		hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
		hdlp->ih_cb_arg1 = (caddr_t)hp;
		hp->bofi_inum = hdlp->ih_inum;
		hp->dip = rdip;
		hp->link = NULL;
		hp->type = BOFI_INT_HDL;
		/*
		 * save whether hilevel or not
		 */
		if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
			hp->hilevel = 1;
		else
			hp->hilevel = 0;
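		/*
		 * hp->hilevel is consulted by bofi_intercept_intr() -
		 * a BOFI_DELAY_INTR errdef skips its drv_usecwait() delay
		 * for high-level interrupt handlers.
		 */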

		/*
		 * call nexus to do the real work, but specifying our handler
		 * and our shadow handle as its argument
		 */
		retval = save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result);
		if (retval != DDI_SUCCESS) {
			kmem_free(hp, sizeof (struct bofi_shadow));
			return (retval);
		}
		/*
		 * add to dhash, hhash and inuse lists
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		hhashp = HDL_HHASH(hdlp->ih_inum);
		hp->hnext = hhashp->hnext;
		hhashp->hnext->hprev = hp;
		hp->hprev = hhashp;
		hhashp->hnext = hp;
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;
		dhashp->dnext = hp;
		/*
		 * chain on any pre-existing errdefs that apply to this
		 * interrupt handle
		 */
		for (ep = errent_listp; ep != NULL; ep = ep->next) {
			if (ddi_name_to_major(hp->name) ==
			    ddi_name_to_major(ep->name) &&
			    hp->instance == ep->errdef.instance &&
			    (ep->errdef.access_type & BOFI_INTR)) {
				lp = bofi_link_freelist;
				if (lp != NULL) {
					bofi_link_freelist = lp->link;
					lp->errentp = ep;
					lp->link = hp->link;
					hp->link = lp;
				}
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	case DDI_INTROP_REMISR:
		/*
		 * call the nexus routine first
		 */
		retval = save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result);
		/*
		 * find the shadow handle
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hhashp = HDL_HHASH(hdlp->ih_inum);
		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
			if (hp->dip == rdip &&
			    hp->type == BOFI_INT_HDL &&
			    hp->bofi_inum == hdlp->ih_inum) {
				break;
			}
		}
		if (hp == hhashp) {
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);
			return (retval);
		}
		/*
		 * found one - remove from dhash, hhash and inuse lists
		 */
		hp->hnext->hprev = hp->hprev;
		hp->hprev->hnext = hp->hnext;
		hp->dnext->dprev = hp->dprev;
		hp->dprev->dnext = hp->dnext;
		hp->next->prev = hp->prev;
		hp->prev->next = hp->next;
		/*
		 * free any errdef link structures
		 * tagged on to this shadow handle
		 */
		for (lp = hp->link; lp != NULL; ) {
			next_lp = lp->link;
			lp->link = bofi_link_freelist;
			bofi_link_freelist = lp;
			lp = next_lp;
		}
		hp->link = NULL;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		kmem_free(hp, sizeof (struct bofi_shadow));
		return (retval);
	default:
		return (save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result));
	}
}
