1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * Layered driver support.
27 */
28
29 #include <sys/atomic.h>
30 #include <sys/types.h>
31 #include <sys/t_lock.h>
32 #include <sys/param.h>
33 #include <sys/conf.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/buf.h>
37 #include <sys/cred.h>
38 #include <sys/uio.h>
39 #include <sys/vnode.h>
40 #include <sys/fs/snode.h>
41 #include <sys/open.h>
42 #include <sys/kmem.h>
43 #include <sys/file.h>
44 #include <sys/bootconf.h>
45 #include <sys/pathname.h>
46 #include <sys/bitmap.h>
47 #include <sys/stat.h>
48 #include <sys/dditypes.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/sunndi.h>
53 #include <sys/esunddi.h>
54 #include <sys/autoconf.h>
55 #include <sys/sunldi.h>
56 #include <sys/sunldi_impl.h>
57 #include <sys/errno.h>
58 #include <sys/debug.h>
59 #include <sys/modctl.h>
60 #include <sys/var.h>
61 #include <vm/seg_vn.h>
62
63 #include <sys/stropts.h>
64 #include <sys/strsubr.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 #include <sys/kstr.h>
68
69 /*
70 * Device contract related
71 */
72 #include <sys/contract_impl.h>
73 #include <sys/contract/device_impl.h>
74
75 /*
76 * Define macros to manipulate snode, vnode, and open device flags
77 */
78 #define VTYP_VALID(i) (((i) == VCHR) || ((i) == VBLK))
79 #define VTYP_TO_OTYP(i) (((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
80 #define VTYP_TO_STYP(i) (((i) == VCHR) ? S_IFCHR : S_IFBLK)
81
82 #define OTYP_VALID(i) (((i) == OTYP_CHR) || ((i) == OTYP_BLK))
83 #define OTYP_TO_VTYP(i) (((i) == OTYP_CHR) ? VCHR : VBLK)
84 #define OTYP_TO_STYP(i) (((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
85
86 #define STYP_VALID(i) (((i) == S_IFCHR) || ((i) == S_IFBLK))
87 #define STYP_TO_VTYP(i) (((i) == S_IFCHR) ? VCHR : VBLK)
88
89 /*
90 * Define macros for accessing layered driver hash structures
91 */
92 #define LH_HASH(vp) (handle_hash_func(vp) % LH_HASH_SZ)
93 #define LI_HASH(mid, dip, dev) (ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
94
95 /*
96 * Define layered handle flags used in the lh_type field
97 */
98 #define LH_STREAM (0x1) /* handle to a streams device */
99 #define LH_CBDEV (0x2) /* handle to a char/block device */
100
101 /*
102 * Define macro for devid property lookups
103 */
104 #define DEVID_PROP_FLAGS (DDI_PROP_DONTPASS | \
105 DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)
106
107 /*
108 * Dummy string for NDI events
109 */
110 #define NDI_EVENT_SERVICE "NDI_EVENT_SERVICE"
111
112 static void ldi_ev_lock(void);
113 static void ldi_ev_unlock(void);
114
115 #ifdef LDI_OBSOLETE_EVENT
116 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
117 #endif
118
119
120 /*
121 * globals
122 */
123 static kmutex_t ldi_ident_hash_lock[LI_HASH_SZ];
124 static struct ldi_ident *ldi_ident_hash[LI_HASH_SZ];
125
126 static kmutex_t ldi_handle_hash_lock[LH_HASH_SZ];
127 static struct ldi_handle *ldi_handle_hash[LH_HASH_SZ];
128 static size_t ldi_handle_hash_count;
129
130 static struct ldi_ev_callback_list ldi_ev_callback_list;
131
132 static uint32_t ldi_ev_id_pool = 0;
133
134 struct ldi_ev_cookie {
135 char *ck_evname;
136 uint_t ck_sync;
137 uint_t ck_ctype;
138 };
139
140 static struct ldi_ev_cookie ldi_ev_cookies[] = {
141 { LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
142 { LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
143 { LDI_EV_DEVICE_REMOVE, 0, 0},
144 { NULL} /* must terminate list */
145 };
146
147 void
148 ldi_init(void)
149 {
150 int i;
151
152 ldi_handle_hash_count = 0;
153 for (i = 0; i < LH_HASH_SZ; i++) {
154 mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
155 ldi_handle_hash[i] = NULL;
156 }
157 for (i = 0; i < LI_HASH_SZ; i++) {
158 mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
159 ldi_ident_hash[i] = NULL;
160 }
161
162 /*
163 * Initialize the LDI event subsystem
164 */
165 mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
166 cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
167 ldi_ev_callback_list.le_busy = 0;
168 ldi_ev_callback_list.le_thread = NULL;
169 list_create(&ldi_ev_callback_list.le_head,
170 sizeof (ldi_ev_callback_impl_t),
171 offsetof(ldi_ev_callback_impl_t, lec_list));
172 }
173
174 /*
175 * LDI ident manipulation functions
176 */
177 static uint_t
178 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
179 {
180 if (dip != NULL) {
181 uintptr_t k = (uintptr_t)dip;
182 k >>= (int)highbit(sizeof (struct dev_info));
183 return ((uint_t)k);
184 } else if (dev != DDI_DEV_T_NONE) {
185 return (modid + getminor(dev) + getmajor(dev));
186 } else {
187 return (modid);
188 }
189 }
190
191 static struct ldi_ident **
192 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
193 {
194 struct ldi_ident **lipp = NULL;
195 uint_t index = LI_HASH(modid, dip, dev);
196
197 ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
198
199 for (lipp = &(ldi_ident_hash[index]);
200 (*lipp != NULL);
201 lipp = &((*lipp)->li_next)) {
202 if (((*lipp)->li_modid == modid) &&
203 ((*lipp)->li_major == major) &&
204 ((*lipp)->li_dip == dip) &&
205 ((*lipp)->li_dev == dev))
206 break;
207 }
208
209 ASSERT(lipp != NULL);
210 return (lipp);
211 }
212
213 static struct ldi_ident *
214 ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
215 {
216 struct ldi_ident *lip, **lipp, *retlip;
217 modid_t modid;
218 uint_t index;
219
220 ASSERT(mod_name != NULL);
221
222 /* get the module id */
223 modid = mod_name_to_modid(mod_name);
224 ASSERT(modid != -1);
225
226 /* allocate a new ident in case we need it */
227 lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);
228
229 /* search the hash for a matching ident */
230 index = LI_HASH(modid, dip, dev);
231 mutex_enter(&ldi_ident_hash_lock[index]);
232 lipp = ident_find_ref_nolock(modid, dip, dev, major);
233
234 if (*lipp != NULL) {
235 /* we found an ident in the hash */
236 ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
237 (*lipp)->li_ref++;
238 retlip = *lipp;
239 mutex_exit(&ldi_ident_hash_lock[index]);
240 kmem_free(lip, sizeof (struct ldi_ident));
241 return (retlip);
242 }
243
244 /* initialize the new ident */
245 lip->li_next = NULL;
246 lip->li_ref = 1;
247 lip->li_modid = modid;
248 lip->li_major = major;
249 lip->li_dip = dip;
250 lip->li_dev = dev;
251 (void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);
252
253 /* add it to the ident hash */
254 lip->li_next = ldi_ident_hash[index];
255 ldi_ident_hash[index] = lip;
256
257 mutex_exit(&ldi_ident_hash_lock[index]);
258 return (lip);
259 }
260
261 static void
262 ident_hold(struct ldi_ident *lip)
263 {
264 uint_t index;
265
266 ASSERT(lip != NULL);
267 index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
268 mutex_enter(&ldi_ident_hash_lock[index]);
269 ASSERT(lip->li_ref > 0);
270 lip->li_ref++;
271 mutex_exit(&ldi_ident_hash_lock[index]);
272 }
273
274 static void
275 ident_release(struct ldi_ident *lip)
276 {
277 struct ldi_ident **lipp;
278 uint_t index;
279
280 ASSERT(lip != NULL);
281 index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
282 mutex_enter(&ldi_ident_hash_lock[index]);
283
284 ASSERT(lip->li_ref > 0);
285 if (--lip->li_ref > 0) {
286 /* there are more references to this ident */
287 mutex_exit(&ldi_ident_hash_lock[index]);
288 return;
289 }
290
291 /* this was the last reference/open for this ident. free it. */
292 lipp = ident_find_ref_nolock(
293 lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);
294
295 ASSERT((lipp != NULL) && (*lipp != NULL));
296 *lipp = lip->li_next;
297 mutex_exit(&ldi_ident_hash_lock[index]);
298 kmem_free(lip, sizeof (struct ldi_ident));
299 }
300
301 /*
302 * LDI handle manipulation functions
303 */
304 static uint_t
305 handle_hash_func(void *vp)
306 {
307 uintptr_t k = (uintptr_t)vp;
308 k >>= (int)highbit(sizeof (vnode_t));
309 return ((uint_t)k);
310 }
311
312 static struct ldi_handle **
313 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
314 {
315 struct ldi_handle **lhpp = NULL;
316 uint_t index = LH_HASH(vp);
317
318 ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
319
320 for (lhpp = &(ldi_handle_hash[index]);
321 (*lhpp != NULL);
322 lhpp = &((*lhpp)->lh_next)) {
323 if (((*lhpp)->lh_ident == ident) &&
324 ((*lhpp)->lh_vp == vp))
325 break;
326 }
327
328 ASSERT(lhpp != NULL);
329 return (lhpp);
330 }
331
332 static struct ldi_handle *
333 handle_find(vnode_t *vp, struct ldi_ident *ident)
334 {
335 struct ldi_handle **lhpp, *retlhp;
336 int index = LH_HASH(vp);
337
338 mutex_enter(&ldi_handle_hash_lock[index]);
339 lhpp = handle_find_ref_nolock(vp, ident);
340 retlhp = *lhpp;
341 mutex_exit(&ldi_handle_hash_lock[index]);
342 return (retlhp);
343 }
344
345 static struct ldi_handle *
346 handle_alloc(vnode_t *vp, struct ldi_ident *ident)
347 {
348 struct ldi_handle *lhp, **lhpp, *retlhp;
349 uint_t index;
350
351 ASSERT((vp != NULL) && (ident != NULL));
352
353 /* allocate a new handle in case we need it */
354 lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);
355
356 /* search the hash for a matching handle */
357 index = LH_HASH(vp);
358 mutex_enter(&ldi_handle_hash_lock[index]);
359 lhpp = handle_find_ref_nolock(vp, ident);
360
361 if (*lhpp != NULL) {
362 /* we found a handle in the hash */
363 (*lhpp)->lh_ref++;
364 retlhp = *lhpp;
365 mutex_exit(&ldi_handle_hash_lock[index]);
366
367 LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
368 "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
369 (void *)retlhp, (void *)ident, (void *)vp,
370 mod_major_to_name(getmajor(vp->v_rdev)),
371 getminor(vp->v_rdev)));
372
373 kmem_free(lhp, sizeof (struct ldi_handle));
374 return (retlhp);
375 }
376
377 /* initialize the new handle */
378 lhp->lh_ref = 1;
379 lhp->lh_vp = vp;
380 lhp->lh_ident = ident;
381 #ifdef LDI_OBSOLETE_EVENT
382 mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
383 #endif
384
385 /* set the device type for this handle */
386 lhp->lh_type = 0;
387 if (vp->v_stream) {
388 ASSERT(vp->v_type == VCHR);
389 lhp->lh_type |= LH_STREAM;
390 } else {
391 lhp->lh_type |= LH_CBDEV;
392 }
393
394 /* get holds on other objects */
395 ident_hold(ident);
396 ASSERT(vp->v_count >= 1);
397 VN_HOLD(vp);
398
399 /* add it to the handle hash */
400 lhp->lh_next = ldi_handle_hash[index];
401 ldi_handle_hash[index] = lhp;
402 atomic_add_long(&ldi_handle_hash_count, 1);
403
404 LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
405 "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
406 (void *)lhp, (void *)ident, (void *)vp,
407 mod_major_to_name(getmajor(vp->v_rdev)),
408 getminor(vp->v_rdev)));
409
410 mutex_exit(&ldi_handle_hash_lock[index]);
411 return (lhp);
412 }
413
414 static void
415 handle_release(struct ldi_handle *lhp)
416 {
417 struct ldi_handle **lhpp;
418 uint_t index;
419
420 ASSERT(lhp != NULL);
421
422 index = LH_HASH(lhp->lh_vp);
423 mutex_enter(&ldi_handle_hash_lock[index]);
424
425 LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
426 "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
427 (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
428 mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
429 getminor(lhp->lh_vp->v_rdev)));
430
431 ASSERT(lhp->lh_ref > 0);
432 if (--lhp->lh_ref > 0) {
433 /* there are more references to this handle */
434 mutex_exit(&ldi_handle_hash_lock[index]);
435 return;
436 }
437
438 /* this was the last reference/open for this handle. free it. */
439 lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
440 ASSERT((lhpp != NULL) && (*lhpp != NULL));
441 *lhpp = lhp->lh_next;
442 atomic_add_long(&ldi_handle_hash_count, -1);
443 mutex_exit(&ldi_handle_hash_lock[index]);
444
445 VN_RELE(lhp->lh_vp);
446 ident_release(lhp->lh_ident);
447 #ifdef LDI_OBSOLETE_EVENT
448 mutex_destroy(lhp->lh_lock);
449 #endif
450 kmem_free(lhp, sizeof (struct ldi_handle));
451 }
452
453 #ifdef LDI_OBSOLETE_EVENT
454 /*
455 * LDI event manipulation functions
456 */
457 static void
458 handle_event_add(ldi_event_t *lep)
459 {
460 struct ldi_handle *lhp = lep->le_lhp;
461
462 ASSERT(lhp != NULL);
463
464 mutex_enter(lhp->lh_lock);
465 if (lhp->lh_events == NULL) {
466 lhp->lh_events = lep;
467 mutex_exit(lhp->lh_lock);
468 return;
469 }
470
471 lep->le_next = lhp->lh_events;
472 lhp->lh_events->le_prev = lep;
473 lhp->lh_events = lep;
474 mutex_exit(lhp->lh_lock);
475 }
476
477 static void
478 handle_event_remove(ldi_event_t *lep)
479 {
480 struct ldi_handle *lhp = lep->le_lhp;
481
482 ASSERT(lhp != NULL);
483
484 mutex_enter(lhp->lh_lock);
485 if (lep->le_prev)
486 lep->le_prev->le_next = lep->le_next;
487 if (lep->le_next)
488 lep->le_next->le_prev = lep->le_prev;
489 if (lhp->lh_events == lep)
490 lhp->lh_events = lep->le_next;
491 mutex_exit(lhp->lh_lock);
492
493 }
494
495 static void
496 i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
497 void *arg, void *bus_impldata)
498 {
499 ldi_event_t *lep = (ldi_event_t *)arg;
500
501 ASSERT(lep != NULL);
502
503 LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
504 "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
505 (void *)dip, (void *)event_cookie, (void *)lep));
506
507 lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
508 }
509 #endif
510
511 /*
512 * LDI open helper functions
513 */
514
515 /* get a vnode to a device by dev_t and otyp */
516 static int
517 ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
518 {
519 dev_info_t *dip;
520 vnode_t *vp;
521
522 /* sanity check required input parameters */
523 if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
524 return (EINVAL);
525
526 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
527 return (ENODEV);
528
529 vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
530 spec_assoc_vp_with_devi(vp, dip);
531 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev */
532
533 *vpp = vp;
534 return (0);
535 }
536
537 /* get a vnode to a device by pathname */
538 int
539 ldi_vp_from_name(char *path, vnode_t **vpp)
540 {
541 vnode_t *vp = NULL;
542 int ret;
543
544 /* sanity check required input parameters */
545 if ((path == NULL) || (vpp == NULL))
546 return (EINVAL);
547
548 if (modrootloaded) {
549 cred_t *saved_cred = curthread->t_cred;
550
551 /* we don't want lookupname to fail because of credentials */
552 curthread->t_cred = kcred;
553
554 /*
555 * all lookups should be done in the global zone. but
556 * lookupnameat() won't actually do this if an absolute
557 * path is passed in. since the ldi interfaces require an
558 * absolute path we pass lookupnameat() a pointer to
559 * the character after the leading '/' and tell it to
560 * start searching at the current system root directory.
561 */
562 ASSERT(*path == '/');
563 ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
564 &vp, rootdir);
565
566 /* restore this thread's credentials */
567 curthread->t_cred = saved_cred;
568
569 if (ret == 0) {
570 if (!vn_matchops(vp, spec_getvnodeops()) ||
571 !VTYP_VALID(vp->v_type)) {
572 VN_RELE(vp);
573 return (ENXIO);
574 }
575 }
576 }
577
578 if (vp == NULL) {
579 dev_info_t *dip;
580 dev_t dev;
581 int spec_type;
582
583 /*
584 * Root is not mounted, the minor node is not specified,
585 * or an OBP path has been specified.
586 */
587
588 /*
589 * Determine if path can be pruned to produce an
590 * OBP or devfs path for resolve_pathname.
591 */
592 if (strncmp(path, "/devices/", 9) == 0)
593 path += strlen("/devices");
594
595 /*
596 * if no minor node was specified the DEFAULT minor node
597 * will be returned. if there is no DEFAULT minor node
598 * one will be fabricated of type S_IFCHR with the minor
599 * number equal to the instance number.
600 */
601 ret = resolve_pathname(path, &dip, &dev, &spec_type);
602 if (ret != 0)
603 return (ENODEV);
604
605 ASSERT(STYP_VALID(spec_type));
606 vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
607 spec_assoc_vp_with_devi(vp, dip);
608 ddi_release_devi(dip);
609 }
610
611 *vpp = vp;
612 return (0);
613 }
614
615 static int
616 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
617 {
618 char *devidstr;
619 ddi_prop_t *propp;
620
621 /* convert devid as a string property */
622 if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
623 return (0);
624
625 /*
626 * Search for the devid. For speed and ease in locking this
627 * code directly uses the property implementation. See
628 * ddi_common_devid_to_devlist() for a comment as to why.
629 */
630 mutex_enter(&(DEVI(dip)->devi_lock));
631
632 /* check if there is a DDI_DEV_T_NONE devid property */
633 propp = i_ddi_prop_search(DDI_DEV_T_NONE,
634 DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
635 if (propp != NULL) {
636 if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
637 /* a DDI_DEV_T_NONE devid exists and matches */
638 mutex_exit(&(DEVI(dip)->devi_lock));
639 ddi_devid_str_free(devidstr);
640 return (1);
641 } else {
642 /* a DDI_DEV_T_NONE devid exists and doesn't match */
643 mutex_exit(&(DEVI(dip)->devi_lock));
644 ddi_devid_str_free(devidstr);
645 return (0);
646 }
647 }
648
649 /* check if there is a devt specific devid property */
650 propp = i_ddi_prop_search(dev,
651 DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
652 if (propp != NULL) {
653 if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
654 /* a devt specific devid exists and matches */
655 mutex_exit(&(DEVI(dip)->devi_lock));
656 ddi_devid_str_free(devidstr);
657 return (1);
658 } else {
659 /* a devt specific devid exists and doesn't match */
660 mutex_exit(&(DEVI(dip)->devi_lock));
661 ddi_devid_str_free(devidstr);
662 return (0);
663 }
664 }
665
666 /* we didn't find any devids associated with the device */
667 mutex_exit(&(DEVI(dip)->devi_lock));
668 ddi_devid_str_free(devidstr);
669 return (0);
670 }
671
672 /* get a handle to a device by devid and minor name */
673 int
674 ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
675 {
676 dev_info_t *dip;
677 vnode_t *vp;
678 int ret, i, ndevs, styp;
679 dev_t dev, *devs;
680
681 /* sanity check required input parameters */
682 if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
683 return (EINVAL);
684
685 ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
686 if ((ret != DDI_SUCCESS) || (ndevs <= 0))
687 return (ENODEV);
688
689 for (i = 0; i < ndevs; i++) {
690 dev = devs[i];
691
692 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
693 continue;
694
695 /*
696 * now we have to verify that the devid of the disk
697 * still matches what was requested.
698 *
699 * we have to do this because the devid could have
700 * changed between the call to ddi_lyr_devid_to_devlist()
701 * and e_ddi_hold_devi_by_dev(). this is because when
702 * ddi_lyr_devid_to_devlist() returns a list of devts
703 * there is no kind of hold on those devts so a device
704 * could have been replaced out from under us in the
705 * interim.
706 */
707 if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
708 NULL, &styp) == DDI_SUCCESS) &&
709 ldi_devid_match(devid, dip, dev))
710 break;
711
712 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev() */
713 }
714
715 ddi_lyr_free_devlist(devs, ndevs);
716
717 if (i == ndevs)
718 return (ENODEV);
719
720 ASSERT(STYP_VALID(styp));
721 vp = makespecvp(dev, STYP_TO_VTYP(styp));
722 spec_assoc_vp_with_devi(vp, dip);
723 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev */
724
725 *vpp = vp;
726 return (0);
727 }
728
729 /* given a vnode, open a device */
730 static int
731 ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
732 ldi_handle_t *lhp, struct ldi_ident *li)
733 {
734 struct ldi_handle *nlhp;
735 vnode_t *vp;
736 int err;
737
738 ASSERT((vpp != NULL) && (*vpp != NULL));
739 ASSERT((lhp != NULL) && (li != NULL));
740
741 vp = *vpp;
742 /* if the vnode passed in is not a device, then bail */
743 if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
744 return (ENXIO);
745
746 /*
747 * the caller may have specified a node that
748 * doesn't have cb_ops defined. the ldi doesn't yet
749 * support opening devices without a valid cb_ops.
750 */
751 if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
752 return (ENXIO);
753
754 /* open the device */
755 if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
756 return (err);
757
758 /* possible clone open, make sure that we still have a spec node */
759 ASSERT(vn_matchops(vp, spec_getvnodeops()));
760
761 nlhp = handle_alloc(vp, li);
762
763 if (vp != *vpp) {
764 /*
765 * allocating the layered handle took a new hold on the vnode
766 * so we can release the hold that was returned by the clone
767 * open
768 */
769 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
770 "ldi clone open", (void *)nlhp));
771 } else {
772 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
773 "ldi open", (void *)nlhp));
774 }
775
776 *vpp = vp;
777 *lhp = (ldi_handle_t)nlhp;
778 return (0);
779 }
780
781 /* Call a driver's prop_op(9E) interface */
782 static int
783 i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
784 int flags, char *name, caddr_t valuep, int *lengthp)
785 {
786 struct dev_ops *ops = NULL;
787 int res;
788
789 ASSERT((dip != NULL) && (name != NULL));
790 ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
791 ASSERT(lengthp != NULL);
792
793 /*
794 * we can only be invoked after a driver has been opened and
795 * someone has a layered handle to it, so there had better be
796 * a valid ops vector.
797 */
798 ops = DEVI(dip)->devi_ops;
799 ASSERT(ops && ops->devo_cb_ops);
800
801 /*
802 * Some nexus drivers incorrectly set cb_prop_op to nodev,
803 * nulldev or even NULL.
804 */
805 if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
806 (ops->devo_cb_ops->cb_prop_op == nulldev) ||
807 (ops->devo_cb_ops->cb_prop_op == NULL)) {
808 return (DDI_PROP_NOT_FOUND);
809 }
810
811 /* check if this is actually DDI_DEV_T_ANY query */
812 if (flags & LDI_DEV_T_ANY) {
813 flags &= ~LDI_DEV_T_ANY;
814 dev = DDI_DEV_T_ANY;
815 }
816
817 res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
818 return (res);
819 }
820
821 static void
822 i_ldi_prop_op_free(struct prop_driver_data *pdd)
823 {
824 kmem_free(pdd, pdd->pdd_size);
825 }
826
827 static caddr_t
828 i_ldi_prop_op_alloc(int prop_len)
829 {
830 struct prop_driver_data *pdd;
831 int pdd_size;
832
833 pdd_size = sizeof (struct prop_driver_data) + prop_len;
834 pdd = kmem_alloc(pdd_size, KM_SLEEP);
835 pdd->pdd_size = pdd_size;
836 pdd->pdd_prop_free = i_ldi_prop_op_free;
837 return ((caddr_t)&pdd[1]);
838 }
839
840 /*
841 * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
842 * by the typed ldi property lookup interfaces.
843 */
844 static int
845 i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
846 caddr_t *datap, int *lengthp, int elem_size)
847 {
848 caddr_t prop_val;
849 int prop_len, res;
850
851 ASSERT((dip != NULL) && (name != NULL));
852 ASSERT((datap != NULL) && (lengthp != NULL));
853
854 /*
855 * first call the driver's prop_op() interface to allow it
856 * to override default property values.
857 */
858 res = i_ldi_prop_op(dev, dip, PROP_LEN,
859 flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
860 if (res != DDI_PROP_SUCCESS)
861 return (DDI_PROP_NOT_FOUND);
862
863 /* sanity check the property length */
864 if (prop_len == 0) {
865 /*
866 * the ddi typed interfaces don't allow a driver to
867 * create properties with a length of 0. so we should
868 * prevent drivers from returning 0 length dynamic
869 * properties for typed property lookups.
870 */
871 return (DDI_PROP_NOT_FOUND);
872 }
873
874 /* sanity check the property length against the element size */
875 if (elem_size && ((prop_len % elem_size) != 0))
876 return (DDI_PROP_NOT_FOUND);
877
878 /*
879 * got it. now allocate a prop_driver_data struct so that the
880 * user can free the property via ddi_prop_free().
881 */
882 prop_val = i_ldi_prop_op_alloc(prop_len);
883
884 /* lookup the property again, this time get the value */
885 res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
886 flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
887 if (res != DDI_PROP_SUCCESS) {
888 ddi_prop_free(prop_val);
889 return (DDI_PROP_NOT_FOUND);
890 }
891
892 /* sanity check the property length */
893 if (prop_len == 0) {
894 ddi_prop_free(prop_val);
895 return (DDI_PROP_NOT_FOUND);
896 }
897
898 /* sanity check the property length against the element size */
899 if (elem_size && ((prop_len % elem_size) != 0)) {
900 ddi_prop_free(prop_val);
901 return (DDI_PROP_NOT_FOUND);
902 }
903
904 /*
905 * return the prop_driver_data struct and, optionally, the length
906 * of the data.
907 */
908 *datap = prop_val;
909 *lengthp = prop_len;
910
911 return (DDI_PROP_SUCCESS);
912 }
913
914 /*
915 * i_check_string looks at a string property and makes sure it's
916 * a valid null-terminated string
917 */
918 static int
919 i_check_string(char *str, int prop_len)
920 {
921 int i;
922
923 ASSERT(str != NULL);
924
925 for (i = 0; i < prop_len; i++) {
926 if (str[i] == '\0')
927 return (0);
928 }
929 return (1);
930 }
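
/*
 * Example (illustrative sketch, not part of the original source): for a
 * four byte property buffer, i_check_string() behaves as follows.
 *
 *	i_check_string("abc", 4);	returns 0 ("abc\0" contains a
 *					terminating '\0' within prop_len)
 *	i_check_string("abcd", 4);	returns 1 (no '\0' within the first
 *					four bytes, so the property is
 *					rejected as a string)
 */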
931
932 /*
933 * i_pack_string_array takes a string array property that is represented
934 * as a concatenation of strings (with the NULL character included for
935 * each string) and converts it into a format that can be returned by
936 * ldi_prop_lookup_string_array.
937 */
938 static int
939 i_pack_string_array(char *str_concat, int prop_len,
940 char ***str_arrayp, int *nelemp)
941 {
942 int i, nelem, pack_size;
943 char **str_array, *strptr;
944
945 /*
946 * first we need to sanity check the input string array.
947 * in essence this can be done by making sure that the last
948 * character of the array passed in is null (meaning the last
949 * string in the array is null terminated).
950 */
951 if (str_concat[prop_len - 1] != '\0')
952 return (1);
953
954 /* now let's count the number of strings in the array */
955 for (nelem = i = 0; i < prop_len; i++)
956 if (str_concat[i] == '\0')
957 nelem++;
958 ASSERT(nelem >= 1);
959
960 /* now let's allocate memory for the new packed property */
961 pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
962 str_array = (char **)i_ldi_prop_op_alloc(pack_size);
963
964 /* let's copy the actual string data into the new property */
965 strptr = (char *)&(str_array[nelem + 1]);
966 bcopy(str_concat, strptr, prop_len);
967
968 /* now initialize the string array pointers */
969 for (i = 0; i < nelem; i++) {
970 str_array[i] = strptr;
971 strptr += strlen(strptr) + 1;
972 }
973 str_array[nelem] = NULL;
974
975 /* set the return values */
976 *str_arrayp = str_array;
977 *nelemp = nelem;
978
979 return (0);
980 }
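
/*
 * Example (illustrative sketch): packing the concatenated property value
 * "one\0two\0" (prop_len == 8) produces nelem == 2 and a single
 * allocation laid out as:
 *
 *	str_array[0]	-> "one"
 *	str_array[1]	-> "two"
 *	str_array[2]	== NULL
 *	"one\0two\0"	   (copy of the string data following the pointers)
 *
 * The caller can then walk str_array[] like any other string array
 * property and release the whole allocation with a single
 * ddi_prop_free(str_array).
 */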
981
982
983 /*
984 * LDI Project private device usage interfaces
985 */
986
987 /*
988 * Get a count of how many devices are currently open by different consumers
989 */
990 int
991 ldi_usage_count()
992 {
993 return (ldi_handle_hash_count);
994 }
995
996 static void
997 ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
998 {
999 dev_info_t *dip;
1000 dev_t dev;
1001
1002 ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));
1003
1004 /* get the target devt */
1005 dev = vp->v_rdev;
1006
1007 /* try to get the target dip */
1008 dip = VTOCS(vp)->s_dip;
1009 if (dip != NULL) {
1010 e_ddi_hold_devi(dip);
1011 } else if (dev != DDI_DEV_T_NONE) {
1012 dip = e_ddi_hold_devi_by_dev(dev, 0);
1013 }
1014
1015 /* set the target information */
1016 ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
1017 ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
1018 ldi_usage->tgt_devt = dev;
1019 ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
1020 ldi_usage->tgt_dip = dip;
1021 }
1022
1023
1024 static int
1025 ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
1026 void *arg, int (*callback)(const ldi_usage_t *, void *))
1027 {
1028 ldi_usage_t ldi_usage;
1029 struct devnames *dnp;
1030 dev_info_t *dip;
1031 major_t major;
1032 dev_t dev;
1033 int ret = LDI_USAGE_CONTINUE;
1034
1035 /* set the target device information */
1036 ldi_usage_walker_tgt_helper(&ldi_usage, vp);
1037
1038 /* get the source devt */
1039 dev = lip->li_dev;
1040
1041 /* try to get the source dip */
1042 dip = lip->li_dip;
1043 if (dip != NULL) {
1044 e_ddi_hold_devi(dip);
1045 } else if (dev != DDI_DEV_T_NONE) {
1046 dip = e_ddi_hold_devi_by_dev(dev, 0);
1047 }
1048
1049 /* set the valid source information */
1050 ldi_usage.src_modid = lip->li_modid;
1051 ldi_usage.src_name = lip->li_modname;
1052 ldi_usage.src_devt = dev;
1053 ldi_usage.src_dip = dip;
1054
1055 /*
1056 * if the source ident represents either:
1057 *
1058 * - a kernel module (and not a device or device driver)
1059 * - a device node
1060 *
1061 * then we currently have all the info we need to report the
1062 * usage information so invoke the callback function.
1063 */
1064 if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
1065 (dip != NULL)) {
1066 ret = callback(&ldi_usage, arg);
1067 if (dip != NULL)
1068 ddi_release_devi(dip);
1069 if (ldi_usage.tgt_dip != NULL)
1070 ddi_release_devi(ldi_usage.tgt_dip);
1071 return (ret);
1072 }
1073
1074 /*
1075 * now this is kinda gross.
1076 *
1077 * what we do here is attempt to associate every device instance
1078 * of the source driver on the system with the open target driver.
1079 * we do this because we don't know which instance of the device
1080 * could potentially access the lower device so we assume that all
1081 * the instances could access it.
1082 *
1083 * there are two ways we could have gotten here:
1084 *
1085 * 1) this layered ident represents one created using only a
1086 * major number or a driver module name. this means that when
1087 * it was created we could not associate it with a particular
1088 * dev_t or device instance.
1089 *
1090 * when could this possibly happen you ask?
1091 *
1092 * a perfect example of this is streams persistent links.
1093 * when a persistent streams link is formed we can't associate
1094 * the lower device stream with any particular upper device
1095 * stream or instance. this is because any particular upper
1096 * device stream could be closed, then another could be
1097 * opened with a different dev_t and device instance, and it
1098 * would still have access to the lower linked stream.
1099 *
1100 * since any instance of the upper streams driver could
1101 * potentially access the lower stream whenever it wants,
1102 * we represent that here by associating the opened lower
1103 * device with every existing device instance of the upper
1104 * streams driver.
1105 *
1106 * 2) This case should really never happen but we'll include it
1107 * for completeness.
1108 *
1109 * it's possible that we could have gotten here because we
1110 * have a dev_t for the upper device but we couldn't find a
1111 * dip associated with that dev_t.
1112 *
1113 * the only types of devices that have dev_t without an
1114 * associated dip are unbound DLPIv2 network devices. These
1115 * types of devices exist to be able to attach a stream to any
1116 * instance of a hardware network device. since these types of
1117 * devices are usually hardware devices they should never
1118 * really have other devices open.
1119 */
1120 if (dev != DDI_DEV_T_NONE)
1121 major = getmajor(dev);
1122 else
1123 major = lip->li_major;
1124
1125 ASSERT((major >= 0) && (major < devcnt));
1126
1127 dnp = &devnamesp[major];
1128 LOCK_DEV_OPS(&dnp->dn_lock);
1129 dip = dnp->dn_head;
1130 while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
1131 e_ddi_hold_devi(dip);
1132 UNLOCK_DEV_OPS(&dnp->dn_lock);
1133
1134 /* set the source dip */
1135 ldi_usage.src_dip = dip;
1136
1137 /* invoke the callback function */
1138 ret = callback(&ldi_usage, arg);
1139
1140 LOCK_DEV_OPS(&dnp->dn_lock);
1141 ddi_release_devi(dip);
1142 dip = ddi_get_next(dip);
1143 }
1144 UNLOCK_DEV_OPS(&dnp->dn_lock);
1145
1146 /* if there was a target dip, release it */
1147 if (ldi_usage.tgt_dip != NULL)
1148 ddi_release_devi(ldi_usage.tgt_dip);
1149
1150 return (ret);
1151 }
1152
1153 /*
1154 * ldi_usage_walker() - this walker reports LDI kernel device usage
1155 * information via the callback() callback function. the LDI keeps track
1156 * of what devices are being accessed in its own internal data structures.
1157 * this function walks those data structures to determine device usage.
1158 */
1159 void
1160 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1161 {
1162 struct ldi_handle *lhp;
1163 struct ldi_ident *lip;
1164 vnode_t *vp;
1165 int i;
1166 int ret = LDI_USAGE_CONTINUE;
1167
1168 for (i = 0; i < LH_HASH_SZ; i++) {
1169 mutex_enter(&ldi_handle_hash_lock[i]);
1170
1171 lhp = ldi_handle_hash[i];
1172 while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1173 lip = lhp->lh_ident;
1174 vp = lhp->lh_vp;
1175
1176 /* invoke the devinfo callback function */
1177 ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1178
1179 lhp = lhp->lh_next;
1180 }
1181 mutex_exit(&ldi_handle_hash_lock[i]);
1182
1183 if (ret != LDI_USAGE_CONTINUE)
1184 break;
1185 }
1186 }
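
/*
 * Example (hedged sketch, not part of the original source): a minimal
 * ldi_usage_walker() consumer that logs every layered open it is shown.
 * The callback name is hypothetical; returning LDI_USAGE_CONTINUE keeps
 * the walk going.
 *
 *	static int
 *	usage_logger(const ldi_usage_t *u, void *arg)
 *	{
 *		cmn_err(CE_CONT, "ldi: %s -> %s\n",
 *		    (u->src_name != NULL) ? u->src_name : "<anonymous>",
 *		    (u->tgt_name != NULL) ? u->tgt_name : "<unknown>");
 *		return (LDI_USAGE_CONTINUE);
 *	}
 *
 *	ldi_usage_walker(NULL, usage_logger);
 */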
1187
1188 /*
1189 * LDI Project private interfaces (streams linking interfaces)
1190 *
1191 * Streams supports a type of built in device layering via linking.
1192 * Certain types of streams drivers can be streams multiplexors.
1193 * A streams multiplexor supports the I_LINK/I_PLINK operation.
1194 * These operations allow other streams devices to be linked under the
1195 * multiplexor. By definition all streams multiplexors are devices
1196 * so this linking is a type of device layering where the multiplexor
1197 * device is layered on top of the device linked below it.
1198 */
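
/*
 * Example (hedged sketch): a kernel consumer that already holds layered
 * handles for a streams multiplexor (lh_mux) and a lower streams device
 * (lh_lower) can establish a persistent link with an in-kernel ioctl.
 * Because FKIOCTL is set, ldi_ioctl() translates I_PLINK into the private
 * _I_PLINK_LH command, and the "file descriptor" argument is really the
 * lower layered handle (see ldi_ioctl() and ldi_mlink_lh() below).  The
 * handle names are illustrative only.
 *
 *	int rval;
 *	int err = ldi_ioctl(lh_mux, I_PLINK, (intptr_t)lh_lower,
 *	    FREAD | FWRITE | FKIOCTL, kcred, &rval);
 */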
1199
1200 /*
1201 * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1202 * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1203 *
1204 * The streams framework keeps track of links via the file_t of the lower
1205 * stream. The LDI keeps track of devices using a vnode. In the case
1206 * of a streams link created via an LDI handle, ldi_mlink_lh() allocates
1207 * a file_t that the streams framework can use to track the linkage.
1208 */
1209 int
1210 ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
1211 {
1212 struct ldi_handle *lhp = (struct ldi_handle *)arg;
1213 vnode_t *vpdown;
1214 file_t *fpdown;
1215 int err;
1216
1217 if (lhp == NULL)
1218 return (EINVAL);
1219
1220 vpdown = lhp->lh_vp;
1221 ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
1222 ASSERT(cmd == _I_PLINK_LH);
1223
1224 /*
1225 * create a new lower vnode and a file_t that points to it,
1226 * streams linking requires a file_t. falloc() returns with
1227 * fpdown locked.
1228 */
1229 VN_HOLD(vpdown);
1230 (void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
1231 mutex_exit(&fpdown->f_tlock);
1232
1233 /* try to establish the link */
1234 err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);
1235
1236 if (err != 0) {
1237 /* the link failed, free the file_t and release the vnode */
1238 mutex_enter(&fpdown->f_tlock);
1239 unfalloc(fpdown);
1240 VN_RELE(vpdown);
1241 }
1242
1243 return (err);
1244 }
1245
1246 /*
1247 * ldi_mlink_fp() is invoked for all successful streams linkages created
1248 * via I_LINK and I_PLINK. ldi_mlink_fp() records the linkage information
1249 * in its internal state so that the devinfo snapshot code has some
1250 * observability into streams device linkage information.
1251 */
1252 void
1253 ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
1254 {
1255 vnode_t *vp = fpdown->f_vnode;
1256 struct snode *sp, *csp;
1257 ldi_ident_t li;
1258 major_t major;
1259 int ret;
1260
1261 /* if the lower stream is not a device then return */
1262 if (!vn_matchops(vp, spec_getvnodeops()))
1263 return;
1264
1265 ASSERT(!servicing_interrupt());
1266
1267 LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
1268 "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
1269 (void *)stp, (void *)fpdown));
1270
1271 sp = VTOS(vp);
1272 csp = VTOS(sp->s_commonvp);
1273
1274 /* check if this was a plink via a layered handle */
1275 if (lhlink) {
1276 /*
1277 * increment the common snode s_count.
1278 *
1279 * this is done because after the link operation there
1280 * are two ways that s_count can be decremented.
1281 *
1282 * when the layered handle used to create the link is
1283 * closed, spec_close() is called and it will decrement
1284 * s_count in the common snode. if we don't increment
1285 * s_count here then this could cause spec_close() to
1286 * actually close the device while it's still linked
1287 * under a multiplexer.
1288 *
1289 * also, when the lower stream is unlinked, closef() is
1290 * called for the file_t associated with this snode.
1291 * closef() will call spec_close(), which will decrement
1292 * s_count. if we don't increment s_count here then this
1293 * could cause spec_close() to actually close the device
1294 * while there may still be valid layered handles
1295 * pointing to it.
1296 */
1297 mutex_enter(&csp->s_lock);
1298 ASSERT(csp->s_count >= 1);
1299 csp->s_count++;
1300 mutex_exit(&csp->s_lock);
1301
1302 /*
1303 * decrement the f_count.
1304 * this is done because the layered driver framework does
1305 * not actually cache a copy of the file_t allocated to
1306 * do the link. this is done here instead of in ldi_mlink_lh()
1307 * because there is a window in ldi_mlink_lh() between where
1308 * mlink_file() returns and we would decrement the f_count
1309 * when the stream could be unlinked.
1310 */
1311 mutex_enter(&fpdown->f_tlock);
1312 fpdown->f_count--;
1313 mutex_exit(&fpdown->f_tlock);
1314 }
1315
1316 /*
1317 * NOTE: here we rely on the streams subsystem not allowing
1318 * a stream to be multiplexed more than once. if this
1319 * changes, we break.
1320 *
1321 * mark the snode/stream as multiplexed
1322 */
1323 mutex_enter(&sp->s_lock);
1324 ASSERT(!(sp->s_flag & SMUXED));
1325 sp->s_flag |= SMUXED;
1326 mutex_exit(&sp->s_lock);
1327
1328 /* get a layered ident for the upper stream */
1329 if (type == LINKNORMAL) {
1330 /*
1331 * if the link is not persistent then we can associate
1332 * the upper stream with a dev_t. this is because the
1333 * upper stream is associated with a vnode, which is
1334 * associated with a dev_t and this binding can't change
1335 * during the life of the stream. since the link isn't
1336 * persistent, once the stream is destroyed the link is
1337 * destroyed. so the dev_t will be valid for the life
1338 * of the link.
1339 */
1340 ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1341 } else {
1342 /*
1343 * if the link is persistent we can only associate the
1344 * link with a driver (and not a dev_t.) this is
1345 * because subsequent opens of the upper device may result
1346 * in a different stream (and dev_t) having access to
1347 * the lower stream.
1348 *
1349 * for example, if the upper stream is closed after the
1350 * persistent link operation is completed, a subsequent
1351 * open of the upper device will create a new stream which
1352 * may have a different dev_t and an unlink operation
1353 * can be performed using this new upper stream.
1354 */
1355 ASSERT(type == LINKPERSIST);
1356 major = getmajor(stp->sd_vnode->v_rdev);
1357 ret = ldi_ident_from_major(major, &li);
1358 }
1359
1360 ASSERT(ret == 0);
1361 (void) handle_alloc(vp, (struct ldi_ident *)li);
1362 ldi_ident_release(li);
1363 }
1364
1365 void
1366 ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
1367 {
1368 struct ldi_handle *lhp;
1369 vnode_t *vp = (vnode_t *)fpdown->f_vnode;
1370 struct snode *sp;
1371 ldi_ident_t li;
1372 major_t major;
1373 int ret;
1374
1375 /* if the lower stream is not a device then return */
1376 if (!vn_matchops(vp, spec_getvnodeops()))
1377 return;
1378
1379 ASSERT(!servicing_interrupt());
1380 ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));
1381
1382 LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
1383 "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
1384 (void *)stp, (void *)fpdown));
1385
1386 /*
1387 * NOTE: here we rely on the streams subsystem not allowing
1388 * a stream to be multiplexed more than once. if this
1389 * changes, we break.
1390 *
1391 * mark the snode/stream as not multiplexed
1392 */
1393 sp = VTOS(vp);
1394 mutex_enter(&sp->s_lock);
1395 ASSERT(sp->s_flag & SMUXED);
1396 sp->s_flag &= ~SMUXED;
1397 mutex_exit(&sp->s_lock);
1398
1399 /*
1400 * clear the owner for this snode
1401 * see the comment in ldi_mlink_fp() for information about how
1402 * the ident is allocated
1403 */
1404 if (type == LINKNORMAL) {
1405 ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1406 } else {
1407 ASSERT(type == LINKPERSIST);
1408 major = getmajor(stp->sd_vnode->v_rdev);
1409 ret = ldi_ident_from_major(major, &li);
1410 }
1411
1412 ASSERT(ret == 0);
1413 lhp = handle_find(vp, (struct ldi_ident *)li);
1414 handle_release(lhp);
1415 ldi_ident_release(li);
1416 }
1417
1418 /*
1419 * LDI Consolidation private interfaces
1420 */
1421 int
1422 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1423 {
1424 struct modctl *modp;
1425 major_t major;
1426 char *name;
1427
1428 if ((modlp == NULL) || (lip == NULL))
1429 return (EINVAL);
1430
1431 ASSERT(!servicing_interrupt());
1432
1433 modp = mod_getctl(modlp);
1434 if (modp == NULL)
1435 return (EINVAL);
1436 name = modp->mod_modname;
1437 if (name == NULL)
1438 return (EINVAL);
1439 major = mod_name_to_major(name);
1440
1441 *lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1442
1443 LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1444 "ldi_ident_from_mod", (void *)*lip, name));
1445
1446 return (0);
1447 }
1448
1449 ldi_ident_t
1450 ldi_ident_from_anon()
1451 {
1452 ldi_ident_t lip;
1453
1454 ASSERT(!servicing_interrupt());
1455
1456 lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1457
1458 LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1459 "ldi_ident_from_anon", (void *)lip, "genunix"));
1460
1461 return (lip);
1462 }
1463
1464
1465 /*
1466 * LDI Public interfaces
1467 */
1468 int
1469 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1470 {
1471 struct stdata *stp;
1472 dev_t dev;
1473 char *name;
1474
1475 if ((sq == NULL) || (lip == NULL))
1476 return (EINVAL);
1477
1478 ASSERT(!servicing_interrupt());
1479
1480 stp = sq->q_stream;
1481 if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1482 return (EINVAL);
1483
1484 dev = stp->sd_vnode->v_rdev;
1485 name = mod_major_to_name(getmajor(dev));
1486 if (name == NULL)
1487 return (EINVAL);
1488 *lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1489
1490 LDI_ALLOCFREE((CE_WARN,
1491 "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1492 "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1493 (void *)stp));
1494
1495 return (0);
1496 }
1497
1498 int
1499 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1500 {
1501 char *name;
1502
1503 if (lip == NULL)
1504 return (EINVAL);
1505
1506 ASSERT(!servicing_interrupt());
1507
1508 name = mod_major_to_name(getmajor(dev));
1509 if (name == NULL)
1510 return (EINVAL);
1511 *lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1512
1513 LDI_ALLOCFREE((CE_WARN,
1514 "%s: li=0x%p, mod=%s, minor=0x%x",
1515 "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1516
1517 return (0);
1518 }
1519
1520 int
1521 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1522 {
1523 struct dev_info *devi = (struct dev_info *)dip;
1524 char *name;
1525
1526 if ((dip == NULL) || (lip == NULL))
1527 return (EINVAL);
1528
1529 ASSERT(!servicing_interrupt());
1530
1531 name = mod_major_to_name(devi->devi_major);
1532 if (name == NULL)
1533 return (EINVAL);
1534 *lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1535
1536 LDI_ALLOCFREE((CE_WARN,
1537 "%s: li=0x%p, mod=%s, dip=0x%p",
1538 "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1539
1540 return (0);
1541 }
1542
1543 int
1544 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1545 {
1546 char *name;
1547
1548 if (lip == NULL)
1549 return (EINVAL);
1550
1551 ASSERT(!servicing_interrupt());
1552
1553 name = mod_major_to_name(major);
1554 if (name == NULL)
1555 return (EINVAL);
1556 *lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1557
1558 LDI_ALLOCFREE((CE_WARN,
1559 "%s: li=0x%p, mod=%s",
1560 "ldi_ident_from_major", (void *)*lip, name));
1561
1562 return (0);
1563 }
1564
1565 void
1566 ldi_ident_release(ldi_ident_t li)
1567 {
1568 struct ldi_ident *ident = (struct ldi_ident *)li;
1569 char *name;
1570
1571 if (li == NULL)
1572 return;
1573
1574 ASSERT(!servicing_interrupt());
1575
1576 name = ident->li_modname;
1577
1578 LDI_ALLOCFREE((CE_WARN,
1579 "%s: li=0x%p, mod=%s",
1580 "ldi_ident_release", (void *)li, name));
1581
1582 ident_release((struct ldi_ident *)li);
1583 }
1584
1585 /* get a handle to a device by dev_t and otyp */
1586 int
1587 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1588 ldi_handle_t *lhp, ldi_ident_t li)
1589 {
1590 struct ldi_ident *lip = (struct ldi_ident *)li;
1591 int ret;
1592 vnode_t *vp;
1593
1594 /* sanity check required input parameters */
1595 if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1596 (lhp == NULL) || (lip == NULL))
1597 return (EINVAL);
1598
1599 ASSERT(!servicing_interrupt());
1600
1601 if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1602 return (ret);
1603
1604 if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1605 *devp = vp->v_rdev;
1606 }
1607 VN_RELE(vp);
1608
1609 return (ret);
1610 }
1611
1612 /* get a handle to a device by pathname */
1613 int
1614 ldi_open_by_name(char *pathname, int flag, cred_t *cr,
1615 ldi_handle_t *lhp, ldi_ident_t li)
1616 {
1617 struct ldi_ident *lip = (struct ldi_ident *)li;
1618 int ret;
1619 vnode_t *vp;
1620
1621 /* sanity check required input parameters */
1622 if ((pathname == NULL) || (*pathname != '/') ||
1623 (cr == NULL) || (lhp == NULL) || (lip == NULL))
1624 return (EINVAL);
1625
1626 ASSERT(!servicing_interrupt());
1627
1628 if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1629 return (ret);
1630
1631 ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1632 VN_RELE(vp);
1633
1634 return (ret);
1635 }
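
/*
 * Example (hedged sketch): typical use of the public interfaces by a
 * layered consumer.  The device path, dip, and error handling below are
 * purely illustrative.
 *
 *	ldi_ident_t	li;
 *	ldi_handle_t	lh;
 *
 *	if (ldi_ident_from_dip(my_dip, &li) != 0)
 *		return (EINVAL);
 *	if (ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD, kcred,
 *	    &lh, li) != 0) {
 *		ldi_ident_release(li);
 *		return (ENODEV);
 *	}
 *	... use ldi_read()/ldi_ioctl()/ldi_get_size() on lh ...
 *	(void) ldi_close(lh, FREAD, kcred);
 *	ldi_ident_release(li);
 */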
1636
1637 /* get a handle to a device by devid and minor_name */
1638 int
1639 ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
1640 int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1641 {
1642 struct ldi_ident *lip = (struct ldi_ident *)li;
1643 int ret;
1644 vnode_t *vp;
1645
1646 /* sanity check required input parameters */
1647 if ((minor_name == NULL) || (cr == NULL) ||
1648 (lhp == NULL) || (lip == NULL))
1649 return (EINVAL);
1650
1651 ASSERT(!servicing_interrupt());
1652
1653 if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1654 return (ret);
1655
1656 ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1657 VN_RELE(vp);
1658
1659 return (ret);
1660 }
1661
1662 int
1663 ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
1664 {
1665 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1666 struct ldi_event *lep;
1667 int err = 0;
1668 int notify = 0;
1669 list_t *listp;
1670 ldi_ev_callback_impl_t *lecp;
1671
1672 if (lh == NULL)
1673 return (EINVAL);
1674
1675 ASSERT(!servicing_interrupt());
1676
1677 #ifdef LDI_OBSOLETE_EVENT
1678
1679 /*
1680 * Any event handlers should have been unregistered by the
1681 * time ldi_close() is called. If they haven't then it's a
1682 * bug.
1683 *
1684 * In a debug kernel we'll panic to make the problem obvious.
1685 */
1686 ASSERT(handlep->lh_events == NULL);
1687
1688 /*
1689 * On a production kernel we'll "do the right thing" (unregister
1690 * the event handlers) and then complain about having to do the
1691 * work ourselves.
1692 */
1693 while ((lep = handlep->lh_events) != NULL) {
1694 err = 1;
1695 (void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
1696 }
1697 if (err) {
1698 struct ldi_ident *lip = handlep->lh_ident;
1699 ASSERT(lip != NULL);
1700 cmn_err(CE_NOTE, "ldi err: %s "
1701 "failed to unregister layered event handlers before "
1702 "closing devices", lip->li_modname);
1703 }
1704 #endif
1705
1706 /* do a layered close on the device */
1707 err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);
1708
1709 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));
1710
1711 /*
1712 * Search the event callback list for callbacks with this
1713 * handle. There are 2 cases
1714 * 1. Called in the context of a notify. The handle consumer
1715 * is releasing its hold on the device to allow a reconfiguration
1716 * of the device. Simply NULL out the handle and the notify callback.
1717 * The finalize callback is still available so that the consumer
1718 * knows of the final disposition of the device.
1719 * 2. Not called in the context of notify. NULL out the handle as well
1720 * as the notify and finalize callbacks. Since the consumer has
1721 * closed the handle, we assume it is not interested in the
1722 * notify and finalize callbacks.
1723 */
1724 ldi_ev_lock();
1725
1726 if (handlep->lh_flags & LH_FLAGS_NOTIFY)
1727 notify = 1;
1728 listp = &ldi_ev_callback_list.le_head;
1729 for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
1730 if (lecp->lec_lhp != handlep)
1731 continue;
1732 lecp->lec_lhp = NULL;
1733 lecp->lec_notify = NULL;
1734 LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
1735 if (!notify) {
1736 LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
1737 lecp->lec_finalize = NULL;
1738 }
1739 }
1740
1741 if (notify)
1742 handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
1743 ldi_ev_unlock();
1744
1745 /*
1746 * Free the handle even if the device close failed. why?
1747 *
1748 * If the device close failed we can't really make assumptions
1749 * about the devices state so we shouldn't allow access to the
1750 * device via this handle any more. If the device consumer wants
1751 * to access the device again they should open it again.
1752 *
1753 * This is the same way file/device close failures are handled
1754 * in other places like spec_close() and closeandsetf().
1755 */
1756 handle_release(handlep);
1757 return (err);
1758 }
1759
1760 int
1761 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1762 {
1763 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1764 vnode_t *vp;
1765 dev_t dev;
1766 int ret;
1767
1768 if (lh == NULL)
1769 return (EINVAL);
1770
1771 vp = handlep->lh_vp;
1772 dev = vp->v_rdev;
1773 if (handlep->lh_type & LH_CBDEV) {
1774 ret = cdev_read(dev, uiop, credp);
1775 } else if (handlep->lh_type & LH_STREAM) {
1776 ret = strread(vp, uiop, credp);
1777 } else {
1778 return (ENOTSUP);
1779 }
1780 return (ret);
1781 }
1782
1783 int
1784 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1785 {
1786 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1787 vnode_t *vp;
1788 dev_t dev;
1789 int ret;
1790
1791 if (lh == NULL)
1792 return (EINVAL);
1793
1794 vp = handlep->lh_vp;
1795 dev = vp->v_rdev;
1796 if (handlep->lh_type & LH_CBDEV) {
1797 ret = cdev_write(dev, uiop, credp);
1798 } else if (handlep->lh_type & LH_STREAM) {
1799 ret = strwrite(vp, uiop, credp);
1800 } else {
1801 return (ENOTSUP);
1802 }
1803 return (ret);
1804 }
1805
1806 int
1807 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1808 {
1809 int otyp;
1810 uint_t value;
1811 int64_t drv_prop64;
1812 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1813 uint_t blksize;
1814 int blkshift;
1815
1816
1817 if ((lh == NULL) || (sizep == NULL))
1818 return (DDI_FAILURE);
1819
1820 if (handlep->lh_type & LH_STREAM)
1821 return (DDI_FAILURE);
1822
1823 /*
1824 * Determine device type (char or block).
1825 * Character devices support Size/size
1826 * property value. Block devices may support
1827 * Nblocks/nblocks or Size/size property value.
1828 */
1829 if ((ldi_get_otyp(lh, &otyp)) != 0)
1830 return (DDI_FAILURE);
1831
1832 if (otyp == OTYP_BLK) {
1833 if (ldi_prop_exists(lh,
1834 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1835
1836 drv_prop64 = ldi_prop_get_int64(lh,
1837 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1838 "Nblocks", 0);
1839 blksize = ldi_prop_get_int(lh,
1840 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1841 "blksize", DEV_BSIZE);
1842 if (blksize == DEV_BSIZE)
1843 blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1844 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1845 "device-blksize", DEV_BSIZE);
1846
1847 /* blksize must be a power of two */
1848 ASSERT(BIT_ONLYONESET(blksize));
1849 blkshift = highbit(blksize) - 1;
1850
1851 /*
1852 * We don't support Nblocks values that don't have
1853 * an accurate uint64_t byte count representation.
1854 */
1855 if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1856 return (DDI_FAILURE);
1857
1858 *sizep = (uint64_t)
1859 (((u_offset_t)drv_prop64) << blkshift);
1860 return (DDI_SUCCESS);
1861 }
1862
1863 if (ldi_prop_exists(lh,
1864 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1865
1866 value = ldi_prop_get_int(lh,
1867 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1868 "nblocks", 0);
1869 blksize = ldi_prop_get_int(lh,
1870 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1871 "blksize", DEV_BSIZE);
1872 if (blksize == DEV_BSIZE)
1873 blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1874 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1875 "device-blksize", DEV_BSIZE);
1876
1877 /* blksize must be a power of two */
1878 ASSERT(BIT_ONLYONESET(blksize));
1879 blkshift = highbit(blksize) - 1;
1880
1881 /*
1882 * We don't support nblocks values that don't have an
1883 * accurate uint64_t byte count representation.
1884 */
1885 if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1886 return (DDI_FAILURE);
1887
1888 *sizep = (uint64_t)
1889 (((u_offset_t)value) << blkshift);
1890 return (DDI_SUCCESS);
1891 }
1892 }
1893
1894 if (ldi_prop_exists(lh,
1895 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1896
1897 drv_prop64 = ldi_prop_get_int64(lh,
1898 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1899 *sizep = (uint64_t)drv_prop64;
1900 return (DDI_SUCCESS);
1901 }
1902
1903 if (ldi_prop_exists(lh,
1904 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1905
1906 value = ldi_prop_get_int(lh,
1907 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1908 *sizep = (uint64_t)value;
1909 return (DDI_SUCCESS);
1910 }
1911
1912 /* unable to determine device size */
1913 return (DDI_FAILURE);
1914 }
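
/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): a layered consumer would typically pair ldi_open_by_name()
 * with ldi_get_size() to learn the size, in bytes, of an underlying
 * block device.  The device path is hypothetical and "li" is assumed
 * to be an ldi_ident_t obtained earlier via ldi_ident_from_mod().
 *
 *	ldi_handle_t	lh;
 *	uint64_t	size;
 *
 *	if (ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD, kcred,
 *	    &lh, li) == 0) {
 *		if (ldi_get_size(lh, &size) == DDI_SUCCESS)
 *			cmn_err(CE_CONT, "device is %llu bytes\n",
 *			    (u_longlong_t)size);
 *		(void) ldi_close(lh, FREAD, kcred);
 *	}
 */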
1915
1916 int
1917 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1918 cred_t *cr, int *rvalp)
1919 {
1920 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1921 vnode_t *vp;
1922 dev_t dev;
1923 int ret, copymode, unused;
1924
1925 if (lh == NULL)
1926 return (EINVAL);
1927
1928 /*
1929 * if the data pointed to by arg is located in the kernel then
1930 * make sure the FNATIVE flag is set.
1931 */
1932 if (mode & FKIOCTL)
1933 mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1934
1935 /*
1936 * Some drivers assume that rvalp will always be non-NULL, so in
1937 * an attempt to avoid panics if the caller passed in a NULL
1938 * value, update rvalp to point to a temporary variable.
1939 */
1940 if (rvalp == NULL)
1941 rvalp = &unused;
1942 vp = handlep->lh_vp;
1943 dev = vp->v_rdev;
1944 if (handlep->lh_type & LH_CBDEV) {
1945 ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1946 } else if (handlep->lh_type & LH_STREAM) {
1947 copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1948
1949 /*
1950 * if we get an I_PLINK from within the kernel the
1951 * arg is a layered handle pointer instead of
1952 * a file descriptor, so we translate this ioctl
1953 * into a private one that can handle this.
1954 */
1955 if ((mode & FKIOCTL) && (cmd == I_PLINK))
1956 cmd = _I_PLINK_LH;
1957
1958 ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1959 } else {
1960 return (ENOTSUP);
1961 }
1962
1963 return (ret);
1964 }
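
/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): a kernel caller passing a kernel-resident argument buffer
 * must set FKIOCTL so the target driver copies data with kernel
 * (FNATIVE) semantics.  The command XX_IOC_GETINFO and struct xx_info
 * are hypothetical.
 *
 *	struct xx_info	info;
 *	int		rval, err;
 *
 *	err = ldi_ioctl(lh, XX_IOC_GETINFO, (intptr_t)&info,
 *	    FKIOCTL | FREAD, kcred, &rval);
 */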
1965
1966 int
1967 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1968 struct pollhead **phpp)
1969 {
1970 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1971 vnode_t *vp;
1972 dev_t dev;
1973 int ret;
1974
1975 if (lh == NULL)
1976 return (EINVAL);
1977
1978 vp = handlep->lh_vp;
1979 dev = vp->v_rdev;
1980 if (handlep->lh_type & LH_CBDEV) {
1981 ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
1982 } else if (handlep->lh_type & LH_STREAM) {
1983 ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
1984 } else {
1985 return (ENOTSUP);
1986 }
1987
1988 return (ret);
1989 }
1990
1991 int
1992 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
1993 int flags, char *name, caddr_t valuep, int *length)
1994 {
1995 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1996 dev_t dev;
1997 dev_info_t *dip;
1998 int ret;
1999 struct snode *csp;
2000
2001 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2002 return (DDI_PROP_INVAL_ARG);
2003
2004 if ((prop_op != PROP_LEN) && (valuep == NULL))
2005 return (DDI_PROP_INVAL_ARG);
2006
2007 if (length == NULL)
2008 return (DDI_PROP_INVAL_ARG);
2009
2010 /*
2011 * try to find the associated dip,
2012 * this places a hold on the driver
2013 */
2014 dev = handlep->lh_vp->v_rdev;
2015
2016 csp = VTOCS(handlep->lh_vp);
2017 mutex_enter(&csp->s_lock);
2018 if ((dip = csp->s_dip) != NULL)
2019 e_ddi_hold_devi(dip);
2020 mutex_exit(&csp->s_lock);
2021 if (dip == NULL)
2022 dip = e_ddi_hold_devi_by_dev(dev, 0);
2023
2024 if (dip == NULL)
2025 return (DDI_PROP_NOT_FOUND);
2026
2027 ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
2028 ddi_release_devi(dip);
2029
2030 return (ret);
2031 }
2032
2033 int
2034 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2035 {
2036 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2037 dev_t dev;
2038
2039 if ((lh == NULL) || (bp == NULL))
2040 return (EINVAL);
2041
2042 /* this entry point is only supported for cb devices */
2043 dev = handlep->lh_vp->v_rdev;
2044 if (!(handlep->lh_type & LH_CBDEV))
2045 return (ENOTSUP);
2046
2047 bp->b_edev = dev;
2048 bp->b_dev = cmpdev(dev);
2049 return (bdev_strategy(bp));
2050 }
2051
2052 int
2053 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2054 {
2055 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2056 dev_t dev;
2057
2058 if (lh == NULL)
2059 return (EINVAL);
2060
2061 /* this entry point is only supported for cb devices */
2062 dev = handlep->lh_vp->v_rdev;
2063 if (!(handlep->lh_type & LH_CBDEV))
2064 return (ENOTSUP);
2065
2066 return (bdev_dump(dev, addr, blkno, nblk));
2067 }
2068
2069 int
2070 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2071 size_t len, size_t *maplen, uint_t model)
2072 {
2073 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2074 dev_t dev;
2075
2076 if (lh == NULL)
2077 return (EINVAL);
2078
2079 /* this entry point is only supported for cb devices */
2080 dev = handlep->lh_vp->v_rdev;
2081 if (!(handlep->lh_type & LH_CBDEV))
2082 return (ENOTSUP);
2083
2084 return (cdev_devmap(dev, dhp, off, len, maplen, model));
2085 }
2086
2087 int
2088 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2089 {
2090 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2091 dev_t dev;
2092 struct cb_ops *cb;
2093
2094 if (lh == NULL)
2095 return (EINVAL);
2096
2097 /* this entry point is only supported for cb devices */
2098 if (!(handlep->lh_type & LH_CBDEV))
2099 return (ENOTSUP);
2100
2101 /*
2102 * Kaio is only supported on block devices.
2103 */
2104 dev = handlep->lh_vp->v_rdev;
2105 cb = devopsp[getmajor(dev)]->devo_cb_ops;
2106 if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2107 return (ENOTSUP);
2108
2109 if (cb->cb_aread == NULL)
2110 return (ENOTSUP);
2111
2112 return (cb->cb_aread(dev, aio_reqp, cr));
2113 }
2114
2115 int
2116 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2117 {
2118 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2119 struct cb_ops *cb;
2120 dev_t dev;
2121
2122 if (lh == NULL)
2123 return (EINVAL);
2124
2125 /* this entry point is only supported for cb devices */
2126 if (!(handlep->lh_type & LH_CBDEV))
2127 return (ENOTSUP);
2128
2129 /*
2130 * Kaio is only supported on block devices.
2131 */
2132 dev = handlep->lh_vp->v_rdev;
2133 cb = devopsp[getmajor(dev)]->devo_cb_ops;
2134 if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2135 return (ENOTSUP);
2136
2137 if (cb->cb_awrite == NULL)
2138 return (ENOTSUP);
2139
2140 return (cb->cb_awrite(dev, aio_reqp, cr));
2141 }
2142
2143 int
2144 ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
2145 {
2146 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2147 int ret;
2148
2149 if ((lh == NULL) || (smp == NULL))
2150 return (EINVAL);
2151
2152 if (!(handlep->lh_type & LH_STREAM)) {
2153 freemsg(smp);
2154 return (ENOTSUP);
2155 }
2156
2157 /*
2158 * If we don't have db_credp, set it. Note that we can not be called
2159 * from interrupt context.
2160 */
2161 if (msg_getcred(smp, NULL) == NULL)
2162 mblk_setcred(smp, CRED(), curproc->p_pid);
2163
2164 /* Send message while honoring flow control */
2165 ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
2166 MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);
2167
2168 return (ret);
2169 }
2170
2171 int
2172 ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
2173 {
2174 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2175 clock_t timout; /* milliseconds */
2176 uchar_t pri;
2177 rval_t rval;
2178 int ret, pflag;
2179
2180
2181 if (lh == NULL)
2182 return (EINVAL);
2183
2184 if (!(handlep->lh_type & LH_STREAM))
2185 return (ENOTSUP);
2186
2187 /* Convert from nanoseconds to milliseconds */
2188 if (timeo != NULL) {
2189 timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
2190 if (timout > INT_MAX)
2191 return (EINVAL);
2192 } else
2193 timout = -1;
2194
2195 /* Wait for timeout milliseconds for a message */
2196 pflag = MSG_ANY;
2197 pri = 0;
2198 *rmp = NULL;
2199 ret = kstrgetmsg(handlep->lh_vp,
2200 rmp, NULL, &pri, &pflag, timout, &rval);
2201 return (ret);
2202 }
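
/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): waiting up to five seconds for a message on a STREAMS
 * handle.  Passing a NULL timeout waits indefinitely.
 *
 *	mblk_t		*mp;
 *	timestruc_t	ts;
 *
 *	ts.tv_sec = 5;
 *	ts.tv_nsec = 0;
 *	err = ldi_getmsg(lh, &mp, &ts);
 */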
2203
2204 int
2205 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2206 {
2207 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2208
2209 if ((lh == NULL) || (devp == NULL))
2210 return (EINVAL);
2211
2212 *devp = handlep->lh_vp->v_rdev;
2213 return (0);
2214 }
2215
2216 int
2217 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2218 {
2219 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2220
2221 if ((lh == NULL) || (otyp == NULL))
2222 return (EINVAL);
2223
2224 *otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2225 return (0);
2226 }
2227
2228 int
2229 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2230 {
2231 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2232 int ret;
2233 dev_t dev;
2234
2235 if ((lh == NULL) || (devid == NULL))
2236 return (EINVAL);
2237
2238 dev = handlep->lh_vp->v_rdev;
2239
2240 ret = ddi_lyr_get_devid(dev, devid);
2241 if (ret != DDI_SUCCESS)
2242 return (ENOTSUP);
2243
2244 return (0);
2245 }
2246
2247 int
2248 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2249 {
2250 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2251 int ret, otyp;
2252 dev_t dev;
2253
2254 if ((lh == NULL) || (minor_name == NULL))
2255 return (EINVAL);
2256
2257 dev = handlep->lh_vp->v_rdev;
2258 otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2259
2260 ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2261 if (ret != DDI_SUCCESS)
2262 return (ENOTSUP);
2263
2264 return (0);
2265 }
2266
2267 int
2268 ldi_prop_lookup_int_array(ldi_handle_t lh,
2269 uint_t flags, char *name, int **data, uint_t *nelements)
2270 {
2271 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2272 dev_info_t *dip;
2273 dev_t dev;
2274 int res;
2275 struct snode *csp;
2276
2277 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2278 return (DDI_PROP_INVAL_ARG);
2279
2280 dev = handlep->lh_vp->v_rdev;
2281
2282 csp = VTOCS(handlep->lh_vp);
2283 mutex_enter(&csp->s_lock);
2284 if ((dip = csp->s_dip) != NULL)
2285 e_ddi_hold_devi(dip);
2286 mutex_exit(&csp->s_lock);
2287 if (dip == NULL)
2288 dip = e_ddi_hold_devi_by_dev(dev, 0);
2289
2290 if (dip == NULL) {
2291 flags |= DDI_UNBND_DLPI2;
2292 } else if (flags & LDI_DEV_T_ANY) {
2293 flags &= ~LDI_DEV_T_ANY;
2294 dev = DDI_DEV_T_ANY;
2295 }
2296
2297 if (dip != NULL) {
2298 int *prop_val, prop_len;
2299
2300 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2301 (caddr_t *)&prop_val, &prop_len, sizeof (int));
2302
2303 /* if we got it then return it */
2304 if (res == DDI_PROP_SUCCESS) {
2305 *nelements = prop_len / sizeof (int);
2306 *data = prop_val;
2307
2308 ddi_release_devi(dip);
2309 return (res);
2310 }
2311 }
2312
2313 /* call the normal property interfaces */
2314 res = ddi_prop_lookup_int_array(dev, dip, flags,
2315 name, data, nelements);
2316
2317 if (dip != NULL)
2318 ddi_release_devi(dip);
2319
2320 return (res);
2321 }
2322
2323 int
2324 ldi_prop_lookup_int64_array(ldi_handle_t lh,
2325 uint_t flags, char *name, int64_t **data, uint_t *nelements)
2326 {
2327 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2328 dev_info_t *dip;
2329 dev_t dev;
2330 int res;
2331 struct snode *csp;
2332
2333 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2334 return (DDI_PROP_INVAL_ARG);
2335
2336 dev = handlep->lh_vp->v_rdev;
2337
2338 csp = VTOCS(handlep->lh_vp);
2339 mutex_enter(&csp->s_lock);
2340 if ((dip = csp->s_dip) != NULL)
2341 e_ddi_hold_devi(dip);
2342 mutex_exit(&csp->s_lock);
2343 if (dip == NULL)
2344 dip = e_ddi_hold_devi_by_dev(dev, 0);
2345
2346 if (dip == NULL) {
2347 flags |= DDI_UNBND_DLPI2;
2348 } else if (flags & LDI_DEV_T_ANY) {
2349 flags &= ~LDI_DEV_T_ANY;
2350 dev = DDI_DEV_T_ANY;
2351 }
2352
2353 if (dip != NULL) {
2354 int64_t *prop_val;
2355 int prop_len;
2356
2357 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2358 (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));
2359
2360 /* if we got it then return it */
2361 if (res == DDI_PROP_SUCCESS) {
2362 *nelements = prop_len / sizeof (int64_t);
2363 *data = prop_val;
2364
2365 ddi_release_devi(dip);
2366 return (res);
2367 }
2368 }
2369
2370 /* call the normal property interfaces */
2371 res = ddi_prop_lookup_int64_array(dev, dip, flags,
2372 name, data, nelements);
2373
2374 if (dip != NULL)
2375 ddi_release_devi(dip);
2376
2377 return (res);
2378 }
2379
2380 int
2381 ldi_prop_lookup_string_array(ldi_handle_t lh,
2382 uint_t flags, char *name, char ***data, uint_t *nelements)
2383 {
2384 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2385 dev_info_t *dip;
2386 dev_t dev;
2387 int res;
2388 struct snode *csp;
2389
2390 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2391 return (DDI_PROP_INVAL_ARG);
2392
2393 dev = handlep->lh_vp->v_rdev;
2394
2395 csp = VTOCS(handlep->lh_vp);
2396 mutex_enter(&csp->s_lock);
2397 if ((dip = csp->s_dip) != NULL)
2398 e_ddi_hold_devi(dip);
2399 mutex_exit(&csp->s_lock);
2400 if (dip == NULL)
2401 dip = e_ddi_hold_devi_by_dev(dev, 0);
2402
2403 if (dip == NULL) {
2404 flags |= DDI_UNBND_DLPI2;
2405 } else if (flags & LDI_DEV_T_ANY) {
2406 flags &= ~LDI_DEV_T_ANY;
2407 dev = DDI_DEV_T_ANY;
2408 }
2409
2410 if (dip != NULL) {
2411 char *prop_val;
2412 int prop_len;
2413
2414 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2415 (caddr_t *)&prop_val, &prop_len, 0);
2416
2417 /* if we got it then return it */
2418 if (res == DDI_PROP_SUCCESS) {
2419 char **str_array;
2420 int nelem;
2421
2422 /*
2423 * pack the returned string array into the format
2424 * our callers expect
2425 */
2426 if (i_pack_string_array(prop_val, prop_len,
2427 &str_array, &nelem) == 0) {
2428
2429 *data = str_array;
2430 *nelements = nelem;
2431
2432 ddi_prop_free(prop_val);
2433 ddi_release_devi(dip);
2434 return (res);
2435 }
2436
2437 /*
2438 * the format of the returned property must have
2439 * been bad so throw it out
2440 */
2441 ddi_prop_free(prop_val);
2442 }
2443 }
2444
2445 /* call the normal property interfaces */
2446 res = ddi_prop_lookup_string_array(dev, dip, flags,
2447 name, data, nelements);
2448
2449 if (dip != NULL)
2450 ddi_release_devi(dip);
2451
2452 return (res);
2453 }
2454
2455 int
2456 ldi_prop_lookup_string(ldi_handle_t lh,
2457 uint_t flags, char *name, char **data)
2458 {
2459 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2460 dev_info_t *dip;
2461 dev_t dev;
2462 int res;
2463 struct snode *csp;
2464
2465 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2466 return (DDI_PROP_INVAL_ARG);
2467
2468 dev = handlep->lh_vp->v_rdev;
2469
2470 csp = VTOCS(handlep->lh_vp);
2471 mutex_enter(&csp->s_lock);
2472 if ((dip = csp->s_dip) != NULL)
2473 e_ddi_hold_devi(dip);
2474 mutex_exit(&csp->s_lock);
2475 if (dip == NULL)
2476 dip = e_ddi_hold_devi_by_dev(dev, 0);
2477
2478 if (dip == NULL) {
2479 flags |= DDI_UNBND_DLPI2;
2480 } else if (flags & LDI_DEV_T_ANY) {
2481 flags &= ~LDI_DEV_T_ANY;
2482 dev = DDI_DEV_T_ANY;
2483 }
2484
2485 if (dip != NULL) {
2486 char *prop_val;
2487 int prop_len;
2488
2489 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2490 (caddr_t *)&prop_val, &prop_len, 0);
2491
2492 /* if we got it then return it */
2493 if (res == DDI_PROP_SUCCESS) {
2494 /*
2495 * sanity check the value returned.
2496 */
2497 if (i_check_string(prop_val, prop_len)) {
2498 ddi_prop_free(prop_val);
2499 } else {
2500 *data = prop_val;
2501 ddi_release_devi(dip);
2502 return (res);
2503 }
2504 }
2505 }
2506
2507 /* call the normal property interfaces */
2508 res = ddi_prop_lookup_string(dev, dip, flags, name, data);
2509
2510 if (dip != NULL)
2511 ddi_release_devi(dip);
2512
2513 #ifdef DEBUG
2514 if (res == DDI_PROP_SUCCESS) {
2515 /*
2516 * keep ourselves honest
2517 * make sure the framework returns strings in the
2518 * same format as we're demanding from drivers.
2519 */
2520 struct prop_driver_data *pdd;
2521 int pdd_prop_size;
2522
2523 pdd = ((struct prop_driver_data *)(*data)) - 1;
2524 pdd_prop_size = pdd->pdd_size -
2525 sizeof (struct prop_driver_data);
2526 ASSERT(i_check_string(*data, pdd_prop_size) == 0);
2527 }
2528 #endif /* DEBUG */
2529
2530 return (res);
2531 }
2532
2533 int
2534 ldi_prop_lookup_byte_array(ldi_handle_t lh,
2535 uint_t flags, char *name, uchar_t **data, uint_t *nelements)
2536 {
2537 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2538 dev_info_t *dip;
2539 dev_t dev;
2540 int res;
2541 struct snode *csp;
2542
2543 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2544 return (DDI_PROP_INVAL_ARG);
2545
2546 dev = handlep->lh_vp->v_rdev;
2547
2548 csp = VTOCS(handlep->lh_vp);
2549 mutex_enter(&csp->s_lock);
2550 if ((dip = csp->s_dip) != NULL)
2551 e_ddi_hold_devi(dip);
2552 mutex_exit(&csp->s_lock);
2553 if (dip == NULL)
2554 dip = e_ddi_hold_devi_by_dev(dev, 0);
2555
2556 if (dip == NULL) {
2557 flags |= DDI_UNBND_DLPI2;
2558 } else if (flags & LDI_DEV_T_ANY) {
2559 flags &= ~LDI_DEV_T_ANY;
2560 dev = DDI_DEV_T_ANY;
2561 }
2562
2563 if (dip != NULL) {
2564 uchar_t *prop_val;
2565 int prop_len;
2566
2567 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2568 (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));
2569
2570 /* if we got it then return it */
2571 if (res == DDI_PROP_SUCCESS) {
2572 *nelements = prop_len / sizeof (uchar_t);
2573 *data = prop_val;
2574
2575 ddi_release_devi(dip);
2576 return (res);
2577 }
2578 }
2579
2580 /* call the normal property interfaces */
2581 res = ddi_prop_lookup_byte_array(dev, dip, flags,
2582 name, data, nelements);
2583
2584 if (dip != NULL)
2585 ddi_release_devi(dip);
2586
2587 return (res);
2588 }
2589
2590 int
2591 ldi_prop_get_int(ldi_handle_t lh,
2592 uint_t flags, char *name, int defvalue)
2593 {
2594 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2595 dev_info_t *dip;
2596 dev_t dev;
2597 int res;
2598 struct snode *csp;
2599
2600 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2601 return (defvalue);
2602
2603 dev = handlep->lh_vp->v_rdev;
2604
2605 csp = VTOCS(handlep->lh_vp);
2606 mutex_enter(&csp->s_lock);
2607 if ((dip = csp->s_dip) != NULL)
2608 e_ddi_hold_devi(dip);
2609 mutex_exit(&csp->s_lock);
2610 if (dip == NULL)
2611 dip = e_ddi_hold_devi_by_dev(dev, 0);
2612
2613 if (dip == NULL) {
2614 flags |= DDI_UNBND_DLPI2;
2615 } else if (flags & LDI_DEV_T_ANY) {
2616 flags &= ~LDI_DEV_T_ANY;
2617 dev = DDI_DEV_T_ANY;
2618 }
2619
2620 if (dip != NULL) {
2621 int prop_val;
2622 int prop_len;
2623
2624 /*
2625 * first call the driver's prop_op interface to allow it
2626 * to override default property values.
2627 */
2628 prop_len = sizeof (int);
2629 res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2630 flags | DDI_PROP_DYNAMIC, name,
2631 (caddr_t)&prop_val, &prop_len);
2632
2633 /* if we got it then return it */
2634 if ((res == DDI_PROP_SUCCESS) &&
2635 (prop_len == sizeof (int))) {
2636 res = prop_val;
2637 ddi_release_devi(dip);
2638 return (res);
2639 }
2640 }
2641
2642 /* call the normal property interfaces */
2643 res = ddi_prop_get_int(dev, dip, flags, name, defvalue);
2644
2645 if (dip != NULL)
2646 ddi_release_devi(dip);
2647
2648 return (res);
2649 }
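
/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): looking up an integer property on the device a handle is
 * layered on, mirroring the lookups done by ldi_get_size() above.
 * LDI_DEV_T_ANY requests a match that is not restricted to the
 * handle's specific dev_t.
 *
 *	int nblk = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
 *	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks", 0);
 */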
2650
2651 int64_t
2652 ldi_prop_get_int64(ldi_handle_t lh,
2653 uint_t flags, char *name, int64_t defvalue)
2654 {
2655 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2656 dev_info_t *dip;
2657 dev_t dev;
2658 int64_t res;
2659 struct snode *csp;
2660
2661 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2662 return (defvalue);
2663
2664 dev = handlep->lh_vp->v_rdev;
2665
2666 csp = VTOCS(handlep->lh_vp);
2667 mutex_enter(&csp->s_lock);
2668 if ((dip = csp->s_dip) != NULL)
2669 e_ddi_hold_devi(dip);
2670 mutex_exit(&csp->s_lock);
2671 if (dip == NULL)
2672 dip = e_ddi_hold_devi_by_dev(dev, 0);
2673
2674 if (dip == NULL) {
2675 flags |= DDI_UNBND_DLPI2;
2676 } else if (flags & LDI_DEV_T_ANY) {
2677 flags &= ~LDI_DEV_T_ANY;
2678 dev = DDI_DEV_T_ANY;
2679 }
2680
2681 if (dip != NULL) {
2682 int64_t prop_val;
2683 int prop_len;
2684
2685 /*
2686 * first call the driver's prop_op interface to allow it
2687 * to override default property values.
2688 */
2689 prop_len = sizeof (int64_t);
2690 res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2691 flags | DDI_PROP_DYNAMIC, name,
2692 (caddr_t)&prop_val, &prop_len);
2693
2694 /* if we got it then return it */
2695 if ((res == DDI_PROP_SUCCESS) &&
2696 (prop_len == sizeof (int64_t))) {
2697 res = prop_val;
2698 ddi_release_devi(dip);
2699 return (res);
2700 }
2701 }
2702
2703 /* call the normal property interfaces */
2704 res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);
2705
2706 if (dip != NULL)
2707 ddi_release_devi(dip);
2708
2709 return (res);
2710 }
2711
2712 int
2713 ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
2714 {
2715 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2716 dev_info_t *dip;
2717 dev_t dev;
2718 int res, prop_len;
2719 struct snode *csp;
2720
2721 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2722 return (0);
2723
2724 dev = handlep->lh_vp->v_rdev;
2725
2726 csp = VTOCS(handlep->lh_vp);
2727 mutex_enter(&csp->s_lock);
2728 if ((dip = csp->s_dip) != NULL)
2729 e_ddi_hold_devi(dip);
2730 mutex_exit(&csp->s_lock);
2731 if (dip == NULL)
2732 dip = e_ddi_hold_devi_by_dev(dev, 0);
2733
2734 /* if NULL dip, prop does NOT exist */
2735 if (dip == NULL)
2736 return (0);
2737
2738 if (flags & LDI_DEV_T_ANY) {
2739 flags &= ~LDI_DEV_T_ANY;
2740 dev = DDI_DEV_T_ANY;
2741 }
2742
2743 /*
2744 * first call the driver's prop_op interface to allow it
2745 * to override default property values.
2746 */
2747 res = i_ldi_prop_op(dev, dip, PROP_LEN,
2748 flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
2749
2750 if (res == DDI_PROP_SUCCESS) {
2751 ddi_release_devi(dip);
2752 return (1);
2753 }
2754
2755 /* call the normal property interfaces */
2756 res = ddi_prop_exists(dev, dip, flags, name);
2757
2758 ddi_release_devi(dip);
2759 return (res);
2760 }
2761
2762 #ifdef LDI_OBSOLETE_EVENT
2763
2764 int
2765 ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
2766 {
2767 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2768 dev_info_t *dip;
2769 dev_t dev;
2770 int res;
2771 struct snode *csp;
2772
2773 if ((lh == NULL) || (name == NULL) ||
2774 (strlen(name) == 0) || (ecp == NULL)) {
2775 return (DDI_FAILURE);
2776 }
2777
2778 ASSERT(!servicing_interrupt());
2779
2780 dev = handlep->lh_vp->v_rdev;
2781
2782 csp = VTOCS(handlep->lh_vp);
2783 mutex_enter(&csp->s_lock);
2784 if ((dip = csp->s_dip) != NULL)
2785 e_ddi_hold_devi(dip);
2786 mutex_exit(&csp->s_lock);
2787 if (dip == NULL)
2788 dip = e_ddi_hold_devi_by_dev(dev, 0);
2789
2790 if (dip == NULL)
2791 return (DDI_FAILURE);
2792
2793 LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
2794 "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
2795 name, (void *)dip, (void *)ecp));
2796
2797 res = ddi_get_eventcookie(dip, name, ecp);
2798
2799 ddi_release_devi(dip);
2800 return (res);
2801 }
2802
2803 int
2804 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2805 void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2806 void *arg, ldi_callback_id_t *id)
2807 {
2808 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2809 struct ldi_event *lep;
2810 dev_info_t *dip;
2811 dev_t dev;
2812 int res;
2813 struct snode *csp;
2814
2815 if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2816 return (DDI_FAILURE);
2817
2818 ASSERT(!servicing_interrupt());
2819
2820 dev = handlep->lh_vp->v_rdev;
2821
2822 csp = VTOCS(handlep->lh_vp);
2823 mutex_enter(&csp->s_lock);
2824 if ((dip = csp->s_dip) != NULL)
2825 e_ddi_hold_devi(dip);
2826 mutex_exit(&csp->s_lock);
2827 if (dip == NULL)
2828 dip = e_ddi_hold_devi_by_dev(dev, 0);
2829
2830 if (dip == NULL)
2831 return (DDI_FAILURE);
2832
2833 lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2834 lep->le_lhp = handlep;
2835 lep->le_arg = arg;
2836 lep->le_handler = handler;
2837
2838 if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2839 (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2840 LDI_EVENTCB((CE_WARN, "%s: unable to add "
2841 "event callback", "ldi_add_event_handler"));
2842 ddi_release_devi(dip);
2843 kmem_free(lep, sizeof (struct ldi_event));
2844 return (res);
2845 }
2846
2847 *id = (ldi_callback_id_t)lep;
2848
2849 LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2850 "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2851 (void *)dip, (void *)ec, (void *)lep, (void *)id));
2852
2853 handle_event_add(lep);
2854 ddi_release_devi(dip);
2855 return (res);
2856 }
2857
2858 int
2859 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2860 {
2861 ldi_event_t *lep = (ldi_event_t *)id;
2862 int res;
2863
2864 if ((lh == NULL) || (id == NULL))
2865 return (DDI_FAILURE);
2866
2867 ASSERT(!servicing_interrupt());
2868
2869 if ((res = ddi_remove_event_handler(lep->le_id))
2870 != DDI_SUCCESS) {
2871 LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2872 "event callback", "ldi_remove_event_handler"));
2873 return (res);
2874 }
2875
2876 handle_event_remove(lep);
2877 kmem_free(lep, sizeof (struct ldi_event));
2878 return (res);
2879 }
2880
2881 #endif
2882
2883 /*
2884 * Here are some definitions of terms used in the following LDI events
2885 * code:
2886 *
2887 * "LDI events" AKA "native events": These are events defined by the
2888 * "new" LDI event framework. These events are serviced by the LDI event
2889 * framework itself and thus are native to it.
2890 *
2891 * "LDI contract events": These are contract events that correspond to the
2892 * LDI events. This mapping of LDI events to contract events is defined by
2893 * the ldi_ev_cookies[] array above.
2894 *
2895 * NDI events: These are events which are serviced by the NDI event subsystem.
2896 * The LDI subsystem just provides a thin wrapper around the NDI event
2897 * interfaces. These events are therefore *not* native events.
2898 */
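
/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): the typical consumer-side sequence for the native event
 * framework described above.  The LDI_EV_OFFLINE event name, the
 * callback bodies and the xx_state argument are assumptions used
 * only for illustration.
 *
 *	static int
 *	xx_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
 *	    void *ev_data)
 *	{
 *		return (LDI_EV_SUCCESS);	(allow the event)
 *	}
 *
 *	static void
 *	xx_finalize(ldi_handle_t lh, ldi_ev_cookie_t cookie,
 *	    int ldi_result, void *arg, void *ev_data)
 *	{
 *	}
 *
 *	ldi_ev_cookie_t		cookie;
 *	ldi_callback_id_t	id;
 *	ldi_ev_callback_t	cb = {
 *		LDI_EV_CB_VERS, xx_notify, xx_finalize
 *	};
 *
 *	if (ldi_ev_get_cookie(lh, LDI_EV_OFFLINE, &cookie) ==
 *	    LDI_EV_SUCCESS)
 *		(void) ldi_ev_register_callbacks(lh, cookie, &cb,
 *		    xx_state, &id);
 */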
2899
2900 static int
2901 ldi_native_event(const char *evname)
2902 {
2903 int i;
2904
2905 LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2906
2907 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2908 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2909 return (1);
2910 }
2911
2912 return (0);
2913 }
2914
2915 static uint_t
2916 ldi_ev_sync_event(const char *evname)
2917 {
2918 int i;
2919
2920 ASSERT(ldi_native_event(evname));
2921
2922 LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2923
2924 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2925 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2926 return (ldi_ev_cookies[i].ck_sync);
2927 }
2928
2929 /*
2930 * This should never happen until non-contract based
2931 * LDI events are introduced. If that happens, we will
2932 * use a "special" token to indicate that there are no
2933 * contracts corresponding to this LDI event.
2934 */
2935 cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2936
2937 return (0);
2938 }
2939
2940 static uint_t
2941 ldi_contract_event(const char *evname)
2942 {
2943 int i;
2944
2945 ASSERT(ldi_native_event(evname));
2946
2947 LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2948
2949 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2950 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2951 return (ldi_ev_cookies[i].ck_ctype);
2952 }
2953
2954 /*
2955 * This should never happen until non-contract based
2956 * LDI events are introduced. If that happens, we will
2957 * use a "special" token to indicate that there are no
2958 * contracts corresponding to this LDI event.
2959 */
2960 cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2961
2962 return (0);
2963 }
2964
2965 char *
2966 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2967 {
2968 int i;
2969 struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2970
2971 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2972 if (&ldi_ev_cookies[i] == cookie_impl) {
2973 LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
2974 ldi_ev_cookies[i].ck_evname));
2975 return (ldi_ev_cookies[i].ck_evname);
2976 }
2977 }
2978
2979 /*
2980 * Not an LDI native event. Must be NDI event service.
2981 * Just return a generic string
2982 */
2983 LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
2984 return (NDI_EVENT_SERVICE);
2985 }
2986
2987 static int
2988 ldi_native_cookie(ldi_ev_cookie_t cookie)
2989 {
2990 int i;
2991 struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2992
2993 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2994 if (&ldi_ev_cookies[i] == cookie_impl) {
2995 LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
2996 return (1);
2997 }
2998 }
2999
3000 LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
3001 return (0);
3002 }
3003
3004 static ldi_ev_cookie_t
3005 ldi_get_native_cookie(const char *evname)
3006 {
3007 int i;
3008
3009 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3010 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3011 LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3012 return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3013 }
3014 }
3015
3016 LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3017 return (NULL);
3018 }
3019
3020 /*
3021 * ldi_ev_lock() needs to be recursive, since layered drivers may call
3022 * other LDI interfaces (such as ldi_close()) from within the context of
3023 * a notify callback. Since the notify callback is called with the
3024 * ldi_ev_lock() held and ldi_close() also grabs ldi_ev_lock, the lock needs
3025 * to be recursive.
3026 */
3027 static void
3028 ldi_ev_lock(void)
3029 {
3030 LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));
3031
3032 mutex_enter(&ldi_ev_callback_list.le_lock);
3033 if (ldi_ev_callback_list.le_thread == curthread) {
3034 ASSERT(ldi_ev_callback_list.le_busy >= 1);
3035 ldi_ev_callback_list.le_busy++;
3036 } else {
3037 while (ldi_ev_callback_list.le_busy)
3038 cv_wait(&ldi_ev_callback_list.le_cv,
3039 &ldi_ev_callback_list.le_lock);
3040 ASSERT(ldi_ev_callback_list.le_thread == NULL);
3041 ldi_ev_callback_list.le_busy = 1;
3042 ldi_ev_callback_list.le_thread = curthread;
3043 }
3044 mutex_exit(&ldi_ev_callback_list.le_lock);
3045
3046 LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
3047 }
3048
3049 static void
3050 ldi_ev_unlock(void)
3051 {
3052 LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
3053 mutex_enter(&ldi_ev_callback_list.le_lock);
3054 ASSERT(ldi_ev_callback_list.le_thread == curthread);
3055 ASSERT(ldi_ev_callback_list.le_busy >= 1);
3056
3057 ldi_ev_callback_list.le_busy--;
3058 if (ldi_ev_callback_list.le_busy == 0) {
3059 ldi_ev_callback_list.le_thread = NULL;
3060 cv_signal(&ldi_ev_callback_list.le_cv);
3061 }
3062 mutex_exit(&ldi_ev_callback_list.le_lock);
3063 LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
3064 }
3065
3066 int
3067 ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
3068 {
3069 struct ldi_handle *handlep = (struct ldi_handle *)lh;
3070 dev_info_t *dip;
3071 dev_t dev;
3072 int res;
3073 struct snode *csp;
3074 ddi_eventcookie_t ddi_cookie;
3075 ldi_ev_cookie_t tcookie;
3076
3077 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
3078 evname ? evname : "<NULL>"));
3079
3080 if (lh == NULL || evname == NULL ||
3081 strlen(evname) == 0 || cookiep == NULL) {
3082 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
3083 return (LDI_EV_FAILURE);
3084 }
3085
3086 *cookiep = NULL;
3087
3088 /*
3089 * First check if it is an LDI native event
3090 */
3091 tcookie = ldi_get_native_cookie(evname);
3092 if (tcookie) {
3093 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
3094 *cookiep = tcookie;
3095 return (LDI_EV_SUCCESS);
3096 }
3097
3098 /*
3099 * Not an LDI native event. Try NDI event services
3100 */
3101
3102 dev = handlep->lh_vp->v_rdev;
3103
3104 csp = VTOCS(handlep->lh_vp);
3105 mutex_enter(&csp->s_lock);
3106 if ((dip = csp->s_dip) != NULL)
3107 e_ddi_hold_devi(dip);
3108 mutex_exit(&csp->s_lock);
3109 if (dip == NULL)
3110 dip = e_ddi_hold_devi_by_dev(dev, 0);
3111
3112 if (dip == NULL) {
3113 cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
3114 "handle: %p", (void *)handlep);
3115 return (LDI_EV_FAILURE);
3116 }
3117
3118 LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
3119 (void *)dip, evname));
3120
3121 res = ddi_get_eventcookie(dip, evname, &ddi_cookie);
3122
3123 ddi_release_devi(dip);
3124
3125 if (res == DDI_SUCCESS) {
3126 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
3127 *cookiep = (ldi_ev_cookie_t)ddi_cookie;
3128 return (LDI_EV_SUCCESS);
3129 } else {
3130 LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
3131 return (LDI_EV_FAILURE);
3132 }
3133 }
3134
3135 /*ARGSUSED*/
3136 static void
3137 i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
3138 void *arg, void *ev_data)
3139 {
3140 ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;
3141
3142 ASSERT(lecp != NULL);
3143 ASSERT(!ldi_native_cookie(lecp->lec_cookie));
3144 ASSERT(lecp->lec_lhp);
3145 ASSERT(lecp->lec_notify == NULL);
3146 ASSERT(lecp->lec_finalize);
3147
3148 LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
3149 "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
3150 (void *)lecp->lec_arg, (void *)ev_data));
3151
3152 lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
3153 lecp->lec_arg, ev_data);
3154 }
3155
3156 int
3157 ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
3158 ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
3159 {
3160 struct ldi_handle *lhp = (struct ldi_handle *)lh;
3161 ldi_ev_callback_impl_t *lecp;
3162 dev_t dev;
3163 struct snode *csp;
3164 dev_info_t *dip;
3165 int ddi_event;
3166
3167 ASSERT(!servicing_interrupt());
3168
3169 if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
3170 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
3171 return (LDI_EV_FAILURE);
3172 }
3173
3174 if (callb->cb_vers != LDI_EV_CB_VERS) {
3175 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
3176 return (LDI_EV_FAILURE);
3177 }
3178
3179 if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
3180 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
3181 return (LDI_EV_FAILURE);
3182 }
3183
3184 *id = 0;
3185
3186 dev = lhp->lh_vp->v_rdev;
3187 csp = VTOCS(lhp->lh_vp);
3188 mutex_enter(&csp->s_lock);
3189 if ((dip = csp->s_dip) != NULL)
3190 e_ddi_hold_devi(dip);
3191 mutex_exit(&csp->s_lock);
3192 if (dip == NULL)
3193 dip = e_ddi_hold_devi_by_dev(dev, 0);
3194
3195 if (dip == NULL) {
3196 cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
3197 "LDI handle: %p", (void *)lhp);
3198 return (LDI_EV_FAILURE);
3199 }
3200
3201 lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);
3202
3203 ddi_event = 0;
3204 if (!ldi_native_cookie(cookie)) {
3205 if (callb->cb_notify || callb->cb_finalize == NULL) {
3206 /*
3207 * NDI event services only accept finalize
3208 */
3209 cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
3210 "Only finalize"
3211 " callback supported with this cookie",
3212 "ldi_ev_register_callbacks",
3213 lhp->lh_ident->li_modname);
3214 kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3215 ddi_release_devi(dip);
3216 return (LDI_EV_FAILURE);
3217 }
3218
3219 if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
3220 i_ldi_ev_callback, (void *)lecp,
3221 (ddi_callback_id_t *)&lecp->lec_id)
3222 != DDI_SUCCESS) {
3223 kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3224 ddi_release_devi(dip);
3225 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3226 "ddi_add_event_handler failed"));
3227 return (LDI_EV_FAILURE);
3228 }
3229 ddi_event = 1;
3230 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3231 "ddi_add_event_handler success"));
3232 }
3233
3234
3235
3236 ldi_ev_lock();
3237
3238 /*
3239 * Add the notify/finalize callback to the LDI's list of callbacks.
3240 */
3241 lecp->lec_lhp = lhp;
3242 lecp->lec_dev = lhp->lh_vp->v_rdev;
3243 lecp->lec_spec = VTYP_TO_STYP(lhp->lh_vp->v_type);
3244 lecp->lec_notify = callb->cb_notify;
3245 lecp->lec_finalize = callb->cb_finalize;
3246 lecp->lec_arg = arg;
3247 lecp->lec_cookie = cookie;
3248 if (!ddi_event)
3249 lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
3250 else
3251 ASSERT(lecp->lec_id);
3252 lecp->lec_dip = dip;
3253 list_insert_tail(&ldi_ev_callback_list.le_head, lecp);
3254
3255 *id = (ldi_callback_id_t)lecp->lec_id;
3256
3257 ldi_ev_unlock();
3258
3259 ddi_release_devi(dip);
3260
3261 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
3262 "notify/finalize"));
3263
3264 return (LDI_EV_SUCCESS);
3265 }
3266
3267 static int
3268 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3269 dev_t dev, int spec_type)
3270 {
3271 ASSERT(lecp);
3272 ASSERT(dip);
3273 ASSERT(dev != DDI_DEV_T_NONE);
3274 ASSERT(dev != NODEV);
3275 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3276 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3277 ASSERT(lecp->lec_dip);
3278 ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3279 ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3280 ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3281 ASSERT(lecp->lec_dev != NODEV);
3282
3283 if (dip != lecp->lec_dip)
3284 return (0);
3285
3286 if (dev != DDI_DEV_T_ANY) {
3287 if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3288 return (0);
3289 }
3290
3291 LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3292
3293 return (1);
3294 }
3295
3296 /*
3297 * LDI framework function to post a "notify" event to all layered drivers
3298 * that have registered for that event
3299 *
3300 * Returns:
3301 * LDI_EV_SUCCESS - registered callbacks allow event
3302 * LDI_EV_FAILURE - registered callbacks block event
3303 * LDI_EV_NONE - No matching LDI callbacks
3304 *
3305 * This function is *not* to be called by layered drivers. It is for I/O
3306 * framework code in Solaris, such as the I/O retire code and DR code
3307 * to call while servicing a device event such as offline or degraded.
3308 */
3309 int
3310 ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3311 void *ev_data)
3312 {
3313 ldi_ev_callback_impl_t *lecp;
3314 list_t *listp;
3315 int ret;
3316 char *lec_event;
3317
3318 ASSERT(dip);
3319 ASSERT(dev != DDI_DEV_T_NONE);
3320 ASSERT(dev != NODEV);
3321 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3322 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3323 ASSERT(event);
3324 ASSERT(ldi_native_event(event));
3325 ASSERT(ldi_ev_sync_event(event));
3326
3327 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
3328 (void *)dip, event));
3329
3330 ret = LDI_EV_NONE;
3331 ldi_ev_lock();
3332 listp = &ldi_ev_callback_list.le_head;
3333 for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3334
3335 /* Check if matching device */
3336 if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3337 continue;
3338
3339 if (lecp->lec_lhp == NULL) {
3340 /*
3341 * Consumer has unregistered the handle and so
3342 * is no longer interested in notify events.
3343 */
3344 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
3345 "handle, skipping"));
3346 continue;
3347 }
3348
3349 if (lecp->lec_notify == NULL) {
3350 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
3351 "callback. skipping"));
3352 continue; /* not interested in notify */
3353 }
3354
3355 /*
3356 * Check if matching event
3357 */
3358 lec_event = ldi_ev_get_type(lecp->lec_cookie);
3359 if (strcmp(event, lec_event) != 0) {
3360 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
3361 " event {%s,%s}. skipping", event, lec_event));
3362 continue;
3363 }
3364
3365 lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
3366 if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
3367 lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
3368 ret = LDI_EV_FAILURE;
3369 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
3370 " FAILURE"));
3371 break;
3372 }
3373
3374 /* We have a matching callback that allows the event to occur */
3375 ret = LDI_EV_SUCCESS;
3376
3377 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
3378 }
3379
3380 if (ret != LDI_EV_FAILURE)
3381 goto out;
3382
3383 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));
3384
3385 /*
3386 * Undo notifies already sent
3387 */
3388 lecp = list_prev(listp, lecp);
3389 for (; lecp; lecp = list_prev(listp, lecp)) {
3390
3391 /*
3392 * Check if matching device
3393 */
3394 if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3395 continue;
3396
3397
3398 if (lecp->lec_finalize == NULL) {
3399 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
3400 "skipping"));
3401 continue; /* not interested in finalize */
3402 }
3403
3404 /*
3405 * It is possible that, in response to a notify event, a
3406 * layered driver closed its LDI handle, so it is ok
3407 * to have a NULL LDI handle for finalize. The layered
3408 * driver is expected to maintain state in its "arg"
3409 * parameter to keep track of the closed device.
3410 */
3411
3412 /* Check if matching event */
3413 lec_event = ldi_ev_get_type(lecp->lec_cookie);
3414 if (strcmp(event, lec_event) != 0) {
3415 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
3416 "event: %s,%s, skipping", event, lec_event));
3417 continue;
3418 }
3419
3420 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));
3421
3422 lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3423 LDI_EV_FAILURE, lecp->lec_arg, ev_data);
3424
3425 /*
3426 * If this is an LDI native event and the LDI handle was closed in
3427 * the context of notify, NULL out the finalize callback as we have
3428 * already made the one finalize call allowed in this situation.
3429 */
3430 if (lecp->lec_lhp == NULL &&
3431 ldi_native_cookie(lecp->lec_cookie)) {
3432 LDI_EVDBG((CE_NOTE,
3433 "ldi_invoke_notify(): NULL-ing finalize after "
3434 "calling 1 finalize following ldi_close"));
3435 lecp->lec_finalize = NULL;
3436 }
3437 }
3438
3439 out:
3440 ldi_ev_unlock();
3441
3442 if (ret == LDI_EV_NONE) {
3443 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
3444 "LDI callbacks"));
3445 }
3446
3447 return (ret);
3448 }
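
/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): the order in which framework code (e.g. the retire code)
 * is expected to drive ldi_invoke_notify() and ldi_invoke_finalize()
 * (defined below) for a device going away.  The LDI_EV_OFFLINE event
 * name is an assumption used only for illustration.
 *
 *	ret = ldi_invoke_notify(dip, dev, S_IFCHR, LDI_EV_OFFLINE, NULL);
 *	(attempt, or abandon, the state change based on ret)
 *	ldi_invoke_finalize(dip, dev, S_IFCHR, LDI_EV_OFFLINE,
 *	    (ret == LDI_EV_FAILURE) ? LDI_EV_FAILURE : LDI_EV_SUCCESS,
 *	    NULL);
 */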
3449
3450 /*
3451 * Framework function to be called from a layered driver to propagate
3452 * LDI "notify" events to exported minors.
3453 *
3454 * This function is a public interface exported by the LDI framework
3455 * for use by layered drivers to propagate device events up the software
3456 * stack.
3457 */
3458 int
3459 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3460 ldi_ev_cookie_t cookie, void *ev_data)
3461 {
3462 char *evname = ldi_ev_get_type(cookie);
3463 uint_t ct_evtype;
3464 dev_t dev;
3465 major_t major;
3466 int retc;
3467 int retl;
3468
3469 ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3470 ASSERT(dip);
3471 ASSERT(ldi_native_cookie(cookie));
3472
3473 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3474 evname, (void *)dip));
3475
3476 if (!ldi_ev_sync_event(evname)) {
3477 cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3478 "negotiatable event", evname);
3479 return (LDI_EV_SUCCESS);
3480 }
3481
3482 major = ddi_driver_major(dip);
3483 if (major == DDI_MAJOR_T_NONE) {
3484 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3485 (void) ddi_pathname(dip, path);
3486 cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3487 "for device %s", path);
3488 kmem_free(path, MAXPATHLEN);
3489 return (LDI_EV_FAILURE);
3490 }
3491 dev = makedevice(major, minor);
3492
3493 /*
3494 * Generate negotiation contract events on contracts (if any) associated
3495 * with this minor.
3496 */
3497 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3498 ct_evtype = ldi_contract_event(evname);
3499 retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3500 if (retc == CT_NACK) {
3501 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3502 return (LDI_EV_FAILURE);
3503 }
3504
3505 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3506 retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3507 if (retl == LDI_EV_FAILURE) {
3508 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3509 "returned FAILURE. Calling contract negend"));
3510 contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3511 return (LDI_EV_FAILURE);
3512 }
3513
3514 /*
3515 * The very fact that we are here indicates that there is an
3516 * LDI callback (and hence a constraint) for the retire of the
3517 * HW device. So we just return success even if there are no
3518 * contracts or LDI callbacks against the minors layered on top
3519 * of the HW minors
3520 */
3521 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3522 return (LDI_EV_SUCCESS);
3523 }
3524
3525 /*
3526 * LDI framework function to invoke "finalize" callbacks for all layered
3527 * drivers that have registered callbacks for that event.
3528 *
3529 * This function is *not* to be called by layered drivers. It is for I/O
3530 * framework code in Solaris, such as the I/O retire code and DR code
3531 * to call while servicing a device event such as offline or degraded.
3532 */
3533 void
3534 ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3535 int ldi_result, void *ev_data)
3536 {
3537 ldi_ev_callback_impl_t *lecp;
3538 list_t *listp;
3539 char *lec_event;
3540 int found = 0;
3541
3542 ASSERT(dip);
3543 ASSERT(dev != DDI_DEV_T_NONE);
3544 ASSERT(dev != NODEV);
3545 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3546 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3547 ASSERT(event);
3548 ASSERT(ldi_native_event(event));
3549 ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3550
3551 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
3552 " event=%s", (void *)dip, ldi_result, event));
3553
3554 ldi_ev_lock();
3555 listp = &ldi_ev_callback_list.le_head;
3556 for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3557
3558 if (lecp->lec_finalize == NULL) {
3559 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
3560 "finalize. Skipping"));
3561 continue; /* Not interested in finalize */
3562 }
3563
3564 /*
3565 * Check if matching device
3566 */
3567 if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3568 continue;
3569
3570 /*
3571 * It is valid for the LDI handle to be NULL during finalize.
3572 * The layered driver may have done an LDI close in the notify
3573 * callback.
3574 */
3575
3576 /*
3577 * Check if matching event
3578 */
3579 lec_event = ldi_ev_get_type(lecp->lec_cookie);
3580 if (strcmp(event, lec_event) != 0) {
3581 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
3582 "matching event {%s,%s}. Skipping",
3583 event, lec_event));
3584 continue;
3585 }
3586
3587 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));
3588
3589 found = 1;
3590
3591 lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3592 ldi_result, lecp->lec_arg, ev_data);
3593
3594 /*
3595 * If this is an LDI native event and the LDI handle was closed in
3596 * the context of notify, NULL out the finalize callback as we have
3597 * already made the one finalize call allowed in this situation.
3598 */
3599 if (lecp->lec_lhp == NULL &&
3600 ldi_native_cookie(lecp->lec_cookie)) {
3601 LDI_EVDBG((CE_NOTE,
3602 "ldi_invoke_finalize(): NULLing finalize after "
3603 "calling 1 finalize following ldi_close"));
3604 lecp->lec_finalize = NULL;
3605 }
3606 }
3607 ldi_ev_unlock();
3608
3609 if (found)
3610 return;
3611
3612 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
3613 }
3614
3615 /*
3616 * Framework function to be called from a layered driver to propagate
3617 * LDI "finalize" events to exported minors.
3618 *
3619 * This function is a public interface exported by the LDI framework
3620 * for use by layered drivers to propagate device events up the software
3621 * stack.
3622 */
3623 void
3624 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3625 ldi_ev_cookie_t cookie, void *ev_data)
3626 {
3627 dev_t dev;
3628 major_t major;
3629 char *evname;
3630 int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3631 CT_EV_SUCCESS : CT_EV_FAILURE;
3632 uint_t ct_evtype;
3633
3634 ASSERT(dip);
3635 ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3636 ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3637 ASSERT(ldi_native_cookie(cookie));
3638
3639 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3640
3641 major = ddi_driver_major(dip);
3642 if (major == DDI_MAJOR_T_NONE) {
3643 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3644 (void) ddi_pathname(dip, path);
3645 cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3646 "for device %s", path);
3647 kmem_free(path, MAXPATHLEN);
3648 return;
3649 }
3650 dev = makedevice(major, minor);
3651
3652 evname = ldi_ev_get_type(cookie);
3653
3654 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3655 ct_evtype = ldi_contract_event(evname);
3656 contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3657
3658 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3659 ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3660 }
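
/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): a layered driver that received a notify event on the
 * device it consumes could propagate it up through its own exported
 * minor roughly as follows.  The minor number (0), spec_type and
 * result handling are assumptions used only for illustration.
 *
 *	ret = ldi_ev_notify(dip, 0, S_IFCHR, cookie, ev_data);
 *	(act on the event only if ret == LDI_EV_SUCCESS)
 *	ldi_ev_finalize(dip, 0, S_IFCHR,
 *	    (ret == LDI_EV_SUCCESS) ? LDI_EV_SUCCESS : LDI_EV_FAILURE,
 *	    cookie, ev_data);
 */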
3661
3662 int
3663 ldi_ev_remove_callbacks(ldi_callback_id_t id)
3664 {
3665 ldi_ev_callback_impl_t *lecp;
3666 ldi_ev_callback_impl_t *next;
3667 ldi_ev_callback_impl_t *found;
3668 list_t *listp;
3669
3670 ASSERT(!servicing_interrupt());
3671
3672 if (id == 0) {
3673 cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
3674 return (LDI_EV_FAILURE);
3675 }
3676
3677 LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
3678 (void *)id));
3679
3680 ldi_ev_lock();
3681
3682 listp = &ldi_ev_callback_list.le_head;
3683 next = found = NULL;
3684 for (lecp = list_head(listp); lecp; lecp = next) {
3685 next = list_next(listp, lecp);
3686 if (lecp->lec_id == id) {
3687 ASSERT(found == NULL);
3688 list_remove(listp, lecp);
3689 found = lecp;
3690 }
3691 }
3692 ldi_ev_unlock();
3693
3694 if (found == NULL) {
3695 cmn_err(CE_WARN, "No LDI event handler for id (%p)",
3696 (void *)id);
3697 return (LDI_EV_SUCCESS);
3698 }
3699
3700 if (!ldi_native_cookie(found->lec_cookie)) {
3701 ASSERT(found->lec_notify == NULL);
3702 if (ddi_remove_event_handler((ddi_callback_id_t)id)
3703 != DDI_SUCCESS) {
3704 cmn_err(CE_WARN, "failed to remove NDI event handler "
3705 "for id (%p)", (void *)id);
3706 ldi_ev_lock();
3707 list_insert_tail(listp, found);
3708 ldi_ev_unlock();
3709 return (LDI_EV_FAILURE);
3710 }
3711 LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
3712 "service removal succeeded"));
3713 } else {
3714 LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
3715 "LDI native callbacks"));
3716 }
3717 kmem_free(found, sizeof (ldi_ev_callback_impl_t));
3718
3719 return (LDI_EV_SUCCESS);
3720 }
3721