/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_rman.c,v 1.10.2.1 2001/06/05 08:06:08 imp Exp $
 * $DragonFly: src/sys/kern/subr_rman.c,v 1.15 2008/09/30 12:20:29 hasso Exp $
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the familiar array (RMAN_ARRAY) type; resources in this class consist
 * of a sequence of individually-allocatable objects which have been
 * numbered in some well-defined order.  Most resources are of this type.
 * The second type is called a gauge (RMAN_GAUGE), and models fungible
 * resources, i.e., resources in which each instance is indistinguishable
 * from every other instance.  The principal anticipated application of
 * gauges is in the context of power consumption, where a bus may have a
 * specific power budget which all attached devices share.  RMAN_GAUGE is
 * not implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
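
/*
 * To illustrate the intended calling pattern (a sketch only; the name
 * "foo_rman" and the address range are hypothetical, not part of this
 * file), a bus front-end would typically do something like:
 *
 *	static struct rman foo_rman;
 *
 *	foo_rman.rm_type = RMAN_ARRAY;
 *	foo_rman.rm_descr = "foo I/O ports";
 *	if (rman_init(&foo_rman, 0) != 0 ||
 *	    rman_manage_region(&foo_rman, 0x0, 0xffff) != 0)
 *		panic("foo_rman setup failed");
 *
 * and then satisfy child allocations via rman_reserve_resource().
 */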

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/bus.h>		/* XXX debugging */
#include <sys/rman.h>
#include <sys/sysctl.h>

int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params)	do { if (rman_debug) kprintf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct lwkt_token rman_tok;	/* mutex to protect rman_head */
static int int_rman_activate_resource(struct rman *rm, struct resource *r,
			struct resource **whohas);
static int int_rman_deactivate_resource(struct resource *r);
static int int_rman_release_resource(struct rman *rm, struct resource *r);

int
rman_init(struct rman *rm, int cpuid)
{
	static int once;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		lwkt_token_init(&rman_tok, "rman");
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_slock = kmalloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
	if (rm->rm_slock == NULL)
		return ENOMEM;
	lwkt_token_init(rm->rm_slock, "rmanslock");

	rm->rm_cpuid = cpuid;

	lwkt_gettoken(&rman_tok);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	lwkt_reltoken(&rman_tok);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource *r, *s;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = kmalloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
	if (r == NULL)
		return ENOMEM;
	r->r_sharehead = NULL;
	r->r_start = start;
	r->r_end = end;
	r->r_flags = 0;
	r->r_dev = NULL;
	r->r_rm = rm;

	lwkt_gettoken(rm->rm_slock);
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
		;

	if (s == NULL)
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	else
		TAILQ_INSERT_BEFORE(s, r, r_link);

	lwkt_reltoken(rm->rm_slock);
	return 0;
}
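
/*
 * Note that a single rman may be handed several disjoint regions.  For
 * example (hypothetical ranges), a bus with a hole in its address space
 * could call:
 *
 *	rman_manage_region(rm, 0x0000, 0x9fff);
 *	rman_manage_region(rm, 0xc000, 0xffff);
 *
 * The insertion loop above keeps rm_list sorted by address, which the
 * allocation code below relies on when scanning for a free range.
 */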

int
rman_fini(struct rman *rm)
{
	struct resource *r;

	lwkt_gettoken(rm->rm_slock);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			lwkt_reltoken(rm->rm_slock);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		kfree(r, M_RMAN);
	}
	lwkt_reltoken(rm->rm_slock);

	/* XXX what's the point of this if we are going to free the struct? */
	lwkt_gettoken(&rman_tok);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	lwkt_reltoken(&rman_tok);
	kfree(rm->rm_slock, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{
	u_int want_activate;
	struct resource *r, *s, *rv;
	u_long rstart, rend;

	rv = NULL;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	    "%#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	lwkt_gettoken(rm->rm_slock);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start > end) {
			DPRINTF(("s->r_start (%#lx) > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = max(s->r_start, start);
		rstart = (rstart + ((1ul << RF_ALIGNMENT(flags))) - 1) &
		    ~((1ul << RF_ALIGNMENT(flags)) - 1);
		rend = min(s->r_end, max(start + count, end));
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = kmalloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_sharehead = NULL;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = kmalloc(sizeof *r, M_RMAN,
				    M_NOWAIT | M_ZERO);
				if (r == NULL) {
					kfree(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_dev = NULL;
				r->r_sharehead = NULL;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = max(s->r_start, start);
		rend = min(s->r_end, max(start + count, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count) {
			rv = kmalloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
			    (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = kmalloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					kfree(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}
	lwkt_reltoken(rm->rm_slock);
	return (rv);
}
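
/*
 * To picture the split performed above (addresses for illustration
 * only): if a free segment covers [0x100, 0x1ff] and a client reserves
 * 0x40 units at 0x180, the segment becomes three list entries:
 *
 *	[0x100, 0x17f] free | [0x180, 0x1bf] allocated | [0x1c0, 0x1ff] free
 *
 * An allocation flush against either end of the segment instead shrinks
 * the segment and inserts a single new entry before or after it.
 */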

static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
			   struct resource **whohas)
{
	struct resource *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *r)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	lwkt_gettoken(rm->rm_slock);
	rv = int_rman_activate_resource(rm, r, &whohas);
	lwkt_reltoken(rm->rm_slock);
	return rv;
}
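
/*
 * For RF_TIMESHARE resources, activation can therefore fail with EBUSY
 * while another sharer is active.  A caller might handle this as
 * follows (sketch only, assuming a resource "r" obtained earlier from
 * rman_reserve_resource()):
 *
 *	if (rman_activate_resource(r) == EBUSY) {
 *		... back off, or wait and retry ...
 *	}
 *
 * int_rman_activate_resource() reports the current holder through its
 * "whohas" argument so that internal callers can sleep on it.
 */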

#if 0

/* XXX */
int
rman_await_resource(struct resource *r, int slpflags, int timo)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	for (;;) {
		lwkt_gettoken(rm->rm_slock);
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with ilock held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		/*
		 * A critical section will hopefully prevent a race
		 * between lwkt_reltoken and tsleep where a process
		 * could conceivably get in and release the resource
		 * before we have a chance to sleep on it.  YYY
		 */
		crit_enter();
		whohas->r_flags |= RF_WANTED;
		rv = tsleep(r->r_sharehead, slpflags, "rmwait", timo);
		if (rv) {
			lwkt_reltoken(rm->rm_slock);
			crit_exit();
			return rv;
		}
		crit_exit();
	}
}

#endif

static int
int_rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->r_rm;
	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->r_rm;
	lwkt_gettoken(rm->rm_slock);
	int_rman_deactivate_resource(r);
	lwkt_reltoken(rm->rm_slock);
	return 0;
}
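
/*
 * Releasing an array resource, below, tries to coalesce the freed range
 * with unallocated neighbours.  Schematically (layout for illustration
 * only), releasing the middle entry of
 *
 *	[free][allocated][free]
 *
 * collapses all three into a single list entry; a single free neighbour
 * absorbs the released range; and with no free neighbour the entry is
 * simply marked unallocated in place.
 */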

static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			kfree(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);

	if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0
	    && t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		kfree(t, M_RMAN);
	} else if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	kfree(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *r)
{
	struct rman *rm = r->r_rm;
	int rv;

	lwkt_gettoken(rm->rm_slock);
	rv = int_rman_release_resource(rm, r);
	lwkt_reltoken(rm->rm_slock);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1U << i) & size)
			break;
	if (~(1U << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
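
/*
 * For example (values for illustration only): a size of 0x1000 has a
 * single bit set, so the loop finds i = 12 and the conditional adds
 * nothing; a size of 0x1800 also yields i = 12 from the loop, but the
 * remaining low bit bumps it to 13, i.e. ceil(log2(0x1800)).  A caller
 * would fold the result into its request flags:
 *
 *	flags |= rman_make_alignment_flags(0x1000);
 */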

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation count, the
 * index into the list of resource managers, and the resource index
 * within that manager's list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource		*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager.
	 */
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					ksnprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	return (ENOENT);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
	    "kernel resource manager");
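
/*
 * Userland walks this tree by OID components {generation, rman index,
 * resource index}, with a resource index of -1 selecting the manager
 * itself.  For example (a sketch of the usage pattern), iterating rman
 * index 0 with resource indices -1, 0, 1, ... via sysctl(3) until
 * ENOENT dumps that manager and its entire resource list.
 */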