/*	$NetBSD: rf_diskqueue.c,v 1.7 1999/06/04 01:51:00 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/****************************************************************************
 *
 * rf_diskqueue.c -- higher-level disk queue code
 *
 * The routines here are a generic wrapper around the actual queueing
 * routines.  The code here implements thread scheduling, synchronization,
 * and locking ops (see below) on top of the lower-level queueing code.
 *
 * To support atomic RMW, we implement "locking operations".  When a
 * locking op is dispatched to the lower levels of the driver, the queue
 * is locked, and no further I/Os are dispatched until the queue receives
 * and completes a corresponding "unlocking operation".  This code relies
 * on the higher layers to guarantee that a locking op will always
 * eventually be followed by an unlocking op.  The model is that the
 * higher layers are structured so that locking and unlocking ops occur
 * in pairs, i.e. an unlocking op cannot be generated until after a
 * locking op reports completion.  There is no good way to check that an
 * unlocking op "corresponds" to the op that currently has the queue
 * locked, so we make no such attempt.  Since by definition there can be
 * only one locking op outstanding on a disk, this should not be a
 * problem.
 *
 * In the kernel, we allow multiple I/Os to be concurrently dispatched
 * to the disk driver.  In order to support locking ops in this
 * environment, when we decide to do a locking op, we stop dispatching
 * new I/Os and wait until all dispatched I/Os have completed before
 * dispatching the locking op.
 *
 * Unfortunately, the code differs among the three operating environments
 * (user level, kernel, simulator).  In the kernel, I/O is non-blocking,
 * and we have no disk threads to dispatch for us.  Therefore, we have to
 * dispatch new I/Os to the scsi driver at the time of enqueue, and also
 * at the time of completion.  At user level, I/O is blocking, and so
 * only the disk threads may dispatch I/Os.  Thus at user level, all we
 * can do at enqueue time is enqueue and wake up the disk thread to do
 * the dispatch.
 *
 ****************************************************************************/
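/*
 * Illustration (added commentary; not part of the original source): an
 * atomic RMW is bracketed by a locking read and an unlocking write.
 * Assuming the RF_LOCK_DISK_QUEUE / RF_UNLOCK_DISK_QUEUE request flags
 * from rf_diskqueue.h, the higher (DAG) layer would issue something like:
 *
 *	lockRead = rf_CreateDiskQueueDataFull(RF_IO_TYPE_READ, sect, n,
 *	    obuf, psid, ru, doneF, arg, NULL, trace, pri, NULL, NULL,
 *	    raidPtr, RF_LOCK_DISK_QUEUE, proc);
 *	... wait for completion, compute new data and parity ...
 *	unlockWrite = rf_CreateDiskQueueDataFull(RF_IO_TYPE_WRITE, sect, n,
 *	    nbuf, psid, ru, doneF, arg, NULL, trace, pri, NULL, NULL,
 *	    raidPtr, RF_UNLOCK_DISK_QUEUE, proc);
 *
 * The queue stays locked from the dispatch of the first request until the
 * completion of the second; all other I/O to the disk is held off in
 * between.
 */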
#include "rf_types.h"
#include "rf_threadstuff.h"
#include "rf_threadid.h"
#include "rf_raid.h"
#include "rf_diskqueue.h"
#include "rf_alloclist.h"
#include "rf_acctrace.h"
#include "rf_etimer.h"
#include "rf_configure.h"
#include "rf_general.h"
#include "rf_freelist.h"
#include "rf_debugprint.h"
#include "rf_shutdown.h"
#include "rf_cvscan.h"
#include "rf_sstf.h"
#include "rf_fifo.h"

static int init_dqd(RF_DiskQueueData_t *);
static void clean_dqd(RF_DiskQueueData_t *);
static void rf_ShutdownDiskQueueSystem(void *);
/* From rf_kintf.c */
int     rf_DispatchKernelIO(RF_DiskQueue_t *, RF_DiskQueueData_t *);

#define Dprintf1(s,a) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf2(s,a,b) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf3(s,a,b,c) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),NULL,NULL,NULL,NULL,NULL)
#define Dprintf4(s,a,b,c,d) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),NULL,NULL,NULL,NULL)
#define Dprintf5(s,a,b,c,d,e) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),NULL,NULL,NULL)

/* no-ops in the kernel: there are no disk threads to signal or wait on */
#define SIGNAL_DISK_QUEUE(_q_,_wh_)
#define WAIT_DISK_QUEUE(_q_,_wh_)

/****************************************************************************
 *
 * The disk queue switch defines all the functions used in the different
 * queueing disciplines: the queue type name, plus the create, enqueue,
 * dequeue, peek, and promote routines.
 *
 ****************************************************************************/

static RF_DiskQueueSW_t diskqueuesw[] = {
	{"fifo",		/* FIFO */
		rf_FifoCreate,
		rf_FifoEnqueue,
		rf_FifoDequeue,
		rf_FifoPeek,
		rf_FifoPromote},

	{"cvscan",		/* cvscan */
		rf_CvscanCreate,
		rf_CvscanEnqueue,
		rf_CvscanDequeue,
		rf_CvscanPeek,
		rf_CvscanPromote},

	{"sstf",		/* shortest seek time first */
		rf_SstfCreate,
		rf_SstfEnqueue,
		rf_SstfDequeue,
		rf_SstfPeek,
		rf_SstfPromote},

	/* scan and cscan share the sstf enqueue and promote routines */
	{"scan",		/* SCAN (two-way elevator) */
		rf_ScanCreate,
		rf_SstfEnqueue,
		rf_ScanDequeue,
		rf_ScanPeek,
		rf_SstfPromote},

	{"cscan",		/* CSCAN (one-way elevator) */
		rf_CscanCreate,
		rf_SstfEnqueue,
		rf_CscanDequeue,
		rf_CscanPeek,
		rf_SstfPromote},

#if !defined(_KERNEL) && RF_INCLUDE_QUEUE_RANDOM > 0
	/* to make a point to Chris :-> */
	{"random",		/* random */
		rf_FifoCreate,
		rf_FifoEnqueue,
		rf_RandomDequeue,
		rf_RandomPeek,
		rf_FifoPromote},
#endif				/* !_KERNEL && RF_INCLUDE_QUEUE_RANDOM > 0 */
};
#define NUM_DISK_QUEUE_TYPES (sizeof(diskqueuesw)/sizeof(RF_DiskQueueSW_t))
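/*
 * Sketch (added commentary; not in the original): a new discipline plugs
 * in by implementing the five entry points and adding a row to the table
 * above.  The shapes below are inferred from the call sites in this file
 * (Peek is unused here), and the rf_My* names are hypothetical:
 *
 *	void   *rf_MyCreate(RF_SectorCount_t sectPerDisk,
 *	            RF_AllocListElem_t *clList, RF_ShutdownList_t **listp);
 *	void    rf_MyEnqueue(void *qHdr, RF_DiskQueueData_t *req, int pri);
 *	RF_DiskQueueData_t *rf_MyDequeue(void *qHdr);
 *	RF_DiskQueueData_t *rf_MyPeek(void *qHdr);
 *	int     rf_MyPromote(void *qHdr, RF_StripeNum_t parityStripeID,
 *	            RF_ReconUnitNum_t which_ru);
 *
 *	{"mydisc", rf_MyCreate, rf_MyEnqueue, rf_MyDequeue, rf_MyPeek,
 *		rf_MyPromote},
 */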
static RF_FreeList_t *rf_dqd_freelist;

#define RF_MAX_FREE_DQD 256
#define RF_DQD_INC       16
#define RF_DQD_INITIAL   64

#include <sys/buf.h>

static int
init_dqd(dqd)
	RF_DiskQueueData_t *dqd;
{
	/* XXX not sure if the following malloc is appropriate... probably
	 * not quite... */
	dqd->bp = (struct buf *) malloc(sizeof(struct buf), M_RAIDFRAME, M_NOWAIT);
	if (dqd->bp == NULL) {
		return (ENOMEM);
	}
	memset(dqd->bp, 0, sizeof(struct buf));	/* if you don't do it, nobody
						 * else will.. */
	return (0);
}

static void
clean_dqd(dqd)
	RF_DiskQueueData_t *dqd;
{
	free(dqd->bp, M_RAIDFRAME);
}

/* configures a single disk queue */
int     config_disk_queue(RF_Raid_t *, RF_DiskQueue_t *, RF_RowCol_t,
	    RF_RowCol_t, RF_DiskQueueSW_t *, RF_SectorCount_t, dev_t,
	    int, RF_ShutdownList_t **, RF_AllocListElem_t *);

int
config_disk_queue(
    RF_Raid_t * raidPtr,
    RF_DiskQueue_t * diskqueue,
    RF_RowCol_t r,		/* row & col -- debug only.  BZZT not any
				 * more... */
    RF_RowCol_t c,
    RF_DiskQueueSW_t * p,
    RF_SectorCount_t sectPerDisk,
    dev_t dev,
    int maxOutstanding,
    RF_ShutdownList_t ** listp,
    RF_AllocListElem_t * clList)
{
	int     rc;

	diskqueue->row = r;
	diskqueue->col = c;
	diskqueue->qPtr = p;
	diskqueue->qHdr = (p->Create) (sectPerDisk, clList, listp);
	diskqueue->dev = dev;
	diskqueue->numOutstanding = 0;
	diskqueue->queueLength = 0;
	diskqueue->maxOutstanding = maxOutstanding;
	diskqueue->curPriority = RF_IO_NORMAL_PRIORITY;
	diskqueue->nextLockingOp = NULL;
	diskqueue->unlockingOp = NULL;
	diskqueue->numWaiting = 0;
	diskqueue->flags = 0;
	diskqueue->raidPtr = raidPtr;
	diskqueue->rf_cinfo = &raidPtr->raid_cinfo[r][c];
	rc = rf_create_managed_mutex(listp, &diskqueue->mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	rc = rf_create_managed_cond(listp, &diskqueue->cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	return (0);
}
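/*
 * Note (added commentary): rf_create_managed_mutex() and
 * rf_create_managed_cond() hang their teardown actions on the shutdown
 * list passed in via listp, which is presumably why config_disk_queue()
 * has no matching destroy routine -- the per-queue locks are torn down
 * with the rest of the array at shutdown time.
 */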
static void
rf_ShutdownDiskQueueSystem(ignored)
	void   *ignored;
{
	RF_FREELIST_DESTROY_CLEAN(rf_dqd_freelist, next, (RF_DiskQueueData_t *), clean_dqd);
}

int
rf_ConfigureDiskQueueSystem(listp)
	RF_ShutdownList_t **listp;
{
	int     rc;

	RF_FREELIST_CREATE(rf_dqd_freelist, RF_MAX_FREE_DQD,
	    RF_DQD_INC, sizeof(RF_DiskQueueData_t));
	if (rf_dqd_freelist == NULL)
		return (ENOMEM);
	rc = rf_ShutdownCreate(listp, rf_ShutdownDiskQueueSystem, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		rf_ShutdownDiskQueueSystem(NULL);
		return (rc);
	}
	RF_FREELIST_PRIME_INIT(rf_dqd_freelist, RF_DQD_INITIAL, next,
	    (RF_DiskQueueData_t *), init_dqd);
	return (0);
}

int
rf_ConfigureDiskQueues(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	RF_DiskQueue_t **diskQueues, *spareQueues;
	RF_DiskQueueSW_t *p;
	RF_RowCol_t r, c;
	int     rc, i;

	raidPtr->maxQueueDepth = cfgPtr->maxOutstandingDiskReqs;

	for (p = NULL, i = 0; i < NUM_DISK_QUEUE_TYPES; i++) {
		if (!strcmp(diskqueuesw[i].queueType, cfgPtr->diskQueueType)) {
			p = &diskqueuesw[i];
			break;
		}
	}
	if (p == NULL) {
		RF_ERRORMSG2("Unknown queue type \"%s\".  Using %s\n",
		    cfgPtr->diskQueueType, diskqueuesw[0].queueType);
		p = &diskqueuesw[0];
	}
	RF_CallocAndAdd(diskQueues, raidPtr->numRow, sizeof(RF_DiskQueue_t *),
	    (RF_DiskQueue_t **), raidPtr->cleanupList);
	if (diskQueues == NULL) {
		return (ENOMEM);
	}
	raidPtr->Queues = diskQueues;
	for (r = 0; r < raidPtr->numRow; r++) {
		RF_CallocAndAdd(diskQueues[r], raidPtr->numCol +
		    ((r == 0) ? RF_MAXSPARE : 0),
		    sizeof(RF_DiskQueue_t), (RF_DiskQueue_t *),
		    raidPtr->cleanupList);
		if (diskQueues[r] == NULL)
			return (ENOMEM);
		for (c = 0; c < raidPtr->numCol; c++) {
			rc = config_disk_queue(raidPtr, &diskQueues[r][c], r, c, p,
			    raidPtr->sectorsPerDisk, raidPtr->Disks[r][c].dev,
			    cfgPtr->maxOutstandingDiskReqs, listp, raidPtr->cleanupList);
			if (rc)
				return (rc);
		}
	}

	/* the spare queues all hang off row 0, past the last data/parity
	 * column */
	spareQueues = &raidPtr->Queues[0][raidPtr->numCol];
	for (r = 0; r < raidPtr->numSpare; r++) {
		rc = config_disk_queue(raidPtr, &spareQueues[r],
		    0, raidPtr->numCol + r, p,
		    raidPtr->sectorsPerDisk,
		    raidPtr->Disks[0][raidPtr->numCol + r].dev,
		    cfgPtr->maxOutstandingDiskReqs, listp,
		    raidPtr->cleanupList);
		if (rc)
			return (rc);
	}
	return (0);
}

/* Enqueue a disk I/O
 *
 * Unfortunately, we have to do things differently in the different
 * environments (simulator, user-level, kernel).
 * At user level, all I/O is blocking, so we have 1 or more threads/disk
 * and the thread that enqueues is different from the thread that dequeues.
 * In the kernel, I/O is non-blocking and so we'd like to have multiple
 * I/Os outstanding on the physical disks when possible.
 *
 * When any request arrives at a queue, we have two choices:
 *    dispatch it to the lower levels
 *    queue it up
 *
 * kernel rules for when to do what:
 *    locking request:  queue empty => dispatch and lock queue,
 *                      else queue it
 *    unlocking req  :  always dispatch it
 *    normal req     :  queue empty => dispatch it & set priority
 *                      queue not full & priority is ok => dispatch it
 *                      else queue it
 *
 * user-level rules:
 *    always enqueue.  In the special case of an unlocking op, enqueue
 *    in a special way that will cause the unlocking op to be the next
 *    thing dequeued.
 *
 * simulator rules:
 *    Do the same as at user level, with the sleeps and wakeups suppressed.
 */
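/*
 * Added commentary: note the asymmetry in how a locking op waits.  If it
 * arrives at a busy queue here in rf_DiskIOEnqueue(), it is pushed through
 * the underlying discipline like any other request; only when it is later
 * dequeued into a still-busy queue does rf_DiskIOComplete() park it in
 * nextLockingOp until the outstanding I/Os drain.
 */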
void
rf_DiskIOEnqueue(queue, req, pri)
	RF_DiskQueue_t *queue;
	RF_DiskQueueData_t *req;
	int     pri;
{
	int     tid;

	RF_ETIMER_START(req->qtime);
	rf_get_threadid(tid);
	RF_ASSERT(req->type == RF_IO_TYPE_NOP || req->numSector);
	req->priority = pri;

	if (rf_queueDebug && (req->numSector == 0)) {
		printf("Warning: Enqueueing zero-sector access\n");
	}
	/*
	 * kernel
	 */
	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
	/* locking request */
	if (RF_LOCKING_REQ(req)) {
		if (RF_QUEUE_EMPTY(queue)) {
			Dprintf3("Dispatching pri %d locking op to r %d c %d (queue empty)\n", pri, queue->row, queue->col);
			RF_LOCK_QUEUE(queue);
			rf_DispatchKernelIO(queue, req);
		} else {
			queue->queueLength++;	/* increment count of number
						 * of requests waiting in this
						 * queue */
			Dprintf3("Enqueueing pri %d locking op to r %d c %d (queue not empty)\n", pri, queue->row, queue->col);
			req->queue = (void *) queue;
			(queue->qPtr->Enqueue) (queue->qHdr, req, pri);
		}
	}
	/* unlocking request */
	else if (RF_UNLOCKING_REQ(req)) {
		/* we'll do the actual unlock when this I/O completes */
		Dprintf3("Dispatching pri %d unlocking op to r %d c %d\n", pri, queue->row, queue->col);
		RF_ASSERT(RF_QUEUE_LOCKED(queue));
		rf_DispatchKernelIO(queue, req);
	}
	/* normal request */
	else if (RF_OK_TO_DISPATCH(queue, req)) {
		Dprintf3("Dispatching pri %d regular op to r %d c %d (ok to dispatch)\n", pri, queue->row, queue->col);
		rf_DispatchKernelIO(queue, req);
	} else {
		queue->queueLength++;	/* increment count of number of
					 * requests waiting in this queue */
		Dprintf3("Enqueueing pri %d regular op to r %d c %d (not ok to dispatch)\n", pri, queue->row, queue->col);
		req->queue = (void *) queue;
		(queue->qPtr->Enqueue) (queue->qHdr, req, pri);
	}
	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
}
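/*
 * Added commentary: rf_DiskIOEnqueue() and rf_DiskIOComplete() form the
 * producer and consumer halves of the kernel dispatch logic.  The
 * completion side is expected to be invoked once per finished I/O from
 * the kernel's I/O-done path (via the machinery behind
 * rf_DispatchKernelIO() in rf_kintf.c), where it performs any pending
 * unlock and then refills the disk with queued work.
 */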
/* get the next set of I/Os started, kernel version only */
void
rf_DiskIOComplete(queue, req, status)
	RF_DiskQueue_t *queue;
	RF_DiskQueueData_t *req;
	int     status;
{
	int     done = 0;

	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOComplete");

	/* unlock the queue: (1) after an unlocking req completes, (2) after
	 * a locking req fails */
	if (RF_UNLOCKING_REQ(req) || (RF_LOCKING_REQ(req) && status)) {
		Dprintf2("DiskIOComplete: unlocking queue at r %d c %d\n", queue->row, queue->col);
		RF_ASSERT(RF_QUEUE_LOCKED(queue) && (queue->unlockingOp == NULL));
		RF_UNLOCK_QUEUE(queue);
	}
	queue->numOutstanding--;
	RF_ASSERT(queue->numOutstanding >= 0);

	/* Dispatch requests to the disk until we find one that we can't.
	 * There is no reason to continue once we've filled up the queue,
	 * and no reason to even start if the queue is locked. */

	while (!done && !RF_QUEUE_FULL(queue) && !RF_QUEUE_LOCKED(queue)) {
		if (queue->nextLockingOp) {
			req = queue->nextLockingOp;
			queue->nextLockingOp = NULL;
			Dprintf3("DiskIOComplete: a pri %d locking req was pending at r %d c %d\n", req->priority, queue->row, queue->col);
		} else {
			req = (queue->qPtr->Dequeue) (queue->qHdr);
			if (req != NULL) {
				Dprintf3("DiskIOComplete: extracting pri %d req from queue at r %d c %d\n", req->priority, queue->row, queue->col);
			} else {
				Dprintf1("DiskIOComplete: no more requests to extract.\n", "");
			}
		}
		if (req) {
			queue->queueLength--;	/* decrement count of number
						 * of requests waiting in this
						 * queue */
			RF_ASSERT(queue->queueLength >= 0);
		}
		if (!req)
			done = 1;
		else if (RF_LOCKING_REQ(req)) {
			if (RF_QUEUE_EMPTY(queue)) {	/* dispatch it */
				Dprintf3("DiskIOComplete: dispatching pri %d locking req to r %d c %d (queue empty)\n", req->priority, queue->row, queue->col);
				RF_LOCK_QUEUE(queue);
				rf_DispatchKernelIO(queue, req);
				done = 1;
			} else {	/* put it aside to wait for the
					 * queue to drain */
				Dprintf3("DiskIOComplete: postponing pri %d locking req to r %d c %d\n", req->priority, queue->row, queue->col);
				RF_ASSERT(queue->nextLockingOp == NULL);
				queue->nextLockingOp = req;
				done = 1;
			}
		} else if (RF_UNLOCKING_REQ(req)) {
			/* should not happen: unlocking ops should not get
			 * queued; support it anyway for the future */
			RF_ASSERT(RF_QUEUE_LOCKED(queue));
			Dprintf3("DiskIOComplete: dispatching pri %d unl req to r %d c %d (SHOULD NOT SEE THIS)\n", req->priority, queue->row, queue->col);
			rf_DispatchKernelIO(queue, req);
			done = 1;
		} else if (RF_OK_TO_DISPATCH(queue, req)) {
			Dprintf3("DiskIOComplete: dispatching pri %d regular req to r %d c %d (ok to dispatch)\n", req->priority, queue->row, queue->col);
			rf_DispatchKernelIO(queue, req);
		} else {
			/* we can't dispatch it, so just re-enqueue it.
			 * potential trouble here if disk queues batch reqs */
			Dprintf3("DiskIOComplete: re-enqueueing pri %d regular req to r %d c %d\n", req->priority, queue->row, queue->col);
			queue->queueLength++;
			(queue->qPtr->Enqueue) (queue->qHdr, req, req->priority);
			done = 1;
		}
	}

	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOComplete");
}

/* promotes accesses tagged with the given parityStripeID from low priority
 * to normal priority.  This promotion is optional, meaning that a queue
 * need not implement it.  If there is no promotion routine associated with
 * a queue, this routine does nothing and returns -1.
 */
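/*
 * Added commentary: low-priority accesses come from background activity
 * such as reconstruction, which is the expected caller here -- promoting
 * its tagged accesses when a user request is blocked behind them.  Whether
 * promotion does anything depends on the discipline in use; note that the
 * "random" queue in the switch above simply reuses rf_FifoPromote.
 */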
int
rf_DiskIOPromote(queue, parityStripeID, which_ru)
	RF_DiskQueue_t *queue;
	RF_StripeNum_t parityStripeID;
	RF_ReconUnitNum_t which_ru;
{
	int     retval;

	if (!queue->qPtr->Promote)
		return (-1);
	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
	retval = (queue->qPtr->Promote) (queue->qHdr, parityStripeID, which_ru);
	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
	return (retval);
}

RF_DiskQueueData_t *
rf_CreateDiskQueueData(
    RF_IoType_t typ,
    RF_SectorNum_t ssect,
    RF_SectorCount_t nsect,
    caddr_t buf,
    RF_StripeNum_t parityStripeID,
    RF_ReconUnitNum_t which_ru,
    int (*wakeF) (void *, int),
    void *arg,
    RF_DiskQueueData_t * next,
    RF_AccTraceEntry_t * tracerec,
    void *raidPtr,
    RF_DiskQueueDataFlags_t flags,
    void *kb_proc)
{
	RF_DiskQueueData_t *p;

	RF_FREELIST_GET_INIT(rf_dqd_freelist, p, next, (RF_DiskQueueData_t *), init_dqd);

	p->sectorOffset = ssect + rf_protectedSectors;
	p->numSector = nsect;
	p->type = typ;
	p->buf = buf;
	p->parityStripeID = parityStripeID;
	p->which_ru = which_ru;
	p->CompleteFunc = wakeF;
	p->argument = arg;
	p->next = next;
	p->tracerec = tracerec;
	p->priority = RF_IO_NORMAL_PRIORITY;
	p->AuxFunc = NULL;
	p->buf2 = NULL;
	p->raidPtr = raidPtr;
	p->flags = flags;
	p->b_proc = kb_proc;
	return (p);
}

RF_DiskQueueData_t *
rf_CreateDiskQueueDataFull(
    RF_IoType_t typ,
    RF_SectorNum_t ssect,
    RF_SectorCount_t nsect,
    caddr_t buf,
    RF_StripeNum_t parityStripeID,
    RF_ReconUnitNum_t which_ru,
    int (*wakeF) (void *, int),
    void *arg,
    RF_DiskQueueData_t * next,
    RF_AccTraceEntry_t * tracerec,
    int priority,
    int (*AuxFunc) (void *,...),
    caddr_t buf2,
    void *raidPtr,
    RF_DiskQueueDataFlags_t flags,
    void *kb_proc)
{
	RF_DiskQueueData_t *p;

	RF_FREELIST_GET_INIT(rf_dqd_freelist, p, next, (RF_DiskQueueData_t *), init_dqd);

	p->sectorOffset = ssect + rf_protectedSectors;
	p->numSector = nsect;
	p->type = typ;
	p->buf = buf;
	p->parityStripeID = parityStripeID;
	p->which_ru = which_ru;
	p->CompleteFunc = wakeF;
	p->argument = arg;
	p->next = next;
	p->tracerec = tracerec;
	p->priority = priority;
	p->AuxFunc = AuxFunc;
	p->buf2 = buf2;
	p->raidPtr = raidPtr;
	p->flags = flags;
	p->b_proc = kb_proc;
	return (p);
}

void
rf_FreeDiskQueueData(p)
	RF_DiskQueueData_t *p;
{
	RF_FREELIST_FREE_CLEAN(rf_dqd_freelist, p, next, clean_dqd);
}
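/*
 * Sketch (added commentary; not part of the original source) of the
 * request life cycle implied by the routines above; myDone and myArg are
 * hypothetical:
 *
 *	RF_DiskQueueData_t *req;
 *
 *	req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, startSect, numSect,
 *	    buf, psid, which_ru, myDone, myArg, NULL, tracerec,
 *	    (void *) raidPtr, 0, b_proc);
 *	rf_DiskIOEnqueue(&raidPtr->Queues[row][col], req,
 *	    RF_IO_NORMAL_PRIORITY);
 *
 * When the I/O finishes, the kernel glue invokes req->CompleteFunc
 * (req->argument, status) and calls rf_DiskIOComplete() to refill the
 * queue; the creator then returns the request to the freelist with
 * rf_FreeDiskQueueData(req).
 */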