/*	$NetBSD: rf_diskqueue.c,v 1.6 1999/02/05 00:06:09 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/****************************************************************************
 *
 * rf_diskqueue.c -- higher-level disk queue code
 *
 * The routines here are a generic wrapper around the actual queueing
 * routines.  The code here implements thread scheduling, synchronization,
 * and locking ops (see below) on top of the lower-level queueing code.
 *
 * To support atomic RMW, we implement "locking operations".  When a
 * locking op is dispatched to the lower levels of the driver, the queue is
 * locked, and no further I/Os are dispatched until the queue receives and
 * completes a corresponding "unlocking operation".  This code relies on
 * the higher layers to guarantee that a locking op will always eventually
 * be followed by an unlocking op.  The model is that the higher layers are
 * structured so that locking and unlocking ops occur in pairs, i.e. an
 * unlocking op cannot be generated until after a locking op reports
 * completion.  There is no good way to check that an unlocking op
 * "corresponds" to the op that currently has the queue locked, so we make
 * no such attempt.  Since by definition there can be only one locking op
 * outstanding on a disk, this should not be a problem.
 *
 * In the kernel, we allow multiple I/Os to be concurrently dispatched to
 * the disk driver.  In order to support locking ops in this environment,
 * when we decide to do a locking op, we stop dispatching new I/Os and wait
 * until all dispatched I/Os have completed before dispatching the locking
 * op.
 *
 * Unfortunately, the code is different in the three different operating
 * environments (user level, kernel, simulator).  In the kernel, I/O is
 * non-blocking, and we have no disk threads to dispatch for us.
 * Therefore, we have to dispatch new I/Os to the scsi driver at the time
 * of enqueue, and also at the time of completion.  At user level, I/O is
 * blocking, and so only the disk threads may dispatch I/Os.  Thus at user
 * level, all we can do at enqueue time is enqueue and wake up the disk
 * thread to do the dispatch.
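 *
 * As an illustrative sketch only (the pairing is enforced by the higher
 * DAG layers, not here): a read-modify-write could issue its read of the
 * old data as the locking op and its write of the new data as the matching
 * unlocking op, so that no other I/O to that disk can slip in between the
 * two halves of the RMW.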
 *
 ****************************************************************************/

#include "rf_types.h"
#include "rf_threadstuff.h"
#include "rf_threadid.h"
#include "rf_raid.h"
#include "rf_diskqueue.h"
#include "rf_alloclist.h"
#include "rf_acctrace.h"
#include "rf_etimer.h"
#include "rf_configure.h"
#include "rf_general.h"
#include "rf_freelist.h"
#include "rf_debugprint.h"
#include "rf_shutdown.h"
#include "rf_cvscan.h"
#include "rf_sstf.h"
#include "rf_fifo.h"

static int init_dqd(RF_DiskQueueData_t *);
static void clean_dqd(RF_DiskQueueData_t *);
static void rf_ShutdownDiskQueueSystem(void *);
/* From rf_kintf.c */
int     rf_DispatchKernelIO(RF_DiskQueue_t *, RF_DiskQueueData_t *);


#define Dprintf1(s,a) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf2(s,a,b) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf3(s,a,b,c) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),NULL,NULL,NULL,NULL,NULL)
#define Dprintf4(s,a,b,c,d) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),NULL,NULL,NULL,NULL)
#define Dprintf5(s,a,b,c,d,e) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),NULL,NULL,NULL)


#define SIGNAL_DISK_QUEUE(_q_,_wh_)
#define WAIT_DISK_QUEUE(_q_,_wh_)

/****************************************************************************
 *
 * The disk queue switch defines all the functions used in the different
 * queueing disciplines:
 *	queue ID, create, enqueue, dequeue, peek, and promote routines
 *
 ****************************************************************************/

static RF_DiskQueueSW_t diskqueuesw[] = {
	{"fifo",		/* FIFO */
		rf_FifoCreate,
		rf_FifoEnqueue,
		rf_FifoDequeue,
		rf_FifoPeek,
		rf_FifoPromote},

	{"cvscan",		/* cvscan */
		rf_CvscanCreate,
		rf_CvscanEnqueue,
		rf_CvscanDequeue,
		rf_CvscanPeek,
		rf_CvscanPromote},

	{"sstf",		/* shortest seek time first */
		rf_SstfCreate,
		rf_SstfEnqueue,
		rf_SstfDequeue,
		rf_SstfPeek,
		rf_SstfPromote},

	{"scan",		/* SCAN (two-way elevator) */
		rf_ScanCreate,
		rf_SstfEnqueue,
		rf_ScanDequeue,
		rf_ScanPeek,
		rf_SstfPromote},

	{"cscan",		/* CSCAN (one-way elevator) */
		rf_CscanCreate,
		rf_SstfEnqueue,
		rf_CscanDequeue,
		rf_CscanPeek,
		rf_SstfPromote},

#if !defined(_KERNEL) && RF_INCLUDE_QUEUE_RANDOM > 0
	/* to make a point to Chris :-> */
	{"random",		/* random */
		rf_FifoCreate,
		rf_FifoEnqueue,
		rf_RandomDequeue,
		rf_RandomPeek,
		rf_FifoPromote},
#endif				/* !_KERNEL && RF_INCLUDE_QUEUE_RANDOM > 0 */
};
#define NUM_DISK_QUEUE_TYPES (sizeof(diskqueuesw)/sizeof(RF_DiskQueueSW_t))

static RF_FreeList_t *rf_dqd_freelist;

#define RF_MAX_FREE_DQD 256
#define RF_DQD_INC 16
#define RF_DQD_INITIAL 64

#include <sys/buf.h>

static int
init_dqd(dqd)
	RF_DiskQueueData_t *dqd;
{
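	/*
	 * Pre-allocate the struct buf this request will use when it is
	 * handed to the kernel I/O system, so that dispatch itself never
	 * has to allocate.
	 */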
	/* XXX not sure if the following malloc is appropriate... probably
	 * not quite... */
	dqd->bp = (struct buf *) malloc(sizeof(struct buf), M_RAIDFRAME, M_NOWAIT);
	if (dqd->bp == NULL) {
		return (ENOMEM);
	}
	memset(dqd->bp, 0, sizeof(struct buf));	/* if you don't do it, nobody
						 * else will.. */
	return (0);
}

static void
clean_dqd(dqd)
	RF_DiskQueueData_t *dqd;
{
	free(dqd->bp, M_RAIDFRAME);
}

/* configures a single disk queue */
static int
config_disk_queue(
    RF_Raid_t * raidPtr,
    RF_DiskQueue_t * diskqueue,
    RF_RowCol_t r,		/* row & col -- debug only.  BZZT not any
				 * more... */
    RF_RowCol_t c,
    RF_DiskQueueSW_t * p,
    RF_SectorCount_t sectPerDisk,
    dev_t dev,
    int maxOutstanding,
    RF_ShutdownList_t ** listp,
    RF_AllocListElem_t * clList)
{
	int     rc;

	diskqueue->row = r;
	diskqueue->col = c;
	diskqueue->qPtr = p;
	diskqueue->qHdr = (p->Create) (sectPerDisk, clList, listp);
	diskqueue->dev = dev;
	diskqueue->numOutstanding = 0;
	diskqueue->queueLength = 0;
	diskqueue->maxOutstanding = maxOutstanding;
	diskqueue->curPriority = RF_IO_NORMAL_PRIORITY;
	diskqueue->nextLockingOp = NULL;
	diskqueue->unlockingOp = NULL;
	diskqueue->numWaiting = 0;
	diskqueue->flags = 0;
	diskqueue->raidPtr = raidPtr;
	diskqueue->rf_cinfo = &raidPtr->raid_cinfo[r][c];
	rc = rf_create_managed_mutex(listp, &diskqueue->mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		return (rc);
	}
	rc = rf_create_managed_cond(listp, &diskqueue->cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		return (rc);
	}
	return (0);
}

static void
rf_ShutdownDiskQueueSystem(ignored)
	void   *ignored;
{
	RF_FREELIST_DESTROY_CLEAN(rf_dqd_freelist, next, (RF_DiskQueueData_t *), clean_dqd);
}

int
rf_ConfigureDiskQueueSystem(listp)
	RF_ShutdownList_t **listp;
{
	int     rc;

	RF_FREELIST_CREATE(rf_dqd_freelist, RF_MAX_FREE_DQD,
	    RF_DQD_INC, sizeof(RF_DiskQueueData_t));
	if (rf_dqd_freelist == NULL)
		return (ENOMEM);
	rc = rf_ShutdownCreate(listp, rf_ShutdownDiskQueueSystem, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		rf_ShutdownDiskQueueSystem(NULL);
		return (rc);
	}
	RF_FREELIST_PRIME_INIT(rf_dqd_freelist, RF_DQD_INITIAL, next,
	    (RF_DiskQueueData_t *), init_dqd);
	return (0);
}

int
rf_ConfigureDiskQueues(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	RF_DiskQueue_t **diskQueues, *spareQueues;
	RF_DiskQueueSW_t *p;
	RF_RowCol_t r, c;
	int     rc, i;

	raidPtr->maxQueueDepth = cfgPtr->maxOutstandingDiskReqs;

	for (p = NULL, i = 0; i < NUM_DISK_QUEUE_TYPES; i++) {
		if (!strcmp(diskqueuesw[i].queueType, cfgPtr->diskQueueType)) {
			p = &diskqueuesw[i];
			break;
		}
	}
	if (p == NULL) {
		RF_ERRORMSG2("Unknown queue type \"%s\".  Using %s\n",
		    cfgPtr->diskQueueType, diskqueuesw[0].queueType);
		p = &diskqueuesw[0];
	}
	RF_CallocAndAdd(diskQueues, raidPtr->numRow, sizeof(RF_DiskQueue_t *),
	    (RF_DiskQueue_t **), raidPtr->cleanupList);
	if (diskQueues == NULL) {
		return (ENOMEM);
	}
	raidPtr->Queues = diskQueues;
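	/* Row 0 gets numSpare extra entries: the queues for the spare disks
	 * are attached to the end of row 0 (see spareQueues below). */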
	for (r = 0; r < raidPtr->numRow; r++) {
		RF_CallocAndAdd(diskQueues[r],
		    raidPtr->numCol + ((r == 0) ? raidPtr->numSpare : 0),
		    sizeof(RF_DiskQueue_t), (RF_DiskQueue_t *),
		    raidPtr->cleanupList);
		if (diskQueues[r] == NULL)
			return (ENOMEM);
		for (c = 0; c < raidPtr->numCol; c++) {
			rc = config_disk_queue(raidPtr, &diskQueues[r][c], r, c, p,
			    raidPtr->sectorsPerDisk, raidPtr->Disks[r][c].dev,
			    cfgPtr->maxOutstandingDiskReqs, listp, raidPtr->cleanupList);
			if (rc)
				return (rc);
		}
	}

	spareQueues = &raidPtr->Queues[0][raidPtr->numCol];
	for (r = 0; r < raidPtr->numSpare; r++) {
		rc = config_disk_queue(raidPtr, &spareQueues[r],
		    0, raidPtr->numCol + r, p,
		    raidPtr->sectorsPerDisk,
		    raidPtr->Disks[0][raidPtr->numCol + r].dev,
		    cfgPtr->maxOutstandingDiskReqs, listp,
		    raidPtr->cleanupList);
		if (rc)
			return (rc);
	}
	return (0);
}

/* Enqueue a disk I/O
 *
 * Unfortunately, we have to do things differently in the different
 * environments (simulator, user-level, kernel).
 * At user level, all I/O is blocking, so we have 1 or more threads/disk
 * and the thread that enqueues is different from the thread that dequeues.
 * In the kernel, I/O is non-blocking and so we'd like to have multiple
 * I/Os outstanding on the physical disks when possible.
 *
 * When any request arrives at a queue, we have two choices:
 *    dispatch it to the lower levels
 *    queue it up
 *
 * kernel rules for when to do what:
 *    locking request:  queue empty => dispatch and lock queue,
 *                      else queue it
 *    unlocking req  :  always dispatch it
 *    normal req     :  queue empty => dispatch it & set priority
 *                      queue not full & priority is ok => dispatch it
 *                      else queue it
 *
 * user-level rules:
 *    always enqueue.  In the special case of an unlocking op, enqueue
 *    in a special way that will cause the unlocking op to be the next
 *    thing dequeued.
 *
 * simulator rules:
 *    Do the same as at user level, with the sleeps and wakeups suppressed.
 */
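/*
 * A hypothetical walk-through of the kernel rules above, with an invented
 * maxOutstanding of 2 and an initially idle queue: the first two normal
 * requests dispatch immediately, a third is queued, and a locking request
 * arriving now is also queued; it will be dispatched (and the queue
 * locked) by rf_DiskIOComplete() only once the queue has drained.
 */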
void
rf_DiskIOEnqueue(queue, req, pri)
	RF_DiskQueue_t *queue;
	RF_DiskQueueData_t *req;
	int     pri;
{
	int     tid;

	RF_ETIMER_START(req->qtime);
	rf_get_threadid(tid);
	RF_ASSERT(req->type == RF_IO_TYPE_NOP || req->numSector);
	req->priority = pri;

	if (rf_queueDebug && (req->numSector == 0)) {
		printf("Warning: Enqueueing zero-sector access\n");
	}
	/*
	 * kernel
	 */
	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
	if (RF_LOCKING_REQ(req)) {
		/* locking request */
		if (RF_QUEUE_EMPTY(queue)) {
			Dprintf3("Dispatching pri %d locking op to r %d c %d (queue empty)\n", pri, queue->row, queue->col);
			RF_LOCK_QUEUE(queue);
			rf_DispatchKernelIO(queue, req);
		} else {
			queue->queueLength++;	/* increment count of requests
						 * waiting in this queue */
			Dprintf3("Enqueueing pri %d locking op to r %d c %d (queue not empty)\n", pri, queue->row, queue->col);
			req->queue = (void *) queue;
			(queue->qPtr->Enqueue) (queue->qHdr, req, pri);
		}
	} else if (RF_UNLOCKING_REQ(req)) {
		/* unlocking request: we'll do the actual unlock when this
		 * I/O completes */
		Dprintf3("Dispatching pri %d unlocking op to r %d c %d\n", pri, queue->row, queue->col);
		RF_ASSERT(RF_QUEUE_LOCKED(queue));
		rf_DispatchKernelIO(queue, req);
	} else if (RF_OK_TO_DISPATCH(queue, req)) {
		/* normal request */
		Dprintf3("Dispatching pri %d regular op to r %d c %d (ok to dispatch)\n", pri, queue->row, queue->col);
		rf_DispatchKernelIO(queue, req);
	} else {
		queue->queueLength++;	/* increment count of requests
					 * waiting in this queue */
		Dprintf3("Enqueueing pri %d regular op to r %d c %d (not ok to dispatch)\n", pri, queue->row, queue->col);
		req->queue = (void *) queue;
		(queue->qPtr->Enqueue) (queue->qHdr, req, pri);
	}
	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
}


/* get the next set of I/Os started, kernel version only */
void
rf_DiskIOComplete(queue, req, status)
	RF_DiskQueue_t *queue;
	RF_DiskQueueData_t *req;
	int     status;
{
	int     done = 0;

	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOComplete");

	/* unlock the queue: (1) after an unlocking req completes (2) after a
	 * locking req fails */
	if (RF_UNLOCKING_REQ(req) || (RF_LOCKING_REQ(req) && status)) {
		Dprintf2("DiskIOComplete: unlocking queue at r %d c %d\n", queue->row, queue->col);
		RF_ASSERT(RF_QUEUE_LOCKED(queue) && (queue->unlockingOp == NULL));
		RF_UNLOCK_QUEUE(queue);
	}
	queue->numOutstanding--;
	RF_ASSERT(queue->numOutstanding >= 0);

	/* dispatch requests to the disk until we find one that we can't. */
	/* no reason to continue once we've filled up the queue */
	/* no reason to even start if the queue is locked */

	while (!done && !RF_QUEUE_FULL(queue) && !RF_QUEUE_LOCKED(queue)) {
		if (queue->nextLockingOp) {
			req = queue->nextLockingOp;
			queue->nextLockingOp = NULL;
			Dprintf3("DiskIOComplete: a pri %d locking req was pending at r %d c %d\n", req->priority, queue->row, queue->col);
		} else {
			req = (queue->qPtr->Dequeue) (queue->qHdr);
			if (req != NULL) {
				Dprintf3("DiskIOComplete: extracting pri %d req from queue at r %d c %d\n", req->priority, queue->row, queue->col);
			} else {
				Dprintf1("DiskIOComplete: no more requests to extract.\n", "");
			}
		}
		if (req) {
			queue->queueLength--;	/* decrement count of requests
						 * waiting in this queue */
			RF_ASSERT(queue->queueLength >= 0);
		}
		if (!req) {
			done = 1;
		} else if (RF_LOCKING_REQ(req)) {
			if (RF_QUEUE_EMPTY(queue)) {	/* dispatch it */
				Dprintf3("DiskIOComplete: dispatching pri %d locking req to r %d c %d (queue empty)\n", req->priority, queue->row, queue->col);
				RF_LOCK_QUEUE(queue);
				rf_DispatchKernelIO(queue, req);
				done = 1;
			} else {
				/* put it aside to wait for the queue to
				 * drain */
				Dprintf3("DiskIOComplete: postponing pri %d locking req to r %d c %d\n", req->priority, queue->row, queue->col);
				RF_ASSERT(queue->nextLockingOp == NULL);
				queue->nextLockingOp = req;
				done = 1;
			}
		} else if (RF_UNLOCKING_REQ(req)) {
			/* should not happen: unlocking ops should not get
			 * queued.  Support it anyway for the future. */
			RF_ASSERT(RF_QUEUE_LOCKED(queue));
			Dprintf3("DiskIOComplete: dispatching pri %d unl req to r %d c %d (SHOULD NOT SEE THIS)\n", req->priority, queue->row, queue->col);
			rf_DispatchKernelIO(queue, req);
			done = 1;
		} else if (RF_OK_TO_DISPATCH(queue, req)) {
			Dprintf3("DiskIOComplete: dispatching pri %d regular req to r %d c %d (ok to dispatch)\n", req->priority, queue->row, queue->col);
			rf_DispatchKernelIO(queue, req);
		} else {
			/* we can't dispatch it, so just re-enqueue it.
			 * Potential trouble here if disk queues batch
			 * reqs. */
			Dprintf3("DiskIOComplete: re-enqueueing pri %d regular req to r %d c %d\n", req->priority, queue->row, queue->col);
			queue->queueLength++;
			(queue->qPtr->Enqueue) (queue->qHdr, req, req->priority);
			done = 1;
		}
	}

	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOComplete");
}

/* Promotes accesses tagged with the given parityStripeID from low priority
 * to normal priority.  This promotion is optional, meaning that a queue
 * need not implement it.  If there is no promotion routine associated with
 * a queue, this routine does nothing and returns -1.
 */
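/* (Illustrative note: the expected caller is the reconstruction code, which
 * issues its accesses at low priority and promotes the ones it ends up
 * waiting on; that usage lives outside this file.) */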
int
rf_DiskIOPromote(queue, parityStripeID, which_ru)
	RF_DiskQueue_t *queue;
	RF_StripeNum_t parityStripeID;
	RF_ReconUnitNum_t which_ru;
{
	int     retval;

	if (!queue->qPtr->Promote)
		return (-1);
	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
	retval = (queue->qPtr->Promote) (queue->qHdr, parityStripeID, which_ru);
	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
	return (retval);
}

RF_DiskQueueData_t *
rf_CreateDiskQueueData(
    RF_IoType_t typ,
    RF_SectorNum_t ssect,
    RF_SectorCount_t nsect,
    caddr_t buf,
    RF_StripeNum_t parityStripeID,
    RF_ReconUnitNum_t which_ru,
    int (*wakeF) (void *, int),
    void *arg,
    RF_DiskQueueData_t * next,
    RF_AccTraceEntry_t * tracerec,
    void *raidPtr,
    RF_DiskQueueDataFlags_t flags,
    void *kb_proc)
{
	RF_DiskQueueData_t *p;

	RF_FREELIST_GET_INIT(rf_dqd_freelist, p, next, (RF_DiskQueueData_t *), init_dqd);

	p->sectorOffset = ssect + rf_protectedSectors;
	p->numSector = nsect;
	p->type = typ;
	p->buf = buf;
	p->parityStripeID = parityStripeID;
	p->which_ru = which_ru;
	p->CompleteFunc = wakeF;
	p->argument = arg;
	p->next = next;
	p->tracerec = tracerec;
	p->priority = RF_IO_NORMAL_PRIORITY;
	p->AuxFunc = NULL;
	p->buf2 = NULL;
	p->raidPtr = raidPtr;
	p->flags = flags;
	p->b_proc = kb_proc;
	return (p);
}

RF_DiskQueueData_t *
rf_CreateDiskQueueDataFull(
    RF_IoType_t typ,
    RF_SectorNum_t ssect,
    RF_SectorCount_t nsect,
    caddr_t buf,
    RF_StripeNum_t parityStripeID,
    RF_ReconUnitNum_t which_ru,
    int (*wakeF) (void *, int),
    void *arg,
    RF_DiskQueueData_t * next,
    RF_AccTraceEntry_t * tracerec,
    int priority,
    int (*AuxFunc) (void *,...),
    caddr_t buf2,
    void *raidPtr,
    RF_DiskQueueDataFlags_t flags,
    void *kb_proc)
{
	RF_DiskQueueData_t *p;

	RF_FREELIST_GET_INIT(rf_dqd_freelist, p, next, (RF_DiskQueueData_t *), init_dqd);

	p->sectorOffset = ssect + rf_protectedSectors;
	p->numSector = nsect;
	p->type = typ;
	p->buf = buf;
	p->parityStripeID = parityStripeID;
	p->which_ru = which_ru;
	p->CompleteFunc = wakeF;
	p->argument = arg;
	p->next = next;
	p->tracerec = tracerec;
	p->priority = priority;
	p->AuxFunc = AuxFunc;
	p->buf2 = buf2;
	p->raidPtr = raidPtr;
	p->flags = flags;
	p->b_proc = kb_proc;
	return (p);
}

void
rf_FreeDiskQueueData(p)
	RF_DiskQueueData_t *p;
{
	RF_FREELIST_FREE_CLEAN(rf_dqd_freelist, p, next, clean_dqd);
}
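
#if 0
/*
 * Illustrative sketch only (never compiled): roughly how a caller moves a
 * request through this module.  example_done(), example_read(), and the
 * constants passed for the sector and parity-stripe fields are
 * hypothetical placeholders, not part of the driver.
 */
static int
example_done(void *arg, int status)
{
	/* wake up whoever is waiting on this I/O */
	return (0);
}

static void
example_read(RF_Raid_t *raidPtr, RF_DiskQueue_t *queue, caddr_t buf)
{
	RF_DiskQueueData_t *req;

	req = rf_CreateDiskQueueData(RF_IO_TYPE_READ,
	    0,			/* start sector */
	    1,			/* number of sectors */
	    buf,
	    0,			/* parityStripeID */
	    0,			/* which_ru */
	    example_done,	/* CompleteFunc */
	    NULL,		/* argument */
	    NULL,		/* next */
	    NULL,		/* tracerec */
	    (void *) raidPtr,
	    0,			/* flags */
	    NULL);		/* kb_proc */
	rf_DiskIOEnqueue(queue, req, RF_IO_NORMAL_PRIORITY);
	/* rf_DiskIOComplete() runs when the disk finishes and calls
	 * example_done(); the completion path then releases the request
	 * with rf_FreeDiskQueueData(req). */
}
#endif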