/*	$NetBSD: rf_diskqueue.c,v 1.29 2004/01/01 19:27:35 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/****************************************************************************
 *
 * rf_diskqueue.c -- higher-level disk queue code
 *
 * the routines here are a generic wrapper around the actual queueing
 * routines.  The code here implements thread scheduling, synchronization,
 * and locking ops (see below) on top of the lower-level queueing code.
 *
 * to support atomic RMW, we implement "locking operations".  When a
 * locking op is dispatched to the lower levels of the driver, the
 * queue is locked, and no further I/Os are dispatched until the queue
 * receives & completes a corresponding "unlocking operation".  This
 * code relies on the higher layers to guarantee that a locking op
 * will always be eventually followed by an unlocking op.  The model
 * is that the higher layers are structured so locking and unlocking
 * ops occur in pairs, i.e. an unlocking op cannot be generated until
 * after a locking op reports completion.  There is no good way to
 * check to see that an unlocking op "corresponds" to the op that
 * currently has the queue locked, so we make no such attempt.  Since
 * by definition there can be only one locking op outstanding on a
 * disk, this should not be a problem.
 *
 * In the kernel, we allow multiple I/Os to be concurrently dispatched
 * to the disk driver.  In order to support locking ops in this
 * environment, when we decide to do a locking op, we stop dispatching
 * new I/Os and wait until all dispatched I/Os have completed before
 * dispatching the locking op.
 *
 * Unfortunately, the code is different in the 3 different operating
 * states (user level, kernel, simulator).  In the kernel, I/O is
 * non-blocking, and we have no disk threads to dispatch for us.
 * Therefore, we have to dispatch new I/Os to the scsi driver at the
 * time of enqueue, and also at the time of completion.  At user
 * level, I/O is blocking, and so only the disk threads may dispatch
 * I/Os.  Thus at user level, all we can do at enqueue time is enqueue
 * and wake up the disk thread to do the dispatch.
 *
 ****************************************************************************/
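
/*
 * A minimal sketch (not part of the driver) of the locking/unlocking
 * pairing described above, as a higher layer might issue it for an
 * atomic read-modify-write.  "example_rmw" is a hypothetical name, and
 * the flags RF_LOCK_DISK_QUEUE/RF_UNLOCK_DISK_QUEUE are assumed to be
 * the request flags tested by RF_LOCKING_REQ()/RF_UNLOCKING_REQ() in
 * rf_diskqueue.h.
 */
#if 0
static void
example_rmw(RF_DiskQueue_t *queue, RF_DiskQueueData_t *rd,
	    RF_DiskQueueData_t *wr)
{
	/* the read locks the queue: once it is dispatched, no other
	 * I/O is started on this disk */
	rd->flags |= RF_LOCK_DISK_QUEUE;
	rf_DiskIOEnqueue(queue, rd, RF_IO_NORMAL_PRIORITY);

	/* ... the higher layer must wait for rd's completion callback
	 * before generating the paired write ... */

	/* the write unlocks the queue when it completes (see
	 * rf_DiskIOComplete()) */
	wr->flags |= RF_UNLOCK_DISK_QUEUE;
	rf_DiskIOEnqueue(queue, wr, RF_IO_NORMAL_PRIORITY);
}
#endif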
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_diskqueue.c,v 1.29 2004/01/01 19:27:35 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_threadstuff.h"
#include "rf_raid.h"
#include "rf_diskqueue.h"
#include "rf_alloclist.h"
#include "rf_acctrace.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_debugprint.h"
#include "rf_shutdown.h"
#include "rf_cvscan.h"
#include "rf_sstf.h"
#include "rf_fifo.h"
#include "rf_kintf.h"

static void rf_ShutdownDiskQueueSystem(void *);

#ifndef RF_DEBUG_DISKQUEUE
#define RF_DEBUG_DISKQUEUE 0
#endif

#if RF_DEBUG_DISKQUEUE
#define Dprintf1(s,a) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf2(s,a,b) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf3(s,a,b,c) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),NULL,NULL,NULL,NULL,NULL)
#else
#define Dprintf1(s,a)
#define Dprintf2(s,a,b)
#define Dprintf3(s,a,b,c)
#endif

/*****************************************************************************
 *
 * the disk queue switch defines all the functions used in the
 * different queueing disciplines: the queue type name, and the
 * create, enqueue, dequeue, peek, and promote routines
 *
 ****************************************************************************/

static const RF_DiskQueueSW_t diskqueuesw[] = {
	{"fifo",		/* FIFO */
		rf_FifoCreate,
		rf_FifoEnqueue,
		rf_FifoDequeue,
		rf_FifoPeek,
		rf_FifoPromote},

	{"cvscan",		/* cvscan */
		rf_CvscanCreate,
		rf_CvscanEnqueue,
		rf_CvscanDequeue,
		rf_CvscanPeek,
		rf_CvscanPromote},

	{"sstf",		/* shortest seek time first */
		rf_SstfCreate,
		rf_SstfEnqueue,
		rf_SstfDequeue,
		rf_SstfPeek,
		rf_SstfPromote},

	{"scan",		/* SCAN (two-way elevator) */
		rf_ScanCreate,
		rf_SstfEnqueue,
		rf_ScanDequeue,
		rf_ScanPeek,
		rf_SstfPromote},

	{"cscan",		/* CSCAN (one-way elevator) */
		rf_CscanCreate,
		rf_SstfEnqueue,
		rf_CscanDequeue,
		rf_CscanPeek,
		rf_SstfPromote},

};
#define NUM_DISK_QUEUE_TYPES (sizeof(diskqueuesw)/sizeof(RF_DiskQueueSW_t))

static struct pool rf_dqd_pool;
#define RF_MAX_FREE_DQD 256
#define RF_DQD_INC 16
#define RF_DQD_INITIAL 64

#include <sys/buf.h>

/* configures a single disk queue */

int
rf_ConfigureDiskQueue(RF_Raid_t *raidPtr, RF_DiskQueue_t *diskqueue,
		      RF_RowCol_t c, const RF_DiskQueueSW_t *p,
		      RF_SectorCount_t sectPerDisk, dev_t dev,
		      int maxOutstanding, RF_ShutdownList_t **listp,
		      RF_AllocListElem_t *clList)
{
	diskqueue->col = c;
	diskqueue->qPtr = p;
	diskqueue->qHdr = (p->Create) (sectPerDisk, clList, listp);
	diskqueue->dev = dev;
	diskqueue->numOutstanding = 0;
	diskqueue->queueLength = 0;
	diskqueue->maxOutstanding = maxOutstanding;
	diskqueue->curPriority = RF_IO_NORMAL_PRIORITY;
	diskqueue->nextLockingOp = NULL;
	diskqueue->flags = 0;
	diskqueue->raidPtr = raidPtr;
	diskqueue->rf_cinfo = &raidPtr->raid_cinfo[c];
	rf_mutex_init(&diskqueue->mutex);
	diskqueue->cond = 0;
	return (0);
}
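
/*
 * A minimal sketch of what a new discipline must supply to appear in
 * diskqueuesw[] above.  The prototypes below are inferred from the
 * call sites in this file (see rf_diskqueue.h for the real ones);
 * "ExampleQ" and the "rf_Example*" routines are hypothetical names.
 * A complete discipline also supplies Peek and Promote routines,
 * omitted here; rf_fifo.c is the simplest real example.
 */
#if 0
typedef struct ExampleQ {
	RF_DiskQueueData_t *head;	/* oldest queued request */
	RF_DiskQueueData_t *tail;	/* newest queued request */
} ExampleQ;

static void *
rf_ExampleCreate(RF_SectorCount_t sectPerDisk, RF_AllocListElem_t *clList,
		 RF_ShutdownList_t **listp)
{
	ExampleQ *q;

	RF_MallocAndAdd(q, sizeof(ExampleQ), (ExampleQ *), clList);
	if (q != NULL)
		q->head = q->tail = NULL;
	return (q);
}

static void
rf_ExampleEnqueue(void *qptr, RF_DiskQueueData_t *req, int priority)
{
	ExampleQ *q = qptr;

	/* strict FIFO: priority is accepted but ignored */
	req->next = NULL;
	if (q->tail)
		q->tail->next = req;
	else
		q->head = req;
	q->tail = req;
}

static RF_DiskQueueData_t *
rf_ExampleDequeue(void *qptr)
{
	ExampleQ *q = qptr;
	RF_DiskQueueData_t *req = q->head;

	if (req) {
		q->head = req->next;
		if (q->head == NULL)
			q->tail = NULL;
		req->next = NULL;
	}
	return (req);
}
#endif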
static void
rf_ShutdownDiskQueueSystem(void *ignored)
{
	pool_destroy(&rf_dqd_pool);
}

int
rf_ConfigureDiskQueueSystem(RF_ShutdownList_t **listp)
{
	int rc;

	pool_init(&rf_dqd_pool, sizeof(RF_DiskQueueData_t), 0, 0, 0,
		  "rf_dqd_pl", NULL);
	pool_sethiwat(&rf_dqd_pool, RF_MAX_FREE_DQD);
	pool_prime(&rf_dqd_pool, RF_DQD_INITIAL);

	rc = rf_ShutdownCreate(listp, rf_ShutdownDiskQueueSystem, NULL);
	if (rc) {
		rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
		rf_ShutdownDiskQueueSystem(NULL);
		return (rc);
	}

	return (0);
}

int
rf_ConfigureDiskQueues(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
		       RF_Config_t *cfgPtr)
{
	RF_DiskQueue_t *diskQueues, *spareQueues;
	const RF_DiskQueueSW_t *p;
	RF_RowCol_t r, c;
	int rc, i;

	raidPtr->maxQueueDepth = cfgPtr->maxOutstandingDiskReqs;

	for (p = NULL, i = 0; i < NUM_DISK_QUEUE_TYPES; i++) {
		if (!strcmp(diskqueuesw[i].queueType, cfgPtr->diskQueueType)) {
			p = &diskqueuesw[i];
			break;
		}
	}
	if (p == NULL) {
		RF_ERRORMSG2("Unknown queue type \"%s\".  Using %s\n", cfgPtr->diskQueueType, diskqueuesw[0].queueType);
		p = &diskqueuesw[0];
	}
	raidPtr->qType = p;

	RF_MallocAndAdd(diskQueues,
			(raidPtr->numCol + RF_MAXSPARE) *
			sizeof(RF_DiskQueue_t), (RF_DiskQueue_t *),
			raidPtr->cleanupList);
	if (diskQueues == NULL)
		return (ENOMEM);
	raidPtr->Queues = diskQueues;

	for (c = 0; c < raidPtr->numCol; c++) {
		rc = rf_ConfigureDiskQueue(raidPtr, &diskQueues[c],
					   c, p,
					   raidPtr->sectorsPerDisk,
					   raidPtr->Disks[c].dev,
					   cfgPtr->maxOutstandingDiskReqs,
					   listp, raidPtr->cleanupList);
		if (rc)
			return (rc);
	}

	spareQueues = &raidPtr->Queues[raidPtr->numCol];
	for (r = 0; r < raidPtr->numSpare; r++) {
		rc = rf_ConfigureDiskQueue(raidPtr, &spareQueues[r],
					   raidPtr->numCol + r, p,
					   raidPtr->sectorsPerDisk,
					   raidPtr->Disks[raidPtr->numCol + r].dev,
					   cfgPtr->maxOutstandingDiskReqs, listp,
					   raidPtr->cleanupList);
		if (rc)
			return (rc);
	}
	return (0);
}
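
/*
 * A minimal sketch of the two RF_Config_t knobs consumed above; these
 * are the only fields of cfgPtr this file reads.  Treating
 * diskQueueType as a fixed-size string buffer is an assumption (see
 * raidframevar.h for the real declaration), and "example_configure"
 * is a hypothetical name.
 */
#if 0
static void
example_configure(RF_Config_t *cfg)
{
	/* any of "fifo", "cvscan", "sstf", "scan", "cscan"; an unknown
	 * name falls back to "fifo" with a warning */
	strlcpy(cfg->diskQueueType, "fifo", sizeof(cfg->diskQueueType));

	/* per-disk cap on concurrently dispatched I/Os; becomes
	 * maxOutstanding in each RF_DiskQueue_t */
	cfg->maxOutstandingDiskReqs = 6;
}
#endif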
/* Enqueue a disk I/O
 *
 * Unfortunately, we have to do things differently in the different
 * environments (simulator, user-level, kernel).
 * At user level, all I/O is blocking, so we have 1 or more threads/disk
 * and the thread that enqueues is different from the thread that dequeues.
 * In the kernel, I/O is non-blocking and so we'd like to have multiple
 * I/Os outstanding on the physical disks when possible.
 *
 * when any request arrives at a queue, we have two choices:
 *    dispatch it to the lower levels
 *    queue it up
 *
 * kernel rules for when to do what:
 *    locking request:  queue empty => dispatch and lock queue,
 *                      else queue it
 *    unlocking req  :  always dispatch it
 *    normal req     :  queue empty => dispatch it & set priority
 *                      queue not full & priority is ok => dispatch it
 *                      else queue it
 *
 * user-level rules:
 *    always enqueue.  In the special case of an unlocking op, enqueue
 *    in a special way that will cause the unlocking op to be the next
 *    thing dequeued.
 *
 * simulator rules:
 *    Do the same as at user level, with the sleeps and wakeups suppressed.
 */
void
rf_DiskIOEnqueue(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req, int pri)
{
	RF_ETIMER_START(req->qtime);
	RF_ASSERT(req->type == RF_IO_TYPE_NOP || req->numSector);
	req->priority = pri;

#if RF_DEBUG_DISKQUEUE
	if (rf_queueDebug && (req->numSector == 0)) {
		printf("Warning: Enqueueing zero-sector access\n");
	}
#endif
	/*
	 * kernel
	 */
	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
	/* locking request */
	if (RF_LOCKING_REQ(req)) {
		if (RF_QUEUE_EMPTY(queue)) {
			Dprintf2("Dispatching pri %d locking op to c %d (queue empty)\n", pri, queue->col);
			RF_LOCK_QUEUE(queue);
			rf_DispatchKernelIO(queue, req);
		} else {
			queue->queueLength++;	/* increment count of number
						 * of requests waiting in this
						 * queue */
			Dprintf2("Enqueueing pri %d locking op to c %d (queue not empty)\n", pri, queue->col);
			req->queue = (void *) queue;
			(queue->qPtr->Enqueue) (queue->qHdr, req, pri);
		}
	}
	/* unlocking request */
	else if (RF_UNLOCKING_REQ(req)) {	/* we'll do the actual unlock
						 * when this I/O completes */
		Dprintf2("Dispatching pri %d unlocking op to c %d\n", pri, queue->col);
		RF_ASSERT(RF_QUEUE_LOCKED(queue));
		rf_DispatchKernelIO(queue, req);
	}
	/* normal request */
	else if (RF_OK_TO_DISPATCH(queue, req)) {
		Dprintf2("Dispatching pri %d regular op to c %d (ok to dispatch)\n", pri, queue->col);
		rf_DispatchKernelIO(queue, req);
	} else {
		queue->queueLength++;	/* increment count of number of
					 * requests waiting in this queue */
		Dprintf2("Enqueueing pri %d regular op to c %d (not ok to dispatch)\n", pri, queue->col);
		req->queue = (void *) queue;
		(queue->qPtr->Enqueue) (queue->qHdr, req, pri);
	}
	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
}
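
/*
 * A minimal sketch of a caller issuing a normal (non-locking) read
 * through the queue layer.  "example_read" and "example_done" are
 * hypothetical; the real callers are the DAG execution routines.  The
 * argument order follows rf_CreateDiskQueueData() below.
 */
#if 0
static int example_done(void *arg, int status);	/* completion callback */

static void
example_read(RF_Raid_t *raidPtr, RF_RowCol_t col, RF_SectorNum_t sect,
	     RF_SectorCount_t nsect, caddr_t buf, void *b_proc)
{
	RF_DiskQueueData_t *req;

	req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, sect, nsect, buf,
	    0, 0, example_done, NULL, NULL, NULL, raidPtr, 0, b_proc);
	if (req == NULL)
		return;		/* no buf header was available */

	/* dispatches immediately if the queue is unlocked and not
	 * full, otherwise queues under the configured discipline */
	rf_DiskIOEnqueue(&raidPtr->Queues[col], req, RF_IO_NORMAL_PRIORITY);
}
#endif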
/* get the next set of I/Os started, kernel version only */
void
rf_DiskIOComplete(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req, int status)
{
	int done = 0;

	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOComplete");

	/* unlock the queue: (1) after an unlocking req completes (2) after a
	 * locking req fails */
	if (RF_UNLOCKING_REQ(req) || (RF_LOCKING_REQ(req) && status)) {
		Dprintf1("DiskIOComplete: unlocking queue at c %d\n", queue->col);
		RF_ASSERT(RF_QUEUE_LOCKED(queue));
		RF_UNLOCK_QUEUE(queue);
	}
	queue->numOutstanding--;
	RF_ASSERT(queue->numOutstanding >= 0);

	/* dispatch requests to the disk until we find one that we can't. */
	/* no reason to continue once we've filled up the queue */
	/* no reason to even start if the queue is locked */

	while (!done && !RF_QUEUE_FULL(queue) && !RF_QUEUE_LOCKED(queue)) {
		if (queue->nextLockingOp) {
			req = queue->nextLockingOp;
			queue->nextLockingOp = NULL;
			Dprintf2("DiskIOComplete: a pri %d locking req was pending at c %d\n", req->priority, queue->col);
		} else {
			req = (queue->qPtr->Dequeue) (queue->qHdr);
			if (req != NULL) {
				Dprintf2("DiskIOComplete: extracting pri %d req from queue at c %d\n", req->priority, queue->col);
			} else {
				Dprintf1("DiskIOComplete: no more requests to extract.\n", "");
			}
		}
		if (req) {
			queue->queueLength--;	/* decrement count of number
						 * of requests waiting in this
						 * queue */
			RF_ASSERT(queue->queueLength >= 0);
		}
		if (!req)
			done = 1;
		else if (RF_LOCKING_REQ(req)) {
			if (RF_QUEUE_EMPTY(queue)) {	/* dispatch it */
				Dprintf2("DiskIOComplete: dispatching pri %d locking req to c %d (queue empty)\n", req->priority, queue->col);
				RF_LOCK_QUEUE(queue);
				rf_DispatchKernelIO(queue, req);
				done = 1;
			} else {	/* put it aside to wait for
					 * the queue to drain */
				Dprintf2("DiskIOComplete: postponing pri %d locking req to c %d\n", req->priority, queue->col);
				RF_ASSERT(queue->nextLockingOp == NULL);
				queue->nextLockingOp = req;
				done = 1;
			}
		} else if (RF_UNLOCKING_REQ(req)) {	/* should not happen:
							 * unlocking ops should
							 * not get queued */
			RF_ASSERT(RF_QUEUE_LOCKED(queue));	/* support it anyway for
								 * the future */
			Dprintf2("DiskIOComplete: dispatching pri %d unl req to c %d (SHOULD NOT SEE THIS)\n", req->priority, queue->col);
			rf_DispatchKernelIO(queue, req);
			done = 1;
		} else if (RF_OK_TO_DISPATCH(queue, req)) {
			Dprintf2("DiskIOComplete: dispatching pri %d regular req to c %d (ok to dispatch)\n", req->priority, queue->col);
			rf_DispatchKernelIO(queue, req);
		} else {	/* we can't dispatch it, so just re-enqueue
				 * it. */
			/* potential trouble here if disk queues batch reqs */
			Dprintf2("DiskIOComplete: re-enqueueing pri %d regular req to c %d\n", req->priority, queue->col);
			queue->queueLength++;
			(queue->qPtr->Enqueue) (queue->qHdr, req, req->priority);
			done = 1;
		}
	}

	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOComplete");
}
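
/*
 * A rough sketch of how a completion feeds back into the queue.  In
 * the real kernel the equivalent glue lives in the RAIDframe kernel
 * interface (the biodone path), not here; "example_iodone" and its
 * error argument are illustrative only.
 */
#if 0
static void
example_iodone(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req, int error)
{
	/* give the slot back and let the queue dispatch more work;
	 * this also unlocks the queue if req was an unlocking op */
	rf_DiskIOComplete(queue, req, error);

	/* then wake whoever was waiting on this request */
	if (req->CompleteFunc)
		(req->CompleteFunc) (req->argument, error);
}
#endif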
/* promotes accesses tagged with the given parityStripeID from low priority
 * to normal priority.  This promotion is optional, meaning that a queue
 * need not implement it.  If there is no promotion routine associated with
 * a queue, this routine does nothing and returns -1.
 */
int
rf_DiskIOPromote(RF_DiskQueue_t *queue, RF_StripeNum_t parityStripeID,
		 RF_ReconUnitNum_t which_ru)
{
	int retval;

	if (!queue->qPtr->Promote)
		return (-1);
	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
	retval = (queue->qPtr->Promote) (queue->qHdr, parityStripeID, which_ru);
	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
	return (retval);
}

RF_DiskQueueData_t *
rf_CreateDiskQueueData(RF_IoType_t typ, RF_SectorNum_t ssect,
		       RF_SectorCount_t nsect, caddr_t buf,
		       RF_StripeNum_t parityStripeID,
		       RF_ReconUnitNum_t which_ru,
		       int (*wakeF) (void *, int), void *arg,
		       RF_DiskQueueData_t *next,
		       RF_AccTraceEntry_t *tracerec, void *raidPtr,
		       RF_DiskQueueDataFlags_t flags, void *kb_proc)
{
	RF_DiskQueueData_t *p;

	p = pool_get(&rf_dqd_pool, PR_WAITOK);
	p->bp = pool_get(&bufpool, PR_NOWAIT);	/* XXX: make up our minds
						 * here.  WAITOK, or
						 * NOWAIT?? */
	if (p->bp == NULL) {
		/* no memory for the buffer!?!? */
		pool_put(&rf_dqd_pool, p);
		return (NULL);
	}

	memset(p->bp, 0, sizeof(struct buf));
	p->sectorOffset = ssect + rf_protectedSectors;
	p->numSector = nsect;
	p->type = typ;
	p->buf = buf;
	p->parityStripeID = parityStripeID;
	p->which_ru = which_ru;
	p->CompleteFunc = wakeF;
	p->argument = arg;
	p->next = next;
	p->tracerec = tracerec;
	p->priority = RF_IO_NORMAL_PRIORITY;
	p->raidPtr = raidPtr;
	p->flags = flags;
	p->b_proc = kb_proc;
	return (p);
}

void
rf_FreeDiskQueueData(RF_DiskQueueData_t *p)
{
	pool_put(&bufpool, p->bp);
	pool_put(&rf_dqd_pool, p);
}
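
/*
 * An end-to-end sketch of the request lifecycle: create, enqueue,
 * free from the completion callback, plus the optional promotion
 * hook.  All "example_*" names are hypothetical, and this assumes the
 * request itself was passed as wakeF's argument when it was created.
 */
#if 0
static int
example_done(void *arg, int status)
{
	RF_DiskQueueData_t *req = arg;

	/* the I/O is finished; release the buf header and the DQD */
	rf_FreeDiskQueueData(req);
	return (0);
}

static void
example_promote(RF_Raid_t *raidPtr, RF_RowCol_t col,
		RF_StripeNum_t psid, RF_ReconUnitNum_t ru)
{
	/* bump queued low-priority accesses for this parity stripe to
	 * normal priority; returns -1 if the discipline has no
	 * Promote routine */
	(void) rf_DiskIOPromote(&raidPtr->Queues[col], psid, ru);
}
#endif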