/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998
 *	Sleepycat Software.  All rights reserved.
 */
#include "config.h"

#ifndef lint
static const char sccsid[] = "@(#)mp_sync.c	10.31 (Sleepycat) 12/11/98";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <errno.h>
#include <stdlib.h>
#endif

#include "db_int.h"
#include "shqueue.h"
#include "db_shash.h"
#include "mp.h"
#include "common_ext.h"

static int __bhcmp __P((const void *, const void *));
static int __memp_fsync __P((DB_MPOOLFILE *));

/*
 * memp_sync --
 *	Mpool sync function.
 */
int
memp_sync(dbmp, lsnp)
	DB_MPOOL *dbmp;
	DB_LSN *lsnp;
{
	BH *bhp, **bharray;
	DB_ENV *dbenv;
	MPOOL *mp;
	MPOOLFILE *mfp;
	int ar_cnt, nalloc, next, maxpin, ret, wrote;

	MP_PANIC_CHECK(dbmp);

	dbenv = dbmp->dbenv;
	mp = dbmp->mp;

	if (dbenv->lg_info == NULL) {
		__db_err(dbenv, "memp_sync: requires logging");
		return (EINVAL);
	}

	/*
	 * We try and write the buffers in page order: it should reduce seeks
	 * by the underlying filesystem and possibly reduce the actual number
	 * of writes.  We don't want to hold the region lock while we write
	 * the buffers, so only hold it locked while we create a list.  Get a
	 * good-sized block of memory to hold buffer pointers; we don't want
	 * to run out.
	 */
	LOCKREGION(dbmp);
	nalloc = mp->stat.st_page_dirty + mp->stat.st_page_dirty / 2 + 10;
	UNLOCKREGION(dbmp);

	if ((ret = __os_malloc(nalloc * sizeof(BH *), NULL, &bharray)) != 0)
		return (ret);

	LOCKREGION(dbmp);

	/*
	 * If the application is asking about a previous call to memp_sync(),
	 * and we haven't found any buffers that the application holding the
	 * pin couldn't write, return yes or no based on the current count.
	 * Note, if the application is asking about an LSN *smaller* than one
	 * we've already handled or are currently handling, then we return a
	 * result based on the count for the larger LSN.
	 */
	if (!F_ISSET(mp, MP_LSN_RETRY) && log_compare(lsnp, &mp->lsn) <= 0) {
		if (mp->lsn_cnt == 0) {
			*lsnp = mp->lsn;
			ret = 0;
		} else
			ret = DB_INCOMPLETE;
		goto done;
	}

	/* Else, it's a new checkpoint. */
	F_CLR(mp, MP_LSN_RETRY);

	/*
	 * Save the LSN.  We know that it's a new LSN or larger than the one
	 * for which we were already doing a checkpoint.  (BTW, I don't expect
	 * to see multiple LSN's from the same or multiple processes, but You
	 * Just Never Know.  Responding as if they all called with the largest
	 * of the LSNs specified makes everything work.)
	 *
	 * We don't currently use the LSN we save.  We could potentially save
	 * the last-written LSN in each buffer header and use it to determine
	 * what buffers need to be written.  The problem with this is that it's
	 * sizeof(LSN) more bytes of buffer header.  We currently write all the
	 * dirty buffers instead.
	 *
	 * Walk the list of shared memory segments clearing the count of
	 * buffers waiting to be written.
	 */
	mp->lsn = *lsnp;
	mp->lsn_cnt = 0;
	for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
		mfp->lsn_cnt = 0;

	/*
	 * Walk the list of buffers and mark all dirty buffers to be written
	 * and all pinned buffers to be potentially written (we can't know if
	 * we'll need to write them until the holding process returns them to
	 * the cache).  We do this in one pass while holding the region locked
	 * so that processes can't make new buffers dirty, causing us to never
	 * finish.  Since the application may have restarted the sync, clear
	 * any BH_WRITE flags that appear to be left over from previous calls.
	 *
	 * We don't want to pin down the entire buffer cache, otherwise we'll
	 * starve threads needing new pages.  Don't pin down more than 80% of
	 * the cache.
	 *
	 * Keep a count of the total number of buffers we need to write in
	 * MPOOL->lsn_cnt, and for each file, in MPOOLFILE->lsn_cnt.
	 */
	ar_cnt = 0;
	maxpin = ((mp->stat.st_page_dirty + mp->stat.st_page_clean) * 8) / 10;
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
		if (F_ISSET(bhp, BH_DIRTY) || bhp->ref != 0) {
			F_SET(bhp, BH_WRITE);

			++mp->lsn_cnt;

			mfp = R_ADDR(dbmp, bhp->mf_offset);
			++mfp->lsn_cnt;

			/*
			 * If the buffer isn't in use, we should be able to
			 * write it immediately, so increment the reference
			 * count to lock it and its contents down, and then
			 * save a reference to it.
			 *
			 * If we've run out of space to store buffer
			 * references, we're screwed.  We don't want to
			 * realloc the array while holding a region lock, so
			 * we set the flag to force the checkpoint to be done
			 * again, from scratch, later.
			 *
			 * If we've pinned down too much of the cache, stop
			 * and set a flag to force the checkpoint to be tried
			 * again later.
			 */
			if (bhp->ref == 0) {
				++bhp->ref;
				bharray[ar_cnt] = bhp;
				if (++ar_cnt >= nalloc || ar_cnt >= maxpin) {
					F_SET(mp, MP_LSN_RETRY);
					break;
				}
			}
		} else
			if (F_ISSET(bhp, BH_WRITE))
				F_CLR(bhp, BH_WRITE);

	/* If there are no buffers we can write immediately, we're done. */
	if (ar_cnt == 0) {
		ret = mp->lsn_cnt ? DB_INCOMPLETE : 0;
		goto done;
	}

	UNLOCKREGION(dbmp);

	/* Sort the buffers we're going to write. */
	qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);

	LOCKREGION(dbmp);

	/* Walk the array, writing buffers. */
	for (next = 0; next < ar_cnt; ++next) {
		/*
		 * It's possible for a thread to have gotten the buffer since
		 * we listed it for writing.  If the reference count is still
		 * 1, we're the only ones using the buffer, go ahead and write.
		 * If it's >1, then skip the buffer and assume that it will be
		 * written when it's returned to the cache.
		 */
		if (bharray[next]->ref > 1) {
			--bharray[next]->ref;
			continue;
		}

		/* Write the buffer. */
		mfp = R_ADDR(dbmp, bharray[next]->mf_offset);
		ret = __memp_bhwrite(dbmp, mfp, bharray[next], NULL, &wrote);

		/* Release the buffer. */
		--bharray[next]->ref;

		/* If there's an error, release the rest of the buffers. */
		if (ret != 0 || !wrote) {
			/*
			 * Any process syncing the shared memory buffer pool
			 * had better be able to write to any underlying file.
			 * Be understanding, but firm, on this point.
			 */
			if (ret == 0) {
				__db_err(dbenv, "%s: unable to flush page: %lu",
				    __memp_fns(dbmp, mfp),
				    (u_long)bharray[next]->pgno);
				ret = EPERM;
			}

			while (++next < ar_cnt)
				--bharray[next]->ref;
			goto err;
		}
	}
	ret = mp->lsn_cnt != 0 ||
	    F_ISSET(mp, MP_LSN_RETRY) ? DB_INCOMPLETE : 0;

done:
	if (0) {
err:		/*
		 * On error, clear:
		 *	MPOOL->lsn_cnt (the total sync count)
		 *	MPOOLFILE->lsn_cnt (the per-file sync count)
		 *	BH_WRITE flag (the scheduled for writing flag)
		 */
		mp->lsn_cnt = 0;
		for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
		    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
			mfp->lsn_cnt = 0;
		for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
		    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
			F_CLR(bhp, BH_WRITE);
	}
	UNLOCKREGION(dbmp);
	__os_free(bharray, nalloc * sizeof(BH *));
	return (ret);
}
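
/*
 * Illustrative sketch, not part of the original source: memp_sync()
 * returns DB_INCOMPLETE when pinned buffers kept it from completing
 * the checkpoint, so callers are expected to retry the call with the
 * same LSN until it succeeds, along the lines of:
 *
 *	DB_LSN lsn;
 *	int ret;
 *
 *	... obtain the checkpoint LSN from the log ...
 *	while ((ret = memp_sync(dbmp, &lsn)) == DB_INCOMPLETE)
 *		... wait briefly for pinned buffers to be returned ...
 */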

/*
 * memp_fsync --
 *	Mpool file sync function.
 */
int
memp_fsync(dbmfp)
	DB_MPOOLFILE *dbmfp;
{
	DB_MPOOL *dbmp;
	int is_tmp;

	dbmp = dbmfp->dbmp;

	MP_PANIC_CHECK(dbmp);

	/*
	 * If this handle doesn't have a file descriptor that's open for
	 * writing, or if the file is a temporary, there's no reason to
	 * proceed further.
	 */
	if (F_ISSET(dbmfp, MP_READONLY))
		return (0);

	LOCKREGION(dbmp);
	is_tmp = F_ISSET(dbmfp->mfp, MP_TEMP);
	UNLOCKREGION(dbmp);
	if (is_tmp)
		return (0);

	return (__memp_fsync(dbmfp));
}

/*
 * __mp_xxx_fd --
 *	Return a file descriptor for DB 1.85 compatibility locking.
 *
 * PUBLIC: int __mp_xxx_fd __P((DB_MPOOLFILE *, int *));
 */
int
__mp_xxx_fd(dbmfp, fdp)
	DB_MPOOLFILE *dbmfp;
	int *fdp;
{
	int ret;

	/*
	 * This is a truly spectacular layering violation, intended ONLY to
	 * support compatibility for the DB 1.85 DB->fd call.
	 *
	 * Sync the database file to disk, creating the file as necessary.
	 *
	 * We skip the MP_READONLY and MP_TEMP tests done by memp_fsync(3).
	 * The MP_READONLY test isn't interesting because we will either
	 * already have a file descriptor (we opened the database file for
	 * reading) or we aren't readonly (we created the database which
	 * requires write privileges).  The MP_TEMP test isn't interesting
	 * because we want to write to the backing file regardless so that
	 * we get a file descriptor to return.
	 */
	ret = dbmfp->fd == -1 ? __memp_fsync(dbmfp) : 0;

	return ((*fdp = dbmfp->fd) == -1 ? ENOENT : ret);
}

/*
 * __memp_fsync --
 *	Mpool file internal sync function.
 */
static int
__memp_fsync(dbmfp)
	DB_MPOOLFILE *dbmfp;
{
	BH *bhp, **bharray;
	DB_MPOOL *dbmp;
	MPOOL *mp;
	size_t mf_offset;
	int ar_cnt, incomplete, nalloc, next, ret, wrote;

	ret = 0;
	dbmp = dbmfp->dbmp;
	mp = dbmp->mp;
	mf_offset = R_OFFSET(dbmp, dbmfp->mfp);

	/*
	 * We try and write the buffers in page order: it should reduce seeks
	 * by the underlying filesystem and possibly reduce the actual number
	 * of writes.  We don't want to hold the region lock while we write
	 * the buffers, so only hold it locked while we create a list.  Get a
	 * good-sized block of memory to hold buffer pointers; we don't want
	 * to run out.
	 */
	LOCKREGION(dbmp);
	nalloc = mp->stat.st_page_dirty + mp->stat.st_page_dirty / 2 + 10;
	UNLOCKREGION(dbmp);

	if ((ret = __os_malloc(nalloc * sizeof(BH *), NULL, &bharray)) != 0)
		return (ret);

	LOCKREGION(dbmp);

	/*
	 * Walk the LRU list of buffer headers, and get a list of buffers to
	 * write for this MPOOLFILE.
	 */
	ar_cnt = incomplete = 0;
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
		if (!F_ISSET(bhp, BH_DIRTY) || bhp->mf_offset != mf_offset)
			continue;
		if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED)) {
			incomplete = 1;
			continue;
		}

		++bhp->ref;
		bharray[ar_cnt] = bhp;

		/*
		 * If we've run out of space to store buffer references, we're
		 * screwed, as we don't want to realloc the array while
		 * holding a region lock.  Set the incomplete flag -- the only
		 * way we can get here is if the file is active in the buffer
		 * cache, which is the same thing as finding pinned buffers.
		 */
		if (++ar_cnt >= nalloc) {
			incomplete = 1;
			break;
		}
	}

	UNLOCKREGION(dbmp);

	/* Sort the buffers we're going to write. */
	if (ar_cnt != 0)
		qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);

	LOCKREGION(dbmp);

	/* Walk the array, writing buffers. */
	for (next = 0; next < ar_cnt; ++next) {
		/*
		 * It's possible for a thread to have gotten the buffer since
		 * we listed it for writing.  If the reference count is still
		 * 1, we're the only ones using the buffer, go ahead and write.
		 * If it's >1, then skip the buffer.
		 */
		if (bharray[next]->ref > 1) {
			incomplete = 1;

			--bharray[next]->ref;
			continue;
		}

		/* Write the buffer. */
		ret = __memp_pgwrite(dbmfp, bharray[next], NULL, &wrote);

		/* Release the buffer. */
		--bharray[next]->ref;

		/* If there's an error, release the rest of the buffers. */
		if (ret != 0) {
			while (++next < ar_cnt)
				--bharray[next]->ref;
			goto err;
		}

		/*
		 * If we didn't write the buffer for some reason, don't return
		 * success.
		 */
		if (!wrote)
			incomplete = 1;
	}

err:	UNLOCKREGION(dbmp);

	__os_free(bharray, nalloc * sizeof(BH *));

	/*
	 * Sync the underlying file as the last thing we do, so that the OS
	 * has maximal opportunity to flush buffers before we request it.
	 *
	 * XXX:
	 * Don't lock the region around the sync, fsync(2) has no atomicity
	 * issues.
	 */
	if (ret == 0)
		return (incomplete ? DB_INCOMPLETE : __os_fsync(dbmfp->fd));
	return (ret);
}
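
/*
 * Illustrative sketch, not part of the original source: memp_trickle()
 * below is intended to be called periodically, e.g. from a separate
 * thread or process, so that a page allocation rarely has to wait for
 * a buffer write.  A background loop keeping 20% of the pool clean
 * might look like:
 *
 *	int nwrote, ret;
 *
 *	for (;;) {
 *		if ((ret = memp_trickle(dbmp, 20, &nwrote)) != 0)
 *			break;
 *		... sleep for a while ...
 *	}
 */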

/*
 * memp_trickle --
 *	Keep a specified percentage of the buffers clean.
 */
int
memp_trickle(dbmp, pct, nwrotep)
	DB_MPOOL *dbmp;
	int pct, *nwrotep;
{
	BH *bhp;
	MPOOL *mp;
	MPOOLFILE *mfp;
	db_pgno_t pgno;
	u_long total;
	int ret, wrote;

	MP_PANIC_CHECK(dbmp);

	mp = dbmp->mp;
	if (nwrotep != NULL)
		*nwrotep = 0;

	if (pct < 1 || pct > 100)
		return (EINVAL);

	LOCKREGION(dbmp);

	/*
	 * If there are sufficient clean buffers, or no buffers or no dirty
	 * buffers, we're done.
	 *
	 * XXX
	 * Using st_page_clean and st_page_dirty is our only choice at the
	 * moment, but it's not as correct as we might like in the presence
	 * of pools with more than one buffer size, as a free 512-byte buffer
	 * isn't the same as a free 8K buffer.
	 */
loop:	total = mp->stat.st_page_clean + mp->stat.st_page_dirty;
	if (total == 0 || mp->stat.st_page_dirty == 0 ||
	    (mp->stat.st_page_clean * 100) / total >= (u_long)pct) {
		UNLOCKREGION(dbmp);
		return (0);
	}

	/* Loop until we write a buffer. */
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
		if (bhp->ref != 0 ||
		    !F_ISSET(bhp, BH_DIRTY) || F_ISSET(bhp, BH_LOCKED))
			continue;

		mfp = R_ADDR(dbmp, bhp->mf_offset);

		/*
		 * We can't write to temporary files -- see the comment in
		 * mp_bh.c:__memp_bhwrite().
		 */
		if (F_ISSET(mfp, MP_TEMP))
			continue;

		pgno = bhp->pgno;
		if ((ret = __memp_bhwrite(dbmp, mfp, bhp, NULL, &wrote)) != 0)
			goto err;

		/*
		 * Any process syncing the shared memory buffer pool had better
		 * be able to write to any underlying file.  Be understanding,
		 * but firm, on this point.
		 */
		if (!wrote) {
			__db_err(dbmp->dbenv, "%s: unable to flush page: %lu",
			    __memp_fns(dbmp, mfp), (u_long)pgno);
			ret = EPERM;
			goto err;
		}

		++mp->stat.st_page_trickle;
		if (nwrotep != NULL)
			++*nwrotep;
		goto loop;
	}

	/* No more buffers to write. */
	ret = 0;

err:	UNLOCKREGION(dbmp);
	return (ret);
}

static int
__bhcmp(p1, p2)
	const void *p1, *p2;
{
	BH *bhp1, *bhp2;

	bhp1 = *(BH * const *)p1;
	bhp2 = *(BH * const *)p2;

	/* Sort by file (shared memory pool offset). */
	if (bhp1->mf_offset < bhp2->mf_offset)
		return (-1);
	if (bhp1->mf_offset > bhp2->mf_offset)
		return (1);

	/*
	 * !!!
	 * Defend against badly written quicksort code calling the comparison
	 * function with two identical pointers (e.g., WATCOM C++ (Power++)).
	 */
	if (bhp1->pgno < bhp2->pgno)
		return (-1);
	if (bhp1->pgno > bhp2->pgno)
		return (1);
	return (0);
}