xref: /onnv-gate/usr/src/cmd/sendmail/db/mp/mp_fget.c (revision 0:68f95e015346)
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998
 *	Sleepycat Software.  All rights reserved.
 */
7*0Sstevel@tonic-gate #include "config.h"
8*0Sstevel@tonic-gate 
9*0Sstevel@tonic-gate #ifndef lint
10*0Sstevel@tonic-gate static const char sccsid[] = "@(#)mp_fget.c	10.53 (Sleepycat) 11/16/98";
11*0Sstevel@tonic-gate #endif /* not lint */
12*0Sstevel@tonic-gate 
13*0Sstevel@tonic-gate #ifndef NO_SYSTEM_INCLUDES
14*0Sstevel@tonic-gate #include <sys/types.h>
15*0Sstevel@tonic-gate 
16*0Sstevel@tonic-gate #include <errno.h>
17*0Sstevel@tonic-gate #include <string.h>
18*0Sstevel@tonic-gate #endif
19*0Sstevel@tonic-gate 
20*0Sstevel@tonic-gate #include "db_int.h"
21*0Sstevel@tonic-gate #include "shqueue.h"
22*0Sstevel@tonic-gate #include "db_shash.h"
23*0Sstevel@tonic-gate #include "mp.h"
24*0Sstevel@tonic-gate #include "common_ext.h"
25*0Sstevel@tonic-gate 
/*
 * memp_fget --
 *	Get a page from the file.
 *
 * dbmfp:	per-process memory-pool file handle.
 * pgnoaddr:	IN/OUT page number.  On DB_MPOOL_LAST/DB_MPOOL_NEW it is
 *		overwritten with the page number actually returned.
 * flags:	0 or exactly one of DB_MPOOL_CREATE, DB_MPOOL_LAST,
 *		DB_MPOOL_NEW (combinations are rejected below).
 * addrp:	OUT -- treated as a void ** and set to the page's data on
 *		success, NULL on error.
 *
 * Returns 0 on success, EINVAL or an error from the underlying
 * allocation/read routines on failure.  On success the buffer header's
 * reference count and the file handle's pin count have both been
 * incremented; the caller is expected to release the page via
 * memp_fput() (see the !!! comment below about readonly files).
 */
int
memp_fget(dbmfp, pgnoaddr, flags, addrp)
	DB_MPOOLFILE *dbmfp;
	db_pgno_t *pgnoaddr;
	u_int32_t flags;
	void *addrp;
{
	BH *bhp;
	DB_MPOOL *dbmp;
	MPOOL *mp;
	MPOOLFILE *mfp;
	size_t bucket, mf_offset;
	u_int32_t st_hsearch;
	int b_incr, first, ret;

	dbmp = dbmfp->dbmp;
	mp = dbmp->mp;
	mfp = dbmfp->mfp;

	/* Fail fast if the pool has been marked unusable. */
	MP_PANIC_CHECK(dbmp);

	/*
	 * Validate arguments.
	 *
	 * !!!
	 * Don't test for DB_MPOOL_CREATE and DB_MPOOL_NEW flags for readonly
	 * files here, and create non-existent pages in readonly files if the
	 * flags are set, later.  The reason is that the hash access method
	 * wants to get empty pages that don't really exist in readonly files.
	 * The only alternative is for hash to write the last "bucket" all the
	 * time, which we don't want to do because one of our big goals in life
	 * is to keep database files small.  It's sleazy as hell, but we catch
	 * any attempt to actually write the file in memp_fput().
	 */
#define	OKFLAGS	(DB_MPOOL_CREATE | DB_MPOOL_LAST | DB_MPOOL_NEW)
	if (flags != 0) {
		if ((ret =
		    __db_fchk(dbmp->dbenv, "memp_fget", flags, OKFLAGS)) != 0)
			return (ret);

		/*
		 * The switch enforces that at most one of the OK flags was
		 * set; any combination falls into the default case.  (The
		 * "case 0" label is unreachable here since flags != 0, but
		 * it is harmless.)
		 */
		switch (flags) {
		case DB_MPOOL_CREATE:
		case DB_MPOOL_LAST:
		case DB_MPOOL_NEW:
		case 0:
			break;
		default:
			return (__db_ferr(dbmp->dbenv, "memp_fget", 1));
		}
	}

#ifdef DIAGNOSTIC
	/*
	 * XXX
	 * We want to switch threads as often as possible.  Yield every time
	 * we get a new page to ensure contention.
	 */
	if (DB_GLOBAL(db_pageyield))
		__os_yield(1);
#endif

	/* Initialize remaining local variables. */
	/*
	 * mf_offset is the MPOOLFILE's offset in the shared region; paired
	 * with the page number it uniquely identifies a buffer below.
	 */
	mf_offset = R_OFFSET(dbmp, mfp);
	bhp = NULL;
	st_hsearch = 0;
	b_incr = ret = 0;

	/* Determine the hash bucket where this page will live. */
	bucket = BUCKET(mp, mf_offset, *pgnoaddr);

	LOCKREGION(dbmp);

	/*
	 * Check for the last or last + 1 page requests.
	 *
	 * Examine and update the file's last_pgno value.  We don't care if
	 * the last_pgno value immediately changes due to another thread --
	 * at this instant in time, the value is correct.  We do increment the
	 * current last_pgno value if the thread is asking for a new page,
	 * however, to ensure that two threads creating pages don't get the
	 * same one.
	 */
	if (LF_ISSET(DB_MPOOL_LAST | DB_MPOOL_NEW)) {
		if (LF_ISSET(DB_MPOOL_NEW))
			++mfp->last_pgno;
		*pgnoaddr = mfp->last_pgno;
		/* The target page changed, so recompute the hash bucket. */
		bucket = BUCKET(mp, mf_offset, mfp->last_pgno);

		/* A brand-new page can't already be cached; skip the search. */
		if (LF_ISSET(DB_MPOOL_NEW))
			goto alloc;
	}

	/*
	 * If mmap'ing the file and the page is not past the end of the file,
	 * just return a pointer.
	 *
	 * The page may be past the end of the file, so check the page number
	 * argument against the original length of the file.  If we previously
	 * returned pages past the original end of the file, last_pgno will
	 * have been updated to match the "new" end of the file, and checking
	 * against it would return pointers past the end of the mmap'd region.
	 *
	 * If another process has opened the file for writing since we mmap'd
	 * it, we will start playing the game by their rules, i.e. everything
	 * goes through the cache.  All pages previously returned will be safe,
	 * as long as the correct locking protocol was observed.
	 *
	 * XXX
	 * We don't discard the map because we don't know when all of the
	 * pages will have been discarded from the process' address space.
	 * It would be possible to do so by reference counting the open
	 * pages from the mmap, but it's unclear to me that it's worth it.
	 */
	if (dbmfp->addr != NULL && F_ISSET(mfp, MP_CAN_MMAP))
		if (*pgnoaddr > mfp->orig_last_pgno) {
			/*
			 * !!!
			 * See the comment above about non-existent pages and
			 * the hash access method.
			 */
			if (!LF_ISSET(DB_MPOOL_CREATE)) {
				__db_err(dbmp->dbenv,
				    "%s: page %lu doesn't exist",
				    __memp_fn(dbmfp), (u_long)*pgnoaddr);
				ret = EINVAL;
				goto err;
			}
		} else {
			/* Page lies inside the mapped region: hand it out. */
			*(void **)addrp =
			    R_ADDR(dbmfp, *pgnoaddr * mfp->stat.st_pagesize);
			++mp->stat.st_map;
			++mfp->stat.st_map;
			goto done;
		}

	/* Search the hash chain for the page. */
	for (bhp = SH_TAILQ_FIRST(&dbmp->htab[bucket], __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
		++st_hsearch;
		/* Buffers are keyed by (file offset, page number). */
		if (bhp->pgno != *pgnoaddr || bhp->mf_offset != mf_offset)
			continue;

		/* Refuse rather than wrap the 16-bit reference count. */
		if (bhp->ref == UINT16_T_MAX) {
			__db_err(dbmp->dbenv,
			    "%s: page %lu: reference count overflow",
			    __memp_fn(dbmfp), (u_long)bhp->pgno);
			ret = EINVAL;
			goto err;
		}

		/*
		 * Increment the reference count.  We may discard the region
		 * lock as we evaluate and/or read the buffer, so we need to
		 * ensure that it doesn't move and that its contents remain
		 * unchanged.
		 */
		++bhp->ref;
		b_incr = 1;		/* Remembered so "err:" can undo it. */

		/*
		 * Any buffer we find might be trouble.
		 *
		 * BH_LOCKED --
		 * I/O is in progress.  Because we've incremented the buffer
		 * reference count, we know the buffer can't move.  Unlock
		 * the region lock, wait for the I/O to complete, and reacquire
		 * the region.
		 */
		for (first = 1; F_ISSET(bhp, BH_LOCKED); first = 0) {
			UNLOCKREGION(dbmp);

			/*
			 * Explicitly yield the processor if it's not the first
			 * pass through this loop -- if we don't, we might end
			 * up running to the end of our CPU quantum as we will
			 * simply be swapping between the two locks.
			 */
			if (!first)
				__os_yield(1);

			/* Blocking on the buffer mutex waits out the I/O. */
			LOCKBUFFER(dbmp, bhp);
			/* Wait for I/O to finish... */
			UNLOCKBUFFER(dbmp, bhp);
			LOCKREGION(dbmp);
		}

		/*
		 * BH_TRASH --
		 * The contents of the buffer are garbage.  Shouldn't happen,
		 * and this read is likely to fail, but might as well try.
		 */
		if (F_ISSET(bhp, BH_TRASH))
			goto reread;

		/*
		 * BH_CALLPGIN --
		 * The buffer was converted so it could be written, and the
		 * contents need to be converted again.
		 */
		if (F_ISSET(bhp, BH_CALLPGIN)) {
			if ((ret = __memp_pg(dbmfp, bhp, 1)) != 0)
				goto err;
			F_CLR(bhp, BH_CALLPGIN);
		}

		++mp->stat.st_cache_hit;
		++mfp->stat.st_cache_hit;
		*(void **)addrp = bhp->buf;
		goto done;
	}

alloc:	/* Allocate new buffer header and data space. */
	/*
	 * The BH struct's trailing buf[] byte is replaced by a full page,
	 * hence the "sizeof(BH) - sizeof(u_int8_t) + pagesize" arithmetic.
	 */
	if ((ret = __memp_alloc(dbmp, sizeof(BH) -
	    sizeof(u_int8_t) + mfp->stat.st_pagesize, NULL, &bhp)) != 0)
		goto err;

#ifdef DIAGNOSTIC
	if ((ALIGNTYPE)bhp->buf & (sizeof(size_t) - 1)) {
		__db_err(dbmp->dbenv,
		    "Internal error: BH data NOT size_t aligned.");
		ret = EINVAL;
		goto err;
	}
#endif
	/* Initialize the BH fields. */
	memset(bhp, 0, sizeof(BH));
	LOCKINIT(dbmp, &bhp->mutex);
	bhp->ref = 1;
	bhp->pgno = *pgnoaddr;
	bhp->mf_offset = mf_offset;

	/*
	 * Prepend the bucket header to the head of the appropriate MPOOL
	 * bucket hash list.  Append the bucket header to the tail of the
	 * MPOOL LRU chain.
	 */
	SH_TAILQ_INSERT_HEAD(&dbmp->htab[bucket], bhp, hq, __bh);
	SH_TAILQ_INSERT_TAIL(&mp->bhq, bhp, q);

	/*
	 * If we created the page, zero it out and continue.
	 *
	 * !!!
	 * Note: DB_MPOOL_NEW specifically doesn't call the pgin function.
	 * If DB_MPOOL_CREATE is used, then the application's pgin function
	 * has to be able to handle pages of 0's -- if it uses DB_MPOOL_NEW,
	 * it can detect all of its page creates, and not bother.
	 *
	 * Otherwise, read the page into memory, optionally creating it if
	 * DB_MPOOL_CREATE is set.
	 */
	if (LF_ISSET(DB_MPOOL_NEW)) {
		if (mfp->clear_len == 0)
			memset(bhp->buf, 0, mfp->stat.st_pagesize);
		else {
			memset(bhp->buf, 0, mfp->clear_len);
#ifdef DIAGNOSTIC
			/* Poison the unused tail so stale reads are visible. */
			memset(bhp->buf + mfp->clear_len, 0xdb,
			    mfp->stat.st_pagesize - mfp->clear_len);
#endif
		}

		++mp->stat.st_page_create;
		++mfp->stat.st_page_create;
	} else {
		/*
		 * It's possible for the read function to fail, which means
		 * that we fail as well.  Note, the __memp_pgread() function
		 * discards the region lock, so the buffer must be pinned
		 * down so that it cannot move and its contents are unchanged.
		 */
reread:		if ((ret = __memp_pgread(dbmfp,
		    bhp, LF_ISSET(DB_MPOOL_CREATE))) != 0) {
			/*
			 * !!!
			 * Discard the buffer unless another thread is waiting
			 * on our I/O to complete.  Regardless, the header has
			 * the BH_TRASH flag set.
			 */
			if (bhp->ref == 1)
				__memp_bhfree(dbmp, mfp, bhp, 1);
			goto err;
		}

		++mp->stat.st_cache_miss;
		++mfp->stat.st_cache_miss;
	}

	/*
	 * If we're returning a page after our current notion of the last-page,
	 * update our information.  Note, there's no way to un-instantiate this
	 * page, it's going to exist whether it's returned to us dirty or not.
	 */
	if (bhp->pgno > mfp->last_pgno)
		mfp->last_pgno = bhp->pgno;

	/* A newly instantiated buffer is counted as clean until dirtied. */
	++mp->stat.st_page_clean;
	*(void **)addrp = bhp->buf;

done:	/* Update the chain search statistics. */
	if (st_hsearch) {
		++mp->stat.st_hash_searches;
		if (st_hsearch > mp->stat.st_hash_longest)
			mp->stat.st_hash_longest = st_hsearch;
		mp->stat.st_hash_examined += st_hsearch;
	}

	/* This handle now holds one more pinned page. */
	++dbmfp->pinref;

	UNLOCKREGION(dbmp);

	return (0);

err:	/* Discard our reference. */
	if (b_incr)
		--bhp->ref;
	UNLOCKREGION(dbmp);

	*(void **)addrp = NULL;
	return (ret);
}
352