/*	$NetBSD: cleanup_region.c,v 1.1.1.1 2009/06/23 10:08:43 tron Exp $	*/

/*++
/* NAME
/*	cleanup_region 3
/* SUMMARY
/*	queue file region manager
/* SYNOPSIS
/*	#include "cleanup.h"
/*
/*	void	cleanup_region_init(state)
/*	CLEANUP_STATE *state;
/*
/*	CLEANUP_REGION *cleanup_region_open(state, space_needed)
/*	CLEANUP_STATE *state;
/*	ssize_t	space_needed;
/*
/*	void	cleanup_region_close(state, rp)
/*	CLEANUP_STATE *state;
/*	CLEANUP_REGION *rp;
/*
/*	CLEANUP_REGION *cleanup_region_return(state, rp)
/*	CLEANUP_STATE *state;
/*	CLEANUP_REGION *rp;
/*
/*	void	cleanup_region_done(state)
/*	CLEANUP_STATE *state;
/* DESCRIPTION
/*	This module maintains queue file regions. Regions are created
/*	on-the-fly and can be reused multiple times. Each region
/*	structure consists of a file offset, a length (0 for an
/*	open-ended region at the end of the file), a write offset
/*	(maintained by the caller), and list linkage. Region
/*	boundaries are not enforced by this module. It is up to the
/*	caller to ensure that they stay within bounds.
/*
/*	cleanup_region_init() performs mandatory initialization and
/*	overlays an initial region structure over an already existing
/*	queue file. This function must not be called before the
/*	queue file is complete.
/*
/*	cleanup_region_open() opens an existing region or creates
/*	a new region that can accommodate at least the specified
/*	amount of space. A new region is an open-ended region at
/*	the end of the file; it must be closed (see next) before
/*	unrelated data can be appended to the same file.
/*
/*	cleanup_region_close() indicates that a region will not be
/*	updated further. With an open-ended region, the region's
/*	end is frozen just before the caller-maintained write offset.
/*	With a close-ended region, unused space (beginning at the
/*	caller-maintained write offset) may be returned to the free
/*	pool.
/*
/*	cleanup_region_return() returns a list of regions to the
/*	free pool, and returns a null pointer. To avoid fragmentation,
/*	adjacent free regions may be coalesced.
/*
/*	cleanup_region_done() destroys all in-memory information
/*	that was allocated for administering queue file regions.
/*
/*	An illustrative call sequence is shown in the usage sketch
/*	that follows this header comment.
/*
/*	Arguments:
/* .IP state
/*	Queue file and message processing state. This state is
/*	updated as records are processed and as errors happen.
/* .IP space_needed
/*	The minimum region size needed.
/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* AUTHOR(S)
/*	Wietse Venema
/*	IBM T.J. Watson Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*--*/
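
/*
 * Usage sketch (editorial illustration; not part of the original module):
 * a minimal, hypothetical call sequence for this interface. The 200-byte
 * request and the record-writing step are made up; real callers live
 * elsewhere in the cleanup server and maintain rp->write_offs themselves
 * while writing queue file records.
 *
 *	CLEANUP_REGION *rp;
 *
 *	cleanup_region_init(state);		-- once, after the queue file is complete
 *	rp = cleanup_region_open(state, 200);	-- reuse a free region or extend the file
 *	... write at most 200 bytes starting at rp->write_offs,
 *	... advancing rp->write_offs after each record ...
 *	cleanup_region_close(state, rp);	-- freeze an open-ended region's length
 *	rp = cleanup_region_return(state, rp);	-- back to the free pool; rp becomes null
 *	cleanup_region_done(state);		-- discard all region metadata
 */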

/* System library. */

#include <sys_defs.h>
#include <sys/stat.h>

/* Utility library. */

#include <msg.h>
#include <mymalloc.h>

/* Application-specific. */

#include <cleanup.h>

/* cleanup_region_alloc - create queue file region */

static CLEANUP_REGION *cleanup_region_alloc(off_t start, off_t len)
{
    CLEANUP_REGION *rp;

    rp = (CLEANUP_REGION *) mymalloc(sizeof(*rp));
    rp->write_offs = rp->start = start;
    rp->len = len;
    rp->next = 0;

    return (rp);
}

/* cleanup_region_free - destroy region list */

static CLEANUP_REGION *cleanup_region_free(CLEANUP_REGION *regions)
{
    CLEANUP_REGION *rp;
    CLEANUP_REGION *next;

    for (rp = regions; rp != 0; rp = next) {
	next = rp->next;
	myfree((char *) rp);
    }
    return (0);
}

/* cleanup_region_init - create initial region overlay */

void    cleanup_region_init(CLEANUP_STATE *state)
{
    const char *myname = "cleanup_region_init";

    /*
     * Sanity check.
     */
    if (state->free_regions != 0 || state->body_regions != 0)
	msg_panic("%s: repeated call", myname);

    /*
     * Craft the first regions on the fly, from circumstantial evidence.
     */
    state->body_regions =
	cleanup_region_alloc(state->append_hdr_pt_target,
			  state->xtra_offset - state->append_hdr_pt_target);
    if (msg_verbose)
	msg_info("%s: body start %ld len %ld",
	      myname, (long) state->body_regions->start, (long) state->body_regions->len);
}
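
/*
 * Example with hypothetical offsets: if state->append_hdr_pt_target is 500
 * and state->xtra_offset is 1500, the initial body region created above has
 * start = 500 and len = 1000, i.e. it covers the queue file bytes between
 * those two offsets.
 */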

/* cleanup_region_open - open existing region or create new region */

CLEANUP_REGION *cleanup_region_open(CLEANUP_STATE *state, ssize_t len)
{
    const char *myname = "cleanup_region_open";
    CLEANUP_REGION **rpp;
    CLEANUP_REGION *rp;
    struct stat st;

    /*
     * Find the first region that is large enough, or create a new region.
     */
    for (rpp = &state->free_regions; /* see below */ ; rpp = &(rp->next)) {

	/*
	 * Create an open-ended region at the end of the queue file. We
	 * freeze the region size after we stop writing to it. XXX Assume
	 * that fstat() returns a file size that is never less than the file
	 * append offset. It is not a problem if fstat() returns a larger
	 * result; we would just waste some space.
	 */
	if ((rp = *rpp) == 0) {
	    if (fstat(vstream_fileno(state->dst), &st) < 0)
		msg_fatal("%s: fstat file %s: %m", myname, cleanup_path);
	    rp = cleanup_region_alloc(st.st_size, 0);
	    break;
	}

	/*
	 * Reuse an existing region.
	 */
	if (rp->len >= len) {
	    (*rpp) = rp->next;
	    rp->next = 0;
	    rp->write_offs = rp->start;
	    break;
	}

	/*
	 * Skip a too small region.
	 */
	if (msg_verbose)
	    msg_info("%s: skip start %ld len %ld < %ld",
		     myname, (long) rp->start, (long) rp->len, (long) len);
    }
    if (msg_verbose)
	msg_info("%s: done start %ld len %ld",
		 myname, (long) rp->start, (long) rp->len);
    return (rp);
}
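
/*
 * Example with a hypothetical free list: given free regions
 * (start 600, len 50) -> (start 800, len 200), a request for 100 bytes
 * skips the first entry, unlinks the second, and resets its write offset
 * to 800. A request for 300 bytes walks off the end of the list and
 * instead creates a new open-ended region at the current end of the
 * queue file.
 */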

/* cleanup_region_close - freeze queue file region size */

void    cleanup_region_close(CLEANUP_STATE *unused_state, CLEANUP_REGION *rp)
{
    const char *myname = "cleanup_region_close";

    /*
     * If this region is still open ended, freeze the size. If this region is
     * closed, some future version of this routine may shrink the size and
     * return the unused portion to the free pool.
     */
    if (rp->len == 0)
	rp->len = rp->write_offs - rp->start;
    if (msg_verbose)
	msg_info("%s: freeze start %ld len %ld",
		 myname, (long) rp->start, (long) rp->len);
}
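
/*
 * Example with hypothetical offsets: an open-ended region (len == 0) with
 * start = 800 and write_offs = 880 is frozen at len = 80; a close-ended
 * region is left unchanged by the current implementation.
 */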

/* cleanup_region_return - return region list to free pool */

CLEANUP_REGION *cleanup_region_return(CLEANUP_STATE *state, CLEANUP_REGION *rp)
{
    CLEANUP_REGION **rpp;

    for (rpp = &state->free_regions; (*rpp) != 0; rpp = &(*rpp)->next)
	 /* void */ ;
    *rpp = rp;
    return (0);
}

/* cleanup_region_done - destroy region metadata */

void    cleanup_region_done(CLEANUP_STATE *state)
{
    if (state->free_regions != 0)
	state->free_regions = cleanup_region_free(state->free_regions);
    if (state->body_regions != 0)
	state->body_regions = cleanup_region_free(state->body_regions);
}