/*	$NetBSD: cleanup_region.c,v 1.3 2020/03/18 19:05:15 christos Exp $	*/

/*++
/* NAME
/*	cleanup_region 3
/* SUMMARY
/*	queue file region manager
/* SYNOPSIS
/*	#include "cleanup.h"
/*
/*	void	cleanup_region_init(state)
/*	CLEANUP_STATE *state;
/*
/*	CLEANUP_REGION *cleanup_region_open(state, space_needed)
/*	CLEANUP_STATE *state;
/*	ssize_t	space_needed;
/*
/*	int	cleanup_region_close(state, rp)
/*	CLEANUP_STATE *state;
/*	CLEANUP_REGION *rp;
/*
/*	CLEANUP_REGION *cleanup_region_return(state, rp)
/*	CLEANUP_STATE *state;
/*	CLEANUP_REGION *rp;
/*
/*	void	cleanup_region_done(state)
/*	CLEANUP_STATE *state;
/* DESCRIPTION
/*	This module maintains queue file regions. Regions are created
/*	on-the-fly and can be reused multiple times. Each region
/*	structure consists of a file offset, a length (0 for an
/*	open-ended region at the end of the file), a write offset
/*	(maintained by the caller), and list linkage. Region
/*	boundaries are not enforced by this module. It is up to the
/*	caller to ensure that they stay within bounds.
/*
/*	cleanup_region_init() performs mandatory initialization and
/*	overlays an initial region structure over an already existing
/*	queue file. This function must not be called before the
/*	queue file is complete.
/*
/*	cleanup_region_open() opens an existing region or creates
/*	a new region that can accommodate at least the specified
/*	amount of space. A new region is an open-ended region at
/*	the end of the file; it must be closed (see next) before
/*	unrelated data can be appended to the same file.
/*
/*	cleanup_region_close() indicates that a region will not be
/*	updated further. With an open-ended region, the region's
/*	end is frozen just before the caller-maintained write offset.
/*	With a close-ended region, unused space (beginning at the
/*	caller-maintained write offset) may be returned to the free
/*	pool.
/*
/*	cleanup_region_return() returns a list of regions to the
/*	free pool, and returns a null pointer. To avoid fragmentation,
/*	adjacent free regions may be coalesced together.
/*
/*	cleanup_region_done() destroys all in-memory information
/*	that was allocated for administering queue file regions.
/*
/*	Arguments:
/* .IP state
/*	Queue file and message processing state. This state is
/*	updated as records are processed and as errors happen.
/* .IP space_needed
/*	The minimum region size needed.
/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* AUTHOR(S)
/*	Wietse Venema
/*	IBM T.J. Watson Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*--*/

/* System library. */

#include <sys_defs.h>
#include <sys/stat.h>

/* Utility library. */

#include <msg.h>
#include <mymalloc.h>
#include <warn_stat.h>

/* Application-specific. */

#include <cleanup.h>

94 /* cleanup_region_alloc - create queue file region */
95 
cleanup_region_alloc(off_t start,off_t len)96 static CLEANUP_REGION *cleanup_region_alloc(off_t start, off_t len)
97 {
98     CLEANUP_REGION *rp;
99 
100     rp = (CLEANUP_REGION *) mymalloc(sizeof(*rp));
101     rp->write_offs = rp->start = start;
102     rp->len = len;
103     rp->next = 0;
104 
105     return (rp);
106 }
107 
108 /* cleanup_region_free - destroy region list */
109 
cleanup_region_free(CLEANUP_REGION * regions)110 static CLEANUP_REGION *cleanup_region_free(CLEANUP_REGION *regions)
111 {
112     CLEANUP_REGION *rp;
113     CLEANUP_REGION *next;
114 
115     for (rp = regions; rp != 0; rp = next) {
116 	next = rp->next;
117 	myfree((void *) rp);
118     }
119     return (0);
120 }
121 
122 /* cleanup_region_init - create initial region overlay */
123 
cleanup_region_init(CLEANUP_STATE * state)124 void    cleanup_region_init(CLEANUP_STATE *state)
125 {
126     const char *myname = "cleanup_region_init";
127 
128     /*
129      * Sanity check.
130      */
131     if (state->free_regions != 0 || state->body_regions != 0)
132 	msg_panic("%s: repeated call", myname);
133 
134     /*
135      * Craft the first regions on the fly, from circumstantial evidence.
136      */
137     state->body_regions =
138 	cleanup_region_alloc(state->append_hdr_pt_target,
139 			  state->xtra_offset - state->append_hdr_pt_target);
140     if (msg_verbose)
141 	msg_info("%s: body start %ld len %ld",
142 		 myname, (long) state->body_regions->start, (long) state->body_regions->len);
143 }
144 
145 /* cleanup_region_open - open existing region or create new region */
146 
cleanup_region_open(CLEANUP_STATE * state,ssize_t len)147 CLEANUP_REGION *cleanup_region_open(CLEANUP_STATE *state, ssize_t len)
148 {
149     const char *myname = "cleanup_region_open";
150     CLEANUP_REGION **rpp;
151     CLEANUP_REGION *rp;
152     struct stat st;
153 
154     /*
155      * Find the first region that is large enough, or create a new region.
156      */
157     for (rpp = &state->free_regions; /* see below */ ; rpp = &(rp->next)) {
158 
159 	/*
160 	 * Create an open-ended region at the end of the queue file. We
161 	 * freeze the region size after we stop writing to it. XXX Assume
162 	 * that fstat() returns a file size that is never less than the file
163 	 * append offset. It is not a problem if fstat() returns a larger
164 	 * result; we would just waste some space.
165 	 */
166 	if ((rp = *rpp) == 0) {
167 	    if (fstat(vstream_fileno(state->dst), &st) < 0)
168 		msg_fatal("%s: fstat file %s: %m", myname, cleanup_path);
169 	    rp = cleanup_region_alloc(st.st_size, 0);
170 	    break;
171 	}
172 
173 	/*
174 	 * Reuse an existing region.
175 	 */
176 	if (rp->len >= len) {
177 	    (*rpp) = rp->next;
178 	    rp->next = 0;
179 	    rp->write_offs = rp->start;
180 	    break;
181 	}
182 
183 	/*
184 	 * Skip a too small region.
185 	 */
186 	if (msg_verbose)
187 	    msg_info("%s: skip start %ld len %ld < %ld",
188 		     myname, (long) rp->start, (long) rp->len, (long) len);
189     }
190     if (msg_verbose)
191 	msg_info("%s: done start %ld len %ld",
192 		 myname, (long) rp->start, (long) rp->len);
193     return (rp);
194 }
195 
196 /* cleanup_region_close - freeze queue file region size */
197 
cleanup_region_close(CLEANUP_STATE * unused_state,CLEANUP_REGION * rp)198 void    cleanup_region_close(CLEANUP_STATE *unused_state, CLEANUP_REGION *rp)
199 {
200     const char *myname = "cleanup_region_close";
201 
202     /*
203      * If this region is still open ended, freeze the size. If this region is
204      * closed, some future version of this routine may shrink the size and
205      * return the unused portion to the free pool.
206      */
207     if (rp->len == 0)
208 	rp->len = rp->write_offs - rp->start;
209     if (msg_verbose)
210 	msg_info("%s: freeze start %ld len %ld",
211 		 myname, (long) rp->start, (long) rp->len);
212 }
213 
214 /* cleanup_region_return - return region list to free pool */
215 
cleanup_region_return(CLEANUP_STATE * state,CLEANUP_REGION * rp)216 CLEANUP_REGION *cleanup_region_return(CLEANUP_STATE *state, CLEANUP_REGION *rp)
217 {
218     CLEANUP_REGION **rpp;
219 
220     for (rpp = &state->free_regions; (*rpp) != 0; rpp = &(*rpp)->next)
221 	 /* void */ ;
222     *rpp = rp;
223     return (0);
224 }
225 
226 /* cleanup_region_done - destroy region metadata */
227 
cleanup_region_done(CLEANUP_STATE * state)228 void    cleanup_region_done(CLEANUP_STATE *state)
229 {
230     if (state->free_regions != 0)
231 	state->free_regions = cleanup_region_free(state->free_regions);
232     if (state->body_regions != 0)
233 	state->body_regions = cleanup_region_free(state->body_regions);
234 }
235