/*	$NetBSD: qmgr.h,v 1.3 2020/03/18 19:05:19 christos Exp $	*/

/*++
/* NAME
/*	qmgr 3h
/* SUMMARY
/*	queue manager data structures
/* SYNOPSIS
/*	#include "qmgr.h"
/* DESCRIPTION
/* .nf

 /*
  * System library.
  */
#include <sys/time.h>
#include <time.h>

 /*
  * Utility library.
  */
#include <vstream.h>
#include <scan_dir.h>

 /*
  * Global library.
  */
#include <recipient_list.h>
#include <dsn.h>

 /*
  * The queue manager is built around lots of mutually-referring structures.
  * These typedefs save some typing.
  */
typedef struct QMGR_TRANSPORT QMGR_TRANSPORT;
typedef struct QMGR_QUEUE QMGR_QUEUE;
typedef struct QMGR_ENTRY QMGR_ENTRY;
typedef struct QMGR_MESSAGE QMGR_MESSAGE;
typedef struct QMGR_JOB QMGR_JOB;
typedef struct QMGR_PEER QMGR_PEER;
typedef struct QMGR_TRANSPORT_LIST QMGR_TRANSPORT_LIST;
typedef struct QMGR_QUEUE_LIST QMGR_QUEUE_LIST;
typedef struct QMGR_ENTRY_LIST QMGR_ENTRY_LIST;
typedef struct QMGR_JOB_LIST QMGR_JOB_LIST;
typedef struct QMGR_PEER_LIST QMGR_PEER_LIST;
typedef struct QMGR_SCAN QMGR_SCAN;
typedef struct QMGR_FEEDBACK QMGR_FEEDBACK;

 /*
  * Hairy macros to update doubly-linked lists.
  */
#define QMGR_LIST_ROTATE(head, object, peers) { \
    head.next->peers.prev = head.prev; \
    head.prev->peers.next = head.next; \
    head.next = object->peers.next; \
    head.next->peers.prev = 0; \
    head.prev = object; \
    object->peers.next = 0; \
}

#define QMGR_LIST_UNLINK(head, type, object, peers) { \
    type   _next = object->peers.next; \
    type   _prev = object->peers.prev; \
    if (_prev) _prev->peers.next = _next; \
	else head.next = _next; \
    if (_next) _next->peers.prev = _prev; \
	else head.prev = _prev; \
    object->peers.next = object->peers.prev = 0; \
}

#define QMGR_LIST_LINK(head, pred, object, succ, peers) { \
    object->peers.prev = pred; \
    object->peers.next = succ; \
    if (pred) pred->peers.next = object; \
    else head.next = object; \
    if (succ) succ->peers.prev = object; \
    else head.prev = object; \
}

#define QMGR_LIST_PREPEND(head, object, peers) { \
    object->peers.next = head.next; \
    object->peers.prev = 0; \
    if (head.next) { \
	head.next->peers.prev = object; \
    } else { \
	head.prev = object; \
    } \
    head.next = object; \
}

#define QMGR_LIST_APPEND(head, object, peers) { \
    object->peers.prev = head.prev; \
    object->peers.next = 0; \
    if (head.prev) { \
	head.prev->peers.next = object; \
    } else { \
	head.next = object; \
    } \
    head.prev = object; \
}

#define QMGR_LIST_INIT(head) { \
    head.prev = 0; \
    head.next = 0; \
}
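
 /*
  * Usage sketch (illustrative only, not part of the original header). The
  * "peers" argument names the QMGR_xxx_LIST member inside the object that
  * provides the next/prev linkage for one particular list. Assuming a
  * QMGR_PEER "peer" and a QMGR_ENTRY "entry" as declared further below:
  *
  *	QMGR_LIST_INIT(peer->entry_list);
  *	QMGR_LIST_APPEND(peer->entry_list, entry, peer_peers);
  *	...
  *	QMGR_LIST_UNLINK(peer->entry_list, QMGR_ENTRY *, entry, peer_peers);
  */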

 /*
  * Transports are looked up by name (when we have resolved a message), or
  * round-robin wise (when we want to distribute resources fairly).
  */
struct QMGR_TRANSPORT_LIST {
    QMGR_TRANSPORT *next;
    QMGR_TRANSPORT *prev;
};

extern struct HTABLE *qmgr_transport_byname;	/* transport by name */
extern QMGR_TRANSPORT_LIST qmgr_transport_list;	/* transports, round robin */

 /*
  * Delivery agents provide feedback, as hints that Postfix should expend
  * more or fewer resources on a specific destination domain. The main.cf
  * file specifies how feedback affects delivery concurrency: add/subtract a
  * constant, a ratio of constants, or a constant divided by the delivery
  * concurrency; and it specifies how much feedback must accumulate between
  * concurrency updates.
  */
struct QMGR_FEEDBACK {
    int     hysteresis;			/* to pass, need to be this tall */
    double  base;			/* pre-computed from main.cf */
    int     index;			/* none, window, sqrt(window) */
};

#define QMGR_FEEDBACK_IDX_NONE		0	/* no window dependence */
#define QMGR_FEEDBACK_IDX_WIN		1	/* 1/window dependence */
#if 0
#define QMGR_FEEDBACK_IDX_SQRT_WIN	2	/* 1/sqrt(window) dependence */
#endif

#ifdef QMGR_FEEDBACK_IDX_SQRT_WIN
#include <math.h>
#endif

extern void qmgr_feedback_init(QMGR_FEEDBACK *, const char *, const char *, const char *, const char *);

#ifndef QMGR_FEEDBACK_IDX_SQRT_WIN
#define QMGR_FEEDBACK_VAL(fb, win) \
    ((fb).index == QMGR_FEEDBACK_IDX_NONE ? (fb).base : (fb).base / (win))
#else
#define QMGR_FEEDBACK_VAL(fb, win) \
    ((fb).index == QMGR_FEEDBACK_IDX_NONE ? (fb).base : \
    (fb).index == QMGR_FEEDBACK_IDX_WIN ? (fb).base / (win) : \
    (fb).base / sqrt(win))
#endif
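
 /*
  * Illustrative sketch (not part of the original header, and simplified
  * relative to the real concurrency scheduler): how positive feedback
  * might be accumulated after a successful delivery, assuming a ready
  * queue "queue" whose transport carries the feedback settings:
  *
  *	queue->success += QMGR_FEEDBACK_VAL(queue->transport->pos_feedback,
  *					    queue->window);
  *	if (queue->success >= queue->transport->pos_feedback.hysteresis) {
  *	    queue->window += 1;			/* open the window slowly */
  *	    queue->success = 0;
  *	}
  */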

 /*
  * Each transport (local, smtp-out, bounce) can have one queue per next hop
  * name. Queues are looked up by next hop name (when we have resolved a
  * message destination), or round-robin wise (when we want to deliver
  * messages fairly).
  */
struct QMGR_QUEUE_LIST {
    QMGR_QUEUE *next;
    QMGR_QUEUE *prev;
};

struct QMGR_JOB_LIST {
    QMGR_JOB *next;
    QMGR_JOB *prev;
};

struct QMGR_TRANSPORT {
    int     flags;			/* blocked, etc. */
    int     pending;			/* incomplete DA connections */
    char   *name;			/* transport name */
    int     dest_concurrency_limit;	/* concurrency per domain */
    int     init_dest_concurrency;	/* init. per-domain concurrency */
    int     recipient_limit;		/* recipients per transaction */
    int     rcpt_per_stack;		/* extra slots reserved for jobs put
					 * on the job stack */
    int     rcpt_unused;		/* available in-core recipient slots */
    int     refill_limit;		/* recipient batch size for message
					 * refill */
    int     refill_delay;		/* delay before message refill */
    int     slot_cost;			/* cost of new preemption slot (# of
					 * selected entries) */
    int     slot_loan;			/* preemption boost offset and */
    int     slot_loan_factor;		/* factor, see qmgr_job_preempt() */
    int     min_slots;			/* when preemption can take effect at
					 * all */
    struct HTABLE *queue_byname;	/* queues indexed by domain */
    QMGR_QUEUE_LIST queue_list;		/* queues, round robin order */
    struct HTABLE *job_byname;		/* jobs indexed by queue id */
    QMGR_JOB_LIST job_list;		/* list of message jobs (1 per
					 * message) ordered by scheduler */
    QMGR_JOB_LIST job_bytime;		/* jobs ordered by time since queued */
    QMGR_JOB *job_current;		/* keeps track of the current job */
    QMGR_JOB *job_next_unread;		/* next job with unread recipients */
    QMGR_JOB *candidate_cache;		/* cached result from
					 * qmgr_job_candidate() */
    QMGR_JOB *candidate_cache_current;	/* current job tied to the candidate */
    time_t  candidate_cache_time;	/* when candidate_cache was last
					 * updated */
    int     blocker_tag;		/* for marking blocker jobs */
    QMGR_TRANSPORT_LIST peers;		/* linkage */
    DSN    *dsn;			/* why unavailable */
    QMGR_FEEDBACK pos_feedback;		/* positive feedback control */
    QMGR_FEEDBACK neg_feedback;		/* negative feedback control */
    int     fail_cohort_limit;		/* flow shutdown control */
    int     xport_rate_delay;		/* suspend per delivery */
    int     rate_delay;			/* suspend per delivery */
};

#define QMGR_TRANSPORT_STAT_DEAD	(1<<1)
#define QMGR_TRANSPORT_STAT_RATE_LOCK	(1<<2)

typedef void (*QMGR_TRANSPORT_ALLOC_NOTIFY) (QMGR_TRANSPORT *, VSTREAM *);
extern QMGR_TRANSPORT *qmgr_transport_select(void);
extern void qmgr_transport_alloc(QMGR_TRANSPORT *, QMGR_TRANSPORT_ALLOC_NOTIFY);
extern void qmgr_transport_throttle(QMGR_TRANSPORT *, DSN *);
extern void qmgr_transport_unthrottle(QMGR_TRANSPORT *);
extern QMGR_TRANSPORT *qmgr_transport_create(const char *);
extern QMGR_TRANSPORT *qmgr_transport_find(const char *);

#define QMGR_TRANSPORT_THROTTLED(t)	((t)->flags & QMGR_TRANSPORT_STAT_DEAD)
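
 /*
  * Illustrative sketch (not part of the original header): the usual
  * find-or-create pattern for a transport, followed by a throttle check
  * before scheduling more work. "name" is a hypothetical variable:
  *
  *	QMGR_TRANSPORT *transport;
  *
  *	if ((transport = qmgr_transport_find(name)) == 0)
  *	    transport = qmgr_transport_create(name);
  *	if (QMGR_TRANSPORT_THROTTLED(transport))
  *	    ...defer work for this transport until it is unthrottled...
  */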

 /*
  * Each next hop (e.g., a domain name) has its own queue of pending message
  * transactions. The "todo" queue contains messages that are to be delivered
  * to this next hop. When a message is elected for transmission, it is moved
  * from the "todo" queue to the "busy" queue. Messages are taken from the
  * "todo" queue in round-robin order.
  */
struct QMGR_ENTRY_LIST {
    QMGR_ENTRY *next;
    QMGR_ENTRY *prev;
};

struct QMGR_QUEUE {
    int     dflags;			/* delivery request options */
    time_t  last_done;			/* last delivery completion */
    char   *name;			/* domain name or address */
    char   *nexthop;			/* domain name */
    int     todo_refcount;		/* queue entries (todo list) */
    int     busy_refcount;		/* queue entries (busy list) */
    int     window;			/* slow open algorithm */
    double  success;			/* accumulated positive feedback */
    double  failure;			/* accumulated negative feedback */
    double  fail_cohorts;		/* pseudo-cohort failure count */
    QMGR_TRANSPORT *transport;		/* transport linkage */
    QMGR_ENTRY_LIST todo;		/* todo queue entries */
    QMGR_ENTRY_LIST busy;		/* messages on the wire */
    QMGR_QUEUE_LIST peers;		/* neighbor queues */
    DSN    *dsn;			/* why unavailable */
    time_t  clog_time_to_warn;		/* time of last warning */
    int     blocker_tag;		/* tagged if blocks job list */
};

#define	QMGR_QUEUE_TODO	1		/* waiting for service */
#define QMGR_QUEUE_BUSY	2		/* recipients on the wire */

extern int qmgr_queue_count;

extern QMGR_QUEUE *qmgr_queue_create(QMGR_TRANSPORT *, const char *, const char *);
extern void qmgr_queue_done(QMGR_QUEUE *);
extern void qmgr_queue_throttle(QMGR_QUEUE *, DSN *);
extern void qmgr_queue_unthrottle(QMGR_QUEUE *);
extern QMGR_QUEUE *qmgr_queue_find(QMGR_TRANSPORT *, const char *);
extern void qmgr_queue_suspend(QMGR_QUEUE *, int);

 /*
  * Exclusive queue states. Originally there were only two: "throttled" and
  * "not throttled". It was natural to encode these in the queue window size.
  * After 10 years it's not practical to rip out all the working code and
  * change representations, so we just clean up the names a little.
  *
  * Note: only the "ready" state can reach every state (including itself);
  * non-ready states can reach only the "ready" state. Other transitions are
  * forbidden, because they would result in dangling event handlers.
  */
#define QMGR_QUEUE_STAT_THROTTLED	0	/* back-off timer */
#define QMGR_QUEUE_STAT_SUSPENDED	-1	/* voluntary delay timer */
#define QMGR_QUEUE_STAT_SAVED		-2	/* delayed cleanup timer */
#define QMGR_QUEUE_STAT_BAD		-3	/* can't happen */

#define QMGR_QUEUE_READY(q)	((q)->window > 0)
#define QMGR_QUEUE_THROTTLED(q)	((q)->window == QMGR_QUEUE_STAT_THROTTLED)
#define QMGR_QUEUE_SUSPENDED(q)	((q)->window == QMGR_QUEUE_STAT_SUSPENDED)
#define QMGR_QUEUE_SAVED(q)	((q)->window == QMGR_QUEUE_STAT_SAVED)
#define QMGR_QUEUE_BAD(q)	((q)->window <= QMGR_QUEUE_STAT_BAD)

#define QMGR_QUEUE_STATUS(q) ( \
	    QMGR_QUEUE_READY(q) ? "ready" : \
	    QMGR_QUEUE_THROTTLED(q) ? "throttled" : \
	    QMGR_QUEUE_SUSPENDED(q) ? "suspended" : \
	    QMGR_QUEUE_SAVED(q) ? "saved" : \
	    "invalid queue status" \
	)
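
 /*
  * Illustrative sketch (not part of the original header): deciding whether
  * a queue may receive new work, and logging its symbolic state otherwise.
  * msg_info() is assumed to come from the Postfix msg(3) utility module;
  * "queue" is a hypothetical variable:
  *
  *	if (!QMGR_QUEUE_READY(queue))
  *	    msg_info("skipping %s/%s: %s", queue->transport->name,
  *		     queue->name, QMGR_QUEUE_STATUS(queue));
  */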

 /*
  * Structure of one next-hop queue entry. In order to save some copying
  * effort we allow multiple recipients per transaction.
  */
struct QMGR_ENTRY {
    VSTREAM *stream;			/* delivery process */
    QMGR_MESSAGE *message;		/* message info */
    RECIPIENT_LIST rcpt_list;		/* as many as it takes */
    QMGR_QUEUE *queue;			/* parent linkage */
    QMGR_PEER *peer;			/* parent linkage */
    QMGR_ENTRY_LIST queue_peers;	/* per queue neighbor entries */
    QMGR_ENTRY_LIST peer_peers;		/* per peer neighbor entries */
};

extern QMGR_ENTRY *qmgr_entry_select(QMGR_PEER *);
extern void qmgr_entry_unselect(QMGR_ENTRY *);
extern void qmgr_entry_move_todo(QMGR_QUEUE *, QMGR_ENTRY *);
extern void qmgr_entry_done(QMGR_ENTRY *, int);
extern QMGR_ENTRY *qmgr_entry_create(QMGR_PEER *, QMGR_MESSAGE *);

 /*
  * All common in-core information about a message is kept here. When all
  * recipients have been tried the message file is linked to the "deferred"
  * queue (some hosts not reachable), to the "bounce" queue (some recipients
  * were rejected), and is then removed from the "active" queue.
  */
struct QMGR_MESSAGE {
    int     flags;			/* delivery problems */
    int     qflags;			/* queuing flags */
    int     tflags;			/* tracing flags */
    long    tflags_offset;		/* offset for killing */
    int     rflags;			/* queue file read flags */
    VSTREAM *fp;			/* open queue file or null */
    int     refcount;			/* queue entries */
    int     single_rcpt;		/* send one rcpt at a time */
    struct timeval arrival_time;	/* start of receive transaction */
    time_t  create_time;		/* queue file create time */
    struct timeval active_time;		/* time of entry into active queue */
    time_t  queued_time;		/* sanitized time when moved to the
					 * active queue */
    time_t  refill_time;		/* sanitized time of last message
					 * refill */
    long    warn_offset;		/* warning bounce flag offset */
    time_t  warn_time;			/* time next warning to be sent */
    long    data_offset;		/* data seek offset */
    char   *queue_name;			/* queue name */
    char   *queue_id;			/* queue file */
    char   *encoding;			/* content encoding */
    char   *sender;			/* complete address */
    char   *dsn_envid;			/* DSN envelope ID */
    int     dsn_ret;			/* DSN headers/full */
    int     smtputf8;			/* requires unicode */
    char   *verp_delims;		/* VERP delimiters */
    char   *filter_xport;		/* filtering transport */
    char   *inspect_xport;		/* inspecting transport */
    char   *redirect_addr;		/* info@spammer.tld */
    long    data_size;			/* data segment size */
    long    cont_length;		/* message content length */
    long    rcpt_offset;		/* more recipients here */
    char   *client_name;		/* client hostname */
    char   *client_addr;		/* client address */
    char   *client_port;		/* client port */
    char   *client_proto;		/* client protocol */
    char   *client_helo;		/* helo parameter */
    char   *sasl_method;		/* SASL method */
    char   *sasl_username;		/* SASL user name */
    char   *sasl_sender;		/* SASL sender */
    char   *log_ident;			/* up-stream queue ID */
    char   *rewrite_context;		/* address qualification */
    RECIPIENT_LIST rcpt_list;		/* complete addresses */
    int     rcpt_count;			/* used recipient slots */
    int     rcpt_limit;			/* maximum read in-core */
    int     rcpt_unread;		/* # of recipients left in queue file */
    QMGR_JOB_LIST job_list;		/* jobs delivering this message (1
					 * per transport) */
};

 /*
  * Flags 0-15 are reserved for qmgr_user.h.
  */
#define QMGR_READ_FLAG_SEEN_ALL_NON_RCPT	(1<<16)

#define QMGR_MESSAGE_LOCKED	((QMGR_MESSAGE *) 1)

extern int qmgr_message_count;
extern int qmgr_recipient_count;
extern int qmgr_vrfy_pend_count;

extern void qmgr_message_free(QMGR_MESSAGE *);
extern void qmgr_message_update_warn(QMGR_MESSAGE *);
extern void qmgr_message_kill_record(QMGR_MESSAGE *, long);
extern QMGR_MESSAGE *qmgr_message_alloc(const char *, const char *, int, mode_t);
extern QMGR_MESSAGE *qmgr_message_realloc(QMGR_MESSAGE *);

#define QMGR_MSG_STATS(stats, message) \
    MSG_STATS_INIT2(stats, \
                    incoming_arrival, message->arrival_time, \
                    active_arrival, message->active_time)
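
 /*
  * Illustrative sketch (not part of the original header): passing the
  * message timestamps to the delivery status reporting calls. defer_append()
  * is assumed to come from the global library's defer(3) interface, and
  * "recipient" and "dsn" are hypothetical variables:
  *
  *	MSG_STATS stats;
  *
  *	message->flags |= defer_append(message->tflags, message->queue_id,
  *				       QMGR_MSG_STATS(&stats, message),
  *				       recipient, "none", dsn);
  */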

 /*
  * Sometimes it's required to access the transport queues and entries on a
  * per-message basis. That's what the QMGR_JOB structure is for - it groups
  * all per-message information within each transport using a list of
  * QMGR_PEER structures. These structures in turn are the per-message
  * counterparts of the QMGR_QUEUE structures, and they list all per-message
  * QMGR_ENTRY structures.
  */
struct QMGR_PEER_LIST {
    QMGR_PEER *next;
    QMGR_PEER *prev;
};

struct QMGR_JOB {
    QMGR_MESSAGE *message;		/* message delivered by this job */
    QMGR_TRANSPORT *transport;		/* transport this job belongs to */
    QMGR_JOB_LIST message_peers;	/* per message neighbor linkage */
    QMGR_JOB_LIST transport_peers;	/* per transport neighbor linkage */
    QMGR_JOB_LIST time_peers;		/* by time neighbor linkage */
    QMGR_JOB *stack_parent;		/* stack parent */
    QMGR_JOB_LIST stack_children;	/* all stack children */
    QMGR_JOB_LIST stack_siblings;	/* stack children linkage */
    int     stack_level;		/* job stack nesting level (-1 means
					 * it's not on the lists at all) */
    int     blocker_tag;		/* tagged if blocks the job list */
    struct HTABLE *peer_byname;		/* message job peers, indexed by
					 * domain */
    QMGR_PEER_LIST peer_list;		/* list of message job peers */
    int     slots_used;			/* slots used during preemption */
    int     slots_available;		/* slots available for preemption (in
					 * multiples of slot_cost) */
    int     selected_entries;		/* # of entries selected for delivery
					 * so far */
    int     read_entries;		/* # of entries read in-core so far */
    int     rcpt_count;			/* used recipient slots */
    int     rcpt_limit;			/* available recipient slots */
};

struct QMGR_PEER {
    QMGR_JOB *job;			/* job handling this peer */
    QMGR_QUEUE *queue;			/* queue corresponding with this peer */
    int     refcount;			/* peer entries */
    QMGR_ENTRY_LIST entry_list;		/* todo message entries queued for
					 * this peer */
    QMGR_PEER_LIST peers;		/* neighbor linkage */
};

extern QMGR_ENTRY *qmgr_job_entry_select(QMGR_TRANSPORT *);
extern QMGR_PEER *qmgr_peer_select(QMGR_JOB *);
extern void qmgr_job_blocker_update(QMGR_QUEUE *);

extern QMGR_JOB *qmgr_job_obtain(QMGR_MESSAGE *, QMGR_TRANSPORT *);
extern void qmgr_job_free(QMGR_JOB *);
extern void qmgr_job_move_limits(QMGR_JOB *);

extern QMGR_PEER *qmgr_peer_create(QMGR_JOB *, QMGR_QUEUE *);
extern QMGR_PEER *qmgr_peer_find(QMGR_JOB *, QMGR_QUEUE *);
extern QMGR_PEER *qmgr_peer_obtain(QMGR_JOB *, QMGR_QUEUE *);
extern void qmgr_peer_free(QMGR_PEER *);
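
 /*
  * Illustrative sketch (not part of the original header): entry selection
  * descends from transport to job to peer to entry. "transport" is a
  * hypothetical variable:
  *
  *	QMGR_ENTRY *entry;
  *
  *	if ((entry = qmgr_job_entry_select(transport)) != 0)
  *	    ...hand entry->rcpt_list to a delivery agent; when the agent
  *	    reports back, qmgr_entry_done(entry, QMGR_QUEUE_BUSY) retires
  *	    the entry...
  *
  * Internally, qmgr_job_entry_select() picks the current (or a preempting)
  * job, then uses qmgr_peer_select() and qmgr_entry_select() to find the
  * next deliverable peer and entry within that job.
  */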

 /*
  * qmgr_defer.c
  */
extern void qmgr_defer_transport(QMGR_TRANSPORT *, DSN *);
extern void qmgr_defer_todo(QMGR_QUEUE *, DSN *);
extern void qmgr_defer_recipient(QMGR_MESSAGE *, RECIPIENT *, DSN *);

 /*
  * qmgr_bounce.c
  */
extern void qmgr_bounce_recipient(QMGR_MESSAGE *, RECIPIENT *, DSN *);

 /*
  * qmgr_deliver.c
  */
extern int qmgr_deliver_concurrency;
extern void qmgr_deliver(QMGR_TRANSPORT *, VSTREAM *);

 /*
  * qmgr_active.c
  */
extern int qmgr_active_feed(QMGR_SCAN *, const char *);
extern void qmgr_active_drain(void);
extern void qmgr_active_done(QMGR_MESSAGE *);

 /*
  * qmgr_move.c
  */
extern void qmgr_move(const char *, const char *, time_t);

 /*
  * qmgr_enable.c
  */
extern void qmgr_enable_all(void);
extern void qmgr_enable_transport(QMGR_TRANSPORT *);
extern void qmgr_enable_queue(QMGR_QUEUE *);

 /*
  * Queue scan context.
  */
struct QMGR_SCAN {
    char   *queue;			/* queue name */
    int     flags;			/* private, this run */
    int     nflags;			/* private, next run */
    struct SCAN_DIR *handle;		/* scan */
};

 /*
  * Flags that control queue scans or destination selection. These are
  * similar to the QMGR_REQ_XXX request codes.
  */
#define QMGR_SCAN_START	(1<<0)		/* start now/restart when done */
#define QMGR_SCAN_ALL	(1<<1)		/* all queue file time stamps */
#define QMGR_FLUSH_ONCE	(1<<2)		/* unthrottle once */
#define QMGR_FLUSH_DFXP	(1<<3)		/* override defer_transports */
#define QMGR_FLUSH_EACH	(1<<4)		/* unthrottle per message */
#define QMGR_FORCE_EXPIRE (1<<5)	/* force-defer and force-expire */

 /*
  * qmgr_scan.c
  */
extern QMGR_SCAN *qmgr_scan_create(const char *);
extern void qmgr_scan_request(QMGR_SCAN *, int);
extern char *qmgr_scan_next(QMGR_SCAN *);
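
 /*
  * Illustrative sketch (not part of the original header): requesting a
  * full scan of a queue directory and feeding the results into the active
  * queue. MAIL_QUEUE_DEFERRED is assumed to come from <mail_queue.h>:
  *
  *	QMGR_SCAN *scan = qmgr_scan_create(MAIL_QUEUE_DEFERRED);
  *	char   *queue_id;
  *
  *	qmgr_scan_request(scan, QMGR_SCAN_START | QMGR_SCAN_ALL);
  *	while ((queue_id = qmgr_scan_next(scan)) != 0)
  *	    (void) qmgr_active_feed(scan, queue_id);
  */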

 /*
  * qmgr_error.c
  */
extern QMGR_TRANSPORT *qmgr_error_transport(const char *);
extern QMGR_QUEUE *qmgr_error_queue(const char *, DSN *);
extern char *qmgr_error_nexthop(DSN *);

/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* AUTHOR(S)
/*	Wietse Venema
/*	IBM T.J. Watson Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*
/*	Wietse Venema
/*	Google, Inc.
/*	111 8th Avenue
/*	New York, NY 10011, USA
/*
/*	Preemptive scheduler enhancements:
/*	Patrik Rak
/*	Modra 6
/*	155 00, Prague, Czech Republic
/*--*/
549