/*	$OpenBSD: queue.c,v 1.121 2012/07/09 09:57:53 gilles Exp $	*/

/*
 * Copyright (c) 2008 Gilles Chehade <gilles@openbsd.org>
 * Copyright (c) 2008 Pierre-Yves Ritschard <pyr@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/stat.h>

#include <err.h>
#include <event.h>
#include <imsg.h>
#include <libgen.h>
#include <pwd.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "smtpd.h"
#include "log.h"

static void queue_imsg(struct imsgev *, struct imsg *);
static void queue_pass_to_scheduler(struct imsgev *, struct imsg *);
static void queue_shutdown(void);
static void queue_sig_handler(int, short, void *);

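/*
 * Dispatch imsgs received from the other smtpd processes: create, commit
 * and remove incoming messages on behalf of the SMTP process, store
 * envelopes submitted by the lookup process, hand message fds to the MTA,
 * and forward scheduling-related imsgs to the scheduler.
 */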
static void
queue_imsg(struct imsgev *iev, struct imsg *imsg)
{
	struct submit_status	 ss;
	struct envelope		*e;
	struct mta_batch	*mta_batch;
	int			 fd, ret;

	log_imsg(PROC_QUEUE, iev->proc, imsg);

	if (iev->proc == PROC_SMTP) {
		e = imsg->data;

		switch (imsg->hdr.type) {
		case IMSG_QUEUE_CREATE_MESSAGE:
			ss.id = e->session_id;
			ss.code = 250;
			ss.u.msgid = 0;
			ret = queue_message_create(&ss.u.msgid);
			if (ret == 0)
				ss.code = 421;
			imsg_compose_event(iev, IMSG_QUEUE_CREATE_MESSAGE, 0, 0, -1,
			    &ss, sizeof ss);
			return;

		case IMSG_QUEUE_REMOVE_MESSAGE:
			queue_message_incoming_delete(evpid_to_msgid(e->id));
			return;

		case IMSG_QUEUE_COMMIT_MESSAGE:
			ss.id = e->session_id;
			ss.code = 250;
			if (queue_message_commit(evpid_to_msgid(e->id)))
				stat_increment(e->flags & DF_ENQUEUED ?
				    STATS_QUEUE_LOCAL : STATS_QUEUE_REMOTE);
			else
				ss.code = 421;

			imsg_compose_event(iev, IMSG_QUEUE_COMMIT_MESSAGE, 0, 0, -1,
			    &ss, sizeof ss);

			if (ss.code != 421)
				queue_pass_to_scheduler(iev, imsg);

			return;

		case IMSG_QUEUE_MESSAGE_FILE:
			ss.id = e->session_id;
			fd = queue_message_fd_rw(evpid_to_msgid(e->id));
			if (fd == -1)
				ss.code = 421;
			imsg_compose_event(iev, IMSG_QUEUE_MESSAGE_FILE, 0, 0, fd,
			    &ss, sizeof ss);
			return;

		case IMSG_SMTP_ENQUEUE:
			queue_pass_to_scheduler(iev, imsg);
			return;
		}
	}

	if (iev->proc == PROC_LKA) {
		e = imsg->data;

		switch (imsg->hdr.type) {
		case IMSG_QUEUE_SUBMIT_ENVELOPE:
			ss.id = e->session_id;
			ret = queue_envelope_create(e);
			if (ret == 0) {
				ss.code = 421;
				imsg_compose_event(env->sc_ievs[PROC_SMTP],
				    IMSG_QUEUE_TEMPFAIL, 0, 0, -1, &ss,
				    sizeof ss);
			}
			return;

		case IMSG_QUEUE_COMMIT_ENVELOPES:
			ss.id = e->session_id;
			ss.code = 250;
			imsg_compose_event(env->sc_ievs[PROC_SMTP],
			    IMSG_QUEUE_COMMIT_ENVELOPES, 0, 0, -1, &ss,
			    sizeof ss);
			return;
		}
	}

	if (iev->proc == PROC_SCHEDULER) {
		/* forward imsgs from scheduler on its behalf */
		imsg_compose_event(env->sc_ievs[imsg->hdr.peerid], imsg->hdr.type,
		    0, imsg->hdr.pid, imsg->fd, (char *)imsg->data,
		    imsg->hdr.len - sizeof imsg->hdr);
		return;
	}

	if (iev->proc == PROC_MTA) {
		switch (imsg->hdr.type) {
		case IMSG_QUEUE_MESSAGE_FD:
			mta_batch = imsg->data;
			fd = queue_message_fd_r(mta_batch->msgid);
			imsg_compose_event(iev, IMSG_QUEUE_MESSAGE_FD, 0, 0,
			    fd, mta_batch, sizeof *mta_batch);
			return;

		case IMSG_QUEUE_DELIVERY_OK:
		case IMSG_QUEUE_DELIVERY_TEMPFAIL:
		case IMSG_QUEUE_DELIVERY_PERMFAIL:
		case IMSG_BATCH_DONE:
			queue_pass_to_scheduler(iev, imsg);
			return;
		}
	}

	if (iev->proc == PROC_MDA) {
		switch (imsg->hdr.type) {
		case IMSG_QUEUE_DELIVERY_OK:
		case IMSG_QUEUE_DELIVERY_TEMPFAIL:
		case IMSG_QUEUE_DELIVERY_PERMFAIL:
		case IMSG_MDA_SESS_NEW:
			queue_pass_to_scheduler(iev, imsg);
			return;
		}
	}

	if (iev->proc == PROC_CONTROL) {
		switch (imsg->hdr.type) {
		case IMSG_QUEUE_PAUSE_MDA:
		case IMSG_QUEUE_PAUSE_MTA:
		case IMSG_QUEUE_RESUME_MDA:
		case IMSG_QUEUE_RESUME_MTA:
		case IMSG_QUEUE_SCHEDULE:
		case IMSG_QUEUE_REMOVE:
			queue_pass_to_scheduler(iev, imsg);
			return;
		}
	}

	if (iev->proc == PROC_PARENT) {
		switch (imsg->hdr.type) {
		case IMSG_CTL_VERBOSE:
			log_verbose(*(int *)imsg->data);
			queue_pass_to_scheduler(iev, imsg);
			return;
		}
	}

	errx(1, "queue_imsg: unexpected %s imsg", imsg_to_str(imsg->hdr.type));
}

static void
queue_pass_to_scheduler(struct imsgev *iev, struct imsg *imsg)
{
	imsg_compose_event(env->sc_ievs[PROC_SCHEDULER], imsg->hdr.type,
	    iev->proc, imsg->hdr.pid, imsg->fd, imsg->data,
	    imsg->hdr.len - sizeof imsg->hdr);
}

static void
queue_sig_handler(int sig, short event, void *p)
{
	switch (sig) {
	case SIGINT:
	case SIGTERM:
		queue_shutdown();
		break;
	default:
		fatalx("queue_sig_handler: unexpected signal");
	}
}

static void
queue_shutdown(void)
{
	log_info("queue handler exiting");
	_exit(0);
}

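/*
 * Fork the queue process: chroot to the spool directory, drop privileges
 * and enter the event loop, serving imsgs from the other smtpd processes.
 */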
pid_t
queue(void)
{
	pid_t		 pid;
	struct passwd	*pw;

	struct event	 ev_sigint;
	struct event	 ev_sigterm;

	struct peer peers[] = {
		{ PROC_PARENT,	imsg_dispatch },
		{ PROC_CONTROL,	imsg_dispatch },
		{ PROC_SMTP,	imsg_dispatch },
		{ PROC_MDA,	imsg_dispatch },
		{ PROC_MTA,	imsg_dispatch },
		{ PROC_LKA,	imsg_dispatch },
		{ PROC_SCHEDULER, imsg_dispatch }
	};

	switch (pid = fork()) {
	case -1:
		fatal("queue: cannot fork");
	case 0:
		break;
	default:
		return (pid);
	}

	purge_config(PURGE_EVERYTHING);

	pw = env->sc_pw;

	if (chroot(PATH_SPOOL) == -1)
		fatal("queue: chroot");
	if (chdir("/") == -1)
		fatal("queue: chdir(\"/\")");

	smtpd_process = PROC_QUEUE;
	setproctitle("%s", env->sc_title[smtpd_process]);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("queue: cannot drop privileges");

	imsg_callback = queue_imsg;
	event_init();

	signal_set(&ev_sigint, SIGINT, queue_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, queue_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/*
	 * queue opens fds for four purposes: smtp, mta, mda, and bounces.
	 * Therefore, use all available fd space and set the maxconn (=max
	 * session count for mta and mda) to a quarter of this value.
	 */
	fdlimit(1.0);
	if ((env->sc_maxconn = availdesc() / 4) < 1)
		fatalx("queue: fd starvation");

	config_pipes(peers, nitems(peers));
	config_peers(peers, nitems(peers));

	if (event_dispatch() < 0)
		fatal("event_dispatch");
	queue_shutdown();

	return (0);
}

void
queue_submit_envelope(struct envelope *ep)
{
	imsg_compose_event(env->sc_ievs[PROC_QUEUE],
	    IMSG_QUEUE_SUBMIT_ENVELOPE, 0, 0, -1,
	    ep, sizeof(*ep));
}

void
queue_commit_envelopes(struct envelope *ep)
{
	imsg_compose_event(env->sc_ievs[PROC_QUEUE],
	    IMSG_QUEUE_COMMIT_ENVELOPES, 0, 0, -1,
	    ep, sizeof(*ep));
}