/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Inter-Process Communication Message Facility.
 *
 * See os/ipc.c for a description of common IPC functionality.
 *
 * Resource controls
 * -----------------
 *
 * Control:	zone.max-msg-ids (rc_zone_msgmni)
 * Description:	Maximum number of message queue ids allowed a zone.
 *
 *   When msgget() is used to allocate a message queue, one id is
 *   allocated.  If the id allocation doesn't succeed, msgget() fails
 *   and errno is set to ENOSPC.  Upon successful msgctl(, IPC_RMID)
 *   the id is deallocated.
 *
 * Control:	project.max-msg-ids (rc_project_msgmni)
 * Description:	Maximum number of message queue ids allowed a project.
 *
 *   When msgget() is used to allocate a message queue, one id is
 *   allocated.  If the id allocation doesn't succeed, msgget() fails
 *   and errno is set to ENOSPC.  Upon successful msgctl(, IPC_RMID)
 *   the id is deallocated.
 *
 * Control:	process.max-msg-qbytes (rc_process_msgmnb)
 * Description:	Maximum number of bytes of messages on a message queue.
 *
 *   When msgget() successfully allocates a message queue, the minimum
 *   enforced value of this limit is used to initialize msg_qbytes.
 *
 * Control:	process.max-msg-messages (rc_process_msgtql)
 * Description:	Maximum number of messages on a message queue.
 *
 *   When msgget() successfully allocates a message queue, the minimum
 *   enforced value of this limit is used to initialize a per-queue
 *   limit on the number of messages.
 */
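/*
 * Illustrative userland sketch of how the id limits above surface to
 * applications (the flags and error handling are assumptions made for the
 * example only; see msgget(2) and msgctl(2)):
 *
 *	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
 *	if (id == -1 && errno == ENOSPC) {
 *		// zone.max-msg-ids or project.max-msg-ids was reached
 *	}
 *	...
 *	(void) msgctl(id, IPC_RMID, NULL);	// deallocates the id
 */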
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/ipc.h>
#include <sys/ipc_impl.h>
#include <sys/msg.h>
#include <sys/msg_impl.h>
#include <sys/list.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/project.h>
#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/policy.h>
#include <sys/zone.h>

#include <c2/audit.h>

/*
 * The following tunables are obsolete.  Though for compatibility we
 * still read and interpret msginfo_msgmnb, msginfo_msgmni, and
 * msginfo_msgtql (see os/project.c and os/rctl_proc.c), the preferred
 * mechanism for administering the IPC Message facility is through the
 * resource controls described at the top of this file.
 */
size_t	msginfo_msgmax = 2048;	/* (obsolete) */
size_t	msginfo_msgmnb = 4096;	/* (obsolete) */
int	msginfo_msgmni = 50;	/* (obsolete) */
int	msginfo_msgtql = 40;	/* (obsolete) */
int	msginfo_msgssz = 8;	/* (obsolete) */
int	msginfo_msgmap = 0;	/* (obsolete) */
ushort_t msginfo_msgseg = 1024;	/* (obsolete) */
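/*
 * For reference (illustrative, assumed values): the legacy tunables above
 * were historically set from /etc/system, e.g.
 *
 *	set msgsys:msginfo_msgmni = 128
 *	set msgsys:msginfo_msgmnb = 65536
 *
 * whereas the resource controls can be inspected and adjusted with
 * prctl(1), e.g. "prctl -n project.max-msg-ids -v 128 -r -i project <proj>".
 */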
extern rctl_hndl_t rc_zone_msgmni;
extern rctl_hndl_t rc_project_msgmni;
extern rctl_hndl_t rc_process_msgmnb;
extern rctl_hndl_t rc_process_msgtql;
static ipc_service_t *msq_svc;
static zone_key_t msg_zone_key;

static void msg_dtor(kipc_perm_t *);
static void msg_rmid(kipc_perm_t *);
static void msg_remove_zone(zoneid_t, void *);

/*
 * Module linkage information for the kernel.
 */
static ssize_t msgsys(int opcode, uintptr_t a0, uintptr_t a1, uintptr_t a2,
    uintptr_t a4, uintptr_t a5);

static struct sysent ipcmsg_sysent = {
    6,
#ifdef _LP64
    SE_ARGC | SE_NOUNLOAD | SE_64RVAL,
#else
    SE_ARGC | SE_NOUNLOAD | SE_32RVAL1,
#endif
    (int (*)())msgsys
};

#ifdef _SYSCALL32_IMPL
static ssize32_t msgsys32(int opcode, uint32_t a0, uint32_t a1, uint32_t a2,
    uint32_t a4, uint32_t a5);

static struct sysent ipcmsg_sysent32 = {
    6,
    SE_ARGC | SE_NOUNLOAD | SE_32RVAL1,
    (int (*)())msgsys32
};
#endif	/* _SYSCALL32_IMPL */

static struct modlsys modlsys = {
    &mod_syscallops, "System V message facility", &ipcmsg_sysent
};

#ifdef _SYSCALL32_IMPL
static struct modlsys modlsys32 = {
    &mod_syscallops32, "32-bit System V message facility", &ipcmsg_sysent32
};
#endif

/*
 * Big Theory statement for message queue correctness
 *
 * The msgrcv and msgsnd functions no longer use cv_broadcast to wake up
 * receivers who are waiting for an event.  Using the cv_broadcast method
 * resulted in negative scaling when the number of waiting receivers was
 * large (the thundering herd problem).  Instead, the receivers waiting to
 * receive a message are now linked in a queue-like fashion and awakened
 * one at a time in a controlled manner.
 *
 * Receivers can block on two different classes of waiting list:
 *    1) "sendwait" list, which is the more complex of the two.  The
 *       receiver will be awakened by a sender posting a new message.
 *       There are two types of "sendwait" list used:
 *       a) msg_wait_snd: handles all receivers who are looking for
 *          a message type >= 0, but were unable to locate a match.
 *
 *          slot 0: reserved for receivers that have designated they
 *                  will take any message type.
 *          rest:   consists of receivers requesting a specific type
 *                  that was not present.  The entries are hashed into
 *                  a bucket in an attempt to keep any list search
 *                  relatively short.
 *       b) msg_wait_snd_ngt: handles all receivers that have designated
 *          a negative message type.  Unlike msg_wait_snd, each hash
 *          bucket serves a range of negative message types (-1 to -5,
 *          -6 to -10 and so forth), and the last bucket is reserved for
 *          all the negative message types that hash outside of
 *          MSG_MAX_QNUM - 1.
 *          This is done to simplify the operation of locating a
 *          negative message type.
 *
 *    2) "copyout" list, where the receiver is awakened by another
 *       receiver after a message is copied out.  This is a linked list
 *       of waiters that are awakened one at a time.  Although the
 *       solution is not optimal, the complexity that would be added in
 *       for waking up the right entry far exceeds any potential payback
 *       (too many correctness and corner case issues).
 *
 * The lists are doubly linked.  In the case of the "sendwait" list, this
 * allows the thread to remove itself from the list without having to
 * traverse the list.  In the case of the "copyout" list it simply allows
 * us to use common functions with the "sendwait" list.
 *
 * To make sure receivers are not hung out to dry, we must guarantee:
 *    1. If any queued message matches any receiver, then at least one
 *       matching receiver must be processing the request.
 *    2. Blocking on the copyout queue is only temporary while messages
 *       are being copied out.  The process is guaranteed to wake up
 *       when it gets to the front of the queue (copyout is a FIFO).
 *
 * Rules for blocking and waking up:
 *    1. A receiver entering msgrcv must examine all messages for a match
 *       before blocking on a sendwait queue.
 *    2. If the receiver blocks because the message it chose is already
 *       being copied out, then when it wakes up it needs to start
 *       checking the messages from the beginning.
 *    3. Whenever a process returns from msgrcv for any reason, if it
 *       had attempted to copy a message or blocked waiting for a copy
 *       to complete, it needs to wake up the next receiver blocked on
 *       a copyout.
 *    4. When a message is sent, the sender selects a process waiting
 *       for that type of message.  This selection process rotates
 *       between receiver types of 0, negative and positive to prevent
 *       starvation of any one particular receiver type.
 *    5. The following are the scenarios for processes that are awakened
 *       by a msgsnd:
 *       a) The process finds the message and is able to copy
 *          it out.  Once complete, the process returns.
 *       b) The message that was sent that triggered the wakeup is no
 *          longer available (another process found the message first).
 *          We issue a wakeup on the copyout queue and then go back to
 *          sleep waiting for another matching message to be sent.
 *       c) The message that was supposed to be processed was
 *          already serviced by another process.  However, a different
 *          message is present which we can service.  The message
 *          is copied and the process returns.
 *       d) The message is found, but some sort of error occurs that
 *          prevents the message from being copied.  The receiver
 *          wakes up the next receiver that can service this message
 *          type and returns an error to the caller.
 *       e) The message is found, but it is marked as being copied
 *          out.  The receiver then goes to sleep on the copyout
 *          queue where it will be awakened again sometime in the future.
 *
 *    6. Whenever a message is found that matches the message type
 *       designated, but is being copied out, we have to block on the
 *       copyout queue.  After the copying process finishes the copyout,
 *       it must wake up (either directly or indirectly) all receivers
 *       who blocked on its copyout, so they are guaranteed a chance to
 *       examine the remaining messages.  This is implemented via a chain
 *       of wakeups: Y wakes X, who wakes Z, and so on.  The chain cannot
 *       be broken.  This leads to the following cases:
 *       a) When a receiver finishes copying the message (or encounters
 *          an error), the first entry on the copyout queue is woken up.
 *       b) When the receiver is woken up, it attempts to locate
 *          a message type match.
 *       c) If a matching message type is found and
 *          -- the MSG_RCVCOPY flag is not set, the message is
 *             marked for copying out.  Regardless of the copyout's
 *             success the next entry on the copyout queue is
 *             awakened and the operation is completed.
 *          -- MSG_RCVCOPY is set, we simply go back to sleep again
 *             on the copyout queue.
 *       d) If the message type is not found, then we wake up the next
 *          process on the copyout queue.
 */
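/*
 * Illustrative walk-through of the scheme above (bucket numbers are
 * placeholders; the real constants are MSG_MAX_QNUM and MSG_NEG_INTERVAL
 * from msg_impl.h):
 *
 *   - A receiver calling msgrcv(id, buf, sz, 0, 0) with nothing available
 *     parks on msg_wait_snd[0].
 *   - A receiver asking for type 42 parks on msg_wait_snd[hash(42)].
 *   - A receiver asking for type -7 parks on the msg_wait_snd_ngt[] bucket
 *     covering that range of negative types.
 *   - msgsnd() of a type 42 message wakes exactly one candidate, rotating
 *     through the msg_fnd_sndr[] selection routines (any type, specific
 *     type, negative type) so that no receiver class is starved.
 *   - A woken receiver that finds its message marked MSG_RCVCOPY sleeps on
 *     msg_cpy_block; whoever finishes that copy wakes the head of the
 *     list, which in turn wakes the next, preserving the wakeup chain.
 */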
static ulong_t msg_type_hash(long);
static int msgq_check_err(kmsqid_t *qp, int cvres);
static int msg_rcvq_sleep(list_t *, msgq_wakeup_t *, kmutex_t **,
    kmsqid_t *);
static int msg_copyout(kmsqid_t *, long, kmutex_t **, size_t *, size_t,
    struct msg *, struct ipcmsgbuf *, int);
static void msg_rcvq_wakeup_all(list_t *);
static void msg_wakeup_rdr(kmsqid_t *, msg_select_t **, long);
static msgq_wakeup_t *msg_fnd_any_snd(kmsqid_t *, int, long);
static msgq_wakeup_t *msg_fnd_any_rdr(kmsqid_t *, int, long);
static msgq_wakeup_t *msg_fnd_neg_snd(kmsqid_t *, int, long);
static msgq_wakeup_t *msg_fnd_spc_snd(kmsqid_t *, int, long);
static struct msg *msgrcv_lookup(kmsqid_t *, long);

msg_select_t msg_fnd_sndr[] = {
    { msg_fnd_any_snd, &msg_fnd_sndr[1] },
    { msg_fnd_spc_snd, &msg_fnd_sndr[2] },
    { msg_fnd_neg_snd, &msg_fnd_sndr[0] }
};

msg_select_t msg_fnd_rdr[1] = {
    { msg_fnd_any_rdr, &msg_fnd_rdr[0] },
};

static struct modlinkage modlinkage = {
    MODREV_1,
    &modlsys,
#ifdef _SYSCALL32_IMPL
    &modlsys32,
#endif
    NULL
};


int
_init(void)
{
    int result;

    msq_svc = ipcs_create("msqids", rc_project_msgmni, rc_zone_msgmni,
        sizeof (kmsqid_t), msg_dtor, msg_rmid, AT_IPC_MSG,
        offsetof(ipc_rqty_t, ipcq_msgmni));
    zone_key_create(&msg_zone_key, NULL, msg_remove_zone, NULL);

    if ((result = mod_install(&modlinkage)) == 0)
        return (0);

    (void) zone_key_delete(msg_zone_key);
    ipcs_destroy(msq_svc);

    return (result);
}

int
_fini(void)
{
    return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

static void
msg_dtor(kipc_perm_t *perm)
{
    kmsqid_t *qp = (kmsqid_t *)perm;
    int ii;

    for (ii = 0; ii <= MSG_MAX_QNUM; ii++) {
        ASSERT(list_is_empty(&qp->msg_wait_snd[ii]));
        ASSERT(list_is_empty(&qp->msg_wait_snd_ngt[ii]));
        list_destroy(&qp->msg_wait_snd[ii]);
        list_destroy(&qp->msg_wait_snd_ngt[ii]);
    }
    ASSERT(list_is_empty(&qp->msg_cpy_block));
    list_destroy(&qp->msg_cpy_block);
    ASSERT(qp->msg_snd_cnt == 0);
    ASSERT(qp->msg_cbytes == 0);
    list_destroy(&qp->msg_list);
}


#define	msg_hold(mp)	(mp)->msg_copycnt++

/*
 * msg_rele - decrement the reference count on the message.  When count
 * reaches zero, free message header and contents.
 */
static void
msg_rele(struct msg *mp)
{
    ASSERT(mp->msg_copycnt > 0);
    if (mp->msg_copycnt-- == 1) {
        if (mp->msg_addr)
            kmem_free(mp->msg_addr, mp->msg_size);
        kmem_free(mp, sizeof (struct msg));
    }
}

/*
 * msgunlink - Unlink msg from queue, decrement byte count and wake up anyone
 * waiting for free bytes on queue.
 *
 * Called with queue locked.
 */
static void
msgunlink(kmsqid_t *qp, struct msg *mp)
{
    list_remove(&qp->msg_list, mp);
    qp->msg_qnum--;
    qp->msg_cbytes -= mp->msg_size;
    msg_rele(mp);

    /* Wake up waiting writers */
    if (qp->msg_snd_cnt)
        cv_broadcast(&qp->msg_snd_cv);
}

static void
msg_rmid(kipc_perm_t *perm)
{
    kmsqid_t *qp = (kmsqid_t *)perm;
    struct msg *mp;
    int ii;


    while ((mp = list_head(&qp->msg_list)) != NULL)
        msgunlink(qp, mp);
    ASSERT(qp->msg_cbytes == 0);

    /*
     * Wake up everyone who is in a wait state of some sort
     * for this message queue.
     */
    for (ii = 0; ii <= MSG_MAX_QNUM; ii++) {
        msg_rcvq_wakeup_all(&qp->msg_wait_snd[ii]);
        msg_rcvq_wakeup_all(&qp->msg_wait_snd_ngt[ii]);
    }
    msg_rcvq_wakeup_all(&qp->msg_cpy_block);
    if (qp->msg_snd_cnt)
        cv_broadcast(&qp->msg_snd_cv);
}

/*
 * msgctl system call.
 *
 * gets q lock (via ipc_lookup), releases before return.
 * may call users of msg_lock
 */
static int
msgctl(int msgid, int cmd, void *arg)
{
    STRUCT_DECL(msqid_ds, ds);	/* SVR4 queue work area */
    kmsqid_t *qp;		/* ptr to associated q */
    int error;
    struct cred *cr;
    model_t mdl = get_udatamodel();
    struct msqid_ds64 ds64;
    kmutex_t *lock;
    proc_t *pp = curproc;

    STRUCT_INIT(ds, mdl);
    cr = CRED();

    /*
     * Perform pre- or non-lookup actions (e.g. copyins, RMID).
     */
    switch (cmd) {
    case IPC_SET:
        if (copyin(arg, STRUCT_BUF(ds), STRUCT_SIZE(ds)))
            return (set_errno(EFAULT));
        break;

    case IPC_SET64:
        if (copyin(arg, &ds64, sizeof (struct msqid_ds64)))
            return (set_errno(EFAULT));
        break;

    case IPC_RMID:
        if (error = ipc_rmid(msq_svc, msgid, cr))
            return (set_errno(error));
        return (0);
    }

    /*
     * get msqid_ds for this msgid
     */
    if ((lock = ipc_lookup(msq_svc, msgid, (kipc_perm_t **)&qp)) == NULL)
        return (set_errno(EINVAL));

    switch (cmd) {
    case IPC_SET:
        if (STRUCT_FGET(ds, msg_qbytes) > qp->msg_qbytes &&
            secpolicy_ipc_config(cr) != 0) {
            mutex_exit(lock);
            return (set_errno(EPERM));
        }
        if (error = ipcperm_set(msq_svc, cr, &qp->msg_perm,
            &STRUCT_BUF(ds)->msg_perm, mdl)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        qp->msg_qbytes = STRUCT_FGET(ds, msg_qbytes);
        qp->msg_ctime = gethrestime_sec();
        break;

    case IPC_STAT:
        if (error = ipcperm_access(&qp->msg_perm, MSG_R, cr)) {
            mutex_exit(lock);
            return (set_errno(error));
        }

        if (qp->msg_rcv_cnt)
            qp->msg_perm.ipc_mode |= MSG_RWAIT;
        if (qp->msg_snd_cnt)
            qp->msg_perm.ipc_mode |= MSG_WWAIT;
        ipcperm_stat(&STRUCT_BUF(ds)->msg_perm, &qp->msg_perm, mdl);
        qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT);
        STRUCT_FSETP(ds, msg_first, NULL);	/* kernel addr */
        STRUCT_FSETP(ds, msg_last, NULL);
        STRUCT_FSET(ds, msg_cbytes, qp->msg_cbytes);
        STRUCT_FSET(ds, msg_qnum, qp->msg_qnum);
        STRUCT_FSET(ds, msg_qbytes, qp->msg_qbytes);
        STRUCT_FSET(ds, msg_lspid, qp->msg_lspid);
        STRUCT_FSET(ds, msg_lrpid, qp->msg_lrpid);
        STRUCT_FSET(ds, msg_stime, qp->msg_stime);
        STRUCT_FSET(ds, msg_rtime, qp->msg_rtime);
        STRUCT_FSET(ds, msg_ctime, qp->msg_ctime);
        break;

    case IPC_SET64:
        mutex_enter(&pp->p_lock);
        if ((ds64.msgx_qbytes > qp->msg_qbytes) &&
            secpolicy_ipc_config(cr) != 0 &&
            rctl_test(rc_process_msgmnb, pp->p_rctls, pp,
            ds64.msgx_qbytes, RCA_SAFE) & RCT_DENY) {
            mutex_exit(&pp->p_lock);
            mutex_exit(lock);
            return (set_errno(EPERM));
        }
        mutex_exit(&pp->p_lock);
        if (error = ipcperm_set64(msq_svc, cr, &qp->msg_perm,
            &ds64.msgx_perm)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        qp->msg_qbytes = ds64.msgx_qbytes;
        qp->msg_ctime = gethrestime_sec();
        break;

    case IPC_STAT64:
        if (qp->msg_rcv_cnt)
            qp->msg_perm.ipc_mode |= MSG_RWAIT;
        if (qp->msg_snd_cnt)
            qp->msg_perm.ipc_mode |= MSG_WWAIT;
        ipcperm_stat64(&ds64.msgx_perm, &qp->msg_perm);
        qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT);
        ds64.msgx_cbytes = qp->msg_cbytes;
        ds64.msgx_qnum = qp->msg_qnum;
        ds64.msgx_qbytes = qp->msg_qbytes;
        ds64.msgx_lspid = qp->msg_lspid;
        ds64.msgx_lrpid = qp->msg_lrpid;
        ds64.msgx_stime = qp->msg_stime;
        ds64.msgx_rtime = qp->msg_rtime;
        ds64.msgx_ctime = qp->msg_ctime;
        break;

    default:
        mutex_exit(lock);
        return (set_errno(EINVAL));
    }

    mutex_exit(lock);

    /*
     * Do copyout last (after releasing mutex).
     */
    switch (cmd) {
    case IPC_STAT:
        if (copyout(STRUCT_BUF(ds), arg, STRUCT_SIZE(ds)))
            return (set_errno(EFAULT));
        break;

    case IPC_STAT64:
        if (copyout(&ds64, arg, sizeof (struct msqid_ds64)))
            return (set_errno(EFAULT));
        break;
    }

    return (0);
}
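/*
 * Usage note (illustrative sketch; the exact policy is the
 * secpolicy_ipc_config() check above): IPC_SET may lower msg_qbytes
 * freely, but raising it above the current value requires the
 * ipc-configuration privilege, e.g.:
 *
 *	struct msqid_ds ds;
 *	(void) msgctl(id, IPC_STAT, &ds);
 *	ds.msg_qbytes *= 2;
 *	if (msgctl(id, IPC_SET, &ds) == -1 && errno == EPERM)
 *		;	// unprivileged callers may only shrink msg_qbytes
 */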
/*
 * Remove all message queues associated with a given zone.  Called by
 * zone_shutdown when the zone is halted.
 */
/*ARGSUSED1*/
static void
msg_remove_zone(zoneid_t zoneid, void *arg)
{
    ipc_remove_zone(msq_svc, zoneid);
}

/*
 * msgget system call.
 */
static int
msgget(key_t key, int msgflg)
{
    kmsqid_t *qp;
    kmutex_t *lock;
    int id, error;
    int ii;
    proc_t *pp = curproc;

top:
    if (error = ipc_get(msq_svc, key, msgflg, (kipc_perm_t **)&qp, &lock))
        return (set_errno(error));

    if (IPC_FREE(&qp->msg_perm)) {
        mutex_exit(lock);
        mutex_exit(&pp->p_lock);

        list_create(&qp->msg_list, sizeof (struct msg),
            offsetof(struct msg, msg_node));
        qp->msg_qnum = 0;
        qp->msg_lspid = qp->msg_lrpid = 0;
        qp->msg_stime = qp->msg_rtime = 0;
        qp->msg_ctime = gethrestime_sec();
        qp->msg_ngt_cnt = 0;
        qp->msg_neg_copy = 0;
        for (ii = 0; ii <= MSG_MAX_QNUM; ii++) {
            list_create(&qp->msg_wait_snd[ii],
                sizeof (msgq_wakeup_t),
                offsetof(msgq_wakeup_t, msgw_list));
            list_create(&qp->msg_wait_snd_ngt[ii],
                sizeof (msgq_wakeup_t),
                offsetof(msgq_wakeup_t, msgw_list));
        }
        /*
         * The proper initialization of msg_lowest_type is to the
         * highest possible value.  By doing this we guarantee that
         * when the first send happens, the lowest type will be set
         * properly.
         */
        qp->msg_lowest_type = -1;
        list_create(&qp->msg_cpy_block,
            sizeof (msgq_wakeup_t),
            offsetof(msgq_wakeup_t, msgw_list));
        qp->msg_fnd_sndr = &msg_fnd_sndr[0];
        qp->msg_fnd_rdr = &msg_fnd_rdr[0];
        qp->msg_rcv_cnt = 0;
        qp->msg_snd_cnt = 0;

        if (error = ipc_commit_begin(msq_svc, key, msgflg,
            (kipc_perm_t *)qp)) {
            if (error == EAGAIN)
                goto top;
            return (set_errno(error));
        }
        qp->msg_qbytes = rctl_enforced_value(rc_process_msgmnb,
            pp->p_rctls, pp);
        qp->msg_qmax = rctl_enforced_value(rc_process_msgtql,
            pp->p_rctls, pp);
        lock = ipc_commit_end(msq_svc, &qp->msg_perm);
    }
#ifdef C2_AUDIT
    if (audit_active)
        audit_ipcget(AT_IPC_MSG, (void *)qp);
#endif
    id = qp->msg_perm.ipc_id;
    mutex_exit(lock);
    return (id);
}

static ssize_t
msgrcv(int msqid, struct ipcmsgbuf *msgp, size_t msgsz, long msgtyp, int msgflg)
{
    struct msg *smp;		/* ptr to best msg on q */
    kmsqid_t *qp;		/* ptr to associated q */
    kmutex_t *lock;
    size_t xtsz;		/* transfer byte count */
    int error = 0;
    int cvres;
    ulong_t msg_hash;
    msgq_wakeup_t msg_entry;

    CPU_STATS_ADDQ(CPU, sys, msg, 1);	/* bump msg send/rcv count */

    msg_hash = msg_type_hash(msgtyp);
    if ((lock = ipc_lookup(msq_svc, msqid, (kipc_perm_t **)&qp)) == NULL) {
        return ((ssize_t)set_errno(EINVAL));
    }
    ipc_hold(msq_svc, (kipc_perm_t *)qp);

    if (error = ipcperm_access(&qp->msg_perm, MSG_R, CRED())) {
        goto msgrcv_out;
    }

    /*
     * Various information (including the condvar_t) required for the
     * process to sleep is provided on its stack.
     */
    msg_entry.msgw_thrd = curthread;
    msg_entry.msgw_snd_wake = 0;
    msg_entry.msgw_type = msgtyp;
findmsg:
    smp = msgrcv_lookup(qp, msgtyp);

    if (smp) {
        /*
         * We found a possible message to copy out.
         */
        if ((smp->msg_flags & MSG_RCVCOPY) == 0) {
            /*
             * It is available, attempt to copy it.
             */
            error = msg_copyout(qp, msgtyp, &lock, &xtsz, msgsz,
                smp, msgp, msgflg);
            /*
             * Don't forget to wake up a sleeper that blocked
             * because we were copying things out.
             */
            msg_wakeup_rdr(qp, &qp->msg_fnd_rdr, 0);
            goto msgrcv_out;
        }
        /*
         * The selected message is being copied out, so block.  We do
         * not need to wake up the next entry on the msg_cpy_block
         * list, since someone is already copying out and will get
         * things moving again once the copy completes.
         */
        cvres = msg_rcvq_sleep(&qp->msg_cpy_block,
            &msg_entry, &lock, qp);
        error = msgq_check_err(qp, cvres);
        if (error) {
            goto msgrcv_out;
        }
        goto findmsg;
    }
    /*
     * There isn't a message to copy out that matches the designated
     * criteria.
     */
    if (msgflg & IPC_NOWAIT) {
        error = ENOMSG;
        goto msgrcv_out;
    }
    msg_wakeup_rdr(qp, &qp->msg_fnd_rdr, 0);

    /*
     * Wait for a new message.  We keep the negative and positive types
     * separate for performance reasons.
     */
    msg_entry.msgw_snd_wake = 0;
    if (msgtyp >= 0) {
        cvres = msg_rcvq_sleep(&qp->msg_wait_snd[msg_hash],
            &msg_entry, &lock, qp);
    } else {
        qp->msg_ngt_cnt++;
        cvres = msg_rcvq_sleep(&qp->msg_wait_snd_ngt[msg_hash],
            &msg_entry, &lock, qp);
        qp->msg_ngt_cnt--;
    }

    if (!(error = msgq_check_err(qp, cvres))) {
        goto findmsg;
    }

msgrcv_out:
    if (error) {
        msg_wakeup_rdr(qp, &qp->msg_fnd_rdr, 0);
        if (msg_entry.msgw_snd_wake) {
            msg_wakeup_rdr(qp, &qp->msg_fnd_sndr,
                msg_entry.msgw_snd_wake);
        }
        ipc_rele(msq_svc, (kipc_perm_t *)qp);
        return ((ssize_t)set_errno(error));
    }
    ipc_rele(msq_svc, (kipc_perm_t *)qp);
    return ((ssize_t)xtsz);
}
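/*
 * Caller-side sketch of the msgtyp conventions handled above (illustrative
 * only; the buffer layout is an assumption of the example):
 *
 *	struct mybuf { long mtype; char mtext[64]; } m;
 *
 *	// take the lowest-typed message whose type is <= 5, truncating
 *	// oversized text instead of failing with E2BIG
 *	ssize_t n = msgrcv(id, &m, sizeof (m.mtext), -5, MSG_NOERROR);
 */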
static int
msgq_check_err(kmsqid_t *qp, int cvres)
{
    if (IPC_FREE(&qp->msg_perm)) {
        return (EIDRM);
    }

    if (cvres == 0) {
        return (EINTR);
    }

    return (0);
}

static int
msg_copyout(kmsqid_t *qp, long msgtyp, kmutex_t **lock, size_t *xtsz_ret,
    size_t msgsz, struct msg *smp, struct ipcmsgbuf *msgp, int msgflg)
{
    size_t xtsz;
    STRUCT_HANDLE(ipcmsgbuf, umsgp);
    model_t mdl = get_udatamodel();
    int copyerror = 0;

    STRUCT_SET_HANDLE(umsgp, mdl, msgp);
    if (msgsz < smp->msg_size) {
        if ((msgflg & MSG_NOERROR) == 0) {
            return (E2BIG);
        } else {
            xtsz = msgsz;
        }
    } else {
        xtsz = smp->msg_size;
    }
    *xtsz_ret = xtsz;

    /*
     * To prevent a DOS attack we mark the message as being
     * copied out and release the mutex.  When the copy is completed
     * we need to acquire the mutex and make the appropriate updates.
     */
    ASSERT((smp->msg_flags & MSG_RCVCOPY) == 0);
    smp->msg_flags |= MSG_RCVCOPY;
    msg_hold(smp);
    if (msgtyp < 0) {
        ASSERT(qp->msg_neg_copy == 0);
        qp->msg_neg_copy = 1;
    }
    mutex_exit(*lock);

    if (mdl == DATAMODEL_NATIVE) {
        copyerror = copyout(&smp->msg_type, msgp,
            sizeof (smp->msg_type));
    } else {
        /*
         * 32-bit callers need an imploded msg type.
         */
        int32_t msg_type32 = smp->msg_type;

        copyerror = copyout(&msg_type32, msgp,
            sizeof (msg_type32));
    }

    if (copyerror == 0 && xtsz) {
        copyerror = copyout(smp->msg_addr,
            STRUCT_FADDR(umsgp, mtext), xtsz);
    }

    /*
     * Reclaim the mutex and make sure the message queue still exists.
     */

    *lock = ipc_lock(msq_svc, qp->msg_perm.ipc_id);
    if (msgtyp < 0) {
        qp->msg_neg_copy = 0;
    }
    ASSERT(smp->msg_flags & MSG_RCVCOPY);
    smp->msg_flags &= ~MSG_RCVCOPY;
    msg_rele(smp);
    if (IPC_FREE(&qp->msg_perm)) {
        return (EIDRM);
    }
    if (copyerror) {
        return (EFAULT);
    }
    qp->msg_lrpid = ttoproc(curthread)->p_pid;
    qp->msg_rtime = gethrestime_sec();
    msgunlink(qp, smp);
    return (0);
}

static struct msg *
msgrcv_lookup(kmsqid_t *qp, long msgtyp)
{
    struct msg *smp = NULL;
    int qp_low;
    struct msg *mp;	/* ptr to msg on q */
    int low_msgtype;
    static struct msg neg_copy_smp;

    mp = list_head(&qp->msg_list);
    if (msgtyp == 0) {
        smp = mp;
    } else {
        qp_low = qp->msg_lowest_type;
        if (msgtyp > 0) {
            /*
             * If our lowest possible message type is larger than
             * the message type desired, then we know there is
             * no entry present.
             */
            if (qp_low > msgtyp) {
                return (NULL);
            }

            for (; mp; mp = list_next(&qp->msg_list, mp)) {
                if (msgtyp == mp->msg_type) {
                    smp = mp;
                    break;
                }
            }
        } else {
            /*
             * We have kept track of the lowest possible message
             * type on the send queue.  This allows us to terminate
             * the search early if we find a message of that type.
             * Note that the lowest type may not be the actual
             * lowest value in the system; it is only guaranteed
             * that there isn't a value lower than that.
             */
            low_msgtype = -msgtyp;
            if (low_msgtype++ < qp_low) {
                return (NULL);
            }
            if (qp->msg_neg_copy) {
                neg_copy_smp.msg_flags = MSG_RCVCOPY;
                return (&neg_copy_smp);
            }
            for (; mp; mp = list_next(&qp->msg_list, mp)) {
                if (mp->msg_type < low_msgtype) {
                    smp = mp;
                    low_msgtype = mp->msg_type;
                    if (low_msgtype == qp_low) {
                        break;
                    }
                }
            }
            if (smp) {
                /*
                 * Update the lowest message type.
                 */
                qp->msg_lowest_type = smp->msg_type;
            }
        }
    }
    return (smp);
}
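/*
 * Worked example of the lookup above (illustrative): if the queue holds
 * messages of types 8, 3 and 11, msg_lowest_type is 3.  A request for
 * type 2 returns NULL immediately because 3 > 2.  A request for
 * msgtyp == -5 ("lowest type not greater than 5") scans the list but can
 * stop at the first type-3 message it sees, since nothing lower can exist.
 */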
/*
 * msgids system call.
 */
static int
msgids(int *buf, uint_t nids, uint_t *pnids)
{
    int error;

    if (error = ipc_ids(msq_svc, buf, nids, pnids))
        return (set_errno(error));

    return (0);
}

#define	RND(x)		roundup((x), sizeof (size_t))
#define	RND32(x)	roundup((x), sizeof (size32_t))

/*
 * msgsnap system call.
 */
static int
msgsnap(int msqid, caddr_t buf, size_t bufsz, long msgtyp)
{
    struct msg *mp;	/* ptr to msg on q */
    kmsqid_t *qp;	/* ptr to associated q */
    kmutex_t *lock;
    size_t size;
    size_t nmsg;
    struct msg **snaplist;
    int error, i;
    model_t mdl = get_udatamodel();
    STRUCT_DECL(msgsnap_head, head);
    STRUCT_DECL(msgsnap_mhead, mhead);

    STRUCT_INIT(head, mdl);
    STRUCT_INIT(mhead, mdl);

    if (bufsz < STRUCT_SIZE(head))
        return (set_errno(EINVAL));

    if ((lock = ipc_lookup(msq_svc, msqid, (kipc_perm_t **)&qp)) == NULL)
        return (set_errno(EINVAL));

    if (error = ipcperm_access(&qp->msg_perm, MSG_R, CRED())) {
        mutex_exit(lock);
        return (set_errno(error));
    }
    ipc_hold(msq_svc, (kipc_perm_t *)qp);

    /*
     * First compute the required buffer size and
     * the number of messages on the queue.
     */
    size = nmsg = 0;
    for (mp = list_head(&qp->msg_list); mp;
        mp = list_next(&qp->msg_list, mp)) {
        if (msgtyp == 0 ||
            (msgtyp > 0 && msgtyp == mp->msg_type) ||
            (msgtyp < 0 && mp->msg_type <= -msgtyp)) {
            nmsg++;
            if (mdl == DATAMODEL_NATIVE)
                size += RND(mp->msg_size);
            else
                size += RND32(mp->msg_size);
        }
    }

    size += STRUCT_SIZE(head) + nmsg * STRUCT_SIZE(mhead);
    if (size > bufsz)
        nmsg = 0;

    if (nmsg > 0) {
        /*
         * Mark the messages as being copied.
         */
        snaplist = (struct msg **)kmem_alloc(nmsg *
            sizeof (struct msg *), KM_SLEEP);
        i = 0;
        for (mp = list_head(&qp->msg_list); mp;
            mp = list_next(&qp->msg_list, mp)) {
            if (msgtyp == 0 ||
                (msgtyp > 0 && msgtyp == mp->msg_type) ||
                (msgtyp < 0 && mp->msg_type <= -msgtyp)) {
                msg_hold(mp);
                snaplist[i] = mp;
                i++;
            }
        }
    }
    mutex_exit(lock);

    /*
     * Copy out the buffer header.
     */
    STRUCT_FSET(head, msgsnap_size, size);
    STRUCT_FSET(head, msgsnap_nmsg, nmsg);
    if (copyout(STRUCT_BUF(head), buf, STRUCT_SIZE(head)))
        error = EFAULT;

    buf += STRUCT_SIZE(head);

    /*
     * Now copy out the messages one by one.
     */
    for (i = 0; i < nmsg; i++) {
        mp = snaplist[i];
        if (error == 0) {
            STRUCT_FSET(mhead, msgsnap_mlen, mp->msg_size);
            STRUCT_FSET(mhead, msgsnap_mtype, mp->msg_type);
            if (copyout(STRUCT_BUF(mhead), buf, STRUCT_SIZE(mhead)))
                error = EFAULT;
            buf += STRUCT_SIZE(mhead);

            if (error == 0 &&
                mp->msg_size != 0 &&
                copyout(mp->msg_addr, buf, mp->msg_size))
                error = EFAULT;
            if (mdl == DATAMODEL_NATIVE)
                buf += RND(mp->msg_size);
            else
                buf += RND32(mp->msg_size);
        }
        lock = ipc_lock(msq_svc, qp->msg_perm.ipc_id);
        msg_rele(mp);
        /* Check for msg q deleted or reallocated */
        if (IPC_FREE(&qp->msg_perm))
            error = EIDRM;
        mutex_exit(lock);
    }

    (void) ipc_lock(msq_svc, qp->msg_perm.ipc_id);
    ipc_rele(msq_svc, (kipc_perm_t *)qp);

    if (nmsg > 0)
        kmem_free(snaplist, nmsg * sizeof (struct msg *));

    if (error)
        return (set_errno(error));
    return (0);
}
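/*
 * Illustrative sketch of how a native-data-model caller walks the snapshot
 * buffer produced above; the field names come from the STRUCT_FSET() calls,
 * while the pointer arithmetic is an assumption of the example (see also
 * msgsnap(3C)):
 *
 *	struct msgsnap_head *head = (struct msgsnap_head *)buf;
 *	struct msgsnap_mhead *mhead = (struct msgsnap_mhead *)(head + 1);
 *	size_t i;
 *
 *	for (i = 0; i < head->msgsnap_nmsg; i++) {
 *		char *text = (char *)(mhead + 1);
 *		// use mhead->msgsnap_mtype, mhead->msgsnap_mlen, text
 *		mhead = (struct msgsnap_mhead *)(text +
 *		    roundup(mhead->msgsnap_mlen, sizeof (size_t)));
 *	}
 */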
#define	MSG_PREALLOC_LIMIT 8192

/*
 * msgsnd system call.
 */
static int
msgsnd(int msqid, struct ipcmsgbuf *msgp, size_t msgsz, int msgflg)
{
    kmsqid_t *qp;
    kmutex_t *lock = NULL;
    struct msg *mp = NULL;
    long type;
    int error = 0;
    model_t mdl = get_udatamodel();
    STRUCT_HANDLE(ipcmsgbuf, umsgp);

    CPU_STATS_ADDQ(CPU, sys, msg, 1);	/* bump msg send/rcv count */
    STRUCT_SET_HANDLE(umsgp, mdl, msgp);

    if (mdl == DATAMODEL_NATIVE) {
        if (copyin(msgp, &type, sizeof (type)))
            return (set_errno(EFAULT));
    } else {
        int32_t type32;
        if (copyin(msgp, &type32, sizeof (type32)))
            return (set_errno(EFAULT));
        type = type32;
    }

    if (type < 1)
        return (set_errno(EINVAL));

    /*
     * We want the value here large enough that most of the
     * message operations will use the "lockless" path,
     * but small enough that a user cannot reserve large
     * chunks of kernel memory unless they have a valid
     * reason to.
     */
    if (msgsz <= MSG_PREALLOC_LIMIT) {
        /*
         * The message is small enough that we can afford to do the
         * allocation now.  This saves dropping the lock
         * and then reacquiring the lock.
         */
        mp = kmem_zalloc(sizeof (struct msg), KM_SLEEP);
        mp->msg_copycnt = 1;
        mp->msg_size = msgsz;
        if (msgsz) {
            mp->msg_addr = kmem_alloc(msgsz, KM_SLEEP);
            if (copyin(STRUCT_FADDR(umsgp, mtext),
                mp->msg_addr, msgsz) == -1) {
                error = EFAULT;
                goto msgsnd_out;
            }
        }
    }

    if ((lock = ipc_lookup(msq_svc, msqid, (kipc_perm_t **)&qp)) == NULL) {
        error = EINVAL;
        goto msgsnd_out;
    }

    ipc_hold(msq_svc, (kipc_perm_t *)qp);

    if (msgsz > qp->msg_qbytes) {
        error = EINVAL;
        goto msgsnd_out;
    }

    if (error = ipcperm_access(&qp->msg_perm, MSG_W, CRED()))
        goto msgsnd_out;

top:
    /*
     * Allocate space on q, message header, & buffer space.
     */
    ASSERT(qp->msg_qnum <= qp->msg_qmax);
    while ((msgsz > qp->msg_qbytes - qp->msg_cbytes) ||
        (qp->msg_qnum == qp->msg_qmax)) {
        int cvres;

        if (msgflg & IPC_NOWAIT) {
            error = EAGAIN;
            goto msgsnd_out;
        }

        qp->msg_snd_cnt++;
        cvres = cv_wait_sig(&qp->msg_snd_cv, lock);
        lock = ipc_relock(msq_svc, qp->msg_perm.ipc_id, lock);
        qp->msg_snd_cnt--;

        if (error = msgq_check_err(qp, cvres)) {
            goto msgsnd_out;
        }
    }

    if (mp == NULL) {
        int failure;

        mutex_exit(lock);
        ASSERT(msgsz > 0);
        mp = kmem_zalloc(sizeof (struct msg), KM_SLEEP);
        mp->msg_addr = kmem_alloc(msgsz, KM_SLEEP);
        mp->msg_size = msgsz;
        mp->msg_copycnt = 1;

        failure = (copyin(STRUCT_FADDR(umsgp, mtext),
            mp->msg_addr, msgsz) == -1);
        lock = ipc_lock(msq_svc, qp->msg_perm.ipc_id);
        if (IPC_FREE(&qp->msg_perm)) {
            error = EIDRM;
            goto msgsnd_out;
        }
        if (failure) {
            error = EFAULT;
            goto msgsnd_out;
        }
        goto top;
    }

    /*
     * Everything is available, put msg on q.
     */
    qp->msg_qnum++;
    qp->msg_cbytes += msgsz;
    qp->msg_lspid = curproc->p_pid;
    qp->msg_stime = gethrestime_sec();
    mp->msg_type = type;
    if (qp->msg_lowest_type > type)
        qp->msg_lowest_type = type;
    list_insert_tail(&qp->msg_list, mp);
    /*
     * Get the proper receiver going.
     */
    msg_wakeup_rdr(qp, &qp->msg_fnd_sndr, type);

msgsnd_out:
    if (lock)
        ipc_rele(msq_svc, (kipc_perm_t *)qp);	/* drops lock */

    if (error) {
        if (mp)
            msg_rele(mp);
        return (set_errno(error));
    }

    return (0);
}
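/*
 * Caller-side sketch of the IPC_NOWAIT behaviour above (illustrative only;
 * the retry policy is an assumption of the example):
 *
 *	struct mybuf { long mtype; char mtext[64]; } m = { 1, "hello" };
 *
 *	while (msgsnd(id, &m, sizeof (m.mtext), IPC_NOWAIT) == -1) {
 *		if (errno != EAGAIN)
 *			break;			// real failure
 *		(void) poll(NULL, 0, 10);	// queue full: back off, retry
 *	}
 */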
static void
msg_wakeup_rdr(kmsqid_t *qp, msg_select_t **flist, long type)
{
    msg_select_t *walker = *flist;
    msgq_wakeup_t *wakeup;
    ulong_t msg_hash;

    msg_hash = msg_type_hash(type);

    do {
        wakeup = walker->selection(qp, msg_hash, type);
        walker = walker->next_selection;
    } while (!wakeup && walker != *flist);

    *flist = (*flist)->next_selection;
    if (wakeup) {
        if (type) {
            wakeup->msgw_snd_wake = type;
        }
        cv_signal(&wakeup->msgw_wake_cv);
    }
}

static ulong_t
msg_type_hash(long msg_type)
{
    long temp;
    ulong_t hash;

    if (msg_type < 0) {
        /*
         * Negative message types are hashed over an
         * interval.  Any message type that hashes
         * beyond MSG_MAX_QNUM is automatically placed
         * in the last bucket.
         */
        temp = -msg_type;
        hash = temp / MSG_NEG_INTERVAL;
        if (hash > MSG_MAX_QNUM) {
            hash = MSG_MAX_QNUM;
        }
        return (hash);
    }

    /*
     * 0 or positive message type.  The first bucket is reserved for
     * message receivers of type 0, the other buckets we hash into.
     */
    if (msg_type) {
        return (1 + (msg_type % (MSG_MAX_QNUM)));
    }
    return (0);
}
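/*
 * Worked example of the hash above (illustrative; the actual values of
 * MSG_NEG_INTERVAL and MSG_MAX_QNUM live in msg_impl.h): type 0 always
 * maps to bucket 0; a positive type t maps to bucket 1 + (t % MSG_MAX_QNUM);
 * a negative type t maps to bucket (-t) / MSG_NEG_INTERVAL, with anything
 * hashing past MSG_MAX_QNUM collapsed into the last bucket.
 */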
1257*4153Sdv142724 
1258*4153Sdv142724 /*
1259*4153Sdv142724  * Routines to see if we have a receiver of type 0 blocked waiting
1260*4153Sdv142724  * for a message.  Simply return the first waiter on the list.
1261*4153Sdv142724  */
1262*4153Sdv142724 
1263*4153Sdv142724 static msgq_wakeup_t *
1264*4153Sdv142724 /* LINTED */
1265*4153Sdv142724 msg_fnd_any_snd(kmsqid_t *qp, int msg_hash, long type)
1266*4153Sdv142724 {
1267*4153Sdv142724 	return (list_head(&qp->msg_wait_snd[0]));
1268*4153Sdv142724 }
1269*4153Sdv142724 
1270*4153Sdv142724 static msgq_wakeup_t *
1271*4153Sdv142724 /* LINTED */
1272*4153Sdv142724 msg_fnd_any_rdr(kmsqid_t *qp, int msg_hash, long type)
1273*4153Sdv142724 {
1274*4153Sdv142724 	return (list_head(&qp->msg_cpy_block));
1275*4153Sdv142724 }
1276*4153Sdv142724 
1277*4153Sdv142724 static msgq_wakeup_t *
1278*4153Sdv142724 msg_fnd_spc_snd(kmsqid_t *qp, int msg_hash, long type)
1279*4153Sdv142724 {
1280*4153Sdv142724 	msgq_wakeup_t	*walker;
1281*4153Sdv142724 
1282*4153Sdv142724 	walker = list_head(&qp->msg_wait_snd[msg_hash]);
1283*4153Sdv142724 
1284*4153Sdv142724 	while (walker && walker->msgw_type != type &&
1285*4153Sdv142724 	    (walker = list_next(&qp->msg_wait_snd[msg_hash], walker)));
1286*4153Sdv142724 	return (walker);
1287*4153Sdv142724 }
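/*
 * Illustrative only: why msg_fnd_spc_snd() still compares msgw_type inside
 * the bucket walk.  Positive types collide in the hash; with the example
 * value MSG_MAX_QNUM == 64 used above, types 5 and 69 both map to bucket
 * 1 + (type % 64) == 6, so a receiver blocked on type 69 can share a
 * bucket with one blocked on type 5.  The exact-match check in the walk
 * is what disambiguates them.
 */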
1288*4153Sdv142724 
1289*4153Sdv142724 static msgq_wakeup_t *
1290*4153Sdv142724 /* LINTED */
1291*4153Sdv142724 msg_fnd_neg_snd(kmsqid_t *qp, int msg_hash, long type)
1292*4153Sdv142724 {
1293*4153Sdv142724 	msgq_wakeup_t	*qptr;
1294*4153Sdv142724 	int		count;
1295*4153Sdv142724 	int		check_index;
1296*4153Sdv142724 	int		neg_index;
1297*4153Sdv142724 	int		nbuckets;
1298*4153Sdv142724 
1299*4153Sdv142724 	if (!qp->msg_ngt_cnt) {
1300*4153Sdv142724 		return (NULL);
1301*4153Sdv142724 	}
1302*4153Sdv142724 	neg_index = msg_type_hash(-type);
1303*4153Sdv142724 
1304*4153Sdv142724 	/*
1305*4153Sdv142724 	 * Check for a match among the negative type queues.  Any buckets
1306*4153Sdv142724 	 * at neg_index or larger can match the type.  Use the last send
1307*4153Sdv142724 	 * time to randomize the starting bucket to prevent starvation.
1308*4153Sdv142724 	 * Search all buckets from neg_index to MSG_MAX_QNUM, starting
1309*4153Sdv142724 	 * from the random starting point, and wrapping around after
1310*4153Sdv142724 	 * MSG_MAX_QNUM.
1311*4153Sdv142724 	 */
1312*4153Sdv142724 
1313*4153Sdv142724 	nbuckets = MSG_MAX_QNUM - neg_index + 1;
1314*4153Sdv142724 	check_index = neg_index + (qp->msg_stime % nbuckets);
1315*4153Sdv142724 
1316*4153Sdv142724 	for (count = nbuckets; count > 0; count--) {
1317*4153Sdv142724 		qptr = list_head(&qp->msg_wait_snd_ngt[check_index]);
1318*4153Sdv142724 		while (qptr) {
1319*4153Sdv142724 			/*
1320*4153Sdv142724 			 * The lowest hash bucket may actually contain
1321*4153Sdv142724 			 * message types that are not valid for this
1322*4153Sdv142724 			 * request.  This can happen due to the fact that
1323*4153Sdv142724 			 * the message buckets actually contain a consecutive
1324*4153Sdv142724 			 * range of types.
1325*4153Sdv142724 			 */
1326*4153Sdv142724 			if (-qptr->msgw_type >= type) {
1327*4153Sdv142724 				return (qptr);
1328*4153Sdv142724 			}
1329*4153Sdv142724 			qptr = list_next(&qp->msg_wait_snd_ngt[check_index], qptr);
1330*4153Sdv142724 		}
1331*4153Sdv142724 
1332*4153Sdv142724 		if (++check_index > MSG_MAX_QNUM) {
1333*4153Sdv142724 			check_index = neg_index;
1334*4153Sdv142724 		}
1335*4153Sdv142724 	}
1336*4153Sdv142724 	return (NULL);
1337*4153Sdv142724 }
1338*4153Sdv142724 
1339*4153Sdv142724 static int
1340*4153Sdv142724 msg_rcvq_sleep(list_t *queue, msgq_wakeup_t *entry, kmutex_t **lock,
1341*4153Sdv142724     kmsqid_t *qp)
1342*4153Sdv142724 {
1343*4153Sdv142724 	int		cvres;
1344*4153Sdv142724 
1345*4153Sdv142724 	cv_init(&entry->msgw_wake_cv, NULL, 0, NULL);
1346*4153Sdv142724 
1347*4153Sdv142724 	list_insert_tail(queue, entry);
1348*4153Sdv142724 
1349*4153Sdv142724 	qp->msg_rcv_cnt++;
1350*4153Sdv142724 	cvres = cv_wait_sig(&entry->msgw_wake_cv, *lock);
1351*4153Sdv142724 	*lock = ipc_relock(msq_svc, qp->msg_perm.ipc_id, *lock);
1352*4153Sdv142724 	qp->msg_rcv_cnt--;
1353*4153Sdv142724 	/*
1354*4153Sdv142724 	 * We have woken up, so remove ourselves from the waiter list.
1355*4153Sdv142724 	 */
1356*4153Sdv142724 	if (!IPC_FREE(&qp->msg_perm)) {
1357*4153Sdv142724 		list_remove(queue, entry);
1358*4153Sdv142724 	}
1359*4153Sdv142724 
1360*4153Sdv142724 	return (cvres);
1361*4153Sdv142724 }
1362*4153Sdv142724 
1363*4153Sdv142724 static void
1364*4153Sdv142724 msg_rcvq_wakeup_all(list_t *q_ptr)
1365*4153Sdv142724 {
1366*4153Sdv142724 	msgq_wakeup_t	*q_walk;
1367*4153Sdv142724 
1368*4153Sdv142724 	q_walk = (msgq_wakeup_t *)list_head(q_ptr);
1369*4153Sdv142724 	while (q_walk) {
1370*4153Sdv142724 		/*
1371*4153Sdv142724 		 * Walk the entire list, wake every process up.
1372*4153Sdv142724 		 */
1373*4153Sdv142724 		cv_signal(&q_walk->msgw_wake_cv);
1374*4153Sdv142724 		q_walk = list_next(q_ptr, q_walk);
1375*4153Sdv142724 	}
1376*4153Sdv142724 }
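/*
 * Illustrative only: a worked pass through the bucket scan in
 * msg_fnd_neg_snd() above, again assuming MSG_MAX_QNUM == 64 and
 * MSG_NEG_INTERVAL == 8 for the arithmetic (see sys/msg_impl.h for the
 * real values).  Suppose a message of type 100 is being sent:
 *
 *	neg_index   = msg_type_hash(-100) = 100 / 8 = 12
 *	nbuckets    = 64 - 12 + 1 = 53
 *	check_index = 12 + (msg_stime % 53)	(randomized start)
 *
 * The scan walks buckets check_index, check_index + 1, ..., wrapping from
 * 64 back to 12, until it either finds a waiter whose -msgw_type >= 100
 * (i.e. one willing to accept types at least as large as 100) or has
 * visited all 53 buckets.  Starting at a bucket derived from the last
 * send time keeps the same bucket's waiters from always being picked
 * first.
 */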
1377*4153Sdv142724 
13780Sstevel@tonic-gate /*
13790Sstevel@tonic-gate  * msgsys - System entry point for the msgctl, msgget, msgrcv, msgsnd,
13800Sstevel@tonic-gate  * msgids, and msgsnap system calls.
13810Sstevel@tonic-gate  */
13820Sstevel@tonic-gate static ssize_t
13830Sstevel@tonic-gate msgsys(int opcode, uintptr_t a1, uintptr_t a2, uintptr_t a3,
13840Sstevel@tonic-gate 	uintptr_t a4, uintptr_t a5)
13850Sstevel@tonic-gate {
13860Sstevel@tonic-gate 	ssize_t error;
13870Sstevel@tonic-gate 
13880Sstevel@tonic-gate 	switch (opcode) {
13890Sstevel@tonic-gate 	case MSGGET:
13900Sstevel@tonic-gate 		error = msgget((key_t)a1, (int)a2);
13910Sstevel@tonic-gate 		break;
13920Sstevel@tonic-gate 	case MSGCTL:
13930Sstevel@tonic-gate 		error = msgctl((int)a1, (int)a2, (void *)a3);
13940Sstevel@tonic-gate 		break;
13950Sstevel@tonic-gate 	case MSGRCV:
13960Sstevel@tonic-gate 		error = msgrcv((int)a1, (struct ipcmsgbuf *)a2,
13970Sstevel@tonic-gate 		    (size_t)a3, (long)a4, (int)a5);
13980Sstevel@tonic-gate 		break;
13990Sstevel@tonic-gate 	case MSGSND:
14000Sstevel@tonic-gate 		error = msgsnd((int)a1, (struct ipcmsgbuf *)a2,
14010Sstevel@tonic-gate 		    (size_t)a3, (int)a4);
14020Sstevel@tonic-gate 		break;
14030Sstevel@tonic-gate 	case MSGIDS:
14040Sstevel@tonic-gate 		error = msgids((int *)a1, (uint_t)a2, (uint_t *)a3);
14050Sstevel@tonic-gate 		break;
14060Sstevel@tonic-gate 	case MSGSNAP:
14070Sstevel@tonic-gate 		error = msgsnap((int)a1, (caddr_t)a2, (size_t)a3, (long)a4);
14080Sstevel@tonic-gate 		break;
14090Sstevel@tonic-gate 	default:
14100Sstevel@tonic-gate 		error = set_errno(EINVAL);
14110Sstevel@tonic-gate 		break;
14120Sstevel@tonic-gate 	}
14130Sstevel@tonic-gate 
14140Sstevel@tonic-gate 	return (error);
14150Sstevel@tonic-gate }
14160Sstevel@tonic-gate 
14170Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
14180Sstevel@tonic-gate /*
14190Sstevel@tonic-gate  * msgsys32 - System entry point for the msgctl, msgget, msgrcv, msgsnd,
14200Sstevel@tonic-gate  * msgids, and msgsnap system calls for 32-bit callers on an LP64 kernel.
14210Sstevel@tonic-gate  */
14220Sstevel@tonic-gate static ssize32_t
14230Sstevel@tonic-gate msgsys32(int opcode, uint32_t a1, uint32_t a2, uint32_t a3,
14240Sstevel@tonic-gate 	uint32_t a4, uint32_t a5)
14250Sstevel@tonic-gate {
14260Sstevel@tonic-gate 	ssize_t error;
14270Sstevel@tonic-gate 
14280Sstevel@tonic-gate 	switch (opcode) {
14290Sstevel@tonic-gate 	case MSGGET:
14300Sstevel@tonic-gate 		error = msgget((key_t)a1, (int)a2);
14310Sstevel@tonic-gate 		break;
14320Sstevel@tonic-gate 	case MSGCTL:
14330Sstevel@tonic-gate 		error = msgctl((int)a1, (int)a2, (void *)(uintptr_t)a3);
14340Sstevel@tonic-gate 		break;
14350Sstevel@tonic-gate 	case MSGRCV:
14360Sstevel@tonic-gate 		error = msgrcv((int)a1, (struct ipcmsgbuf *)(uintptr_t)a2,
14370Sstevel@tonic-gate 		    (size_t)a3, (long)(int32_t)a4, (int)a5);
14380Sstevel@tonic-gate 		break;
14390Sstevel@tonic-gate 	case MSGSND:
14400Sstevel@tonic-gate 		error = msgsnd((int)a1, (struct ipcmsgbuf *)(uintptr_t)a2,
14410Sstevel@tonic-gate 		    (size_t)(int32_t)a3, (int)a4);
14420Sstevel@tonic-gate 		break;
14430Sstevel@tonic-gate 	case MSGIDS:
14440Sstevel@tonic-gate 		error = msgids((int *)(uintptr_t)a1, (uint_t)a2,
14450Sstevel@tonic-gate 		    (uint_t *)(uintptr_t)a3);
14460Sstevel@tonic-gate 		break;
14470Sstevel@tonic-gate 	case MSGSNAP:
14480Sstevel@tonic-gate 		error = msgsnap((int)a1, (caddr_t)(uintptr_t)a2, (size_t)a3,
14490Sstevel@tonic-gate 		    (long)(int32_t)a4);
14500Sstevel@tonic-gate 		break;
14510Sstevel@tonic-gate 	default:
14520Sstevel@tonic-gate 		error = set_errno(EINVAL);
14530Sstevel@tonic-gate 		break;
14540Sstevel@tonic-gate 	}
14550Sstevel@tonic-gate 
14560Sstevel@tonic-gate 	return (error);
14570Sstevel@tonic-gate }
14580Sstevel@tonic-gate #endif	/* _SYSCALL32_IMPL */
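/*
 * Illustrative only: a minimal user-level program that reaches the
 * msgsys() entry points above through the standard System V message
 * queue API (msgget(2), msgsnd(2), msgrcv(2), msgctl(2)).  Error
 * handling is omitted for brevity; extract the code from this comment
 * to build it.
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	struct mybuf {
 *		long	mtype;
 *		char	mtext[64];
 *	};
 *
 *	int
 *	main(void)
 *	{
 *		struct mybuf m;
 *		int qid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
 *
 *		m.mtype = 5;
 *		(void) strcpy(m.mtext, "hello");
 *		(void) msgsnd(qid, &m, strlen(m.mtext) + 1, 0);
 *
 *		(void) msgrcv(qid, &m, sizeof (m.mtext), -10, 0);
 *		(void) printf("type %ld: %s\n", m.mtype, m.mtext);
 *
 *		return (msgctl(qid, IPC_RMID, NULL));
 *	}
 *
 * Sending with mtype 5 and receiving with msgtyp -10 exercises the
 * negative-type path: the receiver takes the lowest-typed message whose
 * type is <= 10.
 */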