1 /* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */ 2 /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */ 3 4 /* 5 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Adam Glass and Charles 18 * Hannum. 19 * 4. The names of the authors may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "opt_compat.h" 35 #include "opt_sysvipc.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/sysproto.h> 40 #include <sys/kernel.h> 41 #include <sys/sysctl.h> 42 #include <sys/shm.h> 43 #include <sys/proc.h> 44 #include <sys/malloc.h> 45 #include <sys/mman.h> 46 #include <sys/stat.h> 47 #include <sys/sysent.h> 48 #include <sys/jail.h> 49 50 #include <sys/mplock2.h> 51 52 #include <vm/vm.h> 53 #include <vm/vm_param.h> 54 #include <sys/lock.h> 55 #include <vm/pmap.h> 56 #include <vm/vm_object.h> 57 #include <vm/vm_map.h> 58 #include <vm/vm_page.h> 59 #include <vm/vm_pager.h> 60 61 static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments"); 62 63 struct oshmctl_args; 64 static int sys_oshmctl (struct proc *p, struct oshmctl_args *uap); 65 66 static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode); 67 static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum); 68 69 /* XXX casting to (sy_call_t *) is bogus, as usual. */ 70 static sy_call_t *shmcalls[] = { 71 (sy_call_t *)sys_shmat, (sy_call_t *)sys_oshmctl, 72 (sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget, 73 (sy_call_t *)sys_shmctl 74 }; 75 76 #define SHMSEG_FREE 0x0200 77 #define SHMSEG_REMOVED 0x0400 78 #define SHMSEG_ALLOCATED 0x0800 79 #define SHMSEG_WANTED 0x1000 80 81 static int shm_last_free, shm_committed, shmalloced; 82 int shm_nused; 83 static struct shmid_ds *shmsegs; 84 85 struct shm_handle { 86 /* vm_offset_t kva; */ 87 vm_object_t shm_object; 88 }; 89 90 struct shmmap_state { 91 vm_offset_t va; 92 int shmid; 93 }; 94 95 static void shm_deallocate_segment (struct shmid_ds *); 96 static int shm_find_segment_by_key (key_t); 97 static struct shmid_ds *shm_find_segment_by_shmid (int); 98 static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *); 99 static void shmrealloc (void); 100 static void shminit (void *); 101 102 /* 103 * Tuneable values 104 */ 105 #ifndef SHMMIN 106 #define SHMMIN 1 107 #endif 108 #ifndef SHMMNI 109 #define SHMMNI 512 110 #endif 111 #ifndef SHMSEG 112 #define SHMSEG 1024 113 #endif 114 115 struct shminfo shminfo = { 116 0, 117 SHMMIN, 118 SHMMNI, 119 SHMSEG, 120 0 121 }; 122 123 static int shm_use_phys; 124 125 TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin); 126 TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni); 127 TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg); 128 TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall); 129 TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys); 130 131 SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, 132 "Max shared memory segment size"); 133 SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, 134 "Min shared memory segment size"); 135 SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, 136 "Max number of shared memory identifiers"); 137 SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, 138 "Max shared memory segments per process"); 139 SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, 140 "Max pages of shared memory"); 141 SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, 142 "Use phys pager allocation instead of swap pager allocation"); 143 144 static int 145 shm_find_segment_by_key(key_t key) 146 { 147 int i; 148 149 for (i = 0; i < shmalloced; i++) { 150 if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) && 151 shmsegs[i].shm_perm.key == key) 152 return i; 153 } 154 return -1; 155 } 156 157 static struct shmid_ds * 158 shm_find_segment_by_shmid(int shmid) 159 { 160 int segnum; 161 struct shmid_ds *shmseg; 162 163 segnum = IPCID_TO_IX(shmid); 164 if (segnum < 0 || segnum >= shmalloced) 165 return NULL; 166 shmseg = &shmsegs[segnum]; 167 if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED)) 168 != SHMSEG_ALLOCATED || 169 shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) { 170 return NULL; 171 } 172 return shmseg; 173 } 174 175 static void 176 shm_deallocate_segment(struct shmid_ds *shmseg) 177 { 178 struct shm_handle *shm_handle; 179 size_t size; 180 181 shm_handle = shmseg->shm_internal; 182 vm_object_deallocate(shm_handle->shm_object); 183 kfree((caddr_t)shm_handle, M_SHM); 184 shmseg->shm_internal = NULL; 185 size = round_page(shmseg->shm_segsz); 186 shm_committed -= btoc(size); 187 shm_nused--; 188 shmseg->shm_perm.mode = SHMSEG_FREE; 189 } 190 191 static int 192 shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s) 193 { 194 struct shmid_ds *shmseg; 195 int segnum, result; 196 size_t size; 197 198 segnum = IPCID_TO_IX(shmmap_s->shmid); 199 shmseg = &shmsegs[segnum]; 200 size = round_page(shmseg->shm_segsz); 201 result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size); 202 if (result != KERN_SUCCESS) 203 return EINVAL; 204 shmmap_s->shmid = -1; 205 shmseg->shm_dtime = time_second; 206 if ((--shmseg->shm_nattch <= 0) && 207 (shmseg->shm_perm.mode & SHMSEG_REMOVED)) { 208 shm_deallocate_segment(shmseg); 209 shm_last_free = segnum; 210 } 211 return 0; 212 } 213 214 /* 215 * MPALMOSTSAFE 216 */ 217 int 218 sys_shmdt(struct shmdt_args *uap) 219 { 220 struct thread *td = curthread; 221 struct proc *p = td->td_proc; 222 struct shmmap_state *shmmap_s; 223 long i; 224 int error; 225 226 if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL) 227 return (ENOSYS); 228 229 get_mplock(); 230 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 231 if (shmmap_s == NULL) { 232 error = EINVAL; 233 goto done; 234 } 235 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) { 236 if (shmmap_s->shmid != -1 && 237 shmmap_s->va == (vm_offset_t)uap->shmaddr) 238 break; 239 } 240 if (i == shminfo.shmseg) 241 error = EINVAL; 242 else 243 error = shm_delete_mapping(p->p_vmspace, shmmap_s); 244 done: 245 rel_mplock(); 246 return (error); 247 } 248 249 /* 250 * MPALMOSTSAFE 251 */ 252 int 253 sys_shmat(struct shmat_args *uap) 254 { 255 struct thread *td = curthread; 256 struct proc *p = td->td_proc; 257 int error, flags; 258 long i; 259 struct shmid_ds *shmseg; 260 struct shmmap_state *shmmap_s = NULL; 261 struct shm_handle *shm_handle; 262 vm_offset_t attach_va; 263 vm_prot_t prot; 264 vm_size_t size; 265 int rv; 266 267 if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL) 268 return (ENOSYS); 269 270 get_mplock(); 271 again: 272 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 273 if (shmmap_s == NULL) { 274 size = shminfo.shmseg * sizeof(struct shmmap_state); 275 shmmap_s = kmalloc(size, M_SHM, M_WAITOK); 276 for (i = 0; i < shminfo.shmseg; i++) 277 shmmap_s[i].shmid = -1; 278 if (p->p_vmspace->vm_shm != NULL) { 279 kfree(shmmap_s, M_SHM); 280 goto again; 281 } 282 p->p_vmspace->vm_shm = (caddr_t)shmmap_s; 283 } 284 shmseg = shm_find_segment_by_shmid(uap->shmid); 285 if (shmseg == NULL) { 286 error = EINVAL; 287 goto done; 288 } 289 error = ipcperm(p, &shmseg->shm_perm, 290 (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W); 291 if (error) 292 goto done; 293 for (i = 0; i < shminfo.shmseg; i++) { 294 if (shmmap_s->shmid == -1) 295 break; 296 shmmap_s++; 297 } 298 if (i >= shminfo.shmseg) { 299 error = EMFILE; 300 goto done; 301 } 302 size = round_page(shmseg->shm_segsz); 303 #ifdef VM_PROT_READ_IS_EXEC 304 prot = VM_PROT_READ | VM_PROT_EXECUTE; 305 #else 306 prot = VM_PROT_READ; 307 #endif 308 if ((uap->shmflg & SHM_RDONLY) == 0) 309 prot |= VM_PROT_WRITE; 310 flags = MAP_ANON | MAP_SHARED; 311 if (uap->shmaddr) { 312 flags |= MAP_FIXED; 313 if (uap->shmflg & SHM_RND) { 314 attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1); 315 } else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) { 316 attach_va = (vm_offset_t)uap->shmaddr; 317 } else { 318 error = EINVAL; 319 goto done; 320 } 321 } else { 322 /* 323 * This is just a hint to vm_map_find() about where to put it. 324 */ 325 attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz); 326 } 327 328 shm_handle = shmseg->shm_internal; 329 vm_object_hold(shm_handle->shm_object); 330 vm_object_reference_locked(shm_handle->shm_object); 331 rv = vm_map_find(&p->p_vmspace->vm_map, 332 shm_handle->shm_object, 0, 333 &attach_va, 334 size, PAGE_SIZE, 335 ((flags & MAP_FIXED) ? 0 : 1), 336 VM_MAPTYPE_NORMAL, 337 prot, prot, 338 0); 339 vm_object_drop(shm_handle->shm_object); 340 if (rv != KERN_SUCCESS) { 341 vm_object_deallocate(shm_handle->shm_object); 342 error = ENOMEM; 343 goto done; 344 } 345 vm_map_inherit(&p->p_vmspace->vm_map, 346 attach_va, attach_va + size, VM_INHERIT_SHARE); 347 348 KKASSERT(shmmap_s->shmid == -1); 349 shmmap_s->va = attach_va; 350 shmmap_s->shmid = uap->shmid; 351 shmseg->shm_lpid = p->p_pid; 352 shmseg->shm_atime = time_second; 353 shmseg->shm_nattch++; 354 uap->sysmsg_resultp = (void *)attach_va; 355 error = 0; 356 done: 357 rel_mplock(); 358 return error; 359 } 360 361 struct oshmid_ds { 362 struct ipc_perm shm_perm; /* operation perms */ 363 int shm_segsz; /* size of segment (bytes) */ 364 ushort shm_cpid; /* pid, creator */ 365 ushort shm_lpid; /* pid, last operation */ 366 short shm_nattch; /* no. of current attaches */ 367 time_t shm_atime; /* last attach time */ 368 time_t shm_dtime; /* last detach time */ 369 time_t shm_ctime; /* last change time */ 370 void *shm_handle; /* internal handle for shm segment */ 371 }; 372 373 struct oshmctl_args { 374 struct sysmsg sysmsg; 375 int shmid; 376 int cmd; 377 struct oshmid_ds *ubuf; 378 }; 379 380 /* 381 * MPALMOSTSAFE 382 */ 383 static int 384 sys_oshmctl(struct proc *p, struct oshmctl_args *uap) 385 { 386 #ifdef COMPAT_43 387 struct thread *td = curthread; 388 struct shmid_ds *shmseg; 389 struct oshmid_ds outbuf; 390 int error; 391 392 if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL) 393 return (ENOSYS); 394 395 get_mplock(); 396 shmseg = shm_find_segment_by_shmid(uap->shmid); 397 if (shmseg == NULL) { 398 error = EINVAL; 399 goto done; 400 } 401 402 switch (uap->cmd) { 403 case IPC_STAT: 404 error = ipcperm(p, &shmseg->shm_perm, IPC_R); 405 if (error) 406 break; 407 outbuf.shm_perm = shmseg->shm_perm; 408 outbuf.shm_segsz = shmseg->shm_segsz; 409 outbuf.shm_cpid = shmseg->shm_cpid; 410 outbuf.shm_lpid = shmseg->shm_lpid; 411 outbuf.shm_nattch = shmseg->shm_nattch; 412 outbuf.shm_atime = shmseg->shm_atime; 413 outbuf.shm_dtime = shmseg->shm_dtime; 414 outbuf.shm_ctime = shmseg->shm_ctime; 415 outbuf.shm_handle = shmseg->shm_internal; 416 error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf)); 417 break; 418 default: 419 /* XXX casting to (sy_call_t *) is bogus, as usual. */ 420 error = sys_shmctl((struct shmctl_args *)uap); 421 } 422 done: 423 rel_mplock(); 424 return error; 425 #else 426 return EINVAL; 427 #endif 428 } 429 430 /* 431 * MPALMOSTSAFE 432 */ 433 int 434 sys_shmctl(struct shmctl_args *uap) 435 { 436 struct thread *td = curthread; 437 struct proc *p = td->td_proc; 438 int error; 439 struct shmid_ds inbuf; 440 struct shmid_ds *shmseg; 441 442 if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL) 443 return (ENOSYS); 444 445 get_mplock(); 446 shmseg = shm_find_segment_by_shmid(uap->shmid); 447 if (shmseg == NULL) { 448 error = EINVAL; 449 goto done; 450 } 451 452 switch (uap->cmd) { 453 case IPC_STAT: 454 error = ipcperm(p, &shmseg->shm_perm, IPC_R); 455 if (error == 0) 456 error = copyout(shmseg, uap->buf, sizeof(inbuf)); 457 break; 458 case IPC_SET: 459 error = ipcperm(p, &shmseg->shm_perm, IPC_M); 460 if (error == 0) 461 error = copyin(uap->buf, &inbuf, sizeof(inbuf)); 462 if (error == 0) { 463 shmseg->shm_perm.uid = inbuf.shm_perm.uid; 464 shmseg->shm_perm.gid = inbuf.shm_perm.gid; 465 shmseg->shm_perm.mode = 466 (shmseg->shm_perm.mode & ~ACCESSPERMS) | 467 (inbuf.shm_perm.mode & ACCESSPERMS); 468 shmseg->shm_ctime = time_second; 469 } 470 break; 471 case IPC_RMID: 472 error = ipcperm(p, &shmseg->shm_perm, IPC_M); 473 if (error == 0) { 474 shmseg->shm_perm.key = IPC_PRIVATE; 475 shmseg->shm_perm.mode |= SHMSEG_REMOVED; 476 if (shmseg->shm_nattch <= 0) { 477 shm_deallocate_segment(shmseg); 478 shm_last_free = IPCID_TO_IX(uap->shmid); 479 } 480 } 481 break; 482 #if 0 483 case SHM_LOCK: 484 case SHM_UNLOCK: 485 #endif 486 default: 487 error = EINVAL; 488 break; 489 } 490 done: 491 rel_mplock(); 492 return error; 493 } 494 495 static int 496 shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum) 497 { 498 struct shmid_ds *shmseg; 499 int error; 500 501 shmseg = &shmsegs[segnum]; 502 if (shmseg->shm_perm.mode & SHMSEG_REMOVED) { 503 /* 504 * This segment is in the process of being allocated. Wait 505 * until it's done, and look the key up again (in case the 506 * allocation failed or it was freed). 507 */ 508 shmseg->shm_perm.mode |= SHMSEG_WANTED; 509 error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0); 510 if (error) 511 return error; 512 return EAGAIN; 513 } 514 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) 515 return EEXIST; 516 error = ipcperm(p, &shmseg->shm_perm, mode); 517 if (error) 518 return error; 519 if (uap->size && uap->size > shmseg->shm_segsz) 520 return EINVAL; 521 uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 522 return 0; 523 } 524 525 static int 526 shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode) 527 { 528 int i, segnum, shmid; 529 size_t size; 530 struct ucred *cred = p->p_ucred; 531 struct shmid_ds *shmseg; 532 struct shm_handle *shm_handle; 533 534 if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) 535 return EINVAL; 536 if (shm_nused >= shminfo.shmmni) /* any shmids left? */ 537 return ENOSPC; 538 size = round_page(uap->size); 539 if (shm_committed + btoc(size) > shminfo.shmall) 540 return ENOMEM; 541 if (shm_last_free < 0) { 542 shmrealloc(); /* maybe expand the shmsegs[] array */ 543 for (i = 0; i < shmalloced; i++) { 544 if (shmsegs[i].shm_perm.mode & SHMSEG_FREE) 545 break; 546 } 547 if (i == shmalloced) 548 return ENOSPC; 549 segnum = i; 550 } else { 551 segnum = shm_last_free; 552 shm_last_free = -1; 553 } 554 shmseg = &shmsegs[segnum]; 555 /* 556 * In case we sleep in malloc(), mark the segment present but deleted 557 * so that noone else tries to create the same key. 558 */ 559 shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; 560 shmseg->shm_perm.key = uap->key; 561 shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff; 562 shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK); 563 shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 564 565 /* 566 * We make sure that we have allocated a pager before we need 567 * to. 568 */ 569 if (shm_use_phys) { 570 shm_handle->shm_object = 571 phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0); 572 } else { 573 shm_handle->shm_object = 574 swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0); 575 } 576 vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING); 577 vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT); 578 579 shmseg->shm_internal = shm_handle; 580 shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid; 581 shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid; 582 shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) | 583 (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; 584 shmseg->shm_segsz = uap->size; 585 shmseg->shm_cpid = p->p_pid; 586 shmseg->shm_lpid = shmseg->shm_nattch = 0; 587 shmseg->shm_atime = shmseg->shm_dtime = 0; 588 shmseg->shm_ctime = time_second; 589 shm_committed += btoc(size); 590 shm_nused++; 591 if (shmseg->shm_perm.mode & SHMSEG_WANTED) { 592 /* 593 * Somebody else wanted this key while we were asleep. Wake 594 * them up now. 595 */ 596 shmseg->shm_perm.mode &= ~SHMSEG_WANTED; 597 wakeup((caddr_t)shmseg); 598 } 599 uap->sysmsg_result = shmid; 600 return 0; 601 } 602 603 /* 604 * MPALMOSTSAFE 605 */ 606 int 607 sys_shmget(struct shmget_args *uap) 608 { 609 struct thread *td = curthread; 610 struct proc *p = td->td_proc; 611 int segnum, mode, error; 612 613 if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL) 614 return (ENOSYS); 615 616 mode = uap->shmflg & ACCESSPERMS; 617 get_mplock(); 618 619 if (uap->key != IPC_PRIVATE) { 620 again: 621 segnum = shm_find_segment_by_key(uap->key); 622 if (segnum >= 0) { 623 error = shmget_existing(p, uap, mode, segnum); 624 if (error == EAGAIN) 625 goto again; 626 goto done; 627 } 628 if ((uap->shmflg & IPC_CREAT) == 0) { 629 error = ENOENT; 630 goto done; 631 } 632 } 633 error = shmget_allocate_segment(p, uap, mode); 634 done: 635 rel_mplock(); 636 return (error); 637 } 638 639 /* 640 * shmsys_args(int which, int a2, ...) (VARARGS) 641 * 642 * MPALMOSTSAFE 643 */ 644 int 645 sys_shmsys(struct shmsys_args *uap) 646 { 647 struct thread *td = curthread; 648 unsigned int which = (unsigned int)uap->which; 649 int error; 650 651 if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL) 652 return (ENOSYS); 653 654 if (which >= NELEM(shmcalls)) 655 return EINVAL; 656 get_mplock(); 657 bcopy(&uap->a2, &uap->which, 658 sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2)); 659 error = ((*shmcalls[which])(uap)); 660 rel_mplock(); 661 662 return(error); 663 } 664 665 void 666 shmfork(struct proc *p1, struct proc *p2) 667 { 668 struct shmmap_state *shmmap_s; 669 size_t size; 670 int i; 671 672 size = shminfo.shmseg * sizeof(struct shmmap_state); 673 shmmap_s = kmalloc(size, M_SHM, M_WAITOK); 674 bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size); 675 p2->p_vmspace->vm_shm = (caddr_t)shmmap_s; 676 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) 677 if (shmmap_s->shmid != -1) 678 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++; 679 } 680 681 void 682 shmexit(struct vmspace *vm) 683 { 684 struct shmmap_state *base, *shm; 685 int i; 686 687 if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) { 688 vm->vm_shm = NULL; 689 for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) { 690 if (shm->shmid != -1) 691 shm_delete_mapping(vm, shm); 692 } 693 kfree(base, M_SHM); 694 } 695 } 696 697 static void 698 shmrealloc(void) 699 { 700 int i; 701 struct shmid_ds *newsegs; 702 703 if (shmalloced >= shminfo.shmmni) 704 return; 705 706 newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK); 707 for (i = 0; i < shmalloced; i++) 708 bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0])); 709 for (; i < shminfo.shmmni; i++) { 710 shmsegs[i].shm_perm.mode = SHMSEG_FREE; 711 shmsegs[i].shm_perm.seq = 0; 712 } 713 kfree(shmsegs, M_SHM); 714 shmsegs = newsegs; 715 shmalloced = shminfo.shmmni; 716 } 717 718 static void 719 shminit(void *dummy) 720 { 721 int i; 722 723 /* 724 * If not overridden by a tunable set the maximum shm to 725 * 2/3 of main memory. 726 */ 727 if (shminfo.shmall == 0) 728 shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3; 729 730 shminfo.shmmax = shminfo.shmall * PAGE_SIZE; 731 shmalloced = shminfo.shmmni; 732 shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK); 733 for (i = 0; i < shmalloced; i++) { 734 shmsegs[i].shm_perm.mode = SHMSEG_FREE; 735 shmsegs[i].shm_perm.seq = 0; 736 } 737 shm_last_free = 0; 738 shm_nused = 0; 739 shm_committed = 0; 740 } 741 SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL); 742