xref: /netbsd-src/sys/kern/sysv_shm.c (revision 0ec00ddcf529f3e4a0181fa6dd641b3b9a9e5ccc)
1 /*	$NetBSD: sysv_shm.c,v 1.142 2024/03/02 08:59:47 mlelstv Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, and by Mindaugas Rasiukevicius.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. All advertising materials mentioning features or use of this software
45  *    must display the following acknowledgement:
46  *	This product includes software developed by Adam Glass and Charles M.
47  *	Hannum.
48  * 4. The names of the authors may not be used to endorse or promote products
49  *    derived from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
52  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54  * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
55  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61  */
62 
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.142 2024/03/02 08:59:47 mlelstv Exp $");
65 
66 #ifdef _KERNEL_OPT
67 #include "opt_sysv.h"
68 #endif
69 
70 #include <sys/param.h>
71 #include <sys/kernel.h>
72 #include <sys/kmem.h>
73 #include <sys/shm.h>
74 #include <sys/mutex.h>
75 #include <sys/mman.h>
76 #include <sys/stat.h>
77 #include <sys/sysctl.h>
78 #include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
79 #include <sys/syscallargs.h>
80 #include <sys/queue.h>
81 #include <sys/kauth.h>
82 
83 #include <uvm/uvm_extern.h>
84 #include <uvm/uvm_object.h>
85 
86 struct shmmap_entry {
87 	SLIST_ENTRY(shmmap_entry) next;
88 	vaddr_t va;
89 	int shmid;
90 };
91 
92 int			shm_nused		__cacheline_aligned;
93 struct shmid_ds *	shmsegs			__read_mostly;
94 
95 static kmutex_t		shm_lock		__cacheline_aligned;
96 static kcondvar_t *	shm_cv			__cacheline_aligned;
97 static int		shm_last_free		__cacheline_aligned;
98 static size_t		shm_committed		__cacheline_aligned;
99 static int		shm_use_phys		__read_mostly;
100 
101 static kcondvar_t	shm_realloc_cv;
102 static bool		shm_realloc_state;
103 static u_int		shm_realloc_disable;
104 
105 struct shmmap_state {
106 	unsigned int nitems;
107 	unsigned int nrefs;
108 	SLIST_HEAD(, shmmap_entry) entries;
109 };
110 
111 extern int kern_has_sysvshm;
112 
113 SYSCTL_SETUP_PROTO(sysctl_ipc_shm_setup);
114 
115 #ifdef SHMDEBUG
116 #define SHMPRINTF(a) printf a
117 #else
118 #define SHMPRINTF(a)
119 #endif
120 
121 static int shmrealloc(int);
122 
123 /*
124  * Find the shared memory segment permission by the index. Only used by
125  * compat_linux to implement SHM_STAT.
126  */
127 int
shm_find_segment_perm_by_index(int index,struct ipc_perm * perm)128 shm_find_segment_perm_by_index(int index, struct ipc_perm *perm)
129 {
130 	struct shmid_ds *shmseg;
131 
132 	mutex_enter(&shm_lock);
133 	if (index < 0 || index >= shminfo.shmmni) {
134 		mutex_exit(&shm_lock);
135 		return EINVAL;
136 	}
137 	shmseg = &shmsegs[index];
138 	memcpy(perm, &shmseg->shm_perm, sizeof(*perm));
139 	mutex_exit(&shm_lock);
140 	return 0;
141 }
142 
143 /*
144  * Find the shared memory segment by the identifier.
145  *  => must be called with shm_lock held;
146  */
147 static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)148 shm_find_segment_by_shmid(int shmid)
149 {
150 	int segnum;
151 	struct shmid_ds *shmseg;
152 
153 	KASSERT(mutex_owned(&shm_lock));
154 
155 	segnum = IPCID_TO_IX(shmid);
156 	if (segnum < 0 || segnum >= shminfo.shmmni)
157 		return NULL;
158 	shmseg = &shmsegs[segnum];
159 	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
160 		return NULL;
161 	if ((shmseg->shm_perm.mode &
162 	    (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
163 		return NULL;
164 	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
165 		return NULL;
166 
167 	return shmseg;
168 }
169 
170 /*
171  * Free memory segment.
172  *  => must be called with shm_lock held;
173  */
174 static void
shm_free_segment(int segnum)175 shm_free_segment(int segnum)
176 {
177 	struct shmid_ds *shmseg;
178 	size_t size;
179 	bool wanted;
180 
181 	KASSERT(mutex_owned(&shm_lock));
182 
183 	shmseg = &shmsegs[segnum];
184 	SHMPRINTF(("shm freeing key 0x%lx seq 0x%x\n",
185 	    shmseg->shm_perm._key, shmseg->shm_perm._seq));
186 
187 	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
188 	wanted = (shmseg->shm_perm.mode & SHMSEG_WANTED);
189 
190 	shmseg->_shm_internal = NULL;
191 	shm_committed -= btoc(size);
192 	shm_nused--;
193 	shmseg->shm_perm.mode = SHMSEG_FREE;
194 	shm_last_free = segnum;
195 	if (wanted == true)
196 		cv_broadcast(&shm_cv[segnum]);
197 }
198 
199 /*
200  * Delete entry from the shm map.
201  *  => must be called with shm_lock held;
202  */
203 static struct uvm_object *
shm_delete_mapping(struct shmmap_state * shmmap_s,struct shmmap_entry * shmmap_se)204 shm_delete_mapping(struct shmmap_state *shmmap_s,
205     struct shmmap_entry *shmmap_se)
206 {
207 	struct uvm_object *uobj = NULL;
208 	struct shmid_ds *shmseg;
209 	int segnum;
210 
211 	KASSERT(mutex_owned(&shm_lock));
212 
213 	segnum = IPCID_TO_IX(shmmap_se->shmid);
214 	shmseg = &shmsegs[segnum];
215 	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
216 	shmmap_s->nitems--;
217 	shmseg->shm_dtime = time_second;
218 	if ((--shmseg->shm_nattch <= 0) &&
219 	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
220 		uobj = shmseg->_shm_internal;
221 		shm_free_segment(segnum);
222 	}
223 
224 	return uobj;
225 }
226 
227 /*
228  * Get a non-shared shm map for that vmspace.  Note, that memory
229  * allocation might be performed with lock held.
230  */
231 static struct shmmap_state *
shmmap_getprivate(struct proc * p)232 shmmap_getprivate(struct proc *p)
233 {
234 	struct shmmap_state *oshmmap_s, *shmmap_s;
235 	struct shmmap_entry *oshmmap_se, *shmmap_se;
236 
237 	KASSERT(mutex_owned(&shm_lock));
238 
239 	/* 1. A shm map with refcnt = 1, used by ourselves, thus return */
240 	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
241 	if (oshmmap_s && oshmmap_s->nrefs == 1)
242 		return oshmmap_s;
243 
244 	/* 2. No shm map preset - create a fresh one */
245 	shmmap_s = kmem_zalloc(sizeof(struct shmmap_state), KM_SLEEP);
246 	shmmap_s->nrefs = 1;
247 	SLIST_INIT(&shmmap_s->entries);
248 	p->p_vmspace->vm_shm = (void *)shmmap_s;
249 
250 	if (oshmmap_s == NULL)
251 		return shmmap_s;
252 
253 	SHMPRINTF(("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
254 	    p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs));
255 
256 	/* 3. A shared shm map, copy to a fresh one and adjust refcounts */
257 	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
258 		shmmap_se = kmem_alloc(sizeof(struct shmmap_entry), KM_SLEEP);
259 		shmmap_se->va = oshmmap_se->va;
260 		shmmap_se->shmid = oshmmap_se->shmid;
261 		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
262 	}
263 	shmmap_s->nitems = oshmmap_s->nitems;
264 	oshmmap_s->nrefs--;
265 
266 	return shmmap_s;
267 }
268 
269 /*
270  * Lock/unlock the memory.
271  *  => must be called with shm_lock held;
272  */
273 static int
shm_memlock(struct shmid_ds * shmseg,int shmid,int cmd)274 shm_memlock(struct shmid_ds *shmseg, int shmid, int cmd)
275 {
276 	size_t size;
277 	int error;
278 
279 	KASSERT(mutex_owned(&shm_lock));
280 
281 	size = round_page(shmseg->shm_segsz);
282 
283 	if (cmd == SHM_LOCK && (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
284 		/* Wire the object and map, then tag it */
285 		error = uvm_obj_wirepages(shmseg->_shm_internal,
286 		    0, size, NULL);
287 		if (error)
288 			return EIO;
289 		shmseg->shm_perm.mode |= SHMSEG_WIRED;
290 
291 	} else if (cmd == SHM_UNLOCK &&
292 	    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
293 		/* Unwire the object, then untag it */
294 		uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
295 		shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
296 	}
297 
298 	return 0;
299 }
300 
301 /*
302  * Unmap shared memory.
303  */
304 int
sys_shmdt(struct lwp * l,const struct sys_shmdt_args * uap,register_t * retval)305 sys_shmdt(struct lwp *l, const struct sys_shmdt_args *uap, register_t *retval)
306 {
307 	/* {
308 		syscallarg(const void *) shmaddr;
309 	} */
310 	struct proc *p = l->l_proc;
311 	struct shmmap_state *shmmap_s1, *shmmap_s;
312 	struct shmmap_entry *shmmap_se;
313 	struct uvm_object *uobj;
314 	struct shmid_ds *shmseg;
315 	size_t size;
316 
317 	mutex_enter(&shm_lock);
318 	/* In case of reallocation, we will wait for completion */
319 	while (__predict_false(shm_realloc_state))
320 		cv_wait(&shm_realloc_cv, &shm_lock);
321 
322 	shmmap_s1 = (struct shmmap_state *)p->p_vmspace->vm_shm;
323 	if (shmmap_s1 == NULL) {
324 		mutex_exit(&shm_lock);
325 		return EINVAL;
326 	}
327 
328 	/* Find the map entry */
329 	SLIST_FOREACH(shmmap_se, &shmmap_s1->entries, next)
330 		if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
331 			break;
332 	if (shmmap_se == NULL) {
333 		mutex_exit(&shm_lock);
334 		return EINVAL;
335 	}
336 
337 	shmmap_s = shmmap_getprivate(p);
338 	if (shmmap_s != shmmap_s1) {
339 		/* Map has been copied, lookup entry in new map */
340 		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
341 			if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
342 				break;
343 		if (shmmap_se == NULL) {
344 			mutex_exit(&shm_lock);
345 			return EINVAL;
346 		}
347 	}
348 
349 	SHMPRINTF(("shmdt: vm %p: remove %d @%lx\n",
350 	    p->p_vmspace, shmmap_se->shmid, shmmap_se->va));
351 
352 	/* Delete the entry from shm map */
353 	uobj = shm_delete_mapping(shmmap_s, shmmap_se);
354 	shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
355 	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
356 	mutex_exit(&shm_lock);
357 
358 	uvm_deallocate(&p->p_vmspace->vm_map, shmmap_se->va, size);
359 	if (uobj != NULL) {
360 		uao_detach(uobj);
361 	}
362 	kmem_free(shmmap_se, sizeof(struct shmmap_entry));
363 
364 	return 0;
365 }
366 
367 /*
368  * Map shared memory.
369  */
370 int
sys_shmat(struct lwp * l,const struct sys_shmat_args * uap,register_t * retval)371 sys_shmat(struct lwp *l, const struct sys_shmat_args *uap, register_t *retval)
372 {
373 	/* {
374 		syscallarg(int) shmid;
375 		syscallarg(const void *) shmaddr;
376 		syscallarg(int) shmflg;
377 	} */
378 	int error, flags = 0;
379 	struct proc *p = l->l_proc;
380 	kauth_cred_t cred = l->l_cred;
381 	struct shmid_ds *shmseg;
382 	struct shmmap_state *shmmap_s;
383 	struct shmmap_entry *shmmap_se;
384 	struct uvm_object *uobj;
385 	struct vmspace *vm;
386 	vaddr_t attach_va;
387 	vm_prot_t prot;
388 	vsize_t size;
389 
390 	/* Allocate a new map entry and set it */
391 	shmmap_se = kmem_alloc(sizeof(struct shmmap_entry), KM_SLEEP);
392 	shmmap_se->shmid = SCARG(uap, shmid);
393 
394 	mutex_enter(&shm_lock);
395 	/* In case of reallocation, we will wait for completion */
396 	while (__predict_false(shm_realloc_state))
397 		cv_wait(&shm_realloc_cv, &shm_lock);
398 
399 	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
400 	if (shmseg == NULL) {
401 		error = EINVAL;
402 		goto err;
403 	}
404 	error = ipcperm(cred, &shmseg->shm_perm,
405 	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
406 	if (error)
407 		goto err;
408 
409 	vm = p->p_vmspace;
410 	shmmap_s = (struct shmmap_state *)vm->vm_shm;
411 	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg) {
412 		error = EMFILE;
413 		goto err;
414 	}
415 
416 	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
417 	prot = VM_PROT_READ;
418 	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
419 		prot |= VM_PROT_WRITE;
420 	if (SCARG(uap, shmaddr)) {
421 		flags |= UVM_FLAG_FIXED;
422 		if (SCARG(uap, shmflg) & SHM_RND)
423 			attach_va =
424 			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
425 		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
426 			attach_va = (vaddr_t)SCARG(uap, shmaddr);
427 		else {
428 			error = EINVAL;
429 			goto err;
430 		}
431 	} else {
432 		/* This is just a hint to uvm_map() about where to put it. */
433 		attach_va = p->p_emul->e_vm_default_addr(p,
434 		    (vaddr_t)vm->vm_daddr, size,
435 		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
436 	}
437 
438 	/*
439 	 * Create a map entry, add it to the list and increase the counters.
440 	 */
441 	shmmap_s = shmmap_getprivate(p);
442 	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
443 	shmmap_s->nitems++;
444 	shmseg->shm_lpid = p->p_pid;
445 	shmseg->shm_nattch++;
446 
447 	/*
448 	 * Map the segment into the address space.
449 	 */
450 	uobj = shmseg->_shm_internal;
451 	uao_reference(uobj);
452 	error = uvm_map(&vm->vm_map, &attach_va, size, uobj, 0, 0,
453 	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
454 	if (error)
455 		goto err_detach;
456 
457 	/* Set the new address, and update the time */
458 	shmmap_se->va = attach_va;
459 	shmseg->shm_atime = time_second;
460 	retval[0] = attach_va;
461 	SHMPRINTF(("shmat: vm %p: add %d @%lx\n",
462 	    p->p_vmspace, shmmap_se->shmid, attach_va));
463 err:
464 	mutex_exit(&shm_lock);
465 	if (error && shmmap_se) {
466 		kmem_free(shmmap_se, sizeof(struct shmmap_entry));
467 	}
468 	return error;
469 
470 err_detach:
471 	uao_detach(uobj);
472 	uobj = shm_delete_mapping(shmmap_s, shmmap_se);
473 	mutex_exit(&shm_lock);
474 	if (uobj != NULL) {
475 		uao_detach(uobj);
476 	}
477 	kmem_free(shmmap_se, sizeof(struct shmmap_entry));
478 	return error;
479 }
480 
481 /*
482  * Shared memory control operations.
483  */
484 int
sys___shmctl50(struct lwp * l,const struct sys___shmctl50_args * uap,register_t * retval)485 sys___shmctl50(struct lwp *l, const struct sys___shmctl50_args *uap,
486     register_t *retval)
487 {
488 	/* {
489 		syscallarg(int) shmid;
490 		syscallarg(int) cmd;
491 		syscallarg(struct shmid_ds *) buf;
492 	} */
493 	struct shmid_ds shmbuf;
494 	int cmd, error;
495 
496 	cmd = SCARG(uap, cmd);
497 	if (cmd == IPC_SET) {
498 		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
499 		if (error)
500 			return error;
501 	}
502 
503 	error = shmctl1(l, SCARG(uap, shmid), cmd,
504 	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);
505 
506 	if (error == 0 && cmd == IPC_STAT)
507 		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));
508 
509 	return error;
510 }
511 
512 int
shmctl1(struct lwp * l,int shmid,int cmd,struct shmid_ds * shmbuf)513 shmctl1(struct lwp *l, int shmid, int cmd, struct shmid_ds *shmbuf)
514 {
515 	struct uvm_object *uobj = NULL;
516 	kauth_cred_t cred = l->l_cred;
517 	struct shmid_ds *shmseg;
518 	int error = 0;
519 
520 	mutex_enter(&shm_lock);
521 	/* In case of reallocation, we will wait for completion */
522 	while (__predict_false(shm_realloc_state))
523 		cv_wait(&shm_realloc_cv, &shm_lock);
524 
525 	shmseg = shm_find_segment_by_shmid(shmid);
526 	if (shmseg == NULL) {
527 		mutex_exit(&shm_lock);
528 		return EINVAL;
529 	}
530 
531 	switch (cmd) {
532 	case IPC_STAT:
533 		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
534 			break;
535 		memset(shmbuf, 0, sizeof *shmbuf);
536 		shmbuf->shm_perm = shmseg->shm_perm;
537 		shmbuf->shm_perm.mode &= 0777;
538 		shmbuf->shm_segsz = shmseg->shm_segsz;
539 		shmbuf->shm_lpid = shmseg->shm_lpid;
540 		shmbuf->shm_cpid = shmseg->shm_cpid;
541 		shmbuf->shm_nattch = shmseg->shm_nattch;
542 		shmbuf->shm_atime = shmseg->shm_atime;
543 		shmbuf->shm_dtime = shmseg->shm_dtime;
544 		shmbuf->shm_ctime = shmseg->shm_ctime;
545 		break;
546 	case IPC_SET:
547 		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
548 			break;
549 		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
550 		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
551 		shmseg->shm_perm.mode =
552 		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
553 		    (shmbuf->shm_perm.mode & ACCESSPERMS);
554 		shmseg->shm_ctime = time_second;
555 		break;
556 	case IPC_RMID:
557 		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
558 			break;
559 		shmseg->shm_perm._key = IPC_PRIVATE;
560 		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
561 		if (shmseg->shm_nattch <= 0) {
562 			uobj = shmseg->_shm_internal;
563 			shm_free_segment(IPCID_TO_IX(shmid));
564 		}
565 		break;
566 	case SHM_LOCK:
567 	case SHM_UNLOCK:
568 		if ((error = kauth_authorize_system(cred,
569 		    KAUTH_SYSTEM_SYSVIPC,
570 		    (cmd == SHM_LOCK) ? KAUTH_REQ_SYSTEM_SYSVIPC_SHM_LOCK :
571 		    KAUTH_REQ_SYSTEM_SYSVIPC_SHM_UNLOCK, NULL, NULL, NULL)) != 0)
572 			break;
573 		error = shm_memlock(shmseg, shmid, cmd);
574 		break;
575 	default:
576 		error = EINVAL;
577 	}
578 
579 	mutex_exit(&shm_lock);
580 	if (uobj != NULL)
581 		uao_detach(uobj);
582 	return error;
583 }
584 
585 /*
586  * Try to take an already existing segment.
587  *  => must be called with shm_lock held;
588  *  => called from one place, thus, inline;
589  */
590 static inline int
shmget_existing(struct lwp * l,const struct sys_shmget_args * uap,int mode,register_t * retval)591 shmget_existing(struct lwp *l, const struct sys_shmget_args *uap, int mode,
592     register_t *retval)
593 {
594 	struct shmid_ds *shmseg;
595 	kauth_cred_t cred = l->l_cred;
596 	int segnum, error;
597 again:
598 	KASSERT(mutex_owned(&shm_lock));
599 
600 	/* Find segment by key */
601 	for (segnum = 0; segnum < shminfo.shmmni; segnum++)
602 		if ((shmsegs[segnum].shm_perm.mode & SHMSEG_ALLOCATED) &&
603 		    shmsegs[segnum].shm_perm._key == SCARG(uap, key))
604 			break;
605 	if (segnum == shminfo.shmmni) {
606 		/* Not found */
607 		return -1;
608 	}
609 
610 	shmseg = &shmsegs[segnum];
611 	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
612 		/*
613 		 * This segment is in the process of being allocated.  Wait
614 		 * until it's done, and look the key up again (in case the
615 		 * allocation failed or it was freed).
616 		 */
617 		shmseg->shm_perm.mode |= SHMSEG_WANTED;
618 		error = cv_wait_sig(&shm_cv[segnum], &shm_lock);
619 		if (error)
620 			return error;
621 		goto again;
622 	}
623 
624 	/*
625 	 * First check the flags, to generate a useful error when a
626 	 * segment already exists.
627 	 */
628 	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
629 	    (IPC_CREAT | IPC_EXCL))
630 		return EEXIST;
631 
632 	/* Check the permission and segment size. */
633 	error = ipcperm(cred, &shmseg->shm_perm, mode);
634 	if (error)
635 		return error;
636 	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
637 		return EINVAL;
638 
639 	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
640 	return 0;
641 }
642 
643 int
sys_shmget(struct lwp * l,const struct sys_shmget_args * uap,register_t * retval)644 sys_shmget(struct lwp *l, const struct sys_shmget_args *uap, register_t *retval)
645 {
646 	/* {
647 		syscallarg(key_t) key;
648 		syscallarg(size_t) size;
649 		syscallarg(int) shmflg;
650 	} */
651 	struct shmid_ds *shmseg;
652 	kauth_cred_t cred = l->l_cred;
653 	key_t key = SCARG(uap, key);
654 	size_t size;
655 	int error, mode, segnum;
656 	bool lockmem;
657 
658 	mode = SCARG(uap, shmflg) & ACCESSPERMS;
659 	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
660 		mode |= SHMSEG_RMLINGER;
661 
662 	SHMPRINTF(("shmget: key 0x%lx size 0x%zx shmflg 0x%x mode 0x%x\n",
663 	    SCARG(uap, key), SCARG(uap, size), SCARG(uap, shmflg), mode));
664 
665 	mutex_enter(&shm_lock);
666 	/* In case of reallocation, we will wait for completion */
667 	while (__predict_false(shm_realloc_state))
668 		cv_wait(&shm_realloc_cv, &shm_lock);
669 
670 	if (key != IPC_PRIVATE) {
671 		error = shmget_existing(l, uap, mode, retval);
672 		if (error != -1) {
673 			mutex_exit(&shm_lock);
674 			return error;
675 		}
676 		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0) {
677 			mutex_exit(&shm_lock);
678 			return ENOENT;
679 		}
680 	}
681 	error = 0;
682 
683 	/*
684 	 * Check the for the limits.
685 	 */
686 	size = SCARG(uap, size);
687 	if (size < shminfo.shmmin || size > shminfo.shmmax) {
688 		mutex_exit(&shm_lock);
689 		return EINVAL;
690 	}
691 	if (shm_nused >= shminfo.shmmni) {
692 		mutex_exit(&shm_lock);
693 		return ENOSPC;
694 	}
695 	size = round_page(size);
696 	if (shm_committed + btoc(size) > shminfo.shmall) {
697 		mutex_exit(&shm_lock);
698 		return ENOMEM;
699 	}
700 
701 	/* Find the first available segment */
702 	if (shm_last_free < 0) {
703 		for (segnum = 0; segnum < shminfo.shmmni; segnum++)
704 			if (shmsegs[segnum].shm_perm.mode & SHMSEG_FREE)
705 				break;
706 		KASSERT(segnum < shminfo.shmmni);
707 	} else {
708 		segnum = shm_last_free;
709 		shm_last_free = -1;
710 	}
711 
712 	/*
713 	 * Initialize the segment.
714 	 * We will drop the lock while allocating the memory, thus mark the
715 	 * segment present, but removed, that no other thread could take it.
716 	 * Also, disable reallocation, while lock is dropped.
717 	 */
718 	shmseg = &shmsegs[segnum];
719 	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
720 	shm_committed += btoc(size);
721 	shm_nused++;
722 	lockmem = shm_use_phys;
723 	shm_realloc_disable++;
724 	mutex_exit(&shm_lock);
725 
726 	/* Allocate the memory object and lock it if needed */
727 	shmseg->_shm_internal = uao_create(size, 0);
728 	if (lockmem) {
729 		/* Wire the pages and tag it */
730 		error = uvm_obj_wirepages(shmseg->_shm_internal, 0, size, NULL);
731 		if (error) {
732 			uao_detach(shmseg->_shm_internal);
733 			mutex_enter(&shm_lock);
734 			shm_free_segment(segnum);
735 			shm_realloc_disable--;
736 			mutex_exit(&shm_lock);
737 			return error;
738 		}
739 	}
740 
741 	/*
742 	 * Please note, while segment is marked, there are no need to hold the
743 	 * lock, while setting it (except shm_perm.mode).
744 	 */
745 	shmseg->shm_perm._key = SCARG(uap, key);
746 	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
747 	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
748 
749 	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = kauth_cred_geteuid(cred);
750 	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = kauth_cred_getegid(cred);
751 	shmseg->shm_segsz = SCARG(uap, size);
752 	shmseg->shm_cpid = l->l_proc->p_pid;
753 	shmseg->shm_lpid = shmseg->shm_nattch = 0;
754 	shmseg->shm_atime = shmseg->shm_dtime = 0;
755 	shmseg->shm_ctime = time_second;
756 
757 	/*
758 	 * Segment is initialized.
759 	 * Enter the lock, mark as allocated, and notify waiters (if any).
760 	 * Also, unmark the state of reallocation.
761 	 */
762 	mutex_enter(&shm_lock);
763 	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
764 	    (mode & (ACCESSPERMS | SHMSEG_RMLINGER)) |
765 	    SHMSEG_ALLOCATED | (lockmem ? SHMSEG_WIRED : 0);
766 	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
767 		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
768 		cv_broadcast(&shm_cv[segnum]);
769 	}
770 	shm_realloc_disable--;
771 	cv_broadcast(&shm_realloc_cv);
772 	mutex_exit(&shm_lock);
773 
774 	return error;
775 }
776 
777 void
shmfork(struct vmspace * vm1,struct vmspace * vm2)778 shmfork(struct vmspace *vm1, struct vmspace *vm2)
779 {
780 	struct shmmap_state *shmmap_s;
781 	struct shmmap_entry *shmmap_se;
782 
783 	SHMPRINTF(("shmfork %p->%p\n", vm1, vm2));
784 	mutex_enter(&shm_lock);
785 	vm2->vm_shm = vm1->vm_shm;
786 	if (vm1->vm_shm) {
787 		shmmap_s = (struct shmmap_state *)vm1->vm_shm;
788 		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
789 			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
790 		shmmap_s->nrefs++;
791 	}
792 	mutex_exit(&shm_lock);
793 }
794 
795 void
shmexit(struct vmspace * vm)796 shmexit(struct vmspace *vm)
797 {
798 	struct shmmap_state *shmmap_s;
799 	struct shmmap_entry *shmmap_se;
800 
801 	mutex_enter(&shm_lock);
802 	shmmap_s = (struct shmmap_state *)vm->vm_shm;
803 	if (shmmap_s == NULL) {
804 		mutex_exit(&shm_lock);
805 		return;
806 	}
807 	vm->vm_shm = NULL;
808 
809 	if (--shmmap_s->nrefs > 0) {
810 		SHMPRINTF(("shmexit: vm %p drop ref (%d entries), refs = %d\n",
811 		    vm, shmmap_s->nitems, shmmap_s->nrefs));
812 		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
813 			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
814 		}
815 		mutex_exit(&shm_lock);
816 		return;
817 	}
818 
819 	SHMPRINTF(("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems));
820 	if (shmmap_s->nitems == 0) {
821 		mutex_exit(&shm_lock);
822 		kmem_free(shmmap_s, sizeof(struct shmmap_state));
823 		return;
824 	}
825 
826 	/*
827 	 * Delete the entry from shm map.
828 	 */
829 	for (;;) {
830 		struct shmid_ds *shmseg;
831 		struct uvm_object *uobj;
832 		size_t sz;
833 
834 		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
835 		KASSERT(shmmap_se != NULL);
836 
837 		shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
838 		sz = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
839 		/* shm_delete_mapping() removes from the list. */
840 		uobj = shm_delete_mapping(shmmap_s, shmmap_se);
841 		mutex_exit(&shm_lock);
842 
843 		uvm_deallocate(&vm->vm_map, shmmap_se->va, sz);
844 		if (uobj != NULL) {
845 			uao_detach(uobj);
846 		}
847 		kmem_free(shmmap_se, sizeof(struct shmmap_entry));
848 
849 		if (SLIST_EMPTY(&shmmap_s->entries)) {
850 			break;
851 		}
852 		mutex_enter(&shm_lock);
853 		KASSERT(!SLIST_EMPTY(&shmmap_s->entries));
854 	}
855 	kmem_free(shmmap_s, sizeof(struct shmmap_state));
856 }
857 
858 static int
shmrealloc(int newshmni)859 shmrealloc(int newshmni)
860 {
861 	vaddr_t v;
862 	struct shmid_ds *oldshmsegs, *newshmsegs;
863 	kcondvar_t *newshm_cv, *oldshm_cv;
864 	size_t sz;
865 	int i, lsegid, oldshmni;
866 
867 	if (newshmni < 1)
868 		return EINVAL;
869 
870 	/* Allocate new memory area */
871 	sz = ALIGN(newshmni * sizeof(struct shmid_ds)) +
872 	    ALIGN(newshmni * sizeof(kcondvar_t));
873 	sz = round_page(sz);
874 	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
875 	if (v == 0)
876 		return ENOMEM;
877 
878 	mutex_enter(&shm_lock);
879 	while (shm_realloc_state || shm_realloc_disable)
880 		cv_wait(&shm_realloc_cv, &shm_lock);
881 
882 	/*
883 	 * Get the number of last segment.  Fail we are trying to
884 	 * reallocate less memory than we use.
885 	 */
886 	lsegid = 0;
887 	for (i = 0; i < shminfo.shmmni; i++)
888 		if ((shmsegs[i].shm_perm.mode & SHMSEG_FREE) == 0)
889 			lsegid = i;
890 	if (lsegid >= newshmni) {
891 		mutex_exit(&shm_lock);
892 		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
893 		return EBUSY;
894 	}
895 	shm_realloc_state = true;
896 
897 	newshmsegs = (void *)v;
898 	newshm_cv = (void *)((uintptr_t)newshmsegs +
899 	    ALIGN(newshmni * sizeof(struct shmid_ds)));
900 
901 	/* Copy all memory to the new area */
902 	for (i = 0; i < shm_nused; i++) {
903 		cv_init(&newshm_cv[i], "shmwait");
904 		(void)memcpy(&newshmsegs[i], &shmsegs[i],
905 		    sizeof(newshmsegs[0]));
906 	}
907 
908 	/* Mark as free all new segments, if there is any */
909 	for (; i < newshmni; i++) {
910 		cv_init(&newshm_cv[i], "shmwait");
911 		newshmsegs[i].shm_perm.mode = SHMSEG_FREE;
912 		newshmsegs[i].shm_perm._seq = 0;
913 	}
914 
915 	oldshmsegs = shmsegs;
916 	oldshmni = shminfo.shmmni;
917 	shminfo.shmmni = newshmni;
918 	shmsegs = newshmsegs;
919 	shm_cv = newshm_cv;
920 
921 	/* Reallocation completed - notify all waiters, if any */
922 	shm_realloc_state = false;
923 	cv_broadcast(&shm_realloc_cv);
924 	mutex_exit(&shm_lock);
925 
926 	/* Release now unused resources. */
927 	oldshm_cv = (void *)((uintptr_t)oldshmsegs +
928 	    ALIGN(oldshmni * sizeof(struct shmid_ds)));
929 	for (i = 0; i < oldshmni; i++)
930 		cv_destroy(&oldshm_cv[i]);
931 
932 	sz = ALIGN(oldshmni * sizeof(struct shmid_ds)) +
933 	    ALIGN(oldshmni * sizeof(kcondvar_t));
934 	sz = round_page(sz);
935 	uvm_km_free(kernel_map, (vaddr_t)oldshmsegs, sz, UVM_KMF_WIRED);
936 
937 	return 0;
938 }
939 
940 int
shminit(void)941 shminit(void)
942 {
943 	vaddr_t v;
944 	size_t sz;
945 	int i;
946 
947 	mutex_init(&shm_lock, MUTEX_DEFAULT, IPL_NONE);
948 	cv_init(&shm_realloc_cv, "shmrealc");
949 
950 	/* Allocate the wired memory for our structures */
951 	sz = ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)) +
952 	    ALIGN(shminfo.shmmni * sizeof(kcondvar_t));
953 	sz = round_page(sz);
954 	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
955 	if (v == 0) {
956 		printf("sysv_shm: cannot allocate memory");
957 		return ENOMEM;
958 	}
959 	shmsegs = (void *)v;
960 	shm_cv = (void *)((uintptr_t)shmsegs +
961 	    ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)));
962 
963 	if (shminfo.shmmax == 0)
964 		shminfo.shmall = uimax(physmem / 4, 1024);
965 	else
966 		shminfo.shmall = shminfo.shmmax / PAGE_SIZE;
967 	shminfo.shmmax = (uint64_t)shminfo.shmall * PAGE_SIZE;
968 
969 	for (i = 0; i < shminfo.shmmni; i++) {
970 		cv_init(&shm_cv[i], "shmwait");
971 		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
972 		shmsegs[i].shm_perm._seq = 0;
973 	}
974 	shm_last_free = 0;
975 	shm_nused = 0;
976 	shm_committed = 0;
977 	shm_realloc_disable = 0;
978 	shm_realloc_state = false;
979 
980 	kern_has_sysvshm = 1;
981 
982 	/* Load the callback function pointers for the uvm subsystem */
983 	uvm_shmexit = shmexit;
984 	uvm_shmfork = shmfork;
985 
986 	return 0;
987 }
988 
989 int
shmfini(void)990 shmfini(void)
991 {
992 	size_t sz;
993 	int i;
994 	vaddr_t v = (vaddr_t)shmsegs;
995 
996 	mutex_enter(&shm_lock);
997 	if (shm_nused) {
998 		mutex_exit(&shm_lock);
999 		return 1;
1000 	}
1001 
1002 	/* Clear the callback function pointers for the uvm subsystem */
1003 	uvm_shmexit = NULL;
1004 	uvm_shmfork = NULL;
1005 
1006 	/* Destroy all condvars */
1007 	for (i = 0; i < shminfo.shmmni; i++)
1008 		cv_destroy(&shm_cv[i]);
1009 	cv_destroy(&shm_realloc_cv);
1010 
1011 	/* Free the allocated/wired memory */
1012 	sz = ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)) +
1013 	    ALIGN(shminfo.shmmni * sizeof(kcondvar_t));
1014 	sz = round_page(sz);
1015 	uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
1016 
1017 	/* Release and destroy our mutex */
1018 	mutex_exit(&shm_lock);
1019 	mutex_destroy(&shm_lock);
1020 
1021 	kern_has_sysvshm = 0;
1022 
1023 	return 0;
1024 }
1025 
1026 static int
sysctl_ipc_shmmni(SYSCTLFN_ARGS)1027 sysctl_ipc_shmmni(SYSCTLFN_ARGS)
1028 {
1029 	int newsize, error;
1030 	struct sysctlnode node;
1031 	node = *rnode;
1032 	node.sysctl_data = &newsize;
1033 
1034 	newsize = shminfo.shmmni;
1035 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1036 	if (error || newp == NULL)
1037 		return error;
1038 
1039 	sysctl_unlock();
1040 	error = shmrealloc(newsize);
1041 	sysctl_relock();
1042 	return error;
1043 }
1044 
1045 static int
sysctl_ipc_shmmaxpgs(SYSCTLFN_ARGS)1046 sysctl_ipc_shmmaxpgs(SYSCTLFN_ARGS)
1047 {
1048 	uint32_t newsize;
1049 	int error;
1050 	struct sysctlnode node;
1051 	node = *rnode;
1052 	node.sysctl_data = &newsize;
1053 
1054 	newsize = shminfo.shmall;
1055 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1056 	if (error || newp == NULL)
1057 		return error;
1058 
1059 	if (newsize < 1)
1060 		return EINVAL;
1061 
1062 	shminfo.shmall = newsize;
1063 	shminfo.shmmax = (uint64_t)shminfo.shmall * PAGE_SIZE;
1064 
1065 	return 0;
1066 }
1067 
1068 static int
sysctl_ipc_shmmax(SYSCTLFN_ARGS)1069 sysctl_ipc_shmmax(SYSCTLFN_ARGS)
1070 {
1071 	uint64_t newsize;
1072 	int error;
1073 	struct sysctlnode node;
1074 	node = *rnode;
1075 	node.sysctl_data = &newsize;
1076 
1077 	newsize = shminfo.shmmax;
1078 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1079 	if (error || newp == NULL)
1080 		return error;
1081 
1082 	if (newsize < PAGE_SIZE)
1083 		return EINVAL;
1084 
1085 	shminfo.shmmax = round_page(newsize);
1086 	shminfo.shmall = shminfo.shmmax / PAGE_SIZE;
1087 
1088 	return 0;
1089 }
1090 
1091 SYSCTL_SETUP(sysctl_ipc_shm_setup, "sysctl kern.ipc subtree setup")
1092 {
1093 
1094 	sysctl_createv(clog, 0, NULL, NULL,
1095 		CTLFLAG_PERMANENT,
1096 		CTLTYPE_NODE, "ipc",
1097 		SYSCTL_DESCR("SysV IPC options"),
1098 		NULL, 0, NULL, 0,
1099 		CTL_KERN, KERN_SYSVIPC, CTL_EOL);
1100 	sysctl_createv(clog, 0, NULL, NULL,
1101 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1102 		CTLTYPE_QUAD, "shmmax",
1103 		SYSCTL_DESCR("Max shared memory segment size in bytes"),
1104 		sysctl_ipc_shmmax, 0, &shminfo.shmmax, 0,
1105 		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAX, CTL_EOL);
1106 	sysctl_createv(clog, 0, NULL, NULL,
1107 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1108 		CTLTYPE_INT, "shmmni",
1109 		SYSCTL_DESCR("Max number of shared memory identifiers"),
1110 		sysctl_ipc_shmmni, 0, &shminfo.shmmni, 0,
1111 		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMNI, CTL_EOL);
1112 	sysctl_createv(clog, 0, NULL, NULL,
1113 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1114 		CTLTYPE_INT, "shmseg",
1115 		SYSCTL_DESCR("Max shared memory segments per process"),
1116 		NULL, 0, &shminfo.shmseg, 0,
1117 		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMSEG, CTL_EOL);
1118 	sysctl_createv(clog, 0, NULL, NULL,
1119 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1120 		CTLTYPE_INT, "shmmaxpgs",
1121 		SYSCTL_DESCR("Max amount of shared memory in pages"),
1122 		sysctl_ipc_shmmaxpgs, 0, &shminfo.shmall, 0,
1123 		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAXPGS, CTL_EOL);
1124 	sysctl_createv(clog, 0, NULL, NULL,
1125 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1126 		CTLTYPE_INT, "shm_use_phys",
1127 		SYSCTL_DESCR("Enable/disable locking of shared memory in "
1128 		    "physical memory"), NULL, 0, &shm_use_phys, 0,
1129 		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMUSEPHYS, CTL_EOL);
1130 }
1131