/*	$OpenBSD: kern_exit.c,v 1.185 2020/03/01 18:50:52 mpi Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/pledge.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif
#include <sys/witness.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include "kcov.h"
#if NKCOV > 0
#include <sys/kcov.h>
#endif

void	proc_finish_wait(struct proc *, struct proc *);
void	process_zap(struct process *);
void	proc_free(struct proc *);
void	unveil_destroy(struct process *ps);

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, SCARG(uap, rval), 0, EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}
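
/*
 * For illustration (userland side, assumed standard libc behaviour and
 * not part of this file): sys_exit() is reached through the _exit(2)
 * stub, e.g.
 *
 *	#include <unistd.h>
 *
 *	_exit(1);		never returns; ends up in sys_exit()
 */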

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, 0, EXIT_THREAD);

	return (0);
}
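
/*
 * For illustration (an assumption about the userland side, not defined
 * here): pthread_exit(3) eventually issues __threxit(2), passing a
 * pointer that is zeroed above so that a joining thread can observe
 * that this thread has exited.
 */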

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
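/*
 * Typical call patterns, sketched from the callers in this file and the
 * assumed signal-delivery path (not an exhaustive list):
 *
 *	exit1(p, status, 0, EXIT_NORMAL)	voluntary exit, see sys_exit()
 *	exit1(p, 0, signum, EXIT_NORMAL)	fatal signal, see sigexit()
 *	exit1(p, 0, 0, EXIT_THREAD)		one thread exits, see sys___threxit()
 */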
void
exit1(struct proc *p, int xexit, int xsig, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (!P_HASSIBLING(p)) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

	if (flags == EXIT_NORMAL) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)", xsig, xexit);

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_xexit = xexit;
		pr->ps_xsig  = xsig;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	if ((p->p_flag & P_THREAD) == 0) {
		/* the main thread must wait because it has the pid, etc. */
		while (pr->ps_refcnt > 1)
			tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);
	}

	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;
	if ((p->p_flag & P_THREAD) == 0)
		pr->ps_siglist = 0;

#if NKCOV > 0
	kcov_exit(p);
#endif

	if ((p->p_flag & P_THREAD) == 0) {
		sigio_freelist(&pr->ps_sigiolst);

		/* close open files and release open-file table */
		fdfree(p);

		timeout_del(&pr->ps_realit_to);
		timeout_del(&pr->ps_rucheck_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		killjobc(pr);
#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		unveil_destroy(pr);

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_hash);
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(pr->ps_pid);
		}

		/*
		 * Give orphaned children to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is a zombie */
			wakeup(initprocess);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			proc_reparent(qr, initprocess);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				atomic_clearbits_int(&qr->ps_flags, PS_TRACED);
				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			}
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);
	tuagg(pr, p);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/* notify interested parties of our demise and clean up */
		knote_processexit(p);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			proc_reparent(pr, initprocess);
			wakeup(ppr);
		}

		/*
		 * Release the process's signal state.
		 */
		sigactsfree(pr);
	}

	/* just a thread? detach it from its process */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		if (--pr->ps_refcnt == 1)
			wakeup(&pr->ps_threads);
		KASSERT(pr->ps_refcnt > 0);
	}

	/* Release the thread's read reference to the resource limits. */
	if (p->p_limit != NULL) {
		struct plimit *limit;

		limit = p->p_limit;
		p->p_limit = NULL;
		lim_free(limit);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to link up to deadproc.
 */
struct mutex deadproc_mutex =
    MUTEX_INITIALIZER_FLAGS(IPL_NONE, "deadproc", MTX_NOWITNESS);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	nthreads--;
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
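/*
 * For illustration (an assumption about kernel bring-up elsewhere, not
 * shown in this file): the reaper is a kernel thread, started roughly as
 *
 *	if (kthread_create(reaper, NULL, NULL, "reaper"))
 *		panic("could not create reaper");
 */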
void
reaper(void *arg)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep_nsec(&deadproc, &deadproc_mutex, PVM, "reaper",
			    INFSLP);

		/* Remove the dead thread from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		WITNESS_THREAD_EXIT(p);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_uarea_free(p);
		p->p_vmspace = NULL;		/* zap the thread's copy */

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			/* Release the rest of the process's vmspace */
			uvm_exit(pr);

			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
				prsignal(pr->ps_pptr, SIGCHLD);

				/* Wake up the parent so it can get exit status. */
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us. Just zap the process now */
				process_zap(pr);
			}
		}

		KERNEL_UNLOCK();
	}
}

int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int status, error;

	error = dowait4(q, SCARG(uap, pid),
	    SCARG(uap, status) ? &status : NULL,
	    SCARG(uap, options), SCARG(uap, rusage) ? &ru : NULL, retval);
	if (error == 0 && retval[0] > 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && retval[0] > 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}
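
/*
 * Userland view, for illustration only (plain wait4(2) usage, nothing
 * specific to this implementation):
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(WAIT_ANY, &status, WUNTRACED, &ru);
 *	if (pid > 0 && WIFSTOPPED(status))
 *		... stopped child, reported by dowait4() below ...
 */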

int
dowait4(struct proc *q, pid_t pid, int *statusp, int options,
    struct rusage *rusage, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

	if (pid == 0)
		pid = -q->p_p->ps_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (pid != WAIT_ANY &&
		    pr->ps_pid != pid &&
		    pr->ps_pgid != -pid))
			continue;

		p = pr->ps_mainproc;

		nfound++;
		if (pr->ps_flags & PS_ZOMBIE) {
			retval[0] = pr->ps_pid;

			if (statusp != NULL)
				*statusp = W_EXITCODE(pr->ps_xexit,
				    pr->ps_xsig);
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			proc_finish_wait(q, p);
			return (0);
		}
		if (pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			single_thread_wait(pr);

			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = pr->ps_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    options & WUNTRACED)) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = pr->ps_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = pr->ps_pid;

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep_nsec(q->p_p, PWAIT | PCATCH, "wait", INFSLP)) != 0)
		return (error);
	goto loop;
}
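
/*
 * For reference, a sketch of how the encodings above map to the standard
 * <sys/wait.h> macros seen by userland (defined there, not in this file):
 *
 *	W_EXITCODE(xexit, xsig)	->	WIFEXITED()/WEXITSTATUS() or
 *					WIFSIGNALED()/WTERMSIG()
 *	W_STOPCODE(xsig)	->	WIFSTOPPED()/WSTOPSIG()
 *	_WCONTINUED		->	WIFCONTINUED()
 */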

void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if (pr->ps_oppid != 0 && (pr->ps_oppid != pr->ps_pptr->ps_pid) &&
	   (tr = prfind(pr->ps_oppid))) {
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		pr->ps_oppid = 0;
		proc_reparent(pr, tr);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(pr->ps_pid);
		process_zap(pr);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
	child->ps_pptr = parent;
}

void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_refcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	lim_free(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}
657