/*	$OpenBSD: rthread.c,v 1.3 2017/08/15 07:06:29 guenther Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/types.h>
#ifndef NO_PIC
#include <sys/exec_elf.h>
#pragma weak _DYNAMIC
#endif

#include <dlfcn.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tib.h>
#include <unistd.h>

#include "cancel.h"		/* in libc/include */
#include "thread_private.h"
#include "rthread.h"
#include "rthread_cb.h"

/*
 * Call nonstandard functions via names in the reserved namespace:
 *	dlctl() -> _dlctl()
 *	getthrid -> _thread_sys_getthrid
 */
typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));
REDIRECT_SYSCALL(getthrid);
/* weak stub to be overridden by ld.so */
int	dlctl(void *handle, int cmd, void *data) { return 0; }
/*
 * libc's signal wrappers hide SIGTHR; we need to call the real syscall
 * stubs _thread_sys_* directly.
 */
REDIRECT_SYSCALL(sigaction);
REDIRECT_SYSCALL(sigprocmask);
REDIRECT_SYSCALL(thrkill);

static int concurrency_level;	/* not used */

int _threads_ready;
int _post_threaded;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_atomic_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static _atomic_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
static struct pthread _initial_thread;

struct pthread_attr _rthread_attr_default = {
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
};

/*
 * internal support functions
 */
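/*
 * Minimal spinlock primitives built on _atomic_lock(); _spinlock()
 * busy-waits, yielding the CPU between attempts, and _spinlocktry()
 * is the non-blocking variant.  A sketch of the usage pattern seen
 * throughout this file:
 *
 *	_spinlock(&_thread_lock);
 *	... touch _thread_list ...
 *	_spinunlock(&_thread_lock);
 */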
void
_spinlock(volatile _atomic_lock_t *lock)
{
	while (_atomic_lock(lock))
		sched_yield();
}

int
_spinlocktry(volatile _atomic_lock_t *lock)
{
	return 0 == _atomic_lock(lock);
}

void
_spinunlock(volatile _atomic_lock_t *lock)
{
	*lock = _ATOMIC_LOCK_UNLOCKED;
}
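/*
 * Entry point for newly created threads: run the start routine and
 * turn its return value into a pthread_exit().
 */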
static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}
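/*
 * SIGTHR handler: acts on cancelation requests delivered by
 * pthread_cancel(), either immediately or by flagging a delayed
 * cancel for the code that owns the current cancel point.
 */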
static void
sigthr_handler(__unused int sig)
{
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if (!tib->tib_canceled || tib->tib_cantcancel)
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (tib->tib_cancel_point & CANCEL_POINT_DELAYED) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (tib->tib_cancel_point ||
	    (tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL))
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * A few basic callbacks for libc.  The first couple are only used
 * on archs where there isn't a fast TCB_GET()
 */
#ifndef TCB_HAVE_MD_GET
static int *
multi_threaded_errnoptr(void)
{
	return (&TIB_GET()->tib_errno);
}

static void *
multi_threaded_tcb(void)
{
	return (TCB_GET());
}
#endif /* TCB_HAVE_MD_GET */
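/*
 * libc callback (tc_canceled): invoked when a cancelation point in
 * libc notices this thread has been canceled.
 */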
void
_thread_canceled(void)
{
	pthread_exit(PTHREAD_CANCELED);
}
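/*
 * One-time setup of the threading machinery: wire the initial thread
 * into its TIB, register libc's thread callbacks, hand the dynamic
 * linker its lock function, and install the SIGTHR handler.
 */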
void
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct tib *tib;
	struct sigaction sa;

	tib = TIB_GET();
	tib->tib_thread = thread;
	thread->tib = tib;

	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	tib->tib_thread_flags = TIB_THREAD_INITIAL_STACK;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;
	thread->attr = _rthread_attr_default;

	/* get libc to start using our callbacks */
	{
		struct thread_callbacks cb = { 0 };

#ifndef TCB_HAVE_MD_GET
		cb.tc_errnoptr		= multi_threaded_errnoptr;
		cb.tc_tcb		= multi_threaded_tcb;
#endif
		cb.tc_canceled		= _thread_canceled;
		cb.tc_flockfile		= _thread_flockfile;
		cb.tc_ftrylockfile	= _thread_ftrylockfile;
		cb.tc_funlockfile	= _thread_funlockfile;
		cb.tc_malloc_lock	= _thread_malloc_lock;
		cb.tc_malloc_unlock	= _thread_malloc_unlock;
		cb.tc_atexit_lock	= _thread_atexit_lock;
		cb.tc_atexit_unlock	= _thread_atexit_unlock;
		cb.tc_atfork_lock	= _thread_atfork_lock;
		cb.tc_atfork_unlock	= _thread_atfork_unlock;
		cb.tc_arc4_lock		= _thread_arc4_lock;
		cb.tc_arc4_unlock	= _thread_arc4_unlock;
		cb.tc_mutex_lock	= _thread_mutex_lock;
		cb.tc_mutex_unlock	= _thread_mutex_unlock;
		cb.tc_mutex_destroy	= _thread_mutex_destroy;
		cb.tc_tag_lock		= _thread_tag_lock;
		cb.tc_tag_unlock	= _thread_tag_unlock;
		cb.tc_tag_storage	= _thread_tag_storage;
		cb.tc_fork		= _thread_fork;
		cb.tc_vfork		= _thread_vfork;
		_thread_set_callbacks(&cb, sizeof(cb));
	}

#ifndef NO_PIC
	if (_DYNAMIC) {
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = sigthr_handler;
	sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	_threads_ready = 1;

	_malloc_init(1);

	_rthread_debug(1, "rthread init\n");
}
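/* Queue a thread for the reaper, which frees its stack and TIB later. */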
static void
_rthread_free(pthread_t thread)
{
	_spinlock(&_thread_gc_lock);
	TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
	_spinunlock(&_thread_gc_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	if (!_threads_ready)
		_rthread_init();

	return (TIB_GET()->tib_thread);
}
DEF_STRONG(pthread_self);
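/*
 * Free the stack and TIB of every queued thread whose kernel thread
 * has fully exited (tib_tid is zero).
 */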
static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tib->tib_tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		if (thread != &_initial_thread) {
			_rthread_debug(3, "rthread reaping %p stack %p\n",
			    (void *)thread, (void *)thread->stack);
			_rthread_free_stack(thread->stack);
			_dl_free_tib(thread->tib, sizeof(*thread));
		} else {
			/* initial thread isn't part of TIB allocation */
			_rthread_debug(3, "rthread reaping %p (initial)\n",
			    (void *)thread);
			_dl_free_tib(thread->tib, 0);
		}
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}
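/*
 * Terminate the calling thread: run its cleanup handlers and TLS
 * destructors, unlink it from _thread_list, then either queue it for
 * the reaper (detached) or post donesem so a joiner can collect it.
 */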
void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	struct tib *tib = TIB_GET();
	pthread_t thread;

	if (!_threads_ready)
		_rthread_init();
	thread = tib->tib_thread;

	if (tib->tib_cantcancel & CANCEL_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up.  XXX write something to stderr?
		 */
		abort();
		//_exit(42);
	}

	tib->tib_cantcancel |= CANCEL_DYING;

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}

	__threxit(&tib->tib_tid);
	for(;;);
}
DEF_STRONG(pthread_exit);
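/*
 * Wait on the target's donesem until it exits, collect its return
 * value, and pass the dead thread to the reaper.  This is a delayed
 * cancelation point.
 */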
int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	struct tib *tib = TIB_GET();
	pthread_t self;
	PREP_CANCEL_POINT(tib);

	if (_post_threaded) {
#define GREATSCOTT "great scott! serious repercussions on future events!\n"
		write(2, GREATSCOTT, sizeof(GREATSCOTT) - 1);
		abort();
	}
	if (!_threads_ready)
		_rthread_init();
	self = tib->tib_thread;

	e = 0;
	ENTER_DELAYED_CANCEL_POINT(tib, self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;
		/*
		 * We should be the last having a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	LEAVE_CANCEL_POINT_INNER(tib, e);
	_rthread_reaper();
	return (e);
}
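/*
 * Mark a thread detached so it cleans up after itself on exit; if it
 * has already finished, hand it straight to the reaper instead.
 */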
int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}
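/*
 * Create a thread: allocate its TIB and stack, copy or inherit the
 * scheduling attributes, link it into _thread_list, and __tfork it
 * onto _rthread_start().
 */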
int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct tib *tib;
	pthread_t thread;
	struct __tfork param;
	int rc;

	if (!_threads_ready)
		_rthread_init();

	_rthread_reaper();

	tib = _dl_allocate_tib(sizeof(*thread));
	if (tib == NULL)
		return (ENOMEM);
	thread = tib->tib_thread;
	memset(thread, 0, sizeof(*thread));
	thread->tib = tib;
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	tib->tib_tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		pthread_t self = pthread_self();

		thread->attr.sched_policy = self->attr.sched_policy;
		thread->attr.sched_param = self->attr.sched_param;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	param.tf_tcb = TIB_TO_TCB(tib);
	param.tf_tid = &tib->tib_tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rthread_free_stack(thread->stack);
fail1:
	_dl_free_tib(tib, sizeof(*thread));

	return (rc);
}

int
pthread_kill(pthread_t thread, int sig)
{
	struct tib *tib = thread->tib;

	if (sig == SIGTHR)
		return (EINVAL);
	if (thrkill(tib->tib_tid, sig, TIB_TO_TCB(tib)))
		return (errno);
	return (0);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}
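/*
 * Request cancelation: mark the target canceled and, unless it is
 * already dying or has cancelation disabled, poke it with SIGTHR so
 * sigthr_handler() can act on the request.
 */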
int
pthread_cancel(pthread_t thread)
{
	struct tib *tib = thread->tib;
	pid_t tid = tib->tib_tid;

	if (tib->tib_canceled == 0 && tid != 0 &&
	    (tib->tib_cantcancel & CANCEL_DYING) == 0) {
		tib->tib_canceled = 1;

		if ((tib->tib_cantcancel & CANCEL_DISABLED) == 0) {
			thrkill(tid, SIGTHR, TIB_TO_TCB(tib));
			return (0);
		}
	}
	return (0);
}

void
pthread_testcancel(void)
{
	struct tib *tib = TIB_GET();

	if (tib->tib_canceled && (tib->tib_cantcancel & CANCEL_DISABLED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	struct tib *tib = TIB_GET();
	int oldstate;

	oldstate = tib->tib_cantcancel & CANCEL_DISABLED ?
	    PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		tib->tib_cantcancel &= ~CANCEL_DISABLED;
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		tib->tib_cantcancel |= CANCEL_DISABLED;
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}
DEF_STRONG(pthread_setcancelstate);

int
pthread_setcanceltype(int type, int *oldtypep)
{
	struct tib *tib = TIB_GET();
	int oldtype;

	oldtype = tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL ?
	    PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		tib->tib_thread_flags &= ~TIB_THREAD_ASYNC_CANCEL;
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		tib->tib_thread_flags |= TIB_THREAD_ASYNC_CANCEL;
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}
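/*
 * Cleanup handlers live on a per-thread singly linked list and are run
 * newest-first by pthread_exit(); pthread_cleanup_pop(1) pops and runs
 * the newest handler immediately.  A sketch of the usual pairing, with
 * placeholder names unlock_fn and lock:
 *
 *	pthread_cleanup_push(unlock_fn, &lock);
 *	... cancelable work ...
 *	pthread_cleanup_pop(1);
 */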
void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags 0x%x name %s\n", thread->tib->tib_tid,
		    thread->tib->tib_thread_flags, thread->name);
	_spinunlock(&_thread_lock);
}
#ifndef NO_PIC
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 * what == 0 locks, what == 1 unlocks, and any other value reinitializes
 * the lock state in the child after fork().
 */
void
_rthread_dl_lock(int what)
{
	static _atomic_lock_t lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0, NULL, &lock, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else if (what == 1) {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	} else {
		/* reinit: used in child after fork to clear the queue */
		lock = _SPINLOCK_UNLOCKED;
		if (--count == 0)
			owner = NULL;
		TAILQ_INIT(&lockers);
	}
}
#endif