xref: /openbsd-src/lib/librthread/rthread.c (revision c7e8ea31cd41a963f06f0a8ba93948b06aa6b4a4)
/*	$OpenBSD: rthread.c,v 1.95 2017/07/27 16:35:08 tedu Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/types.h>
#ifndef NO_PIC
#include <sys/exec_elf.h>
#pragma weak _DYNAMIC
#endif

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dlfcn.h>
#include <tib.h>

#include <pthread.h>

#include "cancel.h"		/* in libc/include */
#include "thread_private.h"
#include "rthread.h"
#include "rthread_cb.h"

/*
 * Call nonstandard functions via names in the reserved namespace:
 *	dlctl() -> _dlctl()
 *	getthrid -> _thread_sys_getthrid
 */
typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));
REDIRECT_SYSCALL(getthrid);

/* weak stub to be overridden by ld.so */
int	dlctl(void *handle, int cmd, void *data) { return 0; }

/*
 * libc's signal wrappers hide SIGTHR; we need to call the real syscall
 * stubs _thread_sys_* directly.
 */
REDIRECT_SYSCALL(sigaction);
REDIRECT_SYSCALL(sigprocmask);
REDIRECT_SYSCALL(thrkill);

static int concurrency_level;	/* not used */

int _threads_ready;
int _post_threaded;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_atomic_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static _atomic_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
static struct pthread _initial_thread;

struct pthread_attr _rthread_attr_default = {
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
};

/*
 * internal support functions
 */
void
_spinlock(volatile _atomic_lock_t *lock)
{
	while (_atomic_lock(lock))
		sched_yield();
}

int
_spinlocktry(volatile _atomic_lock_t *lock)
{
	return 0 == _atomic_lock(lock);
}

void
_spinunlock(volatile _atomic_lock_t *lock)
{
	*lock = _ATOMIC_LOCK_UNLOCKED;
}
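/*
 * Illustrative sketch (not part of the original source): these helpers
 * guard short critical sections, e.g. updates to the global thread list
 * as done later in this file:
 *
 *	_spinlock(&_thread_lock);
 *	LIST_INSERT_HEAD(&_thread_list, thread, threads);
 *	_spinunlock(&_thread_lock);
 */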

static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

static void
sigthr_handler(__unused int sig)
{
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if (!tib->tib_canceled || tib->tib_cantcancel)
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (tib->tib_cancel_point & CANCEL_POINT_DELAYED) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (tib->tib_cancel_point ||
	    (tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL))
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * A few basic callbacks for libc.  The first couple are only used
 * on archs where there isn't a fast TCB_GET()
 */
#ifndef TCB_HAVE_MD_GET
static int *
multi_threaded_errnoptr(void)
{
	return (&TIB_GET()->tib_errno);
}

static void *
multi_threaded_tcb(void)
{
	return (TCB_GET());
}
#endif /* TCB_HAVE_MD_GET */

void
_thread_canceled(void)
{
	pthread_exit(PTHREAD_CANCELED);
}

void
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct tib *tib;
	struct sigaction sa;

	tib = TIB_GET();
	tib->tib_thread = thread;
	thread->tib = tib;

	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	tib->tib_thread_flags = TIB_THREAD_INITIAL_STACK;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;
	thread->attr = _rthread_attr_default;

	/* get libc to start using our callbacks */
	{
		struct thread_callbacks cb = { 0 };

#ifndef TCB_HAVE_MD_GET
		cb.tc_errnoptr		= multi_threaded_errnoptr;
		cb.tc_tcb		= multi_threaded_tcb;
#endif
		cb.tc_canceled		= _thread_canceled;
		cb.tc_flockfile		= _thread_flockfile;
		cb.tc_ftrylockfile	= _thread_ftrylockfile;
		cb.tc_funlockfile	= _thread_funlockfile;
		cb.tc_malloc_lock	= _thread_malloc_lock;
		cb.tc_malloc_unlock	= _thread_malloc_unlock;
		cb.tc_atexit_lock	= _thread_atexit_lock;
		cb.tc_atexit_unlock	= _thread_atexit_unlock;
		cb.tc_atfork_lock	= _thread_atfork_lock;
		cb.tc_atfork_unlock	= _thread_atfork_unlock;
		cb.tc_arc4_lock		= _thread_arc4_lock;
		cb.tc_arc4_unlock	= _thread_arc4_unlock;
		cb.tc_mutex_lock	= _thread_mutex_lock;
		cb.tc_mutex_unlock	= _thread_mutex_unlock;
		cb.tc_mutex_destroy	= _thread_mutex_destroy;
		cb.tc_tag_lock		= _thread_tag_lock;
		cb.tc_tag_unlock	= _thread_tag_unlock;
		cb.tc_tag_storage	= _thread_tag_storage;
		cb.tc_fork		= _thread_fork;
		cb.tc_vfork		= _thread_vfork;
		_thread_set_callbacks(&cb, sizeof(cb));
	}

#ifndef NO_PIC
	if (_DYNAMIC) {
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = sigthr_handler;
	sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	_threads_ready = 1;

	_malloc_init(1);

	_rthread_debug(1, "rthread init\n");
}

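/*
 * Queue a dead thread for deferred teardown: _rthread_reaper() frees
 * its stack and TIB once the kernel has cleared its tid.
 */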
static void
_rthread_free(pthread_t thread)
{
	_spinlock(&_thread_gc_lock);
	TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
	_spinunlock(&_thread_gc_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	if (!_threads_ready)
		_rthread_init();

	return (TIB_GET()->tib_thread);
}
DEF_STD(pthread_self);

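/*
 * Reclaim threads queued by _rthread_free().  A thread may only be
 * reaped after the kernel has zeroed tib_tid on exit, so entries with
 * a live tid are skipped and retried on a later call.
 */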
static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tib->tib_tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		if (thread != &_initial_thread) {
			_rthread_debug(3, "rthread reaping %p stack %p\n",
			    (void *)thread, (void *)thread->stack);
			_rthread_free_stack(thread->stack);
			_dl_free_tib(thread->tib, sizeof(*thread));
		} else {
			/* initial thread isn't part of TIB allocation */
			_rthread_debug(3, "rthread reaping %p (initial)\n",
			    (void *)thread);
			_dl_free_tib(thread->tib, 0);
		}
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}

void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	struct tib *tib = TIB_GET();
	pthread_t thread;

	if (!_threads_ready)
		_rthread_init();
	thread = tib->tib_thread;

	if (tib->tib_cantcancel & CANCEL_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up.  XXX write something to stderr?
		 */
		abort();
		//_exit(42);
	}

	tib->tib_cantcancel |= CANCEL_DYING;

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}

	__threxit(&tib->tib_tid);
	for(;;);
}
DEF_STD(pthread_exit);

int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	struct tib *tib = TIB_GET();
	pthread_t self;
	PREP_CANCEL_POINT(tib);

	if (_post_threaded) {
#define GREATSCOTT "great scott! serious repercussions on future events!\n"
		write(2, GREATSCOTT, sizeof(GREATSCOTT) - 1);
		abort();
	}
	if (!_threads_ready)
		_rthread_init();
	self = tib->tib_thread;

	e = 0;
	ENTER_DELAYED_CANCEL_POINT(tib, self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last one holding a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	LEAVE_CANCEL_POINT_INNER(tib, e);
	_rthread_reaper();
	return (e);
}

int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}

int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct tib *tib;
	pthread_t thread;
	struct __tfork param;
	int rc;

	if (!_threads_ready)
		_rthread_init();

	_rthread_reaper();

	tib = _dl_allocate_tib(sizeof(*thread));
	if (tib == NULL)
		return (ENOMEM);
	thread = tib->tib_thread;
	memset(thread, 0, sizeof(*thread));
	thread->tib = tib;
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	tib->tib_tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		pthread_t self = pthread_self();

		thread->attr.sched_policy = self->attr.sched_policy;
		thread->attr.sched_param = self->attr.sched_param;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	param.tf_tcb = TIB_TO_TCB(tib);
	param.tf_tid = &tib->tib_tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rthread_free_stack(thread->stack);
fail1:
	_dl_free_tib(tib, sizeof(*thread));

	return (rc);
}
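/*
 * Usage sketch (illustrative, not part of the original source): the
 * minimal create/join cycle implemented above.  "worker" is a
 * hypothetical start routine.
 *
 *	static void *worker(void *arg) { return arg; }
 *
 *	pthread_t t;
 *	void *res;
 *	if (pthread_create(&t, NULL, worker, "hi") == 0 &&
 *	    pthread_join(t, &res) == 0)
 *		printf("worker returned %s\n", (char *)res);
 */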

int
pthread_kill(pthread_t thread, int sig)
{
	struct tib *tib = thread->tib;

	if (sig == SIGTHR)
		return (EINVAL);
	if (thrkill(tib->tib_tid, sig, TIB_TO_TCB(tib)))
		return (errno);
	return (0);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

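/*
 * Mark the target thread as canceled and, unless cancelation is
 * currently disabled, poke it with SIGTHR so sigthr_handler() can act
 * at the next cancelation point (or immediately for async cancel).
 */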
int
pthread_cancel(pthread_t thread)
{
	struct tib *tib = thread->tib;
	pid_t tid = tib->tib_tid;

	if (tib->tib_canceled == 0 && tid != 0 &&
	    (tib->tib_cantcancel & CANCEL_DYING) == 0) {
		tib->tib_canceled = 1;

		if ((tib->tib_cantcancel & CANCEL_DISABLED) == 0) {
			thrkill(tid, SIGTHR, TIB_TO_TCB(tib));
			return (0);
		}
	}
	return (0);
}

void
pthread_testcancel(void)
{
	struct tib *tib = TIB_GET();

	if (tib->tib_canceled && (tib->tib_cantcancel & CANCEL_DISABLED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	struct tib *tib = TIB_GET();
	int oldstate;

	oldstate = tib->tib_cantcancel & CANCEL_DISABLED ?
	    PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		tib->tib_cantcancel &= ~CANCEL_DISABLED;
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		tib->tib_cantcancel |= CANCEL_DISABLED;
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}
DEF_STD(pthread_setcancelstate);
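/*
 * Usage sketch (illustrative, not part of the original source):
 * temporarily blocking cancelation around work that must not be
 * interrupted.
 *
 *	int old;
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
 *	... work that must complete ...
 *	pthread_setcancelstate(old, NULL);
 */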

int
pthread_setcanceltype(int type, int *oldtypep)
{
	struct tib *tib = TIB_GET();
	int oldtype;

	oldtype = tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL ?
	    PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		tib->tib_thread_flags &=~ TIB_THREAD_ASYNC_CANCEL;
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		tib->tib_thread_flags |= TIB_THREAD_ASYNC_CANCEL;
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}

void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}
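/*
 * Usage sketch (illustrative, not part of the original source):
 * handlers run in LIFO order on pthread_exit()/cancelation, or when
 * popped with a nonzero argument.  "unlock_fn" and "lock" are
 * hypothetical names.
 *
 *	pthread_cleanup_push(unlock_fn, &lock);
 *	... code that may hit a cancelation point ...
 *	pthread_cleanup_pop(1);		-- runs unlock_fn(&lock)
 */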

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags 0x%x name %s\n", thread->tib->tib_tid,
		    thread->tib->tib_thread_flags, thread->name);
	_spinunlock(&_thread_lock);
}

#ifndef NO_PIC
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 */
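/*
 * The "what" argument: 0 locks, 1 unlocks, and any other value
 * reinitializes the lock state in the child after fork().
 */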
void
_rthread_dl_lock(int what)
{
	static _atomic_lock_t lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0, NULL, &lock, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else if (what == 1) {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	} else {
		/* reinit: used in child after fork to clear the queue */
		lock = _SPINLOCK_UNLOCKED;
		if (--count == 0)
			owner = NULL;
		TAILQ_INIT(&lockers);
	}
}
#endif
693