xref: /netbsd-src/sys/kern/subr_asan.c (revision f3cfa6f6ce31685c6c4a758bc430e69eb99f50a4)
1 /*	$NetBSD: subr_asan.c,v 1.9 2019/05/04 17:19:10 maxv Exp $	*/
2 
3 /*
4  * Copyright (c) 2018 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Maxime Villard.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: subr_asan.c,v 1.9 2019/05/04 17:19:10 maxv Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/device.h>
37 #include <sys/kernel.h>
38 #include <sys/param.h>
39 #include <sys/conf.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/asan.h>
43 
44 #include <uvm/uvm.h>
45 
46 /* ASAN constants. Part of the compiler ABI. */
47 #define KASAN_SHADOW_SCALE_SHIFT	3
48 #define KASAN_SHADOW_SCALE_SIZE		(1UL << KASAN_SHADOW_SCALE_SHIFT)
49 #define KASAN_SHADOW_MASK		(KASAN_SHADOW_SCALE_SIZE - 1)
50 
51 /* The MD code. */
52 #include <machine/asan.h>
53 
54 /* ASAN ABI version. */
55 #if defined(__clang__) && (__clang_major__ - 0 >= 6)
56 #define ASAN_ABI_VERSION	8
57 #elif __GNUC_PREREQ__(7, 1) && !defined(__clang__)
58 #define ASAN_ABI_VERSION	8
59 #elif __GNUC_PREREQ__(6, 1) && !defined(__clang__)
60 #define ASAN_ABI_VERSION	6
61 #else
62 #error "Unsupported compiler version"
63 #endif
64 
65 #define __RET_ADDR	(unsigned long)__builtin_return_address(0)
66 
67 /* Global variable descriptor. Part of the compiler ABI.  */
/* Source location of a global, as emitted by the compiler. */
struct __asan_global_source_location {
	const char *filename;
	int line_no;
	int column_no;
};
/*
 * Descriptor emitted by the compiler for each instrumented global.
 * The layout is part of the compiler ABI and must match the selected
 * ASAN_ABI_VERSION; do not reorder or resize the fields.
 */
struct __asan_global {
	const void *beg;		/* address of the global variable */
	size_t size;			/* size of the global variable */
	size_t size_with_redzone;	/* size with the redzone */
	const void *name;		/* name of the variable */
	const void *module_name;	/* name of the module where the var is declared */
	unsigned long has_dynamic_init;	/* the var has dyn initializer (c++) */
	struct __asan_global_source_location *location;
#if ASAN_ABI_VERSION >= 7
	uintptr_t odr_indicator;	/* the address of the ODR indicator symbol */
#endif
};
85 
86 static bool kasan_enabled __read_mostly = false;
87 
88 /* -------------------------------------------------------------------------- */
89 
90 void
91 kasan_shadow_map(void *addr, size_t size)
92 {
93 	size_t sz, npages, i;
94 	vaddr_t sva, eva;
95 
96 	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
97 
98 	sz = roundup(size, KASAN_SHADOW_SCALE_SIZE) / KASAN_SHADOW_SCALE_SIZE;
99 
100 	sva = (vaddr_t)kasan_md_addr_to_shad(addr);
101 	eva = (vaddr_t)kasan_md_addr_to_shad(addr) + sz;
102 
103 	sva = rounddown(sva, PAGE_SIZE);
104 	eva = roundup(eva, PAGE_SIZE);
105 
106 	npages = (eva - sva) / PAGE_SIZE;
107 
108 	KASSERT(sva >= KASAN_MD_SHADOW_START && eva < KASAN_MD_SHADOW_END);
109 
110 	for (i = 0; i < npages; i++) {
111 		kasan_md_shadow_map_page(sva + i * PAGE_SIZE);
112 	}
113 }
114 
/*
 * Call the ASAN constructors emitted by the compiler (one per object
 * file, registering the instrumented globals). The entries between
 * __CTOR_LIST__ and __CTOR_END__ are function pointers.
 *
 * Use uintptr_t consistently for both the entry count and the entry
 * walk: the original mixed uint64_t element accesses with a
 * sizeof(uintptr_t) divisor, which would disagree on an ILP32 platform.
 */
static void
kasan_ctors(void)
{
	extern uintptr_t __CTOR_LIST__, __CTOR_END__;
	size_t nentries, i;
	uintptr_t *ptr;

	nentries = ((size_t)&__CTOR_END__ - (size_t)&__CTOR_LIST__) /
	    sizeof(uintptr_t);

	ptr = &__CTOR_LIST__;
	for (i = 0; i < nentries; i++) {
		void (*func)(void);

		func = (void *)(*ptr);
		(*func)();

		ptr++;
	}
}
135 
/*
 * Early bootstrap entry point: everything at this stage is
 * machine-dependent, so simply hand the boot stack to the MD code.
 */
void
kasan_early_init(void *stack)
{
	kasan_md_early_init(stack);
}
141 
142 void
143 kasan_init(void)
144 {
145 	/* MD initialization. */
146 	kasan_md_init();
147 
148 	/* Now officially enabled. */
149 	kasan_enabled = true;
150 
151 	/* Call the ASAN constructors. */
152 	kasan_ctors();
153 }
154 
155 static inline const char *
156 kasan_code_name(uint8_t code)
157 {
158 	switch (code) {
159 	case KASAN_GENERIC_REDZONE:
160 		return "GenericRedZone";
161 	case KASAN_MALLOC_REDZONE:
162 		return "MallocRedZone";
163 	case KASAN_KMEM_REDZONE:
164 		return "KmemRedZone";
165 	case KASAN_POOL_REDZONE:
166 		return "PoolRedZone";
167 	case KASAN_POOL_FREED:
168 		return "PoolUseAfterFree";
169 	case 1 ... 7:
170 		return "RedZonePartial";
171 	case KASAN_STACK_LEFT:
172 		return "StackLeft";
173 	case KASAN_STACK_RIGHT:
174 		return "StackRight";
175 	case KASAN_STACK_PARTIAL:
176 		return "StackPartial";
177 	case KASAN_USE_AFTER_SCOPE:
178 		return "UseAfterScope";
179 	default:
180 		return "Unknown";
181 	}
182 }
183 
/* Print a violation report on the console, then unwind the stack. */
static void
kasan_report(unsigned long addr, size_t size, bool write, unsigned long pc,
    uint8_t code)
{
	const char *plural = (size > 1 ? "s" : "");
	const char *dir = (write ? "write" : "read");

	printf("ASan: Unauthorized Access In %p: Addr %p [%zu byte%s, %s,"
	    " %s]\n",
	    (void *)pc, (void *)addr, size, plural, dir,
	    kasan_code_name(code));
	kasan_md_unwind();
}
194 
195 static __always_inline void
196 kasan_shadow_1byte_markvalid(unsigned long addr)
197 {
198 	int8_t *byte = kasan_md_addr_to_shad((void *)addr);
199 	int8_t last = (addr & KASAN_SHADOW_MASK) + 1;
200 
201 	*byte = last;
202 }
203 
204 static __always_inline void
205 kasan_shadow_Nbyte_markvalid(const void *addr, size_t size)
206 {
207 	size_t i;
208 
209 	for (i = 0; i < size; i++) {
210 		kasan_shadow_1byte_markvalid((unsigned long)addr+i);
211 	}
212 }
213 
214 static __always_inline void
215 kasan_shadow_Nbyte_fill(const void *addr, size_t size, uint8_t code)
216 {
217 	void *shad;
218 
219 	if (__predict_false(size == 0))
220 		return;
221 	if (__predict_false(kasan_md_unsupported((vaddr_t)addr)))
222 		return;
223 
224 	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
225 	KASSERT(size % KASAN_SHADOW_SCALE_SIZE == 0);
226 
227 	shad = (void *)kasan_md_addr_to_shad(addr);
228 	size = size >> KASAN_SHADOW_SCALE_SHIFT;
229 
230 	__builtin_memset(shad, code, size);
231 }
232 
233 void
234 kasan_add_redzone(size_t *size)
235 {
236 	*size = roundup(*size, KASAN_SHADOW_SCALE_SIZE);
237 	*size += KASAN_SHADOW_SCALE_SIZE;
238 }
239 
240 void
241 kasan_softint(struct lwp *l)
242 {
243 	const void *stk = (const void *)uvm_lwp_getuarea(l);
244 
245 	kasan_shadow_Nbyte_fill(stk, USPACE, 0);
246 }
247 
248 /*
249  * In an area of size 'sz_with_redz', mark the 'size' first bytes as valid,
250  * and the rest as invalid. There are generally two use cases:
251  *
252  *  o kasan_mark(addr, origsize, size, code), with origsize < size. This marks
253  *    the redzone at the end of the buffer as invalid.
254  *
255  *  o kasan_mark(addr, size, size, 0). This marks the entire buffer as valid.
256  */
257 void
258 kasan_mark(const void *addr, size_t size, size_t sz_with_redz, uint8_t code)
259 {
260 	size_t i, n, redz;
261 	int8_t *shad;
262 
263 	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
264 	redz = sz_with_redz - roundup(size, KASAN_SHADOW_SCALE_SIZE);
265 	KASSERT(redz % KASAN_SHADOW_SCALE_SIZE == 0);
266 	shad = kasan_md_addr_to_shad(addr);
267 
268 	/* Chunks of 8 bytes, valid. */
269 	n = size / KASAN_SHADOW_SCALE_SIZE;
270 	for (i = 0; i < n; i++) {
271 		*shad++ = 0;
272 	}
273 
274 	/* Possibly one chunk, mid. */
275 	if ((size & KASAN_SHADOW_MASK) != 0) {
276 		*shad++ = (size & KASAN_SHADOW_MASK);
277 	}
278 
279 	/* Chunks of 8 bytes, invalid. */
280 	n = redz / KASAN_SHADOW_SCALE_SIZE;
281 	for (i = 0; i < n; i++) {
282 		*shad++ = code;
283 	}
284 }
285 
286 /* -------------------------------------------------------------------------- */
287 
/*
 * True if the [addr, addr+size) access spans two shadow cells, in which
 * case it must be checked as two smaller accesses.
 */
#define ADDR_CROSSES_SCALE_BOUNDARY(addr, size) 		\
	(addr >> KASAN_SHADOW_SCALE_SHIFT) !=			\
	    ((addr + size - 1) >> KASAN_SHADOW_SCALE_SHIFT)
291 
292 static __always_inline bool
293 kasan_shadow_1byte_isvalid(unsigned long addr, uint8_t *code)
294 {
295 	int8_t *byte = kasan_md_addr_to_shad((void *)addr);
296 	int8_t last = (addr & KASAN_SHADOW_MASK) + 1;
297 
298 	if (__predict_true(*byte == 0 || last <= *byte)) {
299 		return true;
300 	}
301 	*code = *byte;
302 	return false;
303 }
304 
305 static __always_inline bool
306 kasan_shadow_2byte_isvalid(unsigned long addr, uint8_t *code)
307 {
308 	int8_t *byte, last;
309 
310 	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 2)) {
311 		return (kasan_shadow_1byte_isvalid(addr, code) &&
312 		    kasan_shadow_1byte_isvalid(addr+1, code));
313 	}
314 
315 	byte = kasan_md_addr_to_shad((void *)addr);
316 	last = ((addr + 1) & KASAN_SHADOW_MASK) + 1;
317 
318 	if (__predict_true(*byte == 0 || last <= *byte)) {
319 		return true;
320 	}
321 	*code = *byte;
322 	return false;
323 }
324 
325 static __always_inline bool
326 kasan_shadow_4byte_isvalid(unsigned long addr, uint8_t *code)
327 {
328 	int8_t *byte, last;
329 
330 	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 4)) {
331 		return (kasan_shadow_2byte_isvalid(addr, code) &&
332 		    kasan_shadow_2byte_isvalid(addr+2, code));
333 	}
334 
335 	byte = kasan_md_addr_to_shad((void *)addr);
336 	last = ((addr + 3) & KASAN_SHADOW_MASK) + 1;
337 
338 	if (__predict_true(*byte == 0 || last <= *byte)) {
339 		return true;
340 	}
341 	*code = *byte;
342 	return false;
343 }
344 
345 static __always_inline bool
346 kasan_shadow_8byte_isvalid(unsigned long addr, uint8_t *code)
347 {
348 	int8_t *byte, last;
349 
350 	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 8)) {
351 		return (kasan_shadow_4byte_isvalid(addr, code) &&
352 		    kasan_shadow_4byte_isvalid(addr+4, code));
353 	}
354 
355 	byte = kasan_md_addr_to_shad((void *)addr);
356 	last = ((addr + 7) & KASAN_SHADOW_MASK) + 1;
357 
358 	if (__predict_true(*byte == 0 || last <= *byte)) {
359 		return true;
360 	}
361 	*code = *byte;
362 	return false;
363 }
364 
/* Check an access of arbitrary size, one byte at a time. */
static __always_inline bool
kasan_shadow_Nbyte_isvalid(unsigned long addr, size_t size, uint8_t *code)
{
	size_t left;

	for (left = size; left > 0; left--, addr++) {
		if (!kasan_shadow_1byte_isvalid(addr, code))
			return false;
	}

	return true;
}
377 
/*
 * Core of the instrumentation: verify the [addr, addr+size) access
 * against the shadow, and report a violation otherwise. Always inlined
 * into each hook so that the __builtin_constant_p() dispatch below can
 * resolve at compile time and select the fixed-size fast path.
 */
static __always_inline void
kasan_shadow_check(unsigned long addr, size_t size, bool write,
    unsigned long retaddr)
{
	uint8_t code;
	bool valid;

	/* No checking before kasan_init() has completed. */
	if (__predict_false(!kasan_enabled))
		return;
	if (__predict_false(size == 0))
		return;
	/* Addresses without a shadow mapping (MD-specific) are skipped. */
	if (__predict_false(kasan_md_unsupported(addr)))
		return;

	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			valid = kasan_shadow_1byte_isvalid(addr, &code);
			break;
		case 2:
			valid = kasan_shadow_2byte_isvalid(addr, &code);
			break;
		case 4:
			valid = kasan_shadow_4byte_isvalid(addr, &code);
			break;
		case 8:
			valid = kasan_shadow_8byte_isvalid(addr, &code);
			break;
		default:
			valid = kasan_shadow_Nbyte_isvalid(addr, size, &code);
			break;
		}
	} else {
		valid = kasan_shadow_Nbyte_isvalid(addr, size, &code);
	}

	if (__predict_false(!valid)) {
		kasan_report(addr, size, write, retaddr, code);
	}
}
418 
419 /* -------------------------------------------------------------------------- */
420 
421 void *
422 kasan_memcpy(void *dst, const void *src, size_t len)
423 {
424 	kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR);
425 	kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR);
426 	return __builtin_memcpy(dst, src, len);
427 }
428 
429 int
430 kasan_memcmp(const void *b1, const void *b2, size_t len)
431 {
432 	kasan_shadow_check((unsigned long)b1, len, false, __RET_ADDR);
433 	kasan_shadow_check((unsigned long)b2, len, false, __RET_ADDR);
434 	return __builtin_memcmp(b1, b2, len);
435 }
436 
437 void *
438 kasan_memset(void *b, int c, size_t len)
439 {
440 	kasan_shadow_check((unsigned long)b, len, true, __RET_ADDR);
441 	return __builtin_memset(b, c, len);
442 }
443 
444 char *
445 kasan_strcpy(char *dst, const char *src)
446 {
447 	char *save = dst;
448 
449 	while (1) {
450 		kasan_shadow_check((unsigned long)src, 1, false, __RET_ADDR);
451 		kasan_shadow_check((unsigned long)dst, 1, true, __RET_ADDR);
452 		*dst = *src;
453 		if (*src == '\0')
454 			break;
455 		src++, dst++;
456 	}
457 
458 	return save;
459 }
460 
461 int
462 kasan_strcmp(const char *s1, const char *s2)
463 {
464 	while (1) {
465 		kasan_shadow_check((unsigned long)s1, 1, false, __RET_ADDR);
466 		kasan_shadow_check((unsigned long)s2, 1, false, __RET_ADDR);
467 		if (*s1 != *s2)
468 			break;
469 		if (*s1 == '\0')
470 			return 0;
471 		s1++, s2++;
472 	}
473 
474 	return (*(const unsigned char *)s1 - *(const unsigned char *)s2);
475 }
476 
477 size_t
478 kasan_strlen(const char *str)
479 {
480 	const char *s;
481 
482 	s = str;
483 	while (1) {
484 		kasan_shadow_check((unsigned long)s, 1, false, __RET_ADDR);
485 		if (*s == '\0')
486 			break;
487 		s++;
488 	}
489 
490 	return (s - str);
491 }
492 
493 #undef kcopy
494 #undef copystr
495 #undef copyinstr
496 #undef copyoutstr
497 #undef copyin
498 
499 int	kasan_kcopy(const void *, void *, size_t);
500 int	kasan_copystr(const void *, void *, size_t, size_t *);
501 int	kasan_copyinstr(const void *, void *, size_t, size_t *);
502 int	kasan_copyoutstr(const void *, void *, size_t, size_t *);
503 int	kasan_copyin(const void *, void *, size_t);
504 int	kcopy(const void *, void *, size_t);
505 int	copystr(const void *, void *, size_t, size_t *);
506 int	copyinstr(const void *, void *, size_t, size_t *);
507 int	copyoutstr(const void *, void *, size_t, size_t *);
508 int	copyin(const void *, void *, size_t);
509 
510 int
511 kasan_kcopy(const void *src, void *dst, size_t len)
512 {
513 	kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR);
514 	kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR);
515 	return kcopy(src, dst, len);
516 }
517 
518 int
519 kasan_copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
520 {
521 	kasan_shadow_check((unsigned long)kdaddr, len, true, __RET_ADDR);
522 	return copystr(kfaddr, kdaddr, len, done);
523 }
524 
525 int
526 kasan_copyin(const void *uaddr, void *kaddr, size_t len)
527 {
528 	kasan_shadow_check((unsigned long)kaddr, len, true, __RET_ADDR);
529 	return copyin(uaddr, kaddr, len);
530 }
531 
532 int
533 kasan_copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
534 {
535 	kasan_shadow_check((unsigned long)kaddr, len, true, __RET_ADDR);
536 	return copyinstr(uaddr, kaddr, len, done);
537 }
538 
539 int
540 kasan_copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
541 {
542 	kasan_shadow_check((unsigned long)kaddr, len, false, __RET_ADDR);
543 	return copyoutstr(kaddr, uaddr, len, done);
544 }
545 
546 /* -------------------------------------------------------------------------- */
547 
548 void __asan_register_globals(struct __asan_global *, size_t);
549 void __asan_unregister_globals(struct __asan_global *, size_t);
550 
551 void
552 __asan_register_globals(struct __asan_global *globals, size_t n)
553 {
554 	size_t i;
555 
556 	for (i = 0; i < n; i++) {
557 		kasan_mark(globals[i].beg, globals[i].size,
558 		    globals[i].size_with_redzone, KASAN_GENERIC_REDZONE);
559 	}
560 }
561 
/* Compiler ABI counterpart of __asan_register_globals; unused here. */
void
__asan_unregister_globals(struct __asan_global *globals, size_t n)
{
	/* never called */
}
567 
/*
 * Generate the fixed-size load/store hooks called by instrumented code
 * (__asan_load1 .. __asan_store16 and their _noabort variants). The
 * constant 'size' lets kasan_shadow_check() pick its fast path.
 */
#define ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long);			\
	void __asan_load##size(unsigned long addr)		\
	{							\
		kasan_shadow_check(addr, size, false, __RET_ADDR);\
	} 							\
	void __asan_load##size##_noabort(unsigned long);	\
	void __asan_load##size##_noabort(unsigned long addr)	\
	{							\
		kasan_shadow_check(addr, size, false, __RET_ADDR);\
	}							\
	void __asan_store##size(unsigned long);			\
	void __asan_store##size(unsigned long addr)		\
	{							\
		kasan_shadow_check(addr, size, true, __RET_ADDR);\
	}							\
	void __asan_store##size##_noabort(unsigned long);	\
	void __asan_store##size##_noabort(unsigned long addr)	\
	{							\
		kasan_shadow_check(addr, size, true, __RET_ADDR);\
	}

ASAN_LOAD_STORE(1);
ASAN_LOAD_STORE(2);
ASAN_LOAD_STORE(4);
ASAN_LOAD_STORE(8);
ASAN_LOAD_STORE(16);
595 
596 void __asan_loadN(unsigned long, size_t);
597 void __asan_loadN_noabort(unsigned long, size_t);
598 void __asan_storeN(unsigned long, size_t);
599 void __asan_storeN_noabort(unsigned long, size_t);
600 void __asan_handle_no_return(void);
601 
602 void
603 __asan_loadN(unsigned long addr, size_t size)
604 {
605 	kasan_shadow_check(addr, size, false, __RET_ADDR);
606 }
607 
608 void
609 __asan_loadN_noabort(unsigned long addr, size_t size)
610 {
611 	kasan_shadow_check(addr, size, false, __RET_ADDR);
612 }
613 
614 void
615 __asan_storeN(unsigned long addr, size_t size)
616 {
617 	kasan_shadow_check(addr, size, true, __RET_ADDR);
618 }
619 
620 void
621 __asan_storeN_noabort(unsigned long addr, size_t size)
622 {
623 	kasan_shadow_check(addr, size, true, __RET_ADDR);
624 }
625 
/* Compiler ABI entry point; intentionally a no-op here. */
void
__asan_handle_no_return(void)
{
	/* nothing */
}
631 
/*
 * Generate the __asan_set_shadow_XX hooks, which fill a shadow range
 * directly with the given byte value (compiler ABI).
 */
#define ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(void *, size_t);			\
	void __asan_set_shadow_##byte(void *addr, size_t size)		\
	{								\
		__builtin_memset((void *)addr, 0x##byte, size);		\
	}

ASAN_SET_SHADOW(00);
ASAN_SET_SHADOW(f1);
ASAN_SET_SHADOW(f2);
ASAN_SET_SHADOW(f3);
ASAN_SET_SHADOW(f5);
ASAN_SET_SHADOW(f8);
645 
646 void __asan_poison_stack_memory(const void *, size_t);
647 void __asan_unpoison_stack_memory(const void *, size_t);
648 
649 void __asan_poison_stack_memory(const void *addr, size_t size)
650 {
651 	size = roundup(size, KASAN_SHADOW_SCALE_SIZE);
652 	kasan_shadow_Nbyte_fill(addr, size, KASAN_USE_AFTER_SCOPE);
653 }
654 
/* Re-validate a stack range previously poisoned (compiler ABI). */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_shadow_Nbyte_markvalid(addr, size);
}
659