xref: /plan9-contrib/sys/src/libc/amd64/memmove.s (revision 272efad760864ee41cfe633b56aea9b4f5cf3ae7)
/*
 * void* memmove(void *p1, void *p2, ulong n)
 *
 * Plan 9 amd64 assembler. Plan 9 ABI: first argument in RARG,
 * remaining arguments at FP offsets; return value in AX.
 *
 * Register roles:
 *	DI	destination pointer (p1)
 *	SI	source pointer (p2)
 *	BX	byte count n (32-bit: Plan 9 amd64 ulong)
 *	CX	REP count
 *	AX	return value (original p1)
 */
TEXT memmove(SB), $0
	MOVQ	RARG, DI
	MOVQ	DI, AX			/* return value is p1 */
	MOVQ	p2+8(FP), SI
	MOVL	n+16(FP), BX
	CMPL	BX, $0
	JGT	_ok
	JEQ	_return			/* nothing to do if n == 0 */
	MOVL	$0, SI			/* deliberately fault if n < 0 */

/*
 * overlap check — copy backwards when
 *	(p2 < p1) && ((p2+n) > p1)
 * otherwise a forward copy is safe
 */
_ok:
	CMPQ	SI, DI
	JGT	_forward
	JEQ	_return			/* nothing to do if p2 == p1 */
	MOVQ	SI, DX
	ADDQ	BX, DX
	CMPQ	DX, DI
	JGT	_back

/*
 * forward copy: whole longs when both pointers are 4-aligned
 */
_forward:
	CLD
	MOVQ	SI, DX
	ORQ	DI, DX
	ANDL	$3, DX
	JNE	c3f			/* unaligned: bytes only */
	MOVQ	BX, CX
	SHRQ	$2, CX			/* CX = number of longs */
	ANDL	$3, BX			/* BX = 0..3 leftover bytes; sets ZF */
	REP;	MOVSL

/*
 * copy the remaining 0..3 bytes
 */
	JEQ	_return			/* ZF from ANDL above; REP MOVSL leaves flags alone */
c3f:
	MOVL	BX, CX
	REP;	MOVSB

	RET

/*
 * backward copy: start at the top end of both buffers with
 * DF set so the string ops decrement; addresses adjusted to
 * point at the last whole long
 */
_back:
	ADDQ	BX, DI
	ADDQ	BX, SI
	STD
	SUBQ	$4, DI
	SUBQ	$4, SI
/*
 * whole longs when both (adjusted) pointers are 4-aligned
 */
	MOVQ	DI, DX
	ORQ	SI, DX
	ANDL	$3, DX
	JNE	c3b			/* unaligned: bytes only */
	MOVL	BX, CX
	SHRQ	$2, CX			/* CX = number of longs */
	ANDL	$3, BX			/* BX = 0..3 leftover bytes; sets ZF */
	REP;	MOVSL
/*
 * copy the remaining 0..3 bytes; even when there are none we
 * must still clear DF, so exit through _cldret, not _return
 */
	JEQ	_cldret			/* ZF from ANDL above; REP MOVSL leaves flags alone */

c3b:
	ADDQ	$3, DI			/* step from long granularity back to bytes */
	ADDQ	$3, SI
	MOVL	BX, CX
	REP;	MOVSB

_cldret:
	CLD				/* never return with DF set */
_return:
	RET
82