/*	$NetBSD: cpu_in_cksum.S,v 1.11 2015/02/15 17:21:08 skrll Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Hand-optimised in_cksum() and in4_cksum() implementations for ARM/Xscale
 */
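/*
 * Both routines compute the standard 16-bit ones-complement Internet
 * checksum (RFC 1071).  A rough, illustrative C model only (not part of
 * this file's build; "cksum_model" is a made-up name):
 *
 *	uint16_t
 *	cksum_model(const uint16_t *w, size_t len, uint32_t sum)
 *	{
 *		while (len > 1) {		// sum 16-bit words
 *			sum += *w++;
 *			len -= 2;
 *		}
 *		if (len != 0)			// pad a trailing odd byte
 *			sum += *(const uint8_t *)w;	// (placement differs on big-endian)
 *		while (sum >> 16)		// fold carries back into 16 bits
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 *
 * The assembly below gets the same result by summing 32 bits at a time
 * and folding once at the end; ones-complement addition is associative,
 * so the grouping does not matter.
 */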

#include <machine/asm.h>
RCSID("$NetBSD: cpu_in_cksum.S,v 1.11 2015/02/15 17:21:08 skrll Exp $")

#include "assym.h"

/*
 * int cpu_in_cksum(struct mbuf *m, int len, int off, uint32_t initial_sum)
 *
 * Entry:
 *	r0	m
 *	r1	len
 *	r2	off
 *	r3	initial_sum
 *
 * Function wide register usage
 *	r8	accumulated sum
 *	r9	remaining length to parse
 *	ip	pointer to next mbuf
 */
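/*
 * Roughly equivalent C, as an illustrative sketch only ("n" and
 * "partial_sum" are made-up names; partial_sum() stands in for
 * arm_cksumdata below):
 *
 *	sum = initial_sum;
 *	while (off >= m->m_len) {		// skip over "off" bytes
 *		off -= m->m_len;
 *		if ((m = m->m_next) == NULL)
 *			panic("in_cksum: out of mbufs");
 *	}
 *	do {
 *		n = m->m_len - off;		// bytes available in this mbuf
 *		if (n > len)
 *			n = len;
 *		sum += partial_sum(mtod(m, char *) + off, n);
 *		len -= n;
 *		off = 0;
 *	} while ((m = m->m_next) != NULL);
 *	return the folded, complemented 16-bit sum;
 */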
/* LINTSTUB: Func: int cpu_in_cksum(struct mbuf *, int, int, uint32_t) */
ENTRY(cpu_in_cksum)
	push	{r4-r11,lr}

	mov	r8, r3			/* Accumulate sum in r8 */
	mov	r9, r1			/* save len in r9 */
	mov	ip, r0			/* set ip to the current mbuf */

.Lin_cksum_skip_loop:
	ldr	r1, [ip, #(M_LEN)]
	ldr	r0, [ip, #(M_DATA)]
	ldr	ip, [ip, #(M_NEXT)]
.Lin_cksum_skip_entry:
	subs	r2, r2, r1		/* offset = offset - mbuf length */
	ble	.Lin_cksum_skip_done	/* offset now <= 0: data starts in this mbuf */
	cmp	ip, #0x00
	bne	.Lin_cksum_skip_loop
	b	.Lin_cksum_whoops

.Lin_cksum_skip_done:
	add	r0, r2, r0		/* data += offset (offset is <= 0) */
	add	r0, r0, r1		/* data += length of mbuf */
					/* data == start of data to cksum */
	rsb	r1, r2, #0x00		/* length = remainder of mbuf to read */
	mov	r10, #0x00
	b	.Lin_cksum_entry
.Lin_cksum_loop:
	ldr	r1, [ip, #(M_LEN)]
	ldr	r0, [ip, #(M_DATA)]
	ldr	ip, [ip, #(M_NEXT)]
.Lin_cksum_entry:
	cmp	r9, r1			/* clamp chunk to remaining len: r1 = min(r1, r9) */
#ifdef __thumb__
	bge	1f
	mov	r1, r9
#else
	movlt	r1, r9
#endif
1:	sub	r9, r9, r1		/* remaining len -= chunk length */
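	/*
	 * r10 counts the bytes summed so far; r11 = r10 ^ chunk address.
	 * Only bit 0 matters: if the parity of this chunk's start address
	 * differs from the parity of the byte count already summed, the
	 * 32-bit partial sum returned by arm_cksumdata has its byte lanes
	 * swapped relative to the packet, and the "ror #8" below rotates
	 * them back into place before the chunk is accumulated.
	 */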
	eor	r11, r10, r0		/* parity of (bytes so far ^ chunk address) */
	add	r10, r10, r1		/* bytes summed so far += chunk length */
	adds	r2, r1, #0x00		/* Z set (and r2 = 0) if the chunk is empty */
#ifdef __thumb__
	it	ne
#endif
	blne	_ASM_LABEL(arm_cksumdata)	/* r2 = 32-bit partial sum */
	tst	r11, #0x01
#ifdef __thumb__
	it	ne
#endif
	movne	r2, r2, ror #8		/* swap byte lanes for an odd-parity chunk */
	adds	r8, r8, r2
	adc	r8, r8, #0x00		/* end-around carry */
	cmp	ip, #0x00
	bne	.Lin_cksum_loop		/* more mbufs in the chain */

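	/*
	 * All mbufs done.  Fold the 32-bit accumulator to 16 bits: adding
	 * sum<<16 to sum leaves (low halfword + high halfword) in the top
	 * halfword, and the conditional +65536 re-adds the carry out of
	 * bit 31 (the ones-complement end-around carry).  The sum is then
	 * complemented and the top halfword returned.
	 */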
#ifdef __thumb__
	mov	r0, r8
	lsls	r2, r0, #16
	adds	r0, r0, r2
	bcc	1f
	adds	r0, r0, #65536
1:	mvns	r0, r0
	lsrs	r0, r0, #16
#else
	adds	r8, r8, r8, lsl #16
	addcs	r8, r8, #65536
	mvn	r0, r8
	lsr	r0, r0, #16
#endif
	pop	{r4-r11, pc}

.Lin_cksum_whoops:
	adr	r0, .Lin_cksum_whoops_str
	bl	_C_LABEL(panic)
.Lin_cksum_whoops_str:
	.asciz	"in_cksum: out of mbufs\n"
	.p2align	5
END(cpu_in_cksum)


/*
 * The main in*_cksum() workhorse...
 *
 * Entry parameters:
 *	r0	Pointer to buffer
 *	r1	Buffer length
 *	lr	Return address
 *
 * Returns:
 *	r2	Accumulated 32-bit sum
 *
 * Clobbers:
 *	r0-r7
 */
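/*
 * In outline: the buffer is first word-aligned (leading bytes are masked
 * and summed separately), then, on Xscale, quad-aligned so that ldrd can
 * be used to sum 64 bytes per iteration with pld prefetches; a generic
 * ldmia loop is used elsewhere.  Tails of 32 bytes, 8-byte chunks, one
 * word and finally 1-3 masked bytes are handled after the main loop.
 * Carries are folded back into r2 with "adcs r2, r2, #0x00" after each
 * group, so r2 is a plain 32-bit partial sum on return.
 */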
/* LINTSTUB: Ignore */
ASENTRY_NP(arm_cksumdata)
#ifdef __PROG26
	str	lr, [sp, #-4]!		/* for SVC26 mode */
#endif
#ifdef __XSCALE__
	pld	[r0]			/* Pre-fetch the start of the buffer */
#endif
	movs	r2, #0

	/* We first have to word-align the buffer.  */
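	/*
	 * Rather than summing the leading bytes one at a time, clear the
	 * low address bits, load the whole word containing the start of
	 * the buffer and zero the bytes that precede it (the low-order
	 * bytes on little-endian, the high-order bytes on big-endian).
	 * If the buffer also ends inside this word, the trailing bytes
	 * are masked off the same way and we return immediately.
	 */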
	ands	r7, r0, #0x03
	beq	.Lcksumdata_wordaligned
	eors	r0, r0, r7		/* r0 is word aligned */
	ldr	r2, [r0], #0x04
#ifdef __thumb__
	movs	r4, r7
	lsls	r4, r4, #3
#else
	lsl	r4, r7, #3
#endif
#if defined(__ARMEB__)
	lsls	r2, r2, r4
	lsrs	r2, r2, r4
#else
	lsrs	r2, r2, r4
	lsls	r2, r2, r4
#endif
	rsb	r7, r7, #0x04
	subs	r1, r1, r7		/* Enough bytes left to make it? */
	bgt	.Lcksumdata_wordaligned
#ifdef __PROG26
	ldreq	pc, [sp], #4		/* done */
#else
	RETc(eq)			/* done */
#endif
	adds	r1, r1, r7		/* undo sub */
	subs	r7, r7, r1
	lsls	r7, r7, #3
#if defined(__ARMEB__)
	lsrs	r2, r2, r7
	lsls	r2, r2, r7
#else
	lsls	r2, r2, r7
	lsrs	r2, r2, r7
#endif
#ifdef __PROG26
	ldr	pc, [sp], #4		/* done */
#else
	RET				/* done */
#endif

	/* Buffer is now word aligned */
.Lcksumdata_wordaligned:
#ifdef __XSCALE__
	cmp	r1, #0x04		/* Less than 4 bytes left? */
	blt	.Lcksumdata_endgame	/* Yup */

	/* Now quad-align, if necessary */
	ands	r7, r0, #0x04
	ldrne	r7, [r0], #0x04
	subne	r1, r1, #0x04
	subs	r1, r1, #0x40
	blt	.Lcksumdata_bigloop_end	/* Note: C flag clear if branch taken */

	/*
	 * Buffer is now quad aligned. Sum 64 bytes at a time.
	 * Note: First ldrd is hoisted above the loop, together with
	 * setting r6 to zero to avoid stalling for results in the
	 * loop. (r7 is live, from above).
	 */
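	/*
	 * The sixteen words of each 64-byte block are accumulated with an
	 * adcs chain; the "adcs r2, r2, #0x00" at the bottom of the loop
	 * folds the final carry back in before the count is retested.
	 */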
	ldrd	r4, r5, [r0], #0x08
	mov	r6, #0x00
.Lcksumdata_bigloop:
	pld	[r0, #0x18]
	adds	r2, r2, r6
	adcs	r2, r2, r7
	ldrd	r6, r7, [r0], #0x08
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	ldrd	r4, r5, [r0], #0x08
	adcs	r2, r2, r6
	adcs	r2, r2, r7
	ldrd	r6, r7, [r0], #0x08
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	ldrd	r4, r5, [r0], #0x08
	adcs	r2, r2, r6
	adcs	r2, r2, r7
	pld	[r0, #0x18]
	ldrd	r6, r7, [r0], #0x08
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	ldrd	r4, r5, [r0], #0x08
	adcs	r2, r2, r6
	adcs	r2, r2, r7
	ldrd	r6, r7, [r0], #0x08
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	adcs	r2, r2, #0x00
	subs	r1, r1, #0x40
	ldrdge	r4, r5, [r0], #0x08
	bge	.Lcksumdata_bigloop

	adds	r2, r2, r6		/* r6/r7 still need summing */
.Lcksumdata_bigloop_end:
	adcs	r2, r2, r7
	adcs	r2, r2, #0x00

#else	/* !__XSCALE__ */

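	/*
	 * Generic ARM version: no ldrd/pld, so use four ldmia bursts per
	 * iteration to sum the same 64 bytes per pass.
	 */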
	subs	r1, r1, #0x40
	blt	.Lcksumdata_bigloop_end

.Lcksumdata_bigloop:
	ldmia	r0!, {r3, r4, r5, r6}
	adds	r2, r2, r3
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	ldmia	r0!, {r3, r4, r5, r7}
	adcs	r2, r2, r6
	adcs	r2, r2, r3
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	ldmia	r0!, {r3, r4, r5, r6}
	adcs	r2, r2, r7
	adcs	r2, r2, r3
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	ldmia	r0!, {r3, r4, r5, r7}
	adcs	r2, r2, r6
	adcs	r2, r2, r3
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	adcs	r2, r2, r7
	adcs	r2, r2, #0x00
	subs	r1, r1, #0x40
	bge	.Lcksumdata_bigloop
.Lcksumdata_bigloop_end:
#endif

	adds	r1, r1, #0x40
#ifdef __PROG26
	ldreq	pc, [sp], #4
#else
	RETc(eq)
#endif
	cmp	r1, #0x20

#ifdef __XSCALE__
	ldrdge	r4, r5, [r0], #0x08	/* Avoid stalling pld and result */
	blt	.Lcksumdata_less_than_32
	pld	[r0, #0x18]
	ldrd	r6, r7, [r0], #0x08
	adds	r2, r2, r4
	adcs	r2, r2, r5
	ldrd	r4, r5, [r0], #0x08
	adcs	r2, r2, r6
	adcs	r2, r2, r7
	ldrd	r6, r7, [r0], #0x08
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	adcs	r2, r2, r6		/* XXX: Unavoidable result stall */
	adcs	r2, r2, r7
#else
	blt	.Lcksumdata_less_than_32
	ldmia	r0!, {r3, r4, r5, r6}
	adds	r2, r2, r3
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	ldmia	r0!, {r3, r4, r5, r7}
	adcs	r2, r2, r6
	adcs	r2, r2, r3
	adcs	r2, r2, r4
	adcs	r2, r2, r5
	adcs	r2, r2, r7
#endif
	adcs	r2, r2, #0x00
	subs	r1, r1, #0x20
#ifdef __PROG26
	ldreq	pc, [sp], #4
#else
	RETc(eq)
#endif

.Lcksumdata_less_than_32:
	/* There are less than 32 bytes left */
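	/*
	 * Dispatch on how many whole 8-byte chunks remain (0-3).  r3 gets
	 * the chunk bytes (r1 & 0x18) and r4 the bytes we do not need
	 * (0x18 - r3), scaled by 1.5 to turn skipped data bytes into a
	 * code offset (each skipped chunk is three instructions below).
	 * "addne pc, pc, r4" then jumps over that much code; in ARM mode
	 * pc reads as the address of the add plus 8, and the nop pads the
	 * first group so the offsets line up.  If all 24 bytes are present
	 * (r4 == 0, so Z is set) we simply fall through.
	 */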
	and	r3, r1, #0x18
	rsb	r4, r3, #0x18
	subs	r1, r1, r3
	adds	r4, r4, r4, lsr #1	/* Side effect: Clear carry flag */
#ifdef __thumb__
	it	ne
#endif
	addne	pc, pc, r4

/*
 * Note: We use ldm here, even on Xscale, since the combined issue/result
 * latencies for ldm and ldrd are the same. Using ldm avoids needless #ifdefs.
 */
	/* At least 24 bytes remaining... */
	ldmia	r0!, {r4, r5}
	nop
	adcs	r2, r2, r4
	adcs	r2, r2, r5

	/* At least 16 bytes remaining... */
	ldmia	r0!, {r4, r5}
	adcs	r2, r2, r4
	adcs	r2, r2, r5

	/* At least 8 bytes remaining... */
	ldmia	r0!, {r4, r5}
	adcs	r2, r2, r4
	adcs	r2, r2, r5

	/* Less than 8 bytes remaining... */
	adcs	r2, r2, #0x00
	subs	r1, r1, #0x04
	blt	.Lcksumdata_lessthan4

	ldr	r4, [r0], #0x04
	subs	r1, r1, #0x04
	adds	r2, r2, r4
	adcs	r2, r2, #0x00

	/* Deal with < 4 bytes remaining */
.Lcksumdata_lessthan4:
	adds	r1, r1, #0x04
#ifdef __PROG26
	ldreq	pc, [sp], #4
#else
	RETc(eq)
#endif

	/* Deal with 1 to 3 remaining bytes, possibly misaligned */
.Lcksumdata_endgame:
	ldr	r3, [r0]		/* Fetch last word */
	rsb	r1, r1, #4		/* get discard amount */
	lsl	r1, r1, #3		/* turn it into bits */
#ifdef __ARMEB__
	lsr	r3, r3, r1		/* discard least significant bits */
	lsl	r3, r3, r1		/* shift back filling with zeros */
#else
	lsl	r3, r3, r1		/* discard most significant bits */
	lsr	r3, r3, r1		/* shift back filling with zeros */
#endif
	adds	r2, r2, r3
	adcs	r2, r2, #0x00
#ifdef __PROG26
	ldr	pc, [sp], #4
#else
	RET
#endif
ASEND(arm_cksumdata)