/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Quick'n'dirty IP checksum ...
 *
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#ifdef CONFIG_64BIT
/*
 * As we are sharing the code base with the mips32 tree (which uses the o32
 * ABI register definitions), we need to redefine the register definitions
 * from the n64 ABI register naming to the o32 ABI register naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOAD	ld
#define LOAD32	lwu
#define ADD	daddu
#define NBYTES	8

#else

#define LOAD	lw
#define LOAD32	lw
#define ADD	addu
#define NBYTES	4

#endif /* USE_DOUBLE */

#define UNIT(unit)	((unit)*NBYTES)

#define ADDC(sum,reg)						\
	ADD	sum, reg;					\
	sltu	v1, sum, reg;					\
	ADD	sum, v1

#define ADDC32(sum,reg)						\
	addu	sum, reg;					\
	sltu	v1, sum, reg;					\
	addu	sum, v1

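/*
 * For reference, a rough C sketch of what one ADDC()/ADDC32() step
 * computes: an add whose carry out of the top bit is wrapped back in
 * (the building block of the ones' complement checksum).  The helper
 * name addc() is purely illustrative and not part of any kernel API:
 *
 *	static inline unsigned long addc(unsigned long sum, unsigned long val)
 *	{
 *		sum += val;
 *		if (sum < val)		-- unsigned overflow, i.e. a carry
 *			sum++;		-- fold the carry back in
 *		return sum;
 *	}
 */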
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
	LOAD	_t0, (offset + UNIT(0))(src);			\
	LOAD	_t1, (offset + UNIT(1))(src);			\
	LOAD	_t2, (offset + UNIT(2))(src);			\
	LOAD	_t3, (offset + UNIT(3))(src);			\
	ADDC(sum, _t0);						\
	ADDC(sum, _t1);						\
	ADDC(sum, _t2);						\
	ADDC(sum, _t3)

#ifdef USE_DOUBLE
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#else
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3);	\
	CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
#endif

/*
 * a0: source address
 * a1: length of the area to checksum
 * a2: partial checksum
 */

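/*
 * A minimal C model of what csum_partial() computes, assuming the usual
 * semantics: a 32-bit ones' complement accumulation of the buffer added
 * to the passed-in partial sum, which callers later reduce to the final
 * 16-bit Internet checksum (e.g. with csum_fold()).  Illustrative only --
 * alignment, byte order and odd-address handling are simplified
 * (little-endian view), and addc()/csum_partial_ref() are made-up names:
 *
 *	unsigned int csum_partial_ref(const unsigned char *buf, int len,
 *				      unsigned int sum)
 *	{
 *		while (len > 1) {
 *			sum = addc(sum, *(const unsigned short *)buf);
 *			buf += 2;
 *			len -= 2;
 *		}
 *		if (len)			-- trailing odd byte
 *			sum = addc(sum, *buf);
 *		return sum;
 *	}
 */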
#define src a0
#define sum v0

	.text
	.set	noreorder
	.align	5
LEAF(csum_partial)
	move	sum, zero
	move	t7, zero

	sltiu	t8, a1, 0x8
	bnez	t8, .Lsmall_csumcpy		/* < 8 bytes to copy */
	 move	t2, a1

	andi	t7, src, 0x1			/* odd buffer? */

.Lhword_align:
	beqz	t7, .Lword_align
	 andi	t8, src, 0x2

	lbu	t0, (src)
	LONG_SUBU	a1, a1, 0x1
#ifdef __MIPSEL__
	sll	t0, t0, 8
#endif
	ADDC(sum, t0)
	PTR_ADDU	src, src, 0x1
	andi	t8, src, 0x2

.Lword_align:
	beqz	t8, .Ldword_align
	 sltiu	t8, a1, 56

	lhu	t0, (src)
	LONG_SUBU	a1, a1, 0x2
	ADDC(sum, t0)
	sltiu	t8, a1, 56
	PTR_ADDU	src, src, 0x2

.Ldword_align:
	bnez	t8, .Ldo_end_words
	 move	t8, a1

	andi	t8, src, 0x4
	beqz	t8, .Lqword_align
	 andi	t8, src, 0x8

	LOAD32	t0, 0x00(src)
	LONG_SUBU	a1, a1, 0x4
	ADDC(sum, t0)
	PTR_ADDU	src, src, 0x4
	andi	t8, src, 0x8

.Lqword_align:
	beqz	t8, .Loword_align
	 andi	t8, src, 0x10

#ifdef USE_DOUBLE
	ld	t0, 0x00(src)
	LONG_SUBU	a1, a1, 0x8
	ADDC(sum, t0)
#else
	lw	t0, 0x00(src)
	lw	t1, 0x04(src)
	LONG_SUBU	a1, a1, 0x8
	ADDC(sum, t0)
	ADDC(sum, t1)
#endif
	PTR_ADDU	src, src, 0x8
	andi	t8, src, 0x10

.Loword_align:
	beqz	t8, .Lbegin_movement
	 LONG_SRL	t8, a1, 0x7

#ifdef USE_DOUBLE
	ld	t0, 0x00(src)
	ld	t1, 0x08(src)
	ADDC(sum, t0)
	ADDC(sum, t1)
#else
	CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
#endif
	LONG_SUBU	a1, a1, 0x10
	PTR_ADDU	src, src, 0x10
	LONG_SRL	t8, a1, 0x7

.Lbegin_movement:
	beqz	t8, 1f
	 andi	t2, a1, 0x40

.Lmove_128bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
	LONG_SUBU	t8, t8, 0x01
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x80
	bnez	t8, .Lmove_128bytes
	.set	noreorder

1:
	beqz	t2, 1f
	 andi	t2, a1, 0x20

.Lmove_64bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x40

1:
	beqz	t2, .Ldo_end_words
	 andi	t8, a1, 0x1c

.Lmove_32bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	andi	t8, a1, 0x1c
	PTR_ADDU	src, src, 0x20

.Ldo_end_words:
	beqz	t8, .Lsmall_csumcpy
	 andi	t2, a1, 0x3
	LONG_SRL	t8, t8, 0x2

.Lend_words:
	LOAD32	t0, (src)
	LONG_SUBU	t8, t8, 0x1
	ADDC(sum, t0)
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x4
	bnez	t8, .Lend_words
	.set	noreorder

/* unknown src alignment and < 8 bytes to go */
.Lsmall_csumcpy:
	move	a1, t2

	andi	t0, a1, 4
	beqz	t0, 1f
	 andi	t0, a1, 2

	/* Still a full word to go */
	ulw	t1, (src)
	PTR_ADDIU	src, 4
#ifdef USE_DOUBLE
	dsll	t1, t1, 32			/* clear lower 32bit */
#endif
	ADDC(sum, t1)

1:	move	t1, zero
	beqz	t0, 1f
	 andi	t0, a1, 1

	/* Still a halfword to go */
	ulhu	t1, (src)
	PTR_ADDIU	src, 2

1:	beqz	t0, 1f
	 sll	t1, t1, 16

	lbu	t2, (src)
	 nop

#ifdef __MIPSEB__
	sll	t2, t2, 8
#endif
	or	t1, t2

1:	ADDC(sum, t1)

	/* fold checksum */
#ifdef USE_DOUBLE
	dsll32	v1, sum, 0
	daddu	sum, v1
	sltu	v1, sum, v1
	dsra32	sum, sum, 0
	addu	sum, v1
#endif

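/*
 * The dsll32/dsra32 sequence above (USE_DOUBLE only) folds the 64-bit
 * accumulator back into 32 bits with an end-around carry.  Roughly, in
 * C (illustrative only):
 *
 *	sum32 = (unsigned int)(sum64 >> 32) + (unsigned int)sum64;
 *	if (sum32 < (unsigned int)sum64)	-- carry out of the fold
 *		sum32++;
 */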
	/* odd buffer alignment? */
#ifdef CPU_MIPSR2
	wsbh	v1, sum
	movn	sum, v1, t7
#else
	beqz	t7, 1f				/* odd buffer alignment? */
	 lui	v1, 0x00ff
	addu	v1, 0x00ff
	and	t0, sum, v1
	sll	t0, t0, 8
	srl	sum, sum, 8
	and	sum, sum, v1
	or	sum, sum, t0
1:
#endif
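/*
 * If the buffer started on an odd address, every byte was accumulated
 * one position off, so the two bytes of each 16-bit half of the sum are
 * swapped here (wsbh on MIPS R2 cores, the shift/mask sequence above
 * otherwise).  Roughly, in C (illustrative only):
 *
 *	if (odd)
 *		sum = ((sum & 0x00ff00ff) << 8) | ((sum >> 8) & 0x00ff00ff);
 */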
	.set	reorder
	/* Add the passed partial csum. */
	ADDC32(sum, a2)
	jr	ra
	.set	noreorder
	END(csum_partial)


/*
 * checksum and copy routines based on memcpy.S
 *
 *	csum_partial_copy_nocheck(src, dst, len, sum)
 *	__csum_partial_copy_user(src, dst, len, sum, errp)
 *
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 */

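/*
 * Conceptually these routines behave like a memcpy() fused with
 * csum_partial() over the same bytes, e.g. (illustrative sketch only,
 * reusing the made-up csum_partial_ref() from above; the fault handling
 * of the _user variant is omitted):
 *
 *	unsigned int csum_and_copy_ref(const void *src, void *dst,
 *				       int len, unsigned int sum)
 *	{
 *		memcpy(dst, src, len);
 *		return csum_partial_ref(dst, len, sum);
 *	}
 */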
#define src a0
#define dst a1
#define len a2
#define psum a3
#define sum v0
#define odd t8
#define errptr t9

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by __csum_partial_copy_from_user and maintained by
 *	not writing AT in __csum_partial_copy
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
 */

#define EXC(inst_reg,addr,handler)		\
9:	inst_reg, addr;				\
	.section __ex_table,"a";		\
	PTR	9b, handler;			\
	.previous

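/*
 * EXC() emits the access instruction under a local "9:" label and adds a
 * matching __ex_table entry, so a fault taken on that instruction is
 * redirected to the named fixup handler instead of being treated as a
 * kernel bug.
 */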
#ifdef USE_DOUBLE

#define LOAD	ld
#define LOADL	ldl
#define LOADR	ldr
#define STOREL	sdl
#define STORER	sdr
#define STORE	sd
#define ADD	daddu
#define SUB	dsubu
#define SRL	dsrl
#define SLL	dsll
#define SLLV	dsllv
#define SRLV	dsrlv
#define NBYTES	8
#define LOG_NBYTES 3

#else

#define LOAD	lw
#define LOADL	lwl
#define LOADR	lwr
#define STOREL	swl
#define STORER	swr
#define STORE	sw
#define ADD	addu
#define SUB	subu
#define SRL	srl
#define SLL	sll
#define SLLV	sllv
#define SRLV	srlv
#define NBYTES	4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)

#define ADDRMASK (NBYTES-1)

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#else
	.set	at=v1
#endif

LEAF(__csum_partial_copy_user)
	PTR_ADDU	AT, src, len	/* See (1) above. */
#ifdef CONFIG_64BIT
	move	errptr, a4
#else
	lw	errptr, 16(sp)
#endif
FEXPORT(csum_partial_copy_nocheck)
	move	sum, zero
	move	odd, zero
	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	bnez	t2, .Lcopy_bytes_checklen
	 and	t0, src, ADDRMASK
	andi	odd, dst, 0x1			/* odd buffer? */
	bnez	t1, .Ldst_unaligned
	 nop
	bnez	t0, .Lsrc_unaligned_dst_aligned
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned:
	 SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned	# len < 8*NBYTES
	 nop
	SUB	len, 8*NBYTES			# subtract here for bgez loop
	.align	4
1:
EXC(	LOAD	t0, UNIT(0)(src), .Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src), .Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src), .Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src), .Ll_exc_copy)
EXC(	LOAD	t4, UNIT(4)(src), .Ll_exc_copy)
EXC(	LOAD	t5, UNIT(5)(src), .Ll_exc_copy)
EXC(	LOAD	t6, UNIT(6)(src), .Ll_exc_copy)
EXC(	LOAD	t7, UNIT(7)(src), .Ll_exc_copy)
	SUB	len, len, 8*NBYTES
	ADD	src, src, 8*NBYTES
EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc)
	ADDC(sum, t3)
EXC(	STORE	t4, UNIT(4)(dst), .Ls_exc)
	ADDC(sum, t4)
EXC(	STORE	t5, UNIT(5)(dst), .Ls_exc)
	ADDC(sum, t5)
EXC(	STORE	t6, UNIT(6)(dst), .Ls_exc)
	ADDC(sum, t6)
EXC(	STORE	t7, UNIT(7)(dst), .Ls_exc)
	ADDC(sum, t7)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 8*NBYTES
	bgez	len, 1b
	.set	noreorder
	ADD	len, 8*NBYTES			# revert len (see above)

	/*
	 * len == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned:
#define rem t7
	beqz	len, .Ldone
	 sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units
	 and	rem, len, (NBYTES-1)		# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
EXC(	LOAD	t0, UNIT(0)(src), .Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src), .Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src), .Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src), .Ll_exc_copy)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc)
	ADDC(sum, t3)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone
	.set	noreorder
.Lless_than_4units:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes
	 nop
1:
EXC(	LOAD	t0, 0(src), .Ll_exc)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst), .Ls_exc)
	ADDC(sum, t0)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder

	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because can't assume read-access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
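/*
 * In C terms the 1..NBYTES-1 trailing bytes are handled as a single full
 * word whose unwanted bytes are shifted out and back (zeroing them)
 * before being added to the sum, while STREST writes only the wanted
 * bytes to the end of dst.  A rough little-endian sketch (illustrative
 * only; load_word() and addc() are made-up helpers):
 *
 *	discard = 8 * (NBYTES - len);
 *	word    = (load_word(src) << discard) >> discard;
 *	sum     = addc(sum, word);
 */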
#define bits t2
	beqz	len, .Ldone
	 ADD	t1, dst, len			# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3			# rem = number of bits to keep
EXC(	LOAD	t0, 0(src), .Ll_exc)
	SUB	bits, bits, rem			# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
EXC(	STREST	t0, -1(t1), .Ls_exc)
	SHIFT_DISCARD_REVERT t0, t0, bits
	.set	reorder
	ADDC(sum, t0)
	b	.Ldone
	.set	noreorder
.Ldst_unaligned:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; T1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
#define match rem
EXC(	LDFIRST	t3, FIRST(0)(src), .Ll_exc)
	ADD	t2, zero, NBYTES
EXC(	LDREST	t3, REST(0)(src), .Ll_exc_copy)
	SUB	t2, t2, t1			# t2 = number of bytes copied
	xor	match, t0, t1
EXC(	STFIRST t3, FIRST(0)(dst), .Ls_exc)
	SLL	t4, t1, 3			# t4 = number of bits to discard
	SHIFT_DISCARD t3, t3, t4
	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
	ADDC(sum, t3)
	beq	len, t2, .Ldone
	 SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned
	 ADD	src, src, t2

.Lsrc_unaligned_dst_aligned:
	SRL	t0, len, LOG_NBYTES+2		# +2 for 4 units/iter
	beqz	t0, .Lcleanup_src_unaligned
	 and	rem, len, (4*NBYTES-1)		# rem = len % 4*NBYTES
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
EXC(	LDFIRST	t0, FIRST(0)(src), .Ll_exc)
EXC(	LDFIRST	t1, FIRST(1)(src), .Ll_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	LDREST	t0, REST(0)(src), .Ll_exc_copy)
EXC(	LDREST	t1, REST(1)(src), .Ll_exc_copy)
EXC(	LDFIRST	t2, FIRST(2)(src), .Ll_exc_copy)
EXC(	LDFIRST	t3, FIRST(3)(src), .Ll_exc_copy)
EXC(	LDREST	t2, REST(2)(src), .Ll_exc_copy)
EXC(	LDREST	t3, REST(3)(src), .Ll_exc_copy)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop					# improves slotting
#endif
EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc)
	ADDC(sum, t3)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcleanup_src_unaligned:
	beqz	len, .Ldone
	 and	rem, len, NBYTES-1		# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes
	 nop
1:
EXC(	LDFIRST	t0, FIRST(0)(src), .Ll_exc)
EXC(	LDREST	t0, REST(0)(src), .Ll_exc_copy)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst), .Ls_exc)
	ADDC(sum, t0)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcopy_bytes_checklen:
	beqz	len, .Ldone
	 nop
.Lcopy_bytes:
	/* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_INC 8
#else
#define SHIFT_START 8*(NBYTES-1)
#define SHIFT_INC -8
#endif
	move	t2, zero			# partial word
	li	t3, SHIFT_START			# shift
/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)				\
EXC(	lbu	t0, N(src), .Ll_exc_copy);	\
	SUB	len, len, 1;			\
EXC(	sb	t0, N(dst), .Ls_exc);		\
	SLLV	t0, t0, t3;			\
	addu	t3, SHIFT_INC;			\
	beqz	len, .Lcopy_bytes_done;		\
	 or	t2, t0

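/*
 * Each COPY_BYTE(N) copies one byte and also merges it into the partial
 * word t2 at the position it would occupy in a full word load, so the
 * final ADDC() of t2 contributes the bytes just as the aligned path
 * would.  Roughly, in C (illustrative only):
 *
 *	t2 = 0;  shift = SHIFT_START;
 *	for (i = 0; i < len; i++, shift += SHIFT_INC)
 *		t2 |= (unsigned long)src[i] << shift;
 *	sum = addc(sum, t2);
 */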
	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
EXC(	lbu	t0, NBYTES-2(src), .Ll_exc_copy)
	SUB	len, len, 1
EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
	SLLV	t0, t0, t3
	or	t2, t0
.Lcopy_bytes_done:
	ADDC(sum, t2)
.Ldone:
	/* fold checksum */
#ifdef USE_DOUBLE
	dsll32	v1, sum, 0
	daddu	sum, v1
	sltu	v1, sum, v1
	dsra32	sum, sum, 0
	addu	sum, v1
#endif

#ifdef CPU_MIPSR2
	wsbh	v1, sum
	movn	sum, v1, odd
#else
	beqz	odd, 1f				/* odd buffer alignment? */
	 lui	v1, 0x00ff
	addu	v1, 0x00ff
	and	t0, sum, v1
	sll	t0, t0, 8
	srl	sum, sum, 8
	and	sum, sum, v1
	or	sum, sum, t0
1:
#endif
	.set	reorder
	ADDC32(sum, psum)
	jr	ra
	.set	noreorder

.Ll_exc_copy:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOAD	t0, TI_TASK($28)
	li	t2, SHIFT_START
	LOAD	t0, THREAD_BUADDR(t0)
1:
EXC(	lbu	t1, 0(src), .Ll_exc)
	ADD	src, src, 1
	sb	t1, 0(dst)			# can't fault -- we're copy_from_user
	SLLV	t1, t1, t2
	addu	t2, SHIFT_INC
	ADDC(sum, t1)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc:
	LOAD	t0, TI_TASK($28)
	nop
	LOAD	t0, THREAD_BUADDR(t0)		# t0 is just past last good address
	nop
	SUB	len, AT, t0			# len number of uncopied bytes
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 * See (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
	ADD	dst, t0				# compute start address in a1
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder				/* DADDI_WAR */
	SUB	src, len, 1
	beqz	len, .Ldone
	.set	noreorder
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
	.set	push
	.set	noat
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	bnez	src, 1b
	 SUB	src, src, 1
#else
	li	v1, 1
	bnez	src, 1b
	 SUB	src, src, v1
#endif
	li	v1, -EFAULT
	b	.Ldone
	 sw	v1, (errptr)

.Ls_exc:
	li	v0, -1				/* invalid checksum */
	li	v1, -EFAULT
	jr	ra
	 sw	v1, (errptr)
	.set	pop
	END(__csum_partial_copy_user)