* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* Quick'n'dirty IP checksum ...
* Copyright (C) 1998, 1999 Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2007 Maciej W. Rozycki
* Copyright (C) 2014 Imagination Technologies Ltd.
#include <linux/errno.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
* As we are sharing code base with the mips32 tree (which uses the o32 ABI
* register definitions), we need to redefine the register definitions from
* the n64 ABI register naming to the o32 ABI register naming.
#endif /* USE_DOUBLE */
#define UNIT(unit) ((unit)*NBYTES)
#define ADDC(sum,reg) \
#define ADDC32(sum,reg) \
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
LOAD _t0, (offset + UNIT(0))(src); \
LOAD _t1, (offset + UNIT(1))(src); \
LOAD _t2, (offset + UNIT(2))(src); \
LOAD _t3, (offset + UNIT(3))(src); \
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \
CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
* a1: length of the area to checksum
* a2: partial checksum
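/*
 * For orientation, a minimal C sketch of what csum_partial computes
 * (illustrative only, not the kernel implementation or its API; it
 * ignores the alignment and odd-start handling done below and assumes
 * a 2-byte-aligned buffer on a little-endian machine):
 *
 *    static unsigned int csum_sketch(const void *buf, int len,
 *                                    unsigned int partial)
 *    {
 *            const unsigned short *p = buf;
 *            unsigned long long sum = partial;
 *
 *            while (len > 1) {                // sum native 16-bit words
 *                    sum += *p++;
 *                    len -= 2;
 *            }
 *            if (len)                         // trailing odd byte
 *                    sum += *(const unsigned char *)p;
 *            while (sum >> 32)                // end-around carry, as ADDC does
 *                    sum = (sum & 0xffffffffULL) + (sum >> 32);
 *            return (unsigned int)sum;        // 32-bit partial checksum
 *    }
 */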
bnez t8, .Lsmall_csumcpy /* < 8 bytes to copy */
andi t7, src, 0x1 /* odd buffer? */
beqz t7, .Lword_align
LONG_SUBU a1, a1, 0x1
PTR_ADDU src, src, 0x1
beqz t8, .Ldword_align
LONG_SUBU a1, a1, 0x2
PTR_ADDU src, src, 0x2
bnez t8, .Ldo_end_words
beqz t8, .Lqword_align
LONG_SUBU a1, a1, 0x4
PTR_ADDU src, src, 0x4
beqz t8, .Loword_align
LONG_SUBU a1, a1, 0x8
LONG_SUBU a1, a1, 0x8
PTR_ADDU src, src, 0x8
beqz t8, .Lbegin_movement
CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
LONG_SUBU a1, a1, 0x10
PTR_ADDU src, src, 0x10
CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
LONG_SUBU t8, t8, 0x01
.set reorder /* DADDI_WAR */
PTR_ADDU src, src, 0x80
bnez t8, .Lmove_128bytes
CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
PTR_ADDU src, src, 0x40
beqz t2, .Ldo_end_words
CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
PTR_ADDU src, src, 0x20
beqz t8, .Lsmall_csumcpy
LONG_SUBU t8, t8, 0x1
.set reorder /* DADDI_WAR */
PTR_ADDU src, src, 0x4
/* unknown src alignment and < 8 bytes to go */
/* Still a full word to go */
dsll t1, t1, 32 /* clear lower 32 bits */
/* Still a halfword to go */
/* odd buffer alignment? */
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3)
beqz t7, 1f /* odd buffer alignment? */
/* Add the passed partial csum. */
* checksum and copy routines based on memcpy.S
* csum_partial_copy_nocheck(src, dst, len, sum)
* __csum_partial_copy_kernel(src, dst, len, sum, errp)
* See "Spec" in memcpy.S for details. Unlike __copy_user, all
* functions in this file use the standard calling convention.
* The exception handler for loads requires that:
* 1- AT contain the address of the byte just past the end of the source
* 2- src_entry <= src < AT, and
* 3- (dst - src) == (dst_entry - src_entry),
* The _entry suffix denotes values when __copy_user was called.
* (1) is set up by __csum_partial_copy_from_user and maintained by
* not writing AT in __csum_partial_copy
* (2) is met by incrementing src by the number of bytes copied
* (3) is met by not doing loads between a pair of increments of dst and src
* The exception handlers for stores store -EFAULT to errptr and return.
* These handlers do not need to overwrite any data.
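/*
 * Illustration of how invariants (1)-(3) are used when a load faults
 * (a hedged C sketch with descriptive names, not the kernel's actual
 * fixup code; it needs <string.h> and <errno.h>): from the faulting
 * source address the handler can recover both the number of bytes
 * left and where the destination tail starts.
 *
 *    static void fault_fixup_sketch(unsigned char *dst_entry,
 *                                   const unsigned char *src_entry,
 *                                   const unsigned char *fault_addr,
 *                                   const unsigned char *AT, int *errp)
 *    {
 *            size_t copied    = fault_addr - src_entry;  // by (2)
 *            size_t remaining = AT - fault_addr;         // == len - copied, by (1)
 *
 *            memset(dst_entry + copied, 0, remaining);   // by (3), dst tracked src
 *            *errp = -EFAULT;                            // report the fault
 *    }
 */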
/* Instruction type */
#define LEGACY_MODE 1
* Wrapper to add an entry in the exception table
* in case the insn causes a memory exception.
* insn : Load/store instruction
* type : Instruction type
* handler : Exception handler
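/*
 * Conceptually, every EXC() use (e.g. LOAD(t0, UNIT(0)(src), .Ll_exc\@)
 * further down) emits the access at a local label plus one record in the
 * __ex_table section pairing that label with its handler. A hedged C
 * picture of such a record (field names are illustrative, not the exact
 * kernel struct layout):
 *
 *    struct ex_entry_sketch {
 *            unsigned long insn;   // address of the load/store that may fault
 *            unsigned long fixup;  // handler to resume at if it does
 *    };
 *
 * On a bad-address exception the trap code looks the faulting PC up in
 * this table and, if it finds a match, continues at the fixup address
 * instead of treating the fault as a kernel bug.
 */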
#define EXC(insn, type, reg, addr, handler) \
.if \mode == LEGACY_MODE; \
.section __ex_table,"a"; \
/* This is enabled in EVA mode */ \
/* If loading from user or storing to user */ \
.if ((\from == USEROP) && (type == LD_INSN)) || \
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
/* EVA without exception */ \
#define LOADK ld /* No exception */
#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
#define LOADK lw /* No exception */
#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
#endif /* USE_DOUBLE */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define STFIRST STORER
#define STREST STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#define LDFIRST LOADL
#define STFIRST STOREL
#define STREST STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit) (FIRST(unit)+NBYTES-1)
#define ADDRMASK (NBYTES-1)
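/*
 * Worked example of the three helpers above, assuming 64-bit units
 * (NBYTES == 8): FIRST(0)/REST(0) are byte offsets 0 and 7, FIRST(1)/
 * REST(1) are 8 and 15, and so on; an LDFIRST/LDREST pair (selected by
 * endianness above) reads the whole unit spanning those two offsets even
 * when src is misaligned. ADDRMASK extracts that misalignment, e.g.
 * (src & ADDRMASK) == 3 means src sits 3 bytes past a unit boundary.
 */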
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
PTR_ADDU AT, src, len /* See (1) above. */
/* initialize __nocheck if this is the first time we execute this
FEXPORT(csum_partial_copy_nocheck)
* Note: dst & src may be unaligned, len may be 0
* The "issue break"s below are very approximate.
* Issue delays for dcache fills will perturb the schedule, as will
* load queue full replay traps, etc.
* If len < NBYTES use byte operations.
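/*
 * Reference semantics of the copy-and-checksum paths selected below
 * (a hedged, byte-at-a-time C sketch, not the kernel routine; the
 * function names are made up, csum_sketch() refers to the illustration
 * near the top of this file, and <string.h> is needed):
 *
 *    static unsigned int copy_and_csum_ref(void *dst, const void *src,
 *                                          int len, unsigned int sum)
 *    {
 *            memcpy(dst, src, len);             // the copy half
 *            return csum_sketch(dst, len, sum); // checksum of the copied data
 *    }
 *
 * Everything from here to .Ldone\@ is about doing exactly this while
 * keeping the loads and stores as wide and as well scheduled as possible.
 */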
and t1, dst, ADDRMASK
bnez t2, .Lcopy_bytes_checklen\@
and t0, src, ADDRMASK
andi odd, dst, 0x1 /* odd buffer? */
bnez t1, .Ldst_unaligned\@
bnez t0, .Lsrc_unaligned_dst_aligned\@
* use delay slot for fall-through
* src and dst are aligned; need to compute rem
SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
SUB len, 8*NBYTES # subtract here for bgez loop
LOAD(t0, UNIT(0)(src), .Ll_exc\@)
LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
SUB len, len, 8*NBYTES
ADD src, src, 8*NBYTES
STORE(t0, UNIT(0)(dst), .Ls_exc\@)
STORE(t1, UNIT(1)(dst), .Ls_exc\@)
STORE(t2, UNIT(2)(dst), .Ls_exc\@)
STORE(t3, UNIT(3)(dst), .Ls_exc\@)
STORE(t4, UNIT(4)(dst), .Ls_exc\@)
STORE(t5, UNIT(5)(dst), .Ls_exc\@)
STORE(t6, UNIT(6)(dst), .Ls_exc\@)
STORE(t7, UNIT(7)(dst), .Ls_exc\@)
.set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
ADD len, 8*NBYTES # revert len (see above)
* len == the number of bytes left to copy < 8*NBYTES
.Lcleanup_both_aligned\@:
sltu t0, len, 4*NBYTES
bnez t0, .Lless_than_4units\@
and rem, len, (NBYTES-1) # rem = len % NBYTES
LOAD(t0, UNIT(0)(src), .Ll_exc\@)
LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
STORE(t0, UNIT(0)(dst), .Ls_exc\@)
STORE(t1, UNIT(1)(dst), .Ls_exc\@)
STORE(t2, UNIT(2)(dst), .Ls_exc\@)
STORE(t3, UNIT(3)(dst), .Ls_exc\@)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
.Lless_than_4units\@:
beq rem, len, .Lcopy_bytes\@
LOAD(t0, 0(src), .Ll_exc\@)
STORE(t0, 0(dst), .Ls_exc\@)
.set reorder /* DADDI_WAR */
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
* mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
* because can't assume read-access to dst. Instead, use
* STREST dst, which doesn't require read access to dst.
* This code should perform better than a simple loop on modern,
* wide-issue mips processors because the code has fewer branches and
* more instruction-level parallelism.
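/*
 * A hedged C illustration of the trick used below (little-endian case;
 * big-endian swaps the shift directions, as the SHIFT_DISCARD macros
 * above do): only the low 'rem' bytes of the loaded word are stored and
 * checksummed, and the unwanted high bytes are cleared purely with
 * shifts so dst never has to be read. Precondition: 0 < rem < NBYTES.
 *
 *    static unsigned long keep_low_bytes(unsigned long word, unsigned int rem)
 *    {
 *            unsigned int discard = 8 * (sizeof(word) - rem); // bits to drop
 *
 *            return (word << discard) >> discard; // SHIFT_DISCARD, then _REVERT
 *    }
 */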
ADD t1, dst, len # t1 is just past last byte of dst
SLL rem, len, 3 # rem = number of bits to keep
LOAD(t0, 0(src), .Ll_exc\@)
SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
STREST(t0, -1(t1), .Ls_exc\@)
SHIFT_DISCARD_REVERT t0, t0, bits
* t0 = src & ADDRMASK
* t1 = dst & ADDRMASK; t1 > 0
* Copy enough bytes to align dst
* Set match = (src and dst have same alignment)
LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
SUB t2, t2, t1 # t2 = number of bytes copied
STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
SLL t4, t1, 3 # t4 = number of bits to discard
SHIFT_DISCARD t3, t3, t4
/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
beq len, t2, .Ldone\@
beqz match, .Lboth_aligned\@
.Lsrc_unaligned_dst_aligned\@:
SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
beqz t0, .Lcleanup_src_unaligned\@
and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
* Avoid consecutive LD*'s to the same register since some mips
* implementations can't issue them in the same cycle.
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
SUB len, len, 4*NBYTES
LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
STORE(t0, UNIT(0)(dst), .Ls_exc\@)
STORE(t1, UNIT(1)(dst), .Ls_exc\@)
STORE(t2, UNIT(2)(dst), .Ls_exc\@)
STORE(t3, UNIT(3)(dst), .Ls_exc\@)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
.Lcleanup_src_unaligned\@:
and rem, len, NBYTES-1 # rem = len % NBYTES
beq rem, len, .Lcopy_bytes\@
LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
STORE(t0, 0(dst), .Ls_exc\@)
.set reorder /* DADDI_WAR */
.Lcopy_bytes_checklen\@:
/* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_START 8*(NBYTES-1)
move t2, zero # partial word
li t3, SHIFT_START # shift
/* use .Ll_exc_copy here to return correct sum on fault */
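/*
 * A hedged C sketch of the COPY_BYTE() tail loop below: each byte is
 * copied and also placed into a partial word at the position it would
 * occupy after a normal word load, so the final ADDC sees the same
 * value either way. SHIFT_START is defined above; the per-byte step
 * (SHIFT_INC, defined outside this excerpt) is assumed to be +8 on
 * little-endian and -8 on big-endian.
 *
 *    static unsigned long copy_tail_bytes(unsigned char *dst,
 *                                         const unsigned char *src,
 *                                         unsigned int n, int shift, int step)
 *    {
 *            unsigned long word = 0;
 *
 *            while (n--) {
 *                    unsigned char b = *src++;
 *                    *dst++ = b;
 *                    word |= (unsigned long)b << shift; // slot byte into word
 *                    shift += step;
 *            }
 *            return word;  // caller folds this into the running checksum
 *    }
 */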
#define COPY_BYTE(N) \
LOADBU(t0, N(src), .Ll_exc_copy\@); \
STOREB(t0, N(dst), .Ls_exc\@); \
addu t3, SHIFT_INC; \
beqz len, .Lcopy_bytes_done\@; \
LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3)
beqz odd, 1f /* odd buffer alignment? */
* Copy bytes from src until faulting load address (or until a lb faults)
* When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
* may be more than a byte beyond the last address.
* Hence, the lb below may get an exception.
* Assumes src < THREAD_BUADDR($28)
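/*
 * A hedged C sketch of the recovery loop below (descriptive names only):
 * re-walk the tail byte by byte up to the recorded bad address so every
 * byte that was actually readable still reaches dst.
 *
 *    static const unsigned char *copy_until_fault(unsigned char *dst,
 *                                                 const unsigned char *src,
 *                                                 const unsigned char *bad_addr)
 *    {
 *            while (src < bad_addr) {
 *                    unsigned char b = *src++; // the last load may itself fault
 *                    *dst++ = b;               // the store cannot fault here
 *            }
 *            return src;  // first address that could not be copied
 *    }
 *
 * The assembly additionally folds each byte into the partial checksum,
 * which this sketch omits.
 */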
LOADK t0, TI_TASK($28)
LOADK t0, THREAD_BUADDR(t0)
LOADBU(t1, 0(src), .Ll_exc\@)
sb t1, 0(dst) # can't fault -- we're copy_from_user
.set reorder /* DADDI_WAR */
LOADK t0, TI_TASK($28)
LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
SUB len, AT, t0 # len = number of uncopied bytes
* Here's where we rely on src and dst being incremented in tandem,
* dst += (fault addr - src) to put dst at first byte to clear
ADD dst, t0 # compute start address in a1
* Clear len bytes starting at dst. Can't call __bzero because it
* might modify len. An inefficient loop for these rare times...
.set reorder /* DADDI_WAR */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
li v0, -1 /* invalid checksum */
LEAF(__csum_partial_copy_kernel)
FEXPORT(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
END(__csum_partial_copy_kernel)
LEAF(__csum_partial_copy_to_user)
__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
END(__csum_partial_copy_to_user)
LEAF(__csum_partial_copy_from_user)
__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
END(__csum_partial_copy_from_user)