blob: 94323f20816e022be510c6e9dad7d06e5ef24d7d [file] [log] [blame]
/* Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 *
 * Wrappers of assembly checksum functions for x86-64.
 */

#include <asm/checksum.h>
#include <linux/module.h>

10/**
11 * csum_partial_copy_from_user - Copy and checksum from user space.
12 * @src: source address (user space)
13 * @dst: destination address
14 * @len: number of bytes to be copied.
15 * @isum: initial sum that is added into the result (32bit unfolded)
16 * @errp: set to -EFAULT for an bad source address.
17 *
18 * Returns an 32bit unfolded checksum of the buffer.
19 * src and dst are best aligned to 64bits.
20 */
21unsigned int
22csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
23 int len, unsigned int isum, int *errp)
24{
25 might_sleep();
26 *errp = 0;
27 if (likely(access_ok(VERIFY_READ,src, len))) {
28 /* Why 6, not 7? To handle odd addresses aligned we
29 would need to do considerable complications to fix the
30 checksum which is defined as an 16bit accumulator. The
31 fix alignment code is primarily for performance
32 compatibility with 32bit and that will handle odd
33 addresses slowly too. */
34 if (unlikely((unsigned long)src & 6)) {
35 while (((unsigned long)src & 6) && len >= 2) {
36 __u16 val16;
37 *errp = __get_user(val16, (__u16 __user *)src);
38 if (*errp)
39 return isum;
40 *(__u16 *)dst = val16;
41 isum = add32_with_carry(isum, val16);
42 src += 2;
43 dst += 2;
44 len -= 2;
45 }
46 }
47 isum = csum_partial_copy_generic((__force void *)src,dst,len,isum,errp,NULL);
48 if (likely(*errp == 0))
49 return isum;
50 }
51 *errp = -EFAULT;
52 memset(dst,0,len);
53 return isum;
54}
55
56EXPORT_SYMBOL(csum_partial_copy_from_user);
57
58/**
59 * csum_partial_copy_to_user - Copy and checksum to user space.
60 * @src: source address
61 * @dst: destination address (user space)
62 * @len: number of bytes to be copied.
63 * @isum: initial sum that is added into the result (32bit unfolded)
64 * @errp: set to -EFAULT for an bad destination address.
65 *
66 * Returns an 32bit unfolded checksum of the buffer.
67 * src and dst are best aligned to 64bits.
68 */
69unsigned int
70csum_partial_copy_to_user(unsigned const char *src, unsigned char __user *dst,
71 int len, unsigned int isum, int *errp)
72{
73 might_sleep();
74 if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
75 *errp = -EFAULT;
76 return 0;
77 }
78
79 if (unlikely((unsigned long)dst & 6)) {
80 while (((unsigned long)dst & 6) && len >= 2) {
81 __u16 val16 = *(__u16 *)src;
82 isum = add32_with_carry(isum, val16);
83 *errp = __put_user(val16, (__u16 __user *)dst);
84 if (*errp)
85 return isum;
86 src += 2;
87 dst += 2;
88 len -= 2;
89 }
90 }
91
92 *errp = 0;
93 return csum_partial_copy_generic(src, (void __force *)dst,len,isum,NULL,errp);
94}
95
96EXPORT_SYMBOL(csum_partial_copy_to_user);
97
98/**
99 * csum_partial_copy_nocheck - Copy and checksum.
100 * @src: source address
101 * @dst: destination address
102 * @len: number of bytes to be copied.
103 * @isum: initial sum that is added into the result (32bit unfolded)
104 *
105 * Returns an 32bit unfolded checksum of the buffer.
106 */
107unsigned int
108csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
109{
110 return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
111}
112
/**
 * csum_ipv6_magic - Compute the IPv6 pseudo-header checksum.
 * @saddr: source IPv6 address (16 bytes, read via inline asm)
 * @daddr: destination IPv6 address (16 bytes, read via inline asm)
 * @len: payload length; converted here with htonl(), so callers pass host order
 * @proto: next-header protocol; converted here with htons()
 * @sum: initial 32bit unfolded sum to add into the result
 *
 * Returns the folded 16bit checksum.
 */
unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
	       __u32 len, unsigned short proto, unsigned int sum)
{
	__u64 rest, sum64;

	/* Non-address pseudo-header fields, widened to 64bit so the
	   64bit adds below accumulate carries without overflow. */
	rest = (__u64)htonl(len) + (__u64)htons(proto) + (__u64)sum;
	/* Sum both 128bit addresses as two 64bit halves each, with carry
	   chained through adcq; the final "adcq $0" folds the last carry
	   back into the sum.
	   NOTE(review): the asm reads *saddr/*daddr but declares no "m"
	   inputs or "memory" clobber — presumably safe because callers'
	   stores precede the call, but confirm the compiler cannot sink
	   stores past this asm. */
	asm(" addq (%[saddr]),%[sum]\n"
	    " adcq 8(%[saddr]),%[sum]\n"
	    " adcq (%[daddr]),%[sum]\n"
	    " adcq 8(%[daddr]),%[sum]\n"
	    " adcq $0,%[sum]\n"
	    : [sum] "=r" (sum64)
	    : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
	/* Fold 64bit -> 32bit with end-around carry, then 32bit -> 16bit. */
	return csum_fold(add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}

EXPORT_SYMBOL(csum_ipv6_magic);