#ifndef __ASM_SH_UNALIGNED_SH4A_H
#define __ASM_SH_UNALIGNED_SH4A_H

/*
 * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
 * Support for 64-bit accesses is done through shifting and masking
 * relative to the endianness. Unaligned stores are not supported by the
 * instruction encoding, so these continue to use the packed struct.
 *
 * The same note as with the movli.l/movco.l pair applies here: as long
 * as the load is guaranteed to be inlined, nothing else will hook into
 * r0 and we get the return value for free.
 *
 * NOTE: Because we require r0 encoding, care should be taken to avoid
 * mixing these heavily with other r0 consumers, such as the atomic ops.
 * Failure to adhere to this can result in the compiler running out of
 * spill registers and blowing up when building at low optimization
 * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
 */
#include <linux/unaligned/packed_struct.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * There is no 16-bit equivalent of movua.l, so assemble halfwords
 * bytewise.
 */
static inline u16 sh4a_get_unaligned_cpu16(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
	return p[0] | p[1] << 8;
#else
	return p[0] << 8 | p[1];
#endif
}

/*
 * movua.l is the only unaligned load the ISA provides, and its
 * destination is hardwired to r0, hence the "z" constraint.
 */
static __always_inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
{
	unsigned long unaligned;

	__asm__ __volatile__ (
		"movua.l	@%1, %0\n\t"
		 : "=z" (unaligned)
		 : "r" (p)
	);

	return unaligned;
}
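
/*
 * Illustrative example (not part of the API): given a buffer of known
 * bytes,
 *
 *	static const u8 buf[] = { 0x78, 0x56, 0x34, 0x12, 0xaa };
 *
 *	u32 v = sh4a_get_unaligned_cpu32(buf + 1);
 *
 * the single movua.l reads the four bytes at the unaligned address
 * buf + 1, so on a little-endian kernel v ends up as 0xaa123456.
 */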

/*
 * Even though movua.l supports auto-increment on the read side, it can
 * only store to r0 due to instruction encoding constraints, so just let
 * the compiler sort it out on its own.
 */
static inline u64 sh4a_get_unaligned_cpu64(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
	return (u64)sh4a_get_unaligned_cpu32(p + 4) << 32 |
		    sh4a_get_unaligned_cpu32(p);
#else
	return (u64)sh4a_get_unaligned_cpu32(p) << 32 |
		    sh4a_get_unaligned_cpu32(p + 4);
#endif
}
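
/*
 * Illustrative example: on a little-endian kernel the 64-bit read is
 * composed as (hi << 32) | lo from two movua.l loads, so for the byte
 * sequence 01 02 03 04 05 06 07 08 the result is 0x0807060504030201.
 */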

static inline u16 get_unaligned_le16(const void *p)
{
	return le16_to_cpu(sh4a_get_unaligned_cpu16(p));
}

static inline u32 get_unaligned_le32(const void *p)
{
	return le32_to_cpu(sh4a_get_unaligned_cpu32(p));
}

static inline u64 get_unaligned_le64(const void *p)
{
	return le64_to_cpu(sh4a_get_unaligned_cpu64(p));
}

static inline u16 get_unaligned_be16(const void *p)
{
	return be16_to_cpu(sh4a_get_unaligned_cpu16(p));
}

static inline u32 get_unaligned_be32(const void *p)
{
	return be32_to_cpu(sh4a_get_unaligned_cpu32(p));
}

static inline u64 get_unaligned_be64(const void *p)
{
	return be64_to_cpu(sh4a_get_unaligned_cpu64(p));
}
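
/*
 * Usage sketch (hypothetical caller, for illustration only): pulling a
 * big-endian 16-bit length field out of a byte stream at an arbitrary,
 * possibly odd, offset:
 *
 *	u16 len = get_unaligned_be16(buf + offset);
 *
 * On this CPU the 32/64-bit variants hit movua.l rather than a bytewise
 * fallback.
 */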

/*
 * There is no unaligned store instruction at all, so when the target
 * endianness differs from the CPU's the swap is folded into a bytewise
 * store.
 */
static inline void nonnative_put_le16(u16 val, u8 *p)
{
	*p++ = val;
	*p++ = val >> 8;
}

static inline void nonnative_put_le32(u32 val, u8 *p)
{
	nonnative_put_le16(val, p);
	nonnative_put_le16(val >> 16, p + 2);
}

static inline void nonnative_put_le64(u64 val, u8 *p)
{
	nonnative_put_le32(val, p);
	nonnative_put_le32(val >> 32, p + 4);
}

static inline void nonnative_put_be16(u16 val, u8 *p)
{
	*p++ = val >> 8;
	*p++ = val;
}

static inline void nonnative_put_be32(u32 val, u8 *p)
{
	nonnative_put_be16(val >> 16, p);
	nonnative_put_be16(val, p + 2);
}

static inline void nonnative_put_be64(u64 val, u8 *p)
{
	nonnative_put_be32(val >> 32, p);
	nonnative_put_be32(val, p + 4);
}
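
/*
 * Illustrative example: nonnative_put_be32(0x12345678, p) stores the
 * bytes 0x12 0x34 0x56 0x78 in that order regardless of CPU endianness,
 * the 32-bit store decomposing into two bytewise 16-bit halves.
 */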

static inline void put_unaligned_le16(u16 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	__put_unaligned_cpu16(val, p);
#else
	nonnative_put_le16(val, p);
#endif
}

static inline void put_unaligned_le32(u32 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	__put_unaligned_cpu32(val, p);
#else
	nonnative_put_le32(val, p);
#endif
}

static inline void put_unaligned_le64(u64 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	__put_unaligned_cpu64(val, p);
#else
	nonnative_put_le64(val, p);
#endif
}

static inline void put_unaligned_be16(u16 val, void *p)
{
#ifdef __BIG_ENDIAN
	__put_unaligned_cpu16(val, p);
#else
	nonnative_put_be16(val, p);
#endif
}

static inline void put_unaligned_be32(u32 val, void *p)
{
#ifdef __BIG_ENDIAN
	__put_unaligned_cpu32(val, p);
#else
	nonnative_put_be32(val, p);
#endif
}

static inline void put_unaligned_be64(u64 val, void *p)
{
#ifdef __BIG_ENDIAN
	__put_unaligned_cpu64(val, p);
#else
	nonnative_put_be64(val, p);
#endif
}

/*
 * While it's a bit non-obvious, even though the generic le/be wrappers
 * use the __get/put_xxx prefixing, they actually wrap into the
 * non-prefixed get/put_xxx variants provided above.
 */
#include <linux/unaligned/generic.h>

#ifdef __LITTLE_ENDIAN
# define get_unaligned	__get_unaligned_le
# define put_unaligned	__put_unaligned_le
#else
# define get_unaligned	__get_unaligned_be
# define put_unaligned	__put_unaligned_be
#endif
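
/*
 * Illustrative example: the generic wrappers infer the access size from
 * the pointer type, so
 *
 *	u32 v = get_unaligned((u32 *)p);
 *	put_unaligned(v, (u32 *)q);
 *
 * perform native-endian 32-bit accesses at p and q regardless of their
 * alignment.
 */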

#endif /* __ASM_SH_UNALIGNED_SH4A_H */