#ifndef _LINUX_VIRTIO_BYTEORDER_H
#define _LINUX_VIRTIO_BYTEORDER_H
#include <linux/types.h>
#include <uapi/linux/virtio_types.h>

/*
 * Low-level memory accessors for handling virtio in modern little endian and in
 * compatibility native endian format.
 */
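
/*
 * Note: whether a given device uses the little-endian ("modern") layout is
 * normally settled by feature negotiation; devices that negotiated
 * VIRTIO_F_VERSION_1 are always little endian, while legacy devices use the
 * guest's native byte order.  That decision is passed to every accessor
 * below as the little_endian argument.
 */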

static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
{
	if (little_endian)
		return le16_to_cpu((__force __le16)val);
	else
		return (__force u16)val;
}

static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
{
	if (little_endian)
		return (__force __virtio16)cpu_to_le16(val);
	else
		return (__force __virtio16)val;
}

static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
{
	if (little_endian)
		return le32_to_cpu((__force __le32)val);
	else
		return (__force u32)val;
}

static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
{
	if (little_endian)
		return (__force __virtio32)cpu_to_le32(val);
	else
		return (__force __virtio32)val;
}

static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
{
	if (little_endian)
		return le64_to_cpu((__force __le64)val);
	else
		return (__force u64)val;
}

static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
{
	if (little_endian)
		return (__force __virtio64)cpu_to_le64(val);
	else
		return (__force __virtio64)val;
}

#endif /* _LINUX_VIRTIO_BYTEORDER_H */
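
/*
 * A minimal usage sketch: a hypothetical driver converting a 16-bit
 * descriptor field between device order and CPU order.  The names some_hdr,
 * read_len and write_len are assumed for illustration; the little_endian
 * flag would come from the driver's own feature check.
 *
 *	struct some_hdr {
 *		__virtio16 len;
 *	};
 *
 *	static u16 read_len(bool little_endian, const struct some_hdr *h)
 *	{
 *		return __virtio16_to_cpu(little_endian, h->len);
 *	}
 *
 *	static void write_len(bool little_endian, struct some_hdr *h, u16 len)
 *	{
 *		h->len = __cpu_to_virtio16(little_endian, len);
 *	}
 */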