/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <asm/processor.h>
#include <asm/mwait.h>
#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

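/*
 * Power-on default for IA32_PAT: each byte selects one PAT entry, giving
 * WB, WT, UC- and UC for entries 0-3 and the same pattern again for
 * entries 4-7 (06h = WB, 04h = WT, 07h = UC-, 00h = UC).
 */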
#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

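/*
 * Drop any exception that is still queued for (re)injection into the
 * guest; the interrupt queue below is handled separately.
 */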
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

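/*
 * #BP (INT3) and #OF (INTO) are raised by instructions rather than by
 * hardware conditions, so they are injected as software exceptions.
 */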
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

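/* Mask the index to its low five bits so the shift count never reaches 32. */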
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

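/*
 * Width of a guest linear address: 57 bits with 5-level paging
 * (CR4.LA57 set), 48 bits with 4-level paging.
 */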
static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}
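/*
 * get_canonical() sign-extends the topmost implemented address bit: with
 * 48-bit addressing, 0xffff800000000000 maps to itself (canonical), while
 * 0x0000800000000000 maps to 0xffff800000000000 and is therefore reported
 * as non-canonical by the helpers below.
 */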

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
	return false;
#endif
}

static inline bool emul_is_noncanonical_address(u64 la,
	struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
	return false;
#endif
}

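/*
 * Per-vCPU cache of the last emulated MMIO access: the guest virtual
 * address (or nGPA when nested), the target gfn and the access bits.
 * The cached memslot generation lets vcpu_match_mmio_gen() implicitly
 * invalidate the entry whenever the memslots change.
 */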
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually an nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);

#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

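/*
 * Convert a span of nanoseconds into guest TSC cycles, using the
 * mult/shift pair precomputed for the vCPU's virtual TSC frequency.
 */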
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
		u32 __quot, __rem;				\
		asm("divl %2" : "=a" (__quot), "=d" (__rem)	\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
		n = __quot;					\
		__rem;						\
	 })
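
/*
 * Worked example (assuming n < base, so the 32-bit quotient of divl
 * cannot overflow):
 *
 *	u32 n = 3, rem;
 *
 *	rem = do_shl32_div32(n, 1000);
 *
 * afterwards n == 12884901, i.e. (3 << 32) / 1000, and rem == 888.
 */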

static inline bool kvm_mwait_in_guest(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT))
		return false;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/* All AMD CPUs have a working MWAIT implementation */
		return true;
	case X86_VENDOR_INTEL:
		/* Handle Intel below */
		break;
	default:
		return false;
	}

	/*
	 * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic as
	 * they would allow the guest to stop the CPU completely by disabling
	 * interrupts and then invoking MWAIT.
	 */
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return false;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return false;

	return true;
}

#endif