/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

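/*
 * A sketch of the assumption behind this helper: the task's pt_regs sit
 * at the top of its kernel stack, just below TOP_OF_KERNEL_STACK_PADDING,
 * and thread_info sits at the bottom of the same THREAD_SIZE-sized stack,
 * so one can be computed from the other.
 */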
static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
{
	unsigned long top_of_stack =
		(unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
	return (struct thread_info *)(top_of_stack - THREAD_SIZE);
}

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit();
}
#endif

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * We can return 0 to resume the syscall or anything else to go to phase
 * 2.  If we resume the syscall, we need to put something appropriate in
 * regs->orig_ax.
 *
 * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
 * are fully functional.
 *
 * For phase 2's benefit, our return value is:
 *	0:		resume the syscall
 *	1:		go to phase 2; no seccomp phase 2 needed
 *	anything else:	go to phase 2; pass return value to seccomp
 */
unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long ret = 0;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

#ifdef CONFIG_CONTEXT_TRACKING
	/*
	 * If TIF_NOHZ is set, we are required to call user_exit() before
	 * doing anything that could touch RCU.
	 */
	if (work & _TIF_NOHZ) {
		enter_from_user_mode();
		work &= ~_TIF_NOHZ;
	}
#endif

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp first -- it should minimize exposure of other
	 * code, and keeping seccomp fast is probably more valuable
	 * than the rest of this.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
		BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);

		ret = seccomp_phase1(&sd);
		if (ret == SECCOMP_PHASE1_SKIP) {
			regs->orig_ax = -1;
			ret = 0;
		} else if (ret != SECCOMP_PHASE1_OK) {
			return ret;  /* Go directly to phase 2 */
		}

		work &= ~_TIF_SECCOMP;
	}
#endif

	/* Do our best to finish without phase 2. */
	if (work == 0)
		return ret;  /* seccomp and/or nohz only (ret == 0 here) */

#ifdef CONFIG_AUDITSYSCALL
	if (work == _TIF_SYSCALL_AUDIT) {
		/*
		 * If there is no more work to be done except auditing,
		 * then audit in phase 1.  Phase 2 always audits, so, if
		 * we audit here, then we can't go on to phase 2.
		 */
		do_audit_syscall_entry(regs, arch);
		return 0;
	}
#endif

	return 1;  /* Something is enabled that we can't handle in phase 1 */
}

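/*
 * For context, the seccomp_data filled in by phase 1 above is what a
 * classic-BPF filter installed via seccomp(2)/prctl(2) inspects.  An
 * illustrative userspace sketch (not kernel code) of a filter that
 * would exercise this path by denying getpid():
 *
 *	struct sock_filter filter[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(filter) / sizeof(filter[0]),
 *		.filter = filter,
 *	};
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */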
/* Returns the syscall nr to run (which should match regs->orig_ax). */
long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
				unsigned long phase1_result)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	long ret = 0;
	u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (work & _TIF_SINGLESTEP)
		regs->flags |= X86_EFLAGS_TF;

#ifdef CONFIG_SECCOMP
	/*
	 * Call seccomp_phase2 before running the other hooks so that
	 * they can see any changes made by a seccomp tracer.
	 */
	if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
		/* seccomp failures shouldn't expose any additional code. */
		return -1;
	}
#endif

	if (unlikely(work & _TIF_SYSCALL_EMU))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
	unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);

	if (phase1_result == 0)
		return regs->orig_ax;
	else
		return syscall_trace_enter_phase2(regs, arch, phase1_result);
}

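/*
 * Illustrative only: a userspace tracer along these lines is one of the
 * things that sets TIF_SYSCALL_TRACE and routes every syscall through
 * syscall_trace_enter() above (a sketch, not kernel code):
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);  // stop at entry/exit
 */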
#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set.  Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	u32 cached_flags;

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags =
		READ_ONCE(pt_regs_to_thread_info(regs)->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

	user_enter();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.
	 */
	ti->status &= ~TS_COMPAT;
#endif

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long nr = regs->orig_ax;

	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif

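/*
 * For reference, the user-side register convention that do_syscall_64()
 * assumes (an illustrative userspace asm sketch, not kernel code):
 *
 *	movq $__NR_write, %rax	// syscall number, becomes regs->orig_ax
 *	movq $1, %rdi		// arg1
 *	leaq buf(%rip), %rsi	// arg2
 *	movq $len, %rdx		// arg3
 *	syscall			// args 4/5/6 travel in %r10, %r8, %r9
 *
 * The fourth argument travels in %r10 rather than %rcx because the
 * SYSCALL instruction itself clobbers %rcx with the return address.
 */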
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on and does all entry and
 * exit work and returns with IRQs off.  This function is extremely hot
 * in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
#ifdef CONFIG_X86_32
/* 32-bit kernels use a trap gate for INT80, and the asm code calls here. */
__visible
#else
/* 64-bit kernels use do_syscall_32_irqs_off() instead. */
static
#endif
__always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}

#ifdef CONFIG_X86_64
/* Handles INT80 on 64-bit kernels */
__visible void do_syscall_32_irqs_off(struct pt_regs *regs)
{
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}
#endif

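/*
 * For reference, the user-side convention for the int $0x80 path above
 * (an illustrative userspace asm sketch, not kernel code):
 *
 *	movl $4, %eax		// __NR_write on 32-bit ABIs
 *	movl $1, %ebx		// arg1
 *	movl $buf, %ecx		// arg2
 *	movl $len, %edx		// arg3
 *	int $0x80		// args 4/5/6 travel in %esi, %edi, %ebp
 */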
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	/*
	 * Fetch EBP from where the vDSO stashed it.
	 *
	 * WARNING: We are in CONTEXT_USER and RCU isn't paying attention!
	 */
	local_irq_enable();
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
#ifdef CONFIG_CONTEXT_TRACKING
		enter_from_user_mode();
#endif
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif