blob: 2307a3cb2714fca0819cfb7727eca41650f51fbf [file] [log] [blame]
Ralf Baechle56369192009-02-28 09:44:28 +00001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 by Waldorf Electronics
7 * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
Maciej W. Rozycki2db4bc32014-04-01 00:57:28 +01009 * Copyright (C) 2007, 2014 Maciej W. Rozycki
Ralf Baechle56369192009-02-28 09:44:28 +000010 */
Paul Gortmaker527581b2016-08-21 15:58:15 -040011#include <linux/export.h>
Ralf Baechle56369192009-02-28 09:44:28 +000012#include <linux/param.h>
13#include <linux/smp.h>
Maciej W. Rozyckie4964532014-04-06 21:42:49 +010014#include <linux/stringify.h>
Ralf Baechle56369192009-02-28 09:44:28 +000015
Maciej W. Rozyckie4964532014-04-06 21:42:49 +010016#include <asm/asm.h>
Ralf Baechle56369192009-02-28 09:44:28 +000017#include <asm/compiler.h>
18#include <asm/war.h>
19
/*
 * Constraint used for the decrement operand of the delay loop's
 * LONG_SUBU.  Normally "I" (a constant immediate) is fine, but when
 * CONFIG_CPU_DADDI_WORKAROUNDS is set the CPU's DADDI/DADDIU handling
 * is suspect, so "r" forces the constant into a register and the
 * subtraction is done register-to-register instead.
 */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define GCC_DADDI_IMM_ASM() "I"
#else
#define GCC_DADDI_IMM_ASM() "r"
#endif
25
/*
 * __delay - busy-wait for @loops iterations of a two-instruction loop.
 * @loops: number of loop iterations to spin for.
 *
 * The loop body is a branch-not-equal-zero back to itself with the
 * decrement (LONG_SUBU, i.e. subu or dsubu depending on the ABI word
 * size) placed in the branch delay slot, so each iteration is exactly
 * two instructions.  ".set noreorder" stops the assembler from
 * rescheduling around the delay slot; ".align 3" keeps the loop's
 * placement consistent so its timing is stable.  The decrement operand
 * uses GCC_DADDI_IMM_ASM() so that on CPUs with DADDI workarounds the
 * constant 1 comes from a register rather than an immediate.
 */
void __delay(unsigned long loops)
{
	__asm__ __volatile__ (
	"	.set	noreorder				\n"
	"	.align	3					\n"
	"1:	bnez	%0, 1b					\n"
	"	 " __stringify(LONG_SUBU) "	%0, %1		\n"
	"	.set	reorder					\n"
	: "=r" (loops)
	: GCC_DADDI_IMM_ASM() (1), "0" (loops));
}
EXPORT_SYMBOL(__delay);
38
39/*
40 * Division by multiplication: you don't have to worry about
41 * loss of precision.
42 *
Ralf Baechle70342282013-01-22 12:59:30 +010043 * Use only for very small delays ( < 1 msec). Should probably use a
Ralf Baechle56369192009-02-28 09:44:28 +000044 * lookup table, really, as the multiplications take much too long with
45 * short delays. This is a "reasonable" implementation, though (and the
 * first constant multiplication gets optimized away if the delay is
47 * a constant)
48 */
49
50void __udelay(unsigned long us)
51{
Ralf Baechleabe5b412010-03-10 16:16:04 +010052 unsigned int lpj = raw_current_cpu_data.udelay_val;
Ralf Baechle56369192009-02-28 09:44:28 +000053
Atsushi Nemoto3cb3a662009-06-09 11:12:48 +090054 __delay((us * 0x000010c7ull * HZ * lpj) >> 32);
Ralf Baechle56369192009-02-28 09:44:28 +000055}
56EXPORT_SYMBOL(__udelay);
57
58void __ndelay(unsigned long ns)
59{
Ralf Baechleabe5b412010-03-10 16:16:04 +010060 unsigned int lpj = raw_current_cpu_data.udelay_val;
Ralf Baechle56369192009-02-28 09:44:28 +000061
Atsushi Nemoto3cb3a662009-06-09 11:12:48 +090062 __delay((ns * 0x00000005ull * HZ * lpj) >> 32);
Ralf Baechle56369192009-02-28 09:44:28 +000063}
64EXPORT_SYMBOL(__ndelay);