#ifndef __ASM_SH_IRQ_H
#define __ASM_SH_IRQ_H

#include <asm/machvec.h>

/*
 * A sane default based on a reasonable vector table size, platforms are
 * advised to cap this at the hard limit that they're interested in
 * through the machvec.
 */
#define NR_IRQS 256

/*
 * Convert back and forth between INTEVT and IRQ values.
 */
#ifdef CONFIG_CPU_HAS_INTEVT
#define evt2irq(evt)		(((evt) >> 5) - 16)
#define irq2evt(irq)		(((irq) + 16) << 5)
#else
#define evt2irq(evt)		(evt)
#define irq2evt(irq)		(irq)
#endif
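
/*
 * Worked example of the conversion above, purely for illustration; the
 * numbers follow directly from the macros in the CONFIG_CPU_HAS_INTEVT
 * case: exception codes are spaced 0x20 apart, so INTEVT 0x200 maps to
 * IRQ 0, 0x220 to IRQ 1, and 0x600 to IRQ 32, while irq2evt() is the
 * exact inverse, e.g. irq2evt(32) == 0x600.
 */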

/*
 * Simple Mask Register Support
 */
extern void make_maskreg_irq(unsigned int irq);
extern unsigned short *irq_mask_register;

/*
 * PINT IRQs
 */
void init_IRQ_pint(void);

/*
 * The shift value is now the number of bits to shift, not the number of
 * bits/4. This is to make it easier to read the value directly from the
 * datasheets. The IPR address, addr, will be set from ipr_idx via the
 * map_ipridx_to_addr function.
 */
struct ipr_data {
	unsigned int irq;
	int ipr_idx;		/* Index for the IPR register */
	int shift;		/* Number of bits to shift the data */
	int priority;		/* The priority */
	unsigned int addr;	/* Address of Interrupt Priority Register */
};
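
/*
 * Illustration of the shift convention above (the field values here are
 * hypothetical, not taken from any particular SoC): a priority field
 * occupying bits 7..4 of an IPR register is described with shift = 4,
 * where the old bits/4 encoding would have used 1.  A table entry would
 * then look like
 *
 *	{ .irq = 52, .ipr_idx = 2, .shift = 4, .priority = 3 },
 *
 * leaving addr to be filled in from ipr_idx via map_ipridx_to_addr().
 */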

/*
 * Given an IPR IDX, map the value to an IPR register address.
 */
unsigned int map_ipridx_to_addr(int idx);

/*
 * Enable individual interrupt mode for external IPR IRQs.
 */
void ipr_irq_enable_irlm(void);

/*
 * Functions for "on chip support modules".
 */
void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs);
void make_imask_irq(unsigned int irq);
void init_IRQ_ipr(void);
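
/*
 * Typical registration pattern (a sketch only; "foo_ipr_map" and its
 * contents are hypothetical, the real tables live in CPU/board setup
 * code):
 *
 *	static struct ipr_data foo_ipr_map[] = {
 *		{ .irq = 52, .ipr_idx = 2, .shift = 4, .priority = 3 },
 *	};
 *
 *	make_ipr_irq(foo_ipr_map, ARRAY_SIZE(foo_ipr_map));
 */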

struct intc2_data {
	unsigned short irq;
	unsigned char ipr_offset, ipr_shift;
	unsigned char msk_offset, msk_shift;
	unsigned char priority;
};

void make_intc2_irq(struct intc2_data *, unsigned int nr_irqs);
void init_IRQ_intc2(void);
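
/*
 * make_intc2_irq() takes an analogous table describing per-IRQ priority
 * and mask register offsets/shifts for INTC2-style controllers.  A
 * hypothetical entry (offsets and shifts depend entirely on the CPU's
 * INTC2 register layout) might read:
 *
 *	{ .irq = 64, .ipr_offset = 0x00, .ipr_shift = 12,
 *	  .msk_offset = 0x40, .msk_shift = 4, .priority = 2 },
 */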

static inline int generic_irq_demux(int irq)
{
	return irq;
}

#define irq_canonicalize(irq)	(irq)
#define irq_demux(irq)		sh_mv.mv_irq_demux(irq)

#ifdef CONFIG_4KSTACKS
extern void irq_ctx_init(int cpu);
extern void irq_ctx_exit(int cpu);
# define __ARCH_HAS_DO_SOFTIRQ
#else
# define irq_ctx_init(cpu) do { } while (0)
# define irq_ctx_exit(cpu) do { } while (0)
#endif

#endif /* __ASM_SH_IRQ_H */