#ifndef _ASM_IA64_PCI_H
#define _ASM_IA64_PCI_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/scatterlist.h>

/*
 * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
 * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
 * loader.
 */
#define pcibios_assign_all_busses()	0
#define pcibios_scan_all_fns(a, b)	0
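/*
 * Note: evaluating to 0 here means the generic code trusts the bus numbers
 * already assigned by the firmware/ACPI instead of renumbering every bus.
 */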

#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

void pcibios_config_init(void);

struct pci_dev;

/*
 * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
 * between device bus addresses and CPU physical addresses.  Platforms with a hardware I/O
 * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
 * network device layers.  Platforms with separate bus address spaces _must_ turn this off
 * and provide a device DMA mapping implementation that takes care of the necessary
 * address translation.
 *
 * For now, the ia64 platforms which may have separate/multiple bus address spaces all
 * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
 * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
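/*
 * For reference (the call sites live outside this header and are paraphrased,
 * not quoted): generic code uses PCI_DMA_BUS_IS_PHYS to decide whether bounce
 * buffering is needed at all.  With an IOMMU present the macro is 0 and, for
 * example, the SCSI midlayer then treats every page as DMA-able, roughly:
 *
 *	if (!PCI_DMA_BUS_IS_PHYS)
 *		return BLK_BOUNCE_ANY;
 */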

static inline void
pcibios_set_master (struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void
pcibios_penalize_isa_irq (int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}

#define HAVE_ARCH_PCI_MWI 1
extern int pcibios_prep_mwi (struct pci_dev *);

#include <asm-generic/pci-dma-compat.h>

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
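/*
 * Sketch of the usual driver pattern for the unmap-state macros above, in the
 * style of Documentation/DMA-mapping.txt (struct and field names below are
 * illustrative only):
 *
 *	struct ring_info {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	map:	dma_addr_t buf = pci_map_single(pdev, data, size, dir);
 *		pci_unmap_addr_set(ri, mapping, buf);
 *		pci_unmap_len_set(ri, len, size);
 *
 *	unmap:	pci_unmap_single(pdev, pci_unmap_addr(ri, mapping),
 *				 pci_unmap_len(ri, len), dir);
 */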

/* The ia64 platform always supports 64-bit addressing. */
#define pci_dac_dma_supported(pci_dev, mask)		(1)
#define pci_dac_page_to_dma(dev,pg,off,dir)		((dma_addr_t) page_to_bus(pg) + (off))
#define pci_dac_dma_to_page(dev,dma_addr)		(virt_to_page(bus_to_virt(dma_addr)))
#define pci_dac_dma_to_offset(dev,dma_addr)		offset_in_page(dma_addr)
#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)	do { } while (0)
#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)	do { mb(); } while (0)
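/*
 * The pci_dac_* definitions above back the 64-bit DAC (dual address cycle)
 * interface: ia64 bus addresses are wide enough that a page converts straight
 * to a bus address via page_to_bus(), so no mapping state is created and
 * there is nothing to tear down; the device-direction sync only needs the
 * memory barrier issued above.
 */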

#define sg_dma_len(sg)		((sg)->dma_length)
#define sg_dma_address(sg)	((sg)->dma_address)
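/*
 * After pci_map_sg() these report what the device should be programmed with;
 * an IOMMU may have merged entries, so drivers use the returned count, e.g.:
 *
 *	nents = pci_map_sg(pdev, sg, count, dir);
 *	for (i = 0; i < nents; i++)
 *		setup_hw_desc(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *
 * (setup_hw_desc() is a placeholder for the device-specific step.)
 */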

#ifdef CONFIG_PCI
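/*
 * pci_dma_burst_advice() bases its advice on the PCI cache line size
 * register, which is programmed in units of 32-bit words (hence the "* 4"
 * below); a value of 0 is taken to mean "unknown" and a 1024-byte burst
 * boundary is assumed instead.
 */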
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}
#endif

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
				enum pci_mmap_state mmap_state, int write_combine);
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
				      struct vm_area_struct *vma);
extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
				  size_t count);
extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
				   size_t count);
extern int pci_mmap_legacy_mem(struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma);

#define pci_get_legacy_mem	platform_pci_get_legacy_mem
#define pci_legacy_read		platform_pci_legacy_read
#define pci_legacy_write	platform_pci_legacy_write
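/*
 * The platform_pci_* names above come from the ia64 machine vector, so each
 * platform can supply its own accessors for legacy I/O port and memory space
 * rather than relying on a single global implementation.
 */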

struct pci_window {
	struct resource resource;
	u64 offset;
};

struct pci_controller {
	void *acpi_handle;
	void *iommu;
	int segment;
	int node;		/* nearest node with memory or -1 for global allocation */

	unsigned int windows;
	struct pci_window *window;

	void *platform_data;
};

#define PCI_CONTROLLER(busdev)	((struct pci_controller *) (busdev)->sysdata)
#define pci_domain_nr(busdev)	(PCI_CONTROLLER(busdev)->segment)
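/*
 * The per-bus pci_controller is stashed in pci_bus->sysdata when the root bus
 * is scanned, and "segment" is the ACPI PCI segment number, i.e. the PCI
 * domain reported by pci_domain_nr().
 */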

extern struct pci_ops pci_root_ops;

static inline int pci_proc_domain(struct pci_bus *bus)
{
	return (pci_domain_nr(bus) != 0);
}

static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}

extern void pcibios_resource_to_bus(struct pci_dev *dev,
		struct pci_bus_region *region, struct resource *res);

extern void pcibios_bus_to_resource(struct pci_dev *dev,
		struct resource *res, struct pci_bus_region *region);

static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
	struct resource *root = NULL;

	if (res->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (res->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}

#endif /* _ASM_IA64_PCI_H */