#ifndef _ASM_IA64_PCI_H
#define _ASM_IA64_PCI_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/scatterlist.h>

/*
 * Can be used to override the logic in pci_scan_bus() for skipping
 * already-configured bus numbers - useful for buggy BIOSes or architectures
 * with incomplete PCI setup by the loader.
 */
#define pcibios_assign_all_busses()	0
#define pcibios_scan_all_fns(a, b)	0

#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

void pcibios_config_init(void);

struct pci_dev;

/*
 * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
 * correspondence between device bus addresses and CPU physical addresses.
 * Platforms with a hardware I/O MMU _must_ turn this off to suppress the
 * bounce buffer handling code in the block and network device layers.
 * Platforms with separate bus address spaces _must_ turn this off and
 * provide a device DMA mapping implementation that takes care of the
 * necessary address translation.
 *
 * For now, the ia64 platforms which may have separate/multiple bus address
 * spaces all have I/O MMUs which support the merging of physically
 * discontiguous buffers, so we can use that as the sole factor to determine
 * the setting of PCI_DMA_BUS_IS_PHYS.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
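
/*
 * A rough, illustrative sketch (not the actual midlayer code) of how the
 * block/SCSI layers are expected to consult PCI_DMA_BUS_IS_PHYS when
 * choosing a bounce limit for a request queue; "q" and "dev" are assumed to
 * be the driver's request queue and generic device:
 *
 *	u64 bounce_limit = BLK_BOUNCE_HIGH;
 *
 *	if (!PCI_DMA_BUS_IS_PHYS)
 *		bounce_limit = BLK_BOUNCE_ANY;	(IOMMU: no bouncing needed)
 *	else if (dev && dev->dma_mask)
 *		bounce_limit = *dev->dma_mask;
 *
 *	blk_queue_bounce_limit(q, bounce_limit);
 */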

static inline void
pcibios_set_master (struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void
pcibios_penalize_isa_irq (int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}

#define HAVE_ARCH_PCI_MWI 1
extern int pcibios_prep_mwi (struct pci_dev *);
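
/*
 * HAVE_ARCH_PCI_MWI tells the PCI core to use pcibios_prep_mwi() when a
 * driver calls pci_set_mwi(), so the architecture can validate/set the
 * cacheline size before Memory-Write-Invalidate is enabled.  A minimal,
 * hypothetical driver-side sketch (foo_probe() is illustrative only):
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		pci_enable_device(pdev);
 *		if (pci_set_mwi(pdev))
 *			printk(KERN_INFO "foo: could not enable MWI\n");
 *		...
 *	}
 */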

#include <asm-generic/pci-dma-compat.h>

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
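
/*
 * Typical driver usage of the unmap-state macros, along the lines of
 * Documentation/DMA-mapping.txt (struct and field names here are
 * illustrative):
 *
 *	struct foo_ring_entry {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	when mapping:
 *		dma_addr_t d = pci_map_single(pdev, buf, size, PCI_DMA_TODEVICE);
 *		pci_unmap_addr_set(rp, mapping, d);
 *		pci_unmap_len_set(rp, len, size);
 *
 *	when unmapping:
 *		pci_unmap_single(pdev, pci_unmap_addr(rp, mapping),
 *				 pci_unmap_len(rp, len), PCI_DMA_TODEVICE);
 */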

/* The ia64 platform always supports 64-bit addressing. */
#define pci_dac_dma_supported(pci_dev, mask)		(1)
#define pci_dac_page_to_dma(dev,pg,off,dir)		((dma_addr_t) page_to_bus(pg) + (off))
#define pci_dac_dma_to_page(dev,dma_addr)		(virt_to_page(bus_to_virt(dma_addr)))
#define pci_dac_dma_to_offset(dev,dma_addr)		offset_in_page(dma_addr)
#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)		do { } while (0)
#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)	do { mb(); } while (0)
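
/*
 * Illustrative DAC (Double Address Cycle, 64-bit) usage as described in
 * Documentation/DMA-mapping.txt; since ia64 always supports 64-bit
 * addressing, pci_dac_dma_supported() is unconditionally true here.  The
 * write_descriptor() step is hypothetical:
 *
 *	if (pci_dac_dma_supported(pdev, 0xffffffffffffffffULL)) {
 *		dma64_addr_t daddr;
 *
 *		daddr = pci_dac_page_to_dma(pdev, page, offset, PCI_DMA_TODEVICE);
 *		write_descriptor(dev_priv, daddr, len);
 *		pci_dac_dma_sync_single_for_device(pdev, daddr, len,
 *						   PCI_DMA_TODEVICE);
 *	}
 */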

#define sg_dma_len(sg)		((sg)->dma_length)
#define sg_dma_address(sg)	((sg)->dma_address)
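
/*
 * After pci_map_sg(), drivers read the DMA address and length of each
 * resulting entry through these accessors (entries may have been merged by
 * the IOMMU, so always use the returned count).  A sketch, with a
 * hypothetical program_descriptor() helper:
 *
 *	int i, n = pci_map_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
 *
 *	for (i = 0; i < n; i++)
 *		program_descriptor(dev_priv, i, sg_dma_address(&sglist[i]),
 *				   sg_dma_len(&sglist[i]));
 */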

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
				enum pci_mmap_state mmap_state, int write_combine);
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
				      struct vm_area_struct *vma);
extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
				  size_t count);
extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
				   size_t count);
extern int pci_mmap_legacy_mem(struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma);

#define pci_get_legacy_mem	platform_pci_get_legacy_mem
#define pci_legacy_read		platform_pci_legacy_read
#define pci_legacy_write	platform_pci_legacy_write
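
/*
 * The legacy (ISA-compatible) I/O and memory hooks above resolve to the
 * platform_pci_* operations, so each ia64 platform can supply its own
 * address translation.  A rough sketch of how a legacy_io sysfs read could
 * go through the hook - the exact signature and the real implementation in
 * arch/ia64/pci/pci.c may differ, so treat this as an assumption:
 *
 *	u32 val;
 *	int ret = pci_legacy_read(bus, port, &val, size);
 *	if (ret >= 0)
 *		memcpy(buf, &val, size);
 */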

struct pci_window {
	struct resource resource;
	u64 offset;
};

struct pci_controller {
	void *acpi_handle;
	void *iommu;
	int segment;

	unsigned int windows;
	struct pci_window *window;

	void *platform_data;
};

#define PCI_CONTROLLER(busdev)	((struct pci_controller *) (busdev)->sysdata)
#define pci_domain_nr(busdev)	(PCI_CONTROLLER(busdev)->segment)
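
/*
 * Illustrative lookup of the controller and PCI segment (domain) for a
 * device; "pdev" is assumed to be a bound struct pci_dev:
 *
 *	struct pci_controller *ctrl = PCI_CONTROLLER(pdev->bus);
 *
 *	printk(KERN_DEBUG "%s: segment %d, %u address windows\n",
 *	       pci_name(pdev), pci_domain_nr(pdev->bus), ctrl->windows);
 */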

extern struct pci_ops pci_root_ops;

static inline int pci_proc_domain(struct pci_bus *bus)
{
	return (pci_domain_nr(bus) != 0);
}

static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}

extern void pcibios_resource_to_bus(struct pci_dev *dev,
		struct pci_bus_region *region, struct resource *res);

extern void pcibios_bus_to_resource(struct pci_dev *dev,
		struct resource *res, struct pci_bus_region *region);
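
/*
 * Conceptually, these helpers apply the per-window offset recorded in
 * struct pci_window: the implementation in arch/ia64/pci/pci.c is expected
 * to search PCI_CONTROLLER(dev->bus)->window[] for the window containing
 * the resource, but the translation itself amounts to the sketch below:
 *
 *	region->start = res->start - window->offset;
 *	region->end   = res->end   - window->offset;
 *
 * (and the inverse, adding the offset, for pcibios_bus_to_resource()).
 */
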
#endif /* _ASM_IA64_PCI_H */