Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to; in the meantime, the full history
is about 3.2GB when imported into git - space that would just make the
early git days unnecessarily complicated, when we don't yet have a lot
of good infrastructure for it.

Let it rip!

diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
new file mode 100644
index 0000000..80f8ef0
--- /dev/null
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *   Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * This is a pseudo I/O MMU which dispatches to the hardware I/O MMU
+ * whenever possible.  We assume that the hardware I/O MMU requires
+ * full 32-bit addressability, as is the case, e.g., for HP zx1-based
+ * systems (there, the I/O MMU window is mapped at 3-4GB).  If a
+ * device doesn't provide full 32-bit addressability, we fall back on
+ * the sw I/O TLB.  This is good enough to let us support broken
+ * hardware such as soundcards which have a DMA engine that can
+ * address only 28 bits.
+ */
+
+#include <linux/device.h>
+
+#include <asm/machvec.h>
+
+/* swiotlb declarations & definitions: */
+extern void swiotlb_init_with_default_size (size_t size);
+extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
+extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
+extern ia64_mv_dma_map_single		swiotlb_map_single;
+extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
+extern ia64_mv_dma_map_sg		swiotlb_map_sg;
+extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
+extern ia64_mv_dma_supported		swiotlb_dma_supported;
+extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
+
+/* hwiommu declarations & definitions: */
+
+extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
+extern ia64_mv_dma_free_coherent	sba_free_coherent;
+extern ia64_mv_dma_map_single		sba_map_single;
+extern ia64_mv_dma_unmap_single		sba_unmap_single;
+extern ia64_mv_dma_map_sg		sba_map_sg;
+extern ia64_mv_dma_unmap_sg		sba_unmap_sg;
+extern ia64_mv_dma_supported		sba_dma_supported;
+extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
+
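+/*
+ * The "hardware" side is the zx1 SBA I/O MMU; map the generic
+ * hwiommu_* names onto its implementation.  The sync operations
+ * are handled by the generic machvec helpers.
+ */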
+#define hwiommu_alloc_coherent		sba_alloc_coherent
+#define hwiommu_free_coherent		sba_free_coherent
+#define hwiommu_map_single		sba_map_single
+#define hwiommu_unmap_single		sba_unmap_single
+#define hwiommu_map_sg			sba_map_sg
+#define hwiommu_unmap_sg		sba_unmap_sg
+#define hwiommu_dma_supported		sba_dma_supported
+#define hwiommu_dma_mapping_error	sba_dma_mapping_error
+#define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
+#define hwiommu_sync_sg_for_cpu		machvec_dma_sync_sg
+#define hwiommu_sync_single_for_device	machvec_dma_sync_single
+#define hwiommu_sync_sg_for_device	machvec_dma_sync_sg
+
+/*
+ * Note: we need to make the determination of whether or not to use
+ * the sw I/O TLB based purely on the device structure.  Anything else
+ * would be unreliable or would be too intrusive.
+ */
+static inline int
+use_swiotlb (struct device *dev)
+{
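+	/*
+	 * Fall back on the swiotlb when the hardware I/O MMU cannot
+	 * satisfy the device's DMA mask (e.g., a DMA engine limited
+	 * to fewer than 32 address bits).
+	 */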
+	return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
+}
+
+void
+hwsw_init (void)
+{
+	/* default to a smallish 2MB sw I/O TLB */
+	swiotlb_init_with_default_size (2 * (1<<20));
+}
+
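+/*
+ * Each hwsw_* operation below dispatches to either the swiotlb or the
+ * hardware I/O MMU implementation, based on what the device supports.
+ */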
+void *
+hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags)
+{
+	if (use_swiotlb(dev))
+		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+	else
+		return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
+}
+
+void
+hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+{
+	if (use_swiotlb(dev))
+		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+	else
+		hwiommu_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+dma_addr_t
+hwsw_map_single (struct device *dev, void *addr, size_t size, int dir)
+{
+	if (use_swiotlb(dev))
+		return swiotlb_map_single(dev, addr, size, dir);
+	else
+		return hwiommu_map_single(dev, addr, size, dir);
+}
+
+void
+hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir)
+{
+	if (use_swiotlb(dev))
+		swiotlb_unmap_single(dev, iova, size, dir);
+	else
+		hwiommu_unmap_single(dev, iova, size, dir);
+}
+
+int
+hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+{
+	if (use_swiotlb(dev))
+		return swiotlb_map_sg(dev, sglist, nents, dir);
+	else
+		return hwiommu_map_sg(dev, sglist, nents, dir);
+}
+
+void
+hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+{
+	if (use_swiotlb(dev))
+		swiotlb_unmap_sg(dev, sglist, nents, dir);
+	else
+		hwiommu_unmap_sg(dev, sglist, nents, dir);
+}
+
+void
+hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
+{
+	if (use_swiotlb(dev))
+		swiotlb_sync_single_for_cpu(dev, addr, size, dir);
+	else
+		hwiommu_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+void
+hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
+{
+	if (use_swiotlb(dev))
+		swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
+	else
+		hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+void
+hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
+{
+	if (use_swiotlb(dev))
+		swiotlb_sync_single_for_device(dev, addr, size, dir);
+	else
+		hwiommu_sync_single_for_device(dev, addr, size, dir);
+}
+
+void
+hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
+{
+	if (use_swiotlb(dev))
+		swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
+	else
+		hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
+}
+
+int
+hwsw_dma_supported (struct device *dev, u64 mask)
+{
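+	/* A mask is supported if either implementation can handle it. */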
+	if (hwiommu_dma_supported(dev, mask))
+		return 1;
+	return swiotlb_dma_supported(dev, mask);
+}
+
+int
+hwsw_dma_mapping_error (dma_addr_t dma_addr)
+{
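+	/* Report an error if either implementation flags the address. */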
+	return hwiommu_dma_mapping_error(dma_addr) || swiotlb_dma_mapping_error(dma_addr);
+}
+
+EXPORT_SYMBOL(hwsw_dma_mapping_error);
+EXPORT_SYMBOL(hwsw_map_single);
+EXPORT_SYMBOL(hwsw_unmap_single);
+EXPORT_SYMBOL(hwsw_map_sg);
+EXPORT_SYMBOL(hwsw_unmap_sg);
+EXPORT_SYMBOL(hwsw_dma_supported);
+EXPORT_SYMBOL(hwsw_alloc_coherent);
+EXPORT_SYMBOL(hwsw_free_coherent);