author     Linus Torvalds <torvalds@linux-foundation.org>   2012-07-23 19:10:54 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-07-23 19:10:54 -0700
commit     f0a08fcb5972167e55faa330c4a24fbaa3328b1f
tree       e24c42230888bd0e6422b2f81d7991da4373bb5d /arch/tile/kernel
parent     474183b188b3c5af45831c71151f819fc70479b8
parent     f6d2ce00da145ae31ec22d21daca6ca5e22b3c84
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull arch/tile updates from Chris Metcalf:
"These changes provide support for PCIe root complex and USB host mode
for tilegx's on-chip I/Os.
In addition, this pull provides the required underpinning for the
on-chip networking support that was pulled into 3.5. The changes have
all been through LKML (with several rounds for PCIe RC) and on
linux-next."
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
tile: updates to pci root complex from community feedback
bounce: allow use of bounce pool via config option
usb: add host support for the tilegx architecture
arch/tile: provide kernel support for the tilegx USB shim
tile pci: enable IOMMU to support DMA for legacy devices
arch/tile: enable ZONE_DMA for tilegx
tilegx pci: support I/O to arbitrarily-cached pages
tile: remove unused header
arch/tile: tilegx PCI root complex support
arch/tile: provide kernel support for the tilegx TRIO shim
arch/tile: break out the "csum a long" function to <asm/checksum.h>
arch/tile: provide kernel support for the tilegx mPIPE shim
arch/tile: common DMA code for the GXIO IORPC subsystem
arch/tile: support MMIO-based readb/writeb etc.
arch/tile: introduce GXIO IORPC framework for tilegx
Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/Makefile      5
-rw-r--r--  arch/tile/kernel/pci-dma.c   536
-rw-r--r--  arch/tile/kernel/pci_gx.c   1543
-rw-r--r--  arch/tile/kernel/setup.c      45
-rw-r--r--  arch/tile/kernel/usb.c        69
5 files changed, 2080 insertions, 118 deletions
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile index 5de99248d8df..5334be8e2538 100644 --- a/arch/tile/kernel/Makefile +++ b/arch/tile/kernel/Makefile @@ -14,4 +14,9 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o +ifdef CONFIG_TILEGX +obj-$(CONFIG_PCI) += pci_gx.o +else obj-$(CONFIG_PCI) += pci.o +endif +obj-$(CONFIG_TILE_USB) += usb.o diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index b3ed19f8779c..b9fe80ec1089 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c @@ -14,6 +14,7 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> +#include <linux/swiotlb.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <asm/tlbflush.h> @@ -22,13 +23,18 @@ /* Generic DMA mapping functions: */ /* - * Allocate what Linux calls "coherent" memory, which for us just - * means uncached. + * Allocate what Linux calls "coherent" memory. On TILEPro this is + * uncached memory; on TILE-Gx it is hash-for-home memory. */ -void *dma_alloc_coherent(struct device *dev, - size_t size, - dma_addr_t *dma_handle, - gfp_t gfp) +#ifdef __tilepro__ +#define PAGE_HOME_DMA PAGE_HOME_UNCACHED +#else +#define PAGE_HOME_DMA PAGE_HOME_HASH +#endif + +static void *tile_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) { u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); int node = dev_to_node(dev); @@ -39,39 +45,42 @@ void *dma_alloc_coherent(struct device *dev, gfp |= __GFP_ZERO; /* - * By forcing NUMA node 0 for 32-bit masks we ensure that the - * high 32 bits of the resulting PA will be zero. If the mask - * size is, e.g., 24, we may still not be able to guarantee a - * suitable memory address, in which case we will return NULL. - * But such devices are uncommon. + * If the mask specifies that the memory be in the first 4 GB, then + * we force the allocation to come from the DMA zone. We also + * force the node to 0 since that's the only node where the DMA + * zone isn't empty. If the mask size is smaller than 32 bits, we + * may still not be able to guarantee a suitable memory address, in + * which case we will return NULL. But such devices are uncommon. */ - if (dma_mask <= DMA_BIT_MASK(32)) + if (dma_mask <= DMA_BIT_MASK(32)) { + gfp |= GFP_DMA; node = 0; + } - pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED); + pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); if (pg == NULL) return NULL; addr = page_to_phys(pg); if (addr + size > dma_mask) { - homecache_free_pages(addr, order); + __homecache_free_pages(pg, order); return NULL; } *dma_handle = addr; + return page_address(pg); } -EXPORT_SYMBOL(dma_alloc_coherent); /* - * Free memory that was allocated with dma_alloc_coherent. + * Free memory that was allocated with tile_dma_alloc_coherent. */ -void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) +static void tile_dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { homecache_free_pages((unsigned long)vaddr, get_order(size)); } -EXPORT_SYMBOL(dma_free_coherent); /* * The map routines "map" the specified address range for DMA @@ -87,52 +96,285 @@ EXPORT_SYMBOL(dma_free_coherent); * can count on nothing having been touched. */ -/* Flush a PA range from cache page by page. 
*/ -static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size) +/* Set up a single page for DMA access. */ +static void __dma_prep_page(struct page *page, unsigned long offset, + size_t size, enum dma_data_direction direction) +{ + /* + * Flush the page from cache if necessary. + * On tilegx, data is delivered to hash-for-home L3; on tilepro, + * data is delivered direct to memory. + * + * NOTE: If we were just doing DMA_TO_DEVICE we could optimize + * this to be a "flush" not a "finv" and keep some of the + * state in cache across the DMA operation, but it doesn't seem + * worth creating the necessary flush_buffer_xxx() infrastructure. + */ + int home = page_home(page); + switch (home) { + case PAGE_HOME_HASH: +#ifdef __tilegx__ + return; +#endif + break; + case PAGE_HOME_UNCACHED: +#ifdef __tilepro__ + return; +#endif + break; + case PAGE_HOME_IMMUTABLE: + /* Should be going to the device only. */ + BUG_ON(direction == DMA_FROM_DEVICE || + direction == DMA_BIDIRECTIONAL); + return; + case PAGE_HOME_INCOHERENT: + /* Incoherent anyway, so no need to work hard here. */ + return; + default: + BUG_ON(home < 0 || home >= NR_CPUS); + break; + } + homecache_finv_page(page); + +#ifdef DEBUG_ALIGNMENT + /* Warn if the region isn't cacheline aligned. */ + if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1))) + pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n", + PFN_PHYS(page_to_pfn(page)) + offset, size); +#endif +} + +/* Make the page ready to be read by the core. */ +static void __dma_complete_page(struct page *page, unsigned long offset, + size_t size, enum dma_data_direction direction) +{ +#ifdef __tilegx__ + switch (page_home(page)) { + case PAGE_HOME_HASH: + /* I/O device delivered data the way the cpu wanted it. */ + break; + case PAGE_HOME_INCOHERENT: + /* Incoherent anyway, so no need to work hard here. */ + break; + case PAGE_HOME_IMMUTABLE: + /* Extra read-only copies are not a problem. */ + break; + default: + /* Flush the bogus hash-for-home I/O entries to memory. */ + homecache_finv_map_page(page, PAGE_HOME_HASH); + break; + } +#endif +} + +static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) { struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); - size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1)); + unsigned long offset = dma_addr & (PAGE_SIZE - 1); + size_t bytes = min(size, (size_t)(PAGE_SIZE - offset)); + + while (size != 0) { + __dma_prep_page(page, offset, bytes, direction); + size -= bytes; + ++page; + offset = 0; + bytes = min((size_t)PAGE_SIZE, size); + } +} - while ((ssize_t)size > 0) { - /* Flush the page. */ - homecache_flush_cache(page++, 0); +static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) +{ + struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); + unsigned long offset = dma_addr & (PAGE_SIZE - 1); + size_t bytes = min(size, (size_t)(PAGE_SIZE - offset)); + + while (size != 0) { + __dma_complete_page(page, offset, bytes, direction); + size -= bytes; + ++page; + offset = 0; + bytes = min((size_t)PAGE_SIZE, size); + } +} + +static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, + int nents, enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + struct scatterlist *sg; + int i; + + BUG_ON(!valid_dma_direction(direction)); + + WARN_ON(nents == 0 || sglist->length == 0); - /* Figure out if we need to continue on the next page. 
*/ - size -= bytesleft; - bytesleft = PAGE_SIZE; + for_each_sg(sglist, sg, nents, i) { + sg->dma_address = sg_phys(sg); + __dma_prep_pa_range(sg->dma_address, sg->length, direction); +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg->dma_length = sg->length; +#endif } + + return nents; } -/* - * dma_map_single can be passed any memory address, and there appear - * to be no alignment constraints. - * - * There is a chance that the start of the buffer will share a cache - * line with some other data that has been touched in the meantime. - */ -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) +static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nents, enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + struct scatterlist *sg; + int i; + + BUG_ON(!valid_dma_direction(direction)); + for_each_sg(sglist, sg, nents, i) { + sg->dma_address = sg_phys(sg); + __dma_complete_pa_range(sg->dma_address, sg->length, + direction); + } +} + +static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) { - dma_addr_t dma_addr = __pa(ptr); + BUG_ON(!valid_dma_direction(direction)); + + BUG_ON(offset + size > PAGE_SIZE); + __dma_prep_page(page, offset, size, direction); + return page_to_pa(page) + offset; +} + +static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, + size_t size, enum dma_data_direction direction, + struct dma_attrs *attrs) +{ BUG_ON(!valid_dma_direction(direction)); - WARN_ON(size == 0); - __dma_map_pa_range(dma_addr, size); + __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), + dma_address & PAGE_OFFSET, size, direction); +} - return dma_addr; +static void tile_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, + size_t size, + enum dma_data_direction direction) +{ + BUG_ON(!valid_dma_direction(direction)); + + __dma_complete_pa_range(dma_handle, size, direction); +} + +static void tile_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ + __dma_prep_pa_range(dma_handle, size, direction); } -EXPORT_SYMBOL(dma_map_single); -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) +static void tile_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sglist, int nelems, + enum dma_data_direction direction) { + struct scatterlist *sg; + int i; + + BUG_ON(!valid_dma_direction(direction)); + WARN_ON(nelems == 0 || sglist->length == 0); + + for_each_sg(sglist, sg, nelems, i) { + dma_sync_single_for_cpu(dev, sg->dma_address, + sg_dma_len(sg), direction); + } +} + +static void tile_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sglist, int nelems, + enum dma_data_direction direction) +{ + struct scatterlist *sg; + int i; + BUG_ON(!valid_dma_direction(direction)); + WARN_ON(nelems == 0 || sglist->length == 0); + + for_each_sg(sglist, sg, nelems, i) { + dma_sync_single_for_device(dev, sg->dma_address, + sg_dma_len(sg), direction); + } +} + +static inline int +tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 0; +} + +static inline int +tile_dma_supported(struct device *dev, u64 mask) +{ + return 1; +} + +static struct dma_map_ops tile_default_dma_map_ops = { + .alloc = tile_dma_alloc_coherent, + .free = tile_dma_free_coherent, + .map_page = tile_dma_map_page, + .unmap_page = 
tile_dma_unmap_page, + .map_sg = tile_dma_map_sg, + .unmap_sg = tile_dma_unmap_sg, + .sync_single_for_cpu = tile_dma_sync_single_for_cpu, + .sync_single_for_device = tile_dma_sync_single_for_device, + .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu, + .sync_sg_for_device = tile_dma_sync_sg_for_device, + .mapping_error = tile_dma_mapping_error, + .dma_supported = tile_dma_supported +}; + +struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; +EXPORT_SYMBOL(tile_dma_map_ops); + +/* Generic PCI DMA mapping functions */ + +static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) +{ + int node = dev_to_node(dev); + int order = get_order(size); + struct page *pg; + dma_addr_t addr; + + gfp |= __GFP_ZERO; + + pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); + if (pg == NULL) + return NULL; + + addr = page_to_phys(pg); + + *dma_handle = phys_to_dma(dev, addr); + + return page_address(pg); +} + +/* + * Free memory that was allocated with tile_pci_dma_alloc_coherent. + */ +static void tile_pci_dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + homecache_free_pages((unsigned long)vaddr, get_order(size)); } -EXPORT_SYMBOL(dma_unmap_single); -int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction) +static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, + int nents, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct scatterlist *sg; int i; @@ -143,73 +385,103 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, for_each_sg(sglist, sg, nents, i) { sg->dma_address = sg_phys(sg); - __dma_map_pa_range(sg->dma_address, sg->length); + __dma_prep_pa_range(sg->dma_address, sg->length, direction); + + sg->dma_address = phys_to_dma(dev, sg->dma_address); +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg->dma_length = sg->length; +#endif } return nents; } -EXPORT_SYMBOL(dma_map_sg); -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction) +static void tile_pci_dma_unmap_sg(struct device *dev, + struct scatterlist *sglist, int nents, + enum dma_data_direction direction, + struct dma_attrs *attrs) { + struct scatterlist *sg; + int i; + BUG_ON(!valid_dma_direction(direction)); + for_each_sg(sglist, sg, nents, i) { + sg->dma_address = sg_phys(sg); + __dma_complete_pa_range(sg->dma_address, sg->length, + direction); + } } -EXPORT_SYMBOL(dma_unmap_sg); -dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) +static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) { BUG_ON(!valid_dma_direction(direction)); BUG_ON(offset + size > PAGE_SIZE); - homecache_flush_cache(page, 0); + __dma_prep_page(page, offset, size, direction); - return page_to_pa(page) + offset; + return phys_to_dma(dev, page_to_pa(page) + offset); } -EXPORT_SYMBOL(dma_map_page); -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) +static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, + size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) { BUG_ON(!valid_dma_direction(direction)); + + dma_address = 
dma_to_phys(dev, dma_address); + + __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), + dma_address & PAGE_OFFSET, size, direction); } -EXPORT_SYMBOL(dma_unmap_page); -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) +static void tile_pci_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, + size_t size, + enum dma_data_direction direction) { BUG_ON(!valid_dma_direction(direction)); + + dma_handle = dma_to_phys(dev, dma_handle); + + __dma_complete_pa_range(dma_handle, size, direction); } -EXPORT_SYMBOL(dma_sync_single_for_cpu); -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) +static void tile_pci_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, + enum dma_data_direction + direction) { - unsigned long start = PFN_DOWN(dma_handle); - unsigned long end = PFN_DOWN(dma_handle + size - 1); - unsigned long i; + dma_handle = dma_to_phys(dev, dma_handle); - BUG_ON(!valid_dma_direction(direction)); - for (i = start; i <= end; ++i) - homecache_flush_cache(pfn_to_page(i), 0); + __dma_prep_pa_range(dma_handle, size, direction); } -EXPORT_SYMBOL(dma_sync_single_for_device); -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) +static void tile_pci_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sglist, + int nelems, + enum dma_data_direction direction) { + struct scatterlist *sg; + int i; + BUG_ON(!valid_dma_direction(direction)); - WARN_ON(nelems == 0 || sg[0].length == 0); + WARN_ON(nelems == 0 || sglist->length == 0); + + for_each_sg(sglist, sg, nelems, i) { + dma_sync_single_for_cpu(dev, sg->dma_address, + sg_dma_len(sg), direction); + } } -EXPORT_SYMBOL(dma_sync_sg_for_cpu); -/* - * Flush and invalidate cache for scatterlist. - */ -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) +static void tile_pci_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sglist, + int nelems, + enum dma_data_direction direction) { struct scatterlist *sg; int i; @@ -222,31 +494,93 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, sg_dma_len(sg), direction); } } -EXPORT_SYMBOL(dma_sync_sg_for_device); -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) +static inline int +tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction); + return 0; } -EXPORT_SYMBOL(dma_sync_single_range_for_cpu); -void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) +static inline int +tile_pci_dma_supported(struct device *dev, u64 mask) { - dma_sync_single_for_device(dev, dma_handle + offset, size, direction); + return 1; } -EXPORT_SYMBOL(dma_sync_single_range_for_device); -/* - * dma_alloc_noncoherent() returns non-cacheable memory, so there's no - * need to do any flushing here. 
- */ -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) +static struct dma_map_ops tile_pci_default_dma_map_ops = { + .alloc = tile_pci_dma_alloc_coherent, + .free = tile_pci_dma_free_coherent, + .map_page = tile_pci_dma_map_page, + .unmap_page = tile_pci_dma_unmap_page, + .map_sg = tile_pci_dma_map_sg, + .unmap_sg = tile_pci_dma_unmap_sg, + .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu, + .sync_single_for_device = tile_pci_dma_sync_single_for_device, + .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu, + .sync_sg_for_device = tile_pci_dma_sync_sg_for_device, + .mapping_error = tile_pci_dma_mapping_error, + .dma_supported = tile_pci_dma_supported +}; + +struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; +EXPORT_SYMBOL(gx_pci_dma_map_ops); + +/* PCI DMA mapping functions for legacy PCI devices */ + +#ifdef CONFIG_SWIOTLB +static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) { + gfp |= GFP_DMA; + return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); +} + +static void tile_swiotlb_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, + struct dma_attrs *attrs) +{ + swiotlb_free_coherent(dev, size, vaddr, dma_addr); +} + +static struct dma_map_ops pci_swiotlb_dma_ops = { + .alloc = tile_swiotlb_alloc_coherent, + .free = tile_swiotlb_free_coherent, + .map_page = swiotlb_map_page, + .unmap_page = swiotlb_unmap_page, + .map_sg = swiotlb_map_sg_attrs, + .unmap_sg = swiotlb_unmap_sg_attrs, + .sync_single_for_cpu = swiotlb_sync_single_for_cpu, + .sync_single_for_device = swiotlb_sync_single_for_device, + .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, + .sync_sg_for_device = swiotlb_sync_sg_for_device, + .dma_supported = swiotlb_dma_supported, + .mapping_error = swiotlb_dma_mapping_error, +}; + +struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; +#else +struct dma_map_ops *gx_legacy_pci_dma_map_ops; +#endif +EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); + +#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK +int dma_set_coherent_mask(struct device *dev, u64 mask) +{ + struct dma_map_ops *dma_ops = get_dma_ops(dev); + + /* Handle legacy PCI devices with limited memory addressability. */ + if (((dma_ops == gx_pci_dma_map_ops) || + (dma_ops == gx_legacy_pci_dma_map_ops)) && + (mask <= DMA_BIT_MASK(32))) { + if (mask > dev->archdata.max_direct_dma_addr) + mask = dev->archdata.max_direct_dma_addr; + } + + if (!dma_supported(dev, mask)) + return -EIO; + dev->coherent_dma_mask = mask; + return 0; } -EXPORT_SYMBOL(dma_cache_sync); +EXPORT_SYMBOL(dma_set_coherent_mask); +#endif diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c new file mode 100644 index 000000000000..fa75264a82ae --- /dev/null +++ b/arch/tile/kernel/pci_gx.c @@ -0,0 +1,1543 @@ +/* + * Copyright 2012 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/mmzone.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/capability.h> +#include <linux/sched.h> +#include <linux/errno.h> +#include <linux/irq.h> +#include <linux/msi.h> +#include <linux/io.h> +#include <linux/uaccess.h> +#include <linux/ctype.h> + +#include <asm/processor.h> +#include <asm/sections.h> +#include <asm/byteorder.h> + +#include <gxio/iorpc_globals.h> +#include <gxio/kiorpc.h> +#include <gxio/trio.h> +#include <gxio/iorpc_trio.h> +#include <hv/drv_trio_intf.h> + +#include <arch/sim.h> + +/* + * This file containes the routines to search for PCI buses, + * enumerate the buses, and configure any attached devices. + */ + +#define DEBUG_PCI_CFG 0 + +#if DEBUG_PCI_CFG +#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \ + pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \ + size, val, bus, dev, func, offset & 0xFFF); +#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \ + pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \ + size, val, bus, dev, func, offset & 0xFFF); +#else +#define TRACE_CFG_WR(...) +#define TRACE_CFG_RD(...) +#endif + +static int __devinitdata pci_probe = 1; + +/* Information on the PCIe RC ports configuration. */ +static int __devinitdata pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; + +/* + * On some platforms with one or more Gx endpoint ports, we need to + * delay the PCIe RC port probe for a few seconds to work around + * a HW PCIe link-training bug. The exact delay is specified with + * a kernel boot argument in the form of "pcie_rc_delay=T,P,S", + * where T is the TRIO instance number, P is the port number and S is + * the delay in seconds. If the delay is not provided, the value + * will be DEFAULT_RC_DELAY. + */ +static int __devinitdata rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; + +/* Default number of seconds that the PCIe RC port probe can be delayed. */ +#define DEFAULT_RC_DELAY 10 + +/* Max number of seconds that the PCIe RC port probe can be delayed. */ +#define MAX_RC_DELAY 20 + +/* Array of the PCIe ports configuration info obtained from the BIB. */ +struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; + +/* All drivers share the TRIO contexts defined here. */ +gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; + +/* Pointer to an array of PCIe RC controllers. */ +struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; +int num_rc_controllers; +static int num_ep_controllers; + +static struct pci_ops tile_cfg_ops; + +/* Mask of CPUs that should receive PCIe interrupts. */ +static struct cpumask intr_cpus_map; + +/* + * We don't need to worry about the alignment of resources. + */ +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + return res->start; +} +EXPORT_SYMBOL(pcibios_align_resource); + + +/* + * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #. + * For now, we simply send interrupts to non-dataplane CPUs. + * We may implement methods to allow user to specify the target CPUs, + * e.g. via boot arguments. 
+ */ +static int tile_irq_cpu(int irq) +{ + unsigned int count; + int i = 0; + int cpu; + + count = cpumask_weight(&intr_cpus_map); + if (unlikely(count == 0)) { + pr_warning("intr_cpus_map empty, interrupts will be" + " delievered to dataplane tiles\n"); + return irq % (smp_height * smp_width); + } + + count = irq % count; + for_each_cpu(cpu, &intr_cpus_map) { + if (i++ == count) + break; + } + return cpu; +} + +/* + * Open a file descriptor to the TRIO shim. + */ +static int __devinit tile_pcie_open(int trio_index) +{ + gxio_trio_context_t *context = &trio_contexts[trio_index]; + int ret; + + /* + * This opens a file descriptor to the TRIO shim. + */ + ret = gxio_trio_init(context, trio_index); + if (ret < 0) + return ret; + + /* + * Allocate an ASID for the kernel. + */ + ret = gxio_trio_alloc_asids(context, 1, 0, 0); + if (ret < 0) { + pr_err("PCI: ASID alloc failure on TRIO %d, give up\n", + trio_index); + goto asid_alloc_failure; + } + + context->asid = ret; + +#ifdef USE_SHARED_PCIE_CONFIG_REGION + /* + * Alloc a PIO region for config access, shared by all MACs per TRIO. + * This shouldn't fail since the kernel is supposed to the first + * client of the TRIO's PIO regions. + */ + ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0); + if (ret < 0) { + pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n", + trio_index); + goto pio_alloc_failure; + } + + context->pio_cfg_index = ret; + + /* + * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter + * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR. + */ + ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index, + 0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); + if (ret < 0) { + pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n", + trio_index); + goto pio_alloc_failure; + } +#endif + + return ret; + +asid_alloc_failure: +#ifdef USE_SHARED_PCIE_CONFIG_REGION +pio_alloc_failure: +#endif + hv_dev_close(context->fd); + + return ret; +} + +static void +tilegx_legacy_irq_ack(struct irq_data *d) +{ + __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq); +} + +static void +tilegx_legacy_irq_mask(struct irq_data *d) +{ + __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); +} + +static void +tilegx_legacy_irq_unmask(struct irq_data *d) +{ + __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); +} + +static struct irq_chip tilegx_legacy_irq_chip = { + .name = "tilegx_legacy_irq", + .irq_ack = tilegx_legacy_irq_ack, + .irq_mask = tilegx_legacy_irq_mask, + .irq_unmask = tilegx_legacy_irq_unmask, + + /* TBD: support set_affinity. */ +}; + +/* + * This is a wrapper function of the kernel level-trigger interrupt + * handler handle_level_irq() for PCI legacy interrupts. The TRIO + * is configured such that only INTx Assert interrupts are proxied + * to Linux which just calls handle_level_irq() after clearing the + * MAC INTx Assert status bit associated with this interrupt. + */ +static void +trio_handle_level_irq(unsigned int irq, struct irq_desc *desc) +{ + struct pci_controller *controller = irq_desc_get_handler_data(desc); + gxio_trio_context_t *trio_context = controller->trio; + uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); + int mac = controller->mac; + unsigned int reg_offset; + uint64_t level_mask; + + handle_level_irq(irq, desc); + + /* + * Clear the INTx Level status, otherwise future interrupts are + * not sent. 
+ */ + reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS << + TRIO_CFG_REGION_ADDR__REG_SHIFT) | + (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << + TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | + (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); + + level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx; + + __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask); +} + +/* + * Create kernel irqs and set up the handlers for the legacy interrupts. + * Also some minimum initialization for the MSI support. + */ +static int __devinit tile_init_irqs(struct pci_controller *controller) +{ + int i; + int j; + int irq; + int result; + + cpumask_copy(&intr_cpus_map, cpu_online_mask); + + + for (i = 0; i < 4; i++) { + gxio_trio_context_t *context = controller->trio; + int cpu; + + /* Ask the kernel to allocate an IRQ. */ + irq = create_irq(); + if (irq < 0) { + pr_err("PCI: no free irq vectors, failed for %d\n", i); + + goto free_irqs; + } + controller->irq_intx_table[i] = irq; + + /* Distribute the 4 IRQs to different tiles. */ + cpu = tile_irq_cpu(irq); + + /* Configure the TRIO intr binding for this IRQ. */ + result = gxio_trio_config_legacy_intr(context, cpu_x(cpu), + cpu_y(cpu), KERNEL_PL, + irq, controller->mac, i); + if (result < 0) { + pr_err("PCI: MAC intx config failed for %d\n", i); + + goto free_irqs; + } + + /* + * Register the IRQ handler with the kernel. + */ + irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip, + trio_handle_level_irq); + irq_set_chip_data(irq, (void *)(uint64_t)i); + irq_set_handler_data(irq, controller); + } + + return 0; + +free_irqs: + for (j = 0; j < i; j++) + destroy_irq(controller->irq_intx_table[j]); + + return -1; +} + +/* + * Find valid controllers and fill in pci_controller structs for each + * of them. + * + * Returns the number of controllers discovered. + */ +int __init tile_pci_init(void) +{ + int num_trio_shims = 0; + int ctl_index = 0; + int i, j; + + if (!pci_probe) { + pr_info("PCI: disabled by boot argument\n"); + return 0; + } + + pr_info("PCI: Searching for controllers...\n"); + + /* + * We loop over all the TRIO shims. + */ + for (i = 0; i < TILEGX_NUM_TRIO; i++) { + int ret; + + ret = tile_pcie_open(i); + if (ret < 0) + continue; + + num_trio_shims++; + } + + if (num_trio_shims == 0 || sim_is_simulator()) + return 0; + + /* + * Now determine which PCIe ports are configured to operate in RC mode. + * We look at the Board Information Block first and then see if there + * are any overriding configuration by the HW strapping pin. + */ + for (i = 0; i < TILEGX_NUM_TRIO; i++) { + gxio_trio_context_t *context = &trio_contexts[i]; + int ret; + + if (context->fd < 0) + continue; + + ret = hv_dev_pread(context->fd, 0, + (HV_VirtAddr)&pcie_ports[i][0], + sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES, + GXIO_TRIO_OP_GET_PORT_PROPERTY); + if (ret < 0) { + pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d," + " on TRIO %d\n", ret, i); + continue; + } + + for (j = 0; j < TILEGX_TRIO_PCIES; j++) { + if (pcie_ports[i][j].allow_rc) { + pcie_rc[i][j] = 1; + num_rc_controllers++; + } + else if (pcie_ports[i][j].allow_ep) { + num_ep_controllers++; + } |