/*
 * Implement the default iomap interfaces
 *
 * (C) Copyright 2004 Linus Torvalds
 */
#include <linux/pci.h>
#include <linux/io.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <linux/module.h>

/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or a MMIO access; these functions don't care. The info is
 * encoded in the hardware mapping set up by the mapping functions
 * (or the cookie itself, depending on implementation and hw).
 *
 * The generic routines don't assume any hardware mappings, and just
 * encode the PIO/MMIO as part of the cookie. They coldly assume that
 * the MMIO IO mappings are not in the low address range.
 *
 * Architectures for which this is not true can't use this generic
 * implementation and should do their own copy.
 */

#ifndef HAVE_ARCH_PIO_SIZE
/*
 * We encode the physical PIO addresses (0-0xffff) into the
 * pointer by offsetting them with a constant (0x10000) and
 * assuming that all the low addresses are always PIO. That means
 * we can do some sanity checks on the low bits, and don't
 * need to just take things for granted.
 */
#define PIO_OFFSET	0x10000UL
#define PIO_MASK	0x0ffffUL
#define PIO_RESERVED	0x40000UL
#endif

/*
 * Ugly macros are a way of life.
 */
#define VERIFY_PIO(port) BUG_ON((port & ~PIO_MASK) != PIO_OFFSET)

#define IO_COND(addr, is_pio, is_mmio) do {			\
	unsigned long port = (unsigned long __force)addr;	\
	if (port < PIO_RESERVED) {				\
		VERIFY_PIO(port);				\
		port &= PIO_MASK;				\
		is_pio;						\
	} else {						\
		is_mmio;					\
	}							\
} while (0)
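
/*
 * Illustrative example (not part of the original source): with the
 * default encoding above, ioport_map(0x3f8, 8) hands back the cookie
 * (void __iomem *)0x103f8.  An ioread8() on that cookie takes the PIO
 * branch of IO_COND (0x103f8 < PIO_RESERVED), masks off PIO_OFFSET
 * with PIO_MASK and ends up doing inb(0x3f8).  A pointer returned by
 * ioremap() lies above PIO_RESERVED, so the same call takes the MMIO
 * branch and does readb() instead.
 */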

#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif

#ifndef mmio_read16be
#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
#endif

unsigned int fastcall ioread8(void __iomem *addr)
{
	IO_COND(addr, return inb(port), return readb(addr));
}
unsigned int fastcall ioread16(void __iomem *addr)
{
	IO_COND(addr, return inw(port), return readw(addr));
}
unsigned int fastcall ioread16be(void __iomem *addr)
{
	IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
}
unsigned int fastcall ioread32(void __iomem *addr)
{
	IO_COND(addr, return inl(port), return readl(addr));
}
unsigned int fastcall ioread32be(void __iomem *addr)
{
	IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);

#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif

#ifndef mmio_write16be
#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
#endif

void fastcall iowrite8(u8 val, void __iomem *addr)
{
	IO_COND(addr, outb(val,port), writeb(val, addr));
}
void fastcall iowrite16(u16 val, void __iomem *addr)
{
	IO_COND(addr, outw(val,port), writew(val, addr));
}
void fastcall iowrite16be(u16 val, void __iomem *addr)
{
	IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void fastcall iowrite32(u32 val, void __iomem *addr)
{
	IO_COND(addr, outl(val,port), writel(val, addr));
}
void fastcall iowrite32be(u32 val, void __iomem *addr)
{
	IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);

/*
 * These are the "repeat MMIO read/write" functions.
 * Note the "__raw" accesses, since we don't want to
 * convert to CPU byte order. We write in "IO byte
 * order" (we also don't have IO barriers).
 */
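
/*
 * Illustrative note (an assumption about typical use, not original
 * text): unlike memcpy(), these helpers keep reading from or writing
 * to the one address they were given, which is what a device data
 * FIFO register expects, e.g.:
 *
 *	u16 buf[64];
 *	ioread16_rep(ioaddr + FIFO_REG, buf, 64);	// drain 64 words
 *
 * ioaddr and FIFO_REG are hypothetical.
 */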

#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
	while (--count >= 0) {
		u8 data = __raw_readb(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
	while (--count >= 0) {
		u16 data = __raw_readw(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
	while (--count >= 0) {
		u32 data = __raw_readl(addr);
		*dst = data;
		dst++;
	}
}
#endif

#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
	while (--count >= 0) {
		__raw_writeb(*src, addr);
		src++;
	}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
	while (--count >= 0) {
		__raw_writew(*src, addr);
		src++;
	}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
	while (--count >= 0) {
		__raw_writel(*src, addr);
		src++;
	}
}
#endif

void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insb(port, dst, count), mmio_insb(addr, dst, count));
}
void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insw(port, dst, count), mmio_insw(addr, dst, count));
}
void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insl(port, dst, count), mmio_insl(addr, dst, count));
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);

void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsl(port, src, count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);

/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	if (port > PIO_MASK)
		return NULL;
	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}

void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);
		return ioremap_nocache(start, len);
	}
	/* What? */
	return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
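
/*
 * Usage sketch (illustrative only; the foo_* names and REG_OFF are
 * hypothetical):
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *
 *		if (pci_enable_device(pdev))
 *			return -ENODEV;
 *		regs = pci_iomap(pdev, 0, 0);	// maxlen 0: map all of BAR 0
 *		if (!regs)
 *			return -ENOMEM;
 *		// ioread32(regs + REG_OFF) now works whether BAR 0
 *		// is port IO or MMIO
 *		return 0;
 *	}
 */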

#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
			       unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
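
/*
 * Illustrative note (not original text): devres_alloc() is done before
 * ioport_map() so the failure path never has to undo a live mapping.
 * A driver using the managed variant simply drops its cleanup calls:
 *
 *	void __iomem *base = devm_ioport_map(dev, 0x3f8, 8);
 *	if (!base)
 *		return -ENOMEM;
 *	// no ioport_unmap() needed in error or remove paths
 *
 * The port range shown is hypothetical.
 */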

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);

static void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
			   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap);
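
/*
 * Usage sketch (illustrative; assumes a platform device with an
 * IORESOURCE_MEM entry):
 *
 *	struct resource *res;
 *	void __iomem *base;
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	if (!res)
 *		return -ENODEV;
 *	base = devm_ioremap(&pdev->dev, res->start,
 *			    res->end - res->start + 1);
 *	if (!base)
 *		return -ENOMEM;
 */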

/**
 * devm_ioremap_nocache - Managed ioremap_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_nocache(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
				   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_nocache(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	iounmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (void *)addr));
}
EXPORT_SYMBOL(devm_iounmap);

/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
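
/*
 * Usage sketch (illustrative; pcim_enable_device() is the managed PCI
 * enable from the same devres series, and the foo_* names are
 * hypothetical):
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		void __iomem *mmio;
 *
 *		if (pcim_enable_device(pdev))
 *			return -ENODEV;
 *		mmio = pcim_iomap(pdev, 0, 0);
 *		if (!mmio)
 *			return -ENOMEM;
 *		// the mapping is torn down automatically on detach
 *		return 0;
 *	}
 */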

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_region;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_iomap;
	}

	return 0;

 err_iomap:
	pcim_iounmap(pdev, iomap[i]);
 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
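
/*
 * Usage sketch (illustrative; DRV_NAME and the BAR choice are
 * hypothetical):
 *
 *	// request and map BARs 0 and 2 in one call
 *	rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2), DRV_NAME);
 *	if (rc)
 *		return rc;
 *	mmio = pcim_iomap_table(pdev)[0];
 */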