DMA mapping updates for Linux 4.21
Merge tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping
Pull DMA mapping updates from Christoph Hellwig:
"A huge update this time, but a lot of that is just consolidating or
removing code:
- provide a common DMA_MAPPING_ERROR definition and avoid indirect
calls for dma_map_* error checking (a driver-side sketch follows
this list)
- use direct calls for the DMA direct mapping case, avoiding huge
retpoline overhead for high performance workloads
- merge the swiotlb dma_map_ops into dma-direct
- provide a generic remapping DMA consistent allocator for
architectures that have devices that perform DMA that is not cache
coherent. Based on the existing arm64 implementation and also used
for csky now.
- improve the dma-debug infrastructure, including dynamic allocation
of entries (Robin Murphy)
- default to providing chained scatterlists everywhere, with opt-outs
for the few architectures (alpha, parisc, most arm32 variants) that
can't cope with chaining
- misc sparc32 dma-related cleanups
- remove the dma_mark_clean arch hook used by swiotlb on ia64 and
replace it with the generic noncoherent infrastructure
- fix the return type of dma_set_max_seg_size (Niklas Söderlund)
- move the dummy dma ops for non-DMA-capable devices from arm64 to
common code (Robin Murphy)
- ensure dma_alloc_coherent returns zeroed memory to avoid kernel
data leaks through userspace. We already did this for most common
architectures, but this ensures we do it everywhere.
dma_zalloc_coherent has been deprecated and can hopefully be
removed after -rc1 with a coccinelle script (a before/after sketch
follows the commit list below)"
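For driver code, the first bullet's consolidation is visible mainly through
dma_mapping_error(): there is no per-implementation ->mapping_error callback
any more, and the returned handle is compared against the single
DMA_MAPPING_ERROR sentinel. A minimal sketch of the driver-side pattern (the
device and buffer names are hypothetical, not from this series):

    #include <linux/dma-mapping.h>

    /*
     * Map a driver-owned buffer for device-to-memory DMA and check the
     * result.  Drivers must use dma_mapping_error() rather than comparing
     * against DMA_MAPPING_ERROR themselves; the helper is the supported
     * interface and also hooks into dma-debug.
     */
    static int mydrv_map_rx_buf(struct device *dev, void *buf, size_t len,
                                dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM; /* mapping failed, nothing to unmap */
            return 0;
    }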
* tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping: (73 commits)
dma-mapping: fix inverted logic in dma_supported
dma-mapping: deprecate dma_zalloc_coherent
dma-mapping: zero memory returned from dma_alloc_*
sparc/iommu: fix ->map_sg return value
sparc/io-unit: fix ->map_sg return value
arm64: default to the direct mapping in get_arch_dma_ops
PCI: Remove unused attr variable in pci_dma_configure
ia64: only select ARCH_HAS_DMA_COHERENT_TO_PFN if swiotlb is enabled
dma-mapping: bypass indirect calls for dma-direct
vmd: use the proper dma_* APIs instead of direct methods calls
dma-direct: merge swiotlb_dma_ops into the dma_direct code
dma-direct: use dma_direct_map_page to implement dma_direct_map_sg
dma-direct: improve addressability error reporting
swiotlb: remove dma_mark_clean
swiotlb: remove SWIOTLB_MAP_ERROR
ACPI / scan: Refactor _CCA enforcement
dma-mapping: factor out dummy DMA ops
dma-mapping: always build the direct mapping code
dma-mapping: move dma_cache_sync out of line
dma-mapping: move various slow path functions out of line
...
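The dma_zalloc_coherent deprecation at the head of the shortlog is the
user-visible side of the zeroing change: since dma_alloc_coherent() now
returns zeroed memory everywhere, the wrapper adds nothing and callers can be
converted mechanically (which is what the coccinelle script mentioned above
would do). A sketch of the conversion, using a hypothetical descriptor-ring
allocation:

    #include <linux/dma-mapping.h>

    /* Hypothetical ring allocation used only to show the substitution. */
    static void *mydrv_alloc_ring(struct device *dev, size_t ring_size,
                                  dma_addr_t *dma_handle)
    {
            /*
             * Before: dma_zalloc_coherent(dev, ring_size, dma_handle,
             *                             GFP_KERNEL);
             * After:  dma_alloc_coherent() already zeroes the buffer, so
             * neither the wrapper nor an explicit __GFP_ZERO is needed.
             */
            return dma_alloc_coherent(dev, ring_size, dma_handle, GFP_KERNEL);
    }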
commit af7ddd8a62
112 changed files with 1444 additions and 2376 deletions
include/linux/dma-debug.h
@@ -30,8 +30,6 @@ struct bus_type;
 
 extern void dma_debug_add_bus(struct bus_type *bus);
 
-extern int dma_debug_resize_entries(u32 num_entries);
-
 extern void debug_dma_map_single(struct device *dev, const void *addr,
 				 unsigned long len);
 
@@ -72,17 +70,6 @@ extern void debug_dma_sync_single_for_device(struct device *dev,
 					     dma_addr_t dma_handle,
 					     size_t size, int direction);
 
-extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
-						dma_addr_t dma_handle,
-						unsigned long offset,
-						size_t size,
-						int direction);
-
-extern void debug_dma_sync_single_range_for_device(struct device *dev,
-						   dma_addr_t dma_handle,
-						   unsigned long offset,
-						   size_t size, int direction);
-
 extern void debug_dma_sync_sg_for_cpu(struct device *dev,
 				      struct scatterlist *sg,
 				      int nelems, int direction);
@@ -101,11 +88,6 @@ static inline void dma_debug_add_bus(struct bus_type *bus)
 {
 }
 
-static inline int dma_debug_resize_entries(u32 num_entries)
-{
-	return 0;
-}
-
 static inline void debug_dma_map_single(struct device *dev, const void *addr,
 					unsigned long len)
 {
@@ -174,22 +156,6 @@ static inline void debug_dma_sync_single_for_device(struct device *dev,
 {
 }
 
-static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
-						       dma_addr_t dma_handle,
-						       unsigned long offset,
-						       size_t size,
-						       int direction)
-{
-}
-
-static inline void debug_dma_sync_single_range_for_device(struct device *dev,
-							  dma_addr_t dma_handle,
-							  unsigned long offset,
-							  size_t size,
-							  int direction)
-{
-}
-
 static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
 					     struct scatterlist *sg,
 					     int nelems, int direction)
include/linux/dma-direct.h
@@ -5,8 +5,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/mem_encrypt.h>
 
-#define DIRECT_MAPPING_ERROR		(~(dma_addr_t)0)
-
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #else
@@ -50,14 +48,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return __sme_clr(__dma_to_phys(dev, daddr));
 }
 
-#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
-void dma_mark_clean(void *addr, size_t size);
-#else
-static inline void dma_mark_clean(void *addr, size_t size)
-{
-}
-#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
-
 u64 dma_direct_get_required_mask(struct device *dev);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
@@ -67,11 +57,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-		enum dma_data_direction dir, unsigned long attrs);
+struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
+void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
 int dma_direct_supported(struct device *dev, u64 mask);
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
 #endif /* _LINUX_DMA_DIRECT_H */
include/linux/dma-iommu.h
@@ -69,7 +69,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 /* The DMA API isn't _quite_ the whole story, though... */
 void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
include/linux/dma-mapping.h
@@ -128,13 +128,14 @@ struct dma_map_ops {
 			enum dma_data_direction dir);
 	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
 			enum dma_data_direction direction);
-	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	u64 (*get_required_mask)(struct device *dev);
 };
 
-extern const struct dma_map_ops dma_direct_ops;
+#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
+
 extern const struct dma_map_ops dma_virt_ops;
+extern const struct dma_map_ops dma_dummy_ops;
 
 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
 
@@ -220,6 +221,69 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 }
 #endif
 
+static inline bool dma_is_direct(const struct dma_map_ops *ops)
+{
+	return likely(!ops);
+}
+
+/*
+ * All the dma_direct_* declarations are here just for the indirect call bypass,
+ * and must not be used directly drivers!
+ */
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		enum dma_data_direction dir, unsigned long attrs);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_direct_unmap_sg(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+}
+static inline void dma_direct_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 					      size_t size,
 					      enum dma_data_direction dir,
@@ -230,9 +294,12 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_map_single(dev, ptr, size);
-	addr = ops->map_page(dev, virt_to_page(ptr),
-			     offset_in_page(ptr), size,
-			     dir, attrs);
+	if (dma_is_direct(ops))
+		addr = dma_direct_map_page(dev, virt_to_page(ptr),
+				offset_in_page(ptr), size, dir, attrs);
+	else
+		addr = ops->map_page(dev, virt_to_page(ptr),
+				offset_in_page(ptr), size, dir, attrs);
 	debug_dma_map_page(dev, virt_to_page(ptr),
 			   offset_in_page(ptr), size,
 			   dir, addr, true);
@@ -247,11 +314,19 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
+	if (dma_is_direct(ops))
+		dma_direct_unmap_page(dev, addr, size, dir, attrs);
+	else if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, attrs);
 	debug_dma_unmap_page(dev, addr, size, dir, true);
 }
 
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	return dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+}
+
 /*
  * dma_maps_sg_attrs returns 0 on error and > 0 on success.
  * It should never return a value < 0.
@@ -264,7 +339,10 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 	int ents;
 
 	BUG_ON(!valid_dma_direction(dir));
-	ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	if (dma_is_direct(ops))
+		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+	else
+		ents = ops->map_sg(dev, sg, nents, dir, attrs);
 	BUG_ON(ents < 0);
 	debug_dma_map_sg(dev, sg, nents, ents, dir);
 
@@ -279,7 +357,9 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (ops->unmap_sg)
+	if (dma_is_direct(ops))
+		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+	else if (ops->unmap_sg)
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 }
 
@@ -293,25 +373,15 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
-	addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	if (dma_is_direct(ops))
+		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	else
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
 	return addr;
 }
 
-static inline void dma_unmap_page_attrs(struct device *dev,
-					dma_addr_t addr, size_t size,
-					enum dma_data_direction dir,
-					unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, attrs);
-	debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
 static inline dma_addr_t dma_map_resource(struct device *dev,
 					  phys_addr_t phys_addr,
 					  size_t size,
@@ -327,7 +397,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
 	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
 
 	addr = phys_addr;
-	if (ops->map_resource)
+	if (ops && ops->map_resource)
 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
 
 	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
@@ -342,7 +412,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_resource)
+	if (ops && ops->unmap_resource)
 		ops->unmap_resource(dev, addr, size, dir, attrs);
 	debug_dma_unmap_resource(dev, addr, size, dir);
 }
@@ -354,11 +424,20 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
+	if (dma_is_direct(ops))
+		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+	else if (ops->sync_single_for_cpu)
 		ops->sync_single_for_cpu(dev, addr, size, dir);
 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 }
 
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
+{
+	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+}
+
 static inline void dma_sync_single_for_device(struct device *dev,
 					      dma_addr_t addr, size_t size,
 					      enum dma_data_direction dir)
@@ -366,37 +445,18 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
+	if (dma_is_direct(ops))
+		dma_direct_sync_single_for_device(dev, addr, size, dir);
+	else if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr, size, dir);
 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t addr,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
-	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-}
-
 static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t addr,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction dir)
+		dma_addr_t addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(dev, addr + offset, size, dir);
-	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+	return dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
 static inline void
@@ -406,7 +466,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_cpu)
+	if (dma_is_direct(ops))
+		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_cpu)
 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 }
@@ -418,7 +480,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_device)
+	if (dma_is_direct(ops))
+		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_device)
 		ops->sync_sg_for_device(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 
@@ -431,16 +495,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->cache_sync)
-		ops->cache_sync(dev, vaddr, size, dir);
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir);
 
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -455,107 +511,29 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 			const void *caller);
 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
 
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space.  The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
-	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	if (ops->mmap)
-		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
+int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
+bool dma_in_atomic_pool(void *start, size_t size);
+void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
+bool dma_free_from_pool(void *start, size_t size);
 
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
 
 int
 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
 		dma_addr_t dma_addr, size_t size, unsigned long attrs);
 
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
-		      dma_addr_t dma_addr, size_t size,
-		      unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	if (ops->get_sgtable)
-		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
-					attrs);
-	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
-				      attrs);
-}
-
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
 
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev)	(true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	void *cpu_addr;
-
-	BUG_ON(!ops);
-	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
-
-	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
-		return cpu_addr;
-
-	/* let the implementation decide on the zone to allocate from: */
-	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
-	if (!arch_dma_alloc_attrs(&dev))
-		return NULL;
-	if (!ops->alloc)
-		return NULL;
-
-	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-	return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!ops);
-
-	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
-		return;
-	/*
-	 * On non-coherent platforms which implement DMA-coherent buffers via
-	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
-	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
-	 * sleep on some machines, and b) an indication that the driver is
-	 * probably misusing the coherent API anyway.
-	 */
-	WARN_ON(irqs_disabled());
-
-	if (!ops->free || !cpu_addr)
-		return;
-
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle, unsigned long attrs);
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp)
@@ -573,43 +551,16 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
 	debug_dma_mapping_error(dev, dma_addr);
-	if (ops->mapping_error)
-		return ops->mapping_error(dev, dma_addr);
+
+	if (dma_addr == DMA_MAPPING_ERROR)
+		return -ENOMEM;
 	return 0;
 }
 
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
-	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
-		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (!ops)
-		return 0;
-	if (!ops->dma_supported)
-		return 1;
-	return ops->dma_supported(dev, mask);
-}
-
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	dma_check_mask(dev, mask);
-
-	*dev->dma_mask = mask;
-	return 0;
-}
-#endif
+int dma_supported(struct device *dev, u64 mask);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
 
 static inline u64 dma_get_mask(struct device *dev)
 {
@@ -618,21 +569,6 @@ static inline u64 dma_get_mask(struct device *dev)
 	return DMA_BIT_MASK(32);
 }
 
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask);
-#else
-static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
-	if (!dma_supported(dev, mask))
-		return -EIO;
-
-	dma_check_mask(dev, mask);
-
-	dev->coherent_dma_mask = mask;
-	return 0;
-}
-#endif
-
 /*
  * Set both the DMA mask and the coherent DMA mask to the same thing.
  * Note that we don't check the return value from dma_set_coherent_mask()
@@ -676,8 +612,7 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
 	return SZ_64K;
 }
 
-static inline unsigned int dma_set_max_seg_size(struct device *dev,
-						unsigned int size)
+static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
 {
 	if (dev->dma_parms) {
 		dev->dma_parms->max_segment_size = size;
@@ -709,12 +644,13 @@ static inline unsigned long dma_max_pfn(struct device *dev)
 }
 #endif
 
+/*
+ * Please always use dma_alloc_coherent instead as it already zeroes the memory!
+ */
 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flag)
 {
-	void *ret = dma_alloc_coherent(dev, size, dma_handle,
-				       flag | __GFP_ZERO);
-	return ret;
+	return dma_alloc_coherent(dev, size, dma_handle, flag);
 }
 
 static inline int dma_get_cache_alignment(void)
include/linux/dma-noncoherent.h
@@ -38,7 +38,10 @@ pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction);
 #else
-#define arch_dma_cache_sync NULL
+static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
+		size_t size, enum dma_data_direction direction)
+{
+}
 #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -69,4 +72,6 @@ static inline void arch_sync_dma_for_cpu_all(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
 
+void arch_dma_prep_coherent(struct page *page, size_t size);
+
 #endif /* _LINUX_DMA_NONCOHERENT_H */
include/linux/scatterlist.h
@@ -324,10 +324,10 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
  * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
  * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
  */
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SG_MAX_SEGMENTS	2048
-#else
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
 #define SG_MAX_SEGMENTS	SG_CHUNK_SIZE
+#else
+#define SG_MAX_SEGMENTS	2048
 #endif
 
 #ifdef CONFIG_SG_POOL
include/linux/swiotlb.h
@@ -16,8 +16,6 @@ enum swiotlb_force {
 	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
 };
 
-extern enum swiotlb_force swiotlb_force;
-
 /*
  * Maximum allowable number of contiguous slabs to map,
  * must be a power of 2.  What is the appropriate value ?
@@ -46,9 +44,6 @@ enum dma_sync_target {
 	SYNC_FOR_DEVICE = 1,
 };
 
-/* define the last possible byte of physical address space as a mapping error */
-#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
-
 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 					  dma_addr_t tbl_dma_addr,
 					  phys_addr_t phys, size_t size,
@@ -65,56 +60,44 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
 				    size_t size, enum dma_data_direction dir,
 				    enum dma_sync_target target);
 
-/* Accessory functions. */
-
-extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
-				   unsigned long offset, size_t size,
-				   enum dma_data_direction dir,
-				   unsigned long attrs);
-extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
-			       size_t size, enum dma_data_direction dir,
-			       unsigned long attrs);
-
-extern int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		     enum dma_data_direction dir,
-		     unsigned long attrs);
-
-extern void
-swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-		       int nelems, enum dma_data_direction dir,
-		       unsigned long attrs);
-
-extern void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-			    size_t size, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-			int nelems, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-			       size_t size, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-			   int nelems, enum dma_data_direction dir);
-
-extern int
-swiotlb_dma_supported(struct device *hwdev, u64 mask);
-
 #ifdef CONFIG_SWIOTLB
-extern void __init swiotlb_exit(void);
+extern enum swiotlb_force swiotlb_force;
+extern phys_addr_t io_tlb_start, io_tlb_end;
+
+static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+{
+	return paddr >= io_tlb_start && paddr < io_tlb_end;
+}
+
+bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
 #else
-static inline void swiotlb_exit(void) { }
-static inline unsigned int swiotlb_max_segment(void) { return 0; }
-#endif
+#define swiotlb_force SWIOTLB_NO_FORCE
+static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+{
+	return false;
+}
+static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
+		dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	return false;
+}
+static inline void swiotlb_exit(void)
+{
+}
+static inline unsigned int swiotlb_max_segment(void)
+{
+	return 0;
+}
+#endif /* CONFIG_SWIOTLB */
 
 extern void swiotlb_print_info(void);
 extern void swiotlb_set_max_segment(unsigned int);
 
-extern const struct dma_map_ops swiotlb_dma_ops;
-
 #endif /* __LINUX_SWIOTLB_H */