IOMMU Updates for Linux v5.19
Including:
- Intel VT-d driver updates
- Domain force snooping improvement.
- Cleanups, no intentional functional changes.
- ARM SMMU driver updates
- Add new Qualcomm device-tree compatible strings
- Add new Nvidia device-tree compatible string for Tegra234
- Fix UAF in SMMUv3 shared virtual addressing code
- Force identity-mapped domains for users of ye olde SMMU
legacy binding
- Minor cleanups
- Patches to fix a BUG_ON in the vfio_iommu_group_notifier
- Groundwork for upcoming iommufd framework
- Introduction of DMA ownership so that an entire IOMMU group
is either controlled by the kernel or by user-space
- MT8195 and MT8186 support in the Mediatek IOMMU driver
- Patches to make forcing of cache-coherent DMA more coherent
between IOMMU drivers
- Fixes for Thunderbolt device DMA protection
- Various smaller fixes and cleanups
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAmKWCbUACgkQK/BELZcB
GuPHmRAAuoH9iK/jrC3SgrqpBfH2iRN7ovIX8dFvgbQWX27lhXF4gvj2/nYdIvPK
75j/LmdibuzV3Iez4kjbGKNG1AikwK3dKIH21a84f3ctnoamQyL6nMfCVBFaVD/D
kvPpTHyjbGPNf6KZyWQdkJ5DXD1aoG1DKkBnslH5pTNPqGuNqbcnRTg0YxiJFLBv
5w2B6jL06XRzunh+Sp1Dbj+po8ROjLRCEU+tdrndO8W/Dyp6+ZNNuxL9/3BM9zMj
py0M4piFtGnhmJSdym1eeHm7r1YRjkZw+MN+e8NcrcSihmDutEWo7nRRxA5uVaa+
3O2DNERqCvQUYxfNRUOKwzV8v51GYQHEPhvOe/MLgaEQDmDmlF2dHNGm93eCMdrv
m1cT011oU7pa4qHomwLyTJxSsR7FzJ37igq/WeY++MBhl+frqfzEQPVxF+W7GLb8
QvT/+woCPzLVpJbE7s0FUD4nbPd8c1dAz4+HO1DajxILIOTq1bnPIorSjgXODRjq
yzsiP1rAg0L0PsL7pXn3cPMzNCE//xtOsRsAGmaVv6wBoMLyWVFCU/wjPEdjrSWA
nXpAuCL84uxCEl/KLYMsg9UhjT6ko7CuKdsybIG9zNIiUau43uSqgTen0xCpYt0i
m//O/X3tPyxmoLKRW+XVehGOrBZW+qrQny6hk/Zex+6UJQqVMTA=
=W0hj
-----END PGP SIGNATURE-----
Merge tag 'iommu-updates-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
- Intel VT-d driver updates:
- Domain force snooping improvement.
- Cleanups, no intentional functional changes.
- ARM SMMU driver updates:
- Add new Qualcomm device-tree compatible strings
- Add new Nvidia device-tree compatible string for Tegra234
- Fix UAF in SMMUv3 shared virtual addressing code
- Force identity-mapped domains for users of ye olde SMMU legacy
binding
- Minor cleanups
- Fix a BUG_ON in the vfio_iommu_group_notifier:
- Groundwork for upcoming iommufd framework
- Introduction of DMA ownership so that an entire IOMMU group is
either controlled by the kernel or by user-space
- MT8195 and MT8186 support in the Mediatek IOMMU driver
- Make forcing of cache-coherent DMA more coherent between IOMMU
drivers
- Fixes for Thunderbolt device DMA protection
- Various smaller fixes and cleanups
* tag 'iommu-updates-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (88 commits)
iommu/amd: Increase timeout waiting for GA log enablement
iommu/s390: Tolerate repeat attach_dev calls
iommu/vt-d: Remove hard coding PGSNP bit in PASID entries
iommu/vt-d: Remove domain_update_iommu_snooping()
iommu/vt-d: Check domain force_snooping against attached devices
iommu/vt-d: Block force-snoop domain attaching if no SC support
iommu/vt-d: Size Page Request Queue to avoid overflow condition
iommu/vt-d: Fold dmar_insert_one_dev_info() into its caller
iommu/vt-d: Change return type of dmar_insert_one_dev_info()
iommu/vt-d: Remove unneeded validity check on dev
iommu/dma: Explicitly sort PCI DMA windows
iommu/dma: Fix iova map result check bug
iommu/mediatek: Fix NULL pointer dereference when printing dev_name
iommu: iommu_group_claim_dma_owner() must always assign a domain
iommu/arm-smmu: Force identity domains for legacy binding
iommu/arm-smmu: Support Tegra234 SMMU
dt-bindings: arm-smmu: Add compatible for Tegra234 SOC
dt-bindings: arm-smmu: Document nvidia,memory-controller property
iommu/arm-smmu-qcom: Add SC8280XP support
dt-bindings: arm-smmu: Add compatible for Qualcomm SC8280XP
...
This commit is contained in:
commit
e1cbc3b96a
53 changed files with 2316 additions and 1035 deletions
|
|
@ -79,6 +79,14 @@ struct amba_driver {
|
|||
void (*remove)(struct amba_device *);
|
||||
void (*shutdown)(struct amba_device *);
|
||||
const struct amba_id *id_table;
|
||||
/*
|
||||
* For most device drivers, no need to care about this flag as long as
|
||||
* all DMAs are handled through the kernel DMA API. For some special
|
||||
* ones, for example VFIO drivers, they know how to manage the DMA
|
||||
* themselves and set this flag so that the IOMMU layer will allow them
|
||||
* to setup and manage their own I/O address space.
|
||||
*/
|
||||
bool driver_managed_dma;
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -59,6 +59,8 @@ struct fwnode_handle;
|
|||
* bus supports.
|
||||
* @dma_configure: Called to setup DMA configuration on a device on
|
||||
* this bus.
|
||||
* @dma_cleanup: Called to cleanup DMA configuration on a device on
|
||||
* this bus.
|
||||
* @pm: Power management operations of this bus, callback the specific
|
||||
* device driver's pm-ops.
|
||||
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
|
||||
|
|
@ -103,6 +105,7 @@ struct bus_type {
|
|||
int (*num_vf)(struct device *dev);
|
||||
|
||||
int (*dma_configure)(struct device *dev);
|
||||
void (*dma_cleanup)(struct device *dev);
|
||||
|
||||
const struct dev_pm_ops *pm;
|
||||
|
||||
|
|
|
|||
|
|
@ -32,6 +32,13 @@ struct fsl_mc_io;
|
|||
* @shutdown: Function called at shutdown time to quiesce the device
|
||||
* @suspend: Function called when a device is stopped
|
||||
* @resume: Function called when a device is resumed
|
||||
* @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
|
||||
* For most device drivers, no need to care about this flag
|
||||
* as long as all DMAs are handled through the kernel DMA API.
|
||||
* For some special ones, for example VFIO drivers, they know
|
||||
* how to manage the DMA themselves and set this flag so that
|
||||
* the IOMMU layer will allow them to setup and manage their
|
||||
* own I/O address space.
|
||||
*
|
||||
* Generic DPAA device driver object for device drivers that are registered
|
||||
* with a DPRC bus. This structure is to be embedded in each device-specific
|
||||
|
|
@ -45,6 +52,7 @@ struct fsl_mc_driver {
|
|||
void (*shutdown)(struct fsl_mc_device *dev);
|
||||
int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
|
||||
int (*resume)(struct fsl_mc_device *dev);
|
||||
bool driver_managed_dma;
|
||||
};
|
||||
|
||||
#define to_fsl_mc_driver(_drv) \
|
||||
|
|
|
|||
|
|
@ -539,7 +539,8 @@ struct dmar_domain {
|
|||
|
||||
u8 has_iotlb_device: 1;
|
||||
u8 iommu_coherency: 1; /* indicate coherency of iommu access */
|
||||
u8 iommu_snooping: 1; /* indicate snooping control feature */
|
||||
u8 force_snooping : 1; /* Create IOPTEs with snoop control */
|
||||
u8 set_pte_snp:1;
|
||||
|
||||
struct list_head devices; /* all devices' list */
|
||||
struct iova_domain iovad; /* iova's that belong to this domain */
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@
|
|||
#define __INTEL_SVM_H__
|
||||
|
||||
/* Page Request Queue depth */
|
||||
#define PRQ_ORDER 2
|
||||
#define PRQ_ORDER 4
|
||||
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
|
||||
#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
|
||||
|
||||
|
|
|
|||
|
|
@ -103,10 +103,11 @@ static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
|
|||
}
|
||||
|
||||
enum iommu_cap {
|
||||
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
|
||||
transactions */
|
||||
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
|
||||
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
|
||||
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
|
||||
IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
|
||||
DMA protection and we should too */
|
||||
};
|
||||
|
||||
/* These are the possible reserved region types */
|
||||
|
|
@ -272,6 +273,9 @@ struct iommu_ops {
|
|||
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
|
||||
* queue
|
||||
* @iova_to_phys: translate iova to physical address
|
||||
* @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
|
||||
* including no-snoop TLPs on PCIe or other platform
|
||||
* specific mechanisms.
|
||||
* @enable_nesting: Enable nesting
|
||||
* @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
|
||||
* @free: Release the domain after use.
|
||||
|
|
@ -300,6 +304,7 @@ struct iommu_domain_ops {
|
|||
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
|
||||
dma_addr_t iova);
|
||||
|
||||
bool (*enforce_cache_coherency)(struct iommu_domain *domain);
|
||||
int (*enable_nesting)(struct iommu_domain *domain);
|
||||
int (*set_pgtable_quirks)(struct iommu_domain *domain,
|
||||
unsigned long quirks);
|
||||
|
|
@ -407,16 +412,10 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
|
|||
return dev->iommu->iommu_dev->ops;
|
||||
}
|
||||
|
||||
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
|
||||
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
|
||||
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
|
||||
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
|
||||
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
|
||||
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
|
||||
|
||||
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
|
||||
extern int bus_iommu_probe(struct bus_type *bus);
|
||||
extern bool iommu_present(struct bus_type *bus);
|
||||
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
|
||||
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
|
||||
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
|
||||
extern struct iommu_group *iommu_group_get_by_id(int id);
|
||||
|
|
@ -478,10 +477,6 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
|
|||
extern struct iommu_group *iommu_group_get(struct device *dev);
|
||||
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
|
||||
extern void iommu_group_put(struct iommu_group *group);
|
||||
extern int iommu_group_register_notifier(struct iommu_group *group,
|
||||
struct notifier_block *nb);
|
||||
extern int iommu_group_unregister_notifier(struct iommu_group *group,
|
||||
struct notifier_block *nb);
|
||||
extern int iommu_register_device_fault_handler(struct device *dev,
|
||||
iommu_dev_fault_handler_t handler,
|
||||
void *data);
|
||||
|
|
@ -675,6 +670,13 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev,
|
|||
void iommu_sva_unbind_device(struct iommu_sva *handle);
|
||||
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
|
||||
|
||||
int iommu_device_use_default_domain(struct device *dev);
|
||||
void iommu_device_unuse_default_domain(struct device *dev);
|
||||
|
||||
int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
|
||||
void iommu_group_release_dma_owner(struct iommu_group *group);
|
||||
bool iommu_group_dma_owner_claimed(struct iommu_group *group);
|
||||
|
||||
#else /* CONFIG_IOMMU_API */
|
||||
|
||||
struct iommu_ops {};
|
||||
|
|
@ -689,6 +691,11 @@ static inline bool iommu_present(struct bus_type *bus)
|
|||
return false;
|
||||
}
|
||||
|
||||
static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
|
||||
{
|
||||
return false;
|
||||
|
|
@ -871,18 +878,6 @@ static inline void iommu_group_put(struct iommu_group *group)
|
|||
{
|
||||
}
|
||||
|
||||
static inline int iommu_group_register_notifier(struct iommu_group *group,
|
||||
struct notifier_block *nb)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline int iommu_group_unregister_notifier(struct iommu_group *group,
|
||||
struct notifier_block *nb)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline
|
||||
int iommu_register_device_fault_handler(struct device *dev,
|
||||
iommu_dev_fault_handler_t handler,
|
||||
|
|
@ -1031,6 +1026,30 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
|
|||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int iommu_device_use_default_domain(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void iommu_device_unuse_default_domain(struct device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int
|
||||
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline void iommu_group_release_dma_owner(struct iommu_group *group)
|
||||
{
|
||||
}
|
||||
|
||||
static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* CONFIG_IOMMU_API */
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -891,6 +891,13 @@ struct module;
|
|||
* created once it is bound to the driver.
|
||||
* @driver: Driver model structure.
|
||||
* @dynids: List of dynamically added device IDs.
|
||||
* @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
|
||||
* For most device drivers, no need to care about this flag
|
||||
* as long as all DMAs are handled through the kernel DMA API.
|
||||
* For some special ones, for example VFIO drivers, they know
|
||||
* how to manage the DMA themselves and set this flag so that
|
||||
* the IOMMU layer will allow them to setup and manage their
|
||||
* own I/O address space.
|
||||
*/
|
||||
struct pci_driver {
|
||||
struct list_head node;
|
||||
|
|
@ -909,6 +916,7 @@ struct pci_driver {
|
|||
const struct attribute_group **dev_groups;
|
||||
struct device_driver driver;
|
||||
struct pci_dynids dynids;
|
||||
bool driver_managed_dma;
|
||||
};
|
||||
|
||||
static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
|
||||
|
|
|
|||
|
|
@ -210,6 +210,14 @@ struct platform_driver {
|
|||
struct device_driver driver;
|
||||
const struct platform_device_id *id_table;
|
||||
bool prevent_deferred_probe;
|
||||
/*
|
||||
* For most device drivers, no need to care about this flag as long as
|
||||
* all DMAs are handled through the kernel DMA API. For some special
|
||||
* ones, for example VFIO drivers, they know how to manage the DMA
|
||||
* themselves and set this flag so that the IOMMU layer will allow them
|
||||
* to setup and manage their own I/O address space.
|
||||
*/
|
||||
bool driver_managed_dma;
|
||||
};
|
||||
|
||||
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
|
||||
|
|
@ -328,8 +336,6 @@ extern int platform_pm_restore(struct device *dev);
|
|||
#define platform_pm_restore NULL
|
||||
#endif
|
||||
|
||||
extern int platform_dma_configure(struct device *dev);
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#define USE_PLATFORM_PM_SLEEP_OPS \
|
||||
.suspend = platform_pm_suspend, \
|
||||
|
|
|
|||
|
|
@ -465,6 +465,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
|
|||
* @msix_ida: Used to allocate MSI-X vectors for rings
|
||||
* @going_away: The host controller device is about to disappear so when
|
||||
* this flag is set, avoid touching the hardware anymore.
|
||||
* @iommu_dma_protection: An IOMMU will isolate external-facing ports.
|
||||
* @interrupt_work: Work scheduled to handle ring interrupt when no
|
||||
* MSI-X is used.
|
||||
* @hop_count: Number of rings (end point hops) supported by NHI.
|
||||
|
|
@ -479,6 +480,7 @@ struct tb_nhi {
|
|||
struct tb_ring **rx_rings;
|
||||
struct ida msix_ida;
|
||||
bool going_away;
|
||||
bool iommu_dma_protection;
|
||||
struct work_struct interrupt_work;
|
||||
u32 hop_count;
|
||||
unsigned long quirks;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue