Driver core patches for 5.1-rc1
Here is the big driver core patchset for 5.1-rc1 More patches than "normal" here this merge window, due to some work in the driver core by Alexander Duyck to rework the async probe functionality to work better for a number of devices, and independent work from Rafael for the device link functionality to make it work "correctly". Also in here is: - lots of BUS_ATTR() removals, the macro is about to go away - firmware test fixups - ihex fixups and simplification - component additions (also includes i915 patches) - lots of minor coding style fixups and cleanups. All of these have been in linux-next for a while with no reported issues. Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> -----BEGIN PGP SIGNATURE----- iG0EABECAC0WIQT0tgzFv3jCIUoxPcsxR9QN2y37KQUCXH+euQ8cZ3JlZ0Brcm9h aC5jb20ACgkQMUfUDdst+ynyTgCfbV8CLums843sBnT8NnWrTMTdTCcAn1K4re0m ep8g+6oRLxJy414hogxQ =bLs2 -----END PGP SIGNATURE----- Merge tag 'driver-core-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core Pull driver core updates from Greg KH: "Here is the big driver core patchset for 5.1-rc1 More patches than "normal" here this merge window, due to some work in the driver core by Alexander Duyck to rework the async probe functionality to work better for a number of devices, and independent work from Rafael for the device link functionality to make it work "correctly". Also in here is: - lots of BUS_ATTR() removals, the macro is about to go away - firmware test fixups - ihex fixups and simplification - component additions (also includes i915 patches) - lots of minor coding style fixups and cleanups. 
All of these have been in linux-next for a while with no reported issues" * tag 'driver-core-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (65 commits) driver core: platform: remove misleading err_alloc label platform: set of_node in platform_device_register_full() firmware: hardcode the debug message for -ENOENT driver core: Add missing description of new struct device_link field driver core: Fix PM-runtime for links added during consumer probe drivers/component: kerneldoc polish async: Add cmdline option to specify drivers to be async probed driver core: Fix possible supplier PM-usage counter imbalance PM-runtime: Fix __pm_runtime_set_status() race with runtime resume driver: platform: Support parsing GpioInt 0 in platform_get_irq() selftests: firmware: fix verify_reqs() return value Revert "selftests: firmware: remove use of non-standard diff -Z option" Revert "selftests: firmware: add CONFIG_FW_LOADER_USER_HELPER_FALLBACK to config" device: Fix comment for driver_data in struct device kernfs: Allocating memory for kernfs_iattrs with kmem_cache. sysfs: remove unused include of kernfs-internal.h driver core: Postpone DMA tear-down until after devres release driver core: Document limitation related to DL_FLAG_RPM_ACTIVE PM-runtime: Take suppliers into account in __pm_runtime_set_status() device.h: Add __cold to dev_<level> logging functions ...
This commit is contained in:
commit
e431f2d74e
52 changed files with 1096 additions and 413 deletions
|
|
@ -14,6 +14,8 @@
|
|||
|
||||
#include <linux/types.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/numa.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
typedef u64 async_cookie_t;
|
||||
typedef void (*async_func_t) (void *data, async_cookie_t cookie);
|
||||
|
|
@ -37,9 +39,83 @@ struct async_domain {
|
|||
struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
|
||||
.registered = 0 }
|
||||
|
||||
extern async_cookie_t async_schedule(async_func_t func, void *data);
|
||||
extern async_cookie_t async_schedule_domain(async_func_t func, void *data,
|
||||
struct async_domain *domain);
|
||||
async_cookie_t async_schedule_node(async_func_t func, void *data,
|
||||
int node);
|
||||
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
|
||||
int node,
|
||||
struct async_domain *domain);
|
||||
|
||||
/**
|
||||
* async_schedule - schedule a function for asynchronous execution
|
||||
* @func: function to execute asynchronously
|
||||
* @data: data pointer to pass to the function
|
||||
*
|
||||
* Returns an async_cookie_t that may be used for checkpointing later.
|
||||
* Note: This function may be called from atomic or non-atomic contexts.
|
||||
*/
|
||||
static inline async_cookie_t async_schedule(async_func_t func, void *data)
|
||||
{
|
||||
return async_schedule_node(func, data, NUMA_NO_NODE);
|
||||
}
|
||||
|
||||
/**
|
||||
* async_schedule_domain - schedule a function for asynchronous execution within a certain domain
|
||||
* @func: function to execute asynchronously
|
||||
* @data: data pointer to pass to the function
|
||||
* @domain: the domain
|
||||
*
|
||||
* Returns an async_cookie_t that may be used for checkpointing later.
|
||||
* @domain may be used in the async_synchronize_*_domain() functions to
|
||||
* wait within a certain synchronization domain rather than globally.
|
||||
* Note: This function may be called from atomic or non-atomic contexts.
|
||||
*/
|
||||
static inline async_cookie_t
|
||||
async_schedule_domain(async_func_t func, void *data,
|
||||
struct async_domain *domain)
|
||||
{
|
||||
return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain);
|
||||
}
|
||||
|
||||
/**
|
||||
* async_schedule_dev - A device specific version of async_schedule
|
||||
* @func: function to execute asynchronously
|
||||
* @dev: device argument to be passed to function
|
||||
*
|
||||
* Returns an async_cookie_t that may be used for checkpointing later.
|
||||
* @dev is used as both the argument for the function and to provide NUMA
|
||||
* context for where to run the function. By doing this we can try to
|
||||
* provide for the best possible outcome by operating on the device on the
|
||||
* CPUs closest to the device.
|
||||
* Note: This function may be called from atomic or non-atomic contexts.
|
||||
*/
|
||||
static inline async_cookie_t
|
||||
async_schedule_dev(async_func_t func, struct device *dev)
|
||||
{
|
||||
return async_schedule_node(func, dev, dev_to_node(dev));
|
||||
}
|
||||
|
||||
/**
|
||||
* async_schedule_dev_domain - A device specific version of async_schedule_domain
|
||||
* @func: function to execute asynchronously
|
||||
* @dev: device argument to be passed to function
|
||||
* @domain: the domain
|
||||
*
|
||||
* Returns an async_cookie_t that may be used for checkpointing later.
|
||||
* @dev is used as both the argument for the function and to provide NUMA
|
||||
* context for where to run the function. By doing this we can try to
|
||||
* provide for the best possible outcome by operating on the device on the
|
||||
* CPUs closest to the device.
|
||||
* @domain may be used in the async_synchronize_*_domain() functions to
|
||||
* wait within a certain synchronization domain rather than globally.
|
||||
* Note: This function may be called from atomic or non-atomic contexts.
|
||||
*/
|
||||
static inline async_cookie_t
|
||||
async_schedule_dev_domain(async_func_t func, struct device *dev,
|
||||
struct async_domain *domain)
|
||||
{
|
||||
return async_schedule_node_domain(func, dev, dev_to_node(dev), domain);
|
||||
}
|
||||
|
||||
void async_unregister_domain(struct async_domain *domain);
|
||||
extern void async_synchronize_full(void);
|
||||
extern void async_synchronize_full_domain(struct async_domain *domain);
|
||||
|
|
|
|||
|
|
@ -98,7 +98,7 @@ void component_match_add_typed(struct device *master,
|
|||
int (*compare_typed)(struct device *, int, void *), void *compare_data);
|
||||
|
||||
/**
|
||||
* component_match_add - add a compent match
|
||||
* component_match_add - add a component match entry
|
||||
* @master: device with the aggregate driver
|
||||
* @matchptr: pointer to the list of component matches
|
||||
* @compare: compare function to match against all components
|
||||
|
|
|
|||
|
|
@ -341,6 +341,7 @@ struct device *driver_find_device(struct device_driver *drv,
|
|||
struct device *start, void *data,
|
||||
int (*match)(struct device *dev, void *data));
|
||||
|
||||
void driver_deferred_probe_add(struct device *dev);
|
||||
int driver_deferred_probe_check_state(struct device *dev);
|
||||
|
||||
/**
|
||||
|
|
@ -827,12 +828,14 @@ enum device_link_state {
|
|||
* PM_RUNTIME: If set, the runtime PM framework will use this link.
|
||||
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
|
||||
* AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
|
||||
* AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
|
||||
*/
|
||||
#define DL_FLAG_STATELESS BIT(0)
|
||||
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
|
||||
#define DL_FLAG_PM_RUNTIME BIT(2)
|
||||
#define DL_FLAG_RPM_ACTIVE BIT(3)
|
||||
#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
|
||||
#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
|
||||
|
||||
/**
|
||||
* struct device_link - Device link representation.
|
||||
|
|
@ -845,6 +848,7 @@ enum device_link_state {
|
|||
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
|
||||
* @kref: Count repeated addition of the same link.
|
||||
* @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
|
||||
* @supplier_preactivated: Supplier has been made active before consumer probe.
|
||||
*/
|
||||
struct device_link {
|
||||
struct device *supplier;
|
||||
|
|
@ -853,11 +857,12 @@ struct device_link {
|
|||
struct list_head c_node;
|
||||
enum device_link_state status;
|
||||
u32 flags;
|
||||
bool rpm_active;
|
||||
refcount_t rpm_active;
|
||||
struct kref kref;
|
||||
#ifdef CONFIG_SRCU
|
||||
struct rcu_head rcu_head;
|
||||
#endif
|
||||
bool supplier_preactivated; /* Owned by consumer probe. */
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -985,7 +990,7 @@ struct device {
|
|||
void *platform_data; /* Platform specific data, device
|
||||
core doesn't touch it */
|
||||
void *driver_data; /* Driver data, set and get with
|
||||
dev_set/get_drvdata */
|
||||
dev_set_drvdata/dev_get_drvdata */
|
||||
struct dev_links_info links;
|
||||
struct dev_pm_info power;
|
||||
struct dev_pm_domain *pm_domain;
|
||||
|
|
@ -1035,7 +1040,6 @@ struct device {
|
|||
spinlock_t devres_lock;
|
||||
struct list_head devres_head;
|
||||
|
||||
struct klist_node knode_class;
|
||||
struct class *class;
|
||||
const struct attribute_group **groups; /* optional groups */
|
||||
|
||||
|
|
@ -1392,28 +1396,28 @@ void device_link_remove(void *consumer, struct device *supplier);
|
|||
|
||||
#ifdef CONFIG_PRINTK
|
||||
|
||||
__printf(3, 0)
|
||||
__printf(3, 0) __cold
|
||||
int dev_vprintk_emit(int level, const struct device *dev,
|
||||
const char *fmt, va_list args);
|
||||
__printf(3, 4)
|
||||
__printf(3, 4) __cold
|
||||
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
|
||||
|
||||
__printf(3, 4)
|
||||
__printf(3, 4) __cold
|
||||
void dev_printk(const char *level, const struct device *dev,
|
||||
const char *fmt, ...);
|
||||
__printf(2, 3)
|
||||
__printf(2, 3) __cold
|
||||
void _dev_emerg(const struct device *dev, const char *fmt, ...);
|
||||
__printf(2, 3)
|
||||
__printf(2, 3) __cold
|
||||
void _dev_alert(const struct device *dev, const char *fmt, ...);
|
||||
__printf(2, 3)
|
||||
__printf(2, 3) __cold
|
||||
void _dev_crit(const struct device *dev, const char *fmt, ...);
|
||||
__printf(2, 3)
|
||||
__printf(2, 3) __cold
|
||||
void _dev_err(const struct device *dev, const char *fmt, ...);
|
||||
__printf(2, 3)
|
||||
__printf(2, 3) __cold
|
||||
void _dev_warn(const struct device *dev, const char *fmt, ...);
|
||||
__printf(2, 3)
|
||||
__printf(2, 3) __cold
|
||||
void _dev_notice(const struct device *dev, const char *fmt, ...);
|
||||
__printf(2, 3)
|
||||
__printf(2, 3) __cold
|
||||
void _dev_info(const struct device *dev, const char *fmt, ...);
|
||||
|
||||
#else
|
||||
|
|
|
|||
|
|
@ -21,12 +21,24 @@ struct ihex_binrec {
|
|||
uint8_t data[0];
|
||||
} __attribute__((packed));
|
||||
|
||||
static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p)
|
||||
{
|
||||
return be16_to_cpu(p->len) + sizeof(*p);
|
||||
}
|
||||
|
||||
/* Find the next record, taking into account the 4-byte alignment */
|
||||
static inline const struct ihex_binrec *
|
||||
__ihex_next_binrec(const struct ihex_binrec *rec)
|
||||
{
|
||||
const void *p = rec;
|
||||
|
||||
return p + ALIGN(ihex_binrec_size(rec), 4);
|
||||
}
|
||||
|
||||
static inline const struct ihex_binrec *
|
||||
ihex_next_binrec(const struct ihex_binrec *rec)
|
||||
{
|
||||
int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2;
|
||||
rec = (void *)&rec->data[next];
|
||||
rec = __ihex_next_binrec(rec);
|
||||
|
||||
return be16_to_cpu(rec->len) ? rec : NULL;
|
||||
}
|
||||
|
|
@ -34,18 +46,15 @@ ihex_next_binrec(const struct ihex_binrec *rec)
|
|||
/* Check that ihex_next_binrec() won't take us off the end of the image... */
|
||||
static inline int ihex_validate_fw(const struct firmware *fw)
|
||||
{
|
||||
const struct ihex_binrec *rec;
|
||||
size_t ofs = 0;
|
||||
const struct ihex_binrec *end, *rec;
|
||||
|
||||
while (ofs <= fw->size - sizeof(*rec)) {
|
||||
rec = (void *)&fw->data[ofs];
|
||||
rec = (const void *)fw->data;
|
||||
end = (const void *)&fw->data[fw->size - sizeof(*end)];
|
||||
|
||||
for (; rec <= end; rec = __ihex_next_binrec(rec)) {
|
||||
/* Zero length marks end of records */
|
||||
if (!be16_to_cpu(rec->len))
|
||||
if (rec == end && !be16_to_cpu(rec->len))
|
||||
return 0;
|
||||
|
||||
/* Point to next record... */
|
||||
ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -63,6 +63,7 @@ extern int platform_add_devices(struct platform_device **, int);
|
|||
struct platform_device_info {
|
||||
struct device *parent;
|
||||
struct fwnode_handle *fwnode;
|
||||
bool of_node_reused;
|
||||
|
||||
const char *name;
|
||||
int id;
|
||||
|
|
|
|||
|
|
@ -443,6 +443,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
|
|||
|
||||
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
|
||||
struct work_struct *work);
|
||||
extern bool queue_work_node(int node, struct workqueue_struct *wq,
|
||||
struct work_struct *work);
|
||||
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
|
||||
struct delayed_work *work, unsigned long delay);
|
||||
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue