main drm pull request for 4.12 kernel
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJZCTzvAAoJEAx081l5xIa+9kcQAJsQiija4/7QGx6IzakOMqjx
WulJ3zYG/cU/HLwCBcuWRDF6wAj+7iWNeLCPmolHwEazcI8tQVdgMlWtbdMbDh8U
ckzD3FBXsEVfIfab+u6tyoUkm3l/VDhMXbjkUK7NTo/+dkRqe5LuFfZPCGN09jft
Y+5salkRXzDhXPSFsqmjfzhx1v7PTgf0a5HUenKWEWOv+sJQaW4/iPvcDSIcg5qR
l9WjAqro1NpFYhUodnh6DkLeledL1U5whdtp/yvrUAck8y+WP/jwGYmQ7pZ0UkQm
f0M3kV6K67ox9eqN++jsGX5o8sB1qF01Uh95kBAnyzYzsw4ZlMCx6pV7PDX+J88M
UBNMEqX10hrLkNJA9lGjPWx+/6fudcwg9anKvTRO3Uyx7MbYoJAgjzAM+yBqqtV0
8Otxa4Bw0V2pmUD+0lqJDERRvE77VCXkLb8SaI5lQo0MHpQqT2cZA+GD+B+rZHO6
Ie5LDFY87vM2GG1IECufG+xOa3v6sn2FfQ1ouu1KNGKOAMBKcQCQyQx3kGVuNW2i
HDACVXALJgXdRlVLm4jydOCZdRoguX7AWmRjtdwxgaO+lBcGfLhkXdjLQ7Ho+29p
32ArJfkZPfA53vMB6lHxAfbtrs1q2RzyVnPHj/KqeJnGZbABKTsF2HQ5BQc4Xq/J
mqXoz6Oubdvk4Pwyx7Ne
=UxFF
-----END PGP SIGNATURE-----

Merge tag 'drm-for-v4.12' of git://people.freedesktop.org/~airlied/linux into drm-misc-next

Backmerging Dave's 'drm-for-v4.12' pull request now that it's landed.
There are a bunch of non-drm changes which are just random bits we
hadn't yet picked up in misc-next.

main drm pull request for 4.12 kernel

Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/CAPM=9ty0jHgzG18zOr5CYODyTqZfH55kOCOFqNnXiWnTb_uNWw@mail.gmail.com
commit 3c390df333
762 changed files with 16683 additions and 7837 deletions
include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
 	atomic_t		nr_active;
 
+	struct delayed_work	delayed_run_work;
 	struct delayed_work	delay_work;
 
 	struct hlist_node	cpuhp_dead;
 	struct kobject		kobj;
@@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,

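For orientation: blk_mq_delay_run_hw_queue() lets a driver re-run a hardware queue after a delay rather than immediately. A minimal sketch of how a queue_rq handler might use it when the device is temporarily out of resources (my_queue_rq and my_device_busy are hypothetical, not part of this diff):

	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
			       const struct blk_mq_queue_data *bd)
	{
		if (my_device_busy(hctx->driver_data)) {	/* hypothetical check */
			blk_mq_stop_hw_queue(hctx);
			/* retry dispatch on this hw queue in ~100 ms */
			blk_mq_delay_run_hw_queue(hctx, 100);
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
		/* ... normal submission path elided ... */
		return BLK_MQ_RQ_QUEUE_OK;
	}
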
include/linux/blkdev.h
@@ -610,7 +610,6 @@ struct request_queue {
 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueuable */
 #define QUEUE_FLAG_DAX         26	/* device supports DAX */
 #define QUEUE_FLAG_STATS       27	/* track rq completion times */
-#define QUEUE_FLAG_RESTART     28	/* queue needs restart at completion */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -1673,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
 	return true;
 }
 
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
-			 struct bio *next)
+static inline bool bio_will_gap(struct request_queue *q,
+				struct request *prev_rq,
+				struct bio *prev,
+				struct bio *next)
 {
 	if (bio_has_data(prev) && queue_virt_boundary(q)) {
 		struct bio_vec pb, nb;
 
+		/*
+		 * don't merge if the 1st bio starts with non-zero
+		 * offset, otherwise it is quite difficult to respect
+		 * sg gap limit. We work hard to merge a huge number of small
+		 * single bios in case of mkfs.
+		 */
+		if (prev_rq)
+			bio_get_first_bvec(prev_rq->bio, &pb);
+		else
+			bio_get_first_bvec(prev, &pb);
+		if (pb.bv_offset)
+			return true;
+
+		/*
+		 * We don't need to worry about the situation that the
+		 * merged segment ends in unaligned virt boundary:
+		 *
+		 * - if 'pb' ends aligned, the merged segment ends aligned
+		 * - if 'pb' ends unaligned, the next bio must include
+		 *   one single bvec of 'nb', otherwise the 'nb' can't
+		 *   merge with 'pb'
+		 */
 		bio_get_last_bvec(prev, &pb);
 		bio_get_first_bvec(next, &nb);
@@ -1691,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
 {
-	return bio_will_gap(req->q, req->biotail, bio);
+	return bio_will_gap(req->q, req, req->biotail, bio);
 }
 
 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
 {
-	return bio_will_gap(req->q, bio, req->bio);
+	return bio_will_gap(req->q, NULL, bio, req->bio);
 }
 
 int kblockd_schedule_work(struct work_struct *work);

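For reference, the gap test these helpers build on is roughly: merging is refused if the next segment starts at a non-zero offset, or if the previous one does not end on the queue's virtual boundary. A simplified standalone restatement of that condition (modeled on __bvec_gap_to_prev(); the parameter names are illustrative):

	/* mask is queue_virt_boundary(q), e.g. 0xfff for a 4K boundary */
	static bool bvec_gap(unsigned long mask, unsigned int prev_offset,
			     unsigned int prev_len, unsigned int next_offset)
	{
		return next_offset || ((prev_offset + prev_len) & mask);
	}

The new prev_rq argument exists so a back merge can check the first bvec of the whole request (prev_rq->bio), while a front merge passes NULL because the incoming bio becomes the new head.
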
include/linux/cgroup.h
@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 	pr_cont_kernfs_path(cgrp->kn);
 }
 
+static inline void cgroup_init_kthreadd(void)
+{
+	/*
+	 * kthreadd is inherited by all kthreads, keep it in the root so
+	 * that the new kthreads are guaranteed to stay in the root until
+	 * initialization is finished.
+	 */
+	current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+	/*
+	 * This kthread finished initialization.  The creator should have
+	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+	 */
+	current->no_cgroup_migration = 0;
+}
+
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
 
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 					       struct cgroup *ancestor)

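These two hooks pair up around kthread creation: kthreadd pins itself (and thus every freshly forked kthread) to the root cgroup, and each kthread lifts the restriction once its own setup is done. Schematically, the intended call order looks like this (the surrounding functions live in kernel/kthread.c; this is only a sketch):

	/* in kthreadd(), once at boot */
	cgroup_init_kthreadd();		/* new kthreads stay in the root cgroup */

	/* in each kthread, before it runs its threadfn */
	cgroup_kthread_ready();		/* init done; migration allowed again */
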
include/linux/dma-buf.h
@@ -39,13 +39,13 @@ struct dma_buf_attachment;
 
 /**
  * struct dma_buf_ops - operations possible on struct dma_buf
- * @kmap_atomic: maps a page from the buffer into kernel address
- * space, users may not block until the subsequent unmap call.
- * This callback must not sleep.
- * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
- * This Callback must not sleep.
- * @kmap: maps a page from the buffer into kernel address space.
- * @kunmap: [optional] unmaps a page from the buffer.
+ * @map_atomic: maps a page from the buffer into kernel address
+ * space, users may not block until the subsequent unmap call.
+ * This callback must not sleep.
+ * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
+ * This Callback must not sleep.
+ * @map: maps a page from the buffer into kernel address space.
+ * @unmap: [optional] unmaps a page from the buffer.
  * @vmap: [optional] creates a virtual mapping for the buffer into kernel
  * address space. Same restrictions as for vmap and friends apply.
  * @vunmap: [optional] unmaps a vmap from the buffer
@@ -206,10 +206,10 @@ struct dma_buf_ops {
 	 * to be restarted.
 	 */
 	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
-	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
-	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
-	void *(*kmap)(struct dma_buf *, unsigned long);
-	void (*kunmap)(struct dma_buf *, unsigned long, void *);
+	void *(*map_atomic)(struct dma_buf *, unsigned long);
+	void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
+	void *(*map)(struct dma_buf *, unsigned long);
+	void (*unmap)(struct dma_buf *, unsigned long, void *);
 
 	/**
 	 * @mmap:

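For exporters this is a mechanical rename; the callbacks moved away from the kmap_atomic/kunmap_atomic names, which collide with the highmem macros of the same name on some configurations. A sketch of an updated ops table (the my_* callbacks are placeholders):

	static const struct dma_buf_ops my_dmabuf_ops = {
		/* formerly .kmap_atomic / .kunmap_atomic / .kmap / .kunmap */
		.map_atomic	= my_map_atomic,
		.unmap_atomic	= my_unmap_atomic,
		.map		= my_map,
		.unmap		= my_unmap,
	};
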
include/linux/elevator.h
@@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct elevator_queue *);
+extern void elevator_exit(struct request_queue *, struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
 extern bool elv_bio_merge_ok(struct request *, struct bio *);
 extern struct elevator_queue *elevator_alloc(struct request_queue *,

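Call sites change accordingly, since tearing a scheduler down now needs the queue as context. Sketch of the before/after at a typical caller:

	/* before */
	elevator_exit(q->elevator);
	/* after */
	elevator_exit(q, q->elevator);
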
include/linux/irqchip/arm-gic.h
@@ -96,6 +96,9 @@
 #define GICH_MISR_EOI			(1 << 0)
 #define GICH_MISR_U			(1 << 1)
 
+#define GICV_PMR_PRIORITY_SHIFT	3
+#define GICV_PMR_PRIORITY_MASK	(0x1f << GICV_PMR_PRIORITY_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/irqdomain.h>

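The new macros just name the 5-bit priority field in a GICV PMR value. A sketch of extracting and re-packing it (pmr stands for a raw register value obtained elsewhere):

	u32 prio = (pmr & GICV_PMR_PRIORITY_MASK) >> GICV_PMR_PRIORITY_SHIFT;
	u32 out  = (prio << GICV_PMR_PRIORITY_SHIFT) & GICV_PMR_PRIORITY_MASK;
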
include/linux/mfd/cros_ec.h
@@ -35,10 +35,11 @@
  * Max bus-specific overhead incurred by request/responses.
  * I2C requires 1 additional byte for requests.
  * I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
  */
 #define EC_PROTO_VERSION_UNKNOWN	0
 #define EC_MAX_REQUEST_OVERHEAD		1
-#define EC_MAX_RESPONSE_OVERHEAD	2
+#define EC_MAX_RESPONSE_OVERHEAD	32
 
 /*
  * Command interface between EC and AP, for LPC, I2C and SPI interfaces.

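These constants feed buffer sizing: one allocation has to cover the worst-case framing of any supported transport, and SPI responses can need up to 32 extra bytes. A sketch of the kind of arithmetic they feed (max_request_payload and max_response_payload are illustrative; the real fields live in struct cros_ec_device):

	size_t dout_len = sizeof(struct ec_host_request) +
			  max_request_payload + EC_MAX_REQUEST_OVERHEAD;
	size_t din_len  = sizeof(struct ec_host_response) +
			  max_response_payload + EC_MAX_RESPONSE_OVERHEAD;
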
include/linux/mmu_notifier.h
@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 	___pud;								\
 })
 
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)		\
-({									\
-	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
-	pmd_t ___pmd;							\
-									\
-	___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd);		\
-	mmu_notifier_invalidate_range(__mm, ___haddr,			\
-				      ___haddr + HPAGE_PMD_SIZE);	\
-									\
-	___pmd;								\
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 #define ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */

include/linux/nvme.h
@@ -64,26 +64,26 @@ enum {
  * RDMA_QPTYPE field
  */
 enum {
-	NVMF_RDMA_QPTYPE_CONNECTED	= 0, /* Reliable Connected */
-	NVMF_RDMA_QPTYPE_DATAGRAM	= 1, /* Reliable Datagram */
+	NVMF_RDMA_QPTYPE_CONNECTED	= 1, /* Reliable Connected */
+	NVMF_RDMA_QPTYPE_DATAGRAM	= 2, /* Reliable Datagram */
 };
 
 /* RDMA QP Service Type codes for Discovery Log Page entry TSAS
  * RDMA_QPTYPE field
  */
 enum {
-	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 0, /* No Provider Specified */
-	NVMF_RDMA_PRTYPE_IB		= 1, /* InfiniBand */
-	NVMF_RDMA_PRTYPE_ROCE		= 2, /* InfiniBand RoCE */
-	NVMF_RDMA_PRTYPE_ROCEV2		= 3, /* InfiniBand RoCEV2 */
-	NVMF_RDMA_PRTYPE_IWARP		= 4, /* IWARP */
+	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 1, /* No Provider Specified */
+	NVMF_RDMA_PRTYPE_IB		= 2, /* InfiniBand */
+	NVMF_RDMA_PRTYPE_ROCE		= 3, /* InfiniBand RoCE */
+	NVMF_RDMA_PRTYPE_ROCEV2		= 4, /* InfiniBand RoCEV2 */
+	NVMF_RDMA_PRTYPE_IWARP		= 5, /* IWARP */
 };
 
 /* RDMA Connection Management Service Type codes for Discovery Log Page
  * entry TSAS RDMA_CMS field
 */
 enum {
-	NVMF_RDMA_CMS_RDMA_CM	= 0, /* Sockets based enpoint addressing */
+	NVMF_RDMA_CMS_RDMA_CM	= 1, /* Sockets based endpoint addressing */
 };
 
 #define NVMF_AQ_DEPTH		32

include/linux/pinctrl/pinctrl.h
@@ -145,8 +145,9 @@ struct pinctrl_desc {
 extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
 				     struct device *dev, void *driver_data,
 				     struct pinctrl_dev **pctldev);
+extern int pinctrl_enable(struct pinctrl_dev *pctldev);
 
-/* Please use pinctrl_register_and_init() instead */
+/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
 extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
 					    struct device *dev, void *driver_data);
 

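The split lets a driver finish its own setup between registering the controller and exposing it to consumers. A sketch of the two-step sequence in a probe function (my_pinctrl_desc and the pc structure are placeholders):

	ret = pinctrl_register_and_init(&my_pinctrl_desc, &pdev->dev,
					pc, &pc->pctldev);
	if (ret)
		return ret;

	/* ... driver-private setup that the pin callbacks depend on ... */

	return pinctrl_enable(pc->pctldev);	/* only now visible to consumers */
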
include/linux/reset.h
@@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc);
 struct reset_control *__of_reset_control_get(struct device_node *node,
 				     const char *id, int index, bool shared,
 				     bool optional);
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+					  int index, bool shared,
+					  bool optional);
 void reset_control_put(struct reset_control *rstc);
 struct reset_control *__devm_reset_control_get(struct device *dev,
 				     const char *id, int index, bool shared,
@@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get(
 	return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
+static inline struct reset_control *__reset_control_get(
+					struct device *dev, const char *id,
+					int index, bool shared, bool optional)
+{
+	return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
 static inline struct reset_control *__devm_reset_control_get(
 					struct device *dev, const char *id,
 					int index, bool shared, bool optional)
@@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 #ifndef CONFIG_RESET_CONTROLLER
 	WARN_ON(1);
 #endif
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-				      false);
+	return __reset_control_get(dev, id, 0, false, false);
 }
 
 /**
@@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 static inline struct reset_control *reset_control_get_shared(
 				struct device *dev, const char *id)
 {
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-				      false);
+	return __reset_control_get(dev, id, 0, true, false);
 }
 
 static inline struct reset_control *reset_control_get_optional_exclusive(
 				struct device *dev, const char *id)
 {
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-				      true);
+	return __reset_control_get(dev, id, 0, false, true);
 }
 
 static inline struct reset_control *reset_control_get_optional_shared(
 				struct device *dev, const char *id)
 {
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-				      true);
+	return __reset_control_get(dev, id, 0, true, true);
 }
 
 /**

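Callers are unaffected; the wrappers now route through __reset_control_get(), which copes with a device that has no of_node instead of blindly dereferencing it. Typical usage stays as before (the "bus" id is illustrative):

	struct reset_control *rstc = reset_control_get_shared(dev, "bus");

	if (IS_ERR(rstc))
		return PTR_ERR(rstc);
	reset_control_assert(rstc);
	/* ... */
	reset_control_deassert(rstc);
	reset_control_put(rstc);
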
include/linux/sched.h
@@ -604,6 +604,10 @@ struct task_struct {
 #ifdef CONFIG_COMPAT_BRK
 	unsigned			brk_randomized:1;
 #endif
+#ifdef CONFIG_CGROUPS
+	/* disallow userland-initiated cgroup migration */
+	unsigned			no_cgroup_migration:1;
+#endif
 
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 

include/linux/stat.h
@@ -26,6 +26,7 @@ struct kstat {
 	unsigned int	nlink;
 	uint32_t	blksize;	/* Preferred I/O size */
 	u64		attributes;
+	u64		attributes_mask;
 #define KSTAT_ATTR_FS_IOC_FLAGS				\
 	(STATX_ATTR_COMPRESSED |			\
 	 STATX_ATTR_IMMUTABLE |				\

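attributes_mask tells userspace which STATX_ATTR_* bits a filesystem can report at all, so a clear bit in stx_attributes can be distinguished from an unsupported one. A sketch of a ->getattr() filling both fields (the predicate is hypothetical):

	if (my_inode_is_immutable(inode))	/* hypothetical check */
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= STATX_ATTR_IMMUTABLE | STATX_ATTR_COMPRESSED;
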
include/linux/uio.h
@@ -39,7 +39,10 @@ struct iov_iter {
 	};
 	union {
 		unsigned long nr_segs;
-		int idx;
+		struct {
+			int idx;
+			int start_idx;
+		};
 	};
 };
 
@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,

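iov_iter_revert() walks an iterator backwards (start_idx exists to make that possible for pipe-backed iterators), so a caller that consumed data and then failed can undo the consumption. A sketch, where device_write is a stand-in for whatever fails after the copy:

	size_t copied = copy_from_iter(buf, len, iter);	/* advances iter */

	if (device_write(buf, copied) < 0) {
		iov_iter_revert(iter, copied);	/* caller sees no progress */
		return -EIO;
	}
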
include/linux/virtio.h
@@ -167,6 +167,7 @@ struct virtio_driver {
 	unsigned int feature_table_size;
 	const unsigned int *feature_table_legacy;
 	unsigned int feature_table_size_legacy;
+	int (*validate)(struct virtio_device *dev);
 	int (*probe)(struct virtio_device *dev);
 	void (*scan)(struct virtio_device *dev);
 	void (*remove)(struct virtio_device *dev);

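The validate() hook runs after the device's features are read but before probe(), giving a driver a chance to veto or adjust feature bits early. A sketch of a driver insisting on VIRTIO_F_VERSION_1 (the choice of feature and the my_* names are illustrative):

	static int my_validate(struct virtio_device *vdev)
	{
		if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
			return -ENODEV;	/* refuse legacy-only devices */
		return 0;
	}

	static struct virtio_driver my_driver = {
		/* ... id and feature tables ... */
		.validate	= my_validate,
		.probe		= my_probe,
	};
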