Linux 4.17-rc5
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAlr4xw8eHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGNYoH/1d5zyMpVJVUKZ0K
LuEctCGby1PjSvSOhmMuxFVagFAqfBJXmwWTeohLfLG48r/Yk0AsZQ5HH13/8baj
k/T8UgUvKZKustndCRp+joQ3Pa1ZpcIFaWRvB8pKFCefJ/F/Lj4B4X1HYI7vLq0K
/ZBXUdy3ry0lcVuypnaARYAb2O7l/nyZIjZ3FhiuyymWe7Jpo+G7VK922LOMSX/y
VYFZCWa8nxN+yFhO0ao9X5k7ggIiUrEBtbfNrk19VtAn0hx+OYKW2KfJK/eHNey/
CKrOT+KAxU8VU29AEIbYzlL3yrQmULcEoIDiqJ/6m5m6JwsEbP6EqQHs0TiuQFpq
A0MO9rw=
=yjUP
-----END PGP SIGNATURE-----

Merge tag 'v4.17-rc5' into irq/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 4b96583869
949 changed files with 8721 additions and 4812 deletions
@@ -9,6 +9,9 @@
struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
 */
struct blk_mq_hw_ctx {
        struct {
                spinlock_t lock;
@@ -605,6 +605,11 @@ struct request_queue {
         * initialized by the low level device driver (e.g. scsi/sd.c).
         * Stacking drivers (device mappers) may or may not initialize
         * these fields.
         *
         * Reads of this information must be protected with blk_queue_enter() /
         * blk_queue_exit(). Modifying this information is only allowed while
         * no requests are being processed. See also blk_mq_freeze_queue() and
         * blk_mq_unfreeze_queue().
         */
        unsigned int nr_zones;
        unsigned long *seq_zones_bitmap;

@@ -737,6 +742,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_preempt_only(q) \
        test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)

extern int blk_set_preempt_only(struct request_queue *q);
extern void blk_clear_preempt_only(struct request_queue *q);
@@ -31,6 +31,7 @@ struct bpf_map_ops {
        void (*map_release)(struct bpf_map *map, struct file *map_file);
        void (*map_free)(struct bpf_map *map);
        int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
        void (*map_release_uref)(struct bpf_map *map);

        /* funcs callable from userspace and from eBPF programs */
        void *(*map_lookup_elem)(struct bpf_map *map, void *key);

@@ -339,8 +340,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
                        struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
                        __u32 __user *prog_ids, u32 request_cnt,
                        __u32 __user *prog_cnt);
                        u32 *prog_ids, u32 request_cnt,
                        u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
                        struct bpf_prog *exclude_prog,
                        struct bpf_prog *include_prog,

@@ -351,6 +352,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
                struct bpf_prog **_prog, *__prog; \
                struct bpf_prog_array *_array; \
                u32 _ret = 1; \
                preempt_disable(); \
                rcu_read_lock(); \
                _array = rcu_dereference(array); \
                if (unlikely(check_non_null && !_array))\

@@ -362,6 +364,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
        } \
_out: \
                rcu_read_unlock(); \
                preempt_enable_no_resched(); \
                _ret; \
        })

@@ -434,7 +437,6 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                        void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
void bpf_fd_array_map_clear(struct bpf_map *map);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
                        void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
@@ -25,6 +25,7 @@
#define PHY_ID_BCM54612E 0x03625e60
#define PHY_ID_BCM54616S 0x03625d10
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM89610 0x03625cd0

#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7260 0xae025190
@@ -77,7 +77,10 @@ struct ceph_osd_data {
                        u32 bio_length;
                };
#endif /* CONFIG_BLOCK */
                struct ceph_bvec_iter bvec_pos;
                struct {
                        struct ceph_bvec_iter bvec_pos;
                        u32 num_bvecs;
                };
        };
};

@@ -412,6 +415,10 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                        struct ceph_bio_iter *bio_pos,
                        u32 bio_length);
#endif /* CONFIG_BLOCK */
void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
                        unsigned int which,
                        struct bio_vec *bvecs, u32 num_bvecs,
                        u32 bytes);
void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
                        unsigned int which,
                        struct ceph_bvec_iter *bvec_pos);

@@ -426,7 +433,8 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
                        bool own_pages);
void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
                        unsigned int which,
                        struct bio_vec *bvecs, u32 bytes);
                        struct bio_vec *bvecs, u32 num_bvecs,
                        u32 bytes);
extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
                        unsigned int which,
                        struct page **pages, u64 length,
@@ -765,6 +765,9 @@ int __clk_mux_determine_rate(struct clk_hw *hw,
int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
int __clk_mux_determine_rate_closest(struct clk_hw *hw,
                        struct clk_rate_request *req);
int clk_mux_determine_rate_flags(struct clk_hw *hw,
                        struct clk_rate_request *req,
                        unsigned long flags);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
                        unsigned long max_rate);
@@ -256,7 +256,9 @@ enum probe_type {
 * automatically.
 * @pm: Power management operations of the device which matched
 * this driver.
 * @coredump: Called through sysfs to initiate a device coredump.
 * @coredump: Called when sysfs entry is written to. The device driver
 * is expected to call the dev_coredump API resulting in a
 * uevent.
 * @p: Driver core's private data, no one other than the driver
 * core can touch this.
 *

@@ -288,7 +290,7 @@ struct device_driver {
        const struct attribute_group **groups;

        const struct dev_pm_ops *pm;
        int (*coredump) (struct device *dev);
        void (*coredump) (struct device *dev);

        struct driver_private *p;
};
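
The two hunks above change the driver-core coredump hook from an int-returning to a void-returning callback and reword its kerneldoc. A minimal sketch of a driver supplying such a callback under the new signature; "my_driver", my_coredump() and the 128-byte buffer are invented for illustration, only dev_coredumpv() is the existing dev_coredump API the kerneldoc refers to:

#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/vmalloc.h>

static void my_coredump(struct device *dev)
{
        size_t len = 128;
        void *data = vmalloc(len);          /* dev_coredumpv() frees it via vfree() */

        if (!data)
                return;

        /* Fill "data" with device state here, then hand it to devcoredump. */
        dev_coredumpv(dev, data, len, GFP_KERNEL);
}

static struct device_driver my_driver = {
        .name     = "my-driver",
        .coredump = my_coredump,            /* matches the new void (*coredump)() type */
};

In practice the callback lives inside a bus-specific driver structure rather than a bare struct device_driver; the point here is only the return-type change.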
@@ -310,6 +310,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
 * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
 * instead of the latter), any change to them will be overwritten
 * by kernel. Returns a negative error code or zero.
 * @get_fecparam: Get the network device Forward Error Correction parameters.
 * @set_fecparam: Set the network device Forward Error Correction parameters.
 *
 * All operations are optional (i.e. the function pointer may be set
 * to %NULL) and callers must take this into account. Callers must
@@ -217,12 +217,10 @@ struct fsnotify_mark_connector {
        union { /* Object pointer [lock] */
                struct inode *inode;
                struct vfsmount *mnt;
        };
        union {
                struct hlist_head list;
                /* Used listing heads to free after srcu period expires */
                struct fsnotify_mark_connector *destroy_next;
        };
        struct hlist_head list;
};

/*
@@ -368,7 +368,9 @@ static inline void free_part_stats(struct hd_struct *part)
        part_stat_add(cpu, gendiskp, field, -subnd)

void part_in_flight(struct request_queue *q, struct hd_struct *part,
                        unsigned int inflight[2]);
                        unsigned int inflight[2]);
void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                        unsigned int inflight[2]);
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
                        int rw);
void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -161,9 +161,11 @@ struct hrtimer_clock_base {
enum hrtimer_base_type {
        HRTIMER_BASE_MONOTONIC,
        HRTIMER_BASE_REALTIME,
        HRTIMER_BASE_BOOTTIME,
        HRTIMER_BASE_TAI,
        HRTIMER_BASE_MONOTONIC_SOFT,
        HRTIMER_BASE_REALTIME_SOFT,
        HRTIMER_BASE_BOOTTIME_SOFT,
        HRTIMER_BASE_TAI_SOFT,
        HRTIMER_MAX_CLOCK_BASES,
};
@@ -62,6 +62,7 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
void kthread_park_complete(struct task_struct *k);

int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
@@ -1284,25 +1284,19 @@ enum {
};

static inline const struct cpumask *
mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
{
        const struct cpumask *mask;
        struct irq_desc *desc;
        unsigned int irq;
        int eqn;
        int err;

        err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
        err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
        if (err)
                return NULL;

        desc = irq_to_desc(irq);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
#else
        mask = desc->irq_common_data.affinity;
#endif
        return mask;
        return desc->affinity_hint;
}

#endif /* MLX5_DRIVER_H */
@@ -85,6 +85,7 @@ struct flchip {
        unsigned int write_suspended:1;
        unsigned int erase_suspended:1;
        unsigned long in_progress_block_addr;
        unsigned long in_progress_block_mask;

        struct mutex mutex;
        wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
@@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct mm_struct *mm)
        return 0;
}

void __oom_reap_task_mm(struct mm_struct *mm);

extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);
@@ -26,6 +26,7 @@

#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>

/*
 * Please note - only struct rb_augment_callbacks and the prototypes for
@@ -35,6 +35,7 @@

#include <linux/rbtree.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

struct latch_tree_node {
        struct rb_node node[2];
@@ -569,7 +569,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
void rproc_add_subdev(struct rproc *rproc,
                        struct rproc_subdev *subdev,
                        int (*probe)(struct rproc_subdev *subdev),
                        void (*remove)(struct rproc_subdev *subdev, bool graceful));
                        void (*remove)(struct rproc_subdev *subdev, bool crashed));

void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
@@ -112,17 +112,36 @@ struct task_group;

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state) \
        ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))

#define __set_current_state(state_value) \
        do { \
                WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_; \
                current->state = (state_value); \
        } while (0)

#define set_current_state(state_value) \
        do { \
                WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_; \
                smp_store_mb(current->state, (state_value)); \
        } while (0)

#define set_special_state(state_value) \
        do { \
                unsigned long flags; /* may shadow */ \
                WARN_ON_ONCE(!is_special_task_state(state_value)); \
                raw_spin_lock_irqsave(&current->pi_lock, flags); \
                current->task_state_change = _THIS_IP_; \
                current->state = (state_value); \
                raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
        } while (0)
#else
/*
 * set_current_state() includes a barrier so that the write of current->state

@@ -144,8 +163,8 @@ struct task_group;
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.

@@ -154,12 +173,33 @@ struct task_group;
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
 * a problem either because that will result in one extra go around the loop
 * and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
#define __set_current_state(state_value) \
        current->state = (state_value)

#define set_current_state(state_value) \
        smp_store_mb(current->state, (state_value))

/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
 * will not collide with our state change.
 */
#define set_special_state(state_value) \
        do { \
                unsigned long flags; /* may shadow */ \
                raw_spin_lock_irqsave(&current->pi_lock, flags); \
                current->state = (state_value); \
                raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
        } while (0)

#endif

/* Task command name length: */
@@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void)
{
        spin_lock_irq(&current->sighand->siglock);
        if (current->jobctl & JOBCTL_STOP_DEQUEUED)
                __set_current_state(TASK_STOPPED);
                set_special_state(TASK_STOPPED);
        spin_unlock_irq(&current->sighand->siglock);

        schedule();
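
The hunks above (apparently from <linux/sched.h> and <linux/sched/signal.h>) introduce set_special_state() and switch kernel_signal_stop() over to it for TASK_STOPPED. A minimal sketch of the two patterns, assuming an invented my_flag condition that some other thread sets and wakes on; none of the helper names below are part of this commit:

#include <linux/sched.h>

static bool my_flag;    /* assumed to be set by a waker thread */

static void my_wait_for_flag(void)
{
        /* Normal pattern: store the state, re-check the condition, sleep. */
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (my_flag)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
}

static void my_enter_stopped(void)
{
        /*
         * TASK_STOPPED is a "special" state with no wait-loop to save it,
         * so the state store must be serialized against wakeups; that is
         * what set_special_state() does under ->pi_lock.  Simplified from
         * kernel_signal_stop() above, which also holds siglock.
         */
        set_special_state(TASK_STOPPED);
        schedule();
}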
@@ -351,10 +351,10 @@ struct earlycon_id {
        char name[16];
        char compatible[128];
        int (*setup)(struct earlycon_device *, const char *options);
} __aligned(32);
};

extern const struct earlycon_id __earlycon_table[];
extern const struct earlycon_id __earlycon_table_end[];
extern const struct earlycon_id *__earlycon_table[];
extern const struct earlycon_id *__earlycon_table_end[];

#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
#define EARLYCON_USED_OR_UNUSED __used

@@ -362,12 +362,19 @@ extern const struct earlycon_id __earlycon_table_end[];
#define EARLYCON_USED_OR_UNUSED __maybe_unused
#endif

#define OF_EARLYCON_DECLARE(_name, compat, fn) \
        static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
                EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \
#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
        static const struct earlycon_id unique_id \
                EARLYCON_USED_OR_UNUSED __initconst \
                = { .name = __stringify(_name), \
                    .compatible = compat, \
                    .setup = fn }
                    .setup = fn }; \
        static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
                __section(__earlycon_table) \
                * const __PASTE(__p, unique_id) = &unique_id

#define OF_EARLYCON_DECLARE(_name, compat, fn) \
        _OF_EARLYCON_DECLARE(_name, compat, fn, \
                             __UNIQUE_ID(__earlycon_##_name))

#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
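
The earlycon hunks above (apparently include/linux/serial_core.h) move the earlycon entries themselves to __initconst and make __earlycon_table a table of pointers, while keeping the three-argument OF_EARLYCON_DECLARE() form that drivers use. A sketch of typical driver-side usage; "myuart", the compatible string, and the register offset are invented for illustration:

#include <linux/serial_core.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/io.h>

/* Hypothetical polled TX: write one character to an assumed TX register. */
static void myuart_early_putc(struct uart_port *port, int c)
{
        writel(c, port->membase + 0x00);
}

static void myuart_early_write(struct console *con, const char *s,
                               unsigned int n)
{
        struct earlycon_device *dev = con->data;

        uart_console_write(&dev->port, s, n, myuart_early_putc);
}

static int __init myuart_early_setup(struct earlycon_device *device,
                                     const char *options)
{
        if (!device->port.membase)
                return -ENODEV;

        device->con->write = myuart_early_write;
        return 0;
}
OF_EARLYCON_DECLARE(myuart, "acme,myuart", myuart_early_setup);

The macro call itself is unchanged for drivers; only the section plumbing behind it differs after this change.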
@@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash)
 * losing bits). This also has the property (wanted by the dcache)
 * that the msbits make a good hash table index.
 */
static inline unsigned long end_name_hash(unsigned long hash)
static inline unsigned int end_name_hash(unsigned long hash)
{
        return __hash_32((unsigned int)hash);
        return hash_long(hash, 32);
}

/*
@@ -60,6 +60,81 @@ struct ti_emif_pm_functions {
        u32 abort_sr;
} __packed __aligned(8);

static inline void ti_emif_asm_offsets(void)
{
        DEFINE(EMIF_SDCFG_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_sdcfg_val));
        DEFINE(EMIF_TIMING1_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_timing1_val));
        DEFINE(EMIF_TIMING2_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_timing2_val));
        DEFINE(EMIF_TIMING3_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_timing3_val));
        DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
        DEFINE(EMIF_ZQCFG_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_zqcfg_val));
        DEFINE(EMIF_PMCR_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_pmcr_val));
        DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
        DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
        DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
               offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
        DEFINE(EMIF_COS_CONFIG_OFFSET,
               offsetof(struct emif_regs_amx3, emif_cos_config));
        DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
               offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
        DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
               offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
        DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
               offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
        DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_ocp_config_val));
        DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
               offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
        DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
               offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
        DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
               offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
        DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
               offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
        DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
               offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
        DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
               offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
        DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));

        BLANK();

        DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
               offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
        DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
               offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
        DEFINE(EMIF_PM_CONFIG_OFFSET,
               offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
        DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
               offsetof(struct ti_emif_pm_data, regs_virt));
        DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
               offsetof(struct ti_emif_pm_data, regs_phys));
        DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));

        BLANK();

        DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
               offsetof(struct ti_emif_pm_functions, save_context));
        DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
               offsetof(struct ti_emif_pm_functions, restore_context));
        DEFINE(EMIF_PM_ENTER_SR_OFFSET,
               offsetof(struct ti_emif_pm_functions, enter_sr));
        DEFINE(EMIF_PM_EXIT_SR_OFFSET,
               offsetof(struct ti_emif_pm_functions, exit_sr));
        DEFINE(EMIF_PM_ABORT_SR_OFFSET,
               offsetof(struct ti_emif_pm_functions, abort_sr));
        DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
}

struct gen_pool;

int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst);
@@ -52,7 +52,6 @@ struct tk_read_base {
 * @offs_real: Offset clock monotonic -> clock realtime
 * @offs_boot: Offset clock monotonic -> clock boottime
 * @offs_tai: Offset clock monotonic -> clock tai
 * @time_suspended: Accumulated suspend time
 * @tai_offset: The current UTC to TAI offset in seconds
 * @clock_was_set_seq: The sequence number of clock was set events
 * @cs_was_changed_seq: The sequence number of clocksource change events

@@ -95,7 +94,6 @@ struct timekeeper {
        ktime_t offs_real;
        ktime_t offs_boot;
        ktime_t offs_tai;
        ktime_t time_suspended;
        s32 tai_offset;
        unsigned int clock_was_set_seq;
        u8 cs_was_changed_seq;
@@ -33,25 +33,20 @@ extern void ktime_get_ts64(struct timespec64 *ts);
extern time64_t ktime_get_seconds(void);
extern time64_t __ktime_get_real_seconds(void);
extern time64_t ktime_get_real_seconds(void);
extern void ktime_get_active_ts64(struct timespec64 *ts);

extern int __getnstimeofday64(struct timespec64 *tv);
extern void getnstimeofday64(struct timespec64 *tv);
extern void getboottime64(struct timespec64 *ts);

#define ktime_get_real_ts64(ts) getnstimeofday64(ts)

/* Clock BOOTTIME compatibility wrappers */
static inline void get_monotonic_boottime64(struct timespec64 *ts)
{
        ktime_get_ts64(ts);
}
#define ktime_get_real_ts64(ts) getnstimeofday64(ts)

/*
 * ktime_t based interfaces
 */

enum tk_offsets {
        TK_OFFS_REAL,
        TK_OFFS_BOOT,
        TK_OFFS_TAI,
        TK_OFFS_MAX,
};

@@ -62,10 +57,6 @@ extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
extern u32 ktime_get_resolution_ns(void);

/* Clock BOOTTIME compatibility wrappers */
static inline ktime_t ktime_get_boottime(void) { return ktime_get(); }
static inline u64 ktime_get_boot_ns(void) { return ktime_get(); }

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 */

@@ -74,6 +65,17 @@ static inline ktime_t ktime_get_real(void)
        return ktime_get_with_offset(TK_OFFS_REAL);
}

/**
 * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
 *
 * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the
 * time spent in suspend.
 */
static inline ktime_t ktime_get_boottime(void)
{
        return ktime_get_with_offset(TK_OFFS_BOOT);
}

/**
 * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
 */

@@ -100,6 +102,11 @@ static inline u64 ktime_get_real_ns(void)
        return ktime_to_ns(ktime_get_real());
}

static inline u64 ktime_get_boot_ns(void)
{
        return ktime_to_ns(ktime_get_boottime());
}

static inline u64 ktime_get_tai_ns(void)
{
        return ktime_to_ns(ktime_get_clocktai());

@@ -112,11 +119,17 @@ static inline u64 ktime_get_raw_ns(void)

extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
extern u64 ktime_get_boot_fast_ns(void);
extern u64 ktime_get_real_fast_ns(void);

/*
 * timespec64 interfaces utilizing the ktime based ones
 */
static inline void get_monotonic_boottime64(struct timespec64 *ts)
{
        *ts = ktime_to_timespec64(ktime_get_boottime());
}

static inline void timekeeping_clocktai64(struct timespec64 *ts)
{
        *ts = ktime_to_timespec64(ktime_get_clocktai());
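
The timekeeping hunks above restore ktime_get_boottime() as a real CLOCK_BOOTTIME accessor (TK_OFFS_BOOT) rather than an alias of ktime_get(), per the new kerneldoc: monotonic time that also counts time spent in suspend. A small sketch of the practical difference; the function and variable names are invented for illustration:

#include <linux/ktime.h>
#include <linux/timekeeping.h>

/*
 * Returns roughly how long the system was suspended while "work" ran:
 * the boottime delta keeps counting across suspend, the monotonic delta
 * does not, so their difference is the suspended time.
 */
static u64 my_suspended_ns_during_work(void (*work)(void))
{
        ktime_t boot_start = ktime_get_boottime();
        ktime_t mono_start = ktime_get();

        work();         /* possibly spans a suspend/resume cycle */

        return ktime_to_ns(ktime_sub(ktime_get_boottime(), boot_start)) -
               ktime_to_ns(ktime_sub(ktime_get(), mono_start));
}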
@@ -701,7 +701,7 @@ extern int tty_unregister_ldisc(int disc);
extern int tty_set_ldisc(struct tty_struct *tty, int disc);
extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
extern void tty_ldisc_release(struct tty_struct *tty);
extern void tty_ldisc_init(struct tty_struct *tty);
extern int __must_check tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
                        char *f, int count);
@@ -52,7 +52,7 @@
#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */

/* big enough to hold our biggest descriptor */
#define USB_COMP_EP0_BUFSIZ 1024
#define USB_COMP_EP0_BUFSIZ 4096

/* OS feature descriptor length <= 4kB */
#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096
@@ -24,24 +24,6 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
#define vbg_debug pr_debug
#endif

/**
 * Allocate memory for generic request and initialize the request header.
 *
 * Return: the allocated memory
 * @len: Size of memory block required for the request.
 * @req_type: The generic request type.
 */
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);

/**
 * Perform a generic request.
 *
 * Return: VBox status code
 * @gdev: The Guest extension device.
 * @req: Pointer to the request structure.
 */
int vbg_req_perform(struct vbg_dev *gdev, void *req);

int vbg_hgcm_connect(struct vbg_dev *gdev,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status);

@@ -52,11 +34,6 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
                  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
                  u32 parm_count, int *vbox_status);

int vbg_hgcm_call32(
        struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
        struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
        int *vbox_status);

/**
 * Convert a VirtualBox status code to a standard Linux kernel return value.
 * Return: 0 or negative errno value.
@@ -157,6 +157,9 @@ int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif

#define virtio_device_for_each_vq(vdev, vq) \
        list_for_each_entry(vq, &vdev->vqs, list)

/**
 * virtio_driver - operations for a virtio I/O driver
 * @driver: underlying device driver (populate name and owner).
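
The hunk above adds a virtio_device_for_each_vq() helper that walks every virtqueue a device has set up. A sketch of how a driver might use it; my_vdev_dump_vqs() is an invented example, not something added by this commit:

#include <linux/virtio.h>
#include <linux/device.h>

static void my_vdev_dump_vqs(struct virtio_device *vdev)
{
        struct virtqueue *vq;

        /* Iterates vdev->vqs, the list every virtqueue is linked onto. */
        virtio_device_for_each_vq(vdev, vq)
                dev_info(&vdev->dev, "vq %u: %s\n", vq->index, vq->name);
}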
@@ -305,4 +305,21 @@ do { \
        __ret; \
})

/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 *
 * @bit: the bit of the word being waited on
 * @word: the word being waited on, a kernel virtual address
 *
 * You can use this helper if bitflags are manipulated atomically rather than
 * non-atomically under a lock.
 */
static inline void clear_and_wake_up_bit(int bit, void *word)
{
        clear_bit_unlock(bit, word);
        /* See wake_up_bit() for which memory barrier you need to use. */
        smp_mb__after_atomic();
        wake_up_bit(word, bit);
}

#endif /* _LINUX_WAIT_BIT_H */
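
The hunk above adds clear_and_wake_up_bit(), which bundles the unlock-style clear, the barrier documented at wake_up_bit(), and the wakeup. A sketch pairing it with a waiter sleeping in wait_on_bit(); MY_BUSY_BIT and my_flags are invented names for illustration:

#include <linux/wait_bit.h>
#include <linux/sched.h>

#define MY_BUSY_BIT 0

static unsigned long my_flags;

static int my_wait_until_idle(void)
{
        /* Sleeps until bit 0 of my_flags is clear, or a signal arrives. */
        return wait_on_bit(&my_flags, MY_BUSY_BIT, TASK_INTERRUPTIBLE);
}

static void my_finish_work(void)
{
        /* Clear with release semantics, add the barrier, wake waiters. */
        clear_and_wake_up_bit(MY_BUSY_BIT, &my_flags);
}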