Merge remote branch 'origin/master' into next-merge

commit 3bc4f3993b
Alex Williamson, 2013-09-04 11:25:44 -06:00
3081 changed files with 231530 additions and 93622 deletions


@@ -481,6 +481,13 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_sleep(u8 sleep_state,
u32 pm1a_control, u32 pm1b_control);
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
u32 val_a, u32 val_b));
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
#ifdef CONFIG_X86
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else


@@ -239,6 +239,8 @@ enum {
ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E,
ATA_CMD_FPDMA_READ = 0x60,
ATA_CMD_FPDMA_WRITE = 0x61,
ATA_CMD_FPDMA_SEND = 0x64,
ATA_CMD_FPDMA_RECV = 0x65,
ATA_CMD_PIO_READ = 0x20,
ATA_CMD_PIO_READ_EXT = 0x24,
ATA_CMD_PIO_WRITE = 0x30,
@@ -293,8 +295,13 @@ enum {
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
/* Subcmds for ATA_CMD_FPDMA_SEND */
ATA_SUBCMD_FPDMA_SEND_DSM = 0x00,
ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02,
/* READ_LOG_EXT pages */
ATA_LOG_SATA_NCQ = 0x10,
ATA_LOG_NCQ_SEND_RECV = 0x13,
ATA_LOG_SATA_ID_DEV_DATA = 0x30,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_DEVSLP_OFFSET = 0x30,
@@ -305,6 +312,15 @@ enum {
ATA_LOG_DEVSLP_VALID = 0x07,
ATA_LOG_DEVSLP_VALID_MASK = 0x80,
/* NCQ send and receive log */
ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00,
ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = (1 << 0),
ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 0x04,
ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = (1 << 0),
ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 0x08,
ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 0x0C,
ATA_LOG_NCQ_SEND_RECV_SIZE = 0x10,
/* READ/WRITE LONG (obsolete) */
ATA_CMD_READ_LONG = 0x22,
ATA_CMD_READ_LONG_ONCE = 0x23,
@@ -446,22 +462,6 @@ enum {
SERR_TRANS_ST_ERROR = (1 << 24), /* Transport state trans. error */
SERR_UNRECOG_FIS = (1 << 25), /* Unrecognized FIS */
SERR_DEV_XCHG = (1 << 26), /* device exchanged */
/* struct ata_taskfile flags */
ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
/* protocol flags */
ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */
ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */
ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA,
ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */
ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */
};
enum ata_tf_protocols {
@@ -488,83 +488,6 @@ struct ata_bmdma_prd {
__le32 flags_len;
};
struct ata_taskfile {
unsigned long flags; /* ATA_TFLAG_xxx */
u8 protocol; /* ATA_PROT_xxx */
u8 ctl; /* control reg */
u8 hob_feature; /* additional data */
u8 hob_nsect; /* to support LBA48 */
u8 hob_lbal;
u8 hob_lbam;
u8 hob_lbah;
u8 feature;
u8 nsect;
u8 lbal;
u8 lbam;
u8 lbah;
u8 device;
u8 command; /* IO operation */
};
/*
* protocol tests
*/
static inline unsigned int ata_prot_flags(u8 prot)
{
switch (prot) {
case ATA_PROT_NODATA:
return 0;
case ATA_PROT_PIO:
return ATA_PROT_FLAG_PIO;
case ATA_PROT_DMA:
return ATA_PROT_FLAG_DMA;
case ATA_PROT_NCQ:
return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ;
case ATAPI_PROT_NODATA:
return ATA_PROT_FLAG_ATAPI;
case ATAPI_PROT_PIO:
return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO;
case ATAPI_PROT_DMA:
return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA;
}
return 0;
}
static inline int ata_is_atapi(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI;
}
static inline int ata_is_nodata(u8 prot)
{
return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA);
}
static inline int ata_is_pio(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO;
}
static inline int ata_is_dma(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
}
static inline int ata_is_ncq(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ;
}
static inline int ata_is_data(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA;
}
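
These predicates make protocol dispatch a short chain of tests. A minimal sketch, assuming a hypothetical queued command qc whose taskfile protocol is already set; the issue_*() helpers are placeholders, not libata API. Note that ata_is_ncq() implies ata_is_dma() here, so the NCQ test must come first:

	if (ata_is_ncq(qc->tf.protocol))
		issue_ncq(qc);			/* hypothetical helper */
	else if (ata_is_dma(qc->tf.protocol))
		issue_dma(qc);			/* hypothetical helper */
	else if (ata_is_pio(qc->tf.protocol))
		issue_pio(qc);			/* hypothetical helper */
	else
		issue_nodata(qc);		/* no data phase */
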
/*
* id tests
*/
@@ -865,6 +788,11 @@ static inline int ata_id_rotation_rate(const u16 *id)
return val;
}
static inline bool ata_id_has_ncq_send_and_recv(const u16 *id)
{
return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6);
}
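
A minimal sketch of how this identify bit and the NCQ send/recv log constants above combine, assuming id holds IDENTIFY DEVICE data and log holds log page ATA_LOG_NCQ_SEND_RECV (0x13) that the caller has already read; the function name is hypothetical:

static bool dev_can_queue_trim(const u16 *id, const u8 *log)
{
	if (!ata_id_has_ncq_send_and_recv(id))
		return false;
	/* TRIM support is bit 0 of the DSM field in the log page */
	return log[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &
	       ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
}
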
static inline bool ata_id_has_trim(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
@@ -1060,15 +988,6 @@ static inline unsigned ata_set_lba_range_entries(void *_buffer,
return used_bytes;
}
static inline int is_multi_taskfile(struct ata_taskfile *tf)
{
return (tf->command == ATA_CMD_READ_MULTI) ||
(tf->command == ATA_CMD_WRITE_MULTI) ||
(tf->command == ATA_CMD_READ_MULTI_EXT) ||
(tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
(tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
}
static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))


@@ -124,4 +124,6 @@
#define ATMEL_US_NER 0x44 /* Number of Errors Register */
#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
#define ATMEL_US_NAME 0xf0 /* Ip Name */
#endif


@@ -66,22 +66,25 @@ enum cgroup_subsys_id {
/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
/*
* The cgroup that this subsystem is attached to. Useful
* for subsystems that want to know about the cgroup
* hierarchy structure
*/
/* the cgroup that this css is attached to */
struct cgroup *cgroup;
/* the cgroup subsystem that this css is attached to */
struct cgroup_subsys *ss;
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
/* the parent css */
struct cgroup_subsys_state *parent;
unsigned long flags;
/* ID for this css, if possible */
struct css_id __rcu *id;
/* Used to put @cgroup->dentry on the last css_put() */
struct work_struct dput_work;
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
};
/* bits in struct cgroup_subsys_state flags field */
@@ -161,7 +164,16 @@ struct cgroup_name {
struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
int id; /* ida allocated in-hierarchy ID */
/*
* idr allocated in-hierarchy ID.
*
* The ID of the root cgroup is always 0, and a new cgroup
* will be assigned the smallest available ID.
*/
int id;
/* the number of attached css's */
int nr_css;
/*
* We link our 'sibling' struct into our parent's 'children'.
@@ -196,7 +208,7 @@ struct cgroup {
struct cgroup_name __rcu *name;
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
struct cgroupfs_root *root;
@@ -220,10 +232,12 @@ struct cgroup {
struct list_head pidlists;
struct mutex pidlist_mutex;
/* dummy css with NULL ->ss, points back to this cgroup */
struct cgroup_subsys_state dummy_css;
/* For css percpu_ref killing and RCU-protected deletion */
struct rcu_head rcu_head;
struct work_struct destroy_work;
atomic_t css_kill_cnt;
/* List of events which userspace want to receive */
struct list_head event_list;
@@ -322,7 +336,7 @@ struct cgroupfs_root {
unsigned long flags;
/* IDs for cgroups in this hierarchy */
struct ida cgroup_ida;
struct idr cgroup_idr;
/* The path to use for release notifications. */
char release_agent_path[PATH_MAX];
@@ -394,9 +408,10 @@ struct cgroup_map_cb {
/* cftype->flags */
enum {
CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cg */
CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cg */
CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
CFTYPE_INSANE = (1 << 2), /* don't create if sane_behavior */
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
};
#define MAX_CFTYPE_NAME 64
@@ -424,35 +439,41 @@ struct cftype {
/* CFTYPE_* flags */
unsigned int flags;
/*
* The subsys this file belongs to. Initialized automatically
* during registration. NULL for cgroup core files.
*/
struct cgroup_subsys *ss;
int (*open)(struct inode *inode, struct file *file);
ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
ssize_t (*read)(struct cgroup_subsys_state *css, struct cftype *cft,
struct file *file,
char __user *buf, size_t nbytes, loff_t *ppos);
/*
* read_u64() is a shortcut for the common case of returning a
* single integer. Use it in place of read()
*/
u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
/*
* read_s64() is a signed version of read_u64()
*/
s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
/*
* read_map() is used for defining a map of key/value
* pairs. It should call cb->fill(cb, key, value) for each
* entry. The key/value pairs (and their ordering) should not
* change between reboots.
*/
int (*read_map)(struct cgroup *cgrp, struct cftype *cft,
int (*read_map)(struct cgroup_subsys_state *css, struct cftype *cft,
struct cgroup_map_cb *cb);
/*
* read_seq_string() is used for outputting a simple sequence
* using seqfile.
*/
int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft,
struct seq_file *m);
int (*read_seq_string)(struct cgroup_subsys_state *css,
struct cftype *cft, struct seq_file *m);
ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
ssize_t (*write)(struct cgroup_subsys_state *css, struct cftype *cft,
struct file *file,
const char __user *buf, size_t nbytes, loff_t *ppos);
@@ -461,18 +482,20 @@ struct cftype {
* a single integer (as parsed by simple_strtoull) from
* userspace. Use in place of write(); return 0 or error.
*/
int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
u64 val);
/*
* write_s64() is a signed version of write_u64()
*/
int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);
int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
s64 val);
/*
* write_string() is passed a nul-terminated kernelspace
* buffer of maximum length determined by max_write_len.
* Returns 0 or -ve error code.
*/
int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
int (*write_string)(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer);
/*
* trigger() callback can be used to get some kick from the
@@ -480,7 +503,7 @@ struct cftype {
* at all. The private field can be used to determine the
* kick type for multiplexing.
*/
int (*trigger)(struct cgroup *cgrp, unsigned int event);
int (*trigger)(struct cgroup_subsys_state *css, unsigned int event);
int (*release)(struct inode *inode, struct file *file);
@@ -490,16 +513,18 @@ struct cftype {
* you want to provide this functionality. Use eventfd_signal()
* on eventfd to send notification to userspace.
*/
int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
struct eventfd_ctx *eventfd, const char *args);
int (*register_event)(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd,
const char *args);
/*
* unregister_event() callback will be called when userspace
* closes the eventfd or when the cgroup is removed.
* This callback must be implemented if you want to provide
* notification functionality.
*/
void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
struct eventfd_ctx *eventfd);
void (*unregister_event)(struct cgroup_subsys_state *css,
struct cftype *cft,
struct eventfd_ctx *eventfd);
};
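
To make the css-based signatures concrete, a minimal sketch of a cftype table; my_css_count() and the subsystem it would belong to are hypothetical:

static u64 my_count_read(struct cgroup_subsys_state *css,
			 struct cftype *cft)
{
	return my_css_count(css);	/* hypothetical accessor */
}

static struct cftype my_files[] = {
	{
		.name = "count",
		.read_u64 = my_count_read,
	},
	{ }	/* terminator */
};
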
/*
@@ -512,15 +537,6 @@ struct cftype_set {
struct cftype *cfts;
};
struct cgroup_scanner {
struct cgroup *cg;
int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
void (*process_task)(struct task_struct *p,
struct cgroup_scanner *scan);
struct ptr_heap *heap;
void *data;
};
/*
* See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This
* function can be called as long as @cgrp is accessible.
@@ -537,7 +553,7 @@ static inline const char *cgroup_name(const struct cgroup *cgrp)
}
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
@@ -553,20 +569,22 @@ int cgroup_task_count(const struct cgroup *cgrp);
struct cgroup_taskset;
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
int subsys_id);
int cgroup_taskset_size(struct cgroup_taskset *tset);
/**
* cgroup_taskset_for_each - iterate cgroup_taskset
* @task: the loop cursor
* @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
* @skip_css: skip if task's css matches this, %NULL to iterate through all
* @tset: taskset to iterate
*/
#define cgroup_taskset_for_each(task, skip_cgrp, tset) \
#define cgroup_taskset_for_each(task, skip_css, tset) \
for ((task) = cgroup_taskset_first((tset)); (task); \
(task) = cgroup_taskset_next((tset))) \
if (!(skip_cgrp) || \
cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
if (!(skip_css) || \
cgroup_taskset_cur_css((tset), \
(skip_css)->ss->subsys_id) != (skip_css))
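
A minimal sketch of the iterator inside a ->can_attach() callback, passing NULL so every task in the set is visited; my_policy_allows() is a hypothetical check:

static int my_can_attach(struct cgroup_subsys_state *css,
			 struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, NULL, tset)
		if (!my_policy_allows(task))	/* hypothetical check */
			return -EPERM;
	return 0;
}
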
/*
* Control Group subsystem type.
@@ -574,18 +592,22 @@ int cgroup_taskset_size(struct cgroup_taskset *tset);
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
int (*css_online)(struct cgroup *cgrp);
void (*css_offline)(struct cgroup *cgrp);
void (*css_free)(struct cgroup *cgrp);
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
int (*css_online)(struct cgroup_subsys_state *css);
void (*css_offline)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
int (*can_attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
void (*attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
void (*fork)(struct task_struct *task);
void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
void (*exit)(struct cgroup_subsys_state *css,
struct cgroup_subsys_state *old_css,
struct task_struct *task);
void (*bind)(struct cgroup *root);
void (*bind)(struct cgroup_subsys_state *root_css);
int subsys_id;
int disabled;
@@ -641,10 +663,17 @@ struct cgroup_subsys {
#undef IS_SUBSYS_ENABLED
#undef SUBSYS
static inline struct cgroup_subsys_state *cgroup_subsys_state(
struct cgroup *cgrp, int subsys_id)
/**
* css_parent - find the parent css
* @css: the target cgroup_subsys_state
*
* Return the parent css of @css. This function is guaranteed to return
* non-NULL parent as long as @css isn't the root.
*/
static inline
struct cgroup_subsys_state *css_parent(struct cgroup_subsys_state *css)
{
return cgrp->subsys[subsys_id];
return css->parent;
}
/**
@@ -672,7 +701,7 @@ extern struct mutex cgroup_mutex;
#endif
/**
* task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
* task_css_check - obtain css for (task, subsys) w/ extra access conds
* @task: the target task
* @subsys_id: the target subsystem ID
* @__c: extra condition expression to be passed to rcu_dereference_check()
@@ -680,7 +709,7 @@ extern struct mutex cgroup_mutex;
* Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
* synchronization rules are the same as task_css_set_check().
*/
#define task_subsys_state_check(task, subsys_id, __c) \
#define task_css_check(task, subsys_id, __c) \
task_css_set_check((task), (__c))->subsys[(subsys_id)]
/**
@@ -695,87 +724,92 @@ static inline struct css_set *task_css_set(struct task_struct *task)
}
/**
* task_subsys_state - obtain css for (task, subsys)
* task_css - obtain css for (task, subsys)
* @task: the target task
* @subsys_id: the target subsystem ID
*
* See task_subsys_state_check().
* See task_css_check().
*/
static inline struct cgroup_subsys_state *
task_subsys_state(struct task_struct *task, int subsys_id)
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
int subsys_id)
{
return task_subsys_state_check(task, subsys_id, false);
return task_css_check(task, subsys_id, false);
}
static inline struct cgroup* task_cgroup(struct task_struct *task,
int subsys_id)
static inline struct cgroup *task_cgroup(struct task_struct *task,
int subsys_id)
{
return task_subsys_state(task, subsys_id)->cgroup;
return task_css(task, subsys_id)->cgroup;
}
struct cgroup *cgroup_next_sibling(struct cgroup *pos);
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
/**
* cgroup_for_each_child - iterate through children of a cgroup
* @pos: the cgroup * to use as the loop cursor
* @cgrp: cgroup whose children to walk
* css_for_each_child - iterate through children of a css
* @pos: the css * to use as the loop cursor
* @parent: css whose children to walk
*
* Walk @cgrp's children. Must be called under rcu_read_lock(). A child
* cgroup which hasn't finished ->css_online() or already has finished
* Walk @parent's children. Must be called under rcu_read_lock(). A child
* css which hasn't finished ->css_online() or already has finished
* ->css_offline() may show up during traversal and it's each subsystem's
* responsibility to verify that each @pos is alive.
*
* If a subsystem synchronizes against the parent in its ->css_online() and
* before starting iterating, a cgroup which finished ->css_online() is
* before starting iterating, a css which finished ->css_online() is
* guaranteed to be visible in the future iterations.
*
* It is allowed to temporarily drop RCU read lock during iteration. The
* caller is responsible for ensuring that @pos remains accessible until
* the start of the next iteration by, for example, bumping the css refcnt.
*/
#define cgroup_for_each_child(pos, cgrp) \
for ((pos) = list_first_or_null_rcu(&(cgrp)->children, \
struct cgroup, sibling); \
(pos); (pos) = cgroup_next_sibling((pos)))
#define css_for_each_child(pos, parent) \
for ((pos) = css_next_child(NULL, (parent)); (pos); \
(pos) = css_next_child((pos), (parent)))
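
A minimal usage sketch under the rules documented above; css_tryget() stands in for the per-subsystem aliveness check and do_something() is hypothetical:

	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_child(pos, parent)
		if (css_tryget(pos)) {		/* verify @pos is alive */
			do_something(pos);	/* hypothetical work */
			css_put(pos);
		}
	rcu_read_unlock();
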
struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
struct cgroup *cgroup);
struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos);
/**
* cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
* @pos: the cgroup * to use as the loop cursor
* @cgroup: cgroup whose descendants to walk
* css_for_each_descendant_pre - pre-order walk of a css's descendants
* @pos: the css * to use as the loop cursor
* @root: css whose descendants to walk
*
* Walk @cgroup's descendants. Must be called under rcu_read_lock(). A
* descendant cgroup which hasn't finished ->css_online() or already has
* Walk @root's descendants. @root is included in the iteration and the
* first node to be visited. Must be called under rcu_read_lock(). A
* descendant css which hasn't finished ->css_online() or already has
* finished ->css_offline() may show up during traversal and it's each
* subsystem's responsibility to verify that each @pos is alive.
*
* If a subsystem synchronizes against the parent in its ->css_online() and
* before starting iterating, and synchronizes against @pos on each
* iteration, any descendant cgroup which finished ->css_online() is
* iteration, any descendant css which finished ->css_online() is
* guaranteed to be visible in the future iterations.
*
* In other words, the following guarantees that a descendant can't escape
* state updates of its ancestors.
*
* my_online(@cgrp)
* my_online(@css)
* {
* Lock @cgrp->parent and @cgrp;
* Inherit state from @cgrp->parent;
* Lock @css's parent and @css;
* Inherit state from the parent;
* Unlock both.
* }
*
* my_update_state(@cgrp)
* my_update_state(@css)
* {
* Lock @cgrp;
* Update @cgrp's state;
* Unlock @cgrp;
*
* cgroup_for_each_descendant_pre(@pos, @cgrp) {
* css_for_each_descendant_pre(@pos, @css) {
* Lock @pos;
* Verify @pos is alive and inherit state from @pos->parent;
* if (@pos == @css)
* Update @css's state;
* else
* Verify @pos is alive and inherit state from its parent;
* Unlock @pos;
* }
* }
@@ -786,8 +820,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
* visible by walking order and, as long as inheriting operations to the
* same @pos are atomic to each other, multiple updates racing each other
* still result in the correct state. It's guaranteed that at least one
* inheritance happens for any cgroup after the latest update to its
* parent.
* inheritance happens for any css after the latest update to its parent.
*
* If checking parent's state requires locking the parent, each inheriting
* iteration should lock and unlock both @pos->parent and @pos.
@@ -800,52 +833,45 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
* caller is responsible for ensuring that @pos remains accessible until
* the start of the next iteration by, for example, bumping the css refcnt.
*/
#define cgroup_for_each_descendant_pre(pos, cgroup) \
for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos); \
pos = cgroup_next_descendant_pre((pos), (cgroup)))
#define css_for_each_descendant_pre(pos, css) \
for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
(pos) = css_next_descendant_pre((pos), (css)))
struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
struct cgroup *cgroup);
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
/**
* cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
* @pos: the cgroup * to use as the loop cursor
* @cgroup: cgroup whose descendants to walk
* css_for_each_descendant_post - post-order walk of a css's descendants
* @pos: the css * to use as the loop cursor
* @css: css whose descendants to walk
*
* Similar to cgroup_for_each_descendant_pre() but performs post-order
* traversal instead. Note that the walk visibility guarantee described in
* pre-order walk doesn't apply the same to post-order walks.
* Similar to css_for_each_descendant_pre() but performs post-order
* traversal instead. @root is included in the iteration and the last
* node to be visited. Note that the walk visibility guarantee described
* in pre-order walk doesn't apply the same to post-order walks.
*/
#define cgroup_for_each_descendant_post(pos, cgroup) \
for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos); \
pos = cgroup_next_descendant_post((pos), (cgroup)))
#define css_for_each_descendant_post(pos, css) \
for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
(pos) = css_next_descendant_post((pos), (css)))
/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
struct list_head *cset_link;
struct list_head *task;
/* A css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys_state *origin_css;
struct list_head *cset_link;
struct list_head *task;
};
/*
* To iterate across the tasks in a cgroup:
*
* 1) call cgroup_iter_start to initialize an iterator
*
* 2) call cgroup_iter_next() to retrieve member tasks until it
* returns NULL or until you want to end the iteration
*
* 3) call cgroup_iter_end() to destroy the iterator.
*
* Or, call cgroup_scan_tasks() to iterate through every task in a
* cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
* the test_task() callback, but not while calling the process_task()
* callback.
*/
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
void css_task_iter_start(struct cgroup_subsys_state *css,
struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
int css_scan_tasks(struct cgroup_subsys_state *css,
bool (*test)(struct task_struct *, void *),
void (*process)(struct task_struct *, void *),
void *data, struct ptr_heap *heap);
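
A minimal sketch of the replacement iterator, counting every task attached to a css:

	struct css_task_iter it;
	struct task_struct *task;
	int count = 0;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		count++;			/* per-task work goes here */
	css_task_iter_end(&it);
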
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
@@ -878,7 +904,8 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg,
/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
#else /* !CONFIG_CGROUPS */


@@ -2,100 +2,110 @@
#define _LINUX_CONTEXT_TRACKING_H
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <asm/ptrace.h>
struct context_tracking {
/*
* When active is false, probes are unset in order
* to minimize overhead: TIF flags are cleared
* and calls to user_enter/exit are ignored. This
* may be further optimized using static keys.
*/
bool active;
enum ctx_state {
IN_KERNEL = 0,
IN_USER,
} state;
};
static inline void __guest_enter(void)
{
/*
* This is running in ioctl context so we can avoid
* the call to vtime_account() with its unnecessary idle check.
*/
vtime_account_system(current);
current->flags |= PF_VCPU;
}
static inline void __guest_exit(void)
{
/*
* This is running in ioctl context so we can avoid
* the call to vtime_account() with its unnecessary idle check.
*/
vtime_account_system(current);
current->flags &= ~PF_VCPU;
}
#ifdef CONFIG_CONTEXT_TRACKING
DECLARE_PER_CPU(struct context_tracking, context_tracking);
extern void context_tracking_cpu_set(int cpu);
static inline bool context_tracking_in_user(void)
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);
extern void __context_tracking_task_switch(struct task_struct *prev,
struct task_struct *next);
static inline void user_enter(void)
{
return __this_cpu_read(context_tracking.state) == IN_USER;
}
if (static_key_false(&context_tracking_enabled))
context_tracking_user_enter();
static inline bool context_tracking_active(void)
}
static inline void user_exit(void)
{
return __this_cpu_read(context_tracking.active);
if (static_key_false(&context_tracking_enabled))
context_tracking_user_exit();
}
extern void user_enter(void);
extern void user_exit(void);
extern void guest_enter(void);
extern void guest_exit(void);
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
if (!static_key_false(&context_tracking_enabled))
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
user_exit();
context_tracking_user_exit();
return prev_ctx;
}
static inline void exception_exit(enum ctx_state prev_ctx)
{
if (prev_ctx == IN_USER)
user_enter();
if (static_key_false(&context_tracking_enabled)) {
if (prev_ctx == IN_USER)
context_tracking_user_enter();
}
}
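
The intended pairing, sketched for a hypothetical exception handler: stash the previous context on entry and restore it on exit so that an exception taken from kernel mode does not spuriously re-enter user context:

	enum ctx_state prev_state = exception_enter();

	/* ... handle the exception with user context marked as exited ... */

	exception_exit(prev_state);
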
extern void context_tracking_task_switch(struct task_struct *prev,
struct task_struct *next);
static inline void context_tracking_task_switch(struct task_struct *prev,
struct task_struct *next)
{
if (static_key_false(&context_tracking_enabled))
__context_tracking_task_switch(prev, next);
}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void guest_enter(void)
{
__guest_enter();
}
static inline void guest_exit(void)
{
__guest_exit();
}
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline void context_tracking_task_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* !CONFIG_CONTEXT_TRACKING */
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static inline void guest_enter(void)
{
if (vtime_accounting_enabled())
vtime_guest_enter(current);
else
current->flags |= PF_VCPU;
}
static inline void guest_exit(void)
{
if (vtime_accounting_enabled())
vtime_guest_exit(current);
else
current->flags &= ~PF_VCPU;
}
#else
static inline void guest_enter(void)
{
/*
* This is running in ioctl context so it's safe
* to assume that it's the stime pending cputime
* to flush.
*/
vtime_account_system(current);
current->flags |= PF_VCPU;
}
static inline void guest_exit(void)
{
/* Flush the guest cputime we spent on the guest */
vtime_account_system(current);
current->flags &= ~PF_VCPU;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
#endif


@@ -0,0 +1,39 @@
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
#define _LINUX_CONTEXT_TRACKING_STATE_H
#include <linux/percpu.h>
#include <linux/static_key.h>
struct context_tracking {
/*
* When active is false, probes are unset in order
* to minimize overhead: TIF flags are cleared
* and calls to user_enter/exit are ignored. This
* may be further optimized using static keys.
*/
bool active;
enum ctx_state {
IN_KERNEL = 0,
IN_USER,
} state;
};
#ifdef CONFIG_CONTEXT_TRACKING
extern struct static_key context_tracking_enabled;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
static inline bool context_tracking_in_user(void)
{
return __this_cpu_read(context_tracking.state) == IN_USER;
}
static inline bool context_tracking_active(void)
{
return __this_cpu_read(context_tracking.active);
}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */
#endif


@@ -28,6 +28,7 @@ struct cpu {
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
@@ -172,6 +173,8 @@ extern struct bus_type cpu_subsys;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
@@ -197,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void)
#else /* CONFIG_HOTPLUG_CPU */
static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)


@@ -11,71 +11,36 @@
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H
#include <asm/cputime.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/threads.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <asm/div64.h>
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/sysfs.h>
/*********************************************************************
* CPUFREQ NOTIFIER INTERFACE *
* CPUFREQ INTERFACE *
*********************************************************************/
#define CPUFREQ_TRANSITION_NOTIFIER (0)
#define CPUFREQ_POLICY_NOTIFIER (1)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
extern void disable_cpufreq(void);
#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
static inline void disable_cpufreq(void) { }
#endif /* CONFIG_CPU_FREQ */
/* if (cpufreq_driver->target) exists, the ->governor decides what frequency
* within the limits is used. If (cpufreq_driver->setpolicy) exists, these
* two generic policies are available:
*/
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
/* Frequency values here are CPU kHz so that hardware which doesn't run
* with some frequencies can complain without having to guess what per
* cent / per mille means.
/*
* Frequency values here are CPU kHz
*
* Maximum transition latency is in nanoseconds - if it's unknown,
* CPUFREQ_ETERNAL shall be used.
*/
#define CPUFREQ_ETERNAL (-1)
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
struct cpufreq_governor;
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
extern struct kobject *cpufreq_global_kobject;
int cpufreq_get_global_kobject(void);
void cpufreq_put_global_kobject(void);
int cpufreq_sysfs_create_file(const struct attribute *attr);
void cpufreq_sysfs_remove_file(const struct attribute *attr);
struct cpufreq_freqs {
unsigned int cpu; /* cpu nr */
unsigned int old;
unsigned int new;
u8 flags; /* flags of cpufreq_driver, see below. */
};
#define CPUFREQ_ETERNAL (-1)
struct cpufreq_cpuinfo {
unsigned int max_freq;
unsigned int min_freq;
@@ -117,111 +82,59 @@ struct cpufreq_policy {
struct cpufreq_real_policy user_policy;
struct list_head policy_list;
struct kobject kobj;
struct completion kobj_unregister;
int transition_ongoing; /* Tracks transition status */
};
#define CPUFREQ_ADJUST (0)
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
#define CPUFREQ_UPDATE_POLICY_CPU (4)
/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
}
/******************** cpufreq transition notifiers *******************/
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
extern struct kobject *cpufreq_global_kobject;
int cpufreq_get_global_kobject(void);
void cpufreq_put_global_kobject(void);
int cpufreq_sysfs_create_file(const struct attribute *attr);
void cpufreq_sysfs_remove_file(const struct attribute *attr);
#define CPUFREQ_PRECHANGE (0)
#define CPUFREQ_POSTCHANGE (1)
#define CPUFREQ_RESUMECHANGE (8)
#define CPUFREQ_SUSPENDCHANGE (9)
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
void disable_cpufreq(void);
struct cpufreq_freqs {
unsigned int cpu; /* cpu nr */
unsigned int old;
unsigned int new;
u8 flags; /* flags of cpufreq_driver, see below. */
};
/**
* cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
* safe)
* @old: old value
* @div: divisor
* @mult: multiplier
*
*
* new = old * mult / div
*/
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
u_int mult)
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
#if BITS_PER_LONG == 32
u64 result = ((u64) old) * ((u64) mult);
do_div(result, div);
return (unsigned long) result;
#elif BITS_PER_LONG == 64
unsigned long result = old * ((u64) mult);
result /= div;
return result;
return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
return 0;
}
static inline void disable_cpufreq(void) { }
#endif
};
/*********************************************************************
* CPUFREQ GOVERNORS *
*********************************************************************/
#define CPUFREQ_GOV_START 1
#define CPUFREQ_GOV_STOP 2
#define CPUFREQ_GOV_LIMITS 3
#define CPUFREQ_GOV_POLICY_INIT 4
#define CPUFREQ_GOV_POLICY_EXIT 5
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int initialized;
int (*governor) (struct cpufreq_policy *policy,
unsigned int event);
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
unsigned int max_transition_latency; /* HW must be able to switch to
next freq faster than this value in nanoseconds or we
will fall back to the performance governor */
struct list_head governor_list;
struct module *owner;
};
/*
* Pass a target to the cpufreq driver.
*/
extern int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
unsigned int cpu);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
/*********************************************************************
* CPUFREQ DRIVER INTERFACE *
@@ -230,76 +143,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
struct freq_attr;
struct cpufreq_driver {
struct module *owner;
char name[CPUFREQ_NAME_LEN];
u8 flags;
/*
* This should be set by platforms having multiple clock-domains, i.e.
* supporting multiple policies. With this, sysfs directories for the
* governor are created under cpu/cpu<num>/cpufreq/, so different
* clusters can use the same governor with different tunables.
*/
bool have_governor_per_policy;
/* needed by all drivers */
int (*init) (struct cpufreq_policy *policy);
int (*verify) (struct cpufreq_policy *policy);
/* define one out of two */
int (*setpolicy) (struct cpufreq_policy *policy);
int (*target) (struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
/* should be defined, if possible */
unsigned int (*get) (unsigned int cpu);
/* optional */
unsigned int (*getavg) (struct cpufreq_policy *policy,
unsigned int cpu);
int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
int (*suspend) (struct cpufreq_policy *policy);
int (*resume) (struct cpufreq_policy *policy);
struct freq_attr **attr;
};
/* flags */
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
* all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
* "constants" aren't affected by
* frequency transitions */
#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
* mismatches */
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state);
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
unsigned int min, unsigned int max)
{
if (policy->min < min)
policy->min = min;
if (policy->max < min)
policy->max = min;
if (policy->min > max)
policy->min = max;
if (policy->max > max)
policy->max = max;
if (policy->min > policy->max)
policy->min = policy->max;
return;
}
struct freq_attr {
struct attribute attr;
ssize_t (*show)(struct cpufreq_policy *, char *);
@@ -334,52 +177,181 @@ __ATTR(_name, 0444, show_##_name, NULL)
static struct global_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *data);
struct cpufreq_driver {
char name[CPUFREQ_NAME_LEN];
u8 flags;
/*
* This should be set by platforms having multiple clock-domains, i.e.
* supporting multiple policies. With this, sysfs directories for the
* governor are created under cpu/cpu<num>/cpufreq/, so different
* clusters can use the same governor with different tunables.
*/
bool have_governor_per_policy;
/* needed by all drivers */
int (*init) (struct cpufreq_policy *policy);
int (*verify) (struct cpufreq_policy *policy);
/* define one out of two */
int (*setpolicy) (struct cpufreq_policy *policy);
int (*target) (struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
/* should be defined, if possible */
unsigned int (*get) (unsigned int cpu);
/* optional */
int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
int (*suspend) (struct cpufreq_policy *policy);
int (*resume) (struct cpufreq_policy *policy);
struct freq_attr **attr;
};
/* flags */
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
* all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
* "constants" aren't affected by
* frequency transitions */
#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
* mismatches */
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
const char *cpufreq_get_current_driver(void);
/*********************************************************************
* CPUFREQ 2.6. INTERFACE *
*********************************************************************/
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
#ifdef CONFIG_CPU_FREQ
/*
* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it
*/
unsigned int cpufreq_get(unsigned int cpu);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
unsigned int min, unsigned int max)
{
return 0;
if (policy->min < min)
policy->min = min;
if (policy->max < min)
policy->max = min;
if (policy->min > max)
policy->min = max;
if (policy->max > max)
policy->max = max;
if (policy->min > policy->max)
policy->min = policy->max;
return;
}
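
A minimal sketch of a driver ->verify() callback built on this helper, clamping the requested policy to the hardware range recorded in cpuinfo; the function name is hypothetical:

static int my_cpufreq_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}
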
#endif
/*
* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
*/
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
#else
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
return 0;
}
#endif
/*********************************************************************
* CPUFREQ DEFAULT GOVERNOR *
* CPUFREQ NOTIFIER INTERFACE *
*********************************************************************/
#define CPUFREQ_TRANSITION_NOTIFIER (0)
#define CPUFREQ_POLICY_NOTIFIER (1)
/* Transition notifiers */
#define CPUFREQ_PRECHANGE (0)
#define CPUFREQ_POSTCHANGE (1)
#define CPUFREQ_RESUMECHANGE (8)
#define CPUFREQ_SUSPENDCHANGE (9)
/* Policy Notifiers */
#define CPUFREQ_ADJUST (0)
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
#define CPUFREQ_UPDATE_POLICY_CPU (4)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state);
#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
#endif /* !CONFIG_CPU_FREQ */
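
A minimal sketch of a transition notifier wired to the constants above; everything named my_* is hypothetical, and only POSTCHANGE events are logged:

static int my_transition(struct notifier_block *nb, unsigned long state,
			 void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE)
		pr_info("cpu%u: %u -> %u kHz\n",
			freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_transition,
};

/* registered with: cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER); */
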
/**
* cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
* safe)
* @old: old value
* @div: divisor
* @mult: multiplier
*
*
* new = old * mult / div
*/
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
u_int mult)
{
#if BITS_PER_LONG == 32
u64 result = ((u64) old) * ((u64) mult);
do_div(result, div);
return (unsigned long) result;
#elif BITS_PER_LONG == 64
unsigned long result = old * ((u64) mult);
result /= div;
return result;
#endif
}
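
The classic use is rescaling loops_per_jiffy across a frequency transition: with old = 1000000 kHz and new = 800000 kHz, the calibration value shrinks by 800000/1000000, and the 64-bit intermediate keeps the product from overflowing on 32-bit machines:

	/* new_lpj = lpj * freqs->new / freqs->old */
	loops_per_jiffy = cpufreq_scale(loops_per_jiffy,
					freqs->old, freqs->new);
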
/*********************************************************************
* CPUFREQ GOVERNORS *
*********************************************************************/
/*
* If (cpufreq_driver->target) exists, the ->governor decides what frequency
* within the limits is used. If (cpufreq_driver->setpolicy) exists, these
* two generic policies are available:
*/
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
/* Governor Events */
#define CPUFREQ_GOV_START 1
#define CPUFREQ_GOV_STOP 2
#define CPUFREQ_GOV_LIMITS 3
#define CPUFREQ_GOV_POLICY_INIT 4
#define CPUFREQ_GOV_POLICY_EXIT 5
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int initialized;
int (*governor) (struct cpufreq_policy *policy,
unsigned int event);
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
unsigned int max_transition_latency; /* HW must be able to switch to
next freq faster than this value in nanoseconds or we
will fall back to the performance governor */
struct list_head governor_list;
struct module *owner;
};
/* Pass a target to the cpufreq driver */
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
/* CPUFREQ DEFAULT GOVERNOR */
/*
* The performance governor is the fallback governor if any other governor
* fails to auto-load due to latency restrictions
@@ -428,18 +400,16 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int relation,
unsigned int *index);
/* the following 3 functions are for cpufreq core use only */
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
/* the following function is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu);
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
void cpufreq_frequency_table_put_attr(unsigned int cpu);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
#endif /* _LINUX_CPUFREQ_H */


@@ -13,8 +13,6 @@
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/completion.h>
#include <linux/hrtimer.h>
#define CPUIDLE_STATE_MAX 10
@@ -61,6 +59,10 @@ struct cpuidle_state {
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
struct cpuidle_driver_kobj;
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
@@ -71,9 +73,8 @@ struct cpuidle_device {
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
struct cpuidle_device_kobj *kobj_dev;
struct list_head device_list;
struct kobject kobj;
struct completion kobj_unregister;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
int safe_state_index;


@@ -9,6 +9,7 @@
#include <linux/seqlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/lockref.h>
struct nameidata;
struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
# endif
#endif
#define d_lock d_lockref.lock
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
/* Ref lookup also touches following */
unsigned int d_count; /* protected by d_lock */
spinlock_t d_lock; /* per dentry lock */
struct lockref d_lockref; /* per-dentry lock and refcount */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
@@ -302,31 +304,9 @@ extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
const struct qstr *name, unsigned *seq);
/**
* __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok
* @dentry: dentry to take a ref on
* @seq: seqcount to verify against
* Returns: 0 on failure, else 1.
*
* __d_rcu_to_refcount operates on a dentry,seq pair that was returned
* by __d_lookup_rcu, to get a reference on an rcu-walk dentry.
*/
static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
{
int ret = 0;
assert_spin_locked(&dentry->d_lock);
if (!read_seqcount_retry(&dentry->d_seq, seq)) {
ret = 1;
dentry->d_count++;
}
return ret;
}
static inline unsigned d_count(const struct dentry *dentry)
{
return dentry->d_count;
return dentry->d_lockref.count;
}
/* validate "insecure" dentry pointer */
@@ -336,6 +316,7 @@ extern int d_validate(struct dentry *, struct dentry *);
* helper function for dentry_operations.d_dname() members
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
@@ -356,17 +337,14 @@ extern char *dentry_path(struct dentry *, char *, int);
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
if (dentry)
dentry->d_count++;
dentry->d_lockref.count++;
return dentry;
}
static inline struct dentry *dget(struct dentry *dentry)
{
if (dentry) {
spin_lock(&dentry->d_lock);
dget_dlock(dentry);
spin_unlock(&dentry->d_lock);
}
if (dentry)
lockref_get(&dentry->d_lockref);
return dentry;
}


@@ -192,6 +192,13 @@ static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode,
struct dentry *parent,
u64 *value)
{
return ERR_PTR(-ENODEV);
}
static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent,
size_t *value)


@@ -63,7 +63,7 @@ struct debug_obj_descr {
extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
extern void
debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
extern void debug_object_activate (void *addr, struct debug_obj_descr *descr);
extern int debug_object_activate (void *addr, struct debug_obj_descr *descr);
extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
@@ -85,8 +85,8 @@ static inline void
debug_object_init (void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_activate (void *addr, struct debug_obj_descr *descr) { }
static inline int
debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; }
static inline void
debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
static inline void


@@ -66,6 +66,9 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @bus_attrs: Default attributes of the bus.
* @dev_attrs: Default attributes of the devices on the bus.
* @drv_attrs: Default attributes of the device drivers on the bus.
* @bus_groups: Default attributes of the bus.
* @dev_groups: Default attributes of the devices on the bus.
* @drv_groups: Default attributes of the device drivers on the bus.
* @match: Called, perhaps multiple times, whenever a new device or driver
* is added for this bus. It should return a nonzero value if the
* given device can be handled by the given driver.
@@ -103,9 +106,12 @@ struct bus_type {
const char *name;
const char *dev_name;
struct device *dev_root;
struct bus_attribute *bus_attrs;
struct device_attribute *dev_attrs;
struct driver_attribute *drv_attrs;
struct bus_attribute *bus_attrs; /* use bus_groups instead */
struct device_attribute *dev_attrs; /* use dev_groups instead */
struct driver_attribute *drv_attrs; /* use drv_groups instead */
const struct attribute_group **bus_groups;
const struct attribute_group **dev_groups;
const struct attribute_group **drv_groups;
int (*match)(struct device *dev, struct device_driver *drv);
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
@@ -271,6 +277,8 @@ struct driver_attribute {
struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
#define DRIVER_ATTR_RO(_name) \
struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
#define DRIVER_ATTR_WO(_name) \
struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
extern int __must_check driver_create_file(struct device_driver *driver,
const struct driver_attribute *attr);
@@ -528,6 +536,8 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
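
A minimal sketch of the new read-only helper; __ATTR_RO() binds the attribute to a show routine named <attr>_show, so the function name below is required by the macro, while the attribute itself and read_temp() are hypothetical:

static ssize_t temperature_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", read_temp(dev));	/* hypothetical */
}
static DEVICE_ATTR_RO(temperature);
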
@@ -895,6 +905,7 @@ static inline bool device_supports_offline(struct device *dev)
extern void lock_device_hotplug(void);
extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
/*
@@ -1099,7 +1110,8 @@ do { \
dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
#define dev_info_ratelimited(dev, fmt, ...) \
dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
#if defined(CONFIG_DYNAMIC_DEBUG)
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define dev_dbg_ratelimited(dev, fmt, ...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
@@ -1108,8 +1120,17 @@ do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
##__VA_ARGS__); \
__dynamic_dev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define dev_dbg_ratelimited(dev, fmt, ...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
if (__ratelimit(&_rs)) \
dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
#define dev_dbg_ratelimited(dev, fmt, ...) \


@@ -0,0 +1,31 @@
/*
* OF helpers for External connector (extcon) framework
*
* Copyright (C) 2013 Texas Instruments, Inc.
* Kishon Vijay Abraham I <kishon@ti.com>
*
* Copyright (C) 2013 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __LINUX_OF_EXTCON_H
#define __LINUX_OF_EXTCON_H
#include <linux/err.h>
#if IS_ENABLED(CONFIG_OF_EXTCON)
extern struct extcon_dev
*of_extcon_get_extcon_dev(struct device *dev, int index);
#else
static inline struct extcon_dev
*of_extcon_get_extcon_dev(struct device *dev, int index)
{
return ERR_PTR(-ENOSYS);
}
#endif /* CONFIG_OF_EXTCON */
#endif /* __LINUX_OF_EXTCON_H */

View file

@ -78,6 +78,11 @@ struct trace_iterator {
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
cpumask_var_t started;
/* true when the current open file is a snapshot */
bool snapshot;
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
@ -90,10 +95,7 @@ struct trace_iterator {
loff_t pos;
long idx;
cpumask_var_t started;
/* true when the current open file is a snapshot */
bool snapshot;
/* All new fields here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);
extern int trace_remove_event_call(struct ftrace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
@ -357,6 +359,40 @@ do { \
__trace_printk(ip, fmt, ##args); \
} while (0)
/**
* tracepoint_string - register constant persistent string to trace system
* @str - a constant persistent string that will be referenced in tracepoints
*
* If constant strings are being used in tracepoints, it is faster and
* more efficient to just save the pointer to the string and reference
* that with a printf "%s" instead of saving the string in the ring buffer
* and wasting space and time.
*
* The problem with the above approach is that userspace tools that read
* the binary output of the trace buffers do not have access to the string.
* Instead they just show the address of the string which is not very
* useful to users.
*
* With tracepoint_string(), the string will be registered to the tracing
* system and exported to userspace via the debugfs/tracing/printk_formats
* file that maps the string address to the string text. This way userspace
* tools that read the binary buffers have a way to map the pointers to
* the ASCII strings they represent.
*
* The @str used must be a constant string and persistent as it would not
* make sense to show a string that no longer exists. But it is still fine
* to be used with modules, because when modules are unloaded, if they
* had tracepoints, the ring buffers are cleared too. As long as the string
* does not change during the life of the module, it is fine to use
* tracepoint_string() within a module.
*/
#define tracepoint_string(str) \
({ \
static const char *___tp_str __tracepoint_string = str; \
___tp_str; \
})
#define __tracepoint_string __attribute__((section("__tracepoint_str")))
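A minimal sketch of the intended use (the helper below is hypothetical): each call site registers its constant string once and records only the pointer:

static const char *phase_name(int running)
{
	return running ? tracepoint_string("running")
		       : tracepoint_string("stopped");
}
/* The returned pointer can be stored in a trace event and printed with
 * "%s"; debugfs/tracing/printk_formats maps it back to the text. */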
#ifdef CONFIG_PERF_EVENTS
struct perf_event;

View file

@ -1,126 +1,11 @@
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
* The hardirq count can in theory reach the same as NR_IRQS.
* In reality, the number of nested IRQS is limited to the stack
* size as well. For archs with over 1000 IRQS it is not practical
* to expect that they will all nest. We give a max of 10 bits for
* hardirq nesting. An arch may choose to give less than 10 bits.
* m68k expects it to be 8.
*
* - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
* - bit 26 is the NMI_MASK
* - bit 27 is the PREEMPT_ACTIVE flag
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x03ff0000
* NMI_MASK: 0x04000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define NMI_BITS 1
#define MAX_HARDIRQ_BITS 10
#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS MAX_HARDIRQ_BITS
#endif
#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
#define __IRQ_MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#endif
#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
* in_softirq - Are we currently processing softirq or have bh disabled?
* in_serving_softirq - Are we currently processing softirq?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
*/
#define in_nmi() (preempt_count() & NMI_MASK)
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif
/*
* Are we running in atomic context? WARNING: this macro cannot
* always detect atomic context; in particular, it cannot know about
* held spinlocks in non-preemptible kernels. Thus it should not be
* used in the general case to determine whether sleeping is possible.
* Do not use in_atomic() in driver code.
*/
#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
/*
* Check whether we were atomic before we did preempt_disable():
* (used by the scheduler, *after* releasing the kernel lock)
*/
#define in_atomic_preempt_off() \
((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
#ifdef CONFIG_PREEMPT_COUNT
# define preemptible() (preempt_count() == 0 && !irqs_disabled())
#else
# define preemptible() 0
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);

View file

@ -27,6 +27,14 @@
#include <linux/types.h>
/*
* Framework version for util services.
*/
#define UTIL_FW_MAJOR 3
#define UTIL_FW_MINOR 0
#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
/*
* Implementation of host controlled snapshot of the guest.
@ -455,27 +463,6 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
*read = dsize - *write;
}
/*
* We use the same version numbering for all Hyper-V modules.
*
* Definition of versioning is as follows;
*
* Major Number Changes for these scenarios;
* 1. When a new version of Windows Hyper-V
* is released.
* 2. A Major change has occurred in the
* Linux IC's.
* (For example the merge for the first time
* into the kernel) Every time the Major Number
* changes, the Revision number is reset to 0.
* Minor Number Changes when new functionality is added
* to the Linux IC's that is not a bug fix.
*
* 3.1 - Added completed hv_utils driver. Shutdown/Heartbeat/Timesync
*/
#define HV_DRV_VERSION "3.1"
/*
* The VMBUS version is a 32 bit entity broken up into
* two 16 bit quantities: major_number.minor_number.
@ -1494,7 +1481,7 @@ struct hyperv_service_callback {
};
#define MAX_SRV_VER 0x7ffffff
extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *,
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
struct icmsg_negotiate *, u8 *, int,
int);

View file

@ -17,6 +17,8 @@
#include <linux/iio/trigger.h>
#include <linux/bitops.h>
#include <linux/platform_data/st_sensors_pdata.h>
#define ST_SENSORS_TX_MAX_LENGTH 2
#define ST_SENSORS_RX_MAX_LENGTH 6
@ -118,14 +120,16 @@ struct st_sensor_bdu {
/**
* struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
* @addr: address of the register.
* @mask: mask to write the on/off value.
* @mask_int1: mask to enable/disable IRQ on INT1 pin.
* @mask_int2: mask to enable/disable IRQ on INT2 pin.
* struct ig1 - represents the Interrupt Generator 1 of sensors.
* @en_addr: address of the enable ig1 register.
* @en_mask: mask to write the on/off value for enable.
*/
struct st_sensor_data_ready_irq {
u8 addr;
u8 mask;
u8 mask_int1;
u8 mask_int2;
struct {
u8 en_addr;
u8 en_mask;
@ -201,6 +205,7 @@ struct st_sensors {
* @buffer_data: Data used by buffer part.
* @odr: Output data rate of the sensor [Hz].
* @num_data_channels: Number of data channels used in buffer.
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
* @get_irq_data_ready: Function to get the IRQ used for data ready signal.
* @tf: Transfer function structure used by I/O operations.
* @tb: Transfer buffers and mutex used by I/O operations.
@ -219,6 +224,8 @@ struct st_sensor_data {
unsigned int odr;
unsigned int num_data_channels;
u8 drdy_int_pin;
unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev);
const struct st_sensor_transfer_function *tf;
@ -249,7 +256,8 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
}
#endif
int st_sensors_init_sensor(struct iio_dev *indio_dev);
int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata);
int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable);

View file

@ -531,6 +531,60 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv)
**/
void iio_device_free(struct iio_dev *indio_dev);
/**
* devm_iio_device_alloc - Resource-managed iio_device_alloc()
* @dev: Device to allocate iio_dev for
* @sizeof_priv: Space to allocate for private structure.
*
* Managed iio_device_alloc. iio_dev allocated with this function is
* automatically freed on driver detach.
*
* If an iio_dev allocated with this function needs to be freed separately,
* devm_iio_device_free() must be used.
*
* RETURNS:
* Pointer to allocated iio_dev on success, NULL on failure.
*/
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
/**
* devm_iio_device_free - Resource-managed iio_device_free()
* @dev: Device this iio_dev belongs to
* @indio_dev: the iio_dev associated with the device
*
* Free iio_dev allocated with devm_iio_device_alloc().
*/
void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
/**
* devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
* @dev: Device to allocate iio_trigger for
* @fmt: trigger name format. If it includes format
* specifiers, the additional arguments following
* format are formatted and inserted in the resulting
* string replacing their respective specifiers.
*
* Managed iio_trigger_alloc. iio_trigger allocated with this function is
* automatically freed on driver detach.
*
* If an iio_trigger allocated with this function needs to be freed separately,
* devm_iio_trigger_free() must be used.
*
* RETURNS:
* Pointer to allocated iio_trigger on success, NULL on failure.
*/
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
const char *fmt, ...);
/**
* devm_iio_trigger_free - Resource-managed iio_trigger_free()
* @dev: Device this iio_dev belongs to
* @iio_trig: the iio_trigger associated with the device
*
* Free iio_trigger allocated with devm_iio_trigger_alloc().
*/
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
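A probe() sketch using the managed allocator (driver and state names are hypothetical):

struct foo_state {
	int dummy;			/* hypothetical private state */
};

static int foo_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct foo_state));
	if (!indio_dev)
		return -ENOMEM;

	/* ... fill in name, channels, info ... */
	return iio_device_register(indio_dev);	/* unregister in remove() */
}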
/**
* iio_buffer_enabled() - helper function to test if the buffer is enabled
* @indio_dev: IIO device structure for device

View file

@ -73,11 +73,6 @@ struct iio_const_attr {
.dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
/* Generic attributes of onetype or another */
/**
* IIO_DEV_ATTR_RESET: resets the device
**/
#define IIO_DEV_ATTR_RESET(_store) \
IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, _store, 0)
/**
* IIO_DEV_ATTR_SAMP_FREQ - sets any internal clock frequency

View file

@ -8,6 +8,7 @@
*/
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/atomic.h>
#ifndef _IIO_TRIGGER_H_
#define _IIO_TRIGGER_H_
@ -61,7 +62,7 @@ struct iio_trigger {
struct list_head list;
struct list_head alloc_list;
int use_count;
atomic_t use_count;
struct irq_chip subirq_chip;
int subirq_base;

View file

@ -5,45 +5,13 @@
#include <linux/bitmap.h>
#include <linux/if.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
enum
{
IPV4_DEVCONF_FORWARDING=1,
IPV4_DEVCONF_MC_FORWARDING,
IPV4_DEVCONF_PROXY_ARP,
IPV4_DEVCONF_ACCEPT_REDIRECTS,
IPV4_DEVCONF_SECURE_REDIRECTS,
IPV4_DEVCONF_SEND_REDIRECTS,
IPV4_DEVCONF_SHARED_MEDIA,
IPV4_DEVCONF_RP_FILTER,
IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
IPV4_DEVCONF_BOOTP_RELAY,
IPV4_DEVCONF_LOG_MARTIANS,
IPV4_DEVCONF_TAG,
IPV4_DEVCONF_ARPFILTER,
IPV4_DEVCONF_MEDIUM_ID,
IPV4_DEVCONF_NOXFRM,
IPV4_DEVCONF_NOPOLICY,
IPV4_DEVCONF_FORCE_IGMP_VERSION,
IPV4_DEVCONF_ARP_ANNOUNCE,
IPV4_DEVCONF_ARP_IGNORE,
IPV4_DEVCONF_PROMOTE_SECONDARIES,
IPV4_DEVCONF_ARP_ACCEPT,
IPV4_DEVCONF_ARP_NOTIFY,
IPV4_DEVCONF_ACCEPT_LOCAL,
IPV4_DEVCONF_SRC_VMARK,
IPV4_DEVCONF_PROXY_ARP_PVLAN,
IPV4_DEVCONF_ROUTE_LOCALNET,
__IPV4_DEVCONF_MAX
};
#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
struct ipv4_devconf {
void *sysctl;
int data[IPV4_DEVCONF_MAX];

View file

@ -101,6 +101,7 @@ struct inet6_skb_parm {
#define IP6SKB_FORWARDED 2
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
#define IP6SKB_FRAGMENTED 16
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))

View file

@ -101,13 +101,13 @@ static inline u64 get_jiffies_64(void)
#define time_after(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)(b) - (long)(a) < 0))
((long)((b) - (a)) < 0))
#define time_before(a,b) time_after(b,a)
#define time_after_eq(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)(a) - (long)(b) >= 0))
((long)((a) - (b)) >= 0))
#define time_before_eq(a,b) time_after_eq(b,a)
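The change from `(long)(b) - (long)(a)` to `(long)((b) - (a))` performs the subtraction in unsigned arithmetic, which is well defined across jiffies wraparound, before casting the result. A usage sketch (the polled condition is hypothetical):

static int wait_for_thing(void)
{
	unsigned long deadline = jiffies + HZ;	/* one second from now */

	while (!thing_ready()) {		/* hypothetical condition */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		cpu_relax();
	}
	return 0;
}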
/*
@ -130,13 +130,13 @@ static inline u64 get_jiffies_64(void)
#define time_after64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)(b) - (__s64)(a) < 0))
((__s64)((b) - (a)) < 0))
#define time_before64(a,b) time_after64(b,a)
#define time_after_eq64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)(a) - (__s64)(b) >= 0))
((__s64)((a) - (b)) >= 0))
#define time_before_eq64(a,b) time_after_eq64(b,a)
#define time_in_range64(a, b, c) \

View file

@ -36,10 +36,9 @@ struct kbd_struct {
#define VC_CTRLRLOCK KG_CTRLR /* ctrlr lock mode */
unsigned char slockstate; /* for `sticky' Shift, Ctrl, etc. */
unsigned char ledmode:2; /* one 2-bit value */
unsigned char ledmode:1;
#define LED_SHOW_FLAGS 0 /* traditional state */
#define LED_SHOW_IOCTL 1 /* only change leds upon ioctl */
#define LED_SHOW_MEM 2 /* `heartbeat': peek into memory */
unsigned char ledflagstate:4; /* flags, not lights */
unsigned char default_ledflagstate:4;

View file

@ -629,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
static inline void trace_dump_stack(void) { }
static inline void trace_dump_stack(int skip) { }
static inline void tracing_on(void) { }
static inline void tracing_off(void) { }

View file

@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#define UEVENT_HELPER_PATH_LEN 256
#define UEVENT_NUM_ENVP 32 /* number of env pointers */
@ -65,6 +66,9 @@ struct kobject {
struct kobj_type *ktype;
struct sysfs_dirent *sd;
struct kref kref;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
struct delayed_work release;
#endif
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;

View file

@ -138,6 +138,22 @@ enum {
ATA_SHT_THIS_ID = -1,
ATA_SHT_USE_CLUSTERING = 1,
/* struct ata_taskfile flags */
ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
/* protocol flags */
ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */
ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */
ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA,
ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */
ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */
/* struct ata_device stuff */
ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
@ -156,6 +172,7 @@ enum {
ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
ATA_DFLAG_DETACH = (1 << 24),
@ -207,6 +224,7 @@ enum {
ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
ATA_FLAG_AN = (1 << 18), /* controller supports AN */
ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
ATA_FLAG_FPDMA_AUX = (1 << 20), /* controller supports H2DFIS aux field */
ATA_FLAG_EM = (1 << 21), /* driver supports enclosure
* management */
ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
@ -518,6 +536,33 @@ enum sw_activity {
BLINK_OFF,
};
struct ata_taskfile {
unsigned long flags; /* ATA_TFLAG_xxx */
u8 protocol; /* ATA_PROT_xxx */
u8 ctl; /* control reg */
u8 hob_feature; /* additional data */
u8 hob_nsect; /* to support LBA48 */
u8 hob_lbal;
u8 hob_lbam;
u8 hob_lbah;
u8 feature;
u8 nsect;
u8 lbal;
u8 lbam;
u8 lbah;
u8 device;
u8 command; /* IO operation */
u32 auxiliary; /* auxiliary field */
/* from SATA 3.1 and */
/* ATA-8 ACS-3 */
};
#ifdef CONFIG_ATA_SFF
struct ata_ioports {
void __iomem *cmd_addr;
@ -660,6 +705,9 @@ struct ata_device {
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
/* NCQ send and receive log subcommand support */
u8 ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE];
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
@ -959,6 +1007,69 @@ extern const unsigned long sata_deb_timing_long[];
extern struct ata_port_operations ata_dummy_port_ops;
extern const struct ata_port_info ata_dummy_port_info;
/*
* protocol tests
*/
static inline unsigned int ata_prot_flags(u8 prot)
{
switch (prot) {
case ATA_PROT_NODATA:
return 0;
case ATA_PROT_PIO:
return ATA_PROT_FLAG_PIO;
case ATA_PROT_DMA:
return ATA_PROT_FLAG_DMA;
case ATA_PROT_NCQ:
return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ;
case ATAPI_PROT_NODATA:
return ATA_PROT_FLAG_ATAPI;
case ATAPI_PROT_PIO:
return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO;
case ATAPI_PROT_DMA:
return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA;
}
return 0;
}
static inline int ata_is_atapi(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI;
}
static inline int ata_is_nodata(u8 prot)
{
return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA);
}
static inline int ata_is_pio(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO;
}
static inline int ata_is_dma(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
}
static inline int ata_is_ncq(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ;
}
static inline int ata_is_data(u8 prot)
{
return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA;
}
static inline int is_multi_taskfile(struct ata_taskfile *tf)
{
return (tf->command == ATA_CMD_READ_MULTI) ||
(tf->command == ATA_CMD_WRITE_MULTI) ||
(tf->command == ATA_CMD_READ_MULTI_EXT) ||
(tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
(tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
}
static inline const unsigned long *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
@ -1142,8 +1253,6 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
const struct ata_acpi_gtm *gtm);
acpi_handle ata_ap_acpi_handle(struct ata_port *ap);
acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
#else
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
@ -1497,6 +1606,13 @@ static inline int ata_ncq_enabled(struct ata_device *dev)
ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
}
static inline bool ata_fpdma_dsm_supported(struct ata_device *dev)
{
return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) &&
(dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &
ATA_LOG_NCQ_SEND_RECV_DSM_TRIM);
}
static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
qc->tf.ctl |= ATA_NIEN;

View file

@ -124,6 +124,29 @@ static inline void init_llist_head(struct llist_head *list)
&(pos)->member != NULL; \
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
* llist_for_each_entry_safe - iterate over some deleted entries of lock-less list of given type
* safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @node: the first entry of deleted list entries.
* @member: the name of the llist_node within the struct.
*
* In general, some entries of the lock-less list can be traversed
* safely only after being removed from the list, so start with an
* entry instead of the list head.
*
* If used directly on entries deleted from a lock-less list, the
* traversal order is from the newest to the oldest added entry. To
* traverse from the oldest to the newest, reverse the order yourself
* before traversing.
*/
#define llist_for_each_entry_safe(pos, n, node, member) \
for (pos = llist_entry((node), typeof(*pos), member); \
&pos->member != NULL && \
(n = llist_entry(pos->member.next, typeof(*n), member), true); \
pos = n)
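A draining sketch (struct and list names are illustrative): the iterator samples the next pointer before the body runs, so each entry may be freed during traversal:

struct item {
	struct llist_node node;
};

static LLIST_HEAD(pending);		/* filled elsewhere via llist_add() */

static void drain(void)
{
	struct item *pos, *n;
	struct llist_node *first = llist_del_all(&pending);

	llist_for_each_entry_safe(pos, n, first, node)
		kfree(pos);	/* safe: n already points at the next entry */
}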
/**
* llist_empty - tests whether a lock-less list is empty
* @head: the list to test

View file

@ -365,7 +365,7 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
#else /* !LOCKDEP */
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_off(void)
{
@ -479,82 +479,36 @@ static inline void print_irqtrace_events(struct task_struct *curr)
* on the per lock-class debug mode:
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# else
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i) lock_release(l, n, i)
#ifdef CONFIG_PROVE_LOCKING
#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 2, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 2, n, i)
#else
# define spin_acquire(l, s, t, i) do { } while (0)
# define spin_release(l, n, i) do { } while (0)
#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i)
# else
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i) lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i) do { } while (0)
# define rwlock_acquire_read(l, s, t, i) do { } while (0)
# define rwlock_release(l, n, i) do { } while (0)
#endif
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i) lock_release(l, n, i)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# else
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
# endif
# define mutex_release(l, n, i) lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i) do { } while (0)
# define mutex_acquire_nest(l, s, t, n, i) do { } while (0)
# define mutex_release(l, n, i) do { } while (0)
#endif
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i) lock_release(l, n, i)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i) lock_release(l, n, i)
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
# define rwsem_release(l, n, i) lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i) do { } while (0)
# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
# define rwsem_acquire_read(l, s, t, i) do { } while (0)
# define rwsem_release(l, n, i) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
# else
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
# endif
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l) do { } while (0)
# define lock_map_acquire_read(l) do { } while (0)
# define lock_map_release(l) do { } while (0)
#endif
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \

include/linux/lockref.h Normal file
View file

@ -0,0 +1,36 @@
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H
/*
* Locked reference counts.
*
* These are different from just plain atomic refcounts in that they
* are atomic with respect to the spinlock that goes with them. In
* particular, there can be implementations that don't actually get
* the spinlock for the common decrement/increment operations, but they
* still have to check that the operation is done semantically as if
* the spinlock had been taken (using a cmpxchg operation that covers
* both the lock and the count word, or using memory transactions, for
* example).
*/
#include <linux/spinlock.h>
struct lockref {
union {
#ifdef CONFIG_CMPXCHG_LOCKREF
aligned_u64 lock_count;
#endif
struct {
spinlock_t lock;
unsigned int count;
};
};
};
extern void lockref_get(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
#endif /* __LINUX_LOCKREF_H */
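A sketch of the intended calling pattern, assuming an object that embeds a struct lockref named ref (the object and its destructor are hypothetical):

if (!lockref_get_not_zero(&obj->ref))
	return NULL;			/* object is already going away */
/* ... use obj ... */
if (!lockref_put_or_lock(&obj->ref)) {
	/* last reference: the count was not dropped and the lock is held */
	obj->ref.count = 0;
	spin_unlock(&obj->ref.lock);
	free_obj(obj);			/* hypothetical teardown */
}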

View file

@ -85,7 +85,7 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)

View file

@ -25,16 +25,9 @@
struct memory_block {
unsigned long start_section_nr;
unsigned long end_section_nr;
unsigned long state;
int section_count;
/*
* This serializes all state change requests. It isn't
* held during creation because the control files are
* created long after the critical areas during
* initialization.
*/
struct mutex state_mutex;
unsigned long state; /* serialized by the dev->lock */
int section_count; /* serialized by mem_sysfs_mutex */
int online_type; /* for passing data to online routine */
int phys_device; /* to which fru does this belong? */
void *hw; /* optional pointer to fw/hw data */
int (*phys_callback)(struct memory_block *);
@ -125,7 +118,6 @@ extern struct memory_block *find_memory_block_hinted(struct mem_section *,
struct memory_block *);
extern struct memory_block *find_memory_block(struct mem_section *);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
enum mem_add_context { BOOT, HOTPLUG };
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
#ifdef CONFIG_MEMORY_HOTPLUG

View file

@ -160,7 +160,8 @@ enum palmas_regulators {
PALMAS_REG_SMPS7,
PALMAS_REG_SMPS8,
PALMAS_REG_SMPS9,
PALMAS_REG_SMPS10,
PALMAS_REG_SMPS10_OUT2,
PALMAS_REG_SMPS10_OUT1,
/* LDO regulators */
PALMAS_REG_LDO1,
PALMAS_REG_LDO2,
@ -355,9 +356,9 @@ struct palmas_pmic {
int smps123;
int smps457;
int range[PALMAS_REG_SMPS10];
unsigned int ramp_delay[PALMAS_REG_SMPS10];
unsigned int current_reg_mode[PALMAS_REG_SMPS10];
int range[PALMAS_REG_SMPS10_OUT1];
unsigned int ramp_delay[PALMAS_REG_SMPS10_OUT1];
unsigned int current_reg_mode[PALMAS_REG_SMPS10_OUT1];
};
struct palmas_resource {
@ -371,17 +372,15 @@ struct palmas_usb {
struct extcon_dev edev;
/* used to set vbus, in atomic path */
struct work_struct set_vbus_work;
int id_otg_irq;
int id_irq;
int vbus_otg_irq;
int vbus_irq;
int vbus_enable;
enum palmas_usb_state linkstat;
int wakeup;
bool enable_vbus_detection;
bool enable_id_detection;
};
#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)

View file

@ -191,6 +191,17 @@ enum s2mps11_regulators {
#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
#define S2MPS11_BUCK2_RAMP_SHIFT 6
#define S2MPS11_BUCK34_RAMP_SHIFT 4
#define S2MPS11_BUCK5_RAMP_SHIFT 6
#define S2MPS11_BUCK16_RAMP_SHIFT 4
#define S2MPS11_BUCK7810_RAMP_SHIFT 2
#define S2MPS11_BUCK9_RAMP_SHIFT 0
#define S2MPS11_BUCK2_RAMP_EN_SHIFT 3
#define S2MPS11_BUCK3_RAMP_EN_SHIFT 2
#define S2MPS11_BUCK4_RAMP_EN_SHIFT 1
#define S2MPS11_BUCK6_RAMP_EN_SHIFT 0
#define S2MPS11_PMIC_EN_SHIFT 6
#define S2MPS11_REGULATOR_MAX (S2MPS11_REG_MAX - 3)

View file

@ -113,11 +113,27 @@
#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
#define CNTRLREG_TSCENB BIT(7)
/* FIFO READ Register */
#define FIFOREAD_DATA_MASK (0xfff << 0)
#define FIFOREAD_CHNLID_MASK (0xf << 16)
/* Sequencer Status */
#define SEQ_STATUS BIT(5)
#define ADC_CLK 3000000
#define MAX_CLK_DIV 7
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
/*
* The ADC runs at 3 MHz and takes 15 cycles
* to latch one data output, so the idle time
* for the ADC to process one sample is
* around 5 microseconds.
*/
#define IDLE_TIMEOUT 5 /* microsec */
#define TSCADC_CELLS 2
struct ti_tscadc_dev {

View file

@ -243,24 +243,6 @@ struct tps65217_board {
struct tps65217_bl_pdata *bl_pdata;
};
/**
* struct tps_info - packages regulator constraints
* @name: Voltage regulator name
* @min_uV: minimum micro volts
* @max_uV: maximum micro volts
* @vsel_to_uv: Function pointer to get voltage from selector
* @uv_to_vsel: Function pointer to get selector from voltage
*
* This data is used to check the regulator voltage limits while setting.
*/
struct tps_info {
const char *name;
int min_uV;
int max_uV;
int (*vsel_to_uv)(unsigned int vsel);
int (*uv_to_vsel)(int uV, unsigned int *vsel);
};
/**
* struct tps65217 - tps65217 sub-driver chip access routines
*
@ -273,7 +255,6 @@ struct tps65217 {
unsigned int id;
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regulator_dev *rdev[TPS65217_NUM_REGULATOR];
struct tps_info *info[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
};

View file

@ -309,21 +309,20 @@ struct mlx5_hca_cap {
__be16 max_desc_sz_rq;
u8 rsvd21[2];
__be16 max_desc_sz_sq_dc;
u8 rsvd22[4];
__be16 max_qp_mcg;
u8 rsvd23;
__be32 max_qp_mcg;
u8 rsvd22[3];
u8 log_max_mcg;
u8 rsvd24;
u8 rsvd23;
u8 log_max_pd;
u8 rsvd25;
u8 rsvd24;
u8 log_max_xrcd;
u8 rsvd26[42];
u8 rsvd25[42];
__be16 log_uar_page_sz;
u8 rsvd27[28];
u8 rsvd26[28];
u8 log_msx_atomic_size_qp;
u8 rsvd28[2];
u8 rsvd27[2];
u8 log_msx_atomic_size_dc;
u8 rsvd29[76];
u8 rsvd28[76];
};
@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
struct mlx5_eqe_page_req {
u8 rsvd0[2];
__be16 func_id;
u8 rsvd1[2];
__be16 num_pages;
__be32 rsvd2[5];
__be32 num_pages;
__be32 rsvd1[5];
};
union ev_data {

View file

@ -358,7 +358,7 @@ struct mlx5_caps {
u32 reserved_lkey;
u8 local_ca_ack_delay;
u8 log_max_mcg;
u16 max_qp_mcg;
u32 max_qp_mcg;
int min_page_sz;
};
@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s16 npages);
s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
int mlx5_register_health_report_handler(health_handler_t handler);
void mlx5_unregister_health_report_handler(void);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);

View file

@ -332,6 +332,7 @@ struct mm_struct {
unsigned long pgoff, unsigned long flags);
#endif
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;

View file

@ -98,8 +98,17 @@
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
/*
* The inline keyword gives the compiler room to decide whether to inline
* a function as it sees best. However, as these functions are called in
* both __init and non-__init functions, if they are not inlined we will
* end up with a section mismatch error (of the type of freeable items
* not being freed). So we must use __always_inline here to fix the
* problem. If other functions end up in this situation in the future,
* they will also need to be annotated as __always_inline.
*/
#define node_set(node, dst) __node_set((node), &(dst))
static inline void __node_set(int node, volatile nodemask_t *dstp)
static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
{
set_bit(node, dstp->bits);
}

View file

@ -14,6 +14,10 @@ struct fs_struct;
* A structure to contain pointers to all per-process
* namespaces - fs (mount), uts, network, sysvipc, etc.
*
* The pid namespace is an exception -- it's accessed using
* task_active_pid_ns. The pid namespace here is the
* namespace that children will use.
*
* 'count' is the number of tasks holding a reference.
* The count for each namespace, then, will be the number
* of nsproxies pointing to it, not the number of tasks.
@ -27,7 +31,7 @@ struct nsproxy {
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns;
struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
};
extern struct nsproxy init_nsproxy;

View file

@ -266,6 +266,7 @@ extern int of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@ -343,6 +344,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
s; \
s = of_prop_next_string(prop, s))
int of_device_is_stdout_path(struct device_node *dn);
#else /* CONFIG_OF */
static inline const char* of_node_full_name(struct device_node *np)
@ -459,6 +462,12 @@ static inline const void *of_get_property(const struct device_node *node,
return NULL;
}
static inline struct device_node *of_get_cpu_node(int cpu,
unsigned int *thread)
{
return NULL;
}
static inline int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value)
{
@ -505,6 +514,11 @@ static inline int of_machine_is_compatible(const char *compat)
return 0;
}
static inline int of_device_is_stdout_path(struct device_node *dn)
{
return 0;
}
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#define of_property_for_each_u32(np, propname, prop, p, u) \

View file

@ -1,6 +1,7 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h> /* temporary until merge */
@ -43,6 +44,15 @@ static inline void of_device_node_put(struct device *dev)
of_node_put(dev->of_node);
}
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
struct device *cpu_dev;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return NULL;
return of_node_get(cpu_dev->of_node);
}
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@ -67,6 +77,11 @@ static inline const struct of_device_id *of_match_device(
{
return NULL;
}
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */

View file

@ -6,6 +6,7 @@
#define EC_WRITE_SCI_MASK 0x1b
#define EC_WAKE_UP_WLAN 0x24
#define EC_WLAN_LEAVE_RESET 0x25
#define EC_DCON_POWER_MODE 0x26
#define EC_READ_EB_MODE 0x2a
#define EC_SET_SCI_INHIBIT 0x32
#define EC_SET_SCI_INHIBIT_RELEASE 0x34

View file

@ -47,24 +47,22 @@ void acpi_pci_remove_bus(struct pci_bus *bus);
#ifdef CONFIG_ACPI_PCI_SLOT
void acpi_pci_slot_init(void);
void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle);
void acpi_pci_slot_enumerate(struct pci_bus *bus);
void acpi_pci_slot_remove(struct pci_bus *bus);
#else
static inline void acpi_pci_slot_init(void) { }
static inline void acpi_pci_slot_enumerate(struct pci_bus *bus,
acpi_handle handle) { }
static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { }
static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
#endif
#ifdef CONFIG_HOTPLUG_PCI_ACPI
void acpiphp_init(void);
void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle);
void acpiphp_enumerate_slots(struct pci_bus *bus);
void acpiphp_remove_slots(struct pci_bus *bus);
void acpiphp_check_host_bridge(acpi_handle handle);
#else
static inline void acpiphp_init(void) { }
static inline void acpiphp_enumerate_slots(struct pci_bus *bus,
acpi_handle handle) { }
static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { }
static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
#endif

View file

@ -675,7 +675,7 @@ struct pci_driver {
/* these external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
void pcie_bus_configure_settings(struct pci_bus *bus);
enum pcie_bus_config_types {
PCIE_BUS_TUNE_OFF,
@ -914,6 +914,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev);
void pci_msi_off(struct pci_dev *dev);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
int pcix_get_mmrbc(struct pci_dev *dev);
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
@ -924,6 +925,11 @@ int pcie_set_mps(struct pci_dev *dev, int mps);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
int pci_reset_slot(struct pci_slot *slot);
int pci_probe_reset_bus(struct pci_bus *bus);
int pci_reset_bus(struct pci_bus *bus);
void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
@ -1003,6 +1009,7 @@ int pci_claim_resource(struct pci_dev *, int);
void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
@ -1043,7 +1050,6 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
resource_size_t,
resource_size_t),
void *alignf_data);
void pci_enable_bridges(struct pci_bus *bus);
/* Proper probing supporting hot-pluggable devices */
int __must_check __pci_register_driver(struct pci_driver *, struct module *,
@ -1648,6 +1654,10 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;
#endif
#ifdef CONFIG_PCI_MMCONFIG
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);

View file

@ -63,6 +63,9 @@ enum pcie_link_width {
* @get_adapter_status: Called to see whether an adapter is present in the slot.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
* @reset_slot: Optional interface to allow override of a bus reset for the
* slot for cases where a secondary bus reset can result in spurious
* hotplug events or where a slot can be reset independent of the bus.
*
* The table of function pointers that is passed to the hotplug pci core by a
* hotplug pci driver. These functions are called by the hotplug pci core when
@ -80,6 +83,7 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
int (*reset_slot) (struct hotplug_slot *slot, int probe);
};
/**

View file

@ -1311,6 +1311,8 @@
#define PCI_DEVICE_ID_IMS_TT128 0x9128
#define PCI_DEVICE_ID_IMS_TT3D 0x9135
#define PCI_VENDOR_ID_AMCC 0x10e8
#define PCI_VENDOR_ID_INTERG 0x10ea
#define PCI_DEVICE_ID_INTERG_1682 0x1682
#define PCI_DEVICE_ID_INTERG_2000 0x2000
@ -2256,12 +2258,10 @@
/*
* ADDI-DATA GmbH communication cards <info@addi-data.com>
*/
#define PCI_VENDOR_ID_ADDIDATA_OLD 0x10E8
#define PCI_VENDOR_ID_ADDIDATA 0x15B8
#define PCI_DEVICE_ID_ADDIDATA_APCI7500 0x7000
#define PCI_DEVICE_ID_ADDIDATA_APCI7420 0x7001
#define PCI_DEVICE_ID_ADDIDATA_APCI7300 0x7002
#define PCI_DEVICE_ID_ADDIDATA_APCI7800 0x818E
#define PCI_DEVICE_ID_ADDIDATA_APCI7500_2 0x7009
#define PCI_DEVICE_ID_ADDIDATA_APCI7420_2 0x700A
#define PCI_DEVICE_ID_ADDIDATA_APCI7300_2 0x700B

View file

@ -22,9 +22,12 @@
* Macro which verifies @ptr is a percpu pointer without evaluating
* @ptr. This is to be used in percpu accessors to verify that the
* input parameter is a percpu pointer.
*
* + 0 is required in order to convert the pointer type from a
* potential array type to a pointer to a single item of the array.
*/
#define __verify_pcpu_ptr(ptr) do { \
const void __percpu *__vpp_verify = (typeof(ptr))NULL; \
const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
(void)__vpp_verify; \
} while (0)
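The `+ 0` matters once per-cpu variables can be arrays; a small illustration with a hypothetical variable:

/* DEFINE_PER_CPU(int, buf[4]) gives typeof(buf) == int[4], and
 * (typeof(buf))NULL would not compile. typeof(buf + 0) decays to
 * int *, so the verification cast works for arrays and scalars alike. */
static DEFINE_PER_CPU(int, buf[4]);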

View file

@ -63,30 +63,6 @@ struct perf_raw_record {
void *data;
};
/*
* single taken branch record layout:
*
* from: source instruction (may not always be a branch insn)
* to: branch target
* mispred: branch target was mispredicted
* predicted: branch target was predicted
*
* support for mispred, predicted is optional. In case it
* is not supported mispred = predicted = 0.
*
* in_tx: running in a hardware transaction
* abort: aborting a hardware transaction
*/
struct perf_branch_entry {
__u64 from;
__u64 to;
__u64 mispred:1, /* target mispredicted */
predicted:1,/* target predicted */
in_tx:1, /* in transaction */
abort:1, /* transaction abort */
reserved:60;
};
/*
* branch stack layout:
* nr: number of taken branches stored in entries[]

View file

@ -14,12 +14,16 @@
(Interruptions registers mostly)
* @status_register: Offset of the Interrupt Status Register
* @trigger_register: Offset of the Trigger setup register
* @mr_prescal_mask: Mask of the PRESCAL field in the adc MR register
* @mr_startup_mask: Mask of the STARTUP field in the adc MR register
*/
struct at91_adc_reg_desc {
u8 channel_base;
u32 drdy_mask;
u8 status_register;
u8 trigger_register;
u32 mr_prescal_mask;
u32 mr_startup_mask;
};
/**

View file

@ -0,0 +1,14 @@
#ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__
#define __LINUX_PLATFORM_DATA_EFM32_SPI_H__
#include <linux/types.h>
/**
* struct efm32_spi_pdata
* @location: pinmux location for the I/O pins (to be written to the ROUTE
* register)
*/
struct efm32_spi_pdata {
u8 location;
};
#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */

View file

@ -1,5 +1,5 @@
/*
* Maxim (Dallas) MAX3107/8 serial driver
* Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
* Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
*
@ -37,14 +37,13 @@
* };
*/
#define MAX310X_MAX_UARTS 1
#define MAX310X_MAX_UARTS 4
/* MAX310X platform data structure */
struct max310x_pdata {
/* Flags global to driver */
const u8 driver_flags:2;
const u8 driver_flags;
#define MAX310X_EXT_CLK (0x00000001) /* External clock enable */
#define MAX310X_AUTOSLEEP (0x00000002) /* Enable AutoSleep mode */
/* Flags global to UART port */
const u8 uart_flags[MAX310X_MAX_UARTS];
#define MAX310X_LOOPBACK (0x00000001) /* Loopback mode enable */
@ -60,8 +59,6 @@ struct max310x_pdata {
void (*init)(void);
/* Called before finish */
void (*exit)(void);
/* Suspend callback */
void (*suspend)(int do_suspend);
};
#endif

View file

@ -60,7 +60,6 @@
* };
*
* static struct sccnxp_pdata sc2892_info = {
* .frequency = 3686400,
* .mctrl_cfg[0] = MCTRL_SIG(DIR_OP, LINE_OP0),
* .mctrl_cfg[1] = MCTRL_SIG(DIR_OP, LINE_OP1),
* };
@ -78,8 +77,6 @@
/* SCCNXP platform data structure */
struct sccnxp_pdata {
/* Frequency (external clock or crystal) */
int frequency;
/* Shift for A0 line */
const u8 reg_shift;
/* Modem control lines configuration */

View file

@ -0,0 +1,63 @@
/*
* simplefb.h - Simple Framebuffer Device
*
* Copyright (C) 2013 David Herrmann <dh.herrmann@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __PLATFORM_DATA_SIMPLEFB_H__
#define __PLATFORM_DATA_SIMPLEFB_H__
#include <drm/drm_fourcc.h>
#include <linux/fb.h>
#include <linux/kernel.h>
/* format array, use it to initialize a "struct simplefb_format" array */
#define SIMPLEFB_FORMATS \
{ \
{ "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \
{ "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \
{ "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \
{ "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \
{ "x8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_XRGB8888 }, \
{ "a8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {24, 8}, DRM_FORMAT_ARGB8888 }, \
{ "x2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {0, 0}, DRM_FORMAT_XRGB2101010 }, \
{ "a2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {30, 2}, DRM_FORMAT_ARGB2101010 }, \
}
/*
* Data-Format for Simple-Framebuffers
* @name: unique 0-terminated name that can be used to identify the mode
* @red,green,blue: Offsets and sizes of the single RGB parts
* @transp: Offset and size of the alpha bits. length=0 means no alpha
* @fourcc: 32bit DRM four-CC code (see drm_fourcc.h)
*/
struct simplefb_format {
const char *name;
u32 bits_per_pixel;
struct fb_bitfield red;
struct fb_bitfield green;
struct fb_bitfield blue;
struct fb_bitfield transp;
u32 fourcc;
};
/*
* Simple-Framebuffer description
* If the arch-boot code creates simple-framebuffers without DT support, it
* can pass the width, height, stride and format via this platform-data object.
* The framebuffer location must be given as IORESOURCE_MEM resource.
* @format must be a format as described in "struct simplefb_format" above.
*/
struct simplefb_platform_data {
u32 width;
u32 height;
u32 stride;
const char *format;
};
#endif /* __PLATFORM_DATA_SIMPLEFB_H__ */
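A sketch of how arch-boot code might fill this in (all values are made up):

static struct simplefb_platform_data fb_pdata = {
	.width	= 1024,
	.height	= 768,
	.stride	= 1024 * 4,		/* 4 bytes per pixel */
	.format	= "x8r8g8b8",	/* must match a SIMPLEFB_FORMATS name */
};
/* The framebuffer memory itself is passed as an IORESOURCE_MEM resource. */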

View file

@ -0,0 +1,24 @@
/*
* STMicroelectronics sensors platform-data driver
*
* Copyright 2013 STMicroelectronics Inc.
*
* Denis Ciocca <denis.ciocca@st.com>
*
* Licensed under the GPL-2.
*/
#ifndef ST_SENSORS_PDATA_H
#define ST_SENSORS_PDATA_H
/**
* struct st_sensors_platform_data - Platform data for the ST sensors
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
* Available only for accelerometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
};
#endif /* ST_SENSORS_PDATA_H */
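Board code would pass the pin selection like this (a sketch; the device name is hypothetical):

static struct st_sensors_platform_data foo_accel_pdata = {
	.drdy_int_pin = 2,		/* route data-ready to INT2 */
};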

View file

@ -1,32 +0,0 @@
/*
* Copyright (C) 2010 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _TEGRA_USB_H_
#define _TEGRA_USB_H_
enum tegra_usb_operating_modes {
TEGRA_USB_DEVICE,
TEGRA_USB_HOST,
TEGRA_USB_OTG,
};
struct tegra_ehci_platform_data {
enum tegra_usb_operating_modes operating_mode;
/* power down the phy on bus suspend */
int power_down_on_bus_suspend;
void *phy_config;
int vbus_gpio;
};
#endif /* _TEGRA_USB_H_ */

View file

@ -80,7 +80,7 @@ struct pps_device {
* Global variables
*/
extern struct device_attribute pps_attrs[];
extern const struct attribute_group *pps_groups[];
/*
* Internal functions.

View file

@ -0,0 +1,122 @@
#ifndef LINUX_PREEMPT_MASK_H
#define LINUX_PREEMPT_MASK_H
#include <linux/preempt.h>
#include <asm/hardirq.h>
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
* The hardirq count can in theory reach the same as NR_IRQS.
* In reality, the number of nested IRQS is limited to the stack
* size as well. For archs with over 1000 IRQS it is not practical
* to expect that they will all nest. We give a max of 10 bits for
* hardirq nesting. An arch may choose to give less than 10 bits.
* m68k expects it to be 8.
*
* - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
* - bit 26 is the NMI_MASK
* - bit 27 is the PREEMPT_ACTIVE flag
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x03ff0000
* NMI_MASK: 0x04000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define NMI_BITS 1
#define MAX_HARDIRQ_BITS 10
#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS MAX_HARDIRQ_BITS
#endif
#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
#define __IRQ_MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#endif
#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
* in_softirq - Are we currently processing softirq or have bh disabled?
* in_serving_softirq - Are we currently processing softirq?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
*/
#define in_nmi() (preempt_count() & NMI_MASK)
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif
/*
* Are we running in atomic context? WARNING: this macro cannot
* always detect atomic context; in particular, it cannot know about
* held spinlocks in non-preemptible kernels. Thus it should not be
* used in the general case to determine whether sleeping is possible.
* Do not use in_atomic() in driver code.
*/
#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
/*
* Check whether we were atomic before we did preempt_disable():
* (used by the scheduler, *after* releasing the kernel lock)
*/
#define in_atomic_preempt_off() \
((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
#ifdef CONFIG_PREEMPT_COUNT
# define preemptible() (preempt_count() == 0 && !irqs_disabled())
#else
# define preemptible() 0
#endif
#endif /* LINUX_PREEMPT_MASK_H */
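These predicates are what callers branch on; for example (a sketch, not from this patch):

static void *grab_buffer(size_t len)
{
	/* sleeping is not allowed in interrupt context */
	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

	return kmalloc(len, gfp);
}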

View file

@ -200,7 +200,7 @@ static inline void show_regs_print_info(const char *log_lvl)
}
#endif
extern void dump_stack(void) __cold;
extern asmlinkage void dump_stack(void) __cold;
#ifndef pr_fmt
#define pr_fmt(fmt) fmt

View file

@ -55,14 +55,14 @@ struct pstore_info {
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time, char **buf,
struct pstore_info *psi);
bool *compressed, struct pstore_info *psi);
int (*write)(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
unsigned int part, int count, size_t hsize,
unsigned int part, int count, bool compressed,
size_t size, struct pstore_info *psi);
int (*write_buf)(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
unsigned int part, const char *buf, size_t hsize,
unsigned int part, const char *buf, bool compressed,
size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,
int count, struct timespec time,

View file

@ -267,8 +267,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
*/
#define list_first_or_null_rcu(ptr, type, member) \
({struct list_head *__ptr = (ptr); \
struct list_head __rcu *__next = list_next_rcu(__ptr); \
likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
struct list_head *__next = ACCESS_ONCE(__ptr->next); \
likely(__ptr != __next) ? \
list_entry_rcu(__next, type, member) : NULL; \
})
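A usage sketch for the reworked macro (struct foo and the list head are hypothetical, not from this diff): the call must happen under rcu_read_lock(), and the returned pointer may only be dereferenced inside the same read-side critical section.

struct foo {
        int value;
        struct list_head list;
};

static int first_foo_value(struct list_head *head)
{
        struct foo *f;
        int ret = -1;

        rcu_read_lock();
        f = list_first_or_null_rcu(head, struct foo, list);
        if (f)
                ret = f->value; /* safe: still inside the read section */
        rcu_read_unlock();
        return ret;
}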
/**

View file

@ -52,7 +52,7 @@ extern int rcutorture_runnable; /* for sysctl */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
extern void do_trace_rcu_torture_read(char *rcutorturename,
extern void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
unsigned long secs,
unsigned long c_old,
@ -65,7 +65,7 @@ static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
extern void do_trace_rcu_torture_read(char *rcutorturename,
extern void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
unsigned long secs,
unsigned long c_old,
@ -229,13 +229,9 @@ extern void rcu_irq_exit(void);
#ifdef CONFIG_RCU_USER_QS
extern void rcu_user_enter(void);
extern void rcu_user_exit(void);
extern void rcu_user_enter_after_irq(void);
extern void rcu_user_exit_after_irq(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_enter_after_irq(void) { }
static inline void rcu_user_exit_after_irq(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_RCU_USER_QS */
@ -1015,4 +1011,22 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/* Only for use by adaptive-ticks code. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
extern bool rcu_sys_is_idle(void);
extern void rcu_sysidle_force_exit(void);
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
static inline bool rcu_sys_is_idle(void)
{
return false;
}
static inline void rcu_sysidle_force_exit(void)
{
}
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#endif /* __LINUX_RCUPDATE_H */

View file

@ -15,6 +15,8 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
#include <linux/bug.h>
struct module;
struct device;
@ -470,6 +472,9 @@ struct regmap_irq {
* @ack_base: Base ack address. If zero then the chip is clear on read.
* @wake_base: Base address for wake enables. If zero unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous.
* @init_ack_masked: Ack all masked interrupts once during initialization.
* @mask_invert: Inverted mask register: cleared bits are masked out.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
*
* @num_regs: Number of registers in each control bank.
@ -485,9 +490,10 @@ struct regmap_irq_chip {
unsigned int ack_base;
unsigned int wake_base;
unsigned int irq_reg_stride;
unsigned int mask_invert;
unsigned int wake_invert;
bool runtime_pm;
bool init_ack_masked:1;
bool mask_invert:1;
bool wake_invert:1;
bool runtime_pm:1;
int num_regs;

View file

@ -137,6 +137,12 @@ struct regulator *__must_check devm_regulator_get(struct device *dev,
const char *id);
struct regulator *__must_check regulator_get_exclusive(struct device *dev,
const char *id);
struct regulator *__must_check devm_regulator_get_exclusive(struct device *dev,
const char *id);
struct regulator *__must_check regulator_get_optional(struct device *dev,
const char *id);
struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
const char *id);
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
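A hedged consumer sketch for the new optional variant (the supply name "vcc" and the surrounding driver are invented): regulator_get_optional() is meant for supplies a driver can genuinely run without, so an error return is handled by falling back rather than failing the probe.

static struct regulator *setup_optional_vcc(struct device *dev)
{
        struct regulator *vcc = regulator_get_optional(dev, "vcc");

        if (IS_ERR(vcc)) {
                dev_info(dev, "no vcc supply, running without it\n");
                return NULL;    /* caller treats NULL as "absent" */
        }
        return vcc;
}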
@ -217,6 +223,25 @@ devm_regulator_get(struct device *dev, const char *id)
return NULL;
}
static inline struct regulator *__must_check
regulator_get_exclusive(struct device *dev, const char *id)
{
return NULL;
}
static inline struct regulator *__must_check
regulator_get_optional(struct device *dev, const char *id)
{
return NULL;
}
static inline struct regulator *__must_check
devm_regulator_get_optional(struct device *dev, const char *id)
{
return NULL;
}
static inline void regulator_put(struct regulator *regulator)
{
}
@ -369,8 +394,11 @@ static inline int regulator_count_voltages(struct regulator *regulator)
static inline int regulator_set_voltage_tol(struct regulator *regulator,
int new_uV, int tol_uV)
{
return regulator_set_voltage(regulator,
new_uV - tol_uV, new_uV + tol_uV);
if (regulator_set_voltage(regulator, new_uV, new_uV + tol_uV) == 0)
return 0;
else
return regulator_set_voltage(regulator,
new_uV - tol_uV, new_uV + tol_uV);
}
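A short consumer sketch of the changed helper (the regulator handle and target values are illustrative): with the new ordering, a voltage in [1800000, 1850000] uV is attempted first, and only if that fails is the window widened downward to [1750000, 1850000] uV.

static int set_vdd_nominal(struct regulator *vdd)
{
        /* request 1.8 V with a 50 mV tolerance */
        return regulator_set_voltage_tol(vdd, 1800000, 50000);
}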
static inline int regulator_is_supported_voltage_tol(struct regulator *regulator,

View file

@ -39,6 +39,24 @@ enum regulator_status {
REGULATOR_STATUS_UNDEFINED,
};
/**
* Specify a range of voltages for regulator_map_voltage_linear_range() and
* regulator_list_voltage_linear_range().
*
* @min_uV: Lowest voltage in range
* @max_uV: Highest voltage in range
* @min_sel: Lowest selector for range
* @max_sel: Highest selector for range
* @uV_step: Step size
*/
struct regulator_linear_range {
unsigned int min_uV;
unsigned int max_uV;
unsigned int min_sel;
unsigned int max_sel;
unsigned int uV_step;
};
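A minimal descriptor sketch assuming a hypothetical buck converter (all selector and step values are invented for illustration); a table like this is pointed to by the new regulator_desc.linear_ranges field below, with n_linear_ranges = ARRAY_SIZE(buck_ranges), and paired with the regulator_list_voltage_linear_range()/regulator_map_voltage_linear_range() ops.

/* selectors 0-16: 0.8-1.2 V in 25 mV steps; 17-22: 1.25-1.5 V in 50 mV steps */
static const struct regulator_linear_range buck_ranges[] = {
        { .min_uV = 800000,  .max_uV = 1200000,
          .min_sel = 0,  .max_sel = 16, .uV_step = 25000 },
        { .min_uV = 1250000, .max_uV = 1500000,
          .min_sel = 17, .max_sel = 22, .uV_step = 50000 },
};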
/**
* struct regulator_ops - regulator operations.
*
@ -223,6 +241,9 @@ struct regulator_desc {
unsigned int linear_min_sel;
unsigned int ramp_delay;
const struct regulator_linear_range *linear_ranges;
int n_linear_ranges;
const unsigned int *volt_table;
unsigned int vsel_reg;
@ -326,10 +347,14 @@ int regulator_mode_to_status(unsigned int);
int regulator_list_voltage_linear(struct regulator_dev *rdev,
unsigned int selector);
int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
unsigned int selector);
int regulator_list_voltage_table(struct regulator_dev *rdev,
unsigned int selector);
int regulator_map_voltage_linear(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_iterate(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_ascend(struct regulator_dev *rdev,

View file

@ -11,6 +11,7 @@
*/
#ifndef __FAN53555_H__
#define __FAN53555_H__
/* VSEL ID */
enum {

View file

@ -134,6 +134,7 @@ struct regulation_constraints {
unsigned always_on:1; /* regulator never off when system is on */
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
unsigned apply_uV:1; /* apply uV constraint if min == max */
unsigned ramp_disable:1; /* disable ramp delay */
};
/**

View file

@ -39,7 +39,7 @@ enum {
*/
struct max8660_subdev_data {
int id;
char *name;
const char *name;
struct regulator_init_data *platform_data;
};

View file

@ -0,0 +1,44 @@
/*
* Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __LINUX_REG_PFUZE100_H
#define __LINUX_REG_PFUZE100_H
#define PFUZE100_SW1AB 0
#define PFUZE100_SW1C 1
#define PFUZE100_SW2 2
#define PFUZE100_SW3A 3
#define PFUZE100_SW3B 4
#define PFUZE100_SW4 5
#define PFUZE100_SWBST 6
#define PFUZE100_VSNVS 7
#define PFUZE100_VREFDDR 8
#define PFUZE100_VGEN1 9
#define PFUZE100_VGEN2 10
#define PFUZE100_VGEN3 11
#define PFUZE100_VGEN4 12
#define PFUZE100_VGEN5 13
#define PFUZE100_VGEN6 14
#define PFUZE100_MAX_REGULATOR 15
struct regulator_init_data;
struct pfuze_regulator_platform_data {
struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR];
};
#endif /* __LINUX_REG_PFUZE100_H */

View file

@ -1034,6 +1034,9 @@ struct task_struct {
#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
struct task_struct *last_wakee;
unsigned long wakee_flips;
unsigned long wakee_flip_decay_ts;
#endif
int on_rq;
@ -1532,6 +1535,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
*
* Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(struct task_struct *p)
{
@ -1543,6 +1548,8 @@ static inline int pid_alive(struct task_struct *p)
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
*
* Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
@ -1894,6 +1901,8 @@ extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
* @p: the task in question.
*
* Return: 1 if @p is an idle task. 0 otherwise.
*/
static inline bool is_idle_task(const struct task_struct *p)
{

View file

@ -74,7 +74,7 @@ struct spi_device {
struct spi_master *master;
u32 max_speed_hz;
u8 chip_select;
u8 mode;
u16 mode;
#define SPI_CPHA 0x01 /* clock phase */
#define SPI_CPOL 0x02 /* clock polarity */
#define SPI_MODE_0 (0|0) /* (original MicroWire) */
@ -87,6 +87,10 @@ struct spi_device {
#define SPI_LOOP 0x20 /* loopback mode */
#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
#define SPI_READY 0x80 /* slave pulls low to pause */
#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */
#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
u8 bits_per_word;
int irq;
void *controller_state;
@ -233,6 +237,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* supported. If set, the SPI core will reject any transfer with an
* unsupported bits_per_word. If not set, this value is simply ignored,
* and it's up to the individual driver to perform any validation.
* @min_speed_hz: Lowest supported transfer speed
* @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for SPI bus locking
@ -254,6 +260,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @busy: message pump is busy
* @running: message pump is running
* @rt: whether this queue is set to run as a realtime task
* @auto_runtime_pm: the core should ensure a runtime PM reference is held
* while the hardware is prepared, using the parent
* device for the spidev
* @prepare_transfer_hardware: a message will soon arrive from the queue
* so the subsystem requests the driver to prepare the transfer hardware
* by issuing this call
@ -309,9 +318,13 @@ struct spi_master {
/* bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0UL : (BIT(bits) - 1))
#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1))
#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1))
/* limits on transfer speed */
u32 min_speed_hz;
u32 max_speed_hz;
/* other constraints relevant to this driver */
u16 flags;
#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */
@ -374,11 +387,13 @@ struct spi_master {
bool busy;
bool running;
bool rt;
bool auto_runtime_pm;
int (*prepare_transfer_hardware)(struct spi_master *master);
int (*transfer_one_message)(struct spi_master *master,
struct spi_message *mesg);
int (*unprepare_transfer_hardware)(struct spi_master *master);
/* gpio chip select */
int *cs_gpios;
};
@ -448,6 +463,10 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
* @rx_buf: data to be read (dma-safe memory), or NULL
* @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
* @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
* @tx_nbits: number of bits used for writing. If 0 the default
* (SPI_NBITS_SINGLE) is used.
* @rx_nbits: number of bits used for reading. If 0 the default
* (SPI_NBITS_SINGLE) is used.
* @len: size of rx and tx buffers (in bytes)
* @speed_hz: Select a speed other than the device default for this
* transfer. If 0 the default (from @spi_device) is used.
@ -502,6 +521,11 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
* by the results of previous messages and where the whole transaction
* ends when the chipselect goes inactive.
*
* When SPI can transfer in 1x, 2x or 4x, it gets this transfer information
* from the device through @tx_nbits and @rx_nbits. For bidirectional
* transfers both should be set. The transfer mode is selected with
* SPI_NBITS_SINGLE (1x), SPI_NBITS_DUAL (2x) or SPI_NBITS_QUAD (4x).
*
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
* Zero-initialize every field you don't set up explicitly, to
@ -522,6 +546,11 @@ struct spi_transfer {
dma_addr_t rx_dma;
unsigned cs_change:1;
u8 tx_nbits;
u8 rx_nbits;
#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
u8 bits_per_word;
u16 delay_usecs;
u32 speed_hz;
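A hedged sketch of a quad-mode write using the new per-transfer field (the driver context is hypothetical; it assumes the master advertises SPI_TX_QUAD and that the device's mode was negotiated at setup time):

static int quad_write(struct spi_device *spi, const void *buf, size_t len)
{
        struct spi_transfer t = {
                .tx_buf   = buf,
                .len      = len,
                .tx_nbits = SPI_NBITS_QUAD,     /* use all four data lines */
        };
        struct spi_message m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spi_sync(spi, &m);
}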
@ -578,6 +607,7 @@ struct spi_message {
/* completion is reported through a callback */
void (*complete)(void *context);
void *context;
unsigned frame_length;
unsigned actual_length;
int status;
@ -869,7 +899,7 @@ struct spi_board_info {
/* mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
u8 mode;
u16 mode;
/* ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff

View file

@ -4,11 +4,7 @@
#include <linux/workqueue.h>
struct spi_bitbang {
struct workqueue_struct *workqueue;
struct work_struct work;
spinlock_t lock;
struct list_head queue;
u8 busy;
u8 use_dma;
u8 flags; /* extra spi->mode support */
@ -41,7 +37,6 @@ struct spi_bitbang {
*/
extern int spi_bitbang_setup(struct spi_device *spi);
extern void spi_bitbang_cleanup(struct spi_device *spi);
extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m);
extern int spi_bitbang_setup_transfer(struct spi_device *spi,
struct spi_transfer *t);

View file

@ -117,9 +117,17 @@ do { \
#endif /*arch_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
/*
* Despite its name it doesn't necessarily have to be a full barrier.
* It should only guarantee that a STORE before the critical section
* cannot be reordered with a LOAD inside this section.
* spin_lock() is a one-way barrier, so this LOAD cannot escape out
* of the region. The default implementation therefore only ensures
* that a STORE cannot move into the critical section; smp_wmb()
* serializes it with the STORE done by spin_lock().
*/
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock() smp_wmb()
#endif
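An illustrative pairing sketch (struct my_ctx and its fields are invented): the barrier orders a STORE issued before the lock against LOADs performed inside the critical section, which plain spin_lock() alone does not guarantee.

struct my_ctx {
        spinlock_t lock;
        int pending;
        int consumer_waiting;
};

static void publish_and_check(struct my_ctx *ctx)
{
        ctx->pending = 1;               /* STORE before the critical section */
        smp_mb__before_spinlock();      /* cannot pass LOADs inside the lock */
        spin_lock(&ctx->lock);
        if (ctx->consumer_waiting) {    /* LOAD sees a consistent ordering */
                /* ... wake the consumer ... */
        }
        spin_unlock(&ctx->lock);
}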
/**

View file

@ -121,6 +121,7 @@ struct rpc_task_setup {
#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_TASK_SENT 0x0800 /* message was sent */
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)

View file

@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
swp_entry_t arch_entry;
BUG_ON(pte_file(pte));
if (pte_swp_soft_dirty(pte))
pte = pte_swp_clear_soft_dirty(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

View file

@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
int __user *);
#else
#ifdef CONFIG_CLONE_BACKWARDS3
asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
int __user *, int);
#else
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
int __user *, int);
#endif
#endif
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,

View file

@ -51,9 +51,9 @@ do { \
static struct lock_class_key __key; \
\
(attr)->key = &__key; \
} while(0)
} while (0)
#else
#define sysfs_attr_init(attr) do {} while(0)
#define sysfs_attr_init(attr) do {} while (0)
#endif
struct attribute_group {
@ -69,7 +69,7 @@ struct attribute_group {
* for examples..
*/
#define __ATTR(_name,_mode,_show,_store) { \
#define __ATTR(_name, _mode, _show, _store) { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
@ -80,6 +80,11 @@ struct attribute_group {
.show = _name##_show, \
}
#define __ATTR_WO(_name) { \
.attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
.store = _name##_store, \
}
#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \
_name##_show, _name##_store)
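A small sketch of the new write-only helper (the "reset" attribute and its handler are hypothetical); __ATTR_RW() composes the matching read-write case from _name##_show and _name##_store in the same way.

static ssize_t reset_store(struct kobject *kobj, struct kobj_attribute *attr,
                           const char *buf, size_t count)
{
        /* ... parse buf and trigger the reset ... */
        return count;
}

/* expands to { .attr = { "reset", S_IWUSR }, .store = reset_store } */
static struct kobj_attribute reset_attribute = __ATTR_WO(reset);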
@ -108,8 +113,6 @@ static const struct attribute_group _name##_group = { \
}; \
__ATTRIBUTE_GROUPS(_name)
#define attr_name(_attr) (_attr).attr.name
struct file;
struct vm_area_struct;
@ -119,7 +122,7 @@ struct bin_attribute {
void *private;
ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*write)(struct file *,struct kobject *, struct bin_attribute *,
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
struct vm_area_struct *vma);
@ -153,7 +156,7 @@ struct bin_attribute {
#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \
(S_IWUSR | S_IRUGO), _name##_read, \
_name##_write)
_name##_write, _size)
#define __BIN_ATTR_NULL __ATTR_NULL
@ -168,8 +171,8 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size)
struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *,char *);
ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
ssize_t (*show)(struct kobject *, struct attribute *, char *);
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
const void *(*namespace)(struct kobject *, const struct attribute *);
};
@ -215,10 +218,14 @@ void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
int __must_check sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp);
int __must_check sysfs_create_groups(struct kobject *kobj,
const struct attribute_group **groups);
int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_groups(struct kobject *kobj,
const struct attribute_group **groups);
int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group);
void sysfs_remove_file_from_group(struct kobject *kobj,
@ -343,6 +350,12 @@ static inline int sysfs_create_group(struct kobject *kobj,
return 0;
}
static inline int sysfs_create_groups(struct kobject *kobj,
const struct attribute_group **groups)
{
return 0;
}
static inline int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp)
{
@ -354,6 +367,11 @@ static inline void sysfs_remove_group(struct kobject *kobj,
{
}
static inline void sysfs_remove_groups(struct kobject *kobj,
const struct attribute_group **groups)
{
}
static inline int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{

View file

@ -10,6 +10,8 @@
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
@ -158,20 +160,51 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;
extern cpumask_var_t tick_nohz_full_mask;
static inline bool tick_nohz_full_enabled(void)
{
if (!static_key_false(&context_tracking_enabled))
return false;
return tick_nohz_full_running;
}
static inline bool tick_nohz_full_cpu(int cpu)
{
if (!tick_nohz_full_enabled())
return false;
return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}
extern void tick_nohz_init(void);
extern int tick_nohz_full_cpu(int cpu);
extern void tick_nohz_full_check(void);
extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_all(void);
extern void tick_nohz_task_switch(struct task_struct *tsk);
extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
static inline void tick_nohz_init(void) { }
static inline int tick_nohz_full_cpu(int cpu) { return 0; }
static inline void tick_nohz_full_check(void) { }
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
static inline void tick_nohz_task_switch(struct task_struct *tsk) { }
static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
#endif
static inline void tick_nohz_full_check(void)
{
if (tick_nohz_full_enabled())
__tick_nohz_full_check();
}
static inline void tick_nohz_task_switch(struct task_struct *tsk)
{
if (tick_nohz_full_enabled())
__tick_nohz_task_switch(tsk);
}
#endif

View file

@ -10,6 +10,8 @@
#include <linux/mutex.h>
#include <linux/tty_flags.h>
#include <uapi/linux/tty.h>
#include <linux/rwsem.h>
#include <linux/llist.h>
@ -29,9 +31,10 @@
#define __DISABLED_CHAR '\0'
struct tty_buffer {
struct tty_buffer *next;
char *char_buf_ptr;
unsigned char *flag_buf_ptr;
union {
struct tty_buffer *next;
struct llist_node free;
};
int used;
int size;
int commit;
@ -40,25 +43,25 @@ struct tty_buffer {
unsigned long data[0];
};
/*
* We default to dicing tty buffer allocations to this many characters
* in order to avoid multiple page allocations. We know the size of
* tty_buffer itself, but it must also be taken into account that
* the buffer is 256-byte aligned. See tty_buffer_find() for the
* allocation logic this must match.
*/
#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
{
return ((unsigned char *)b->data) + ofs;
}
static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs)
{
return (char *)char_buf_ptr(b, ofs) + b->size;
}
struct tty_bufhead {
struct work_struct work;
spinlock_t lock;
struct tty_buffer *head; /* Queue head */
struct work_struct work;
struct mutex lock;
atomic_t priority;
struct tty_buffer sentinel;
struct llist_head free; /* Free queue head */
atomic_t memory_used; /* In-use buffers excluding free list */
struct tty_buffer *tail; /* Active buffer */
struct tty_buffer *free; /* Free queue head */
int memory_used; /* Buffer space used excluding
free queue */
};
/*
* When a break, frame error, or parity error happens, these codes are
@ -199,9 +202,6 @@ struct tty_port {
wait_queue_head_t close_wait; /* Close waiters */
wait_queue_head_t delta_msr_wait; /* Modem status change */
unsigned long flags; /* TTY flags ASY_*/
unsigned long iflags; /* TTYP_ internal flags */
#define TTYP_FLUSHING 1 /* Flushing to ldisc in progress */
#define TTYP_FLUSHPENDING 2 /* Queued buffer flush pending */
unsigned char console:1, /* port is a console */
low_latency:1; /* direct buffer flush */
struct mutex mutex; /* Locking */
@ -238,14 +238,16 @@ struct tty_struct {
int index;
/* Protects ldisc changes: Lock tty not pty */
struct mutex ldisc_mutex;
struct ld_semaphore ldisc_sem;
struct tty_ldisc *ldisc;
struct mutex atomic_write_lock;
struct mutex legacy_mutex;
struct mutex termios_mutex;
struct mutex throttle_mutex;
struct rw_semaphore termios_rwsem;
struct mutex winsize_mutex;
spinlock_t ctrl_lock;
/* Termios values are protected by the termios mutex */
/* Termios values are protected by the termios rwsem */
struct ktermios termios, termios_locked;
struct termiox *termiox; /* May be NULL for unsupported */
char name[64];
@ -253,7 +255,7 @@ struct tty_struct {
struct pid *session;
unsigned long flags;
int count;
struct winsize winsize; /* termios mutex */
struct winsize winsize; /* winsize_mutex */
unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1;
unsigned char ctrl_status; /* ctrl_lock */
unsigned int receive_room; /* Bytes free for queue */
@ -303,10 +305,7 @@ struct tty_file_private {
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
#define TTY_DEBUG 4 /* Debugging */
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
#define TTY_PUSH 6 /* n_tty private */
#define TTY_CLOSING 7 /* ->close() in progress */
#define TTY_LDISC 9 /* Line discipline attached */
#define TTY_LDISC_CHANGING 10 /* Line discipline changing */
#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
@ -559,6 +558,19 @@ extern void tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern void tty_ldisc_begin(void);
static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
char *f, int count)
{
if (ld->ops->receive_buf2)
count = ld->ops->receive_buf2(ld->tty, p, f, count);
else {
count = min_t(int, count, ld->tty->receive_room);
if (count)
ld->ops->receive_buf(ld->tty, p, f, count);
}
return count;
}
/* n_tty.c */
extern struct tty_ldisc_ops tty_ldisc_N_TTY;

View file

@ -1,6 +1,7 @@
#ifndef _LINUX_TTY_FLIP_H
#define _LINUX_TTY_FLIP_H
extern int tty_buffer_space_avail(struct tty_port *port);
extern int tty_buffer_request_room(struct tty_port *port, size_t size);
extern int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size);
@ -18,8 +19,8 @@ static inline int tty_insert_flip_char(struct tty_port *port,
{
struct tty_buffer *tb = port->buf.tail;
if (tb && tb->used < tb->size) {
tb->flag_buf_ptr[tb->used] = flag;
tb->char_buf_ptr[tb->used++] = ch;
*flag_buf_ptr(tb, tb->used) = flag;
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
return tty_insert_flip_string_flags(port, &ch, &flag, 1);
@ -31,4 +32,7 @@ static inline int tty_insert_flip_string(struct tty_port *port,
return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size);
}
extern void tty_buffer_lock_exclusive(struct tty_port *port);
extern void tty_buffer_unlock_exclusive(struct tty_port *port);
#endif /* _LINUX_TTY_FLIP_H */

View file

@ -109,6 +109,17 @@
*
* Tells the discipline that the DCD pin has changed its status.
* Used exclusively by the N_PPS (Pulse-Per-Second) line discipline.
*
* int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
* char *fp, int count);
*
* This function is called by the low-level tty driver to send
* characters received by the hardware to the line discipline for
* processing. <cp> is a pointer to the buffer of input
* characters received by the device. <fp> is a pointer to the
* flag bytes, which indicate whether a character was
* received with a parity error, etc.
* If assigned, prefer this function for automatic flow control.
*/
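A hedged line-discipline sketch of the new hook (all names invented): receive_buf2() may consume fewer than count bytes and returns how many it accepted, which is what allows the tty core to apply flow control automatically.

static int my_ldisc_receive_buf2(struct tty_struct *tty,
                                 const unsigned char *cp, char *fp, int count)
{
        int taken = 0;

        while (taken < count) {
                if (fp && fp[taken] != TTY_NORMAL)
                        break;          /* stop at the first flagged byte */
                /* ... consume cp[taken] ... */
                taken++;
        }
        return taken;                   /* unconsumed bytes are re-offered */
}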
#include <linux/fs.h>
@ -195,6 +206,8 @@ struct tty_ldisc_ops {
void (*write_wakeup)(struct tty_struct *);
void (*dcd_change)(struct tty_struct *, unsigned int);
void (*fasync)(struct tty_struct *tty, int on);
int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
char *fp, int count);
struct module *owner;
@ -203,8 +216,7 @@ struct tty_ldisc_ops {
struct tty_ldisc {
struct tty_ldisc_ops *ops;
atomic_t users;
wait_queue_head_t wq_idle;
struct tty_struct *tty;
};
#define TTY_LDISC_MAGIC 0x5403

View file

@ -337,6 +337,7 @@ struct usb_bus {
* the ep queue on a short transfer
* with the URB_SHORT_NOT_OK flag set.
*/
unsigned no_sg_constraint:1; /* no sg constraint */
unsigned sg_tablesize; /* 0 or largest number of sg list entries */
int devnum_next; /* Next open device number in
@ -684,6 +685,11 @@ static inline bool usb_device_supports_ltm(struct usb_device *udev)
return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT;
}
static inline bool usb_device_no_sg_constraint(struct usb_device *udev)
{
return udev && udev->bus && udev->bus->no_sg_constraint;
}
/*-------------------------------------------------------------------------*/
@ -708,7 +714,10 @@ extern int usb_driver_claim_interface(struct usb_driver *driver,
* usb_interface_claimed - returns true iff an interface is claimed
* @iface: the interface being checked
*
* Returns true (nonzero) iff the interface is claimed, else false (zero).
* Return: %true (nonzero) iff the interface is claimed, else %false
* (zero).
*
* Note:
* Callers must own the driver model's usb bus readlock. So driver
* probe() entries don't need extra locking, but other call contexts
* may need to explicitly claim that lock.
@ -745,8 +754,9 @@ extern struct usb_host_interface *usb_find_alt_setting(
* @buf: where to put the string
* @size: how big is "buf"?
*
* Returns length of the string (> 0) or negative if size was too small.
* Return: Length of the string (> 0) or negative if size was too small.
*
* Note:
* This identifier is intended to be "stable", reflecting physical paths in
* hardware such as physical bus addresses for host controllers or ports on
* USB hubs. That makes it stay the same until systems are physically
@ -1247,7 +1257,9 @@ typedef void (*usb_complete_t)(struct urb *);
* the device driver is saying that it provided this DMA address,
* which the host controller driver should use in preference to the
* transfer_buffer.
* @sg: scatter gather buffer list
* @sg: scatter gather buffer list, the buffer size of each element in
* the list (except the last) must be divisible by the endpoint's
* max packet size if no_sg_constraint isn't set in 'struct usb_bus'
* @num_mapped_sgs: (internal) number of mapped sg entries
* @num_sgs: number of entries in the sg list
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
@ -1534,10 +1546,16 @@ static inline void usb_fill_int_urb(struct urb *urb,
urb->transfer_buffer_length = buffer_length;
urb->complete = complete_fn;
urb->context = context;
if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER)
if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
/* make sure interval is within allowed range */
interval = clamp(interval, 1, 16);
urb->interval = 1 << (interval - 1);
else
} else {
urb->interval = interval;
}
urb->start_frame = -1;
}
@ -1570,7 +1588,7 @@ extern int usb_anchor_empty(struct usb_anchor *anchor);
* usb_urb_dir_in - check if an URB describes an IN transfer
* @urb: URB to be checked
*
* Returns 1 if @urb describes an IN transfer (device-to-host),
* Return: 1 if @urb describes an IN transfer (device-to-host),
* otherwise 0.
*/
static inline int usb_urb_dir_in(struct urb *urb)
@ -1582,7 +1600,7 @@ static inline int usb_urb_dir_in(struct urb *urb)
* usb_urb_dir_out - check if an URB describes an OUT transfer
* @urb: URB to be checked
*
* Returns 1 if @urb describes an OUT transfer (host-to-device),
* Return: 1 if @urb describes an OUT transfer (host-to-device),
* otherwise 0.
*/
static inline int usb_urb_dir_out(struct urb *urb)

View file

@ -18,12 +18,17 @@ struct ci_hdrc_platform_data {
unsigned long flags;
#define CI_HDRC_REGS_SHARED BIT(0)
#define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1)
#define CI_HDRC_PULLUP_ON_VBUS BIT(2)
#define CI_HDRC_DISABLE_STREAMING BIT(3)
/*
* Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
* but otg is not supported (no register otgsc).
*/
#define CI_HDRC_DUAL_ROLE_NOT_OTG BIT(4)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
void (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
};
/* Default offset of capability registers */

View file

@ -1,30 +0,0 @@
/*
* Copyright (C) 2013 by Texas Instruments
*
* The Inventra Controller Driver for Linux is free software; you
* can redistribute it and/or modify it under the terms of the GNU
* General Public License version 2 as published by the Free Software
* Foundation.
*/
#ifndef __DWC3_OMAP_H__
#define __DWC3_OMAP_H__
enum omap_dwc3_vbus_id_status {
OMAP_DWC3_UNKNOWN = 0,
OMAP_DWC3_ID_GROUND,
OMAP_DWC3_ID_FLOAT,
OMAP_DWC3_VBUS_VALID,
OMAP_DWC3_VBUS_OFF,
};
#if (defined(CONFIG_USB_DWC3) || defined(CONFIG_USB_DWC3_MODULE))
extern int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status);
#else
static inline int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status)
{
return -ENODEV;
}
#endif
#endif /* __DWC3_OMAP_H__ */

View file

@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
struct usb_ep;
@ -475,6 +476,7 @@ struct usb_gadget_ops {
/**
* struct usb_gadget - represents a usb slave device
* @work: (internal use) Workqueue to be used for sysfs_notify()
* @ops: Function pointers used to access hardware-specific operations.
* @ep0: Endpoint zero, used when reading or writing responses to
* driver setup() requests
@ -520,6 +522,7 @@ struct usb_gadget_ops {
* device is acting as a B-Peripheral (so is_a_peripheral is false).
*/
struct usb_gadget {
struct work_struct work;
/* readonly to gadget driver */
const struct usb_gadget_ops *ops;
struct usb_ep *ep0;
@ -538,6 +541,7 @@ struct usb_gadget {
unsigned out_epnum;
unsigned in_epnum;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
static inline void set_gadget_data(struct usb_gadget *gadget, void *data)
{ dev_set_drvdata(&gadget->dev, data); }

View file

@ -22,6 +22,7 @@
#ifdef __KERNEL__
#include <linux/rwsem.h>
#include <linux/interrupt.h>
#define MAX_TOPO_LEVEL 6
@ -67,6 +68,13 @@
/*-------------------------------------------------------------------------*/
struct giveback_urb_bh {
bool running;
spinlock_t lock;
struct list_head head;
struct tasklet_struct bh;
};
struct usb_hcd {
/*
@ -139,6 +147,9 @@ struct usb_hcd {
resource_size_t rsrc_len; /* memory/io resource length */
unsigned power_budget; /* in mA, 0 = no limit */
struct giveback_urb_bh high_prio_bh;
struct giveback_urb_bh low_prio_bh;
/* bandwidth_mutex should be taken before adding or removing
* any new bus bandwidth constraints:
* 1. Before adding a configuration for a new device.
@ -221,6 +232,7 @@ struct hc_driver {
#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/
#define HCD_USB3 0x0040 /* USB 3.0 */
#define HCD_MASK 0x0070
#define HCD_BH 0x0100 /* URB complete in BH context */
/* called to init HCD and root hub */
int (*reset) (struct usb_hcd *hcd);
@ -361,6 +373,11 @@ struct hc_driver {
int (*find_raw_port_number)(struct usb_hcd *, int);
};
static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
{
return hcd->driver->flags & HCD_BH;
}
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
int status);
@ -411,7 +428,7 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_PM
extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif
#endif /* CONFIG_PCI */

View file

@ -7,19 +7,27 @@
#ifndef __LINUX_USB_OF_H
#define __LINUX_USB_OF_H
#include <linux/usb/ch9.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
#if IS_ENABLED(CONFIG_OF)
enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
#else
static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
{
return USB_DR_MODE_UNKNOWN;
}
static inline enum usb_device_speed
of_usb_get_maximum_speed(struct device_node *np)
{
return USB_SPEED_UNKNOWN;
}
#endif
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_PHY)
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np);
#else
static inline enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np)

View file

@ -142,7 +142,7 @@ extern void usb_remove_phy(struct usb_phy *);
/* helpers for direct access thru low-level io interface */
static inline int usb_phy_io_read(struct usb_phy *x, u32 reg)
{
if (x->io_ops && x->io_ops->read)
if (x && x->io_ops && x->io_ops->read)
return x->io_ops->read(x, reg);
return -EINVAL;
@ -150,7 +150,7 @@ static inline int usb_phy_io_read(struct usb_phy *x, u32 reg)
static inline int usb_phy_io_write(struct usb_phy *x, u32 val, u32 reg)
{
if (x->io_ops && x->io_ops->write)
if (x && x->io_ops && x->io_ops->write)
return x->io_ops->write(x, val, reg);
return -EINVAL;
@ -159,7 +159,7 @@ static inline int usb_phy_io_write(struct usb_phy *x, u32 val, u32 reg)
static inline int
usb_phy_init(struct usb_phy *x)
{
if (x->init)
if (x && x->init)
return x->init(x);
return 0;
@ -168,14 +168,14 @@ usb_phy_init(struct usb_phy *x)
static inline void
usb_phy_shutdown(struct usb_phy *x)
{
if (x->shutdown)
if (x && x->shutdown)
x->shutdown(x);
}
static inline int
usb_phy_vbus_on(struct usb_phy *x)
{
if (!x->set_vbus)
if (!x || !x->set_vbus)
return 0;
return x->set_vbus(x, true);
@ -184,7 +184,7 @@ usb_phy_vbus_on(struct usb_phy *x)
static inline int
usb_phy_vbus_off(struct usb_phy *x)
{
if (!x->set_vbus)
if (!x || !x->set_vbus)
return 0;
return x->set_vbus(x, false);
@ -258,7 +258,7 @@ usb_phy_set_power(struct usb_phy *x, unsigned mA)
static inline int
usb_phy_set_suspend(struct usb_phy *x, int suspend)
{
if (x->set_suspend != NULL)
if (x && x->set_suspend != NULL)
return x->set_suspend(x, suspend);
else
return 0;
@ -267,7 +267,7 @@ usb_phy_set_suspend(struct usb_phy *x, int suspend)
static inline int
usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
{
if (x->notify_connect)
if (x && x->notify_connect)
return x->notify_connect(x, speed);
else
return 0;
@ -276,7 +276,7 @@ usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
static inline int
usb_phy_notify_disconnect(struct usb_phy *x, enum usb_device_speed speed)
{
if (x->notify_disconnect)
if (x && x->notify_disconnect)
return x->notify_disconnect(x, speed);
else
return 0;

View file

@ -18,19 +18,36 @@
#include <linux/clk.h>
#include <linux/usb/otg.h>
/*
* utmi_pll_config_in_car_module: true if the UTMI PLL configuration registers
* should be set up by clk-tegra, false if by the PHY code
* has_hostpc: true if the USB controller has the HOSTPC extension, which
* changes the location of the PHCD and PTS fields
* requires_usbmode_setup: true if the USBMODE register needs to be set to
* enter host mode
* requires_extra_tuning_parameters: true if xcvr_hsslew, hssquelch_level
* and hsdiscon_level should be set for adequate signal quality
*/
struct tegra_phy_soc_config {
bool utmi_pll_config_in_car_module;
bool has_hostpc;
bool requires_usbmode_setup;
bool requires_extra_tuning_parameters;
};
struct tegra_utmip_config {
u8 hssync_start_delay;
u8 elastic_limit;
u8 idle_wait_delay;
u8 term_range_adj;
bool xcvr_setup_use_fuses;
u8 xcvr_setup;
u8 xcvr_lsfslew;
u8 xcvr_lsrslew;
};
struct tegra_ulpi_config {
int reset_gpio;
const char *clk;
u8 xcvr_hsslew;
u8 hssquelch_level;
u8 hsdiscon_level;
};
enum tegra_usb_phy_port_speed {
@ -39,12 +56,6 @@ enum tegra_usb_phy_port_speed {
TEGRA_USB_PHY_PORT_SPEED_HIGH,
};
enum tegra_usb_phy_mode {
TEGRA_USB_PHY_MODE_DEVICE,
TEGRA_USB_PHY_MODE_HOST,
TEGRA_USB_PHY_MODE_OTG,
};
struct tegra_xtal_freq;
struct tegra_usb_phy {
@ -55,18 +66,17 @@ struct tegra_usb_phy {
struct clk *clk;
struct clk *pll_u;
struct clk *pad_clk;
enum tegra_usb_phy_mode mode;
struct regulator *vbus;
enum usb_dr_mode mode;
void *config;
const struct tegra_phy_soc_config *soc_config;
struct usb_phy *ulpi;
struct usb_phy u_phy;
struct device *dev;
bool is_legacy_phy;
bool is_ulpi_phy;
int reset_gpio;
};
struct usb_phy *tegra_usb_get_phy(struct device_node *dn);
void tegra_usb_phy_preresume(struct usb_phy *phy);
void tegra_usb_phy_postresume(struct usb_phy *phy);

View file

@ -3,7 +3,7 @@
#include <linux/usb/otg.h>
struct nop_usb_xceiv_platform_data {
struct usb_phy_gen_xceiv_platform_data {
enum usb_phy_type type;
unsigned long clk_rate;
@ -12,7 +12,7 @@ struct nop_usb_xceiv_platform_data {
unsigned int needs_reset:1;
};
#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
/* sometimes transceivers are accessed only through e.g. ULPI */
extern void usb_nop_xceiv_register(void);
extern void usb_nop_xceiv_unregister(void);

View file

@ -34,6 +34,7 @@ struct usbnet {
struct mutex phy_mutex;
unsigned char suspend_count;
unsigned char pkt_cnt, pkt_err;
unsigned can_dma_sg:1;
/* i/o info: pipes etc */
unsigned in, out;

View file

@ -66,6 +66,7 @@ enum {
WA_ENABLE = 0x01,
WA_RESET = 0x02,
RPIPE_PAUSE = 0x1,
RPIPE_STALL = 0x2,
};
/* Responses from Get Status request ([WUSB] section 8.3.1.6) */

View file

@ -23,6 +23,7 @@ struct user_namespace {
struct uid_gid_map projid_map;
atomic_t count;
struct user_namespace *parent;
int level;
kuid_t owner;
kgid_t group;
unsigned int proc_inum;

View file

@ -34,10 +34,12 @@ extern void vmpressure_cleanup(struct vmpressure *vmpr);
extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css);
extern int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
extern int vmpressure_register_event(struct cgroup_subsys_state *css,
struct cftype *cft,
struct eventfd_ctx *eventfd,
const char *args);
extern void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
extern void vmpressure_unregister_event(struct cgroup_subsys_state *css,
struct cftype *cft,
struct eventfd_ctx *eventfd);
#else
static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,

View file

@ -1,18 +1,68 @@
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H
#include <linux/context_tracking_state.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif
struct task_struct;
/*
* vtime_accounting_enabled() definitions/declarations
*/
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline bool vtime_accounting_enabled(void) { return true; }
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static inline bool vtime_accounting_enabled(void)
{
if (static_key_false(&context_tracking_enabled)) {
if (context_tracking_active())
return true;
}
return false;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline bool vtime_accounting_enabled(void) { return false; }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
/*
* Common vtime APIs
*/
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
extern void vtime_task_switch(struct task_struct *prev);
#else
extern void vtime_common_task_switch(struct task_struct *prev);
static inline void vtime_task_switch(struct task_struct *prev)
{
if (vtime_accounting_enabled())
vtime_common_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
extern void vtime_account_user(struct task_struct *tsk);
extern void vtime_account_irq_enter(struct task_struct *tsk);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline bool vtime_accounting_enabled(void) { return true; }
#endif
#ifdef __ARCH_HAS_VTIME_ACCOUNT
extern void vtime_account_irq_enter(struct task_struct *tsk);
#else
extern void vtime_common_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_enter(struct task_struct *tsk)
{
if (vtime_accounting_enabled())
vtime_common_account_irq_enter(tsk);
}
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
@ -20,14 +70,20 @@ static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_user(struct task_struct *tsk) { }
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline bool vtime_accounting_enabled(void) { return false; }
#endif
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_account_irq_exit(struct task_struct *tsk);
extern bool vtime_accounting_enabled(void);
extern void vtime_gen_account_irq_exit(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
if (vtime_accounting_enabled())
vtime_gen_account_irq_exit(tsk);
}
extern void vtime_user_enter(struct task_struct *tsk);
static inline void vtime_user_exit(struct task_struct *tsk)
{
vtime_account_user(tsk);
@ -35,7 +91,7 @@ static inline void vtime_user_exit(struct task_struct *tsk)
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
/* On hard|softirq exit we always account to hard|softirq cputime */

View file

@ -811,6 +811,63 @@ do { \
__ret; \
})
#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
lock, ret) \
do { \
DEFINE_WAIT(__wait); \
\
for (;;) { \
prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
if (condition) \
break; \
if (signal_pending(current)) { \
ret = -ERESTARTSYS; \
break; \
} \
spin_unlock_irq(&lock); \
ret = schedule_timeout(ret); \
spin_lock_irq(&lock); \
if (!ret) \
break; \
} \
finish_wait(&wq, &__wait); \
} while (0)
/**
* wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
* The condition is checked under the lock. This is expected
* to be called with the lock taken.
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @lock: a locked spinlock_t, which will be released before schedule()
* and reacquired afterwards.
* @timeout: timeout, in jiffies
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received. The @condition is
* checked each time the waitqueue @wq is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* This is supposed to be called while holding the lock. The lock is
* dropped before going to sleep and is reacquired afterwards.
*
* The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
* was interrupted by a signal, and the remaining jiffies if the
* condition evaluated to true before the timeout elapsed.
*/
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
timeout) \
({ \
int __ret = timeout; \
\
if (!(condition)) \
__wait_event_interruptible_lock_irq_timeout( \
wq, condition, lock, __ret); \
__ret; \
})
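A usage sketch under assumed names (struct my_dev with lock, wq and done are hypothetical): the caller holds the spinlock across the call, and the return value distinguishes timeout, signal and success exactly as documented above.

static int my_dev_wait(struct my_dev *dev)
{
        int ret;

        spin_lock_irq(&dev->lock);
        ret = wait_event_interruptible_lock_irq_timeout(dev->wq, dev->done,
                                                        dev->lock, HZ);
        spin_unlock_irq(&dev->lock);

        if (ret == 0)
                return -ETIMEDOUT;      /* timeout, condition still false */
        if (ret < 0)
                return ret;             /* -ERESTARTSYS: signal received */
        return 0;                       /* condition became true in time */
}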
/*
* These are the old interfaces to sleep waiting for an event.

View file

@ -295,7 +295,12 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
* Documentation/workqueue.txt.
*/
enum {
WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
/*
* All wqs are now non-reentrant making the following flag
* meaningless. Will be removed.
*/
WQ_NON_REENTRANT = 1 << 0, /* DEPRECATED */
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */