Merge branch 'master' of /home/davem/src/GIT/linux-2.6/

Conflicts:
	include/linux/mod_devicetable.h
	scripts/mod/file2alias.c

commit 2ec8c6bb5d
577 changed files with 24623 additions and 14821 deletions
include/linux/acpi.h

@@ -116,11 +116,12 @@ extern unsigned long acpi_realmode_flags;
 
 int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
 
 #ifdef CONFIG_X86_IO_APIC
-extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
+extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #else
-#define acpi_get_override_irq(bus, trigger, polarity) (-1)
+#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
 #endif
 /*
  * This function undoes the effect of one call to acpi_register_gsi().
include/linux/bitops.h

@@ -10,6 +10,11 @@
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
 
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
 /*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
@@ -44,31 +49,6 @@ static inline unsigned long hweight_long(unsigned long w)
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
-/*
- * Clearly slow versions of the hweightN() functions, their benefit is
- * of course compile time evaluation of constant arguments.
- */
-#define HWEIGHT8(w)					\
-      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
-	(!!((w) & (1ULL << 0))) +			\
-	(!!((w) & (1ULL << 1))) +			\
-	(!!((w) & (1ULL << 2))) +			\
-	(!!((w) & (1ULL << 3))) +			\
-	(!!((w) & (1ULL << 4))) +			\
-	(!!((w) & (1ULL << 5))) +			\
-	(!!((w) & (1ULL << 6))) +			\
-	(!!((w) & (1ULL << 7))) )
-
-#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
-#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
-#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
-
-/*
- * Type invariant version that simply casts things to the
- * largest type.
- */
-#define HWEIGHT(w)   HWEIGHT64((u64)(w))
-
 /**
  * rol32 - rotate a 32-bit value left
  * @word: value to rotate
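Editor's note — usage sketch, not part of the commit: the compile-time HWEIGHT*() macros leave bitops.h here, while runtime callers keep going through hweight_long()/hweightN(), which may now resolve to the out-of-line __sw_hweight*() helpers declared above. A minimal sketch (kernel-module context assumed):

#include <linux/bitops.h>

/* Count the set bits of a runtime mask; hweight_long() picks
 * hweight32() or hweight64() from the word size. */
static unsigned long demo_count_bits(unsigned long mask)
{
	return hweight_long(mask);
}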
include/linux/cpufreq.h

@@ -278,6 +278,27 @@ struct freq_attr {
 	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
 };
 
+#define cpufreq_freq_attr_ro(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
+static struct freq_attr _name =			\
+__ATTR(_name, _perm, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+#define cpufreq_freq_attr_rw(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define cpufreq_freq_attr_rw_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+
 struct global_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct kobject *kobj,
@@ -286,6 +307,15 @@ struct global_attr {
 			 const char *c, size_t count);
 };
 
+#define define_one_global_ro(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+
 /*********************************************************************
  *                        CPUFREQ 2.6. INTERFACE                     *
 *********************************************************************/
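Editor's note — usage sketch, not part of the commit, assuming a hypothetical read-only attribute named "demo": each helper expects a matching show_<name>() (and store_<name>() for the rw variants) to already be in scope.

#include <linux/cpufreq.h>

static ssize_t show_demo(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->cur);	/* current frequency, kHz */
}
/* Expands to: static struct freq_attr demo = __ATTR(demo, 0444, show_demo, NULL); */
cpufreq_freq_attr_ro(demo);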
include/linux/cpuset.h

@@ -21,8 +21,7 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(mask, cpu_possible_mask);
+	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+	return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }
 
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
include/linux/dcache.h

@@ -186,6 +186,8 @@ d_iput: no no no yes
 
 #define DCACHE_FSNOTIFY_PARENT_WATCHED	0x0080 /* Parent inode is watched by some fsnotify listener */
 
+#define DCACHE_CANT_MOUNT	0x0100
+
 extern spinlock_t dcache_lock;
 extern seqlock_t rename_lock;
 
@@ -358,6 +360,18 @@ static inline int d_unlinked(struct dentry *dentry)
 	return d_unhashed(dentry) && !IS_ROOT(dentry);
 }
 
+static inline int cant_mount(struct dentry *dentry)
+{
+	return (dentry->d_flags & DCACHE_CANT_MOUNT);
+}
+
+static inline void dont_mount(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= DCACHE_CANT_MOUNT;
+	spin_unlock(&dentry->d_lock);
+}
+
 static inline struct dentry *dget_parent(struct dentry *dentry)
 {
 	struct dentry *ret;
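Editor's note — sketch of the intended pairing, not part of the commit: VFS code that unlinks a dentry calls dont_mount() on it, and mount paths refuse dentries so marked via cant_mount().

#include <linux/dcache.h>
#include <linux/errno.h>

/* Refuse to mount on a dentry already marked by dont_mount(). */
static int demo_may_mount_on(struct dentry *dentry)
{
	return cant_mount(dentry) ? -ENOENT : 0;
}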
include/linux/debugobjects.h

@@ -20,12 +20,14 @@ struct debug_obj_descr;
  * struct debug_obj - representaion of an tracked object
  * @node:	hlist node to link the object into the tracker list
  * @state:	tracked object state
+ * @astate:	current active state
  * @object:	pointer to the real object
  * @descr:	pointer to an object type specific debug description structure
  */
 struct debug_obj {
 	struct hlist_node	node;
 	enum debug_obj_state	state;
+	unsigned int		astate;
 	void			*object;
 	struct debug_obj_descr	*descr;
 };
@@ -60,6 +62,15 @@ extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy   (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free      (void *addr, struct debug_obj_descr *descr);
 
+/*
+ * Active state:
+ * - Set at 0 upon initialization.
+ * - Must return to 0 before deactivation.
+ */
+extern void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next);
+
 extern void debug_objects_early_init(void);
 extern void debug_objects_mem_init(void);
 #else
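Editor's note — sketch, not part of the commit, assuming a tracked object type with its own debug_obj_descr: astate is expected to go 0 -> 1 on activation and back to 0 before deactivation.

#include <linux/debugobjects.h>

static struct debug_obj_descr demo_descr = {
	.name = "demo_obj",
};

static void demo_mark_active(void *obj)
{
	/* assert astate == 0, then advance it to 1 */
	debug_object_active_state(obj, &demo_descr, 0, 1);
}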
include/linux/ftrace.h

@@ -82,9 +82,13 @@ void clear_ftrace_function(void);
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
-# define register_ftrace_function(ops) do { } while (0)
-# define unregister_ftrace_function(ops) do { } while (0)
-# define clear_ftrace_function(ops) do { } while (0)
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
 static inline void ftrace_start(void) { }
@@ -237,11 +241,13 @@ extern int skip_trace(unsigned long ip);
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
 #else
-# define skip_trace(ip)				({ 0; })
-# define ftrace_force_update()			({ 0; })
-# define ftrace_set_filter(buf, len, reset)	do { } while (0)
-# define ftrace_disable_daemon()		do { } while (0)
-# define ftrace_enable_daemon()			do { } while (0)
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+}
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
 static inline int register_ftrace_command(struct ftrace_func_command *cmd)
 {
@@ -314,16 +320,16 @@ static inline void __ftrace_enabled_restore(int enabled)
 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
 extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
 #else
-# define time_hardirqs_on(a0, a1)		do { } while (0)
-# define time_hardirqs_off(a0, a1)		do { } while (0)
+static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
-# define trace_preempt_on(a0, a1)		do { } while (0)
-# define trace_preempt_off(a0, a1)		do { } while (0)
+static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
@@ -352,6 +358,10 @@ struct ftrace_graph_ret {
 	int depth;
 };
 
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 /* for init task */
@@ -400,10 +410,6 @@ extern char __irqentry_text_end[];
 
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
-
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 				trace_func_graph_ent_t entryfunc);
@@ -441,6 +447,13 @@ static inline void unpause_graph_tracing(void)
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+					trace_func_graph_ent_t entryfunc)
+{
+	return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
 static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
 	return -1;
@@ -492,7 +505,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 	return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
 
-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
 
 #ifdef CONFIG_PREEMPT
 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
@@ -504,18 +519,6 @@ extern int ftrace_dump_on_oops;
 #define INIT_TRACE_RECURSION
 #endif
 
-#ifdef CONFIG_HW_BRANCH_TRACER
-
-void trace_hw_branch(u64 from, u64 to);
-void trace_hw_branch_oops(void);
-
-#else /* CONFIG_HW_BRANCH_TRACER */
-
-static inline void trace_hw_branch(u64 from, u64 to) {}
-static inline void trace_hw_branch_oops(void) {}
-
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 
 unsigned long arch_syscall_addr(int nr);
include/linux/ftrace_event.h

@@ -58,6 +58,7 @@ struct trace_iterator {
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
+	unsigned long		lost_events;
 	int			leftover;
 	int			cpu;
 	u64			ts;
include/linux/hw_breakpoint.h

@@ -9,9 +9,22 @@ enum {
 };
 
 enum {
-	HW_BREAKPOINT_R = 1,
-	HW_BREAKPOINT_W = 2,
-	HW_BREAKPOINT_X = 4,
+	HW_BREAKPOINT_EMPTY	= 0,
+	HW_BREAKPOINT_R		= 1,
+	HW_BREAKPOINT_W		= 2,
+	HW_BREAKPOINT_RW	= HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+	HW_BREAKPOINT_X		= 4,
+	HW_BREAKPOINT_INVALID	= HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+	TYPE_INST	= 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+	TYPE_DATA	= 0,
+#else
+	TYPE_DATA	= 1,
+#endif
+	TYPE_MAX
 };
 
 #ifdef __KERNEL__
@@ -34,6 +47,12 @@ static inline void hw_breakpoint_init(struct perf_event_attr *attr)
 	attr->sample_period = 1;
 }
 
+static inline void ptrace_breakpoint_init(struct perf_event_attr *attr)
+{
+	hw_breakpoint_init(attr);
+	attr->exclude_kernel = 1;
+}
+
 static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
 {
 	return bp->attr.bp_addr;
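Editor's note — sketch, not part of the commit: setting up a user-only write breakpoint attribute with the new helper (address and type are illustrative).

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static void demo_init_bp(struct perf_event_attr *attr, unsigned long addr)
{
	ptrace_breakpoint_init(attr);		/* hw_breakpoint_init() + exclude_kernel */
	attr->bp_addr = addr;
	attr->bp_type = HW_BREAKPOINT_W;	/* data-write breakpoint */
	attr->bp_len  = HW_BREAKPOINT_LEN_4;
}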
include/linux/init_task.h

@@ -49,7 +49,6 @@ extern struct group_info init_groups;
 		{ .first = &init_task.pids[PIDTYPE_PGID].node },	\
 		{ .first = &init_task.pids[PIDTYPE_SID].node },		\
 	},								\
-	.rcu		= RCU_HEAD_INIT,				\
 	.level		= 0,						\
 	.numbers	= { {						\
 		.nr		= 0,					\
include/linux/iommu.h

@@ -37,9 +37,9 @@ struct iommu_ops {
 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
 	int (*map)(struct iommu_domain *domain, unsigned long iova,
-		   phys_addr_t paddr, size_t size, int prot);
-	void (*unmap)(struct iommu_domain *domain, unsigned long iova,
-		      size_t size);
+		   phys_addr_t paddr, int gfp_order, int prot);
+	int (*unmap)(struct iommu_domain *domain, unsigned long iova,
+		     int gfp_order);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    unsigned long iova);
 	int (*domain_has_cap)(struct iommu_domain *domain,
@@ -56,10 +56,10 @@ extern int iommu_attach_device(struct iommu_domain *domain,
 			       struct device *dev);
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
-extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
-			   phys_addr_t paddr, size_t size, int prot);
-extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
-			      size_t size);
+extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
+		     phys_addr_t paddr, int gfp_order, int prot);
+extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+		       int gfp_order);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 				      unsigned long iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
@@ -96,16 +96,16 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
 {
 }
 
-static inline int iommu_map_range(struct iommu_domain *domain,
-				  unsigned long iova, phys_addr_t paddr,
-				  size_t size, int prot)
+static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
+			    phys_addr_t paddr, int gfp_order, int prot)
 {
 	return -ENODEV;
 }
 
-static inline void iommu_unmap_range(struct iommu_domain *domain,
-				     unsigned long iova, size_t size)
+static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+			      int gfp_order)
 {
 	return -ENODEV;
 }
 
 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
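Editor's note — sketch, not part of the commit: the order-based API replaces the size-based *_range calls. Order 0 maps a single page; iommu_unmap() is understood to report the order actually unmapped via its return value.

#include <linux/iommu.h>

static int demo_map_one_page(struct iommu_domain *dom, unsigned long iova,
			     phys_addr_t paddr)
{
	int ret;

	ret = iommu_map(dom, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;
	/* ... DMA through the mapping ... */
	return iommu_unmap(dom, iova, 0);
}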
include/linux/kernel.h

@@ -492,6 +492,13 @@ static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
 static inline int tracing_is_on(void) { return 0; }
 #endif
 
+enum ftrace_dump_mode {
+	DUMP_NONE,
+	DUMP_ALL,
+	DUMP_ORIG,
+};
+
 #ifdef CONFIG_TRACING
 extern void tracing_start(void);
 extern void tracing_stop(void);
@@ -573,7 +580,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 extern int
 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -594,7 +601,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
 {
 	return 0;
 }
-static inline void ftrace_dump(void) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif /* CONFIG_TRACING */
 
 /*
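Editor's note — sketch, not part of the commit: callers now pick the dump scope; the old no-argument call corresponds to DUMP_ALL.

#include <linux/kernel.h>

static void demo_dump_on_error(void)
{
	ftrace_dump(DUMP_ALL);	/* dump the trace buffers of all CPUs */
}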
include/linux/mm.h

@@ -19,7 +19,6 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -1449,9 +1448,6 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-				 size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);
-
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
include/linux/mod_devicetable.h

@@ -500,4 +500,13 @@ struct mdio_device_id {
 	__u32 phy_id_mask;
 };
 
+struct zorro_device_id {
+	__u32 id;			/* Device ID or ZORRO_WILDCARD */
+	kernel_ulong_t driver_data;	/* Data private to the driver */
+};
+
+#define ZORRO_WILDCARD			(0xffffffff)	/* not official */
+
+#define ZORRO_DEVICE_MODALIAS_FMT	"zorro:i%08X"
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
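Editor's note — sketch, not part of the commit: a Zorro driver match table built on the new struct (the board ID is made up).

#include <linux/mod_devicetable.h>

static const struct zorro_device_id demo_zorro_ids[] = {
	{ 0x12345678, 0 },	/* hypothetical board ID */
	{ ZORRO_WILDCARD, 0 },	/* match any board */
	{ 0 }
};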
include/linux/module.h

@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module)
 	if (module) {
 		preempt_disable();
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 		preempt_enable();
 	}
 }
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module)
 
 	if (likely(module_is_live(module))) {
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 	} else
 		ret = 0;
 
include/linux/perf_event.h

@@ -203,8 +203,19 @@ struct perf_event_attr {
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
 				watermark      :  1, /* wakeup_watermark      */
+				/*
+				 * precise_ip:
+				 *
+				 *  0 - SAMPLE_IP can have arbitrary skid
+				 *  1 - SAMPLE_IP must have constant skid
+				 *  2 - SAMPLE_IP requested to have 0 skid
+				 *  3 - SAMPLE_IP must have 0 skid
+				 *
+				 *  See also PERF_RECORD_MISC_EXACT_IP
+				 */
+				precise_ip     :  2, /* skid constraint       */
 
-				__reserved_1   : 49;
+				__reserved_1   : 47;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -287,11 +298,24 @@ struct perf_event_mmap_page {
 	__u64   data_tail;		/* user-space written tail */
 };
 
-#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL			(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
+
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
 
 struct perf_event_header {
 	__u32	type;
@@ -439,6 +463,12 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif
 
+struct perf_guest_info_callbacks {
+	int (*is_in_guest) (void);
+	int (*is_user_mode) (void);
+	unsigned long (*get_guest_ip) (void);
+};
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <asm/hw_breakpoint.h>
 #endif
@@ -468,6 +498,17 @@ struct perf_raw_record {
 	void				*data;
 };
 
+struct perf_branch_entry {
+	__u64				from;
+	__u64				to;
+	__u64				flags;
+};
+
+struct perf_branch_stack {
+	__u64				nr;
+	struct perf_branch_entry	entries[0];
+};
+
 struct task_struct;
 
 /**
@@ -506,6 +547,8 @@ struct hw_perf_event {
 
 struct perf_event;
 
+#define PERF_EVENT_TXN_STARTED 1
+
 /**
  * struct pmu - generic performance monitoring unit
  */
@@ -516,6 +559,16 @@ struct pmu {
 	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
+
+	/*
+	 * group events scheduling is treated as a transaction,
+	 * add group events as a whole and perform one schedulability test.
+	 * If test fails, roll back the whole group
+	 */
+
+	void (*start_txn)	(const struct pmu *pmu);
+	void (*cancel_txn)	(const struct pmu *pmu);
+	int  (*commit_txn)	(const struct pmu *pmu);
 };
 
 /**
@@ -571,6 +624,14 @@ enum perf_group_flag {
 	PERF_GROUP_SOFTWARE = 0x1,
 };
 
+#define SWEVENT_HLIST_BITS	8
+#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head		rcu_head;
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -579,6 +640,7 @@ struct perf_event {
 	struct list_head		group_entry;
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
+	struct hlist_node		hlist_entry;
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
@@ -726,6 +788,9 @@ struct perf_cpu_context {
 	int				active_oncpu;
 	int				max_pertask;
 	int				exclusive;
+	struct swevent_hlist		*swevent_hlist;
+	struct mutex			hlist_mutex;
+	int				hlist_refcount;
 
 	/*
	 * Recursion avoidance:
@@ -769,9 +834,6 @@ extern void perf_disable(void);
 extern void perf_enable(void);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -902,6 +964,10 @@ static inline void perf_event_mmap(struct vm_area_struct *vma)
 		__perf_event_mmap(vma);
 }
 
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
@@ -971,6 +1037,11 @@ perf_sw_event(u32 event_id, u64 nr, int nmi,
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
+
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
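Editor's note — sketch, not part of the commit: how a hypervisor module (KVM, per this series) would wire up the new guest callbacks; all three hooks here are stubs.

#include <linux/init.h>
#include <linux/perf_event.h>

static int demo_is_in_guest(void)		{ return 0; }
static int demo_is_user_mode(void)		{ return 0; }
static unsigned long demo_get_guest_ip(void)	{ return 0; }

static struct perf_guest_info_callbacks demo_guest_cbs = {
	.is_in_guest	= demo_is_in_guest,
	.is_user_mode	= demo_is_user_mode,
	.get_guest_ip	= demo_get_guest_ip,
};

static int __init demo_init(void)
{
	return perf_register_guest_info_callbacks(&demo_guest_cbs);
}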
include/linux/platform_device.h

@@ -44,12 +44,14 @@ extern int platform_get_irq_byname(struct platform_device *, const char *);
 extern int platform_add_devices(struct platform_device **, int);
 
 extern struct platform_device *platform_device_register_simple(const char *, int id,
-					struct resource *, unsigned int);
+					const struct resource *, unsigned int);
 extern struct platform_device *platform_device_register_data(struct device *,
 		const char *, int, const void *, size_t);
 
 extern struct platform_device *platform_device_alloc(const char *name, int id);
-extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num);
+extern int platform_device_add_resources(struct platform_device *pdev,
+					 const struct resource *res,
+					 unsigned int num);
 extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size);
 extern int platform_device_add(struct platform_device *pdev);
 extern void platform_device_del(struct platform_device *pdev);
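Editor's note — sketch, not part of the commit: resource tables can now themselves be const, matching the constified prototypes (the MMIO window below is illustrative).

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static const struct resource demo_resources[] = {
	{
		.start	= 0x1000,	/* illustrative MMIO window */
		.end	= 0x1fff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device *demo_register(void)
{
	return platform_device_register_simple("demo", -1, demo_resources,
					       ARRAY_SIZE(demo_resources));
}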
include/linux/ptrace.h

@@ -345,18 +345,6 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
 #define arch_ptrace_stop(code, info)		do { } while (0)
 #endif
 
-#ifndef arch_ptrace_untrace
-/*
- * Do machine-specific work before untracing child.
- *
- * This is called for a normal detach as well as from ptrace_exit()
- * when the tracing task dies.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-#define arch_ptrace_untrace(task)		do { } while (0)
-#endif
-
 extern int task_current_syscall(struct task_struct *target, long *callno,
 				unsigned long args[6], unsigned int maxargs,
 				unsigned long *sp, unsigned long *pc);
include/linux/rbtree.h

@@ -110,6 +110,7 @@ struct rb_node
 struct rb_root
 {
 	struct rb_node *rb_node;
+	void (*augment_cb)(struct rb_node *node);
 };
 
 
@@ -129,7 +130,9 @@ static inline void rb_set_color(struct rb_node *rb, int color)
 	rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
 }
 
-#define RB_ROOT	(struct rb_root) { NULL, }
+#define RB_ROOT	(struct rb_root) { NULL, NULL, }
+#define RB_AUGMENT_ROOT(x)	(struct rb_root) { NULL, x}
+
 #define	rb_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define RB_EMPTY_ROOT(root)	((root)->rb_node == NULL)
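Editor's note — sketch, not part of the commit: a root declared with the new callback, which the rbtree core is expected to invoke during rebalancing so per-node metadata (e.g. a subtree maximum) stays current. The callback body is left empty here.

#include <linux/rbtree.h>

static void demo_augment(struct rb_node *node)
{
	/* recompute node-local metadata, e.g. a subtree max */
}

static struct rb_root demo_root = RB_AUGMENT_ROOT(&demo_augment);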
include/linux/rcupdate.h

@@ -56,8 +56,6 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
-extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
 extern void rcu_barrier(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
@@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page);
 
 /* Internal to kernel */
 extern void rcu_init(void);
-extern int rcu_scheduler_active;
-extern void rcu_scheduler_starting(void);
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
@@ -83,6 +79,14 @@ extern void rcu_scheduler_starting(void);
 	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
 
+static inline void init_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 extern struct lockdep_map rcu_lock_map;
@@ -106,12 +110,13 @@ extern int debug_lockdep_rcu_enabled(void);
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
+ * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
  *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
  */
 static inline int rcu_read_lock_held(void)
 {
@@ -129,13 +134,15 @@ extern int rcu_read_lock_bh_held(void);
 /**
  * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
- * RCU-sched read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-sched read-side critical section unless it
- * can prove otherwise.  Note that disabling of preemption (including
- * disabling irqs) counts as an RCU-sched read-side critical section.
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise.  Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.
  *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
  */
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
@@ -177,7 +184,7 @@ static inline int rcu_read_lock_bh_held(void)
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
+	return preempt_count() != 0 || irqs_disabled();
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -192,6 +199,15 @@ static inline int rcu_read_lock_sched_held(void)
 
 extern int rcu_my_thread_group_empty(void);
 
+#define __do_rcu_dereference_check(c)					\
+	do {								\
+		static bool __warned;					\
+		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
+			__warned = true;				\
+			lockdep_rcu_dereference(__FILE__, __LINE__);	\
+		}							\
+	} while (0)
+
 /**
  * rcu_dereference_check - rcu_dereference with debug checking
  * @p: The pointer to read, prior to dereferencing
@@ -221,8 +237,7 @@ extern int rcu_my_thread_group_empty(void);
  */
 #define rcu_dereference_check(p, c) \
 	({ \
-		if (debug_lockdep_rcu_enabled() && !(c)) \
-			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		__do_rcu_dereference_check(c); \
 		rcu_dereference_raw(p); \
 	})
 
@@ -239,8 +254,7 @@ extern int rcu_my_thread_group_empty(void);
  */
 #define rcu_dereference_protected(p, c) \
 	({ \
-		if (debug_lockdep_rcu_enabled() && !(c)) \
-			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		__do_rcu_dereference_check(c); \
 		(p); \
 	})
 
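Editor's note — sketch, not part of the commit (demo_lock/demo_ptr are hypothetical): the condition documents which locks make the access safe, and lockdep now warns only once per call site via the __warned flag in __do_rcu_dereference_check().

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_obj {
	int val;
};

static DEFINE_SPINLOCK(demo_lock);
static struct demo_obj *demo_ptr;	/* assumed RCU-protected */

/* Caller must be in an RCU read-side section or hold demo_lock. */
static int demo_read_val(void)
{
	struct demo_obj *p;

	p = rcu_dereference_check(demo_ptr,
				  rcu_read_lock_held() ||
				  lockdep_is_held(&demo_lock));
	return p ? p->val : -1;
}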
include/linux/rcutiny.h

@@ -29,6 +29,10 @@
 
 void rcu_sched_qs(int cpu);
 void rcu_bh_qs(int cpu);
+static inline void rcu_note_context_switch(int cpu)
+{
+	rcu_sched_qs(cpu);
+}
 
 #define __rcu_read_lock()	preempt_disable()
 #define __rcu_read_unlock()	preempt_enable()
@@ -60,8 +64,6 @@ static inline long rcu_batches_completed_bh(void)
 	return 0;
 }
 
-extern int rcu_expedited_torture_stats(char *page);
-
 static inline void rcu_force_quiescent_state(void)
 {
 }
@@ -74,7 +76,17 @@ static inline void rcu_sched_force_quiescent_state(void)
 {
 }
 
-#define synchronize_rcu synchronize_sched
+extern void synchronize_sched(void);
+
+static inline void synchronize_rcu(void)
+{
+	synchronize_sched();
+}
+
+static inline void synchronize_rcu_bh(void)
+{
+	synchronize_sched();
+}
 
 static inline void synchronize_rcu_expedited(void)
 {
@@ -114,4 +126,17 @@ static inline int rcu_preempt_depth(void)
 	return 0;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+extern int rcu_scheduler_active __read_mostly;
+extern void rcu_scheduler_starting(void);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rcu_scheduler_starting(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 #endif /* __LINUX_RCUTINY_H */
include/linux/rcutree.h

@@ -34,8 +34,8 @@ struct notifier_block;
 
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
+extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
-extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
@@ -86,6 +86,8 @@ static inline void __rcu_read_unlock_bh(void)
 
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
+extern void synchronize_rcu_bh(void);
+extern void synchronize_sched(void);
 extern void synchronize_rcu_expedited(void);
 
 static inline void synchronize_rcu_bh_expedited(void)
@@ -120,4 +122,7 @@ static inline int rcu_blocking_is_gp(void)
 	return num_online_cpus() == 1;
 }
 
+extern void rcu_scheduler_starting(void);
+extern int rcu_scheduler_active __read_mostly;
+
 #endif /* __LINUX_RCUTREE_H */
include/linux/ring_buffer.h

@@ -120,12 +120,16 @@ int ring_buffer_write(struct ring_buffer *buffer,
 		      unsigned long length, void *data);
 
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+		 unsigned long *lost_events);
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+		    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_prepare_sync(void);
+void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
 
 struct ring_buffer_event *
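Editor's note — sketch, not part of the commit: the split prepare/start sequence lets several per-cpu iterators be prepared first and covered by a single synchronization before reading begins (error handling elided).

#include <linux/ring_buffer.h>

static void demo_read_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;

	iter = ring_buffer_read_prepare(buffer, cpu);	/* allocate + disable */
	ring_buffer_read_prepare_sync();		/* one sync for all iters */
	ring_buffer_read_start(iter);			/* now safe to iterate */
	/* ... ring_buffer_read() / ring_buffer_iter_peek() ... */
	ring_buffer_read_finish(iter);
}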
include/linux/sched.h

@@ -99,7 +99,6 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio_list;
 struct fs_struct;
-struct bts_context;
 struct perf_event_context;
 
 /*
@@ -275,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
 	return 0;
 }
+
+static inline int nohz_ratelimit(int cpu)
+{
+	return 0;
+}
 #endif
 
 /*
@@ -954,6 +959,7 @@ struct sched_domain {
 	char *name;
 #endif
 
+	unsigned int span_weight;
 	/*
	 * Span of all CPUs in this domain.
	 *
@@ -1026,12 +1032,17 @@ struct sched_domain;
 #define WF_SYNC		0x01		/* waker goes to sleep after wakup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 
+#define ENQUEUE_WAKEUP		1
+#define ENQUEUE_WAKING		2
+#define ENQUEUE_HEAD		4
+
+#define DEQUEUE_SLEEP		1
+
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
-			      bool head);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1040,7 +1051,8 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
+			       int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
@@ -1077,36 +1089,8 @@ struct load_weight {
 	unsigned long weight, inv_weight;
 };
 
-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- *     4 se->block_start
- *     4 se->run_node
- *     4 se->sleep_start
- *     6 se->load.weight
- */
-struct sched_entity {
-	struct load_weight	load;		/* for load-balancing */
-	struct rb_node		run_node;
-	struct list_head	group_node;
-	unsigned int		on_rq;
-
-	u64			exec_start;
-	u64			sum_exec_runtime;
-	u64			vruntime;
-	u64			prev_sum_exec_runtime;
-
-	u64			last_wakeup;
-	u64			avg_overlap;
-
-	u64			nr_migrations;
-
-	u64			start_runtime;
-	u64			avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
 	u64			wait_start;
 	u64			wait_max;
 	u64			wait_count;
@@ -1138,6 +1122,24 @@ struct sched_entity {
 	u64			nr_wakeups_affine_attempts;
 	u64			nr_wakeups_passive;
 	u64			nr_wakeups_idle;
+};
 #endif
 
+struct sched_entity {
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	struct list_head	group_node;
+	unsigned int		on_rq;
+
+	u64			exec_start;
+	u64			sum_exec_runtime;
+	u64			vruntime;
+	u64			prev_sum_exec_runtime;
+
+	u64			nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_statistics statistics;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1272,12 +1274,6 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
-	/*
-	 * This is the tracer handle for the ptrace BTS extension.
-	 * This field actually belongs to the ptracer task.
-	 */
-	struct bts_context *bts;
-
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1846,6 +1842,7 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
@@ -2122,10 +2119,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
include/linux/srcu.h

@@ -27,6 +27,8 @@
 #ifndef _LINUX_SRCU_H
 #define _LINUX_SRCU_H
 
+#include <linux/mutex.h>
+
 struct srcu_struct_array {
 	int c[2];
 };
@@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_struct *sp);
 /**
  * srcu_read_lock_held - might we be in SRCU read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an SRCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
+ * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
  * this assumes we are in an SRCU read-side critical section unless it can
  * prove otherwise.
  */
include/linux/stop_machine.h

@@ -1,13 +1,101 @@
 #ifndef _LINUX_STOP_MACHINE
 #define _LINUX_STOP_MACHINE
-/* "Bogolock": stop the entire machine, disable interrupts.  This is a
-   very heavy lock, which is equivalent to grabbing every spinlock
-   (and more).  So the "read" side to such a lock is anything which
-   disables preeempt. */
+
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/list.h>
 #include <asm/system.h>
 
+/*
+ * stop_cpu[s]() is simplistic per-cpu maximum priority cpu
+ * monopolization mechanism.  The caller can specify a non-sleeping
+ * function to be executed on a single or multiple cpus preempting all
+ * other processes and monopolizing those cpus until it finishes.
+ *
+ * Resources for this mechanism are preallocated when a cpu is brought
+ * up and requests are guaranteed to be served as long as the target
+ * cpus are online.
+ */
+typedef int (*cpu_stop_fn_t)(void *arg);
+
+#ifdef CONFIG_SMP
+
+struct cpu_stop_work {
+	struct list_head	list;		/* cpu_stopper->works */
+	cpu_stop_fn_t		fn;
+	void			*arg;
+	struct cpu_stop_done	*done;
+};
+
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+			 struct cpu_stop_work *work_buf);
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+
+#else	/* CONFIG_SMP */
+
+#include <linux/workqueue.h>
+
+struct cpu_stop_work {
+	struct work_struct	work;
+	cpu_stop_fn_t		fn;
+	void			*arg;
+};
+
+static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+	int ret = -ENOENT;
+	preempt_disable();
+	if (cpu == smp_processor_id())
+		ret = fn(arg);
+	preempt_enable();
+	return ret;
+}
+
+static void stop_one_cpu_nowait_workfn(struct work_struct *work)
+{
+	struct cpu_stop_work *stwork =
+		container_of(work, struct cpu_stop_work, work);
+	preempt_disable();
+	stwork->fn(stwork->arg);
+	preempt_enable();
+}
+
+static inline void stop_one_cpu_nowait(unsigned int cpu,
+				       cpu_stop_fn_t fn, void *arg,
+				       struct cpu_stop_work *work_buf)
+{
+	if (cpu == smp_processor_id()) {
+		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
+		work_buf->fn = fn;
+		work_buf->arg = arg;
+		schedule_work(&work_buf->work);
+	}
+}
+
+static inline int stop_cpus(const struct cpumask *cpumask,
+			    cpu_stop_fn_t fn, void *arg)
+{
+	if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
+		return stop_one_cpu(raw_smp_processor_id(), fn, arg);
+	return -ENOENT;
+}
+
+static inline int try_stop_cpus(const struct cpumask *cpumask,
+				cpu_stop_fn_t fn, void *arg)
+{
+	return stop_cpus(cpumask, fn, arg);
+}
+
+#endif	/* CONFIG_SMP */
+
+/*
+ * stop_machine "Bogolock": stop the entire machine, disable
+ * interrupts.  This is a very heavy lock, which is equivalent to
+ * grabbing every spinlock (and more).  So the "read" side to such a
+ * lock is anything which disables preeempt.
+ */
 #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
 
 /**
@@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 
-/**
- * stop_machine_create: create all stop_machine threads
- *
- * Description: This causes all stop_machine threads to be created before
- * stop_machine actually gets called.  This can be used by subsystems that
- * need a non failing stop_machine infrastructure.
- */
-int stop_machine_create(void);
-
-/**
- * stop_machine_destroy: destroy all stop_machine threads
- *
- * Description: This causes all stop_machine threads which were created with
- * stop_machine_create to be destroyed again.
- */
-void stop_machine_destroy(void);
-
-#else
+#else	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
 static inline int stop_machine(int (*fn)(void *), void *data,
 			       const struct cpumask *cpus)
@@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data,
 	return ret;
 }
 
-static inline int stop_machine_create(void) { return 0; }
-static inline void stop_machine_destroy(void) { }
-
-#endif /* CONFIG_SMP */
-#endif /* _LINUX_STOP_MACHINE */
+#endif	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif	/* _LINUX_STOP_MACHINE */
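Editor's note — sketch, not part of the commit: running a non-sleeping callback on one CPU with the new cpu_stop API (callback and argument are illustrative).

#include <linux/stop_machine.h>

static int demo_stop_fn(void *arg)
{
	int *counter = arg;

	(*counter)++;		/* runs with the target cpu monopolized */
	return 0;
}

static int demo_poke_cpu(unsigned int cpu)
{
	int counter = 0;

	return stop_one_cpu(cpu, demo_stop_fn, &counter);
}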
include/linux/tick.h

@@ -42,6 +42,7 @@ enum tick_nohz_mode {
  * @idle_waketime:	Time when the idle was interrupted
  * @idle_exittime:	Time when the idle state was left
  * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
+ * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped, with IO outstanding
  * @sleep_length:	Duration of the current idle sleep
  * @do_timer_lst:	CPU was the last one doing do_timer before going idle
  */
@@ -60,7 +61,7 @@ struct tick_sched {
 	ktime_t				idle_waketime;
 	ktime_t				idle_exittime;
 	ktime_t				idle_sleeptime;
-	ktime_t				idle_lastupdate;
+	ktime_t				iowait_sleeptime;
 	ktime_t				sleep_length;
 	unsigned long			last_jiffies;
 	unsigned long			next_jiffies;
@@ -124,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle);
 extern void tick_nohz_restart_sched_tick(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
+extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 # else
 static inline void tick_nohz_stop_sched_tick(int inidle) { }
 static inline void tick_nohz_restart_sched_tick(void) { }
@@ -134,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
 	return len;
 }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
+static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 # endif /* !NO_HZ */
 
 #endif
include/linux/tracepoint.h

@@ -33,6 +33,65 @@ struct tracepoint {
  * Keep in sync with vmlinux.lds.h.
  */
 
+/*
+ * Connect a probe to a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_register(const char *name, void *probe);
+
+/*
+ * Disconnect a probe from a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_unregister(const char *name, void *probe);
+
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
+struct tracepoint_iter {
+	struct module *module;
+	struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+	struct tracepoint *begin, struct tracepoint *end);
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+	synchronize_sched();
+}
+
+#define PARAMS(args...) args
+
+#ifdef CONFIG_TRACEPOINTS
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end);
+#else
+static inline void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end)
+{ }
+#endif /* CONFIG_TRACEPOINTS */
+
+#endif /* _LINUX_TRACEPOINT_H */
+
+/*
+ * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
+ * file ifdef protection.
+ * This is due to the way trace events work. If a file includes two
+ * trace event headers under one "CREATE_TRACE_POINTS" the first include
+ * will override the TRACE_EVENT and break the second include.
+ */
+
 #ifndef DECLARE_TRACE
 
 #define TP_PROTO(args...)	args
@@ -96,9 +155,6 @@ struct tracepoint {
 #define EXPORT_TRACEPOINT_SYMBOL(name)					\
 	EXPORT_SYMBOL(__tracepoint_##name)
 
-extern void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end);
-
 #else /* !CONFIG_TRACEPOINTS */
 #define DECLARE_TRACE(name, proto, args)				\
 	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
@@ -119,61 +175,9 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
 #define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
 #define EXPORT_TRACEPOINT_SYMBOL(name)
 
-static inline void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end)
-{ }
 #endif /* CONFIG_TRACEPOINTS */
 #endif /* DECLARE_TRACE */
 
-/*
- * Connect a probe to a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_register(const char *name, void *probe);
-
-/*
- * Disconnect a probe from a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
-
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
-extern void tracepoint_probe_update_all(void);
-
-struct tracepoint_iter {
-	struct module *module;
-	struct tracepoint *tracepoint;
-};
-
-extern void tracepoint_iter_start(struct tracepoint_iter *iter);
-extern void tracepoint_iter_next(struct tracepoint_iter *iter);
-extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
-extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end);
-
-/*
- * tracepoint_synchronize_unregister must be called between the last tracepoint
- * probe unregistration and the end of module exit to make sure there is no
- * caller executing a probe when it is freed.
- */
-static inline void tracepoint_synchronize_unregister(void)
-{
-	synchronize_sched();
-}
-
-#define PARAMS(args...) args
-
-#endif /* _LINUX_TRACEPOINT_H */
-
-/*
- * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
- * This is due to the way trace events work. If a file includes two
- * trace event headers under one "CREATE_TRACE_POINTS" the first include
- * will override the TRACE_EVENT and break the second include.
- */
-
 #ifndef TRACE_EVENT
 /*
  * For use with the TRACE_EVENT macro:
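Editor's note — sketch, not part of the commit: the register/unregister entry points keep their semantics, only their position in the header changes. A sketch against a hypothetical tracepoint "demo_event" declared elsewhere with TP_PROTO(int val):

#include <linux/tracepoint.h>

/* Probe signature must match the tracepoint's TP_PROTO(int val). */
static void demo_probe(int val)
{
	/* runs every time trace_demo_event(val) fires */
}

static int demo_attach(void)
{
	return tracepoint_probe_register("demo_event", (void *)demo_probe);
}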
include/linux/types.h

@@ -188,12 +188,12 @@ typedef u32 phys_addr_t;
 typedef phys_addr_t resource_size_t;
 
 typedef struct {
-	volatile int counter;
+	int counter;
 } atomic_t;
 
 #ifdef CONFIG_64BIT
 typedef struct {
-	volatile long counter;
+	long counter;
 } atomic64_t;
 #endif
 
include/linux/wait.h

@@ -127,12 +127,26 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
 /*
  * Used for wake-one threads:
  */
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+					      wait_queue_t *wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue(q, wait);
+}
+
 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
-						wait_queue_t *new)
+					 wait_queue_t *new)
 {
 	list_add_tail(&new->task_list, &head->task_list);
 }
 
+static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
+					      wait_queue_t *wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue_tail(q, wait);
+}
+
 static inline void __remove_wait_queue(wait_queue_head_t *head,
 					wait_queue_t *old)
 {
@@ -403,25 +417,6 @@ do {									\
 	__ret;								\
 })
 
-/*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
-						   wait_queue_t * wait)
-{
-	wait->flags |= WQ_FLAG_EXCLUSIVE;
-	__add_wait_queue_tail(q,  wait);
-}
-
-/*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void remove_wait_queue_locked(wait_queue_head_t *q,
-					    wait_queue_t * wait)
-{
-	__remove_wait_queue(q, wait);
-}
-
 /*
  * These are the old interfaces to sleep waiting for an event.
  * They are racy.  DO NOT use them, use the wait_event* interfaces above.
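Editor's note — sketch, not part of the commit: queueing a wake-one waiter with the new helper; like the *_locked variants it replaces, the caller is expected to hold the wait queue's spinlock.

#include <linux/sched.h>
#include <linux/wait.h>

static void demo_add_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	spin_lock_irq(&q->lock);
	__add_wait_queue_tail_exclusive(q, wait);	/* sets WQ_FLAG_EXCLUSIVE */
	spin_unlock_irq(&q->lock);
}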
include/linux/zorro.h

@@ -38,8 +38,6 @@
 typedef __u32 zorro_id;
 
 
-#define ZORRO_WILDCARD		(0xffffffff)	/* not official */
-
 /* Include the ID list */
 #include <linux/zorro_ids.h>
 
@@ -116,6 +114,7 @@ struct ConfigDev {
 
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
 
 #include <asm/zorro.h>
 
@@ -142,28 +141,9 @@ struct zorro_dev {
  * Zorro bus
  */
 
-struct zorro_bus {
-	struct list_head devices;	/* list of devices on this bus */
-	unsigned int num_resources;	/* number of resources */
-	struct resource resources[4];	/* address space routed to this bus */
-	struct device dev;
-	char name[10];
-};
-
-extern struct zorro_bus zorro_bus;	/* single Zorro bus */
 extern struct bus_type zorro_bus_type;
 
 
-/*
- * Zorro device IDs
- */
-
-struct zorro_device_id {
-	zorro_id id;			/* Device ID or ZORRO_WILDCARD */
-	unsigned long driver_data;	/* Data private to the driver */
-};
-
-
 /*
  * Zorro device drivers
  */