Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (44 commits)
  rcu: Fix accelerated GPs for last non-dynticked CPU
  rcu: Make non-RCU_PROVE_LOCKING rcu_read_lock_sched_held() understand boot
  rcu: Fix accelerated grace periods for last non-dynticked CPU
  rcu: Export rcu_scheduler_active
  rcu: Make rcu_read_lock_sched_held() take boot time into account
  rcu: Make lockdep_rcu_dereference() message less alarmist
  sched, cgroups: Fix module export
  rcu: Add RCU_CPU_STALL_VERBOSE to dump detailed per-task information
  rcu: Fix rcutorture mod_timer argument to delay one jiffy
  rcu: Fix deadlock in TREE_PREEMPT_RCU CPU stall detection
  rcu: Convert to raw_spinlocks
  rcu: Stop overflowing signed integers
  rcu: Use canonical URL for Mathieu's dissertation
  rcu: Accelerate grace period if last non-dynticked CPU
  rcu: Fix citation of Mathieu's dissertation
  rcu: Documentation update for CONFIG_PROVE_RCU
  security: Apply lockdep-based checking to rcu_dereference() uses
  idr: Apply lockdep-based diagnostics to rcu_dereference() uses
  radix-tree: Disable RCU lockdep checking in radix tree
  vfs: Abstract rcu_dereference_check for files-fdtable use
  ...
commit 642c4c75a7
55 changed files with 1346 additions and 448 deletions
include/linux/cgroup.h

@@ -28,6 +28,7 @@ struct css_id;
 extern int cgroup_init_early(void);
 extern int cgroup_init(void);
 extern void cgroup_lock(void);
+extern int cgroup_lock_is_held(void);
 extern bool cgroup_lock_live_group(struct cgroup *cgrp);
 extern void cgroup_unlock(void);
 extern void cgroup_fork(struct task_struct *p);
@@ -486,7 +487,9 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
 static inline struct cgroup_subsys_state *task_subsys_state(
         struct task_struct *task, int subsys_id)
 {
-        return rcu_dereference(task->cgroups->subsys[subsys_id]);
+        return rcu_dereference_check(task->cgroups->subsys[subsys_id],
+                                     rcu_read_lock_held() ||
+                                     cgroup_lock_is_held());
 }
 
 static inline struct cgroup* task_cgroup(struct task_struct *task,
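Usage note (not part of the diff): the new rcu_dereference_check() condition documents that task_subsys_state() is legal either inside an RCU read-side critical section or while holding cgroup_mutex via cgroup_lock(). A minimal sketch of the two contexts; my_inspect_task_css and my_subsys_id are placeholder names, not from this commit:

    #include <linux/cgroup.h>
    #include <linux/rcupdate.h>

    static void my_inspect_task_css(struct task_struct *p, int my_subsys_id)
    {
            struct cgroup_subsys_state *css;

            /* Context 1: RCU read-side critical section. */
            rcu_read_lock();
            css = task_subsys_state(p, my_subsys_id);
            /* ... use css; must not block ... */
            rcu_read_unlock();

            /* Context 2: cgroup_mutex held. */
            cgroup_lock();
            css = task_subsys_state(p, my_subsys_id);
            /* ... css is stable while the lock is held ... */
            cgroup_unlock();
    }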
include/linux/cpumask.h

@@ -143,6 +143,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
 
 #define for_each_cpu(cpu, mask)                 \
         for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_not(cpu, mask)             \
+        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_and(cpu, mask, and)        \
         for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
 #else
@@ -202,6 +204,18 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
                 (cpu) = cpumask_next((cpu), (mask)),    \
                 (cpu) < nr_cpu_ids;)
 
+/**
+ * for_each_cpu_not - iterate over every cpu in a complemented mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_not(cpu, mask)                             \
+        for ((cpu) = -1;                                        \
+                (cpu) = cpumask_next_zero((cpu), (mask)),       \
+                (cpu) < nr_cpu_ids;)
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator
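Usage note (not part of the diff): for_each_cpu_not() visits every CPU id that is clear in the given mask. A minimal sketch; my_count_clear_cpus is a placeholder name:

    #include <linux/cpumask.h>

    static unsigned int my_count_clear_cpus(const struct cpumask *mask)
    {
            unsigned int cpu, n = 0;

            for_each_cpu_not(cpu, mask)
                    n++;
            /* here cpu >= nr_cpu_ids, per the comment above */
            return n;
    }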
include/linux/cred.h

@@ -280,7 +280,7 @@ static inline void put_cred(const struct cred *_cred)
  * task or by holding tasklist_lock to prevent it from being unlinked.
  */
 #define __task_cred(task) \
-        ((const struct cred *)(rcu_dereference((task)->real_cred)))
+        ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
 
 /**
  * get_task_cred - Get another task's objective credentials
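Usage note (not part of the diff): __task_cred() may now be used either under rcu_read_lock() or while holding tasklist_lock, and lockdep verifies that. A minimal sketch; my_task_uid is a placeholder name:

    #include <linux/cred.h>
    #include <linux/sched.h>

    static uid_t my_task_uid(struct task_struct *task)
    {
            uid_t uid;

            rcu_read_lock();   /* read_lock(&tasklist_lock) also satisfies the check */
            uid = __task_cred(task)->uid;
            rcu_read_unlock();
            return uid;
    }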
include/linux/fdtable.h

@@ -57,7 +57,14 @@ struct files_struct {
         struct file * fd_array[NR_OPEN_DEFAULT];
 };
 
-#define files_fdtable(files) (rcu_dereference((files)->fdt))
+#define rcu_dereference_check_fdtable(files, fdtfd) \
+        (rcu_dereference_check((fdtfd), \
+                               rcu_read_lock_held() || \
+                               lockdep_is_held(&(files)->file_lock) || \
+                               atomic_read(&(files)->count) == 1))
+
+#define files_fdtable(files) \
+        (rcu_dereference_check_fdtable((files), (files)->fdt))
 
 struct file_operations;
 struct vfsmount;
@@ -78,7 +85,7 @@ static inline struct file * fcheck_files(struct files_struct *files, unsigned int fd)
         struct fdtable *fdt = files_fdtable(files);
 
         if (fd < fdt->max_fds)
-                file = rcu_dereference(fdt->fd[fd]);
+                file = rcu_dereference_check_fdtable(files, fdt->fd[fd]);
         return file;
 }
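Usage note (not part of the diff): fcheck_files() is now checked against the three conditions above (RCU reader, files->file_lock held, or a single-threaded files_struct). A sketch of a lookup under RCU that takes a reference before leaving the critical section; my_lookup_fd is a placeholder name:

    #include <linux/fdtable.h>
    #include <linux/fs.h>

    static struct file *my_lookup_fd(struct files_struct *files, unsigned int fd)
    {
            struct file *file;

            rcu_read_lock();
            file = fcheck_files(files, fd);         /* NULL if fd is not open */
            if (file && !atomic_long_inc_not_zero(&file->f_count))
                    file = NULL;                    /* raced with the last fput() */
            rcu_read_unlock();
            return file;
    }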
include/linux/lockdep.h

@@ -534,4 +534,8 @@ do { \
 # define might_lock_read(lock) do { } while (0)
 #endif
 
+#ifdef CONFIG_PROVE_RCU
+extern void lockdep_rcu_dereference(const char *file, const int line);
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
include/linux/rculist.h

@@ -208,7 +208,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-        container_of(rcu_dereference(ptr), type, member)
+        container_of(rcu_dereference_raw(ptr), type, member)
 
 /**
  * list_first_entry_rcu - get the first element from a list
@@ -225,9 +225,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
         list_entry_rcu((ptr)->next, type, member)
 
 #define __list_for_each_rcu(pos, head) \
-        for (pos = rcu_dereference((head)->next); \
+        for (pos = rcu_dereference_raw((head)->next); \
                 pos != (head); \
-                pos = rcu_dereference(pos->next))
+                pos = rcu_dereference_raw(pos->next))
 
 /**
  * list_for_each_entry_rcu - iterate over rcu list of given type
@@ -257,9 +257,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * as long as the traversal is guarded by rcu_read_lock().
  */
 #define list_for_each_continue_rcu(pos, head) \
-        for ((pos) = rcu_dereference((pos)->next); \
+        for ((pos) = rcu_dereference_raw((pos)->next); \
                 prefetch((pos)->next), (pos) != (head); \
-                (pos) = rcu_dereference((pos)->next))
+                (pos) = rcu_dereference_raw((pos)->next))
 
 /**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
@@ -418,10 +418,10 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * as long as the traversal is guarded by rcu_read_lock().
  */
 #define hlist_for_each_entry_rcu(tpos, pos, head, member)                \
-        for (pos = rcu_dereference((head)->first);                       \
+        for (pos = rcu_dereference_raw((head)->first);                   \
                 pos && ({ prefetch(pos->next); 1; }) &&                  \
                 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-                pos = rcu_dereference(pos->next))
+                pos = rcu_dereference_raw(pos->next))
 
 #endif  /* __KERNEL__ */
 #endif
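Usage note (not part of the diff): the iterators switch to rcu_dereference_raw() because a macro cannot know which flavor of protection its caller relies on; callers still assert their own context. A typical traversal; the my_* names are placeholders:

    #include <linux/rculist.h>

    struct my_node {
            int value;
            struct list_head link;
    };

    static LIST_HEAD(my_list);      /* written under some my_lock, read under RCU */

    static int my_sum(void)
    {
            struct my_node *n;
            int sum = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(n, &my_list, link)
                    sum += n->value;
            rcu_read_unlock();
            return sum;
    }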
include/linux/rculist_nulls.h

@@ -101,10 +101,10 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
  *
  */
 #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
-        for (pos = rcu_dereference((head)->first); \
+        for (pos = rcu_dereference_raw((head)->first); \
                 (!is_a_nulls(pos)) && \
                 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
-                pos = rcu_dereference(pos->next))
+                pos = rcu_dereference_raw(pos->next))
 
 #endif
 #endif
include/linux/rcupdate.h

@@ -62,6 +62,8 @@ extern int sched_expedited_torture_stats(char *page);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern int rcu_scheduler_active;
+extern void rcu_scheduler_starting(void);
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
@@ -78,14 +80,120 @@ extern void rcu_init(void)
 } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
 extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire()     \
-                        lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_acquire() \
+                lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release()     lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()     do { } while (0)
-# define rcu_read_release()     do { } while (0)
-#endif
+
+extern struct lockdep_map rcu_bh_lock_map;
+# define rcu_read_acquire_bh() \
+                lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_bh()  lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
+
+extern struct lockdep_map rcu_sched_lock_map;
+# define rcu_read_acquire_sched() \
+                lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_sched() \
+                lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
+
+/**
+ * rcu_read_lock_held - might we be in RCU read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_held(void)
+{
+        if (debug_locks)
+                return lock_is_held(&rcu_lock_map);
+        return 1;
+}
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-bh read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_bh_held(void)
+{
+        if (debug_locks)
+                return lock_is_held(&rcu_bh_lock_map);
+        return 1;
+}
+
+/**
+ * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-sched read-side critical section unless it
+ * can prove otherwise.  Note that disabling of preemption (including
+ * disabling irqs) counts as an RCU-sched read-side critical section.
+ */
+static inline int rcu_read_lock_sched_held(void)
+{
+        int lockdep_opinion = 0;
+
+        if (debug_locks)
+                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+        return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+# define rcu_read_acquire()             do { } while (0)
+# define rcu_read_release()             do { } while (0)
+# define rcu_read_acquire_bh()          do { } while (0)
+# define rcu_read_release_bh()          do { } while (0)
+# define rcu_read_acquire_sched()       do { } while (0)
+# define rcu_read_release_sched()       do { } while (0)
+
+static inline int rcu_read_lock_held(void)
+{
+        return 1;
+}
+
+static inline int rcu_read_lock_bh_held(void)
+{
+        return 1;
+}
+
+static inline int rcu_read_lock_sched_held(void)
+{
+        return preempt_count() != 0 || !rcu_scheduler_active;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_PROVE_RCU
+
+/**
+ * rcu_dereference_check - rcu_dereference with debug checking
+ *
+ * Do an rcu_dereference(), but check that the context is correct.
+ * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
+ * ensure that the rcu_dereference_check() executes within an RCU
+ * read-side critical section.  It is also possible to check for
+ * locks being held, for example, by using lockdep_is_held().
+ */
+#define rcu_dereference_check(p, c) \
+        ({ \
+                if (debug_locks && !(c)) \
+                        lockdep_rcu_dereference(__FILE__, __LINE__); \
+                rcu_dereference_raw(p); \
+        })
+
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_dereference_check(p, c)     rcu_dereference_raw(p)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
@@ -160,7 +268,7 @@ static inline void rcu_read_lock_bh(void)
 {
         __rcu_read_lock_bh();
         __acquire(RCU_BH);
-        rcu_read_acquire();
+        rcu_read_acquire_bh();
 }
 
 /*
@@ -170,7 +278,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-        rcu_read_release();
+        rcu_read_release_bh();
         __release(RCU_BH);
         __rcu_read_unlock_bh();
 }
@@ -188,7 +296,7 @@ static inline void rcu_read_lock_sched(void)
 {
         preempt_disable();
         __acquire(RCU_SCHED);
-        rcu_read_acquire();
+        rcu_read_acquire_sched();
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -205,7 +313,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-        rcu_read_release();
+        rcu_read_release_sched();
         __release(RCU_SCHED);
         preempt_enable();
 }
@@ -219,21 +327,48 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 
 /**
- * rcu_dereference - fetch an RCU-protected pointer in an
- * RCU read-side critical section.  This pointer may later
- * be safely dereferenced.
+ * rcu_dereference_raw - fetch an RCU-protected pointer
+ *
+ * The caller must be within some flavor of RCU read-side critical
+ * section, or must be otherwise preventing the pointer from changing,
+ * for example, by holding an appropriate lock.  This pointer may later
+ * be safely dereferenced.  It is the caller's responsibility to have
+ * done the right thing, as this primitive does no checking of any kind.
  *
  * Inserts memory barriers on architectures that require them
  * (currently only the Alpha), and, more importantly, documents
  * exactly which pointers are protected by RCU.
  */
-
-#define rcu_dereference(p)      ({ \
+#define rcu_dereference_raw(p)  ({ \
                                 typeof(p) _________p1 = ACCESS_ONCE(p); \
                                 smp_read_barrier_depends(); \
                                 (_________p1); \
                                 })
 
+/**
+ * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference(p) \
+        rcu_dereference_check(p, rcu_read_lock_held())
+
+/**
+ * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_bh(p) \
+                rcu_dereference_check(p, rcu_read_lock_bh_held())
+
+/**
+ * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_sched(p) \
+                rcu_dereference_check(p, rcu_read_lock_sched_held())
+
 /**
  * rcu_assign_pointer - assign (publicize) a pointer to a newly
  * initialized structure that will be dereferenced by RCU read-side
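Usage note (not part of the diff): rcu_dereference_check() is the general building block; the condition names every context the author considers legal, and lockdep complains otherwise (only when CONFIG_PROVE_RCU is set). A minimal sketch; the my_* names are placeholders:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct my_data {
            int payload;
    };

    static DEFINE_SPINLOCK(my_lock);
    static struct my_data *my_ptr;  /* RCU-protected; updated under my_lock */

    static int my_read_payload(void)
    {
            struct my_data *p;
            int val = -1;

            rcu_read_lock();
            p = rcu_dereference_check(my_ptr,
                                      rcu_read_lock_held() ||
                                      lockdep_is_held(&my_lock));
            if (p)
                    val = p->payload;
            rcu_read_unlock();
            return val;
    }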
include/linux/rcutiny.h

@@ -62,6 +62,18 @@ static inline long rcu_batches_completed_bh(void)
 
 extern int rcu_expedited_torture_stats(char *page);
 
+static inline void rcu_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_bh_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_sched_force_quiescent_state(void)
+{
+}
+
 #define synchronize_rcu synchronize_sched
 
 static inline void synchronize_rcu_expedited(void)
@@ -93,10 +105,6 @@ static inline void rcu_exit_nohz(void)
 
 #endif /* #else #ifdef CONFIG_NO_HZ */
 
-static inline void rcu_scheduler_starting(void)
-{
-}
-
 static inline void exit_rcu(void)
 {
 }
include/linux/rcutree.h

@@ -35,7 +35,6 @@ struct notifier_block;
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern int rcu_needs_cpu(int cpu);
-extern void rcu_scheduler_starting(void);
 extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
@@ -99,6 +98,9 @@ extern void rcu_check_callbacks(int cpu, int user);
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
 extern long rcu_batches_completed_sched(void);
+extern void rcu_force_quiescent_state(void);
+extern void rcu_bh_force_quiescent_state(void);
+extern void rcu_sched_force_quiescent_state(void);
 
 #ifdef CONFIG_NO_HZ
 void rcu_enter_nohz(void);
include/linux/rtnetlink.h

@@ -735,6 +735,9 @@ extern void rtnl_lock(void);
 extern void rtnl_unlock(void);
 extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
+#ifdef CONFIG_PROVE_LOCKING
+extern int lockdep_rtnl_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
 
 extern void rtnetlink_init(void);
 extern void __rtnl_unlock(void);
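Usage note (not part of the diff): lockdep_rtnl_is_held() exists so that networking code can fold "RTNL is held" into an rcu_dereference_check() condition. A sketch of such a wrapper; my_dereference_rtnl is a placeholder, not an API added by this commit:

    #include <linux/rtnetlink.h>
    #include <linux/rcupdate.h>

    /* The condition is only evaluated under CONFIG_PROVE_RCU, which
     * depends on CONFIG_PROVE_LOCKING, so the reference to
     * lockdep_rtnl_is_held() is safe here. */
    #define my_dereference_rtnl(p) \
            rcu_dereference_check(p, rcu_read_lock_held() || \
                                     lockdep_rtnl_is_held())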
include/linux/srcu.h

@@ -35,6 +35,9 @@ struct srcu_struct {
         int completed;
         struct srcu_struct_array *per_cpu_ref;
         struct mutex mutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 };
 
 #ifndef CONFIG_PREEMPT
@@ -43,12 +46,100 @@ struct srcu_struct {
 #define srcu_barrier()
 #endif /* #else #ifndef CONFIG_PREEMPT */
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+                       struct lock_class_key *key);
+
+#define init_srcu_struct(sp) \
+({ \
+        static struct lock_class_key __srcu_key; \
+ \
+        __init_srcu_struct((sp), #sp, &__srcu_key); \
+})
+
+# define srcu_read_acquire(sp) \
+                lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define srcu_read_release(sp) \
+                lock_release(&(sp)->dep_map, 1, _THIS_IP_)
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 int init_srcu_struct(struct srcu_struct *sp);
+
+# define srcu_read_acquire(sp)  do { } while (0)
+# define srcu_read_release(sp)  do { } while (0)
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 void cleanup_srcu_struct(struct srcu_struct *sp);
-int srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
-void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
+int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
+void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
 void synchronize_srcu(struct srcu_struct *sp);
 void synchronize_srcu_expedited(struct srcu_struct *sp);
 long srcu_batches_completed(struct srcu_struct *sp);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+/**
+ * srcu_read_lock_held - might we be in SRCU read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an SRCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an SRCU read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int srcu_read_lock_held(struct srcu_struct *sp)
+{
+        if (debug_locks)
+                return lock_is_held(&sp->dep_map);
+        return 1;
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline int srcu_read_lock_held(struct srcu_struct *sp)
+{
+        return 1;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+/**
+ * srcu_dereference - fetch SRCU-protected pointer with checking
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define srcu_dereference(p, sp) \
+                rcu_dereference_check(p, srcu_read_lock_held(sp))
+
+/**
+ * srcu_read_lock - register a new reader for an SRCU-protected structure.
+ * @sp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section.  Note that SRCU read-side
+ * critical sections may be nested.
+ */
+static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+{
+        int retval = __srcu_read_lock(sp);
+
+        srcu_read_acquire(sp);
+        return retval;
+}
+
+/**
+ * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
+ * @sp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock().
+ *
+ * Exit an SRCU read-side critical section.
+ */
+static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
+        __releases(sp)
+{
+        srcu_read_release(sp);
+        __srcu_read_unlock(sp, idx);
+}
+
 #endif
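Usage note (not part of the diff): with dep_map embedded in srcu_struct, SRCU readers get the same lockdep coverage as RCU readers. A minimal reader; the my_* names are placeholders:

    #include <linux/srcu.h>

    struct my_data {
            int payload;
    };

    static struct srcu_struct my_srcu;      /* init_srcu_struct(&my_srcu) first */
    static struct my_data *my_srcu_ptr;     /* protected by my_srcu */

    static int my_read(void)
    {
            struct my_data *p;
            int idx, val = -1;

            idx = srcu_read_lock(&my_srcu);
            p = srcu_dereference(my_srcu_ptr, &my_srcu);
            if (p)
                    val = p->payload;
            srcu_read_unlock(&my_srcu, idx);
            return val;
    }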