Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply pending patches that are based on newer code already present upstream.
commit b7e9c223be
993 changed files with 34561 additions and 5907 deletions
@@ -560,29 +560,28 @@ void exit_files(struct task_struct *tsk)
#ifdef CONFIG_MM_OWNER
/*
* Task p is exiting and it owned mm, lets find a new owner for it
* A task is exiting. If it owned this mm, find a new owner for the mm.
*/
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
/*
* If there are other users of the mm and the owner (us) is exiting
* we need to find a new owner to take on the responsibility.
*/
if (atomic_read(&mm->mm_users) <= 1)
return 0;
if (mm->owner != p)
return 0;
return 1;
}

void mm_update_next_owner(struct mm_struct *mm)
{
struct task_struct *c, *g, *p = current;

retry:
if (!mm_need_new_owner(mm, p))
/*
* If the exiting or execing task is not the owner, it's
* someone else's problem.
*/
if (mm->owner != p)
return;
/*
* The current owner is exiting/execing and there are no other
* candidates.  Do not leave the mm pointing to a possibly
* freed task structure.
*/
if (atomic_read(&mm->mm_users) <= 1) {
mm->owner = NULL;
return;
}

read_lock(&tasklist_lock);
/*
@@ -2,7 +2,8 @@ menu "GCOV-based kernel profiling"
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS && CONSTRUCTORS
depends on DEBUG_FS
select CONSTRUCTORS
default n
---help---
This option enables gcov-based code profiling (e.g. for code coverage
@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
int ret = 0;

if (!desc)
return -EINVAL;

/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
@@ -375,15 +375,19 @@ int jump_label_text_reserved(void *start, void *end)
static void jump_label_update(struct jump_label_key *key, int enable)
{
struct jump_entry *entry = key->entries;

/* if there are no users, entry can be NULL */
if (entry)
__jump_label_update(key, entry, __stop___jump_table, enable);
struct jump_entry *entry = key->entries, *stop = __stop___jump_table;

#ifdef CONFIG_MODULES
struct module *mod = __module_address((jump_label_t)key);

__jump_label_mod_update(key, enable);

if (mod)
stop = mod->jump_entries + mod->num_jump_entries;
#endif
/* if there are no users, entry can be NULL */
if (entry)
__jump_label_update(key, entry, stop, enable);
}

#endif
@@ -156,12 +156,6 @@ static int ____call_usermodehelper(void *data)
*/
set_user_nice(current, 0);

if (sub_info->init) {
retval = sub_info->init(sub_info);
if (retval)
goto fail;
}

retval = -ENOMEM;
new = prepare_kernel_cred(current);
if (!new)
@@ -173,6 +167,14 @@ static int ____call_usermodehelper(void *data)
new->cap_inheritable);
spin_unlock(&umh_sysctl_lock);

if (sub_info->init) {
retval = sub_info->init(sub_info, new);
if (retval) {
abort_creds(new);
goto fail;
}
}

commit_creds(new);

retval = kernel_execve(sub_info->path,
@@ -388,7 +390,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
* context in which call_usermodehelper_exec is called.
*/
void call_usermodehelper_setfns(struct subprocess_info *info,
int (*init)(struct subprocess_info *info),
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *info),
void *data)
{
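[Editor's note, not part of the commit: the hunks above change the usermode-helper init callback so that it also receives the helper's new credentials. A minimal illustrative sketch of a caller written against the updated prototype might look like this; the function names and the capability chosen are assumptions for illustration only.]

    /* Illustrative only: an init hook matching the new two-argument prototype. */
    static int example_umh_init(struct subprocess_info *info, struct cred *new)
    {
            /* The helper's credentials can be adjusted before it execs. */
            cap_raise(new->cap_effective, CAP_NET_ADMIN);
            return 0;
    }

    static int example_run_helper(char *path, char **argv, char **envp)
    {
            struct subprocess_info *info;

            info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
            if (!info)
                    return -ENOMEM;
            call_usermodehelper_setfns(info, example_umh_init, NULL, NULL);
            return call_usermodehelper_exec(info, UMH_WAIT_PROC);
    }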
@@ -1211,7 +1211,11 @@ static void free_unnecessary_pages(void)
to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
to_free_normal -= save - alloc_highmem;
save -= alloc_highmem;
if (to_free_normal > save)
to_free_normal -= save;
else
to_free_normal = 0;
}

memory_bm_position_reset(&copy_bm);
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
if (error)
pm_notifier_call_chain(PM_POST_RESTORE);
}
if (error)
if (error) {
free_basic_memory_bitmaps();
atomic_inc(&snapshot_device_available);
}
data->frozen = 0;
data->ready = 0;
data->platform_support = 0;
kernel/rcutree.c (398 lines changed)
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

#ifdef CONFIG_RCU_BOOST

/*
* Control variables for per-CPU and per-rcu_node kthreads.  These
* handle all flavors of RCU.
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
static char rcu_kthreads_spawnable;

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_cpu_kthread(void);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);

#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */

@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
int need_report = 0;
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp;
struct task_struct *t;

/* Stop the CPU's kthread. */
t = per_cpu(rcu_cpu_kthread_task, cpu);
if (t != NULL) {
per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
kthread_stop(t);
}
rcu_stop_cpu_kthread(cpu);

/* Exclude any attempts to start a new grace period. */
raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)

/* Re-raise the RCU softirq if there are callbacks remaining. */
if (cpu_has_callbacks_ready_to_invoke(rdp))
invoke_rcu_cpu_kthread();
invoke_rcu_core();
}

/*
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
}
rcu_preempt_check_callbacks(cpu);
if (rcu_pending(cpu))
invoke_rcu_cpu_kthread();
invoke_rcu_core();
}

#ifdef CONFIG_SMP
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
}

/* If there are callbacks ready, invoke them. */
rcu_do_batch(rsp, rdp);
if (cpu_has_callbacks_ready_to_invoke(rdp))
invoke_rcu_callbacks(rsp, rdp);
}

/*
* Do softirq processing for the current CPU.
*/
static void rcu_process_callbacks(void)
static void rcu_process_callbacks(struct softirq_action *unused)
{
__rcu_process_callbacks(&rcu_sched_state,
&__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
* the current CPU with interrupts disabled, the rcu_cpu_kthread_task
* cannot disappear out from under us.
*/
static void invoke_rcu_cpu_kthread(void)
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;

local_irq_save(flags);
__this_cpu_write(rcu_cpu_has_work, 1);
if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
local_irq_restore(flags);
if (likely(!rsp->boost)) {
rcu_do_batch(rsp, rdp);
return;
}
wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
local_irq_restore(flags);
invoke_rcu_callbacks_kthread();
}

/*
* Wake up the specified per-rcu_node-structure kthread.
* Because the per-rcu_node kthreads are immortal, we don't need
* to do anything to keep them alive.
*/
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
static void invoke_rcu_core(void)
{
struct task_struct *t;

t = rnp->node_kthread_task;
if (t != NULL)
wake_up_process(t);
raise_softirq(RCU_SOFTIRQ);
}

/*
* Set the specified CPU's kthread to run RT or not, as specified by
* the to_rt argument.  The CPU-hotplug locks are held, so the task
* is not going away.
*/
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
int policy;
struct sched_param sp;
struct task_struct *t;

t = per_cpu(rcu_cpu_kthread_task, cpu);
if (t == NULL)
return;
if (to_rt) {
policy = SCHED_FIFO;
sp.sched_priority = RCU_KTHREAD_PRIO;
} else {
policy = SCHED_NORMAL;
sp.sched_priority = 0;
}
sched_setscheduler_nocheck(t, policy, &sp);
}

/*
* Timer handler to initiate the waking up of per-CPU kthreads that
* have yielded the CPU due to excess numbers of RCU callbacks.
* We wake up the per-rcu_node kthread, which in turn will wake up
* the booster kthread.
*/
static void rcu_cpu_kthread_timer(unsigned long arg)
{
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
struct rcu_node *rnp = rdp->mynode;

atomic_or(rdp->grpmask, &rnp->wakemask);
invoke_rcu_node_kthread(rnp);
}

/*
* Drop to non-real-time priority and yield, but only after posting a
* timer that will cause us to regain our real-time priority if we
* remain preempted.  Either way, we restore our real-time priority
* before returning.
*/
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
struct sched_param sp;
struct timer_list yield_timer;

setup_timer_on_stack(&yield_timer, f, arg);
mod_timer(&yield_timer, jiffies + 2);
sp.sched_priority = 0;
sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
set_user_nice(current, 19);
schedule();
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
del_timer(&yield_timer);
}

/*
* Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
* This can happen while the corresponding CPU is either coming online
* or going offline.  We cannot wait until the CPU is fully online
* before starting the kthread, because the various notifier functions
* can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
* the corresponding CPU is online.
*
* Return 1 if the kthread needs to stop, 0 otherwise.
*
* Caller must disable bh.  This function can momentarily enable it.
*/
static int rcu_cpu_kthread_should_stop(int cpu)
{
while (cpu_is_offline(cpu) ||
!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
smp_processor_id() != cpu) {
if (kthread_should_stop())
return 1;
per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
local_bh_enable();
schedule_timeout_uninterruptible(1);
if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
set_cpus_allowed_ptr(current, cpumask_of(cpu));
local_bh_disable();
}
per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
return 0;
}

/*
* Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
* earlier RCU softirq.
*/
static int rcu_cpu_kthread(void *arg)
{
int cpu = (int)(long)arg;
unsigned long flags;
int spincnt = 0;
unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
char work;
char *workp = &per_cpu(rcu_cpu_has_work, cpu);

for (;;) {
*statusp = RCU_KTHREAD_WAITING;
rcu_wait(*workp != 0 || kthread_should_stop());
local_bh_disable();
if (rcu_cpu_kthread_should_stop(cpu)) {
local_bh_enable();
break;
}
*statusp = RCU_KTHREAD_RUNNING;
per_cpu(rcu_cpu_kthread_loops, cpu)++;
local_irq_save(flags);
work = *workp;
*workp = 0;
local_irq_restore(flags);
if (work)
rcu_process_callbacks();
local_bh_enable();
if (*workp != 0)
spincnt++;
else
spincnt = 0;
if (spincnt > 10) {
*statusp = RCU_KTHREAD_YIELDING;
rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
spincnt = 0;
}
}
*statusp = RCU_KTHREAD_STOPPED;
return 0;
}

/*
* Spawn a per-CPU kthread, setting up affinity and priority.
* Because the CPU hotplug lock is held, no other CPU will be attempting
* to manipulate rcu_cpu_kthread_task.  There might be another CPU
* attempting to access it during boot, but the locking in kthread_bind()
* will enforce sufficient ordering.
*/
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
struct sched_param sp;
struct task_struct *t;

if (!rcu_kthreads_spawnable ||
per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
return 0;
t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
if (IS_ERR(t))
return PTR_ERR(t);
kthread_bind(t, cpu);
per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
per_cpu(rcu_cpu_kthread_task, cpu) = t;
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
return 0;
}

/*
* Per-rcu_node kthread, which is in charge of waking up the per-CPU
* kthreads when needed.  We ignore requests to wake up kthreads
* for offline CPUs, which is OK because force_quiescent_state()
* takes care of this case.
*/
static int rcu_node_kthread(void *arg)
{
int cpu;
unsigned long flags;
unsigned long mask;
struct rcu_node *rnp = (struct rcu_node *)arg;
struct sched_param sp;
struct task_struct *t;

for (;;) {
rnp->node_kthread_status = RCU_KTHREAD_WAITING;
rcu_wait(atomic_read(&rnp->wakemask) != 0);
rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
raw_spin_lock_irqsave(&rnp->lock, flags);
mask = atomic_xchg(&rnp->wakemask, 0);
rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
if ((mask & 0x1) == 0)
continue;
preempt_disable();
t = per_cpu(rcu_cpu_kthread_task, cpu);
if (!cpu_online(cpu) || t == NULL) {
preempt_enable();
continue;
}
per_cpu(rcu_cpu_has_work, cpu) = 1;
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
preempt_enable();
}
}
/* NOTREACHED */
rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
return 0;
}

/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question.  The CPU hotplug lock is still
* held, so the value of rnp->qsmaskinit will be stable.
*
* We don't include outgoingcpu in the affinity set, use -1 if there is
* no outgoing CPU.  If there are no CPUs left in the affinity set,
* this function allows the kthread to execute on any CPU.
*/
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
cpumask_var_t cm;
int cpu;
unsigned long mask = rnp->qsmaskinit;

if (rnp->node_kthread_task == NULL)
return;
if (!alloc_cpumask_var(&cm, GFP_KERNEL))
return;
cpumask_clear(cm);
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
if ((mask & 0x1) && cpu != outgoingcpu)
cpumask_set_cpu(cpu, cm);
if (cpumask_weight(cm) == 0) {
cpumask_setall(cm);
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
cpumask_clear_cpu(cpu, cm);
WARN_ON_ONCE(cpumask_weight(cm) == 0);
}
set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
rcu_boost_kthread_setaffinity(rnp, cm);
free_cpumask_var(cm);
}

/*
* Spawn a per-rcu_node kthread, setting priority and affinity.
* Called during boot before online/offline can happen, or, if
* during runtime, with the main CPU-hotplug locks held.  So only
* one of these can be executing at a time.
*/
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
struct rcu_node *rnp)
{
unsigned long flags;
int rnp_index = rnp - &rsp->node[0];
struct sched_param sp;
struct task_struct *t;

if (!rcu_kthreads_spawnable ||
rnp->qsmaskinit == 0)
return 0;
if (rnp->node_kthread_task == NULL) {
t = kthread_create(rcu_node_kthread, (void *)rnp,
"rcun%d", rnp_index);
if (IS_ERR(t))
return PTR_ERR(t);
raw_spin_lock_irqsave(&rnp->lock, flags);
rnp->node_kthread_task = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
sp.sched_priority = 99;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
}
return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);

/*
* Spawn all kthreads -- called as soon as the scheduler is running.
*/
static int __init rcu_spawn_kthreads(void)
{
int cpu;
struct rcu_node *rnp;
struct task_struct *t;

rcu_kthreads_spawnable = 1;
for_each_possible_cpu(cpu) {
per_cpu(rcu_cpu_has_work, cpu) = 0;
if (cpu_online(cpu)) {
(void)rcu_spawn_one_cpu_kthread(cpu);
t = per_cpu(rcu_cpu_kthread_task, cpu);
if (t)
wake_up_process(t);
}
}
rnp = rcu_get_root(rcu_state);
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
if (rnp->node_kthread_task)
wake_up_process(rnp->node_kthread_task);
if (NUM_RCU_NODES > 1) {
rcu_for_each_leaf_node(rcu_state, rnp) {
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
t = rnp->node_kthread_task;
if (t)
wake_up_process(t);
rcu_wake_one_boost_kthread(rnp);
}
}
return 0;
}
early_initcall(rcu_spawn_kthreads);

static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
struct rcu_state *rsp)
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
rcu_preempt_init_percpu_data(cpu);
}

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode;

/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
if (rcu_kthreads_spawnable) {
(void)rcu_spawn_one_cpu_kthread(cpu);
if (rnp->node_kthread_task == NULL)
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
}
}

/*
* kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
* but the RCU threads are woken on demand, and if demand is low this
* could be a while triggering the hung task watchdog.
*
* In order to avoid this, poke all tasks once the CPU is fully
* up and running.
*/
static void __cpuinit rcu_online_kthreads(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode;
struct task_struct *t;

t = per_cpu(rcu_cpu_kthread_task, cpu);
if (t)
wake_up_process(t);

t = rnp->node_kthread_task;
if (t)
wake_up_process(t);

rcu_wake_one_boost_kthread(rnp);
}

/*
* Handle CPU online/offline notification events.
*/
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
rcu_prepare_kthreads(cpu);
break;
case CPU_ONLINE:
rcu_online_kthreads(cpu);
case CPU_DOWN_FAILED:
rcu_node_kthread_setaffinity(rnp, -1);
rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
rcu_init_one(&rcu_sched_state, &rcu_sched_data);
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
__rcu_init_preempt();
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

/*
* We don't need protection against CPU-hotplug here because
@@ -369,6 +369,7 @@ struct rcu_state {
/* period because */
/* force_quiescent_state() */
/* was running. */
u8 boost; /* Subject to priority boost. */
unsigned long gpnum; /* Current gp number. */
unsigned long completed; /* # of last completed gp. */

@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
unsigned long flags);
static void rcu_stop_cpu_kthread(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
static void __init __rcu_init_preempt(void);
static void rcu_needs_cpu_flush(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
cpumask_var_t cm);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp,
int rnp_index);
static void invoke_rcu_node_kthread(struct rcu_node *rnp);
static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
static void __cpuinit rcu_prepare_kthreads(int cpu);

#endif /* #ifndef RCU_TREE_NONCORE */
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
&__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
* Queue a preemptible-RCU callback for invocation after a grace period.
*/
@@ -1248,6 +1257,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
}
}

/*
* Wake up the per-CPU kthread to invoke RCU callbacks.
*/
static void invoke_rcu_callbacks_kthread(void)
{
unsigned long flags;

local_irq_save(flags);
__this_cpu_write(rcu_cpu_has_work, 1);
if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
local_irq_restore(flags);
return;
}
wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
local_irq_restore(flags);
}

/*
* Set the affinity of the boost kthread.  The CPU-hotplug locks are
* held, so no one should be messing with the existence of the boost
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,

if (&rcu_preempt_state != rsp)
return 0;
rsp->boost = 1;
if (rnp->boost_kthread_task != NULL)
return 0;
t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
raw_spin_unlock_irqrestore(&rnp->lock, flags);
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
return 0;
}

static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
#ifdef CONFIG_HOTPLUG_CPU

/*
* Stop the RCU's per-CPU kthread when its CPU goes offline,.
*/
static void rcu_stop_cpu_kthread(int cpu)
{
if (rnp->boost_kthread_task)
wake_up_process(rnp->boost_kthread_task);
struct task_struct *t;

/* Stop the CPU's kthread. */
t = per_cpu(rcu_cpu_kthread_task, cpu);
if (t != NULL) {
per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
kthread_stop(t);
}
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_kthread_do_work(void)
{
rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
rcu_preempt_do_callbacks();
}

/*
* Wake up the specified per-rcu_node-structure kthread.
* Because the per-rcu_node kthreads are immortal, we don't need
* to do anything to keep them alive.
*/
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
struct task_struct *t;

t = rnp->node_kthread_task;
if (t != NULL)
wake_up_process(t);
}

/*
* Set the specified CPU's kthread to run RT or not, as specified by
* the to_rt argument.  The CPU-hotplug locks are held, so the task
* is not going away.
*/
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
int policy;
struct sched_param sp;
struct task_struct *t;

t = per_cpu(rcu_cpu_kthread_task, cpu);
if (t == NULL)
return;
if (to_rt) {
policy = SCHED_FIFO;
sp.sched_priority = RCU_KTHREAD_PRIO;
} else {
policy = SCHED_NORMAL;
sp.sched_priority = 0;
}
sched_setscheduler_nocheck(t, policy, &sp);
}

/*
* Timer handler to initiate the waking up of per-CPU kthreads that
* have yielded the CPU due to excess numbers of RCU callbacks.
* We wake up the per-rcu_node kthread, which in turn will wake up
* the booster kthread.
*/
static void rcu_cpu_kthread_timer(unsigned long arg)
{
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
struct rcu_node *rnp = rdp->mynode;

atomic_or(rdp->grpmask, &rnp->wakemask);
invoke_rcu_node_kthread(rnp);
}

/*
* Drop to non-real-time priority and yield, but only after posting a
* timer that will cause us to regain our real-time priority if we
* remain preempted.  Either way, we restore our real-time priority
* before returning.
*/
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
struct sched_param sp;
struct timer_list yield_timer;

setup_timer_on_stack(&yield_timer, f, arg);
mod_timer(&yield_timer, jiffies + 2);
sp.sched_priority = 0;
sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
set_user_nice(current, 19);
schedule();
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
del_timer(&yield_timer);
}

/*
* Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
* This can happen while the corresponding CPU is either coming online
* or going offline.  We cannot wait until the CPU is fully online
* before starting the kthread, because the various notifier functions
* can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
* the corresponding CPU is online.
*
* Return 1 if the kthread needs to stop, 0 otherwise.
*
* Caller must disable bh.  This function can momentarily enable it.
*/
static int rcu_cpu_kthread_should_stop(int cpu)
{
while (cpu_is_offline(cpu) ||
!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
smp_processor_id() != cpu) {
if (kthread_should_stop())
return 1;
per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
local_bh_enable();
schedule_timeout_uninterruptible(1);
if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
set_cpus_allowed_ptr(current, cpumask_of(cpu));
local_bh_disable();
}
per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
return 0;
}

/*
* Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
* earlier RCU softirq.
*/
static int rcu_cpu_kthread(void *arg)
{
int cpu = (int)(long)arg;
unsigned long flags;
int spincnt = 0;
unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
char work;
char *workp = &per_cpu(rcu_cpu_has_work, cpu);

for (;;) {
*statusp = RCU_KTHREAD_WAITING;
rcu_wait(*workp != 0 || kthread_should_stop());
local_bh_disable();
if (rcu_cpu_kthread_should_stop(cpu)) {
local_bh_enable();
break;
}
*statusp = RCU_KTHREAD_RUNNING;
per_cpu(rcu_cpu_kthread_loops, cpu)++;
local_irq_save(flags);
work = *workp;
*workp = 0;
local_irq_restore(flags);
if (work)
rcu_kthread_do_work();
local_bh_enable();
if (*workp != 0)
spincnt++;
else
spincnt = 0;
if (spincnt > 10) {
*statusp = RCU_KTHREAD_YIELDING;
rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
spincnt = 0;
}
}
*statusp = RCU_KTHREAD_STOPPED;
return 0;
}

/*
* Spawn a per-CPU kthread, setting up affinity and priority.
* Because the CPU hotplug lock is held, no other CPU will be attempting
* to manipulate rcu_cpu_kthread_task.  There might be another CPU
* attempting to access it during boot, but the locking in kthread_bind()
* will enforce sufficient ordering.
*
* Please note that we cannot simply refuse to wake up the per-CPU
* kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
* which can result in softlockup complaints if the task ends up being
* idle for more than a couple of minutes.
*
* However, please note also that we cannot bind the per-CPU kthread to its
* CPU until that CPU is fully online.  We also cannot wait until the
* CPU is fully online before we create its per-CPU kthread, as this would
* deadlock the system when CPU notifiers tried waiting for grace
* periods.  So we bind the per-CPU kthread to its CPU only if the CPU
* is online.  If its CPU is not yet fully online, then the code in
* rcu_cpu_kthread() will wait until it is fully online, and then do
* the binding.
*/
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
struct sched_param sp;
struct task_struct *t;

if (!rcu_kthreads_spawnable ||
per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
return 0;
t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
if (IS_ERR(t))
return PTR_ERR(t);
if (cpu_online(cpu))
kthread_bind(t, cpu);
per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
per_cpu(rcu_cpu_kthread_task, cpu) = t;
wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
return 0;
}

/*
* Per-rcu_node kthread, which is in charge of waking up the per-CPU
* kthreads when needed.  We ignore requests to wake up kthreads
* for offline CPUs, which is OK because force_quiescent_state()
* takes care of this case.
*/
static int rcu_node_kthread(void *arg)
{
int cpu;
unsigned long flags;
unsigned long mask;
struct rcu_node *rnp = (struct rcu_node *)arg;
struct sched_param sp;
struct task_struct *t;

for (;;) {
rnp->node_kthread_status = RCU_KTHREAD_WAITING;
rcu_wait(atomic_read(&rnp->wakemask) != 0);
rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
raw_spin_lock_irqsave(&rnp->lock, flags);
mask = atomic_xchg(&rnp->wakemask, 0);
rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
if ((mask & 0x1) == 0)
continue;
preempt_disable();
t = per_cpu(rcu_cpu_kthread_task, cpu);
if (!cpu_online(cpu) || t == NULL) {
preempt_enable();
continue;
}
per_cpu(rcu_cpu_has_work, cpu) = 1;
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
preempt_enable();
}
}
/* NOTREACHED */
rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
return 0;
}

/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question.  The CPU hotplug lock is still
* held, so the value of rnp->qsmaskinit will be stable.
*
* We don't include outgoingcpu in the affinity set, use -1 if there is
* no outgoing CPU.  If there are no CPUs left in the affinity set,
* this function allows the kthread to execute on any CPU.
*/
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
cpumask_var_t cm;
int cpu;
unsigned long mask = rnp->qsmaskinit;

if (rnp->node_kthread_task == NULL)
return;
if (!alloc_cpumask_var(&cm, GFP_KERNEL))
return;
cpumask_clear(cm);
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
if ((mask & 0x1) && cpu != outgoingcpu)
cpumask_set_cpu(cpu, cm);
if (cpumask_weight(cm) == 0) {
cpumask_setall(cm);
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
cpumask_clear_cpu(cpu, cm);
WARN_ON_ONCE(cpumask_weight(cm) == 0);
}
set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
rcu_boost_kthread_setaffinity(rnp, cm);
free_cpumask_var(cm);
}

/*
* Spawn a per-rcu_node kthread, setting priority and affinity.
* Called during boot before online/offline can happen, or, if
* during runtime, with the main CPU-hotplug locks held.  So only
* one of these can be executing at a time.
*/
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
struct rcu_node *rnp)
{
unsigned long flags;
int rnp_index = rnp - &rsp->node[0];
struct sched_param sp;
struct task_struct *t;

if (!rcu_kthreads_spawnable ||
rnp->qsmaskinit == 0)
return 0;
if (rnp->node_kthread_task == NULL) {
t = kthread_create(rcu_node_kthread, (void *)rnp,
"rcun%d", rnp_index);
if (IS_ERR(t))
return PTR_ERR(t);
raw_spin_lock_irqsave(&rnp->lock, flags);
rnp->node_kthread_task = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
sp.sched_priority = 99;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
}
return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

/*
* Spawn all kthreads -- called as soon as the scheduler is running.
*/
static int __init rcu_spawn_kthreads(void)
{
int cpu;
struct rcu_node *rnp;

rcu_kthreads_spawnable = 1;
for_each_possible_cpu(cpu) {
per_cpu(rcu_cpu_has_work, cpu) = 0;
if (cpu_online(cpu))
(void)rcu_spawn_one_cpu_kthread(cpu);
}
rnp = rcu_get_root(rcu_state);
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
if (NUM_RCU_NODES > 1) {
rcu_for_each_leaf_node(rcu_state, rnp)
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
}
return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode;

/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
if (rcu_kthreads_spawnable) {
(void)rcu_spawn_one_cpu_kthread(cpu);
if (rnp->node_kthread_task == NULL)
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
}
}

#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
cpumask_var_t cm)
static void invoke_rcu_callbacks_kthread(void)
{
WARN_ON_ONCE(1);
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp,
int rnp_index)
#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
return 0;
}

static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
*
* Because it is not legal to invoke rcu_process_callbacks() with irqs
* disabled, we do one pass of force_quiescent_state(), then do a
* invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked
* invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
* later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
*/
int rcu_needs_cpu(int cpu)
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu)

/* If RCU callbacks are still pending, RCU still needs this CPU. */
if (c)
invoke_rcu_cpu_kthread();
invoke_rcu_core();
return c;
}
@@ -46,6 +46,8 @@
#define RCU_TREE_NONCORE
#include "rcutree.h"

#ifdef CONFIG_RCU_BOOST

DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
return "SRWOY"[kthread_status];
}

#endif /* #ifdef CONFIG_RCU_BOOST */

static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
{
if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld",
seq_printf(m, " ql=%ld qs=%c%c%c%c",
rdp->qlen,
".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->nxttail[RCU_NEXT_READY_TAIL]],
".W"[rdp->nxttail[RCU_DONE_TAIL] !=
rdp->nxttail[RCU_WAIT_TAIL]],
".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
#ifdef CONFIG_RCU_BOOST
seq_printf(m, " kt=%d/%c/%d ktl=%x",
per_cpu(rcu_cpu_has_work, rdp->cpu),
convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
rdp->cpu)),
per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff,
rdp->blimit);
per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
#endif /* #ifdef CONFIG_RCU_BOOST */
seq_printf(m, " b=%ld", rdp->blimit);
seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen,
seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
rdp->nxttail[RCU_NEXT_TAIL]],
".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
rdp->nxttail[RCU_NEXT_READY_TAIL]],
".W"[rdp->nxttail[RCU_DONE_TAIL] !=
rdp->nxttail[RCU_WAIT_TAIL]],
".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
#ifdef CONFIG_RCU_BOOST
seq_printf(m, ",%d,\"%c\"",
per_cpu(rcu_cpu_has_work, rdp->cpu),
convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
rdp->cpu)),
rdp->blimit);
rdp->cpu)));
#endif /* #ifdef CONFIG_RCU_BOOST */
seq_printf(m, ",%ld", rdp->blimit);
seq_printf(m, ",%lu,%lu,%lu\n",
rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
#ifdef CONFIG_NO_HZ
seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
#ifdef CONFIG_RCU_BOOST
seq_puts(m, "\"kt\",\"ktl\"");
#endif /* #ifdef CONFIG_RCU_BOOST */
seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
#ifdef CONFIG_TREE_PREEMPT_RCU
seq_puts(m, "\"rcu_preempt:\"\n");
PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
@@ -38,6 +38,14 @@ struct resource iomem_resource = {
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
resource_size_t min, max, align;
resource_size_t (*alignf)(void *, const struct resource *,
resource_size_t, resource_size_t);
void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
@@ -384,16 +392,13 @@ static bool resource_contains(struct resource *res1, struct resource *res2)
}

/*
* Find empty slot in the resource tree given range and alignment.
* Find empty slot in the resource tree with the given range and
* alignment constraints
*/
static int find_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
void *alignf_data)
static int __find_resource(struct resource *root, struct resource *old,
struct resource *new,
resource_size_t size,
struct resource_constraint *constraint)
{
struct resource *this = root->child;
struct resource tmp = *new, avail, alloc;
@@ -404,25 +409,26 @@ static int find_resource(struct resource *root, struct resource *new,
* Skip past an allocated resource that starts at 0, since the assignment
* of this->start - 1 to tmp->end below would cause an underflow.
*/
if (this && this->start == 0) {
tmp.start = this->end + 1;
if (this && this->start == root->start) {
tmp.start = (this == old) ? old->start : this->end + 1;
this = this->sibling;
}
for(;;) {
if (this)
tmp.end = this->start - 1;
tmp.end = (this == old) ? this->end : this->start - 1;
else
tmp.end = root->end;

resource_clip(&tmp, min, max);
resource_clip(&tmp, constraint->min, constraint->max);
arch_remove_reservations(&tmp);

/* Check for overflow after ALIGN() */
avail = *new;
avail.start = ALIGN(tmp.start, align);
avail.start = ALIGN(tmp.start, constraint->align);
avail.end = tmp.end;
if (avail.start >= tmp.start) {
alloc.start = alignf(alignf_data, &avail, size, align);
alloc.start = constraint->alignf(constraint->alignf_data, &avail,
size, constraint->align);
alloc.end = alloc.start + size - 1;
if (resource_contains(&avail, &alloc)) {
new->start = alloc.start;
@@ -432,14 +438,75 @@ static int find_resource(struct resource *root, struct resource *new,
}
if (!this)
break;
tmp.start = this->end + 1;
if (this != old)
tmp.start = this->end + 1;
this = this->sibling;
}
return -EBUSY;
}

/*
* Find empty slot in the resource tree given range and alignment.
*/
static int find_resource(struct resource *root, struct resource *new,
resource_size_t size,
struct resource_constraint *constraint)
{
return __find_resource(root, NULL, new, size, constraint);
}

/**
* allocate_resource - allocate empty slot in the resource tree given range & alignment
* reallocate_resource - allocate a slot in the resource tree given range & alignment.
* The resource will be relocated if the new size cannot be reallocated in the
* current location.
*
* @root: root resource descriptor
* @old: resource descriptor desired by caller
* @newsize: new size of the resource descriptor
* @constraint: the size and alignment constraints to be met.
*/
int reallocate_resource(struct resource *root, struct resource *old,
resource_size_t newsize,
struct resource_constraint *constraint)
{
int err=0;
struct resource new = *old;
struct resource *conflict;

write_lock(&resource_lock);

if ((err = __find_resource(root, old, &new, newsize, constraint)))
goto out;

if (resource_contains(&new, old)) {
old->start = new.start;
old->end = new.end;
goto out;
}

if (old->child) {
err = -EBUSY;
goto out;
}

if (resource_contains(old, &new)) {
old->start = new.start;
old->end = new.end;
} else {
__release_resource(old);
*old = new;
conflict = __request_resource(root, old);
BUG_ON(conflict);
}
out:
write_unlock(&resource_lock);
return err;
}


/**
* allocate_resource - allocate empty slot in the resource tree given range & alignment.
* The resource will be reallocated with a new size if it was already allocated
* @root: root resource descriptor
* @new: resource descriptor desired by caller
* @size: requested resource region size
@@ -459,12 +526,25 @@ int allocate_resource(struct resource *root, struct resource *new,
void *alignf_data)
{
int err;
struct resource_constraint constraint;

if (!alignf)
alignf = simple_align_resource;

constraint.min = min;
constraint.max = max;
constraint.align = align;
constraint.alignf = alignf;
constraint.alignf_data = alignf_data;

if ( new->parent ) {
/* resource is already allocated, try reallocating with
the new constraints */
return reallocate_resource(root, new, size, &constraint);
}

write_lock(&resource_lock);
err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
err = find_resource(root, new, size, &constraint);
if (err >= 0 && __request_resource(root, new))
err = -EBUSY;
write_unlock(&resource_lock);
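[Editor's note, not part of the commit: with the hunks above, allocate_resource() notices when the passed-in resource already has a parent and then attempts a constrained reallocation rather than failing. A hypothetical caller, shown only as an illustration (the function name, root choice, and alignment here are assumptions), could now grow an existing region like this:]

    /* Illustrative only: resize a resource that is already in the tree. */
    static int example_grow_region(struct resource *res, resource_size_t new_size)
    {
            /*
             * Because res->parent is set, allocate_resource() forwards to
             * reallocate_resource() under the packed constraints.
             */
            return allocate_resource(&iomem_resource, res, new_size,
                                     0, (resource_size_t)-1, PAGE_SIZE,
                                     NULL, NULL);
    }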
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);
* (The default weight is 1024 - so there's no practical
* limitation from this.)
*/
#define MIN_SHARES 2
#define MAX_SHARES (1UL << (18 + SCHED_LOAD_RESOLUTION))
#define MIN_SHARES (1UL << 1)
#define MAX_SHARES (1UL << 18)

static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif
@@ -8449,10 +8449,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
if (!tg->se[0])
return -EINVAL;

if (shares < MIN_SHARES)
shares = MIN_SHARES;
else if (shares > MAX_SHARES)
shares = MAX_SHARES;
shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

mutex_lock(&shares_mutex);
if (tg->shares == shares)
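[Editor's note, not part of the commit: for readers unfamiliar with clamp() from <linux/kernel.h>, the single call above bounds the value to the closed range, so it is equivalent to the removed if/else chain:]

    /* clamp(val, lo, hi) == min(max(val, lo), hi), so the new line expands to: */
    shares = min(max(shares, scale_load(MIN_SHARES)), scale_load(MAX_SHARES));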
@@ -1096,7 +1096,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
* to move current somewhere else, making room for our non-migratable
* task.
*/
if (p->prio == rq->curr->prio && !need_resched())
if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
check_preempt_equal_prio(rq, p);
#endif
}
@@ -1239,6 +1239,10 @@ static int find_lowest_rq(struct task_struct *task)
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);

/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
return -1;

if (task->rt.nr_cpus_allowed == 1)
return -1; /* No other targets possible */
@@ -2365,7 +2365,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
/**
* sys_rt_sigprocmask - change the list of currently blocked signals
* @how: whether to add, remove, or set signals
* @set: stores pending signals
* @nset: stores pending signals
* @oset: previous value of signal mask if non-null
* @sigsetsize: size of sigset_t type
*/
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
.notifier_call = hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
void __init call_function_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
int i;
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void)

hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
register_cpu_notifier(&hotplug_cfd_notifier);

return 0;
}
early_initcall(init_call_single_data);

/*
* csd_lock/csd_unlock used to serialize access to per-cpu csd resources
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
"TASKLET", "SCHED", "HRTIMER"
"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
@@ -285,16 +285,18 @@ ret:
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
struct listener_list *listeners;
struct listener *s, *tmp;
struct listener *s, *tmp, *s2;
unsigned int cpu;

if (!cpumask_subset(mask, cpu_possible_mask))
return -EINVAL;

s = NULL;
if (isadd == REGISTER) {
for_each_cpu(cpu, mask) {
s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
cpu_to_node(cpu));
if (!s)
s = kmalloc_node(sizeof(struct listener),
GFP_KERNEL, cpu_to_node(cpu));
if (!s)
goto cleanup;
s->pid = pid;
@@ -303,9 +305,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)

listeners = &per_cpu(listener_array, cpu);
down_write(&listeners->sem);
list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
if (s2->pid == pid)
goto next_cpu;
}
list_add(&s->list, &listeners->list);
s = NULL;
next_cpu:
up_write(&listeners->sem);
}
kfree(s);
return 0;
}
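[Editor's note, not part of the commit: the hunk above avoids registering the same pid twice on a CPU, and it reuses one allocation across loop iterations, freeing whatever is left over at the end. A condensed, illustrative sketch of that allocate-once, consume-on-add pattern (names borrowed from the file, error handling simplified) is:]

    /* Illustrative only: register pid as a listener on every CPU in mask. */
    static int example_register_all(pid_t pid, const struct cpumask *mask)
    {
            struct listener *s = NULL, *s2, *tmp;
            unsigned int cpu;

            for_each_cpu(cpu, mask) {
                    struct listener_list *listeners = &per_cpu(listener_array, cpu);

                    if (!s)         /* reuse a spare left from a duplicate CPU */
                            s = kmalloc_node(sizeof(*s), GFP_KERNEL, cpu_to_node(cpu));
                    if (!s)
                            return -ENOMEM;
                    s->pid = pid;

                    down_write(&listeners->sem);
                    list_for_each_entry_safe(s2, tmp, &listeners->list, list)
                            if (s2->pid == pid)
                                    goto next_cpu;  /* already registered here */
                    list_add(&s->list, &listeners->list);
                    s = NULL;                       /* ownership moved to the list */
    next_cpu:
                    up_write(&listeners->sem);
            }
            kfree(s);                               /* free the unused spare, if any */
            return 0;
    }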
@ -42,15 +42,75 @@ static struct alarm_base {
|
|||
clockid_t base_clockid;
|
||||
} alarm_bases[ALARM_NUMTYPE];
|
||||
|
||||
/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
|
||||
static ktime_t freezer_delta;
|
||||
static DEFINE_SPINLOCK(freezer_delta_lock);
|
||||
|
||||
#ifdef CONFIG_RTC_CLASS
|
||||
/* rtc timer and device for setting alarm wakeups at suspend */
|
||||
static struct rtc_timer rtctimer;
|
||||
static struct rtc_device *rtcdev;
|
||||
#endif
|
||||
static DEFINE_SPINLOCK(rtcdev_lock);
|
||||
|
||||
/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
|
||||
static ktime_t freezer_delta;
|
||||
static DEFINE_SPINLOCK(freezer_delta_lock);
|
||||
/**
|
||||
* has_wakealarm - check rtc device has wakealarm ability
|
||||
* @dev: current device
|
||||
* @name_ptr: name to be returned
|
||||
*
|
||||
* This helper function checks to see if the rtc device can wake
|
||||
* from suspend.
|
||||
*/
|
||||
static int has_wakealarm(struct device *dev, void *name_ptr)
|
||||
{
|
||||
struct rtc_device *candidate = to_rtc_device(dev);
|
||||
|
||||
if (!candidate->ops->set_alarm)
|
||||
return 0;
|
||||
if (!device_may_wakeup(candidate->dev.parent))
|
||||
return 0;
|
||||
|
||||
*(const char **)name_ptr = dev_name(dev);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* alarmtimer_get_rtcdev - Return selected rtcdevice
|
||||
*
|
||||
* This function returns the rtc device to use for wakealarms.
|
||||
* If one has not already been chosen, it checks to see if a
|
||||
* functional rtc device is available.
|
||||
*/
|
||||
static struct rtc_device *alarmtimer_get_rtcdev(void)
|
||||
{
|
||||
struct device *dev;
|
||||
char *str;
|
||||
unsigned long flags;
|
||||
struct rtc_device *ret;
|
||||
|
||||
spin_lock_irqsave(&rtcdev_lock, flags);
|
||||
if (!rtcdev) {
|
||||
/* Find an rtc device and init the rtc_timer */
|
||||
dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
|
||||
/* If we have a device then str is valid. See has_wakealarm() */
|
||||
if (dev) {
|
||||
rtcdev = rtc_class_open(str);
|
||||
/*
|
||||
* Drop the reference we got in class_find_device,
|
||||
* rtc_open takes its own.
|
||||
*/
|
||||
put_device(dev);
|
||||
rtc_timer_init(&rtctimer, NULL, NULL);
|
||||
}
|
||||
}
|
||||
ret = rtcdev;
|
||||
spin_unlock_irqrestore(&rtcdev_lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#else
|
||||
#define alarmtimer_get_rtcdev() (0)
|
||||
#define rtcdev (0)
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
|
|
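alarmtimer_get_rtcdev() above resolves the RTC device lazily: the first caller performs the class_find_device()/rtc_class_open() lookup under rtcdev_lock, and every later caller just reuses the cached pointer. A rough user-space analogue of that resolve-once-under-a-lock pattern, using a pthread mutex and an invented probe_backend() helper in place of the RTC lookup:

/* Illustrative only: a sketch of the lazy, lock-protected lookup. */
#include <pthread.h>

/* Hypothetical backend object and probe; stand-ins for the RTC device
 * and the class_find_device()/rtc_class_open() pair. */
struct backend { int id; };
static struct backend the_backend = { 1 };

static struct backend *probe_backend(void)
{
	return &the_backend;	/* pretend the device has shown up */
}

static struct backend *cached;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static struct backend *get_backend(void)
{
	struct backend *ret;

	pthread_mutex_lock(&cache_lock);
	if (!cached)			/* first caller does the lookup */
		cached = probe_backend();
	ret = cached;			/* later callers reuse the result */
	pthread_mutex_unlock(&cache_lock);

	return ret;
}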
@@ -166,6 +226,7 @@ static int alarmtimer_suspend(struct device *dev)
	struct rtc_time tm;
	ktime_t min, now;
	unsigned long flags;
	struct rtc_device *rtc;
	int i;

	spin_lock_irqsave(&freezer_delta_lock, flags);

@@ -173,8 +234,9 @@ static int alarmtimer_suspend(struct device *dev)
	freezer_delta = ktime_set(0, 0);
	spin_unlock_irqrestore(&freezer_delta_lock, flags);

	rtc = rtcdev;
	/* If we have no rtcdev, just return */
	if (!rtcdev)
	if (!rtc)
		return 0;

	/* Find the soonest timer to expire*/

@@ -199,12 +261,12 @@ static int alarmtimer_suspend(struct device *dev)
	WARN_ON(min.tv64 < NSEC_PER_SEC);

	/* Setup an rtc timer to fire that far in the future */
	rtc_timer_cancel(rtcdev, &rtctimer);
	rtc_read_time(rtcdev, &tm);
	rtc_timer_cancel(rtc, &rtctimer);
	rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	now = ktime_add(now, min);

	rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0));
	rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));

	return 0;
}
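In alarmtimer_suspend() the shared rtcdev pointer is now read once into the local rtc, and both the NULL check and the rtc_timer_*() calls use that single snapshot, so the test and the uses cannot observe different values. A conceptual sketch of the same snapshot idiom; the names are invented, and a real concurrent version would also need an appropriate barrier or atomic read:

/* Illustrative only. */
struct device;				/* opaque; stands in for struct rtc_device */

static struct device *shared_dev;	/* may be updated by another context */

static void program_wakeup(struct device *dev)
{
	(void)dev;			/* placeholder for the rtc_timer_*() calls */
}

static int suspend_path(void)
{
	struct device *dev = shared_dev;	/* read the shared pointer once */

	if (!dev)			/* check the snapshot ... */
		return 0;

	program_wakeup(dev);		/* ... and use the very same snapshot */
	return 0;
}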
@@ -322,6 +384,9 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;

	if (!alarmtimer_get_rtcdev())
		return -ENOTSUPP;

	return hrtimer_get_res(baseid, tp);
}

@@ -336,6 +401,9 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
{
	struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];

	if (!alarmtimer_get_rtcdev())
		return -ENOTSUPP;

	*tp = ktime_to_timespec(base->gettime());
	return 0;
}

@@ -351,6 +419,9 @@ static int alarm_timer_create(struct k_itimer *new_timer)
	enum alarmtimer_type type;
	struct alarm_base *base;

	if (!alarmtimer_get_rtcdev())
		return -ENOTSUPP;

	if (!capable(CAP_WAKE_ALARM))
		return -EPERM;

@@ -385,6 +456,9 @@ static void alarm_timer_get(struct k_itimer *timr,
 */
static int alarm_timer_del(struct k_itimer *timr)
{
	if (!rtcdev)
		return -ENOTSUPP;

	alarm_cancel(&timr->it.alarmtimer);
	return 0;
}

@@ -402,6 +476,9 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
			   struct itimerspec *new_setting,
			   struct itimerspec *old_setting)
{
	if (!rtcdev)
		return -ENOTSUPP;

	/* Save old values */
	old_setting->it_interval =
		ktime_to_timespec(timr->it.alarmtimer.period);

@@ -541,6 +618,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
	int ret = 0;
	struct restart_block *restart;

	if (!alarmtimer_get_rtcdev())
		return -ENOTSUPP;

	if (!capable(CAP_WAKE_ALARM))
		return -EPERM;
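Each of the posix-clock operations above now begins with the same availability check and fails with -ENOTSUPP when no usable RTC exists, before any alarm state is touched. The sketch below shows that guard-first shape in plain C; EOPNOTSUPP stands in for the kernel's ENOTSUPP and the operations themselves are invented:

/* Illustrative only. */
#include <errno.h>
#include <stdbool.h>

static bool backend_ready;		/* set once the backing device exists */

static int op_get(long *val)
{
	if (!backend_ready)
		return -EOPNOTSUPP;	/* bail out before touching any state */
	*val = 42;
	return 0;
}

static int op_set(long val)
{
	if (!backend_ready)
		return -EOPNOTSUPP;
	(void)val;			/* real work would go here */
	return 0;
}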
@@ -638,65 +718,3 @@ static int __init alarmtimer_init(void)
}
device_initcall(alarmtimer_init);

#ifdef CONFIG_RTC_CLASS
/**
 * has_wakealarm - check rtc device has wakealarm ability
 * @dev: current device
 * @name_ptr: name to be returned
 *
 * This helper function checks to see if the rtc device can wake
 * from suspend.
 */
static int __init has_wakealarm(struct device *dev, void *name_ptr)
{
	struct rtc_device *candidate = to_rtc_device(dev);

	if (!candidate->ops->set_alarm)
		return 0;
	if (!device_may_wakeup(candidate->dev.parent))
		return 0;

	*(const char **)name_ptr = dev_name(dev);
	return 1;
}

/**
 * alarmtimer_init_late - Late initializing of alarmtimer code
 *
 * This function locates a rtc device to use for wakealarms.
 * Run as late_initcall to make sure rtc devices have been
 * registered.
 */
static int __init alarmtimer_init_late(void)
{
	struct device *dev;
	char *str;

	/* Find an rtc device and init the rtc_timer */
	dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
	/* If we have a device then str is valid. See has_wakealarm() */
	if (dev) {
		rtcdev = rtc_class_open(str);
		/*
		 * Drop the reference we got in class_find_device,
		 * rtc_open takes its own.
		 */
		put_device(dev);
	}
	if (!rtcdev) {
		printk(KERN_WARNING "No RTC device found, ALARM timers will"
			" not wake from suspend");
	}
	rtc_timer_init(&rtctimer, NULL, NULL);

	return 0;
}
#else
static int __init alarmtimer_init_late(void)
{
	printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers"
		" will not wake from suspend");
	return 0;
}
#endif
late_initcall(alarmtimer_init_late);
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);

@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
	if (!watchdog_running)
		goto out;

	wdnow = watchdog->read(watchdog);
	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
				     watchdog->mult, watchdog->shift);
	watchdog_last = wdnow;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */

@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
					     watchdog->mult, watchdog->shift);

		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->wd_last = csnow;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;

@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_last = watchdog->read(watchdog);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
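The watchdog rework stores, per clocksource, both the last watchdog reading (wd_last) and the last reading of the clocksource itself (cs_last), and converts each cycle delta to nanoseconds as (delta & mask) * mult >> shift before comparing the two clocks. A stand-alone sketch of that conversion and comparison, with a simplified struct standing in for struct clocksource and WATCHDOG_THRESHOLD:

/* Illustrative only: not the kernel implementation. */
#include <stdint.h>

struct simple_cs {
	uint64_t mask;		/* counter width mask, e.g. 0xffffffff */
	uint32_t mult;		/* cycles -> ns multiplier */
	uint32_t shift;		/* cycles -> ns shift */
	uint64_t last;		/* last raw reading */
};

/* ns = ((now - last) & mask) * mult >> shift, then remember "now". */
static uint64_t cyc2ns_delta(struct simple_cs *cs, uint64_t now)
{
	uint64_t delta = (now - cs->last) & cs->mask;

	cs->last = now;
	return (delta * cs->mult) >> cs->shift;
}

/* Nonzero if the two clocks advanced by amounts differing by more than
 * threshold_ns over the same interval. */
static int deviates(struct simple_cs *cs, uint64_t cs_now,
		    struct simple_cs *wd, uint64_t wd_now,
		    uint64_t threshold_ns)
{
	int64_t cs_ns = (int64_t)cyc2ns_delta(cs, cs_now);
	int64_t wd_ns = (int64_t)cyc2ns_delta(wd, wd_now);
	int64_t diff = cs_ns - wd_ns;

	if (diff < 0)
		diff = -diff;
	return diff > (int64_t)threshold_ns;
}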
@@ -2740,7 +2740,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret;
	int ret = -EINVAL;

	func = strsep(&next, ":");

@@ -3330,6 +3330,7 @@ static int ftrace_process_locs(struct module *mod,
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;

@@ -3346,7 +3347,13 @@ static int ftrace_process_locs(struct module *mod,
		ftrace_record_ip(addr);
	}

	/*
	 * Disable interrupts to prevent interrupts from executing
	 * code that is being modified.
	 */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
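The first ftrace hunk initializes ret to -EINVAL so that input which matches no registered command is reported as an error instead of returning whatever happened to be left in ret. A small sketch of that default-to-error pattern; the command names here are invented:

/* Illustrative only. */
#include <errno.h>
#include <string.h>

static int do_enable(void)  { return 0; }
static int do_disable(void) { return 0; }

static int process_command(const char *cmd)
{
	int ret = -EINVAL;		/* assume failure until a command matches */

	if (strcmp(cmd, "enable") == 0)
		ret = do_enable();
	else if (strcmp(cmd, "disable") == 0)
		ret = do_disable();

	return ret;			/* unmatched input falls through as -EINVAL */
}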
@@ -1870,8 +1870,12 @@ fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}
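The selftest target is only ever looked up by name, so gcc may drop it as unreferenced unless it is marked __used. In plain C the same effect comes from the attribute that the kernel macro expands to:

/* Illustrative only. */
/* __attribute__((used)) tells the compiler to emit the function even if
 * it sees no references, which keeps the symbol visible to anything that
 * resolves it by name at run time. */
static __attribute__((used)) int probe_target(int a, int b)
{
	return a + b;	/* never called directly; found via its symbol */
}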
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos)
	const char **fmt = v;
	int start_index;

	if (!fmt)
		fmt = __start___trace_bprintk_fmt + *pos;

	start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;

	if (*pos < start_index)
		return fmt;
		return __start___trace_bprintk_fmt + *pos;

	return find_next_mod_format(start_index, v, fmt, pos);
}
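find_next() now treats *pos as an index into the built-in __trace_bprintk_fmt section and, once the index runs past start_index, rebases it and hands the walk over to find_next_mod_format(). A user-space sketch of that builtin-then-delegate cursor, with an invented mod_find() standing in for the module lookup:

/* Illustrative only. */
#include <stddef.h>

static const char *builtin_fmts[] = { "fmt A", "fmt B", "fmt C" };
#define NR_BUILTIN (sizeof(builtin_fmts) / sizeof(builtin_fmts[0]))

/* Hypothetical module-side lookup: index 0 is the first module format. */
static const char *mod_find(size_t index)
{
	(void)index;
	return NULL;		/* pretend no modules are loaded */
}

static const char *find_fmt(size_t pos)
{
	if (pos < NR_BUILTIN)
		return builtin_fmts[pos];

	/* Past the built-in section: rebase the index and ask the modules. */
	return mod_find(pos - NR_BUILTIN);
}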