Merge branch 'timers/core' into sched/hrtimers
Merge sched/core and timers/core so we can apply the sched balancing patch queue, which depends on both.
commit 624bbdfac9
74 changed files with 2062 additions and 1329 deletions
@@ -51,9 +51,11 @@
 static struct workqueue_struct *perf_wq;
 
+typedef int (*remote_function_f)(void *);
+
 struct remote_function_call {
 	struct task_struct *p;
-	int (*func)(void *info);
+	remote_function_f func;
 	void *info;
 	int ret;
 };

@@ -86,7 +88,7 @@ static void remote_function(void *data)
  *	    -EAGAIN - when the process moved away
  */
 static int
-task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
+task_function_call(struct task_struct *p, remote_function_f func, void *info)
 {
 	struct remote_function_call data = {
 		.p = p,

@@ -110,7 +112,7 @@ task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
  *
  * returns: @func return value or -ENXIO when the cpu is offline
  */
-static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
+static int cpu_function_call(int cpu, remote_function_f func, void *info)
 {
 	struct remote_function_call data = {
 		.p = NULL,

@@ -747,62 +749,31 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 /*
  * function must be called with interrupts disbled
  */
-static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
+static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 {
 	struct perf_cpu_context *cpuctx;
-	enum hrtimer_restart ret = HRTIMER_NORESTART;
 	int rotations = 0;
 
 	WARN_ON(!irqs_disabled());
 
 	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
-
 	rotations = perf_rotate_context(cpuctx);
 
-	/*
-	 * arm timer if needed
-	 */
-	if (rotations) {
+	raw_spin_lock(&cpuctx->hrtimer_lock);
+	if (rotations)
 		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
-		ret = HRTIMER_RESTART;
-	}
+	else
+		cpuctx->hrtimer_active = 0;
+	raw_spin_unlock(&cpuctx->hrtimer_lock);
 
-	return ret;
+	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
 }
 
-/* CPU is going down */
-void perf_cpu_hrtimer_cancel(int cpu)
+static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
 {
-	struct perf_cpu_context *cpuctx;
-	struct pmu *pmu;
-	unsigned long flags;
-
-	if (WARN_ON(cpu != smp_processor_id()))
-		return;
-
-	local_irq_save(flags);
-
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-
-		if (pmu->task_ctx_nr == perf_sw_context)
-			continue;
-
-		hrtimer_cancel(&cpuctx->hrtimer);
-	}
-
-	rcu_read_unlock();
-
-	local_irq_restore(flags);
-}
-
-static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
-{
-	struct hrtimer *hr = &cpuctx->hrtimer;
+	struct hrtimer *timer = &cpuctx->hrtimer;
 	struct pmu *pmu = cpuctx->ctx.pmu;
-	int timer;
+	u64 interval;
 
 	/* no multiplexing needed for SW PMU */
 	if (pmu->task_ctx_nr == perf_sw_context)

@@ -812,31 +783,36 @@ static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
 	 * check default is sane, if not set then force to
 	 * default interval (1/tick)
 	 */
-	timer = pmu->hrtimer_interval_ms;
-	if (timer < 1)
-		timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
+	interval = pmu->hrtimer_interval_ms;
+	if (interval < 1)
+		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
 
-	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
+	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
 
-	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
-	hr->function = perf_cpu_hrtimer_handler;
+	raw_spin_lock_init(&cpuctx->hrtimer_lock);
+	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+	timer->function = perf_mux_hrtimer_handler;
 }
 
-static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
+static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
 {
-	struct hrtimer *hr = &cpuctx->hrtimer;
+	struct hrtimer *timer = &cpuctx->hrtimer;
 	struct pmu *pmu = cpuctx->ctx.pmu;
+	unsigned long flags;
 
 	/* not for SW PMU */
 	if (pmu->task_ctx_nr == perf_sw_context)
-		return;
+		return 0;
 
-	if (hrtimer_active(hr))
-		return;
+	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+	if (!cpuctx->hrtimer_active) {
+		cpuctx->hrtimer_active = 1;
+		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
+		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
+	}
+	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
 
-	if (!hrtimer_callback_running(hr))
-		__hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
-					 0, HRTIMER_MODE_REL_PINNED, 0);
+	return 0;
 }
 
 void perf_pmu_disable(struct pmu *pmu)

@@ -1935,7 +1911,7 @@ group_sched_in(struct perf_event *group_event,
 
 	if (event_sched_in(group_event, cpuctx, ctx)) {
 		pmu->cancel_txn(pmu);
-		perf_cpu_hrtimer_restart(cpuctx);
+		perf_mux_hrtimer_restart(cpuctx);
 		return -EAGAIN;
 	}
 

@@ -1982,7 +1958,7 @@ group_error:
 
 	pmu->cancel_txn(pmu);
 
-	perf_cpu_hrtimer_restart(cpuctx);
+	perf_mux_hrtimer_restart(cpuctx);
 
 	return -EAGAIN;
 }

@@ -2255,7 +2231,7 @@ static int __perf_event_enable(void *info)
 	 */
 	if (leader != event) {
 		group_sched_out(leader, cpuctx, ctx);
-		perf_cpu_hrtimer_restart(cpuctx);
+		perf_mux_hrtimer_restart(cpuctx);
 	}
 	if (leader->attr.pinned) {
 		update_group_times(leader);

@@ -6863,9 +6839,8 @@ static void perf_swevent_start_hrtimer(struct perf_event *event)
 	} else {
 		period = max_t(u64, 10000, hwc->sample_period);
 	}
-	__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL_PINNED, 0);
+	hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
+		      HRTIMER_MODE_REL_PINNED);
 }
 
 static void perf_swevent_cancel_hrtimer(struct perf_event *event)

@@ -7166,6 +7141,8 @@ perf_event_mux_interval_ms_show(struct device *dev,
 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
 }
 
+static DEFINE_MUTEX(mux_interval_mutex);
+
 static ssize_t
 perf_event_mux_interval_ms_store(struct device *dev,
 				 struct device_attribute *attr,

@@ -7185,17 +7162,21 @@ perf_event_mux_interval_ms_store(struct device *dev,
 	if (timer == pmu->hrtimer_interval_ms)
 		return count;
 
+	mutex_lock(&mux_interval_mutex);
 	pmu->hrtimer_interval_ms = timer;
 
 	/* update all cpuctx for this PMU */
-	for_each_possible_cpu(cpu) {
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
 		struct perf_cpu_context *cpuctx;
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
 
-		if (hrtimer_active(&cpuctx->hrtimer))
-			hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
+		cpu_function_call(cpu,
+			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
 	}
+	put_online_cpus();
+	mutex_unlock(&mux_interval_mutex);
 
 	return count;
 }

@@ -7300,7 +7281,7 @@ skip_type:
 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
 		cpuctx->ctx.pmu = pmu;
 
-		__perf_cpu_hrtimer_init(cpuctx, cpu);
+		__perf_mux_hrtimer_init(cpuctx, cpu);
 
 		cpuctx->unique_pmu = pmu;
 	}
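The perf hunks above all converge on one pattern: rather than querying hrtimer_active() (which is racy against a concurrently running callback), the cpu context keeps its own hrtimer_active flag under hrtimer_lock, and the handler either forwards itself or clears the flag before returning HRTIMER_NORESTART. Condensed from the new code in the hunks above (nothing here beyond what the diff itself introduces):

	/* Arming side (perf_mux_hrtimer_restart): start only if not pending. */
	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	/* Handler side (perf_mux_hrtimer_handler): clear the flag under the
	 * same lock before letting the timer die with HRTIMER_NORESTART. */
	raw_spin_lock(&cpuctx->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
	else
		cpuctx->hrtimer_active = 0;
	raw_spin_unlock(&cpuctx->hrtimer_lock);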
@@ -2064,11 +2064,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
 	queue_me(q, hb);
 
 	/* Arm the timer */
-	if (timeout) {
+	if (timeout)
 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-		if (!hrtimer_active(&timeout->timer))
-			timeout->task = NULL;
-	}
 
 	/*
 	 * If we have been removed from the hash list, then another task
@@ -1182,11 +1182,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	set_current_state(state);
 
 	/* Setup the timer, when timeout != NULL */
-	if (unlikely(timeout)) {
+	if (unlikely(timeout))
 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-		if (!hrtimer_active(&timeout->timer))
-			timeout->task = NULL;
-	}
 
 	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
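Both the futex and rtmutex hunks drop the same post-arm check. The removed `if (!hrtimer_active(&timeout->timer)) timeout->task = NULL;` was redundant: if the timer has already expired by the time the caller looks, the sleeper callback has run and has cleared ->task itself. For context, the hrtimer_sleeper wakeup callback of this era (reproduced from memory of kernel/time/hrtimer.c, unchanged by this merge; treat as illustrative):

	static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
	{
		struct hrtimer_sleeper *t =
			container_of(timer, struct hrtimer_sleeper, timer);
		struct task_struct *task = t->task;

		/* The callback, not the caller, clears ->task. */
		t->task = NULL;
		if (task)
			wake_up_process(task);

		return HRTIMER_NORESTART;
	}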
@@ -1368,9 +1368,9 @@ static void rcu_prepare_kthreads(int cpu)
  * any flavor of RCU.
  */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *delta_jiffies)
+int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
-	*delta_jiffies = ULONG_MAX;
+	*nextevt = KTIME_MAX;
 	return rcu_cpu_has_callbacks(NULL);
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */

@@ -1481,16 +1481,17 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
  * The caller must have disabled interrupts.
  */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *dj)
+int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	unsigned long dj;
 
 	/* Snapshot to detect later posting of non-lazy callback. */
 	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 
 	/* If no callbacks, RCU doesn't need the CPU. */
 	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
-		*dj = ULONG_MAX;
+		*nextevt = KTIME_MAX;
 		return 0;
 	}
 

@@ -1504,11 +1505,12 @@ int rcu_needs_cpu(unsigned long *dj)
 
 	/* Request timer delay depending on laziness, and round. */
 	if (!rdtp->all_lazy) {
-		*dj = round_up(rcu_idle_gp_delay + jiffies,
+		dj = round_up(rcu_idle_gp_delay + jiffies,
 			       rcu_idle_gp_delay) - jiffies;
 	} else {
-		*dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
+		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
 	}
+	*nextevt = basemono + dj * TICK_NSEC;
 	return 0;
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
@@ -90,26 +90,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
-void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
-{
-	unsigned long delta;
-	ktime_t soft, hard, now;
-
-	for (;;) {
-		if (hrtimer_active(period_timer))
-			break;
-
-		now = hrtimer_cb_get_time(period_timer);
-		hrtimer_forward(period_timer, now, period);
-
-		soft = hrtimer_get_softexpires(period_timer);
-		hard = hrtimer_get_expires(period_timer);
-		delta = ktime_to_ns(ktime_sub(hard, soft));
-		__hrtimer_start_range_ns(period_timer, soft, delta,
-					 HRTIMER_MODE_ABS_PINNED, 0);
-	}
-}
-
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 

@@ -355,12 +335,11 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 #ifdef CONFIG_SMP
 
-static int __hrtick_restart(struct rq *rq)
+static void __hrtick_restart(struct rq *rq)
 {
 	struct hrtimer *timer = &rq->hrtick_timer;
-	ktime_t time = hrtimer_get_softexpires(timer);
 
-	return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
+	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*

@@ -440,8 +419,8 @@ void hrtick_start(struct rq *rq, u64 delay)
 	 * doesn't make sense. Rely on vruntime for fairness.
 	 */
 	delay = max_t(u64, delay, 10000LL);
-	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
-				 HRTIMER_MODE_REL_PINNED, 0);
+	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+		      HRTIMER_MODE_REL_PINNED);
 }
 
 static inline void init_hrtick(void)

@@ -8146,10 +8125,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
 	__refill_cfs_bandwidth_runtime(cfs_b);
-	/* restart the period timer (if active) to handle new period expiry */
-	if (runtime_enabled && cfs_b->timer_active) {
-		/* force a reprogram */
-		__start_cfs_bandwidth(cfs_b, true);
-	}
+	if (runtime_enabled)
+		start_cfs_bandwidth(cfs_b);
 	raw_spin_unlock_irq(&cfs_b->lock);
 
 	for_each_online_cpu(i) {
@@ -503,8 +503,6 @@ static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 	ktime_t now, act;
-	ktime_t soft, hard;
-	unsigned long range;
 	s64 delta;
 
 	if (boosted)

@@ -527,15 +525,9 @@ static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
 	if (ktime_us_delta(act, now) < 0)
 		return 0;
 
-	hrtimer_set_expires(&dl_se->dl_timer, act);
+	hrtimer_start(&dl_se->dl_timer, act, HRTIMER_MODE_ABS);
 
-	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
-	hard = hrtimer_get_expires(&dl_se->dl_timer);
-	range = ktime_to_ns(ktime_sub(hard, soft));
-	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
-				 range, HRTIMER_MODE_ABS, 0);
-
-	return hrtimer_active(&dl_se->dl_timer);
+	return 1;
 }
 
 /*
@@ -230,8 +230,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #endif
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
-	SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
-			cfs_rq->tg->cfs_bandwidth.timer_active);
 	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
 			cfs_rq->throttled);
 	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
@@ -3504,16 +3504,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	if (cfs_b->quota == RUNTIME_INF)
 		amount = min_amount;
 	else {
-		/*
-		 * If the bandwidth pool has become inactive, then at least one
-		 * period must have elapsed since the last consumption.
-		 * Refresh the global state and ensure bandwidth timer becomes
-		 * active.
-		 */
-		if (!cfs_b->timer_active) {
-			__refill_cfs_bandwidth_runtime(cfs_b);
-			__start_cfs_bandwidth(cfs_b, false);
-		}
+		start_cfs_bandwidth(cfs_b);
 
 		if (cfs_b->runtime > 0) {
 			amount = min(cfs_b->runtime, min_amount);

@@ -3662,6 +3653,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
 	long task_delta, dequeue = 1;
+	bool empty;
 
 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
 

@@ -3691,13 +3683,21 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	cfs_rq->throttled = 1;
 	cfs_rq->throttled_clock = rq_clock(rq);
 	raw_spin_lock(&cfs_b->lock);
+	empty = list_empty(&cfs_rq->throttled_list);
+
 	/*
 	 * Add to the _head_ of the list, so that an already-started
 	 * distribute_cfs_runtime will not see us
 	 */
 	list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
-	if (!cfs_b->timer_active)
-		__start_cfs_bandwidth(cfs_b, false);
+
+	/*
+	 * If we're the first throttled task, make sure the bandwidth
+	 * timer is running.
+	 */
+	if (empty)
+		start_cfs_bandwidth(cfs_b);
+
 	raw_spin_unlock(&cfs_b->lock);
 }
 

@@ -3812,13 +3812,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	if (cfs_b->idle && !throttled)
 		goto out_deactivate;
 
-	/*
-	 * if we have relooped after returning idle once, we need to update our
-	 * status as actually running, so that other cpus doing
-	 * __start_cfs_bandwidth will stop trying to cancel us.
-	 */
-	cfs_b->timer_active = 1;
-
 	__refill_cfs_bandwidth_runtime(cfs_b);
 
 	if (!throttled) {

@@ -3863,7 +3856,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	return 0;
 
 out_deactivate:
-	cfs_b->timer_active = 0;
 	return 1;
 }
 

@@ -3878,7 +3870,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
  * Are we near the end of the current quota period?
  *
  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
- * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
+ * hrtimer base being cleared by hrtimer_start. In the case of
  * migrate_hrtimers, base is never cleared, so we are fine.
  */
 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)

@@ -3906,8 +3898,9 @@ static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
 	if (runtime_refresh_within(cfs_b, min_left))
 		return;
 
-	start_bandwidth_timer(&cfs_b->slack_timer,
-				ns_to_ktime(cfs_bandwidth_slack_period));
+	hrtimer_start(&cfs_b->slack_timer,
+			ns_to_ktime(cfs_bandwidth_slack_period),
+			HRTIMER_MODE_REL);
 }
 
 /* we know any runtime found here is valid as update_curr() precedes return */

@@ -4027,6 +4020,7 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
 {
 	struct cfs_bandwidth *cfs_b =
 		container_of(timer, struct cfs_bandwidth, slack_timer);
+
 	do_sched_cfs_slack_timer(cfs_b);
 
 	return HRTIMER_NORESTART;

@@ -4036,20 +4030,19 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
 	struct cfs_bandwidth *cfs_b =
 		container_of(timer, struct cfs_bandwidth, period_timer);
-	ktime_t now;
 	int overrun;
 	int idle = 0;
 
 	raw_spin_lock(&cfs_b->lock);
 	for (;;) {
-		now = hrtimer_cb_get_time(timer);
-		overrun = hrtimer_forward(timer, now, cfs_b->period);
-
+		overrun = hrtimer_forward_now(timer, cfs_b->period);
 		if (!overrun)
 			break;
 
 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
 	}
+	if (idle)
+		cfs_b->period_active = 0;
 	raw_spin_unlock(&cfs_b->lock);
 
 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;

@@ -4063,7 +4056,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	cfs_b->period = ns_to_ktime(default_cfs_period());
 
 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
-	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 	cfs_b->period_timer.function = sched_cfs_period_timer;
 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->slack_timer.function = sched_cfs_slack_timer;

@@ -4075,28 +4068,15 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
 }
 
-/* requires cfs_b->lock, may release to reprogram timer */
-void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
+void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
-	/*
-	 * The timer may be active because we're trying to set a new bandwidth
-	 * period or because we're racing with the tear-down path
-	 * (timer_active==0 becomes visible before the hrtimer call-back
-	 * terminates).  In either case we ensure that it's re-programmed
-	 */
-	while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
-	       hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
-		/* bounce the lock to allow do_sched_cfs_period_timer to run */
-		raw_spin_unlock(&cfs_b->lock);
-		cpu_relax();
-		raw_spin_lock(&cfs_b->lock);
-		/* if someone else restarted the timer then we're done */
-		if (!force && cfs_b->timer_active)
-			return;
-	}
+	lockdep_assert_held(&cfs_b->lock);
 
-	cfs_b->timer_active = 1;
-	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
+	if (!cfs_b->period_active) {
+		cfs_b->period_active = 1;
+		hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+		hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
+	}
 }
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
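The same overrun-driven loop now appears in sched_cfs_period_timer() above and sched_rt_period_timer() below: hrtimer_forward_now() both pushes the expiry forward by whole periods and reports how many periods elapsed. A skeleton of the shape, with hypothetical my_* names standing in for the per-subsystem cfs_b and rt_b members (a sketch, not code from this merge):

	static enum hrtimer_restart my_period_timer(struct hrtimer *timer)
	{
		int idle = 0, overrun;

		for (;;) {
			/* Advance expiry past "now" by whole periods; returns
			 * the number of periods skipped (0 = not yet expired). */
			overrun = hrtimer_forward_now(timer, my_period);
			if (!overrun)
				break;

			idle = my_do_period_work(overrun);
		}
		if (idle)
			my_period_active = 0;	/* a later start_*() call re-arms */

		return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
	}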
@@ -18,19 +18,22 @@ static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
 {
 	struct rt_bandwidth *rt_b =
 		container_of(timer, struct rt_bandwidth, rt_period_timer);
-	ktime_t now;
-	int overrun;
 	int idle = 0;
+	int overrun;
 
+	raw_spin_lock(&rt_b->rt_runtime_lock);
 	for (;;) {
-		now = hrtimer_cb_get_time(timer);
-		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
-
+		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
 		if (!overrun)
 			break;
 
+		raw_spin_unlock(&rt_b->rt_runtime_lock);
 		idle = do_sched_rt_period_timer(rt_b, overrun);
+		raw_spin_lock(&rt_b->rt_runtime_lock);
 	}
+	if (idle)
+		rt_b->rt_period_active = 0;
+	raw_spin_unlock(&rt_b->rt_runtime_lock);
 
 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }

@@ -52,11 +55,12 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return;
 
-	if (hrtimer_active(&rt_b->rt_period_timer))
-		return;
-
 	raw_spin_lock(&rt_b->rt_runtime_lock);
-	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
+	if (!rt_b->rt_period_active) {
+		rt_b->rt_period_active = 1;
+		hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
+		hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
+	}
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
@@ -137,6 +137,7 @@ struct rt_bandwidth {
 	ktime_t			rt_period;
 	u64			rt_runtime;
 	struct hrtimer		rt_period_timer;
+	unsigned int		rt_period_active;
 };
 
 void __dl_clear_params(struct task_struct *p);

@@ -221,7 +222,7 @@ struct cfs_bandwidth {
 	s64 hierarchical_quota;
 	u64 runtime_expires;
 
-	int idle, timer_active;
+	int idle, period_active;
 	struct hrtimer period_timer, slack_timer;
 	struct list_head throttled_cfs_rq;
 

@@ -312,7 +313,7 @@ extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
-extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
+extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 
 extern void free_rt_sched_group(struct task_group *tg);

@@ -1410,8 +1411,6 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
 static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
-extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
-
 /*
  * __task_rq_lock - lock the rq @p resides on.
  */
@@ -13,19 +13,4 @@ obj-$(CONFIG_TIMER_STATS) += timer_stats.o
 obj-$(CONFIG_DEBUG_FS)				+= timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)			+= test_udelay.o
 
-$(obj)/time.o: $(obj)/timeconst.h
-
-quiet_cmd_hzfile = HZFILE  $@
-      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
-
-targets += hz.bc
-$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
-	$(call if_changed,hzfile)
-
-quiet_cmd_bc  = BC      $@
-      cmd_bc  = bc -q $(filter-out FORCE,$^) > $@
-
-targets += timeconst.h
-$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
-	$(call if_changed,bc)
-
+$(obj)/time.o: $(objtree)/include/config/
@@ -317,19 +317,16 @@ EXPORT_SYMBOL_GPL(alarm_init);
  * @alarm: ptr to alarm to set
  * @start: time to run the alarm
  */
-int alarm_start(struct alarm *alarm, ktime_t start)
+void alarm_start(struct alarm *alarm, ktime_t start)
 {
 	struct alarm_base *base = &alarm_bases[alarm->type];
 	unsigned long flags;
-	int ret;
 
 	spin_lock_irqsave(&base->lock, flags);
 	alarm->node.expires = start;
 	alarmtimer_enqueue(base, alarm);
-	ret = hrtimer_start(&alarm->timer, alarm->node.expires,
-			HRTIMER_MODE_ABS);
+	hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);
 	spin_unlock_irqrestore(&base->lock, flags);
-	return ret;
 }
 EXPORT_SYMBOL_GPL(alarm_start);
 

@@ -338,12 +335,12 @@ EXPORT_SYMBOL_GPL(alarm_start);
  * @alarm: ptr to alarm to set
  * @start: time relative to now to run the alarm
  */
-int alarm_start_relative(struct alarm *alarm, ktime_t start)
+void alarm_start_relative(struct alarm *alarm, ktime_t start)
 {
 	struct alarm_base *base = &alarm_bases[alarm->type];
 
 	start = ktime_add(start, base->gettime());
-	return alarm_start(alarm, start);
+	alarm_start(alarm, start);
 }
 EXPORT_SYMBOL_GPL(alarm_start_relative);
 

@@ -495,12 +492,12 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
  */
 static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
-	clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
-
 	if (!alarmtimer_get_rtcdev())
 		return -EINVAL;
 
-	return hrtimer_get_res(baseid, tp);
+	tp->tv_sec = 0;
+	tp->tv_nsec = hrtimer_resolution;
+	return 0;
 }
 
 /**
@@ -94,8 +94,8 @@ u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
 }
 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
 
-static int __clockevents_set_state(struct clock_event_device *dev,
-				   enum clock_event_state state)
+static int __clockevents_switch_state(struct clock_event_device *dev,
+				      enum clock_event_state state)
 {
 	/* Transition with legacy set_mode() callback */
 	if (dev->set_mode) {

@@ -134,32 +134,44 @@ static int __clockevents_switch_state(struct clock_event_device *dev,
 			return -ENOSYS;
 		return dev->set_state_oneshot(dev);
 
+	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
+		/* Core internal bug */
+		if (WARN_ONCE(!clockevent_state_oneshot(dev),
+			      "Current state: %d\n",
+			      clockevent_get_state(dev)))
+			return -EINVAL;
+
+		if (dev->set_state_oneshot_stopped)
+			return dev->set_state_oneshot_stopped(dev);
+		else
+			return -ENOSYS;
+
 	default:
 		return -ENOSYS;
 	}
 }
 
 /**
- * clockevents_set_state - set the operating state of a clock event device
+ * clockevents_switch_state - set the operating state of a clock event device
  * @dev:	device to modify
  * @state:	new state
  *
  * Must be called with interrupts disabled !
 */
-void clockevents_set_state(struct clock_event_device *dev,
-			   enum clock_event_state state)
+void clockevents_switch_state(struct clock_event_device *dev,
+			      enum clock_event_state state)
 {
-	if (dev->state != state) {
-		if (__clockevents_set_state(dev, state))
+	if (clockevent_get_state(dev) != state) {
+		if (__clockevents_switch_state(dev, state))
 			return;
 
-		dev->state = state;
+		clockevent_set_state(dev, state);
 
 		/*
 		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
 		 * on it, so fix it up and emit a warning:
 		 */
-		if (state == CLOCK_EVT_STATE_ONESHOT) {
+		if (clockevent_state_oneshot(dev)) {
 			if (unlikely(!dev->mult)) {
 				dev->mult = 1;
 				WARN_ON(1);

@@ -174,7 +186,7 @@ void clockevents_switch_state(struct clock_event_device *dev,
  */
 void clockevents_shutdown(struct clock_event_device *dev)
 {
-	clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
+	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 	dev->next_event.tv64 = KTIME_MAX;
 }
 

@@ -248,7 +260,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
 		delta = dev->min_delta_ns;
 		dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-		if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+		if (clockevent_state_shutdown(dev))
 			return 0;
 
 		dev->retries++;

@@ -285,7 +297,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
 		delta = dev->min_delta_ns;
 		dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-		if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+		if (clockevent_state_shutdown(dev))
 			return 0;
 
 		dev->retries++;

@@ -317,9 +329,13 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 
 	dev->next_event = expires;
 
-	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+	if (clockevent_state_shutdown(dev))
 		return 0;
 
+	/* We must be in ONESHOT state here */
+	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
+		  clockevent_get_state(dev));
+
 	/* Shortcut for clockevent devices that can deal with ktime. */
 	if (dev->features & CLOCK_EVT_FEAT_KTIME)
 		return dev->set_next_ktime(expires, dev);

@@ -362,7 +378,7 @@ static int clockevents_replace(struct clock_event_device *ced)
 	struct clock_event_device *dev, *newdev = NULL;
 
 	list_for_each_entry(dev, &clockevent_devices, list) {
-		if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED)
+		if (dev == ced || !clockevent_state_detached(dev))
 			continue;
 
 		if (!tick_check_replacement(newdev, dev))

@@ -388,7 +404,7 @@ static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
 {
 	/* Fast track. Device is unused */
-	if (ced->state == CLOCK_EVT_STATE_DETACHED) {
+	if (clockevent_state_detached(ced)) {
 		list_del_init(&ced->list);
 		return 0;
 	}

@@ -445,7 +461,8 @@ static int clockevents_sanity_check(struct clock_event_device *dev)
 	if (dev->set_mode) {
 		/* We shouldn't be supporting new modes now */
 		WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
-			dev->set_state_shutdown || dev->tick_resume);
+			dev->set_state_shutdown || dev->tick_resume ||
+			dev->set_state_oneshot_stopped);
 
 		BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
 		return 0;

@@ -480,7 +497,7 @@ void clockevents_register_device(struct clock_event_device *dev)
 	BUG_ON(clockevents_sanity_check(dev));
 
 	/* Initialize state to DETACHED */
-	dev->state = CLOCK_EVT_STATE_DETACHED;
+	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
 
 	if (!dev->cpumask) {
 		WARN_ON(num_possible_cpus() > 1);

@@ -545,11 +562,11 @@ int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 {
 	clockevents_config(dev, freq);
 
-	if (dev->state == CLOCK_EVT_STATE_ONESHOT)
+	if (clockevent_state_oneshot(dev))
 		return clockevents_program_event(dev, dev->next_event, false);
 
-	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
-		return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
+	if (clockevent_state_periodic(dev))
+		return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
 
 	return 0;
 }

@@ -603,13 +620,13 @@ void clockevents_exchange_device(struct clock_event_device *old,
 	 */
 	if (old) {
 		module_put(old->owner);
-		clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED);
+		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
 		list_del(&old->list);
 		list_add(&old->list, &clockevents_released);
 	}
 
 	if (new) {
-		BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED);
+		BUG_ON(!clockevent_state_detached(new));
 		clockevents_shutdown(new);
 	}
 }

@@ -622,7 +639,7 @@ void clockevents_suspend(void)
 	struct clock_event_device *dev;
 
 	list_for_each_entry_reverse(dev, &clockevent_devices, list)
-		if (dev->suspend)
+		if (dev->suspend && !clockevent_state_detached(dev))
 			dev->suspend(dev);
 }
 

@@ -634,7 +651,7 @@ void clockevents_resume(void)
 	struct clock_event_device *dev;
 
 	list_for_each_entry(dev, &clockevent_devices, list)
-		if (dev->resume)
+		if (dev->resume && !clockevent_state_detached(dev))
 			dev->resume(dev);
 }
 

@@ -665,7 +682,7 @@ void tick_cleanup_dead_cpu(int cpu)
 		if (cpumask_test_cpu(cpu, dev->cpumask) &&
 		    cpumask_weight(dev->cpumask) == 1 &&
 		    !tick_is_broadcast_device(dev)) {
-			BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
+			BUG_ON(!clockevent_state_detached(dev));
 			list_del(&dev->list);
 		}
 	}
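All of the direct dev->state comparisons above become clockevent_state_*() predicates; the get/set accessors themselves are added in the tick-internal.h hunk further down. The predicates live in include/linux/clockchips.h and, reproduced from memory of the source of this era (treat as illustrative), have this shape:

	static inline bool clockevent_state_detached(struct clock_event_device *dev)
	{
		return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
	}

	static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
	{
		return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT;
	}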
@@ -23,6 +23,8 @@
  *   o Allow clocksource drivers to be unregistered
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/clocksource.h>
 #include <linux/init.h>

@@ -216,10 +218,11 @@ static void clocksource_watchdog(unsigned long data)
 
 		/* Check the deviation from the watchdog clocksource. */
 		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
-			pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable, because the skew is too large:\n", cs->name);
-			pr_warn("	'%s' wd_now: %llx wd_last: %llx mask: %llx\n",
+			pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
+				cs->name);
+			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
 				watchdog->name, wdnow, wdlast, watchdog->mask);
-			pr_warn("	'%s' cs_now: %llx cs_last: %llx mask: %llx\n",
+			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
 				cs->name, csnow, cslast, cs->mask);
 			__clocksource_unstable(cs);
 			continue;

@@ -567,9 +570,8 @@ static void __clocksource_select(bool skipcur)
 		 */
 		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
 			/* Override clocksource cannot be used. */
-			printk(KERN_WARNING "Override clocksource %s is not "
-			       "HRT compatible. Cannot switch while in "
-			       "HRT/NOHZ mode\n", cs->name);
+			pr_warn("Override clocksource %s is not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
+				cs->name);
 			override_name[0] = 0;
 		} else
 			/* Override clocksource can be used. */

@@ -708,8 +710,8 @@ void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq
 
 	clocksource_update_max_deferment(cs);
 
-	pr_info("clocksource %s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
-			cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
+	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
+		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
 }
 EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
 

@@ -1008,12 +1010,10 @@ __setup("clocksource=", boot_override_clocksource);
 static int __init boot_override_clock(char* str)
 {
 	if (!strcmp(str, "pmtmr")) {
-		printk("Warning: clock=pmtmr is deprecated. "
-			"Use clocksource=acpi_pm.\n");
+		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
 		return boot_override_clocksource("acpi_pm");
 	}
-	printk("Warning! clock= boot option is deprecated. "
-		"Use clocksource=xyz\n");
+	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
 	return boot_override_clocksource(str);
 }
 
File diff suppressed because it is too large
@@ -35,6 +35,7 @@ unsigned long tick_nsec;
 static u64 tick_length;
 static u64 tick_length_base;
 
+#define SECS_PER_DAY		86400
 #define MAX_TICKADJ		500LL		/* usecs */
 #define MAX_TICKADJ_SCALED \
 	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)

@@ -76,6 +77,9 @@ static long time_adjust;
 /* constant (boot-param configurable) NTP tick adjustment (upscaled) */
 static s64 ntp_tick_adj;
 
+/* second value of the next pending leapsecond, or TIME64_MAX if no leap */
+static time64_t ntp_next_leap_sec = TIME64_MAX;
+
 #ifdef CONFIG_NTP_PPS
 
 /*

@@ -349,6 +353,7 @@ void ntp_clear(void)
 	tick_length	= tick_length_base;
 	time_offset	= 0;
 
+	ntp_next_leap_sec = TIME64_MAX;
 	/* Clear PPS state variables */
 	pps_clear();
 }

@@ -359,6 +364,21 @@ u64 ntp_tick_length(void)
 	return tick_length;
 }
 
+/**
+ * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t
+ *
+ * Provides the time of the next leapsecond against CLOCK_REALTIME in
+ * a ktime_t format. Returns KTIME_MAX if no leapsecond is pending.
+ */
+ktime_t ntp_get_next_leap(void)
+{
+	ktime_t ret;
+
+	if ((time_state == TIME_INS) && (time_status & STA_INS))
+		return ktime_set(ntp_next_leap_sec, 0);
+	ret.tv64 = KTIME_MAX;
+	return ret;
+}
 
 /*
  * this routine handles the overflow of the microsecond field

@@ -382,15 +402,21 @@ int second_overflow(unsigned long secs)
 	 */
 	switch (time_state) {
 	case TIME_OK:
-		if (time_status & STA_INS)
+		if (time_status & STA_INS) {
 			time_state = TIME_INS;
-		else if (time_status & STA_DEL)
+			ntp_next_leap_sec = secs + SECS_PER_DAY -
+						(secs % SECS_PER_DAY);
+		} else if (time_status & STA_DEL) {
 			time_state = TIME_DEL;
+			ntp_next_leap_sec = secs + SECS_PER_DAY -
+						((secs+1) % SECS_PER_DAY);
+		}
 		break;
 	case TIME_INS:
-		if (!(time_status & STA_INS))
+		if (!(time_status & STA_INS)) {
+			ntp_next_leap_sec = TIME64_MAX;
 			time_state = TIME_OK;
-		else if (secs % 86400 == 0) {
+		} else if (secs % SECS_PER_DAY == 0) {
 			leap = -1;
 			time_state = TIME_OOP;
 			printk(KERN_NOTICE

@@ -398,19 +424,21 @@ int second_overflow(unsigned long secs)
 		}
 		break;
 	case TIME_DEL:
-		if (!(time_status & STA_DEL))
+		if (!(time_status & STA_DEL)) {
+			ntp_next_leap_sec = TIME64_MAX;
 			time_state = TIME_OK;
-		else if ((secs + 1) % 86400 == 0) {
+		} else if ((secs + 1) % SECS_PER_DAY == 0) {
 			leap = 1;
+			ntp_next_leap_sec = TIME64_MAX;
 			time_state = TIME_WAIT;
 			printk(KERN_NOTICE
 			       "Clock: deleting leap second 23:59:59 UTC\n");
 		}
 		break;
 	case TIME_OOP:
+		ntp_next_leap_sec = TIME64_MAX;
 		time_state = TIME_WAIT;
 		break;
-
 	case TIME_WAIT:
 		if (!(time_status & (STA_INS | STA_DEL)))
 			time_state = TIME_OK;

@@ -547,6 +575,7 @@ static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
 	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
 		time_state = TIME_OK;
 		time_status = STA_UNSYNC;
+		ntp_next_leap_sec = TIME64_MAX;
 		/* restart PPS frequency calibration */
 		pps_reset_freq_interval();
 	}

@@ -711,6 +740,24 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
 	if (!(time_status & STA_NANO))
 		txc->time.tv_usec /= NSEC_PER_USEC;
 
+	/* Handle leapsec adjustments */
+	if (unlikely(ts->tv_sec >= ntp_next_leap_sec)) {
+		if ((time_state == TIME_INS) && (time_status & STA_INS)) {
+			result = TIME_OOP;
+			txc->tai++;
+			txc->time.tv_sec--;
+		}
+		if ((time_state == TIME_DEL) && (time_status & STA_DEL)) {
+			result = TIME_WAIT;
+			txc->tai--;
+			txc->time.tv_sec++;
+		}
+		if ((time_state == TIME_OOP) &&
+					(ts->tv_sec == ntp_next_leap_sec)) {
+			result = TIME_WAIT;
+		}
+	}
+
 	return result;
 }
 
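A quick sanity check of the two ntp_next_leap_sec expressions introduced above, using hypothetical numbers; both round to the midnight boundary at which the leap event occurs:

	/* Insertion: secs + SECS_PER_DAY - (secs % SECS_PER_DAY).
	 * With secs = 86400*n + 3600 (01:00 UTC on day n):
	 *   86400*n + 3600 + 86400 - 3600 = 86400*(n+1),
	 * i.e. the upcoming midnight, where 23:59:60 is inserted.
	 *
	 * Deletion: secs + SECS_PER_DAY - ((secs + 1) % SECS_PER_DAY).
	 * With the same secs, (secs + 1) % 86400 = 3601:
	 *   86400*n + 3600 + 86400 - 3601 = 86400*(n+1) - 1,
	 * i.e. 23:59:59 of day n - the second that gets deleted.
	 */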
@@ -5,6 +5,7 @@ extern void ntp_init(void);
 extern void ntp_clear(void);
 /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
 extern u64 ntp_tick_length(void);
+extern ktime_t ntp_get_next_leap(void);
 extern int second_overflow(unsigned long secs);
 extern int ntp_validate_timex(struct timex *);
 extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
@@ -272,13 +272,20 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
 	return 0;
 }
 
+static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec *tp)
+{
+	tp->tv_sec = 0;
+	tp->tv_nsec = hrtimer_resolution;
+	return 0;
+}
+
 /*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
 {
 	struct k_clock clock_realtime = {
-		.clock_getres	= hrtimer_get_res,
+		.clock_getres	= posix_get_hrtimer_res,
 		.clock_get	= posix_clock_realtime_get,
 		.clock_set	= posix_clock_realtime_set,
 		.clock_adj	= posix_clock_realtime_adj,

@@ -290,7 +297,7 @@ static __init int init_posix_timers(void)
 		.timer_del	= common_timer_del,
 	};
 	struct k_clock clock_monotonic = {
-		.clock_getres	= hrtimer_get_res,
+		.clock_getres	= posix_get_hrtimer_res,
 		.clock_get	= posix_ktime_get_ts,
 		.nsleep		= common_nsleep,
 		.nsleep_restart	= hrtimer_nanosleep_restart,

@@ -300,7 +307,7 @@ static __init int init_posix_timers(void)
 		.timer_del	= common_timer_del,
 	};
 	struct k_clock clock_monotonic_raw = {
-		.clock_getres	= hrtimer_get_res,
+		.clock_getres	= posix_get_hrtimer_res,
 		.clock_get	= posix_get_monotonic_raw,
 	};
 	struct k_clock clock_realtime_coarse = {

@@ -312,7 +319,7 @@ static __init int init_posix_timers(void)
 		.clock_get	= posix_get_monotonic_coarse,
 	};
 	struct k_clock clock_tai = {
-		.clock_getres	= hrtimer_get_res,
+		.clock_getres	= posix_get_hrtimer_res,
 		.clock_get	= posix_get_tai,
 		.nsleep		= common_nsleep,
 		.nsleep_restart	= hrtimer_nanosleep_restart,

@@ -322,7 +329,7 @@ static __init int init_posix_timers(void)
 		.timer_del	= common_timer_del,
 	};
 	struct k_clock clock_boottime = {
-		.clock_getres	= hrtimer_get_res,
+		.clock_getres	= posix_get_hrtimer_res,
 		.clock_get	= posix_get_boottime,
 		.nsleep		= common_nsleep,
 		.nsleep_restart	= hrtimer_nanosleep_restart,
@@ -22,6 +22,7 @@ static void bc_set_mode(enum clock_event_mode mode,
 			struct clock_event_device *bc)
 {
 	switch (mode) {
+	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
 		/*
 		 * Note, we cannot cancel the timer here as we might

@@ -66,9 +67,11 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 	 * hrtimer_{start/cancel} functions call into tracing,
 	 * calls to these functions must be bound within RCU_NONIDLE.
 	 */
-	RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
-		!hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
-			0);
+	RCU_NONIDLE({
+			bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
+			if (bc_moved)
+				hrtimer_start(&bctimer, expires,
+					      HRTIMER_MODE_ABS_PINNED);});
 	if (bc_moved) {
 		/* Bind the "device" to the cpu */
 		bc->bound_on = smp_processor_id();

@@ -99,10 +102,13 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
 	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-	if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX)
+	switch (ce_broadcast_hrtimer.mode) {
+	case CLOCK_EVT_MODE_ONESHOT:
+		if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX)
+			return HRTIMER_RESTART;
+	default:
 		return HRTIMER_NORESTART;
-
-	return HRTIMER_RESTART;
+	}
 }
 
 void tick_setup_hrtimer_broadcast(void)
@@ -255,18 +255,18 @@ int tick_receive_broadcast(void)
 /*
  * Broadcast the event to the cpus, which are set in the mask (mangled).
  */
-static void tick_do_broadcast(struct cpumask *mask)
+static bool tick_do_broadcast(struct cpumask *mask)
 {
 	int cpu = smp_processor_id();
 	struct tick_device *td;
+	bool local = false;
 
 	/*
	 * Check, if the current cpu is in the mask
 	 */
 	if (cpumask_test_cpu(cpu, mask)) {
 		cpumask_clear_cpu(cpu, mask);
-		td = &per_cpu(tick_cpu_device, cpu);
-		td->evtdev->event_handler(td->evtdev);
+		local = true;
 	}
 
 	if (!cpumask_empty(mask)) {

@@ -279,16 +279,17 @@ static void tick_do_broadcast(struct cpumask *mask)
 		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
 		td->evtdev->broadcast(mask);
 	}
+	return local;
 }
 
 /*
  * Periodic broadcast:
  * - invoke the broadcast handlers
 */
-static void tick_do_periodic_broadcast(void)
+static bool tick_do_periodic_broadcast(void)
 {
 	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
-	tick_do_broadcast(tmpmask);
+	return tick_do_broadcast(tmpmask);
 }
 
 /*

@@ -296,34 +297,26 @@ static bool tick_do_periodic_broadcast(void)
 */
 static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
-	ktime_t next;
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+	bool bc_local;
 
 	raw_spin_lock(&tick_broadcast_lock);
+	bc_local = tick_do_periodic_broadcast();
 
-	tick_do_periodic_broadcast();
+	if (clockevent_state_oneshot(dev)) {
+		ktime_t next = ktime_add(dev->next_event, tick_period);
 
-	/*
-	 * The device is in periodic mode. No reprogramming necessary:
-	 */
-	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
-		goto unlock;
-
-	/*
-	 * Setup the next period for devices, which do not have
-	 * periodic mode. We read dev->next_event first and add to it
-	 * when the event already expired. clockevents_program_event()
-	 * sets dev->next_event only when the event is really
-	 * programmed to the device.
-	 */
-	for (next = dev->next_event; ;) {
-		next = ktime_add(next, tick_period);
-
-		if (!clockevents_program_event(dev, next, false))
-			goto unlock;
-		tick_do_periodic_broadcast();
+		clockevents_program_event(dev, next, true);
 	}
-unlock:
 	raw_spin_unlock(&tick_broadcast_lock);
+
+	/*
+	 * We run the handler of the local cpu after dropping
+	 * tick_broadcast_lock because the handler might deadlock when
+	 * trying to switch to oneshot mode.
+	 */
+	if (bc_local)
+		td->evtdev->event_handler(td->evtdev);
 }
 
 /**

@@ -532,23 +525,19 @@ static void tick_broadcast_set_affinity(struct clock_event_device *bc,
 	irq_set_affinity(bc->irq, bc->cpumask);
 }
 
-static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
-				    ktime_t expires, int force)
+static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
+				     ktime_t expires)
 {
-	int ret;
-
-	if (bc->state != CLOCK_EVT_STATE_ONESHOT)
-		clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
+	if (!clockevent_state_oneshot(bc))
+		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
 
-	ret = clockevents_program_event(bc, expires, force);
-	if (!ret)
-		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
-	return ret;
+	clockevents_program_event(bc, expires, 1);
+	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
 }
 
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
-	clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
+	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
 }
 
 /*

@@ -566,7 +555,7 @@ void tick_check_oneshot_broadcast_this_cpu(void)
 	 * switched over, leave the device alone.
 	 */
 	if (td->mode == TICKDEV_MODE_ONESHOT) {
-		clockevents_set_state(td->evtdev,
+		clockevents_switch_state(td->evtdev,
 				      CLOCK_EVT_STATE_ONESHOT);
 	}
 }

@@ -580,9 +569,9 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 	struct tick_device *td;
 	ktime_t now, next_event;
 	int cpu, next_cpu = 0;
+	bool bc_local;
 
 	raw_spin_lock(&tick_broadcast_lock);
-again:
 	dev->next_event.tv64 = KTIME_MAX;
 	next_event.tv64 = KTIME_MAX;
 	cpumask_clear(tmpmask);

@@ -624,7 +613,7 @@ again:
 	/*
	 * Wakeup the cpus which have an expired event.
 	 */
-	tick_do_broadcast(tmpmask);
+	bc_local = tick_do_broadcast(tmpmask);
 
 	/*
	 * Two reasons for reprogram:

@@ -636,15 +625,15 @@ again:
 	 * - There are pending events on sleeping CPUs which were not
 	 * in the event mask
 	 */
-	if (next_event.tv64 != KTIME_MAX) {
-		/*
-		 * Rearm the broadcast device. If event expired,
-		 * repeat the above
-		 */
-		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
-			goto again;
-	}
+	if (next_event.tv64 != KTIME_MAX)
+		tick_broadcast_set_event(dev, next_cpu, next_event);
+
 	raw_spin_unlock(&tick_broadcast_lock);
+
+	if (bc_local) {
+		td = this_cpu_ptr(&tick_cpu_device);
+		td->evtdev->event_handler(td->evtdev);
+	}
 }
 
 static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)

@@ -670,7 +659,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
 		if (dev->next_event.tv64 < bc->next_event.tv64)
 			return;
 	}
-	clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
+	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
 /**

@@ -726,7 +715,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 			 */
 			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
 			    dev->next_event.tv64 < bc->next_event.tv64)
-				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
+				tick_broadcast_set_event(bc, cpu, dev->next_event);
 		}
 		/*
		 * If the current CPU owns the hrtimer broadcast

@@ -740,7 +729,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
-			clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
 			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast

@@ -842,7 +831,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 
 	/* Set it up only once ! */
 	if (bc->event_handler != tick_handle_oneshot_broadcast) {
-		int was_periodic = bc->state == CLOCK_EVT_STATE_PERIODIC;
+		int was_periodic = clockevent_state_periodic(bc);
 
 		bc->event_handler = tick_handle_oneshot_broadcast;
 

@@ -858,10 +847,10 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 			   tick_broadcast_oneshot_mask, tmpmask);
 
 		if (was_periodic && !cpumask_empty(tmpmask)) {
-			clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
+			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
 			tick_broadcast_init_next_event(tmpmask,
 						       tick_next_period);
-			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
+			tick_broadcast_set_event(bc, cpu, tick_next_period);
 		} else
 			bc->next_event.tv64 = KTIME_MAX;
 	} else {
@@ -102,7 +102,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
 
 	tick_periodic(cpu);
 
-	if (dev->state != CLOCK_EVT_STATE_ONESHOT)
+#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
+	/*
+	 * The cpu might have transitioned to HIGHRES or NOHZ mode via
+	 * update_process_times() -> run_local_timers() ->
+	 * hrtimer_run_queues().
+	 */
+	if (dev->event_handler != tick_handle_periodic)
+		return;
+#endif
+
+	if (!clockevent_state_oneshot(dev))
 		return;
 	for (;;) {
 		/*

@@ -140,7 +150,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 
 	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
 	    !tick_broadcast_oneshot_active()) {
-		clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
+		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
 	} else {
 		unsigned long seq;
 		ktime_t next;

@@ -150,7 +160,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 			next = tick_next_period;
 		} while (read_seqretry(&jiffies_lock, seq));
 
-		clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
 
 		for (;;) {
 			if (!clockevents_program_event(dev, next, false))

@@ -367,7 +377,7 @@ void tick_shutdown(unsigned int cpu)
 		 * Prevent that the clock events layer tries to call
		 * the set mode function!
 		 */
-		dev->state = CLOCK_EVT_STATE_DETACHED;
+		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
 		dev->mode = CLOCK_EVT_MODE_UNUSED;
 		clockevents_exchange_device(dev, NULL);
 		dev->event_handler = clockevents_handle_noop;
@@ -36,11 +36,22 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 	return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
 }
 
+static inline enum clock_event_state clockevent_get_state(struct clock_event_device *dev)
+{
+	return dev->state_use_accessors;
+}
+
+static inline void clockevent_set_state(struct clock_event_device *dev,
+					enum clock_event_state state)
+{
+	dev->state_use_accessors = state;
+}
+
 extern void clockevents_shutdown(struct clock_event_device *dev);
 extern void clockevents_exchange_device(struct clock_event_device *old,
 					struct clock_event_device *new);
-extern void clockevents_set_state(struct clock_event_device *dev,
-				  enum clock_event_state state);
+extern void clockevents_switch_state(struct clock_event_device *dev,
+				     enum clock_event_state state);
 extern int clockevents_program_event(struct clock_event_device *dev,
 				     ktime_t expires, bool force);
 extern void clockevents_handle_noop(struct clock_event_device *dev);
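Note on the two new accessors: the series pairs them with per-state predicates such as clockevent_state_periodic() and clockevent_state_oneshot(), which the surrounding hunks already use. They are presumably built on clockevent_get_state() along these lines (a sketch, not the exact header text):

/* Sketch: per-state predicates presumably built on clockevent_get_state(). */
static inline bool clockevent_state_periodic(struct clock_event_device *dev)
{
	return clockevent_get_state(dev) == CLOCK_EVT_STATE_PERIODIC;
}

static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
{
	return clockevent_get_state(dev) == CLOCK_EVT_STATE_ONESHOT;
}

static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
{
	return clockevent_get_state(dev) == CLOCK_EVT_STATE_ONESHOT_STOPPED;
}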
@@ -137,3 +148,5 @@ extern void tick_nohz_init(void);
 # else
 static inline void tick_nohz_init(void) { }
 #endif
+
+extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
@@ -28,6 +28,22 @@ int tick_program_event(ktime_t expires, int force)
 {
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
+	if (unlikely(expires.tv64 == KTIME_MAX)) {
+		/*
+		 * We don't need the clock event device any more, stop it.
+		 */
+		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
+		return 0;
+	}
+
+	if (unlikely(clockevent_state_oneshot_stopped(dev))) {
+		/*
+		 * We need the clock event again, configure it in ONESHOT mode
+		 * before using it.
+		 */
+		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
+	}
+
 	return clockevents_program_event(dev, expires, force);
 }
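With CLOCK_EVT_STATE_ONESHOT_STOPPED, a device can now be parked and revived purely through tick_program_event(). A hypothetical caller sequence, for illustration only:

/* Hypothetical round trip through the new state. */
ktime_t never = { .tv64 = KTIME_MAX };

tick_program_event(never, 0);		/* parks the device in ONESHOT_STOPPED */
tick_program_event(ktime_get(), 1);	/* back to ONESHOT and re-armed */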
@@ -38,7 +54,7 @@ void tick_resume_oneshot(void)
 {
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-	clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+	clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
 	clockevents_program_event(dev, ktime_get(), true);
 }
 
@@ -50,7 +66,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
 			ktime_t next_event)
 {
 	newdev->event_handler = handler;
-	clockevents_set_state(newdev, CLOCK_EVT_STATE_ONESHOT);
+	clockevents_switch_state(newdev, CLOCK_EVT_STATE_ONESHOT);
 	clockevents_program_event(newdev, next_event, true);
 }
 
@@ -81,7 +97,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
 
 	td->mode = TICKDEV_MODE_ONESHOT;
 	dev->event_handler = handler;
-	clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+	clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
 	tick_broadcast_switch_to_oneshot();
 	return 0;
 }
@@ -565,156 +565,144 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+	hrtimer_cancel(&ts->sched_timer);
+	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
+
+	/* Forward the time to expire in the future */
+	hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
+	else
+		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+}
+
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 					 ktime_t now, int cpu)
 {
-	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
-	ktime_t last_update, expires, ret = { .tv64 = 0 };
-	unsigned long rcu_delta_jiffies;
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
-	u64 time_delta;
-
-	time_delta = timekeeping_max_deferment();
+	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
+	unsigned long seq, basejiff;
+	ktime_t tick;
 
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&jiffies_lock);
-		last_update = last_jiffies_update;
-		last_jiffies = jiffies;
+		basemono = last_jiffies_update.tv64;
+		basejiff = jiffies;
 	} while (read_seqretry(&jiffies_lock, seq));
+	ts->last_jiffies = basejiff;
 
-	if (rcu_needs_cpu(&rcu_delta_jiffies) ||
+	if (rcu_needs_cpu(basemono, &next_rcu) ||
 	    arch_needs_cpu() || irq_work_needs_cpu()) {
-		next_jiffies = last_jiffies + 1;
-		delta_jiffies = 1;
+		next_tick = basemono + TICK_NSEC;
 	} else {
-		/* Get the next timer wheel timer */
-		next_jiffies = get_next_timer_interrupt(last_jiffies);
-		delta_jiffies = next_jiffies - last_jiffies;
-		if (rcu_delta_jiffies < delta_jiffies) {
-			next_jiffies = last_jiffies + rcu_delta_jiffies;
-			delta_jiffies = rcu_delta_jiffies;
-		}
+		/*
+		 * Get the next pending timer. If high resolution
+		 * timers are enabled this only takes the timer wheel
+		 * timers into account. If high resolution timers are
+		 * disabled this also looks at the next expiring
+		 * hrtimer.
+		 */
+		next_tmr = get_next_timer_interrupt(basejiff, basemono);
+		ts->next_timer = next_tmr;
+		/* Take the next rcu event into account */
+		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
 	}
 
 	/*
-	 * Do not stop the tick, if we are only one off (or less)
-	 * or if the cpu is required for RCU:
+	 * If the tick is due in the next period, keep it ticking or
+	 * restart it proper.
 	 */
-	if (!ts->tick_stopped && delta_jiffies <= 1)
-		goto out;
-
-	/* Schedule the tick, if we are at least one jiffie off */
-	if ((long)delta_jiffies >= 1) {
-
-		/*
-		 * If this cpu is the one which updates jiffies, then
-		 * give up the assignment and let it be taken by the
-		 * cpu which runs the tick timer next, which might be
-		 * this cpu as well. If we don't drop this here the
-		 * jiffies might be stale and do_timer() never
-		 * invoked. Keep track of the fact that it was the one
-		 * which had the do_timer() duty last. If this cpu is
-		 * the one which had the do_timer() duty last, we
-		 * limit the sleep time to the timekeeping
-		 * max_deferment value which we retrieved
-		 * above. Otherwise we can sleep as long as we want.
-		 */
-		if (cpu == tick_do_timer_cpu) {
-			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-			ts->do_timer_last = 1;
-		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
-			time_delta = KTIME_MAX;
-			ts->do_timer_last = 0;
-		} else if (!ts->do_timer_last) {
-			time_delta = KTIME_MAX;
-		}
+	delta = next_tick - basemono;
+	if (delta <= (u64)TICK_NSEC) {
+		tick.tv64 = 0;
+		if (!ts->tick_stopped)
+			goto out;
+		if (delta == 0) {
+			/* Tick is stopped, but required now. Enforce it */
+			tick_nohz_restart(ts, now);
+			goto out;
+		}
+	}
+
+	/*
+	 * If this cpu is the one which updates jiffies, then give up
+	 * the assignment and let it be taken by the cpu which runs
+	 * the tick timer next, which might be this cpu as well. If we
+	 * don't drop this here the jiffies might be stale and
+	 * do_timer() never invoked. Keep track of the fact that it
+	 * was the one which had the do_timer() duty last. If this cpu
+	 * is the one which had the do_timer() duty last, we limit the
+	 * sleep time to the timekeeping max_deferment value.
+	 * Otherwise we can sleep as long as we want.
+	 */
+	delta = timekeeping_max_deferment();
+	if (cpu == tick_do_timer_cpu) {
+		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+		ts->do_timer_last = 1;
+	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
+		delta = KTIME_MAX;
+		ts->do_timer_last = 0;
+	} else if (!ts->do_timer_last) {
+		delta = KTIME_MAX;
+	}
 
 #ifdef CONFIG_NO_HZ_FULL
-		if (!ts->inidle) {
-			time_delta = min(time_delta,
-					 scheduler_tick_max_deferment());
-		}
+	/* Limit the tick delta to the maximum scheduler deferment */
+	if (!ts->inidle)
+		delta = min(delta, scheduler_tick_max_deferment());
#endif
 
-		/*
-		 * calculate the expiry time for the next timer wheel
-		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
-		 * that there is no timer pending or at least extremely
-		 * far into the future (12 days for HZ=1000). In this
-		 * case we set the expiry to the end of time.
-		 */
-		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
-			/*
-			 * Calculate the time delta for the next timer event.
-			 * If the time delta exceeds the maximum time delta
-			 * permitted by the current clocksource then adjust
-			 * the time delta accordingly to ensure the
-			 * clocksource does not wrap.
-			 */
-			time_delta = min_t(u64, time_delta,
-					   tick_period.tv64 * delta_jiffies);
-		}
-
-		if (time_delta < KTIME_MAX)
-			expires = ktime_add_ns(last_update, time_delta);
-		else
-			expires.tv64 = KTIME_MAX;
-
-		/* Skip reprogram of event if its not changed */
-		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
-			goto out;
-
-		ret = expires;
-
-		/*
-		 * nohz_stop_sched_tick can be called several times before
-		 * the nohz_restart_sched_tick is called. This happens when
-		 * interrupts arrive which do not cause a reschedule. In the
-		 * first call we save the current tick time, so we can restart
-		 * the scheduler tick in nohz_restart_sched_tick.
-		 */
-		if (!ts->tick_stopped) {
-			nohz_balance_enter_idle(cpu);
-			calc_load_enter_idle();
-
-			ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
-			ts->tick_stopped = 1;
-			trace_tick_stop(1, " ");
-		}
-
-		/*
-		 * If the expiration time == KTIME_MAX, then
-		 * in this case we simply stop the tick timer.
-		 */
-		if (unlikely(expires.tv64 == KTIME_MAX)) {
-			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
-				hrtimer_cancel(&ts->sched_timer);
-			goto out;
-		}
-
-		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-			hrtimer_start(&ts->sched_timer, expires,
-				      HRTIMER_MODE_ABS_PINNED);
-			/* Check, if the timer was already in the past */
-			if (hrtimer_active(&ts->sched_timer))
-				goto out;
-		} else if (!tick_program_event(expires, 0))
-			goto out;
-		/*
-		 * We are past the event already. So we crossed a
-		 * jiffie boundary. Update jiffies and raise the
-		 * softirq.
-		 */
-		tick_do_update_jiffies64(ktime_get());
-	}
-	raise_softirq_irqoff(TIMER_SOFTIRQ);
-out:
-	ts->next_jiffies = next_jiffies;
-	ts->last_jiffies = last_jiffies;
-	ts->sleep_length = ktime_sub(dev->next_event, now);
-
-	return ret;
+	/* Calculate the next expiry time */
+	if (delta < (KTIME_MAX - basemono))
+		expires = basemono + delta;
+	else
+		expires = KTIME_MAX;
+
+	expires = min_t(u64, expires, next_tick);
+	tick.tv64 = expires;
+
+	/* Skip reprogram of event if its not changed */
+	if (ts->tick_stopped && (expires == dev->next_event.tv64))
+		goto out;
+
+	/*
+	 * nohz_stop_sched_tick can be called several times before
+	 * the nohz_restart_sched_tick is called. This happens when
+	 * interrupts arrive which do not cause a reschedule. In the
+	 * first call we save the current tick time, so we can restart
+	 * the scheduler tick in nohz_restart_sched_tick.
+	 */
+	if (!ts->tick_stopped) {
+		nohz_balance_enter_idle(cpu);
+		calc_load_enter_idle();
+
+		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
+		ts->tick_stopped = 1;
+		trace_tick_stop(1, " ");
+	}
+
+	/*
+	 * If the expiration time == KTIME_MAX, then we simply stop
+	 * the tick timer.
+	 */
+	if (unlikely(expires == KTIME_MAX)) {
+		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+			hrtimer_cancel(&ts->sched_timer);
+		goto out;
+	}
+
+	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+		hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
+	else
+		tick_program_event(tick, 1);
+out:
+	/* Update the estimated sleep length */
+	ts->sleep_length = ktime_sub(dev->next_event, now);
+	return tick;
 }
 
 static void tick_nohz_full_stop_tick(struct tick_sched *ts)
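The rewritten tick_nohz_stop_sched_tick() above does all of its expiry math in plain u64 nanoseconds instead of mixing jiffies with ktime_t. A self-contained sketch of the core decision, with the kernel constants stubbed out and all names illustrative, not the kernel's:

#include <stdint.h>

#define TICK_NSEC 1000000ULL		/* ns per tick, assuming HZ=1000 */
#define KTIME_MAX ((uint64_t)INT64_MAX)

/* Illustrative restatement of the new expiry logic: returns 0 when the
 * periodic tick must keep running, otherwise the absolute expiry time. */
uint64_t next_tick_expiry(uint64_t basemono, uint64_t next_tick,
			  uint64_t max_deferment)
{
	uint64_t delta = next_tick - basemono;

	if (delta <= TICK_NSEC)			/* due within one period */
		return 0;
	if (delta > max_deferment)		/* clocksource wrap limit */
		delta = max_deferment;
	if (delta >= KTIME_MAX - basemono)	/* would overflow: park */
		return KTIME_MAX;
	return basemono + delta < next_tick ? basemono + delta : next_tick;
}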
@@ -876,32 +864,6 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }
 
-static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
-{
-	hrtimer_cancel(&ts->sched_timer);
-	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
-
-	while (1) {
-		/* Forward the time to expire in the future */
-		hrtimer_forward(&ts->sched_timer, now, tick_period);
-
-		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-			hrtimer_start_expires(&ts->sched_timer,
-					      HRTIMER_MODE_ABS_PINNED);
-			/* Check, if the timer was already in the past */
-			if (hrtimer_active(&ts->sched_timer))
-				break;
-		} else {
-			if (!tick_program_event(
-				hrtimer_get_expires(&ts->sched_timer), 0))
-				break;
-		}
-		/* Reread time and update jiffies */
-		now = ktime_get();
-		tick_do_update_jiffies64(now);
-	}
-}
-
 static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 {
 	/* Update jiffies first */
 
@@ -972,12 +934,6 @@ void tick_nohz_idle_exit(void)
 	local_irq_enable();
 }
 
-static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
-{
-	hrtimer_forward(&ts->sched_timer, now, tick_period);
-	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
-}
-
 /*
  * The nohz low res interrupt handler
  */
@@ -996,10 +952,8 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	if (unlikely(ts->tick_stopped))
 		return;
 
-	while (tick_nohz_reprogram(ts, now)) {
-		now = ktime_get();
-		tick_do_update_jiffies64(now);
-	}
+	hrtimer_forward(&ts->sched_timer, now, tick_period);
+	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 }
 
 /**
 
@@ -1013,11 +967,9 @@ static void tick_nohz_switch_to_nohz(void)
 	if (!tick_nohz_enabled)
 		return;
 
-	local_irq_disable();
-	if (tick_switch_to_oneshot(tick_nohz_handler)) {
-		local_irq_enable();
+	if (tick_switch_to_oneshot(tick_nohz_handler))
 		return;
-	}
+
 	tick_nohz_active = 1;
 	ts->nohz_mode = NOHZ_MODE_LOWRES;
 
@@ -1029,13 +981,9 @@ static void tick_nohz_switch_to_nohz(void)
 	/* Get the next period */
 	next = tick_init_jiffy_update();
 
-	for (;;) {
-		hrtimer_set_expires(&ts->sched_timer, next);
-		if (!tick_program_event(next, 0))
-			break;
-		next = ktime_add(next, tick_period);
-	}
-	local_irq_enable();
+	hrtimer_forward_now(&ts->sched_timer, tick_period);
+	hrtimer_set_expires(&ts->sched_timer, next);
+	tick_program_event(next, 1);
 }
@@ -1167,15 +1115,8 @@ void tick_setup_sched_timer(void)
 		hrtimer_add_expires_ns(&ts->sched_timer, offset);
 	}
 
-	for (;;) {
-		hrtimer_forward(&ts->sched_timer, now, tick_period);
-		hrtimer_start_expires(&ts->sched_timer,
-				      HRTIMER_MODE_ABS_PINNED);
-		/* Check, if the timer was already in the past */
-		if (hrtimer_active(&ts->sched_timer))
-			break;
-		now = ktime_get();
-	}
+	hrtimer_forward(&ts->sched_timer, now, tick_period);
+	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
 
 #ifdef CONFIG_NO_HZ_COMMON
 	if (tick_nohz_enabled) {
 
@@ -1227,7 +1168,7 @@ void tick_oneshot_notify(void)
  * Called cyclic from the hrtimer softirq (driven by the timer
  * softirq) allow_nohz signals, that we can switch into low-res nohz
  * mode, because high resolution timers are disabled (either compile
- * or runtime).
+ * or runtime). Called with interrupts disabled.
  */
 int tick_check_oneshot_change(int allow_nohz)
 {
@@ -57,7 +57,7 @@ struct tick_sched {
 	ktime_t				iowait_sleeptime;
 	ktime_t				sleep_length;
 	unsigned long			last_jiffies;
-	unsigned long			next_jiffies;
+	u64				next_timer;
 	ktime_t				idle_expires;
 	int				do_timer_last;
 };
 
@@ -41,7 +41,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
-#include "timeconst.h"
+#include <generated/timeconst.h>
 #include "timekeeping.h"
 
 /*
@@ -173,6 +173,10 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
 		return error;
 
 	if (tz) {
+		/* Verify we're within the +-15 hrs range */
+		if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
+			return -EINVAL;
+
 		sys_tz = *tz;
 		update_vsyscall_tz();
 		if (firsttime) {
@@ -483,9 +487,11 @@ struct timespec64 ns_to_timespec64(const s64 nsec)
 }
 EXPORT_SYMBOL(ns_to_timespec64);
 #endif
-/*
- * When we convert to jiffies then we interpret incoming values
- * the following way:
+/**
+ * msecs_to_jiffies: - convert milliseconds to jiffies
+ * @m:	time in milliseconds
+ *
+ * conversion is done as follows:
  *
  * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
  *
@@ -493,66 +499,36 @@ EXPORT_SYMBOL(ns_to_timespec64);
  *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
  *
  * - all other values are converted to jiffies by either multiplying
- *   the input value by a factor or dividing it with a factor
- *
- * We must also be careful about 32-bit overflows.
+ *   the input value by a factor or dividing it with a factor and
+ *   handling any 32-bit overflows.
+ *   for the details see __msecs_to_jiffies()
+ *
+ * msecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __msecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the _msecs_to_jiffies helpers are the HZ dependent conversion
+ * routines found in include/linux/jiffies.h
  */
-unsigned long msecs_to_jiffies(const unsigned int m)
+unsigned long __msecs_to_jiffies(const unsigned int m)
 {
 	/*
 	 * Negative value, means infinite timeout:
 	 */
 	if ((int)m < 0)
 		return MAX_JIFFY_OFFSET;
-
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
-	/*
-	 * HZ is equal to or smaller than 1000, and 1000 is a nice
-	 * round multiple of HZ, divide with the factor between them,
-	 * but round upwards:
-	 */
-	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
-	/*
-	 * HZ is larger than 1000, and HZ is a nice round multiple of
-	 * 1000 - simply multiply with the factor between them.
-	 *
-	 * But first make sure the multiplication result cannot
-	 * overflow:
-	 */
-	if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-		return MAX_JIFFY_OFFSET;
-
-	return m * (HZ / MSEC_PER_SEC);
-#else
-	/*
-	 * Generic case - multiply, round and divide. But first
-	 * check that if we are doing a net multiplication, that
-	 * we wouldn't overflow:
-	 */
-	if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-		return MAX_JIFFY_OFFSET;
-
-	return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
-		>> MSEC_TO_HZ_SHR32;
-#endif
+	return _msecs_to_jiffies(m);
 }
-EXPORT_SYMBOL(msecs_to_jiffies);
+EXPORT_SYMBOL(__msecs_to_jiffies);
 
-unsigned long usecs_to_jiffies(const unsigned int u)
+unsigned long __usecs_to_jiffies(const unsigned int u)
 {
 	if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
 		return MAX_JIFFY_OFFSET;
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
-	return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-	return u * (HZ / USEC_PER_SEC);
-#else
-	return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
-		>> USEC_TO_HZ_SHR32;
-#endif
+	return _usecs_to_jiffies(u);
 }
-EXPORT_SYMBOL(usecs_to_jiffies);
+EXPORT_SYMBOL(__usecs_to_jiffies);
 
 /*
  * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
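The comment block above describes the split: a header-side msecs_to_jiffies() wrapper folds constant arguments at compile time and only falls back to __msecs_to_jiffies() for runtime values. The wrapper presumably looks like this in include/linux/jiffies.h (a sketch):

/* Sketch of the constant-folding wrapper (include/linux/jiffies.h). */
static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
	if (__builtin_constant_p(m)) {
		if ((int)m < 0)
			return MAX_JIFFY_OFFSET;
		return _msecs_to_jiffies(m);	/* HZ dependent, folds away */
	}
	return __msecs_to_jiffies(m);		/* runtime conversion */
}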
@@ -50,7 +50,7 @@ define timeconst(hz) {
 		print "#include <linux/types.h>\n\n"
 
 		print "#if HZ != ", hz, "\n"
-		print "#error \qkernel/timeconst.h has the wrong HZ value!\q\n"
+		print "#error \qinclude/generated/timeconst.h has the wrong HZ value!\q\n"
 		print "#endif\n\n"
 
 		if (hz < 2) {
 
@@ -105,4 +105,5 @@ define timeconst(hz) {
 	halt
 }
 
+hz = read();
 timeconst(hz)
@@ -118,18 +118,6 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 
 #ifdef CONFIG_DEBUG_TIMEKEEPING
 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
-/*
- * These simple flag variables are managed
- * without locks, which is racy, but ok since
- * we don't really care about being super
- * precise about how many events were seen,
- * just that a problem was observed.
- */
-static int timekeeping_underflow_seen;
-static int timekeeping_overflow_seen;
-
-/* last_warning is only modified under the timekeeping lock */
-static long timekeeping_last_warning;
 
 static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 {
@@ -149,29 +137,30 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 		}
 	}
 
-	if (timekeeping_underflow_seen) {
-		if (jiffies - timekeeping_last_warning > WARNING_FREQ) {
+	if (tk->underflow_seen) {
+		if (jiffies - tk->last_warning > WARNING_FREQ) {
 			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
 			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
 			printk_deferred("         Your kernel is probably still fine.\n");
-			timekeeping_last_warning = jiffies;
+			tk->last_warning = jiffies;
 		}
-		timekeeping_underflow_seen = 0;
+		tk->underflow_seen = 0;
 	}
 
-	if (timekeeping_overflow_seen) {
-		if (jiffies - timekeeping_last_warning > WARNING_FREQ) {
+	if (tk->overflow_seen) {
+		if (jiffies - tk->last_warning > WARNING_FREQ) {
 			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
 			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
 			printk_deferred("         Your kernel is probably still fine.\n");
-			timekeeping_last_warning = jiffies;
+			tk->last_warning = jiffies;
 		}
-		timekeeping_overflow_seen = 0;
+		tk->overflow_seen = 0;
 	}
 }
 
 static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
 {
+	struct timekeeper *tk = &tk_core.timekeeper;
 	cycle_t now, last, mask, max, delta;
 	unsigned int seq;
 
@@ -197,13 +186,13 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
 	 * mask-relative negative values.
 	 */
 	if (unlikely((~delta & mask) < (mask >> 3))) {
-		timekeeping_underflow_seen = 1;
+		tk->underflow_seen = 1;
 		delta = 0;
 	}
 
 	/* Cap delta value to the max_cycles values to avoid mult overflows */
 	if (unlikely(delta > max)) {
-		timekeeping_overflow_seen = 1;
+		tk->overflow_seen = 1;
 		delta = tkr->clock->max_cycles;
 	}
 
@@ -550,6 +539,17 @@ int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
 
+/*
+ * tk_update_leap_state - helper to update the next_leap_ktime
+ */
+static inline void tk_update_leap_state(struct timekeeper *tk)
+{
+	tk->next_leap_ktime = ntp_get_next_leap();
+	if (tk->next_leap_ktime.tv64 != KTIME_MAX)
+		/* Convert to monotonic time */
+		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
+}
+
 /*
  * Update the ktime_t based scalar nsec members of the timekeeper
  */
@@ -591,17 +591,25 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 		ntp_clear();
 	}
 
+	tk_update_leap_state(tk);
 	tk_update_ktime_data(tk);
 
 	update_vsyscall(tk);
 	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 
+	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
+	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
+
+	if (action & TK_CLOCK_WAS_SET)
+		tk->clock_was_set_seq++;
+	/*
+	 * The mirroring of the data to the shadow-timekeeper needs
+	 * to happen last here to ensure we don't over-write the
+	 * timekeeper structure on the next update with stale data
+	 */
 	if (action & TK_MIRROR)
 		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
 		       sizeof(tk_core.timekeeper));
-
-	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
-	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
 }
 
 /**
@@ -699,6 +707,23 @@ ktime_t ktime_get(void)
 }
 EXPORT_SYMBOL_GPL(ktime_get);
 
+u32 ktime_get_resolution_ns(void)
+{
+	struct timekeeper *tk = &tk_core.timekeeper;
+	unsigned int seq;
+	u32 nsecs;
+
+	WARN_ON(timekeeping_suspended);
+
+	do {
+		seq = read_seqcount_begin(&tk_core.seq);
+		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
+	} while (read_seqcount_retry(&tk_core.seq, seq));
+
+	return nsecs;
+}
+EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
+
 static ktime_t *offsets[TK_OFFS_MAX] = {
 	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
 	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
@@ -1179,28 +1204,20 @@ void __weak read_persistent_clock64(struct timespec64 *ts64)
 }
 
 /**
- * read_boot_clock -  Return time of the system start.
+ * read_boot_clock64 -  Return time of the system start.
  *
  * Weak dummy function for arches that do not yet support it.
  * Function to read the exact time the system has been started.
- * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
+ * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
-void __weak read_boot_clock(struct timespec *ts)
+void __weak read_boot_clock64(struct timespec64 *ts)
 {
 	ts->tv_sec = 0;
 	ts->tv_nsec = 0;
 }
 
-void __weak read_boot_clock64(struct timespec64 *ts64)
-{
-	struct timespec ts;
-
-	read_boot_clock(&ts);
-	*ts64 = timespec_to_timespec64(ts);
-}
-
 /* Flag for if timekeeping_resume() has injected sleeptime */
 static bool sleeptime_injected;
 
@@ -1836,8 +1853,9 @@ void update_wall_time(void)
 	 * memcpy under the tk_core.seq against one before we start
 	 * updating.
 	 */
-	memcpy(real_tk, tk, sizeof(*tk));
-	timekeeping_update(real_tk, clock_set);
+	timekeeping_update(tk, clock_set);
+	memcpy(real_tk, tk, sizeof(*tk));
+	/* The memcpy must come last. Do not put anything here! */
 	write_seqcount_end(&tk_core.seq);
 out:
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
@@ -1925,48 +1943,21 @@ void do_timer(unsigned long ticks)
 	calc_global_load(ticks);
 }
 
-/**
- * ktime_get_update_offsets_tick - hrtimer helper
- * @offs_real:	pointer to storage for monotonic -> realtime offset
- * @offs_boot:	pointer to storage for monotonic -> boottime offset
- * @offs_tai:	pointer to storage for monotonic -> clock tai offset
- *
- * Returns monotonic time at last tick and various offsets
- */
-ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
-				      ktime_t *offs_tai)
-{
-	struct timekeeper *tk = &tk_core.timekeeper;
-	unsigned int seq;
-	ktime_t base;
-	u64 nsecs;
-
-	do {
-		seq = read_seqcount_begin(&tk_core.seq);
-
-		base = tk->tkr_mono.base;
-		nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
-
-		*offs_real = tk->offs_real;
-		*offs_boot = tk->offs_boot;
-		*offs_tai = tk->offs_tai;
-	} while (read_seqcount_retry(&tk_core.seq, seq));
-
-	return ktime_add_ns(base, nsecs);
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
 /**
  * ktime_get_update_offsets_now - hrtimer helper
+ * @cwsseq:	pointer to check and store the clock was set sequence number
  * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
- * Returns current monotonic time and updates the offsets
+ * Returns current monotonic time and updates the offsets if the
+ * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
+ * different.
+ *
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
-ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
-				     ktime_t *offs_tai)
+ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
+				     ktime_t *offs_boot, ktime_t *offs_tai)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned int seq;
@@ -1978,15 +1969,23 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
 
 		base = tk->tkr_mono.base;
 		nsecs = timekeeping_get_ns(&tk->tkr_mono);
+		base = ktime_add_ns(base, nsecs);
 
-		*offs_real = tk->offs_real;
-		*offs_boot = tk->offs_boot;
-		*offs_tai = tk->offs_tai;
+		if (*cwsseq != tk->clock_was_set_seq) {
+			*cwsseq = tk->clock_was_set_seq;
+			*offs_real = tk->offs_real;
+			*offs_boot = tk->offs_boot;
+			*offs_tai = tk->offs_tai;
+		}
+
+		/* Handle leapsecond insertion adjustments */
+		if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
+			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
+
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
-	return ktime_add_ns(base, nsecs);
+	return base;
 }
-#endif
 
 /**
  * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 
@@ -2027,6 +2026,8 @@ int do_adjtimex(struct timex *txc)
 		__timekeeping_set_tai_offset(tk, tai);
 		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 	}
+	tk_update_leap_state(tk);
+
 	write_seqcount_end(&tk_core.seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
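The new @cwsseq parameter lets callers cache the three offsets and refresh them only when the clock was actually set. A hypothetical caller, with illustrative local names:

/* Hypothetical caller: offsets are only re-read when the timekeeper's
 * clock_was_set_seq differs from the cached value. */
static unsigned int cached_cwsseq;
static ktime_t off_real, off_boot, off_tai;

ktime_t basenow = ktime_get_update_offsets_now(&cached_cwsseq, &off_real,
					       &off_boot, &off_tai);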
@@ -3,19 +3,16 @@
 /*
  * Internal interfaces for kernel/time/
  */
-extern ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real,
-					     ktime_t *offs_boot,
-					     ktime_t *offs_tai);
-extern ktime_t ktime_get_update_offsets_now(ktime_t *offs_real,
-					    ktime_t *offs_boot,
-					    ktime_t *offs_tai);
+extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
+					    ktime_t *offs_real,
+					    ktime_t *offs_boot,
+					    ktime_t *offs_tai);
 
 extern int timekeeping_valid_for_hres(void);
 extern u64 timekeeping_max_deferment(void);
 extern int timekeeping_inject_offset(struct timespec *ts);
 extern s32 timekeeping_get_tai_offset(void);
 extern void timekeeping_set_tai_offset(s32 tai_offset);
-extern void timekeeping_clocktai(struct timespec *ts);
 extern int timekeeping_suspend(void);
 extern void timekeeping_resume(void);
@@ -49,6 +49,8 @@
 #include <asm/timex.h>
 #include <asm/io.h>
 
+#include "tick-internal.h"
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/timer.h>
 
@@ -434,7 +436,7 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 	 * require special care against races with idle_cpu(), lets deal
 	 * with that later.
 	 */
-	if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu))
+	if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(base->cpu))
 		wake_up_nohz_cpu(base->cpu);
 }
 
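The fix matters because the deferrable flag is encoded in the low bit of timer->base, not in the struct that the stripped base pointer refers to; testing the plain base argument therefore always saw the flag cleared. A sketch of the encoding as this timer.c presumably defines it:

/* The flag travels in bit 0 of timer->base, so it survives only on the
 * tagged pointer, not on a stripped struct tvec_base pointer. */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base &
				     ~TBASE_DEFERRABLE_FLAG));
}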
@@ -648,7 +650,7 @@ static inline void
 debug_activate(struct timer_list *timer, unsigned long expires)
 {
 	debug_timer_activate(timer);
-	trace_timer_start(timer, expires);
+	trace_timer_start(timer, expires, tbase_get_deferrable(timer->base));
 }
 
 static inline void debug_deactivate(struct timer_list *timer)
@@ -1311,54 +1313,48 @@ cascade:
  * Check, if the next hrtimer event is before the next timer wheel
  * event:
  */
-static unsigned long cmp_next_hrtimer_event(unsigned long now,
-					    unsigned long expires)
+static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
 {
-	ktime_t hr_delta = hrtimer_get_next_event();
-	struct timespec tsdelta;
-	unsigned long delta;
-
-	if (hr_delta.tv64 == KTIME_MAX)
+	u64 nextevt = hrtimer_get_next_event();
+
+	/*
+	 * If high resolution timers are enabled
+	 * hrtimer_get_next_event() returns KTIME_MAX.
+	 */
+	if (expires <= nextevt)
 		return expires;
 
 	/*
-	 * Expired timer available, let it expire in the next tick
+	 * If the next timer is already expired, return the tick base
+	 * time so the tick is fired immediately.
 	 */
-	if (hr_delta.tv64 <= 0)
-		return now + 1;
-
-	tsdelta = ktime_to_timespec(hr_delta);
-	delta = timespec_to_jiffies(&tsdelta);
+	if (nextevt <= basem)
+		return basem;
 
 	/*
-	 * Limit the delta to the max value, which is checked in
-	 * tick_nohz_stop_sched_tick():
-	 */
-	if (delta > NEXT_TIMER_MAX_DELTA)
-		delta = NEXT_TIMER_MAX_DELTA;
-
-	/*
-	 * Take rounding errors in to account and make sure, that it
-	 * expires in the next tick. Otherwise we go into an endless
-	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
-	 * the timer softirq
+	 * Round up to the next jiffie. High resolution timers are
+	 * off, so the hrtimers are expired in the tick and we need to
+	 * make sure that this tick really expires the timer to avoid
+	 * a ping pong of the nohz stop code.
+	 *
+	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
 	 */
-	if (delta < 1)
-		delta = 1;
-	now += delta;
-	if (time_before(now, expires))
-		return now;
-	return expires;
+	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
 }
 
 /**
- * get_next_timer_interrupt - return the jiffy of the next pending timer
- * @now: current time (in jiffies)
+ * get_next_timer_interrupt - return the time (clock mono) of the next timer
+ * @basej:	base time jiffies
+ * @basem:	base time clock monotonic
+ *
+ * Returns the tick aligned clock monotonic time of the next pending
+ * timer or KTIME_MAX if no timer is pending.
 */
-unsigned long get_next_timer_interrupt(unsigned long now)
+u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
 	struct tvec_base *base = __this_cpu_read(tvec_bases);
-	unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
+	u64 expires = KTIME_MAX;
+	unsigned long nextevt;
 
 	/*
 	 * Pretend that there is no timer pending if the cpu is offline.
@@ -1371,14 +1367,15 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 	if (base->active_timers) {
 		if (time_before_eq(base->next_timer, base->timer_jiffies))
 			base->next_timer = __next_timer_interrupt(base);
-		expires = base->next_timer;
+		nextevt = base->next_timer;
+		if (time_before_eq(nextevt, basej))
+			expires = basem;
+		else
+			expires = basem + (nextevt - basej) * TICK_NSEC;
 	}
 	spin_unlock(&base->lock);
 
-	if (time_before_eq(expires, now))
-		return now;
-
-	return cmp_next_hrtimer_event(now, expires);
+	return cmp_next_hrtimer_event(basem, expires);
 }
 #endif
 
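get_next_timer_interrupt() now returns absolute CLOCK_MONOTONIC nanoseconds rather than a jiffy count: the base time plus the jiffy delta scaled by TICK_NSEC, while cmp_next_hrtimer_event() rounds a competing hrtimer up to a tick boundary. A self-contained illustration of both conversions, assuming HZ=1000:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* nanoseconds per jiffy, assuming HZ=1000 */

int main(void)
{
	uint64_t basej = 100000;		/* current jiffies */
	uint64_t basem = 250000000000ULL;	/* current clock mono, ns */
	uint64_t nextevt = 100042;		/* next wheel timer, jiffies */

	/* jiffies delta -> absolute monotonic nanoseconds */
	uint64_t expires = basem + (nextevt - basej) * TICK_NSEC;

	/* round an hrtimer expiry up to the next tick boundary */
	uint64_t hrt = 250013500000ULL;
	uint64_t aligned = ((hrt + TICK_NSEC - 1) / TICK_NSEC) * TICK_NSEC;

	printf("wheel: %llu ns, hrtimer aligned: %llu ns\n",
	       (unsigned long long)expires, (unsigned long long)aligned);
	return 0;	/* prints 250042000000 and 250014000000 */
}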
@@ -1409,8 +1406,6 @@ static void run_timer_softirq(struct softirq_action *h)
 {
 	struct tvec_base *base = __this_cpu_read(tvec_bases);
 
-	hrtimer_run_pending();
-
 	if (time_after_eq(jiffies, base->timer_jiffies))
 		__run_timers(base);
 }
@@ -1697,14 +1692,14 @@ unsigned long msleep_interruptible(unsigned int msecs)
 
 EXPORT_SYMBOL(msleep_interruptible);
 
-static int __sched do_usleep_range(unsigned long min, unsigned long max)
+static void __sched do_usleep_range(unsigned long min, unsigned long max)
 {
 	ktime_t kmin;
 	unsigned long delta;
 
 	kmin = ktime_set(0, min * NSEC_PER_USEC);
 	delta = (max - min) * NSEC_PER_USEC;
-	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
+	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
 }
 
 /**
 
@@ -1712,7 +1707,7 @@ static void __sched do_usleep_range(unsigned long min, unsigned long max)
  * @min: Minimum time in usecs to sleep
  * @max: Maximum time in usecs to sleep
 */
-void usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range(unsigned long min, unsigned long max)
 {
 	__set_current_state(TASK_UNINTERRUPTIBLE);
 	do_usleep_range(min, max);
@@ -35,13 +35,20 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
  * This allows printing both to /proc/timer_list and
  * to the console (on SysRq-Q):
  */
-#define SEQ_printf(m, x...)			\
- do {						\
-	if (m)					\
-		seq_printf(m, x);		\
-	else					\
-		printk(x);			\
- } while (0)
+__printf(2, 3)
+static void SEQ_printf(struct seq_file *m, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+
+	if (m)
+		seq_vprintf(m, fmt, args);
+	else
+		vprintk(fmt, args);
+
+	va_end(args);
+}
 
 static void print_name_offset(struct seq_file *m, void *sym)
 {
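Turning the macro into a real variadic function costs nothing here, and the __printf(2, 3) attribute means the compiler now type-checks every format string. Illustrative calls:

SEQ_printf(m, " .index: %d\n", base->index);	/* fine */
SEQ_printf(m, " .index: %d\n", base);		/* now warns: -Wformat */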
@@ -120,10 +127,10 @@ static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
 	SEQ_printf(m, " .base:       %pK\n", base);
-	SEQ_printf(m, " .index:      %d\n",
-			base->index);
-	SEQ_printf(m, " .resolution: %Lu nsecs\n",
-			(unsigned long long)ktime_to_ns(base->resolution));
+	SEQ_printf(m, " .index:      %d\n", base->index);
+
+	SEQ_printf(m, " .resolution: %u nsecs\n", (unsigned) hrtimer_resolution);
+
 	SEQ_printf(m, " .get_time:   ");
 	print_name_offset(m, base->get_time);
 	SEQ_printf(m,   "\n");
@@ -158,7 +165,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
 	P(nr_events);
 	P(nr_retries);
 	P(nr_hangs);
-	P_ns(max_hang_time);
+	P(max_hang_time);
 #endif
 #undef P
 #undef P_ns
 
@@ -184,7 +191,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
 		P_ns(idle_sleeptime);
 		P_ns(iowait_sleeptime);
 		P(last_jiffies);
-		P(next_jiffies);
+		P(next_timer);
 		P_ns(idle_expires);
 		SEQ_printf(m, "jiffies: %Lu\n",
 			   (unsigned long long)jiffies);
@@ -251,6 +258,12 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 		SEQ_printf(m, "\n");
 	}
 
+	if (dev->set_state_oneshot_stopped) {
+		SEQ_printf(m, " oneshot stopped: ");
+		print_name_offset(m, dev->set_state_oneshot_stopped);
+		SEQ_printf(m, "\n");
+	}
+
 	if (dev->tick_resume) {
 		SEQ_printf(m, " resume:   ");
 		print_name_offset(m, dev->tick_resume);
@@ -269,11 +282,11 @@ static void timer_list_show_tickdevices_header(struct seq_file *m)
 {
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	print_tickdevice(m, tick_get_broadcast_device(), -1);
-	SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
-		   cpumask_bits(tick_get_broadcast_mask())[0]);
+	SEQ_printf(m, "tick_broadcast_mask: %*pb\n",
+		   cpumask_pr_args(tick_get_broadcast_mask()));
 #ifdef CONFIG_TICK_ONESHOT
-	SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
-		   cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
+	SEQ_printf(m, "tick_broadcast_oneshot_mask: %*pb\n",
+		   cpumask_pr_args(tick_get_broadcast_oneshot_mask()));
 #endif
 	SEQ_printf(m, "\n");
 #endif
 
@@ -282,7 +295,7 @@ static void timer_list_show_tickdevices_header(struct seq_file *m)
 
 static inline void timer_list_header(struct seq_file *m, u64 now)
 {
-	SEQ_printf(m, "Timer List Version: v0.7\n");
+	SEQ_printf(m, "Timer List Version: v0.8\n");
 	SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
 	SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
 	SEQ_printf(m, "\n");