diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 40e9b5a12732db0b9413b93a136e15215f402232..19abb6c3eb73f876c6054805eee015921b96e1b3 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -368,8 +368,7 @@ TRACE_EVENT(itimer_expire,
 		tick_dep_name(PERF_EVENTS)		\
 		tick_dep_name(SCHED)			\
 		tick_dep_name(CLOCK_UNSTABLE)		\
-		tick_dep_name(RCU)			\
-		tick_dep_name_end(RCU_EXP)
+		tick_dep_name_end(RCU)
 
 #undef tick_dep_name
 #undef tick_dep_mask_name
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index a9530e866e5f1b755e5798a5de68577b58a31b4d..36d7464c8962543d6b29fa43f3c70d28c3c41362 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -331,7 +331,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 	bc_local = tick_do_periodic_broadcast();
 
 	if (clockevent_state_oneshot(dev)) {
-		ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);
+		ktime_t next = ktime_add(dev->next_event, tick_period);
 
 		clockevents_program_event(dev, next, true);
 	}
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 2b7448ae5b4785c0aafbedd271c4de9e6d0a7ba8..6c9c342dd0e53a70970b7daf1218af0917a24985 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -30,6 +30,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  * Tick next event: keeps track of the tick time
  */
 ktime_t tick_next_period;
+ktime_t tick_period;
 
 /*
  * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
@@ -87,7 +88,7 @@ static void tick_periodic(int cpu)
 		write_seqcount_begin(&jiffies_seq);
 
 		/* Keep track of the next tick event */
-		tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);
+		tick_next_period = ktime_add(tick_next_period, tick_period);
 
 		do_timer(1);
 		write_seqcount_end(&jiffies_seq);
@@ -126,7 +127,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
 		 * Setup the next period for devices, which do not have
 		 * periodic mode:
 		 */
-		next = ktime_add_ns(next, TICK_NSEC);
+		next = ktime_add(next, tick_period);
 
 		if (!clockevents_program_event(dev, next, false))
 			return;
@@ -172,7 +173,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 		for (;;) {
 			if (!clockevents_program_event(dev, next, false))
 				return;
-			next = ktime_add_ns(next, TICK_NSEC);
+			next = ktime_add(next, tick_period);
 		}
 	}
 }
@@ -216,19 +217,10 @@ static void tick_setup_device(struct tick_device *td,
 		 * this cpu:
 		 */
 		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
-			ktime_t next_p;
-			u32 rem;
-
 			tick_do_timer_cpu = cpu;
 
-			next_p = ktime_get();
-			div_u64_rem(next_p, TICK_NSEC, &rem);
-			if (rem) {
-				next_p -= rem;
-				next_p += TICK_NSEC;
-			}
-
-			tick_next_period = next_p;
+			tick_next_period = ktime_get();
+			tick_period = NSEC_PER_SEC / HZ;
 #ifdef CONFIG_NO_HZ_FULL
 			/*
 			 * The boot CPU may be nohz_full, in which case set
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index e61c1244e7d464c0fdc6c2facb664bf33ac5748a..5294f5b1f955015cc41cb8443716eaed1b3e1166 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -15,6 +15,7 @@
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern ktime_t tick_next_period;
+extern ktime_t tick_period;
 extern int tick_do_timer_cpu __read_mostly;
 
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 17dc3f53efef86c8cafa2159751e4e52fa16c31d..e4e0d032126bc3daab47e6a7cde9d71181cab925 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -53,67 +53,49 @@ static ktime_t last_jiffies_update;
  */
 static void tick_do_update_jiffies64(ktime_t now)
 {
-	unsigned long ticks = 1;
+	unsigned long ticks = 0;
 	ktime_t delta;
 
 	/*
-	 * Do a quick check without holding jiffies_lock. The READ_ONCE()
-	 * pairs with the update done later in this function.
-	 *
-	 * This is also an intentional data race which is even safe on
-	 * 32bit in theory. If there is a concurrent update then the check
-	 * might give a random answer. It does not matter because if it
-	 * returns then the concurrent update is already taking care, if it
-	 * falls through then it will pointlessly contend on jiffies_lock.
-	 *
-	 * Though there is one nasty case on 32bit due to store tearing of
-	 * the 64bit value. If the first 32bit store makes the quick check
-	 * return on all other CPUs and the writing CPU context gets
-	 * delayed to complete the second store (scheduled out on virt)
-	 * then jiffies can become stale for up to ~2^32 nanoseconds
-	 * without noticing. After that point all CPUs will wait for
-	 * jiffies lock.
-	 *
-	 * OTOH, this is not any different than the situation with NOHZ=off
-	 * where one CPU is responsible for updating jiffies and
-	 * timekeeping. If that CPU goes out for lunch then all other CPUs
-	 * will operate on stale jiffies until it decides to come back.
+	 * Do a quick check without holding jiffies_lock:
+	 * The READ_ONCE() pairs with two updates done later in this function.
 	 */
-	if (ktime_before(now, READ_ONCE(tick_next_period)))
+	delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
+	if (delta < tick_period)
 		return;
 
 	/* Reevaluate with jiffies_lock held */
 	raw_spin_lock(&jiffies_lock);
-	if (ktime_before(now, tick_next_period)) {
-		raw_spin_unlock(&jiffies_lock);
-		return;
-	}
-
 	write_seqcount_begin(&jiffies_seq);
 
-	delta = ktime_sub(now, tick_next_period);
-	if (unlikely(delta >= TICK_NSEC)) {
-		/* Slow path for long idle sleep times */
-		s64 incr = TICK_NSEC;
+	delta = ktime_sub(now, last_jiffies_update);
+	if (delta >= tick_period) {
 
-		ticks += ktime_divns(delta, incr);
+		delta = ktime_sub(delta, tick_period);
+		/* Pairs with the lockless read in this function. */
+		WRITE_ONCE(last_jiffies_update,
+			   ktime_add(last_jiffies_update, tick_period));
 
-		last_jiffies_update = ktime_add_ns(last_jiffies_update,
-						   incr * ticks);
-	} else {
-		last_jiffies_update = ktime_add_ns(last_jiffies_update,
-						   TICK_NSEC);
-	}
+		/* Slow path for long timeouts */
+		if (unlikely(delta >= tick_period)) {
+			s64 incr = ktime_to_ns(tick_period);
 
-	do_timer(ticks);
+			ticks = ktime_divns(delta, incr);
 
-	/*
-	 * Keep the tick_next_period variable up to date. WRITE_ONCE()
-	 * pairs with the READ_ONCE() in the lockless quick check above.
-	 */
-	WRITE_ONCE(tick_next_period,
-		   ktime_add_ns(last_jiffies_update, TICK_NSEC));
+			/* Pairs with the lockless read in this function. */
+			WRITE_ONCE(last_jiffies_update,
+				   ktime_add_ns(last_jiffies_update,
+						incr * ticks));
+		}
+		do_timer(++ticks);
 
+		/* Keep the tick_next_period variable up to date */
+		tick_next_period = ktime_add(last_jiffies_update, tick_period);
+	} else {
+		write_seqcount_end(&jiffies_seq);
+		raw_spin_unlock(&jiffies_lock);
+		return;
+	}
 	write_seqcount_end(&jiffies_seq);
 	raw_spin_unlock(&jiffies_lock);
 	update_wall_time();
@@ -231,11 +213,6 @@ static bool check_tick_dependency(atomic_t *dep)
 		return true;
 	}
 
-	if (val & TICK_DEP_MASK_RCU_EXP) {
-		trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
-		return true;
-	}
-
 	return false;
 }
 
@@ -688,7 +665,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 
 	/* Forward the time to expire in the future */
-	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
+	hrtimer_forward(&ts->sched_timer, now, tick_period);
 
 	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
 		hrtimer_start_expires(&ts->sched_timer,
@@ -1250,7 +1227,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	if (unlikely(ts->tick_stopped))
 		return;
 
-	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
+	hrtimer_forward(&ts->sched_timer, now, tick_period);
 	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 }
 
@@ -1287,7 +1264,7 @@ static void tick_nohz_switch_to_nohz(void)
 	next = tick_init_jiffy_update();
 
 	hrtimer_set_expires(&ts->sched_timer, next);
-	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
+	hrtimer_forward_now(&ts->sched_timer, tick_period);
 	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
@@ -1353,7 +1330,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	if (unlikely(ts->tick_stopped))
 		return HRTIMER_NORESTART;
 
-	hrtimer_forward(timer, now, TICK_NSEC);
+	hrtimer_forward(timer, now, tick_period);
 
 	return HRTIMER_RESTART;
 }
@@ -1387,13 +1364,13 @@ void tick_setup_sched_timer(void)
 
 	/* Offset the tick to avert jiffies_lock contention. */
 	if (sched_skew_tick) {
-		u64 offset = TICK_NSEC >> 1;
+		u64 offset = ktime_to_ns(tick_period) >> 1;
 		do_div(offset, num_possible_cpus());
 		offset *= smp_processor_id();
 		hrtimer_add_expires_ns(&ts->sched_timer, offset);
 	}
 
-	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
+	hrtimer_forward(&ts->sched_timer, now, tick_period);
 	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
 	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
 }
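
For reference, the core of this change is the restored tick_do_update_jiffies64() flow: a lockless quick check against last_jiffies_update, followed by a re-check under jiffies_lock that advances jiffies in tick_period-sized steps. The standalone C sketch below only illustrates that pattern and is not kernel code: pthread_mutex_t stands in for jiffies_lock/jiffies_seq, the __atomic builtins stand in for READ_ONCE()/WRITE_ONCE(), and HZ=250 plus the helper name do_update_jiffies64() are assumptions made for the example.

/*
 * Userspace sketch of the jiffies update pattern restored above:
 * quick lockless check, then a locked re-check that accounts for
 * all full tick periods elapsed since the last update.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;				/* nanoseconds */

static const ktime_t tick_period = 1000000000LL / 250;	/* NSEC_PER_SEC / HZ */
static ktime_t last_jiffies_update;
static uint64_t jiffies;
static pthread_mutex_t jiffies_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_update_jiffies64(ktime_t now)
{
	uint64_t ticks = 0;
	ktime_t delta;

	/* Quick check without the lock: nothing to do before a full period. */
	delta = now - __atomic_load_n(&last_jiffies_update, __ATOMIC_RELAXED);
	if (delta < tick_period)
		return;

	/* Reevaluate with the lock held. */
	pthread_mutex_lock(&jiffies_lock);
	delta = now - last_jiffies_update;
	if (delta >= tick_period) {
		delta -= tick_period;
		__atomic_store_n(&last_jiffies_update,
				 last_jiffies_update + tick_period,
				 __ATOMIC_RELAXED);

		/* Slow path: fold several missed periods into one update. */
		if (delta >= tick_period) {
			ticks = delta / tick_period;
			__atomic_store_n(&last_jiffies_update,
					 last_jiffies_update + ticks * tick_period,
					 __ATOMIC_RELAXED);
		}
		jiffies += ticks + 1;		/* the do_timer(++ticks) step */
	}
	pthread_mutex_unlock(&jiffies_lock);
}

int main(void)
{
	do_update_jiffies64(3 * tick_period + 100);	/* three full periods elapsed */
	printf("jiffies = %llu\n", (unsigned long long)jiffies);	/* prints 3 */
	return 0;
}

The quick check is what both the old and the restored code keep cheap: callers that arrive less than one tick_period after the previous update return without ever touching the lock.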