From f1697318f4c7b263434140cb9a7dd4773ed65515 Mon Sep 17 00:00:00 2001
From: "yang.yang29@zte.com.cn"
Date: Fri, 2 Jun 2023 18:46:26 +0800
Subject: [PATCH 1/5] kernel/sched/core: add migrate_disable()

commit a73bbe4b98f56b8c69e3932df7e083d1d94f5109 upstream.

---
 include/linux/preempt.h |  23 +++
 include/linux/sched.h   |   7 +++
 include/linux/smp.h     |   3 +
 kernel/sched/core.c     | 130 +++++++++++++++++++++++++++++++++++++++-
 kernel/sched/debug.c    |   4 ++
 5 files changed, 165 insertions(+), 2 deletions(-)

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f10333a2b676..3548856e1869 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -185,6 +185,22 @@ do { \
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 
+#ifdef CONFIG_SMP
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+int __migrate_disabled(struct task_struct *p);
+
+#else
+#define migrate_disable()		barrier()
+#define migrate_enable()		barrier()
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
@@ -253,6 +269,13 @@ do { \
 #define preempt_enable_notrace()	barrier()
 #define preemptible()			0
 
+#define migrate_disable()		barrier()
+#define migrate_enable()		barrier()
+
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
 #endif /* CONFIG_PREEMPT_COUNT */
 
 #ifdef MODULE
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e12457d73520..999f6c2572bd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -747,6 +747,13 @@ struct task_struct {
 	int				nr_cpus_allowed;
 	const cpumask_t			*cpus_ptr;
 	cpumask_t			cpus_mask;
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	int				migrate_disable;
+	int				migrate_disable_update;
+# ifdef CONFIG_SCHED_DEBUG
+	int				migrate_disable_atomic;
+# endif
+#endif
 
 #ifdef CONFIG_PREEMPT_RCU
 	int				rcu_read_lock_nesting;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12b82..5801e516ba63 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void)
 #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
 #define put_cpu()		preempt_enable()
 
+#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light()		migrate_enable()
+
 /*
  * Callback to arch code if there's nosmp or maxcpus=0 on the
  * boot command line:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9098f6e3ea61..96ff0a2a61ff 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1033,7 +1033,15 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+int __migrate_disabled(struct task_struct *p)
+{
+	return p->migrate_disable;
+}
+#endif
+
+static void __do_set_cpus_allowed_tail(struct task_struct *p,
+				       const struct cpumask *new_mask)
 {
 	struct rq *rq = task_rq(p);
 	bool queued, running;
@@ -1062,6 +1070,20 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 		set_curr_task(rq, p);
 }
 
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	if (__migrate_disabled(p)) {
+		lockdep_assert_held(&p->pi_lock);
+
+		cpumask_copy(&p->cpus_mask, new_mask);
+		p->migrate_disable_update = 1;
+		return;
+	}
+#endif
+	__do_set_cpus_allowed_tail(p, new_mask);
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
@@ -1121,9 +1143,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	}
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
 		goto out;
 
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	if (__migrate_disabled(p)) {
+		p->migrate_disable_update = 1;
+		goto out;
+	}
+#endif
+
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
@@ -7395,6 +7424,103 @@ const u32 sched_prio_to_wmult[40] = {
 
 #undef CREATE_TRACE_POINTS
 
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+
+void migrate_disable(void)
+{
+	struct task_struct *p = current;
+
+	if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic++;
+#endif
+		return;
+	}
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+	if (p->migrate_disable) {
+		p->migrate_disable++;
+		return;
+	}
+
+	preempt_disable();
+	p->migrate_disable = 1;
+
+	p->cpus_ptr = cpumask_of(smp_processor_id());
+	p->nr_cpus_allowed = 1;
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+	struct task_struct *p = current;
+
+	if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic--;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+	if (p->migrate_disable > 1) {
+		p->migrate_disable--;
+		return;
+	}
+
+	preempt_disable();
+
+	p->cpus_ptr = &p->cpus_mask;
+	p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
+	p->migrate_disable = 0;
+
+	if (p->migrate_disable_update) {
+		struct rq *rq;
+		struct rq_flags rf;
+
+		rq = task_rq_lock(p, &rf);
+		update_rq_clock(rq);
+
+		__do_set_cpus_allowed_tail(p, &p->cpus_mask);
+		task_rq_unlock(rq, p, &rf);
+
+		p->migrate_disable_update = 0;
+
+		WARN_ON(smp_processor_id() != task_cpu(p));
+		if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+			const struct cpumask *cpu_valid_mask = cpu_active_mask;
+			struct migration_arg arg;
+			unsigned int dest_cpu;
+
+			if (p->flags & PF_KTHREAD) {
+				/*
+				 * Kernel threads are allowed on online && !active CPUs
+				 */
+				cpu_valid_mask = cpu_online_mask;
+			}
+			dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
+			arg.task = p;
+			arg.dest_cpu = dest_cpu;
+
+			preempt_enable();
+			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+			tlb_migrate_finish(p->mm);
+			return;
+		}
+	}
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+#endif
+
 void sched_task_release(struct task_struct *p)
 {
 }
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index fd463da512b5..057c610f0004 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -991,6 +991,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 		P(dl.runtime);
 		P(dl.deadline);
 	}
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	P(migrate_disable);
+#endif
+	P(nr_cpus_allowed);
 #undef PN_SCHEDSTAT
 #undef PN
 #undef __PN
--
Gitee
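
[Illustration, not part of the series: the primitive this patch adds lets a
task pin itself to its current CPU while staying preemptible, so sleeping
locks remain legal where preempt_disable() would forbid them on -RT. A
minimal sketch of the intended use via the new get_cpu_light()/put_cpu_light()
helpers; the per-CPU variable and function names are made up for the example.]

    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(int, example_hits);	/* hypothetical per-CPU counter */

    static void example_touch_percpu(void)
    {
    	int cpu = get_cpu_light();	/* migrate_disable() + smp_processor_id() */

    	per_cpu(example_hits, cpu)++;
    	/*
    	 * The task may be preempted and may take sleeping locks here;
    	 * the scheduler keeps it on @cpu until put_cpu_light().
    	 */
    	put_cpu_light();		/* migrate_enable() */
    }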
From 73c8dbc5f6fe8c3bba6eed945a6c20978d512504 Mon Sep 17 00:00:00 2001
From: "yang.yang29@zte.com.cn"
Date: Fri, 2 Jun 2023 18:48:56 +0800
Subject: [PATCH 2/5] sched/migrate_disable: Add export_symbol_gpl for
 __migrate_disabled

commit ac059eda6d9782325d4d8432b5bee7638eafd90a upstream.

Jonathan reported that lttng/modules can't use __migrate_disabled().
This function is only used by sched/core itself and the tracing
infrastructure to report the migrate counter (lttng probably does the
same). Since the rework of migrate_disable() it moved from sched.h to
preempt.h and became an exported function instead of a "static inline"
due to the header recursion of preempt vs sched.

Since the compiler inlines the function for sched/core usage, add an
EXPORT_SYMBOL_GPL to allow module/LTTNG usage.

Reported-by: Jonathan Rajott
Signed-off-by: Sebastian Andrzej Siewior
---
 kernel/sched/core.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 96ff0a2a61ff..f3709be5f5a0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1038,6 +1038,7 @@ int __migrate_disabled(struct task_struct *p)
 {
 	return p->migrate_disable;
 }
+EXPORT_SYMBOL_GPL(__migrate_disabled);
 #endif
 
 static void __do_set_cpus_allowed_tail(struct task_struct *p,
--
Gitee
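
[Illustration, not part of the series: with the export in place, a GPL module
such as an LTTng-style tracer can read the counter; the probe function below
is hypothetical.]

    #include <linux/preempt.h>
    #include <linux/sched.h>

    /* Report whether the traced task currently has migration disabled. */
    static int example_probe_migrate_state(struct task_struct *p)
    {
    	return __migrate_disabled(p);	/* resolvable by modules after this patch */
    }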
From 1b0ec95f316f965b26f1eab3d4e547c278e5206b Mon Sep 17 00:00:00 2001
From: "yang.yang29@zte.com.cn"
Date: Fri, 2 Jun 2023 19:08:06 +0800
Subject: [PATCH 3/5] arm: at91: do not disable/enable clocks in a row

commit b4f795e23aa449ae7116c445646a5ee51f90c022 upstream.

Currently the driver will disable the clock and enable it one line
later if it is switching from periodic mode into one shot. This cycle
can be avoided; it also causes a needless warning on -RT.

Signed-off-by: Sebastian Andrzej Siewior
---
 drivers/clocksource/tcb_clksrc.c | 33 ++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)

diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 43f4d5c4d6fa..de6baf564dfe 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -126,6 +126,7 @@ static struct clocksource clksrc = {
 struct tc_clkevt_device {
 	struct clock_event_device clkevt;
 	struct clk *clk;
+	bool clk_enabled;
 	void __iomem *regs;
 };
 
@@ -143,6 +144,24 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
  */
 static u32 timer_clock;
 
+static void tc_clk_disable(struct clock_event_device *d)
+{
+	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+
+	clk_disable(tcd->clk);
+	tcd->clk_enabled = false;
+}
+
+static void tc_clk_enable(struct clock_event_device *d)
+{
+	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+
+	if (tcd->clk_enabled)
+		return;
+	clk_enable(tcd->clk);
+	tcd->clk_enabled = true;
+}
+
 static int tc_shutdown(struct clock_event_device *d)
 {
 	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
@@ -150,8 +169,14 @@ static int tc_shutdown(struct clock_event_device *d)
 
 	writel(0xff, regs + ATMEL_TC_REG(2, IDR));
 	writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+	return 0;
+}
+
+static int tc_shutdown_clk_off(struct clock_event_device *d)
+{
+	tc_shutdown(d);
 	if (!clockevent_state_detached(d))
-		clk_disable(tcd->clk);
+		tc_clk_disable(d);
 
 	return 0;
 }
@@ -164,7 +189,7 @@ static int tc_set_oneshot(struct clock_event_device *d)
 	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
 		tc_shutdown(d);
 
-	clk_enable(tcd->clk);
+	tc_clk_enable(d);
 
 	/* slow clock, count up to RC, then irq and stop */
 	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
@@ -186,7 +211,7 @@ static int tc_set_periodic(struct clock_event_device *d)
 	/* By not making the gentime core emulate periodic mode on top
 	 * of oneshot, we get lower overhead and improved accuracy.
 	 */
-	clk_enable(tcd->clk);
+	tc_clk_enable(d);
 
 	/* slow clock, count up to RC, then irq and restart */
 	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
@@ -220,7 +245,7 @@ static struct tc_clkevt_device clkevt = {
 		/* Should be lower than at91rm9200's system timer */
 		.rating			= 125,
 		.set_next_event		= tc_next_event,
-		.set_state_shutdown	= tc_shutdown,
+		.set_state_shutdown	= tc_shutdown_clk_off,
 		.set_state_periodic	= tc_set_periodic,
 		.set_state_oneshot	= tc_set_oneshot,
 	},
--
Gitee
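
[Illustration, not part of the series: the clk_enabled bool is a generic
idempotent-enable guard. A self-contained restatement in plain C with stand-in
clock functions, showing why the periodic-to-oneshot switch no longer bounces
the clock.]

    #include <stdbool.h>
    #include <stdio.h>

    static int clk_refcount;
    static void clk_enable(void)  { printf("clk on  (%d)\n", ++clk_refcount); }
    static void clk_disable(void) { printf("clk off (%d)\n", --clk_refcount); }

    static bool clk_enabled;

    /* Idempotent wrapper: enabling an already-enabled clock is a no-op,
     * mirroring tc_clk_enable() above. */
    static void guarded_enable(void)
    {
    	if (clk_enabled)
    		return;
    	clk_enable();
    	clk_enabled = true;
    }

    static void guarded_disable(void)
    {
    	clk_disable();
    	clk_enabled = false;
    }

    int main(void)
    {
    	guarded_enable();	/* enter periodic mode: clock on */
    	guarded_enable();	/* switch to oneshot: no off/on cycle */
    	guarded_disable();	/* full shutdown: clock off */
    	return 0;
    }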
From b1eaaa34732472467a7508de88984e102f51134f Mon Sep 17 00:00:00 2001
From: "yang.yang29@zte.com.cn"
Date: Fri, 2 Jun 2023 19:08:25 +0800
Subject: [PATCH 4/5] clocksource: TCLIB: Allow higher clock rates for clock
 events
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 45b7375a7348d45d17515da0b294d479805fd855 upstream.

By default the TCLIB uses the 32 KiHz base clock rate for clock events.
Add a compile time selection to allow higher clock resolution.

(fixed up by Sami Pietikäinen)

Signed-off-by: Benedikt Spranger
Signed-off-by: Thomas Gleixner
---
 drivers/clocksource/tcb_clksrc.c | 36 +++++++++++++++++++-------------
 drivers/misc/Kconfig             | 12 +++++++++--
 2 files changed, 31 insertions(+), 17 deletions(-)

diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index de6baf564dfe..ba15242a6066 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -25,8 +25,7 @@
  * this 32 bit free-running counter. the second channel is not used.
  *
  * - The third channel may be used to provide a 16-bit clockevent
- *   source, used in either periodic or oneshot mode.  This runs
- *   at 32 KiHZ, and can handle delays of up to two seconds.
+ *   source, used in either periodic or oneshot mode.
  *
  * A boot clocksource and clockevent source are also currently needed,
  * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
@@ -127,6 +126,7 @@ struct tc_clkevt_device {
 	struct clock_event_device clkevt;
 	struct clk *clk;
 	bool clk_enabled;
+	u32 freq;
 	void __iomem *regs;
 };
 
@@ -135,13 +135,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
 	return container_of(clkevt, struct tc_clkevt_device, clkevt);
 }
 
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
 static u32 timer_clock;
 
 static void tc_clk_disable(struct clock_event_device *d)
@@ -191,7 +184,7 @@ static int tc_set_oneshot(struct clock_event_device *d)
 
 	tc_clk_enable(d);
 
-	/* slow clock, count up to RC, then irq and stop */
+	/* count up to RC, then irq and stop */
 	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
 	       ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
 	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -213,10 +206,10 @@ static int tc_set_periodic(struct clock_event_device *d)
 	 */
 	tc_clk_enable(d);
 
-	/* slow clock, count up to RC, then irq and restart */
+	/* count up to RC, then irq and restart */
 	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
 	       regs + ATMEL_TC_REG(2, CMR));
-	writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+	writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
 
 	/* Enable clock and interrupts on RC compare */
 	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -243,7 +236,11 @@ static struct tc_clkevt_device clkevt = {
 		.features		= CLOCK_EVT_FEAT_PERIODIC |
 					  CLOCK_EVT_FEAT_ONESHOT,
 		/* Should be lower than at91rm9200's system timer */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
 		.rating			= 125,
+#else
+		.rating			= 200,
+#endif
 		.set_next_event		= tc_next_event,
 		.set_state_shutdown	= tc_shutdown_clk_off,
 		.set_state_periodic	= tc_set_periodic,
@@ -265,8 +262,9 @@ static irqreturn_t ch2_irq(int irq, void *handle)
 	return IRQ_NONE;
 }
 
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
 {
+	unsigned divisor = atmel_tc_divisors[divisor_idx];
 	int ret;
 	struct clk *t2_clk = tc->clk[2];
 	int irq = tc->irq[2];
@@ -287,7 +285,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 	clkevt.regs = tc->regs;
 	clkevt.clk = t2_clk;
 
-	timer_clock = clk32k_divisor_idx;
+	timer_clock = divisor_idx;
+	if (!divisor)
+		clkevt.freq = 32768;
+	else
+		clkevt.freq = clk_get_rate(t2_clk) / divisor;
 
 	clkevt.clkevt.cpumask = cpumask_of(0);
 
@@ -298,7 +300,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 		return ret;
 	}
 
-	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+	clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
 
 	return ret;
 }
@@ -435,7 +437,11 @@ static int __init tcb_clksrc_init(void)
 		goto err_disable_t1;
 
 	/* channel 2: periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
 	ret = setup_clkevents(tc, clk32k_divisor_idx);
+#else
+	ret = setup_clkevents(tc, best_divisor_idx);
+#endif
 	if (ret)
 		goto err_unregister_clksrc;
 
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3726eacdf65d..0900dec7ec04 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -69,8 +69,7 @@ config ATMEL_TCB_CLKSRC
 	  are combined to make a single 32-bit timer.
 
 	  When GENERIC_CLOCKEVENTS is defined, the third timer channel
-	  may be used as a clock event device supporting oneshot mode
-	  (delays of up to two seconds) based on the 32 KiHz clock.
+	  may be used as a clock event device supporting oneshot mode.
 
 config ATMEL_TCB_CLKSRC_BLOCK
 	int
@@ -83,6 +82,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
 	  TC can be used for other purposes, such as PWM generation and
 	  interval timing.
 
+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+	bool "TC Block use 32 KiHz clock"
+	depends on ATMEL_TCB_CLKSRC
+	default y
+	help
+	  Select this to use 32 KiHz base clock rate as TC block clock
+	  source for clock events.
+
+
 config DUMMY_IRQ
 	tristate "Dummy IRQ handler"
 	default n
--
Gitee
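
[Illustration, not part of the series: the periodic reload value written to RC
rounds one tick to the nearest count, (freq + HZ/2) / HZ. A quick check of the
arithmetic for both configurations; HZ and the divided-clock rate below are
illustrative.]

    #include <stdio.h>

    #define HZ 100	/* illustrative tick rate */

    /* Reload value for one tick, as tc_set_periodic() now derives it
     * from the runtime-selected clock frequency. */
    static unsigned int periodic_rc(unsigned int freq)
    {
    	return (freq + HZ / 2) / HZ;
    }

    int main(void)
    {
    	/* 32 KiHz slow clock: 328 counts per 10 ms tick (~30.5 us each) */
    	printf("32768 Hz   -> RC = %u\n", periodic_rc(32768));
    	/* e.g. a 132 MHz master clock divided by 32 = 4.125 MHz */
    	printf("4125000 Hz -> RC = %u\n", periodic_rc(4125000));
    	return 0;
    }

Whichever divisor tcb_clksrc_init() picks, the result must fit the 16-bit
counter, which is why setup_clkevents() registers the device with a maximum
delta of 0xffff.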
From 7248a996a9fbffb7ace1a9c560b42dab76e754b4 Mon Sep 17 00:00:00 2001
From: "yang.yang29@zte.com.cn"
Date: Fri, 2 Jun 2023 19:08:56 +0800
Subject: [PATCH 5/5] timekeeping: Split jiffies seqlock

commit fe98a4f2036225a3ccb28c230f5245fa0eddec4c upstream.

Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so
it can be taken in atomic context on RT.

Signed-off-by: Thomas Gleixner
---
 kernel/time/jiffies.c     |  7 ++++---
 kernel/time/tick-common.c | 10 ++++++----
 kernel/time/tick-sched.c  | 19 ++++++++++++-------
 kernel/time/timekeeping.c |  6 ++++--
 kernel/time/timekeeping.h |  3 ++-
 5 files changed, 28 insertions(+), 17 deletions(-)

diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 497719127bf9..62acb8914c9e 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
 	.max_cycles	= 10,
 };
 
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
+__cacheline_aligned_in_smp seqcount_t jiffies_seq;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void)
@@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
 	u64 ret;
 
 	do {
-		seq = read_seqbegin(&jiffies_lock);
+		seq = read_seqcount_begin(&jiffies_seq);
 		ret = jiffies_64;
-	} while (read_seqretry(&jiffies_lock, seq));
+	} while (read_seqcount_retry(&jiffies_seq, seq));
 	return ret;
 }
 EXPORT_SYMBOL(get_jiffies_64);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index a02e0f6b287c..32f5101f07ce 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -79,13 +79,15 @@ int tick_is_oneshot_available(void)
 static void tick_periodic(int cpu)
 {
 	if (tick_do_timer_cpu == cpu) {
-		write_seqlock(&jiffies_lock);
+		raw_spin_lock(&jiffies_lock);
+		write_seqcount_begin(&jiffies_seq);
 
 		/* Keep track of the next tick event */
 		tick_next_period = ktime_add(tick_next_period, tick_period);
 
 		do_timer(1);
-		write_sequnlock(&jiffies_lock);
+		write_seqcount_end(&jiffies_seq);
+		raw_spin_unlock(&jiffies_lock);
 		update_wall_time();
 	}
 
@@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 		ktime_t next;
 
 		do {
-			seq = read_seqbegin(&jiffies_lock);
+			seq = read_seqcount_begin(&jiffies_seq);
 			next = tick_next_period;
-		} while (read_seqretry(&jiffies_lock, seq));
+		} while (read_seqcount_retry(&jiffies_seq, seq));
 
 		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a3b89cfda81a..b4e5bd70b0c3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -67,7 +67,8 @@ static void tick_do_update_jiffies64(ktime_t now)
 		return;
 
 	/* Reevaluate with jiffies_lock held */
-	write_seqlock(&jiffies_lock);
+	raw_spin_lock(&jiffies_lock);
+	write_seqcount_begin(&jiffies_seq);
 
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta >= tick_period) {
@@ -90,10 +91,12 @@ static void tick_do_update_jiffies64(ktime_t now)
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	} else {
-		write_sequnlock(&jiffies_lock);
+		write_seqcount_end(&jiffies_seq);
+		raw_spin_unlock(&jiffies_lock);
 		return;
 	}
-	write_sequnlock(&jiffies_lock);
+	write_seqcount_end(&jiffies_seq);
+	raw_spin_unlock(&jiffies_lock);
 	update_wall_time();
 }
 
@@ -104,12 +107,14 @@ static ktime_t tick_init_jiffy_update(void)
 {
 	ktime_t period;
 
-	write_seqlock(&jiffies_lock);
+	raw_spin_lock(&jiffies_lock);
+	write_seqcount_begin(&jiffies_seq);
 	/* Did we start the jiffies update yet ? */
 	if (last_jiffies_update == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
-	write_sequnlock(&jiffies_lock);
+	write_seqcount_end(&jiffies_seq);
+	raw_spin_unlock(&jiffies_lock);
 	return period;
 }
 
@@ -652,10 +657,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
-		seq = read_seqbegin(&jiffies_lock);
+		seq = read_seqcount_begin(&jiffies_seq);
 		basemono = last_jiffies_update;
 		basejiff = jiffies;
-	} while (read_seqretry(&jiffies_lock, seq));
+	} while (read_seqcount_retry(&jiffies_seq, seq));
 	ts->last_jiffies = basejiff;
 	ts->timer_expires_base = basemono;
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 76840557ee74..5205f836bb1a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2410,8 +2410,10 @@ EXPORT_SYMBOL(hardpps);
  */
 void xtime_update(unsigned long ticks)
 {
-	write_seqlock(&jiffies_lock);
+	raw_spin_lock(&jiffies_lock);
+	write_seqcount_begin(&jiffies_seq);
 	do_timer(ticks);
-	write_sequnlock(&jiffies_lock);
+	write_seqcount_end(&jiffies_seq);
+	raw_spin_unlock(&jiffies_lock);
 	update_wall_time();
 }
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 141ab3ab0354..099737f6f10c 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -25,7 +25,8 @@ static inline void sched_clock_resume(void) { }
 extern void do_timer(unsigned long ticks);
 extern void update_wall_time(void);
 
-extern seqlock_t jiffies_lock;
+extern raw_spinlock_t jiffies_lock;
+extern seqcount_t jiffies_seq;
 
 #define CS_NAME_LEN	32
--
Gitee
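
[Illustration, not part of the series: a seqlock_t bundles writer exclusion
and reader consistency in one object, but on RT its internal spinlock_t
becomes a sleeping lock, which the hard-irq tick path must not take. The
split keeps the two roles separate; the resulting pattern, condensed from
the hunks above.]

    /* Writer (tick path): the raw spinlock serializes writers even on RT,
     * the seqcount publishes the update to lockless readers. */
    raw_spin_lock(&jiffies_lock);
    write_seqcount_begin(&jiffies_seq);
    /* ... advance jiffies_64 / tick_next_period ... */
    write_seqcount_end(&jiffies_seq);
    raw_spin_unlock(&jiffies_lock);

    /* Reader: retry loop on the seqcount only, never blocks a writer. */
    do {
    	seq = read_seqcount_begin(&jiffies_seq);
    	/* ... snapshot jiffies_64 ... */
    } while (read_seqcount_retry(&jiffies_seq, seq));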