
Commit f1a0a37

Valentin Schneider authored and Ingo Molnar committed
sched/core: Initialize the idle task with preemption disabled
As pointed out by commit

  de9b8f5 ("sched: Fix crash trying to dequeue/enqueue the idle thread")

init_idle() can and will be invoked more than once on the same idle
task. At boot time, it is invoked for the boot CPU thread by
sched_init(). Then smp_init() creates the threads for all the secondary
CPUs and invokes init_idle() on them.

As the hotplug machinery brings the secondaries to life, it will issue
calls to idle_thread_get(), which itself invokes init_idle() yet again.
In this case it's invoked twice more per secondary: at _cpu_up(), and at
bringup_cpu().

Given smp_init() already initializes the idle tasks for all *possible*
CPUs, no further initialization should be required. Now, removing
init_idle() from idle_thread_get() exposes some interesting expectations
with regard to the idle task's preempt_count: the secondary startup
always issues a preempt_disable(), requiring some reset of the preempt
count to 0 between hot-unplug and hotplug, which is currently served by
idle_thread_get() -> init_idle().

Given the idle task is supposed to have preemption disabled once and
never see it re-enabled, what we actually want is to initialize its
preempt_count to PREEMPT_DISABLED and leave it there. Do that, and
remove init_idle() from idle_thread_get().

Secondary startups were patched via coccinelle:

  @BeGone@
  @@

  -preempt_disable();
  ...
  cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

Signed-off-by: Valentin Schneider <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
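For orientation, every diff below pivots on the idle task's initial preempt_count. A minimal sketch of the constants involved (simplified from include/linux/preempt.h; values assume CONFIG_PREEMPT_COUNT=y and ignore per-arch folding of the need-resched bit, so read it as an illustration rather than the kernel's verbatim definitions):

	/* Illustration only: simplified preempt-count constants. */
	#define PREEMPT_ENABLED		0	/* no preempt_disable() levels held */
	#define PREEMPT_OFFSET		1	/* one preempt_disable() level */
	#define PREEMPT_DISABLED	(PREEMPT_ENABLED + PREEMPT_OFFSET)

	/*
	 * Before this commit, init_idle_preempt_count() seeded PREEMPT_ENABLED
	 * and every secondary startup path compensated with an explicit
	 * preempt_disable(). After it, the idle task is born at
	 * PREEMPT_DISABLED and is expected to stay there.
	 */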
1 parent 9f26990 commit f1a0a37

File tree

25 files changed (+8, -34 lines)


arch/alpha/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -166,7 +166,6 @@ smp_callin(void)
 	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
 	      cpuid, current, current->active_mm));
 
-	preempt_disable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 

arch/arc/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
 	local_irq_enable();
-	preempt_disable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 

arch/arm/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void)
 #endif
 	pr_debug("CPU%u: Booted secondary processor\n", cpu);
 
-	preempt_disable();
 	trace_hardirqs_off();
 
 	/*

arch/arm64/include/asm/preempt.h

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
 } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
 } while (0)
 
 static inline void set_preempt_need_resched(void)
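These per-arch init_idle_preempt_count() definitions are consumed by init_idle(), which this commit also marks __init (see the kernel/sched/core.c hunk below). A heavily trimmed sketch of the call site, with everything but the relevant line elided:

	/* Trimmed sketch of kernel/sched/core.c:init_idle(); most setup elided. */
	void __init init_idle(struct task_struct *idle, int cpu)
	{
		/* ... rq, affinity and scheduler-class setup elided ... */
		init_idle_preempt_count(idle, cpu);	/* now seeds PREEMPT_DISABLED */
	}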

arch/arm64/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -224,7 +224,6 @@ asmlinkage notrace void secondary_start_kernel(void)
 	init_gic_priority_masking();
 
 	rcu_cpu_starting(cpu);
-	preempt_disable();
 	trace_hardirqs_off();
 
 	/*

arch/csky/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -281,7 +281,6 @@ void csky_start_secondary(void)
 	pr_info("CPU%u Online: %s...\n", cpu, __func__);
 
 	local_irq_enable();
-	preempt_disable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 

arch/ia64/kernel/smpboot.c

Lines changed: 0 additions & 1 deletion
@@ -441,7 +441,6 @@ start_secondary (void *unused)
 #endif
 	efi_map_pal_code();
 	cpu_init();
-	preempt_disable();
 	smp_callin();
 
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

arch/mips/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
 	 */
 
 	calibrate_delay();
-	preempt_disable();
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
arch/openrisc/kernel/smp.c

Lines changed: 0 additions & 2 deletions
@@ -145,8 +145,6 @@ asmlinkage __init void secondary_start_kernel(void)
 	set_cpu_online(cpu, true);
 
 	local_irq_enable();
-
-	preempt_disable();
 	/*
 	 * OK, it's off to the idle thread for us
 	 */

arch/parisc/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
 #endif
 
 	smp_cpu_init(slave_id);
-	preempt_disable();
 
 	flush_cache_all_local(); /* start with known state */
 	flush_tlb_all_local(NULL);

arch/powerpc/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -1547,7 +1547,6 @@ void start_secondary(void *unused)
 	smp_store_cpu_info(cpu);
 	set_dec(tb_ticks_per_jiffy);
 	rcu_cpu_starting(cpu);
-	preempt_disable();
 	cpu_callin_map[cpu] = 1;
 
 	if (smp_ops->setup_cpu)

arch/riscv/kernel/smpboot.c

Lines changed: 0 additions & 1 deletion
@@ -180,7 +180,6 @@ asmlinkage __visible void smp_callin(void)
 	 * Disable preemption before enabling interrupts, so we don't try to
 	 * schedule a CPU that hasn't actually started yet.
 	 */
-	preempt_disable();
 	local_irq_enable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }

arch/s390/include/asm/preempt.h

Lines changed: 2 additions & 2 deletions
@@ -32,7 +32,7 @@ static inline void preempt_count_set(int pc)
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
 } while (0)
 
 static inline void set_preempt_need_resched(void)
@@ -91,7 +91,7 @@ static inline void preempt_count_set(int pc)
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
 } while (0)
 
 static inline void set_preempt_need_resched(void)

arch/s390/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -878,7 +878,6 @@ static void smp_init_secondary(void)
 	restore_access_regs(S390_lowcore.access_regs_save_area);
 	cpu_init();
 	rcu_cpu_starting(cpu);
-	preempt_disable();
 	init_cpu_timer();
 	vtime_init();
 	vdso_getcpu_init();

arch/sh/kernel/smp.c

Lines changed: 0 additions & 2 deletions
@@ -186,8 +186,6 @@ asmlinkage void start_secondary(void)
 
 	per_cpu_trap_init();
 
-	preempt_disable();
-
 	notify_cpu_starting(cpu);
 
 	local_irq_enable();

arch/sparc/kernel/smp_32.c

Lines changed: 0 additions & 1 deletion
@@ -348,7 +348,6 @@ static void sparc_start_secondary(void *arg)
 	 */
 	arch_cpu_pre_starting(arg);
 
-	preempt_disable();
 	cpu = smp_processor_id();
 
 	notify_cpu_starting(cpu);

arch/sparc/kernel/smp_64.c

Lines changed: 0 additions & 3 deletions
@@ -138,9 +138,6 @@ void smp_callin(void)
 
 	set_cpu_online(cpuid, true);
 
-	/* idle thread is expected to have preempt disabled */
-	preempt_disable();
-
 	local_irq_enable();
 
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

arch/x86/include/asm/preempt.h

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ static __always_inline void preempt_count_set(int pc)
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
 } while (0)
 
 /*

arch/x86/kernel/smpboot.c

Lines changed: 0 additions & 1 deletion
@@ -236,7 +236,6 @@ static void notrace start_secondary(void *unused)
 	cpu_init();
 	rcu_cpu_starting(raw_smp_processor_id());
 	x86_cpuinit.early_percpu_clock_init();
-	preempt_disable();
 	smp_callin();
 
 	enable_start_cpu0 = 0;

arch/xtensa/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -145,7 +145,6 @@ void secondary_start_kernel(void)
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	enter_lazy_tlb(mm, current);
 
-	preempt_disable();
 	trace_hardirqs_off();
 
 	calibrate_delay();

include/asm-generic/preempt.h

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc)
 } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
 } while (0)
 
 static __always_inline void set_preempt_need_resched(void)

init/main.c

Lines changed: 1 addition & 5 deletions
@@ -941,11 +941,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
 	 * time - but meanwhile we still have a functioning scheduler.
 	 */
 	sched_init();
-	/*
-	 * Disable preemption - early bootup scheduling is extremely
-	 * fragile until we cpu_idle() for the first time.
-	 */
-	preempt_disable();
+
 	if (WARN(!irqs_disabled(),
 		 "Interrupts were enabled *very* early, fixing it\n"))
 		local_irq_disable();
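The removal above is safe because sched_init(), invoked just before this point, already runs init_idle() on the boot CPU's idle task (current), and with this commit that call seeds PREEMPT_DISABLED. A trimmed sketch of the chain (abbreviated, not the full function):

	/* Trimmed sketch: why start_kernel() no longer needs preempt_disable(). */
	void __init sched_init(void)
	{
		/* ... runqueue and scheduling-class setup elided ... */
		init_idle(current, smp_processor_id());	/* boot CPU's idle task */
	}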

kernel/fork.c

Lines changed: 1 addition & 1 deletion
@@ -2412,7 +2412,7 @@ static inline void init_idle_pids(struct task_struct *idle)
 	}
 }
 
-struct task_struct *fork_idle(int cpu)
+struct task_struct * __init fork_idle(int cpu)
 {
 	struct task_struct *task;
 	struct kernel_clone_args args = {

kernel/sched/core.c

Lines changed: 1 addition & 1 deletion
@@ -8227,7 +8227,7 @@ void show_state_filter(unsigned long state_filter)
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void init_idle(struct task_struct *idle, int cpu)
+void __init init_idle(struct task_struct *idle, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;

kernel/smpboot.c

Lines changed: 0 additions & 1 deletion
@@ -33,7 +33,6 @@ struct task_struct *idle_thread_get(unsigned int cpu)
 
 	if (!tsk)
 		return ERR_PTR(-ENOMEM);
-	init_idle(tsk, cpu);
 	return tsk;
 }
 
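With init_idle() gone from it, idle_thread_get() merely hands back the per-CPU idle task that smp_init() already initialized. A sketch of the resulting function per the hunk above (the lookup line sits outside the hunk and is assumed from context):

	/* Sketch of kernel/smpboot.c:idle_thread_get() after this commit. */
	struct task_struct *idle_thread_get(unsigned int cpu)
	{
		struct task_struct *tsk = per_cpu(idle_threads, cpu);	/* assumed */

		if (!tsk)
			return ERR_PTR(-ENOMEM);
		/* No init_idle() re-run: preempt_count stays at PREEMPT_DISABLED. */
		return tsk;
	}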
