Skip to content

Commit a3d5c34

Browse files
committed
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Two smaller fixes - plus a context tracking tracing fix that is a bit
  bigger"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tracing/context-tracking: Add preempt_schedule_context() for tracing
  sched: Fix clear NOHZ_BALANCE_KICK
  sched/x86: Construct all sibling maps if smt
2 parents 86c7667 + 29bb9e5 commit a3d5c34

File tree

4 files changed

+78
-9
lines changed

4 files changed

+78
-9
lines changed

arch/x86/kernel/smpboot.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
372372

373373
void __cpuinit set_cpu_sibling_map(int cpu)
374374
{
375-
bool has_mc = boot_cpu_data.x86_max_cores > 1;
376375
bool has_smt = smp_num_siblings > 1;
376+
bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
377377
struct cpuinfo_x86 *c = &cpu_data(cpu);
378378
struct cpuinfo_x86 *o;
379379
int i;
380380

381381
cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
382382

383-
if (!has_smt && !has_mc) {
383+
if (!has_mp) {
384384
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
385385
cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
386386
cpumask_set_cpu(cpu, cpu_core_mask(cpu));
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
394394
if ((i == cpu) || (has_smt && match_smt(c, o)))
395395
link_mask(sibling, cpu, i);
396396

397-
if ((i == cpu) || (has_mc && match_llc(c, o)))
397+
if ((i == cpu) || (has_mp && match_llc(c, o)))
398398
link_mask(llc_shared, cpu, i);
399399

400400
}
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
406406
for_each_cpu(i, cpu_sibling_setup_mask) {
407407
o = &cpu_data(i);
408408

409-
if ((i == cpu) || (has_mc && match_mc(c, o))) {
409+
if ((i == cpu) || (has_mp && match_mc(c, o))) {
410410
link_mask(core, cpu, i);
411411

412412
/*

include/linux/preempt.h

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,9 +33,25 @@ do { \
3333
preempt_schedule(); \
3434
} while (0)
3535

36+
#ifdef CONFIG_CONTEXT_TRACKING

void preempt_schedule_context(void);

/*
 * Variant of preempt_check_resched() for use by preempt_enable_notrace():
 * reschedules through preempt_schedule_context() so the context-tracking
 * state is fixed up before the scheduler runs.
 */
#define preempt_check_resched_context() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule_context(); \
} while (0)
#else

#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */
50+
3651
#else /* !CONFIG_PREEMPT */
3752

3853
#define preempt_check_resched() do { } while (0)
54+
#define preempt_check_resched_context() do { } while (0)
3955

4056
#endif /* CONFIG_PREEMPT */
4157

@@ -88,7 +104,7 @@ do { \
88104
do { \
89105
preempt_enable_no_resched_notrace(); \
90106
barrier(); \
91-
preempt_check_resched(); \
107+
preempt_check_resched_context(); \
92108
} while (0)
93109

94110
#else /* !CONFIG_PREEMPT_COUNT */

kernel/context_tracking.c

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,46 @@ void user_enter(void)
7070
local_irq_restore(flags);
7171
}
7272

73+
#ifdef CONFIG_PREEMPT
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
void __sched notrace preempt_schedule_context(void)
{
	struct thread_info *ti = current_thread_info();
	enum ctx_state prev_ctx;

	/* Bail out when preemption is not actually possible right now. */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	/*
	 * Need to disable preemption in case user_exit() is traced
	 * and the tracer calls preempt_enable_notrace() causing
	 * an infinite recursion.
	 */
	preempt_disable_notrace();
	prev_ctx = exception_enter();
	preempt_enable_no_resched_notrace();

	preempt_schedule();

	/* Restore the saved context-tracking state symmetrically. */
	preempt_disable_notrace();
	exception_exit(prev_ctx);
	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_PREEMPT */
73113

74114
/**
75115
* user_exit - Inform the context tracking that the CPU is

kernel/sched/core.c

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -633,7 +633,19 @@ void wake_up_nohz_cpu(int cpu)
633633
static inline bool got_nohz_idle_kick(void)
634634
{
635635
int cpu = smp_processor_id();
636-
return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
636+
637+
if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
638+
return false;
639+
640+
if (idle_cpu(cpu) && !need_resched())
641+
return true;
642+
643+
/*
644+
* We can't run Idle Load Balance on this CPU for this time so we
645+
* cancel it and clear NOHZ_BALANCE_KICK
646+
*/
647+
clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
648+
return false;
637649
}
638650

639651
#else /* CONFIG_NO_HZ_COMMON */
@@ -1393,8 +1405,9 @@ static void sched_ttwu_pending(void)
13931405

13941406
void scheduler_ipi(void)
13951407
{
1396-
if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
1397-
&& !tick_nohz_full_cpu(smp_processor_id()))
1408+
if (llist_empty(&this_rq()->wake_list)
1409+
&& !tick_nohz_full_cpu(smp_processor_id())
1410+
&& !got_nohz_idle_kick())
13981411
return;
13991412

14001413
/*
@@ -1417,7 +1430,7 @@ void scheduler_ipi(void)
14171430
/*
14181431
* Check if someone kicked us for doing the nohz idle load balance.
14191432
*/
1420-
if (unlikely(got_nohz_idle_kick() && !need_resched())) {
1433+
if (unlikely(got_nohz_idle_kick())) {
14211434
this_rq()->idle_balance = 1;
14221435
raise_softirq_irqoff(SCHED_SOFTIRQ);
14231436
}

0 commit comments

Comments
 (0)