From fd6caf7f1043246facab5dfe6966beb109ec61a5 Mon Sep 17 00:00:00 2001
From: Devin Alexander Torres <[email protected]>
Date: Sun, 14 Jan 2024 08:29:34 -0600
Subject: [PATCH] linux6.7.y-bore4.0.2
---
init/Kconfig | 19 +++
kernel/sched/core.c | 140 ++++++++++++++++++++++
kernel/sched/debug.c | 3 +
kernel/sched/fair.c | 253 ++++++++++++++++++++++++++++++++++++----
kernel/sched/features.h | 4 +
5 files changed, 399 insertions(+), 20 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index fbed5094de51..2b6ac6b9f01e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1258,6 +1258,25 @@ config CHECKPOINT_RESTORE
If unsure, say N here.
+config SCHED_BORE
+ bool "Burst-Oriented Response Enhancer"
+ default y
+ help
+ In desktop and mobile computing, one may prefer interactive
+ tasks to stay responsive no matter what runs in the background.
+
+ Enabling this kernel feature modifies the scheduler to distinguish
+ tasks by their burst time (runtime since the task last slept or
+ yielded) and to prioritize those that are less bursty.
+ Such tasks typically include window compositors, widget backends,
+ terminal emulators, video playback, games, and so on.
+ At a small cost to scheduling fairness, this may improve
+ responsiveness, especially under heavy background workloads.
+
+ You can turn it off at runtime by setting the sysctl kernel.sched_bore = 0.
+
+ If unsure, say Y here.
+
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
select CGROUPS
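
Note (not part of the patch): the help text above refers to the kernel.sched_bore sysctl, which this patch registers in kernel/sched/fair.c below. A minimal userspace sketch for checking and flipping it through the standard procfs mapping of kernel.* sysctls (writing requires root) might look like:

#include <stdio.h>

int main(void)
{
    const char *path = "/proc/sys/kernel/sched_bore";
    FILE *f = fopen(path, "r");
    int on = -1;

    if (!f) {
        perror(path);
        return 1;
    }
    if (fscanf(f, "%d", &on) != 1) {
        fclose(f);
        return 1;
    }
    fclose(f);
    printf("sched_bore is currently %d\n", on);

    f = fopen(path, "w");          /* root only */
    if (f) {
        fprintf(f, "%d\n", !on);   /* flip it */
        fclose(f);
    }
    return 0;
}
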
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 31dd842eed20..84f3ab6ae063 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4488,6 +4488,135 @@ int wake_up_state(struct task_struct *p, unsigned int state)
return try_to_wake_up(p, state, 0);
}
+#ifdef CONFIG_SCHED_BORE
+extern bool sched_bore;
+extern u8 sched_burst_fork_atavistic;
+extern uint sched_burst_cache_lifetime;
+
+void __init sched_init_bore(void) {
+ init_task.se.burst_time = 0;
+ init_task.se.prev_burst_penalty = 0;
+ init_task.se.curr_burst_penalty = 0;
+ init_task.se.burst_penalty = 0;
+ init_task.se.slice_score = 0;
+ init_task.se.child_burst_last_cached = 0;
+ init_task.se.slice_load = 0;
+}
+
+void inline sched_fork_bore(struct task_struct *p) {
+ p->se.burst_time = 0;
+ p->se.curr_burst_penalty = 0;
+ p->se.slice_score = 0;
+ p->se.child_burst_last_cached = 0;
+ p->se.slice_load = 0;
+}
+
+static u32 count_child_tasks(struct task_struct *p) {
+ struct task_struct *child;
+ u32 cnt = 0;
+ list_for_each_entry(child, &p->children, sibling) {cnt++;}
+ return cnt;
+}
+
+static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) {
+ return (p->se.child_burst_last_cached + sched_burst_cache_lifetime < now);
+}
+
+static void __update_child_burst_cache(
+ struct task_struct *p, u32 cnt, u32 sum, u64 now) {
+ u8 avg = 0;
+ if (cnt) avg = sum / cnt;
+ p->se.child_burst = max(avg, p->se.burst_penalty);
+ p->se.child_burst_cnt = cnt;
+ p->se.child_burst_last_cached = now;
+}
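
Note (not part of the patch): __update_child_burst_cache() stores the mean of the children's penalties, floored by the parent's own penalty, and the cache is treated as stale once sched_burst_cache_lifetime nanoseconds have passed. A self-contained userspace sketch of just that rule, with hypothetical plain fields standing in for the task_struct members:

#include <stdint.h>
#include <stdio.h>

#define CACHE_LIFETIME_NS 60000000u  /* matches the default sysctl */

struct parent_cache {
    uint8_t  child_burst;
    uint32_t child_burst_cnt;
    uint64_t last_cached_ns;
};

static void update_cache(struct parent_cache *pc, uint8_t own_penalty,
                         uint32_t cnt, uint32_t sum, uint64_t now_ns)
{
    uint8_t avg = cnt ? sum / cnt : 0;
    pc->child_burst = avg > own_penalty ? avg : own_penalty;
    pc->child_burst_cnt = cnt;
    pc->last_cached_ns = now_ns;
}

static int cache_expired(const struct parent_cache *pc, uint64_t now_ns)
{
    return pc->last_cached_ns + CACHE_LIFETIME_NS < now_ns;
}

int main(void)
{
    struct parent_cache pc = {0};

    update_cache(&pc, /*own*/ 12, /*cnt*/ 3, /*sum*/ 60, /*now*/ 1000);
    printf("cached child burst: %u\n", (unsigned)pc.child_burst); /* max(20, 12) = 20 */
    printf("expired at +61 ms: %d\n", cache_expired(&pc, 1000 + 61000000ull));
    return 0;
}
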
+
+static inline void update_child_burst_direct(struct task_struct *p, u64 now) {
+ struct task_struct *child;
+ u32 cnt = 0;
+ u32 sum = 0;
+
+ list_for_each_entry(child, &p->children, sibling) {
+ if (child->sched_class != &fair_sched_class) continue;
+ cnt++;
+ sum += child->se.burst_penalty;
+ }
+
+ __update_child_burst_cache(p, cnt, sum, now);
+}
+
+static inline u8 __inherit_burst_direct(struct task_struct *p, u64 now) {
+ struct task_struct *parent = p->real_parent;
+ if (child_burst_cache_expired(parent, now))
+ update_child_burst_direct(parent, now);
+
+ return parent->se.child_burst;
+}
+
+static inline void update_child_burst_topological(
+ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) {
+ struct task_struct *child, *dec;
+ u32 cnt = 0, dcnt = 0;
+ u32 sum = 0;
+
+ list_for_each_entry(child, &p->children, sibling) {
+ dec = child;
+ while ((dcnt = count_child_tasks(dec)) == 1)
+ dec = list_first_entry(&dec->children, struct task_struct, sibling);
+
+ if (!dcnt || !depth) {
+ if (dec->sched_class != &fair_sched_class) continue;
+ cnt++;
+ sum += dec->se.burst_penalty;
+ continue;
+ }
+ if (!child_burst_cache_expired(dec, now)) {
+ cnt += dec->se.child_burst_cnt;
+ sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt;
+ continue;
+ }
+ update_child_burst_topological(dec, now, depth - 1, &cnt, &sum);
+ }
+
+ __update_child_burst_cache(p, cnt, sum, now);
+ *acnt += cnt;
+ *asum += sum;
+}
+
+static inline u8 __inherit_burst_topological(struct task_struct *p, u64 now) {
+ struct task_struct *anc = p->real_parent;
+ u32 cnt = 0, sum = 0;
+
+ while (anc->real_parent != anc && count_child_tasks(anc) == 1)
+ anc = anc->real_parent;
+
+ if (child_burst_cache_expired(anc, now))
+ update_child_burst_topological(
+ anc, now, sched_burst_fork_atavistic - 1, &cnt, &sum);
+
+ return anc->se.child_burst;
+}
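
Note (not part of the patch): the topological variant descends through chains of single children before averaging, so a task forked through several single-child wrappers still inherits from the burst statistics of its real sibling group. A toy-tree sketch of the descent (hypothetical struct node; no cache, locking, or sched-class filter):

#include <stdio.h>

struct node {
    struct node *child;    /* first child */
    struct node *sibling;  /* next sibling */
    unsigned penalty;
};

static unsigned count_children(const struct node *n)
{
    unsigned cnt = 0;
    for (const struct node *c = n->child; c; c = c->sibling)
        cnt++;
    return cnt;
}

static void sum_bursts(const struct node *p, unsigned depth,
                       unsigned *cnt, unsigned *sum)
{
    for (const struct node *c = p->child; c; c = c->sibling) {
        const struct node *d = c;
        unsigned dcnt;

        /* Descend through single-child chains. */
        while ((dcnt = count_children(d)) == 1)
            d = d->child;

        if (!dcnt || !depth) {          /* leaf, or depth budget spent */
            (*cnt)++;
            *sum += d->penalty;
            continue;
        }
        sum_bursts(d, depth - 1, cnt, sum);
    }
}

int main(void)
{
    /* root -> shim -> inner -> {b, a}; shim is a single-child wrapper. */
    struct node a = { .penalty = 10 };
    struct node b = { .sibling = &a, .penalty = 30 };
    struct node inner = { .child = &b };
    struct node shim  = { .child = &inner };
    struct node root  = { .child = &shim };
    unsigned cnt = 0, sum = 0;

    sum_bursts(&root, 1, &cnt, &sum);
    printf("avg = %u\n", cnt ? sum / cnt : 0);  /* (30 + 10) / 2 = 20 */
    return 0;
}
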
+
+static inline void inherit_burst(struct task_struct *p) {
+ u8 burst_cache;
+ u64 now = ktime_get_ns();
+
+ read_lock(&tasklist_lock);
+ burst_cache = likely(sched_burst_fork_atavistic)?
+ __inherit_burst_topological(p, now):
+ __inherit_burst_direct(p, now);
+ read_unlock(&tasklist_lock);
+
+ p->se.prev_burst_penalty = max(p->se.prev_burst_penalty, burst_cache);
+}
+
+static inline void sched_post_fork_bore(struct task_struct *p) {
+ if (p->sched_class == &fair_sched_class && likely(sched_bore))
+ inherit_burst(p);
+ p->se.burst_penalty = p->se.prev_burst_penalty;
+}
+#endif // CONFIG_SCHED_BORE
+
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
@@ -4504,6 +4633,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+#ifdef CONFIG_SCHED_BORE
+ sched_fork_bore(p);
+#endif // CONFIG_SCHED_BORE
p->se.vlag = 0;
p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
@@ -4823,6 +4955,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
void sched_post_fork(struct task_struct *p)
{
+#ifdef CONFIG_SCHED_BORE
+ sched_post_fork_bore(p);
+#endif // CONFIG_SCHED_BORE
uclamp_post_fork(p);
}
@@ -9899,6 +10034,11 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class != &stop_sched_class + 1);
#endif
+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.0.2 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 4580a450700e..cf2c694c94da 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -595,6 +595,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
+#ifdef CONFIG_SCHED_BORE
+ SEQ_printf(m, " %2d", p->se.slice_score);
+#endif
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 83ce6ce8b86f..7fe25327bede 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -20,6 +20,9 @@
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
+ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler
+ * Copyright (C) 2021-2024 Masahito Suzuki <[email protected]>
+ *
* Remove energy efficiency functions by Alexandre Frade
* (C) 2021 Alexandre Frade <[email protected]>
*/
@@ -67,20 +70,118 @@
* SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant)
+ * (EEVDF default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
*/
unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
/*
* Minimal preemption granularity for CPU-bound tasks:
*
- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (BORE default: 3 msec constant, units: nanoseconds)
+ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_base_slice = 3000000ULL;
+static unsigned int normalized_sysctl_sched_base_slice = 3000000ULL;
+#else // CONFIG_SCHED_BORE
unsigned int sysctl_sched_base_slice = 750000ULL;
static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
+#endif // CONFIG_SCHED_BORE
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+#ifdef CONFIG_SCHED_BORE
+bool __read_mostly sched_bore = 1;
+bool __read_mostly sched_burst_score_rounding = 0;
+bool __read_mostly sched_burst_smoothness_long = 1;
+bool __read_mostly sched_burst_smoothness_short = 0;
+u8 __read_mostly sched_burst_fork_atavistic = 2;
+u8 __read_mostly sched_burst_penalty_offset = 22;
+uint __read_mostly sched_burst_penalty_scale = 1280;
+uint __read_mostly sched_burst_cache_lifetime = 60000000;
+static u8 sixty_four = 64;
+static uint maxval_12_bits = 4095;
+
+#define MAX_BURST_PENALTY (39U << 2)
+
+static inline u32 log2plus1_u64_u32f8(u64 v) {
+ u32 msb = fls64(v);
+ s32 excess_bits = msb - 9;
+ u8 fractional = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits;
+ return msb << 8 | fractional;
+}
+
+static inline u32 calc_burst_penalty(u64 burst_time) {
+ u32 greed, tolerance, penalty, scaled_penalty;
+
+ greed = log2plus1_u64_u32f8(burst_time);
+ tolerance = sched_burst_penalty_offset << 8;
+ penalty = max(0, (s32)greed - (s32)tolerance);
+ scaled_penalty = penalty * sched_burst_penalty_scale >> 16;
+
+ return min(MAX_BURST_PENALTY, scaled_penalty);
+}
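
Note (not part of the patch): log2plus1_u64_u32f8() returns a 24.8 fixed-point log2 of the burst time, so with the defaults (offset 22, scale 1280) the penalty grows by 1280/256 = 5 points per doubling of burst time beyond 2^22 ns (~4 ms). A userspace rendering of the two helpers, with fls64() replaced by the GCC/Clang __builtin_clzll builtin:

#include <stdint.h>
#include <stdio.h>

#define MAX_BURST_PENALTY (39U << 2)

static uint32_t log2plus1_u64_u32f8(uint64_t v)
{
    uint32_t msb = v ? 64 - __builtin_clzll(v) : 0;  /* stands in for fls64() */
    int32_t excess = msb - 9;
    uint8_t frac = excess >= 0 ? v >> excess : v << -excess;
    return msb << 8 | frac;
}

static uint32_t calc_burst_penalty(uint64_t burst_ns,
                                   uint8_t offset, uint32_t scale)
{
    uint32_t greed = log2plus1_u64_u32f8(burst_ns);
    uint32_t tolerance = (uint32_t)offset << 8;
    int32_t diff = (int32_t)greed - (int32_t)tolerance;
    uint32_t penalty = diff > 0 ? diff : 0;
    uint32_t scaled = (uint64_t)penalty * scale >> 16;

    return scaled < MAX_BURST_PENALTY ? scaled : MAX_BURST_PENALTY;
}

int main(void)
{
    /* 100 ms burst: greed ~= 27.49 in 24.8 form, tolerance = 22, so
     * penalty ~= (27.49 - 22) * 5 ~= 27; slice score ~= 27 >> 2 = 6. */
    printf("%u\n", calc_burst_penalty(100000000ull, 22, 1280));
    return 0;
}
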
+
+static inline void update_burst_penalty(struct sched_entity *se) {
+ se->curr_burst_penalty = calc_burst_penalty(se->burst_time);
+ se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty);
+}
+
+static inline u64 scale_slice(u64 delta, struct sched_entity *se) {
+ return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->slice_score], 22);
+}
+
+static inline u64 __unscale_slice(u64 delta, u8 score) {
+ return mul_u64_u32_shr(delta, sched_prio_to_weight[score], 10);
+}
+
+static inline u64 unscale_slice(u64 delta, struct sched_entity *se) {
+ return __unscale_slice(delta, se->slice_score);
+}
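
Note (not part of the patch): scale_slice() and __unscale_slice() index the kernel's nice-level tables directly by slice_score, so a score of s scales vruntime roughly as if the task ran at nice (s - 20): the factor is wmult[s] >> 22, which is about 1024 / weight[s]. A sketch that approximates the tables with the kernel's ~1.25 per-step weight ratio and checks that the two helpers round-trip:

#include <stdint.h>
#include <stdio.h>

static uint32_t weight_of(int idx)    /* ~ sched_prio_to_weight[idx] */
{
    double w = 1024.0;
    for (int i = idx; i < 20; i++) w *= 1.25;
    for (int i = 20; i < idx; i++) w /= 1.25;
    return (uint32_t)(w + 0.5);
}

static uint64_t scale_slice(uint64_t delta, uint8_t score)
{
    uint32_t wmult = (uint32_t)((1ull << 32) / weight_of(score));
    return delta * wmult >> 22;       /* mul_u64_u32_shr(delta, wmult, 22) */
}

static uint64_t unscale_slice(uint64_t delta, uint8_t score)
{
    return delta * weight_of(score) >> 10;
}

int main(void)
{
    uint64_t slice = 3000000;  /* the 3 ms BORE base slice */
    for (int s = 0; s <= 39; s += 13)
        printf("score %2d: scaled %9llu, round-trip %9llu\n", s,
               (unsigned long long)scale_slice(slice, s),
               (unsigned long long)unscale_slice(scale_slice(slice, s), s));
    return 0;
}
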
+
+static void avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se);
+static void avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se);
+
+static void update_slice_score(struct sched_entity *se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ u8 prev_score = se->slice_score;
+ u32 penalty = se->burst_penalty;
+ if (sched_burst_score_rounding) penalty += 0x2U;
+ se->slice_score = penalty >> 2;
+
+ if (se->slice_score != prev_score && se->slice_load) {
+ avg_vruntime_sub(cfs_rq, se);
+ avg_vruntime_add(cfs_rq, se);
+ }
+}
+
+static inline u32 binary_smooth(u32 new, u32 old) {
+ int increment = new - old;
+ return (0 <= increment)?
+ old + ( increment >> (int)sched_burst_smoothness_long):
+ old - (-increment >> (int)sched_burst_smoothness_short);
+}
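
Note (not part of the patch): binary_smooth() is an asymmetric exponential moving average done with shifts. With the defaults (smoothness_long = 1, smoothness_short = 0), a penalty that grew since the last sleep moves only half the gap upward, while one that shrank is adopted immediately, so calm behavior is rewarded at once and bursty history accumulates gradually. A tiny demo:

#include <stdio.h>

static unsigned binary_smooth(unsigned new, unsigned old,
                              int shift_long, int shift_short)
{
    int inc = (int)new - (int)old;
    return inc >= 0 ? old + (inc >> shift_long)
                    : old - (-inc >> shift_short);
}

int main(void)
{
    printf("%u\n", binary_smooth(100, 40, 1, 0)); /* up:   40 + 60/2 = 70 */
    printf("%u\n", binary_smooth(40, 100, 1, 0)); /* down: 100 - 60  = 40 */
    return 0;
}
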
+
+static void restart_burst(struct sched_entity *se) {
+ se->burst_penalty = se->prev_burst_penalty =
+ binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty);
+ se->curr_burst_penalty = 0;
+ se->burst_time = 0;
+ update_slice_score(se);
+}
+
+static inline void restart_burst_rescale_deadline(struct sched_entity *se) {
+ u64 wremain, vremain = se->deadline - se->vruntime;
+ u8 prev_score = se->slice_score;
+ restart_burst(se);
+ if (prev_score > se->slice_score) {
+ wremain = __unscale_slice(vremain, prev_score);
+ se->deadline = se->vruntime + scale_slice(wremain, se);
+ }
+}
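
Note (not part of the patch): restart_burst_rescale_deadline() converts the virtual runtime still owed to the deadline back into wall-clock time at the old slice score, then re-virtualizes it at the new, lower score, so a task whose penalty just dropped has its pending deadline pulled closer rather than served out at the stale rate. A numeric sketch using the kernel's real weight entries for scores 22 and 20 (655 and 1024):

#include <stdint.h>
#include <stdio.h>

static uint64_t to_wall(uint64_t vdelta, uint32_t weight)
{
    return vdelta * weight >> 10;       /* __unscale_slice() */
}

static uint64_t to_virtual(uint64_t wdelta, uint32_t weight)
{
    return (wdelta << 10) / weight;     /* ~ scale_slice() */
}

int main(void)
{
    uint64_t vruntime = 0, deadline = 3000000;   /* 3 ms of vremain */
    uint32_t w_old = 655, w_new = 1024;          /* score 22 -> score 20 */

    uint64_t wremain = to_wall(deadline - vruntime, w_old);
    deadline = vruntime + to_virtual(wremain, w_new);
    /* new deadline (== vremain, since vruntime = 0): ~1918945 ns < 3 ms */
    printf("%llu\n", (unsigned long long)deadline);
    return 0;
}
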
+#endif // CONFIG_SCHED_BORE
+
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
@@ -140,6 +241,70 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_fair_sysctls[] = {
+#ifdef CONFIG_SCHED_BORE
+ {
+ .procname = "sched_bore",
+ .data = &sched_bore,
+ .maxlen = sizeof(bool),
+ .mode = 0644,
+ .proc_handler = &proc_dobool,
+ },
+ {
+ .procname = "sched_burst_cache_lifetime",
+ .data = &sched_burst_cache_lifetime,
+ .maxlen = sizeof(uint),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ {
+ .procname = "sched_burst_fork_atavistic",
+ .data = &sched_burst_fork_atavistic,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = &proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_THREE,
+ },
+ {
+ .procname = "sched_burst_penalty_offset",
+ .data = &sched_burst_penalty_offset,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = &proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sixty_four,
+ },
+ {
+ .procname = "sched_burst_penalty_scale",
+ .data = &sched_burst_penalty_scale,
+ .maxlen = sizeof(uint),
+ .mode = 0644,
+ .proc_handler = &proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &maxval_12_bits,
+ },
+ {
+ .procname = "sched_burst_score_rounding",
+ .data = &sched_burst_score_rounding,
+ .maxlen = sizeof(bool),
+ .mode = 0644,
+ .proc_handler = &proc_dobool,
+ },
+ {
+ .procname = "sched_burst_smoothness_long",
+ .data = &sched_burst_smoothness_long,
+ .maxlen = sizeof(bool),
+ .mode = 0644,
+ .proc_handler = &proc_dobool,
+ },
+ {
+ .procname = "sched_burst_smoothness_short",
+ .data = &sched_burst_smoothness_short,
+ .maxlen = sizeof(bool),
+ .mode = 0644,
+ .proc_handler = &proc_dobool,
+ },
+#endif // CONFIG_SCHED_BORE
#ifdef CONFIG_CFS_BANDWIDTH
{
.procname = "sched_cfs_bandwidth_slice_us",
@@ -301,6 +466,9 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
if (unlikely(se->load.weight != NICE_0_LOAD))
delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
+#ifdef CONFIG_SCHED_BORE
+ if (likely(sched_bore)) delta = scale_slice(delta, se);
+#endif // CONFIG_SCHED_BORE
return delta;
}
@@ -623,10 +791,22 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
*
* As measured, the max (key * weight) value was ~44 bits for a kernel build.
*/
+static unsigned long calc_avg_load_weight(struct sched_entity *se) {
+ unsigned long weight = scale_load_down(se->load.weight);
+#ifdef CONFIG_SCHED_BORE
+ weight <<= 5;
+ if (likely(sched_bore)) weight = unscale_slice(weight, se);
+#endif // CONFIG_SCHED_BORE
+ return weight;
+}
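
Note (not part of the patch): the net arithmetic of calc_avg_load_weight() is weight * sched_prio_to_weight[slice_score] >> 5 — the << 5 adds headroom before __unscale_slice()'s >> 10 — so low-score entities count for proportionally more in the avg_vruntime bookkeeping, mirroring the vruntime scaling in calc_delta_fair(). A quick check with the kernel's actual table entries for scores 20 and 24 (1024 and 423):

#include <stdint.h>
#include <stdio.h>

static unsigned long avg_weight(unsigned long w, uint32_t score_weight)
{
    w <<= 5;                          /* headroom, as in the patch */
    return w * score_weight >> 10;    /* __unscale_slice() */
}

int main(void)
{
    /* A nice-0 entity (load 1024) at two different slice scores. */
    printf("score 20: %lu\n", avg_weight(1024, 1024)); /* 32768 */
    printf("score 24: %lu\n", avg_weight(1024, 423));  /* 13536 */
    return 0;
}
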
+
static void
avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- unsigned long weight = scale_load_down(se->load.weight);
+ unsigned long weight = calc_avg_load_weight(se);
+#ifdef CONFIG_SCHED_BORE
+ se->slice_load = weight;
+#endif // CONFIG_SCHED_BORE
s64 key = entity_key(cfs_rq, se);
cfs_rq->avg_vruntime += key * weight;
@@ -636,7 +816,13 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
static void
avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- unsigned long weight = scale_load_down(se->load.weight);
+ unsigned long weight;
+#if !defined(CONFIG_SCHED_BORE)
+ weight = scale_load_down(se->load.weight);
+#else // CONFIG_SCHED_BORE
+ weight = se->slice_load;
+ se->slice_load = 0;
+#endif // CONFIG_SCHED_BORE
s64 key = entity_key(cfs_rq, se);
cfs_rq->avg_vruntime -= key * weight;
@@ -656,14 +842,14 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
* Specifically: avg_runtime() + 0 must result in entity_eligible() := true
* For this to be so, the result of this function must have a left bias.
*/
-u64 avg_vruntime(struct cfs_rq *cfs_rq)
+static u64 avg_key(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
s64 avg = cfs_rq->avg_vruntime;
long load = cfs_rq->avg_load;
if (curr && curr->on_rq) {
- unsigned long weight = scale_load_down(curr->load.weight);
+ unsigned long weight = calc_avg_load_weight(curr);
avg += entity_key(cfs_rq, curr) * weight;
load += weight;
@@ -676,7 +862,11 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
avg = div_s64(avg, load);
}
- return cfs_rq->min_vruntime + avg;
+ return avg;
+}
+
+inline u64 avg_vruntime(struct cfs_rq *cfs_rq) {
+ return cfs_rq->min_vruntime + avg_key(cfs_rq);
}
/*
@@ -697,13 +887,8 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
*/
static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- s64 lag, limit;
-
SCHED_WARN_ON(!se->on_rq);
- lag = avg_vruntime(cfs_rq) - se->vruntime;
-
- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
- se->vlag = clamp(lag, -limit, limit);
+ se->vlag = avg_vruntime(cfs_rq) - se->vruntime;
}
/*
@@ -730,7 +915,7 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
long load = cfs_rq->avg_load;
if (curr && curr->on_rq) {
- unsigned long weight = scale_load_down(curr->load.weight);
+ unsigned long weight = calc_avg_load_weight(curr);
avg += entity_key(cfs_rq, curr) * weight;
load += weight;
@@ -1019,6 +1204,9 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* EEVDF: vd_i = ve_i + r_i / w_i
*/
+#ifdef CONFIG_SCHED_BORE
+ update_slice_score(se);
+#endif // CONFIG_SCHED_BORE
se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
/*
@@ -1161,7 +1349,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
- curr->vruntime += calc_delta_fair(delta_exec, curr);
+#ifdef CONFIG_SCHED_BORE
+ curr->burst_time += delta_exec;
+ update_burst_penalty(curr);
+#endif // CONFIG_SCHED_BORE
+ curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr));
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
@@ -5134,7 +5326,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
- lag = se->vlag;
+ u64 limit = calc_delta_fair(max_t(u64, se->slice*2, TICK_NSEC), se);
+ s64 overmet = limit, undermet = limit;
+#ifdef CONFIG_SCHED_BORE
+ if (likely(sched_bore)) overmet = div_s64(overmet, 2);
+#endif // CONFIG_SCHED_BORE
+ lag = clamp(se->vlag, -overmet, undermet);
/*
* If we want to place a task and preserve lag, we have to
@@ -5190,9 +5387,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
load = cfs_rq->avg_load;
if (curr && curr->on_rq)
- load += scale_load_down(curr->load.weight);
+ load += calc_avg_load_weight(curr);
- lag *= load + scale_load_down(se->load.weight);
+ lag *= load + calc_avg_load_weight(se);
if (WARN_ON_ONCE(!load))
load = 1;
lag = div_s64(lag, load);
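
Note (not part of the patch): compared with mainline EEVDF, the lag clamp has moved from update_entity_lag() to placement, and with BORE enabled the negative bound is halved, so an entity that overran its fair share ("overmet") carries at most half as much debt into re-placement as an eligible entity may carry credit. A sketch of the clamp:

#include <stdint.h>
#include <stdio.h>

static int64_t clamp_lag(int64_t vlag, int64_t limit, int bore)
{
    int64_t overmet = bore ? limit / 2 : limit;  /* floor for negative lag */

    if (vlag < -overmet) return -overmet;
    if (vlag > limit)    return limit;
    return vlag;
}

int main(void)
{
    int64_t limit = 6000000;  /* ~ 2 * 3 ms slice, virtualized */

    printf("%lld\n", (long long)clamp_lag(-5000000, limit, 1)); /* -3000000 */
    printf("%lld\n", (long long)clamp_lag( 5000000, limit, 1)); /*  5000000 */
    return 0;
}
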
@@ -6762,6 +6959,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bool was_sched_idle = sched_idle_rq(rq);
util_est_dequeue(&rq->cfs, p);
+#ifdef CONFIG_SCHED_BORE
+ if (task_sleep) {
+ update_curr(cfs_rq_of(se));
+ restart_burst(se);
+ }
+#endif // CONFIG_SCHED_BORE
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -8240,11 +8443,12 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
+#ifdef CONFIG_SCHED_BORE
+ if (unlikely(!sched_bore))
+#endif // CONFIG_SCHED_BORE
if (unlikely(rq->nr_running == 1))
return;
- clear_buddies(cfs_rq, se);
-
update_rq_clock(rq);
/*
* Update run-time statistics of the 'current'.
@@ -8256,6 +8460,12 @@ static void yield_task_fair(struct rq *rq)
* and double the fastpath cost.
*/
rq_clock_skip_update(rq);
+#ifdef CONFIG_SCHED_BORE
+ restart_burst_rescale_deadline(se);
+ if (unlikely(rq->nr_running == 1)) return;
+#endif // CONFIG_SCHED_BORE
+
+ clear_buddies(cfs_rq, se);
se->deadline += calc_delta_fair(se->slice, se);
}
@@ -12336,6 +12546,9 @@ static void task_fork_fair(struct task_struct *p)
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
+#ifdef CONFIG_SCHED_BORE
+ update_slice_score(se);
+#endif // CONFIG_SCHED_BORE
place_entity(cfs_rq, se, ENQUEUE_INITIAL);
rq_unlock(rq, &rf);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index a3ddf84de430..841a42857952 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -6,7 +6,11 @@
*/
SCHED_FEAT(PLACE_LAG, true)
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+#ifdef CONFIG_SCHED_BORE
+SCHED_FEAT(RUN_TO_PARITY, false)
+#else // CONFIG_SCHED_BORE
SCHED_FEAT(RUN_TO_PARITY, true)
+#endif // CONFIG_SCHED_BORE
/*
* Prefer to schedule the task we woke last (assuming it failed
--
2.43.0