diff --git a/fs/inode.c b/fs/inode.c
index fef457a..fb4a0a0 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -753,6 +753,39 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 		return LRU_ROTATE;
 	}
 
+	/* Page protection only works in reclaimer */
+	if (inode->i_data.nrpages && current->reclaim_state) {
+		struct bdi_writeback *wb = inode_to_wb(inode);
+		struct mem_cgroup *memcg;
+		unsigned long cgroup_size;
+		unsigned long protect;
+
+		if (wb) {
+			rcu_read_lock();
+			memcg = mem_cgroup_from_css(wb->memcg_css);
+			if (memcg) {
+				cgroup_size = mem_cgroup_size(memcg);
+				protect = memcg->in_low_reclaim ? memcg->memory.emin :
+					  max(memcg->memory.emin, memcg->memory.elow);
+				if (inode->i_data.nrpages + protect >
+				    cgroup_size) {
+					rcu_read_unlock();
+					spin_unlock(&inode->i_lock);
+					return LRU_ROTATE;
+				}
+			}
+			rcu_read_unlock();
+		}
+	}
+
 	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
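
The hunk above keeps an inode on the LRU whenever its page cache, added to the owning cgroup's effective protection, still exceeds the cgroup's current size. Below is a rough userspace sketch of that decision, not part of the patch; the struct, field names, and numbers are invented purely for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the memcg fields the patch reads. */
struct mock_memcg {
	unsigned long usage;	/* mem_cgroup_size(): pages currently charged */
	unsigned long emin;	/* effective memory.min */
	unsigned long elow;	/* effective memory.low */
	bool in_low_reclaim;	/* set while reclaiming despite memory.low */
};

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/*
 * Mirror of the added check: rotate (keep) the inode when freeing its
 * page cache would push the cgroup below its effective protection.
 */
static bool should_rotate(const struct mock_memcg *memcg, unsigned long nrpages)
{
	unsigned long protect;

	protect = memcg->in_low_reclaim ? memcg->emin
					: max_ul(memcg->emin, memcg->elow);
	return nrpages + protect > memcg->usage;
}

int main(void)
{
	struct mock_memcg cg = {
		.usage = 1000, .emin = 200, .elow = 900, .in_low_reclaim = false,
	};

	/* 150 cached pages: 150 + max(200, 900) > 1000, so the inode is rotated. */
	printf("normal reclaim: rotate=%d\n", should_rotate(&cg, 150));

	/* In low reclaim only memory.min counts: 150 + 200 <= 1000, reclaimable. */
	cg.in_low_reclaim = true;
	printf("low reclaim:    rotate=%d\n", should_rotate(&cg, 150));
	return 0;
}

With these made-up numbers the same inode that is rotated under normal reclaim becomes reclaimable once in_low_reclaim is set, since only memory.min is honoured in that window.
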
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index a7a0a1a5..0526d30 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -229,20 +229,28 @@ struct mem_cgroup {
 	/*
 	 * Should the accounting and control be hierarchical, per subtree?
 	 */
-	bool use_hierarchy;
+	unsigned int use_hierarchy: 1;
 
 	/*
 	 * Should the OOM killer kill all belonging tasks, had it kill one?
 	 */
-	bool oom_group;
+	unsigned int oom_group: 1;
 
 	/* protected by memcg_oom_lock */
-	bool oom_lock;
-	int under_oom;
+	unsigned int oom_lock: 1;
+	unsigned int under_oom: 1;
 
-	int swappiness;
 	/* OOM-Killer disable */
-	int oom_kill_disable;
+	unsigned int oom_kill_disable: 1;
+
+	/* Legacy tcp memory accounting */
+	unsigned int tcpmem_active: 1;
+	unsigned int tcpmem_pressure: 1;
+
+	/* Soft protection will be ignored if it's true */
+	unsigned int in_low_reclaim: 1;
+
+	int swappiness;
 
 	/* memory.events and memory.events.local */
 	struct cgroup_file events_file;
@@ -297,9 +305,6 @@ struct mem_cgroup {
 	unsigned long socket_pressure;
 
-	/* Legacy tcp memory accounting */
-	bool tcpmem_active;
-	int tcpmem_pressure;
 
 #ifdef CONFIG_MEMCG_KMEM
 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
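
The memcontrol.h hunks fold the existing boolean/int flags into single-bit bitfields, presumably so the new in_low_reclaim flag can be added without growing struct mem_cgroup. The standalone sketch below (invented struct names, not kernel code) only illustrates the space effect of that packing.

#include <stdio.h>

/* Roughly how the flags were laid out before the patch. */
struct flags_before {
	_Bool use_hierarchy;
	_Bool oom_group;
	_Bool oom_lock;
	int under_oom;
	int oom_kill_disable;
	_Bool tcpmem_active;
	int tcpmem_pressure;
};

/* After the patch: each flag is a single bit of one unsigned int. */
struct flags_after {
	unsigned int use_hierarchy: 1;
	unsigned int oom_group: 1;
	unsigned int oom_lock: 1;
	unsigned int under_oom: 1;
	unsigned int oom_kill_disable: 1;
	unsigned int tcpmem_active: 1;
	unsigned int tcpmem_pressure: 1;
	unsigned int in_low_reclaim: 1;	/* the new flag fits in the same word */
};

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct flags_before), sizeof(struct flags_after));
	return 0;
}
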
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5a6445e..144e1ae 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2663,9 +2663,11 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 			 * of reclaimable memory from other cgroups.
 			 */
 			if (!sc->memcg_low_reclaim) {
+				memcg->in_low_reclaim = 0;
 				sc->memcg_low_skipped = 1;
 				continue;
 			}
+			memcg->in_low_reclaim = 1;
 			memcg_memory_event(memcg, MEMCG_LOW);
 			break;
 		case MEMCG_PROT_NONE:
@@ -2687,6 +2689,9 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
 			    sc->priority);
 
+		if (memcg->in_low_reclaim)
+			memcg->in_low_reclaim = 0;
+
 		/* Record the group's reclaim efficiency */
 		vmpressure(sc->gfp_mask, memcg, false,
 			   sc->nr_scanned - scanned,
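
The vmscan.c hunks give in_low_reclaim a narrow lifetime: it is set only when a memory.low-protected cgroup is actually being reclaimed and cleared again right after shrink_slab() for that cgroup, which is exactly the window in which the inode-LRU check above falls back to memory.min-only protection. A rough standalone sketch of that lifecycle, with mock types and names that are not from the kernel:

#include <stdbool.h>
#include <stdio.h>

/* Mock of the bits of scan_control and mem_cgroup the hunks touch. */
struct mock_scan_control {
	bool memcg_low_reclaim;		/* second pass: allowed to dip into memory.low */
	bool memcg_low_skipped;
};

struct mock_memcg {
	const char *name;
	bool protected_by_low;		/* stands in for MEMCG_PROT_LOW */
	bool in_low_reclaim;
};

/* Stand-in for shrink_lruvec()/shrink_slab(); here it just logs. */
static void mock_shrink(struct mock_memcg *memcg)
{
	printf("  reclaiming %s (in_low_reclaim=%d)\n",
	       memcg->name, memcg->in_low_reclaim);
}

static void shrink_memcgs(struct mock_scan_control *sc,
			  struct mock_memcg *memcgs, int n)
{
	for (int i = 0; i < n; i++) {
		struct mock_memcg *memcg = &memcgs[i];

		if (memcg->protected_by_low) {
			if (!sc->memcg_low_reclaim) {
				/* Skipped: protection honoured, flag stays clear. */
				memcg->in_low_reclaim = false;
				sc->memcg_low_skipped = true;
				continue;
			}
			/* Reclaiming despite memory.low: raise the flag. */
			memcg->in_low_reclaim = true;
		}

		mock_shrink(memcg);

		/* Mirrors the hunk after shrink_slab(): drop the flag again. */
		if (memcg->in_low_reclaim)
			memcg->in_low_reclaim = false;
	}
}

int main(void)
{
	struct mock_memcg memcgs[] = {
		{ .name = "protected", .protected_by_low = true },
		{ .name = "unprotected", .protected_by_low = false },
	};
	struct mock_scan_control sc = { 0 };

	puts("first pass (memory.low respected):");
	shrink_memcgs(&sc, memcgs, 2);

	puts("second pass (memcg_low_reclaim set):");
	sc.memcg_low_reclaim = true;
	shrink_memcgs(&sc, memcgs, 2);
	return 0;
}
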