@laoar
Last active October 22, 2017 10:33
memcg-aware slab
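
This patch makes the memcg kmem charge/uncharge paths aware of the slab cache a page belongs to (the surrounding code suggests a ~4.13-era kernel). The kmem_cache pointer is threaded down into memcg_kmem_charge_memcg() and memcg_kmem_uncharge(); slab pages are then accounted per-memcg as reclaimable or unreclaimable (keyed off SLAB_RECLAIM_ACCOUNT), and the two counters are exported as slab_reclaimable / slab_unreclaimable in memory.stat. Callers charging or uncharging non-slab kmem pages pass NULL for the cache.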
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -145,7 +145,7 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
         if (page_count(page) == 1) {
                 if (memcg_kmem_enabled())
-                        memcg_kmem_uncharge(page, 0);
+                        memcg_kmem_uncharge(page, 0, NULL);
                 __SetPageLocked(page);
                 return 0;
         }
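
anon_pipe_buf_steal() is uncharging a plain accounted page, not a slab page, so it passes NULL for the new kmem_cache argument; the page allocator below does the same.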
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 69966c4..3334ff1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1098,9 +1098,9 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-                            struct mem_cgroup *memcg);
+                            struct kmem_cache *s);
 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void memcg_kmem_uncharge(struct page *page, int order);
+void memcg_kmem_uncharge(struct page *page, int order, struct kmem_cache *s);
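
With this signature change, memcg_kmem_charge_memcg() derives the target mem_cgroup itself: from s->memcg_params.memcg when a slab cache is passed, or from the current task's mm when s is NULL.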
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1133,6 +1133,8 @@ unsigned int memcg1_stats[] = {
         NR_FILE_DIRTY,
         NR_WRITEBACK,
         MEMCG_SWAP,
+        NR_SLAB_RECLAIMABLE,
+        NR_SLAB_UNRECLAIMABLE,
 };
 
 static const char *const memcg1_stat_names[] = {
@@ -1144,6 +1146,8 @@ static const char *const memcg1_stat_names[] = {
         "dirty",
         "writeback",
         "swap",
+        "slab_reclaimable",
+        "slab_unreclaimable",
 };
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
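
With the two entries appended to memcg1_stats[] and memcg1_stat_names[], a cgroup-v1 memory.stat (e.g. /sys/fs/cgroup/memory/<group>/memory.stat) would grow two lines, something like the following (values in bytes, made up for illustration):

        slab_reclaimable 413696
        slab_unreclaimable 1048576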
@@ -2321,11 +2325,17 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
  * Returns 0 on success, an error code on failure.
  */
 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-                            struct mem_cgroup *memcg)
+                            struct kmem_cache *s)
 {
         unsigned int nr_pages = 1 << order;
         struct page_counter *counter;
         int ret;
+        struct mem_cgroup *memcg;
+
+        if (s)
+                memcg = s->memcg_params.memcg;
+        else
+                memcg = get_mem_cgroup_from_mm(current->mm);
 
         ret = try_charge(memcg, gfp, nr_pages);
         if (ret)
@@ -2339,6 +2349,13 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
         page->mem_cgroup = memcg;
 
+        if (s) {
+                if (s->flags & SLAB_RECLAIM_ACCOUNT)
+                        __this_cpu_add(memcg->stat->count[NR_SLAB_RECLAIMABLE], nr_pages);
+                else
+                        __this_cpu_add(memcg->stat->count[NR_SLAB_UNRECLAIMABLE], nr_pages);
+        }
+
         return 0;
 }
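
nr_pages is 1 << order, so the accounting happens at slab-page granularity here, mirroring how the global NR_SLAB_RECLAIMABLE / NR_SLAB_UNRECLAIMABLE node counters are maintained.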
@@ -2360,7 +2377,7 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
         memcg = get_mem_cgroup_from_mm(current->mm);
         if (!mem_cgroup_is_root(memcg)) {
-                ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+                ret = memcg_kmem_charge_memcg(page, gfp, order, NULL);
                 if (!ret)
                         __SetPageKmemcg(page);
         }
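
One wrinkle in this path: memcg_kmem_charge() still calls get_mem_cgroup_from_mm() to test mem_cgroup_is_root(), so with s == NULL the lookup now happens a second time inside memcg_kmem_charge_memcg(). Since get_mem_cgroup_from_mm() takes a css reference on each call, the inner lookup's reference appears to need a matching css_put() somewhere.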
@@ -2372,7 +2389,7 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
  * @page: page to uncharge
  * @order: allocation order
  */
-void memcg_kmem_uncharge(struct page *page, int order)
+void memcg_kmem_uncharge(struct page *page, int order, struct kmem_cache *s)
 {
         struct mem_cgroup *memcg = page->mem_cgroup;
         unsigned int nr_pages = 1 << order;
@@ -2389,6 +2406,13 @@ void memcg_kmem_uncharge(struct page *page, int order)
         if (do_memsw_account())
                 page_counter_uncharge(&memcg->memsw, nr_pages);
 
+        if (s) {
+                if (s->flags & SLAB_RECLAIM_ACCOUNT)
+                        __this_cpu_sub(memcg->stat->count[NR_SLAB_RECLAIMABLE], nr_pages);
+                else
+                        __this_cpu_sub(memcg->stat->count[NR_SLAB_UNRECLAIMABLE], nr_pages);
+        }
+
         page->mem_cgroup = NULL;
 
         /* slab pages do not have PageKmemcg flag set */
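
The SLAB_RECLAIM_ACCOUNT dispatch is now duplicated between the charge and uncharge paths. A hypothetical follow-up (not part of this patch; it assumes the same percpu memcg->stat->count[] layout the hunks above use) could fold both sides into one signed helper:

/* Hypothetical helper, not in the patch: the charge side passes
 * +nr_pages, the uncharge side passes -nr_pages. */
static void memcg_slab_stat_mod(struct mem_cgroup *memcg,
                                struct kmem_cache *s, long nr_pages)
{
        int idx = (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                        NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;

        __this_cpu_add(memcg->stat->count[idx], nr_pages);
}

Both call sites would keep their if (s) guard and become memcg_slab_stat_mod(memcg, s, nr_pages) and memcg_slab_stat_mod(memcg, s, -nr_pages) respectively.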
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 77e4d3c..81ad71f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1040,7 +1040,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         if (PageMappingFlags(page))
                 page->mapping = NULL;
         if (memcg_kmem_enabled() && PageKmemcg(page))
-                memcg_kmem_uncharge(page, order);
+                memcg_kmem_uncharge(page, order, NULL);
         if (check_free)
                 bad += free_pages_check(page);
         if (bad)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -279,7 +279,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
                 return 0;
         if (is_root_cache(s))
                 return 0;
-        return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
+        return memcg_kmem_charge_memcg(page, gfp, order, s);
 }
 
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
@@ -287,7 +287,7 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 {
         if (!memcg_kmem_enabled())
                 return;
-        memcg_kmem_uncharge(page, order);
+        memcg_kmem_uncharge(page, order, s);
 }
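
For completeness, one way to exercise the new counters is a small test module (a sketch, not part of the patch; the cache name and object size are arbitrary). Loaded from a task running inside a non-root memcg with kmem accounting enabled, the allocation below should show up in that memcg's memory.stat under slab_reclaimable (or slab_unreclaimable if SLAB_RECLAIM_ACCOUNT is dropped):

#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *test_cache;
static void *test_obj;

static int __init memcg_slab_test_init(void)
{
        /* SLAB_ACCOUNT charges allocations to the current memcg;
         * SLAB_RECLAIM_ACCOUNT selects the reclaimable counter. */
        test_cache = kmem_cache_create("memcg_slab_test", 256, 0,
                                       SLAB_ACCOUNT | SLAB_RECLAIM_ACCOUNT,
                                       NULL);
        if (!test_cache)
                return -ENOMEM;

        test_obj = kmem_cache_alloc(test_cache, GFP_KERNEL);
        if (!test_obj) {
                kmem_cache_destroy(test_cache);
                return -ENOMEM;
        }
        return 0;
}

static void __exit memcg_slab_test_exit(void)
{
        kmem_cache_free(test_cache, test_obj);
        kmem_cache_destroy(test_cache);
}

module_init(memcg_slab_test_init);
module_exit(memcg_slab_test_exit);
MODULE_LICENSE("GPL");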