Created
April 19, 2014 16:17
-
-
Save t-yuki/11089221 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
From 155f3ef694ee54fc2f9a3ad8f75e3a4e81477e27 Mon Sep 17 00:00:00 2001
From: Yukinari Toyota <[email protected]>
Date: Thu, 13 Feb 2014 09:28:59 +0900
Subject: [PATCH] hugetlb: force bootmem prealloc with contiguous memblock alloc
---
arch/x86/kernel/setup.c | 2 +
mm/hugetlb.c | 53 ++++++++++++++++-------------------------
2 files changed, 20 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ce72964..6d0a681 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -403,6 +403,7 @@ static void __init reserve_initrd(void)
printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
ramdisk_end - 1);
+#if 0 /* force relocate to clear the way to allocate large contiguous memblock */
if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
PFN_DOWN(ramdisk_end))) {
/* All are mapped, easy case */
@@ -410,6 +411,7 @@ static void __init reserve_initrd(void)
initrd_end = initrd_start + ramdisk_size;
return;
}
+#endif
relocate_initrd();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c01cb9f..3698f0c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1263,16 +1263,16 @@ struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
return page;
}
-int __weak alloc_bootmem_huge_page(struct hstate *h)
+static int __init alloc_bootmem_huge_pages_atonce(struct hstate *h, int nr_pages)
{
struct huge_bootmem_page *m;
int nr_nodes, node;
+ unsigned long i;
+ void *addr;
for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
- void *addr;
-
addr = memblock_virt_alloc_try_nid_nopanic(
- huge_page_size(h), huge_page_size(h),
+ huge_page_size(h)*nr_pages, huge_page_size(h),
0, BOOTMEM_ALLOC_ACCESSIBLE, node);
if (addr) {
/*
@@ -1284,22 +1284,25 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
goto found;
}
}
+ WARN_ON(1);
return 0;
found:
- BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
- /* Put them into a private list first because mem_map is not up yet */
- list_add(&m->list, &huge_boot_pages);
- m->hstate = h;
+ for(i = 0; i < nr_pages; i++) {
+ m = addr + huge_page_size(h)*i;
+ BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
+ /* Put them into a private list first because mem_map is not up yet */
+ list_add(&m->list, &huge_boot_pages);
+ m->hstate = h;
+ }
return 1;
}
+
static void prep_compound_huge_page(struct page *page, int order)
{
- if (unlikely(order > (MAX_ORDER - 1)))
- prep_compound_gigantic_page(page, order);
- else
- prep_compound_page(page, order);
+ // since we always use bootmem, call the below even if order is not gigantic
+ prep_compound_gigantic_page(page, order);
}
/* Put bootmem huge pages into the standard lists after mem_map is up */
@@ -1335,28 +1338,13 @@ static void __init gather_bootmem_prealloc(void)
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
- unsigned long i;
-
- for (i = 0; i < h->max_huge_pages; ++i) {
- if (h->order >= MAX_ORDER) {
- if (!alloc_bootmem_huge_page(h))
- break;
- } else if (!alloc_fresh_huge_page(h,
- &node_states[N_MEMORY]))
- break;
+ if (!alloc_bootmem_huge_pages_atonce(h, h->max_huge_pages)) {
+ h->max_huge_pages = 0;
}
- h->max_huge_pages = i;
}
static void __init hugetlb_init_hstates(void)
{
- struct hstate *h;
-
- for_each_hstate(h) {
- /* oversize hugepages were init'ed in early boot */
- if (h->order < MAX_ORDER)
- hugetlb_hstate_alloc_pages(h);
- }
}
static char * __init memfmt(char *buf, unsigned long n)
@@ -2028,12 +2016,7 @@ static int __init hugetlb_nrpages_setup(char *s)
if (sscanf(s, "%lu", mhp) <= 0)
*mhp = 0;
- /*
- * Global state is always initialized later in hugetlb_init.
- * But we need to allocate >= MAX_ORDER hstates here early to still
- * use the bootmem allocator.
- */
- if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
+ if (hugetlb_max_hstate)
hugetlb_hstate_alloc_pages(parsed_hstate);
last_mhp = mhp;
--
1.7.1
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.