x86_32: trim memory by updating e820

When the MTRRs do not cover all of the RAM in the e820 table, we need to
trim the uncovered RAM out of the memory map, i.e. update e820.

Reuse the trimming code that so far was 64-bit only: the CONFIG_X86_64
guard around it in mtrr/main.c is removed.

For this we need to add early_get_cap() and call it from
early_cpu_detect(), and to move mtrr_bp_init() earlier in the boot
sequence (out of identify_boot_cpu() and into setup_arch(), before the
memory map is finalized).
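
The trim decision itself reduces to: find the highest address covered by a
write-back variable MTRR and compare it against the top of RAM. A minimal
standalone sketch, not the kernel code — the MTRR contents below are made
up to match the example further down, and real MTRR ranges are
power-of-two sized:

    #include <stdio.h>
    #include <stdint.h>

    #define MTRR_TYPE_WRBACK 6
    #define PAGE_SHIFT 12

    struct var_mtrr { uint64_t base, size; int type; };

    int main(void)
    {
        /* hypothetical variable-MTRR contents */
        struct var_mtrr mtrrs[] = {
            { 0x000000000ULL, 0x100000000ULL, MTRR_TYPE_WRBACK },
            { 0x100000000ULL, 0x128000000ULL, MTRR_TYPE_WRBACK },
        };
        uint64_t end_pfn = 0x22c000000ULL >> PAGE_SHIFT; /* top of e820 RAM */
        uint64_t highest_addr = 0;
        unsigned int i;

        /* highest address covered by a write-back MTRR */
        for (i = 0; i < sizeof(mtrrs) / sizeof(mtrrs[0]); i++) {
            if (mtrrs[i].type != MTRR_TYPE_WRBACK || mtrrs[i].size == 0)
                continue;
            if (mtrrs[i].base + mtrrs[i].size > highest_addr)
                highest_addr = mtrrs[i].base + mtrrs[i].size;
        }

        /* anything above highest_addr is uncached RAM: reserve it */
        if ((highest_addr >> PAGE_SHIFT) < end_pfn)
            printf("trim %#llx-%#llx to reserved\n",
                   (unsigned long long)highest_addr,
                   (unsigned long long)(end_pfn << PAGE_SHIFT));
        return 0;
    }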

The code successfully trimmed the memory map on Justin's system:

from:

 [    0.000000]  BIOS-e820: 0000000100000000 - 000000022c000000 (usable)

to:

 [    0.000000]   modified: 0000000100000000 - 0000000228000000 (usable)
 [    0.000000]   modified: 0000000228000000 - 000000022c000000 (reserved)
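
That is a 0x22c000000 - 0x228000000 = 0x4000000 byte (64 MB) tail of RAM
that no write-back MTRR covered, now marked reserved instead of usable.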

According to Justin it makes quite a difference:

|  When I boot the box without any trimming it acts like a 286 or 386,
|  takes about 10 minutes to boot (using raptor disks).

Signed-off-by: Yinghai Lu <yinghai.lu@sun.com>
Tested-by: Justin Piszcz <jpiszcz@lucidpixels.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 093af8d7f0, parent 11201e603d
Yinghai Lu, 2008-01-30 13:33:32 +01:00, committed by Ingo Molnar
6 changed files with 64 additions and 10 deletions

--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt

@@ -583,7 +583,7 @@ and is between 256 and 4096 characters. It is defined in the file
 			See drivers/char/README.epca and
 			Documentation/digiepca.txt.
 
-	disable_mtrr_trim	[X86-64, Intel only]
+	disable_mtrr_trim	[X86, Intel and AMD only]
 			By default the kernel will trim any uncacheable
 			memory out of your available memory pool based on
 			MTRR settings.  This parameter disables that behavior,

--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c

@@ -278,6 +278,33 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
 	}
 }
 
+static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+{
+	u32 tfms, xlvl;
+	int ebx;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+	if (have_cpuid_p()) {
+		/* Intel-defined flags: level 0x00000001 */
+		if (c->cpuid_level >= 0x00000001) {
+			u32 capability, excap;
+			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+			c->x86_capability[0] = capability;
+			c->x86_capability[4] = excap;
+		}
+
+		/* AMD-defined flags: level 0x80000001 */
+		xlvl = cpuid_eax(0x80000000);
+		if ((xlvl & 0xffff0000) == 0x80000000) {
+			if (xlvl >= 0x80000001) {
+				c->x86_capability[1] = cpuid_edx(0x80000001);
+				c->x86_capability[6] = cpuid_ecx(0x80000001);
+			}
+		}
+	}
+}
+
 /* Do minimum CPU detection early.
    Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
@@ -306,6 +333,8 @@ static void __init early_cpu_detect(void)
 		early_init_intel(c);
 		break;
 	}
+
+	early_get_cap(c);
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
@@ -485,7 +514,6 @@ void __init identify_boot_cpu(void)
 	identify_cpu(&boot_cpu_data);
 	sysenter_setup();
 	enable_sep_cpu();
-	mtrr_bp_init();
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
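
Why the capability words must be filled in this early: the MTRR init code
keys off cpu_has_mtrr, which tests bit 12 of the CPUID leaf 1 EDX word
that early_get_cap() stores in x86_capability[0]. A hedged userspace
sketch of reading that same word with GCC's <cpuid.h> helper —
illustration only, not kernel code:

    #include <stdio.h>
    #include <cpuid.h>  /* GCC/Clang wrapper for the CPUID instruction */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID leaf 0x00000001: EDX/ECX carry the basic feature flags,
         * the values early_get_cap() keeps in x86_capability[0]/[4] */
        if (__get_cpuid(0x00000001, &eax, &ebx, &ecx, &edx)) {
            printf("x86_capability[0] = %#010x\n", edx);
            printf("MTRR supported: %s\n", (edx >> 12) & 1 ? "yes" : "no");
        }
        return 0;
    }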

--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c

@@ -624,7 +624,6 @@ static struct sysdev_driver mtrr_sysdev_driver = {
 	.resume		= mtrr_restore,
 };
 
-#ifdef CONFIG_X86_64
 static int disable_mtrr_trim;
 
 static int __init disable_mtrr_trim_setup(char *str)
@@ -643,13 +642,10 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
 #define Tom2Enabled (1U << 21)
 #define Tom2ForceMemTypeWB (1U << 22)
 
-static __init int amd_special_default_mtrr(unsigned long end_pfn)
+static __init int amd_special_default_mtrr(void)
 {
 	u32 l, h;
 
-	/* Doesn't apply to memory < 4GB */
-	if (end_pfn <= (0xffffffff >> PAGE_SHIFT))
-		return 0;
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return 0;
 	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
@@ -687,9 +683,14 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 	 * Make sure we only trim uncachable memory on machines that
 	 * support the Intel MTRR architecture:
 	 */
+	if (!is_cpu(INTEL) || disable_mtrr_trim)
+		return 0;
 	rdmsr(MTRRdefType_MSR, def, dummy);
 	def &= 0xff;
-	if (!is_cpu(INTEL) || disable_mtrr_trim || def != MTRR_TYPE_UNCACHABLE)
+	if (def != MTRR_TYPE_UNCACHABLE)
+		return 0;
+
+	if (amd_special_default_mtrr())
 		return 0;
 
 	/* Find highest cached pfn */
@@ -703,8 +704,14 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 			highest_addr = base + size;
 	}
 
-	if (amd_special_default_mtrr(end_pfn))
+	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
+	if (!highest_addr) {
+		printk(KERN_WARNING "***************\n");
+		printk(KERN_WARNING "**** WARNING: likely strange cpu\n");
+		printk(KERN_WARNING "**** MTRRs all blank, cpu in qemu?\n");
+		printk(KERN_WARNING "***************\n");
 		return 0;
+	}
 
 	if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
 		printk(KERN_WARNING "***************\n");
@@ -726,7 +733,6 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
 	return 0;
 }
-#endif
 
 /**
  * mtrr_bp_init - initialize mtrrs on the boot CPU
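
The amd_special_default_mtrr() helper the trim path now consults checks
the Tom2Enabled and Tom2ForceMemTypeWB bits of an AMD MSR (SYSCFG on
K8-family parts; the MSR name is not visible in these hunks). When both
are set, RAM between 4GB and TOM2 is write-back regardless of the
variable MTRRs, so nothing should be trimmed. A standalone sketch of just
that bit test — the privileged MSR read itself is omitted:

    #include <stdio.h>
    #include <stdint.h>

    #define Tom2Enabled        (1U << 21)
    #define Tom2ForceMemTypeWB (1U << 22)

    /* decide from the low half of SYSCFG whether TOM2 forces WB */
    static int tom2_forces_wb(uint32_t syscfg_lo)
    {
        uint32_t mask = Tom2Enabled | Tom2ForceMemTypeWB;
        return (syscfg_lo & mask) == mask;
    }

    int main(void)
    {
        printf("%d\n", tom2_forces_wb(Tom2Enabled | Tom2ForceMemTypeWB)); /* 1 */
        printf("%d\n", tom2_forces_wb(Tom2Enabled));                      /* 0 */
        return 0;
    }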

--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c

@@ -749,3 +749,14 @@ static int __init parse_memmap(char *arg)
 	return 0;
 }
 early_param("memmap", parse_memmap);
+
+void __init update_e820(void)
+{
+	u8 nr_map;
+	nr_map = e820.nr_map;
+	if (sanitize_e820_map(e820.map, &nr_map))
+		return;
+	e820.nr_map = nr_map;
+	printk(KERN_INFO "modified physical RAM map:\n");
+	print_memory_map("modified");
+}
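
Not visible in these hunks, but implied by the new export of
add_memory_region() in the header below: when mtrr_trim_uncached_memory()
decides to trim, it presumably marks the uncovered range reserved via
add_memory_region() and then calls update_e820() to re-sanitize the map
and print the "modified" entries shown in the commit message.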

--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c

@@ -48,6 +48,7 @@
 #include <video/edid.h>
 
+#include <asm/mtrr.h>
 #include <asm/apic.h>
 #include <asm/e820.h>
 #include <asm/mpspec.h>
@@ -758,6 +759,11 @@ void __init setup_arch(char **cmdline_p)
 
 	max_low_pfn = setup_memory();
 
+	/* update e820 for memory not covered by WB MTRRs */
+	mtrr_bp_init();
+	if (mtrr_trim_uncached_memory(max_pfn))
+		max_low_pfn = setup_memory();
+
 #ifdef CONFIG_VMI
 	/*
 	 * Must be after max_low_pfn is determined, and before kernel
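
Note the ordering here: mtrr_bp_init() now runs inside setup_arch(),
after the first setup_memory() call, and a non-zero return from
mtrr_trim_uncached_memory() (apparently meaning "the e820 map was
changed") causes setup_memory() to be rerun so that max_low_pfn and the
bootmem allocator see the trimmed map.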

--- a/include/asm-x86/e820_32.h
+++ b/include/asm-x86/e820_32.h

@@ -19,12 +19,15 @@
 #ifndef __ASSEMBLY__
 
 extern struct e820map e820;
+extern void update_e820(void);
 
 extern int e820_all_mapped(unsigned long start, unsigned long end,
 			   unsigned type);
 extern int e820_any_mapped(u64 start, u64 end, unsigned type);
 extern void find_max_pfn(void);
 extern void register_bootmem_low_pages(unsigned long max_low_pfn);
+extern void add_memory_region(unsigned long long start,
+			      unsigned long long size, int type);
 extern void e820_register_memory(void);
 extern void limit_regions(unsigned long long size);
 extern void print_memory_map(char *who);