vmm: try to reserve lower virtual address space

Note: this approach is not guaranteed to be robust and is expected to break in the future.

Related to issue #365, issue #519, issue #666
This commit is contained in:
Alexander Boettcher 2013-03-05 11:29:56 +01:00 committed by Norman Feske
parent 40e27b2379
commit e85e11bec1
1 changed file with 29 additions and 11 deletions

View File

@@ -222,6 +222,8 @@ class Guest_memory
remaining_size(backing_store_size-fb_size)
{
try {
/* free up preliminary mapping to reserve lower address space */
Genode::env()->rm_session()->detach(PAGE_SIZE);
/*
* Attach reservation to the beginning of the local address space.
@@ -238,7 +240,9 @@ class Guest_memory
_fb_addr = Genode::env()->rm_session()->attach_at(_fb_ds,
((Genode::addr_t) _local_addr)+backing_store_size-fb_size);
} catch (Genode::Rm_session::Region_conflict) { }
} catch (Genode::Rm_session::Region_conflict) {
PERR("region conflict");
}
}
~Guest_memory()
@@ -1510,21 +1514,33 @@ class Machine : public StaticReceiver<Machine>
extern unsigned long _prog_img_beg; /* begin of program image (link address) */
extern unsigned long _prog_img_end; /* end of program image */
namespace Genode { Rm_session *env_context_area_rm_session(); }
namespace Genode {
/* declared by the Genode environment; implemented elsewhere */
Rm_session *env_context_area_rm_session();
/* __attribute__((constructor)) makes this run before main() is entered */
void __attribute__((constructor)) init_context_area_vmm() {
/**
 * XXX Invoke env_context_area_rm_session() to make sure the virtual
 * region of the context area is reserved at core. Typically this
 * happens the first time a thread is allocated. Unfortunately,
 * beforehand the VMM may try to grab the same region for
 * large VM sizes.
 */
env_context_area_rm_session();
}
}
int main(int argc, char **argv)
{
Genode::printf("--- Vancouver VMM starting ---\n");
/**
* XXX Invoke env_context_rm_session to make sure the virtual region of
* the context area is reserved at core. Typically this happens when
* the first time a thread is allocated.
* Unfortunately, beforehand the VMM tries to grab the same region for
* large VM sizes.
/*
* Reserve complete lower address space so that nobody else can take it.
* When we found out how much memory we actually should use for the VM,
* the reservation is adjusted to the real size.
*/
Genode::env_context_area_rm_session();
Genode::Rm_connection pre_reservation(0, Genode::Native_config::context_area_virtual_base());
Genode::env()->rm_session()->attach_at(pre_reservation.dataspace(),
PAGE_SIZE, 0, PAGE_SIZE);
Genode::printf("--- Vancouver VMM starting ---\n");
/* request max available memory */
Genode::addr_t vm_size = Genode::env()->ram_session()->avail();
@@ -1544,6 +1560,8 @@ int main(int argc, char **argv)
} catch (...) { }
static Guest_memory guest_memory(vm_size, fb_size);
/* free up temporary rm_session */
Genode::env()->parent()->close(pre_reservation.cap());
/* diagnostic messages */
Genode::printf("[0x%08lx, 0x%08lx) - %lu MiB - guest physical memory\n",