linux-user: Fix mmap_find_vma returning invalid addresses.
Don't return addresses that aren't properly aligned for the guest, e.g. when the guest has a larger page size than the host. Don't return addresses that are outside the virtual address space for the target, by paying proper attention to the h2g/g2h macros. At the same time, place the default mapping base for 64-bit guests (on 64-bit hosts) outside the low 4G. Consistently interpret mmap_next_start in the guest address space. Signed-off-by: Richard Henderson <rth@twiddle.net>
This commit is contained in:
parent
7bc7b099df
commit
14f24e1465
|
@ -39,8 +39,8 @@
|
||||||
char *exec_path;
|
char *exec_path;
|
||||||
|
|
||||||
int singlestep;
|
int singlestep;
|
||||||
#if defined(CONFIG_USE_GUEST_BASE)
|
|
||||||
unsigned long mmap_min_addr;
|
unsigned long mmap_min_addr;
|
||||||
|
#if defined(CONFIG_USE_GUEST_BASE)
|
||||||
unsigned long guest_base;
|
unsigned long guest_base;
|
||||||
int have_guest_base;
|
int have_guest_base;
|
||||||
#endif
|
#endif
|
||||||
|
@ -2812,16 +2812,14 @@ int main(int argc, char **argv, char **envp)
|
||||||
* proper page alignment for guest_base.
|
* proper page alignment for guest_base.
|
||||||
*/
|
*/
|
||||||
guest_base = HOST_PAGE_ALIGN(guest_base);
|
guest_base = HOST_PAGE_ALIGN(guest_base);
|
||||||
|
#endif /* CONFIG_USE_GUEST_BASE */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Read in mmap_min_addr kernel parameter. This value is used
|
* Read in mmap_min_addr kernel parameter. This value is used
|
||||||
* When loading the ELF image to determine whether guest_base
|
* When loading the ELF image to determine whether guest_base
|
||||||
* is needed.
|
* is needed. It is also used in mmap_find_vma.
|
||||||
*
|
|
||||||
* When user has explicitly set the guest base, we skip this
|
|
||||||
* test.
|
|
||||||
*/
|
*/
|
||||||
if (!have_guest_base) {
|
{
|
||||||
FILE *fp;
|
FILE *fp;
|
||||||
|
|
||||||
if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
|
if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
|
||||||
|
@ -2833,7 +2831,6 @@ int main(int argc, char **argv, char **envp)
|
||||||
fclose(fp);
|
fclose(fp);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_USE_GUEST_BASE */
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Prepare copy of argv vector for target.
|
* Prepare copy of argv vector for target.
|
||||||
|
|
|
@ -264,12 +264,15 @@ static int mmap_frag(abi_ulong real_start,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if defined(__CYGWIN__)
|
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
|
||||||
|
# define TASK_UNMAPPED_BASE (1ul << 38)
|
||||||
|
#elif defined(__CYGWIN__)
|
||||||
/* Cygwin doesn't have a whole lot of address space. */
|
/* Cygwin doesn't have a whole lot of address space. */
|
||||||
static abi_ulong mmap_next_start = 0x18000000;
|
# define TASK_UNMAPPED_BASE 0x18000000
|
||||||
#else
|
#else
|
||||||
static abi_ulong mmap_next_start = 0x40000000;
|
# define TASK_UNMAPPED_BASE 0x40000000
|
||||||
#endif
|
#endif
|
||||||
|
static abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
|
||||||
|
|
||||||
unsigned long last_brk;
|
unsigned long last_brk;
|
||||||
|
|
||||||
|
@ -281,19 +284,24 @@ unsigned long last_brk;
|
||||||
*/
|
*/
|
||||||
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
|
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
|
||||||
{
|
{
|
||||||
void *ptr;
|
void *ptr, *prev;
|
||||||
abi_ulong addr;
|
abi_ulong addr;
|
||||||
|
int wrapped, repeat;
|
||||||
size = HOST_PAGE_ALIGN(size);
|
|
||||||
start &= qemu_host_page_mask;
|
|
||||||
|
|
||||||
/* If 'start' == 0, then a default start address is used. */
|
/* If 'start' == 0, then a default start address is used. */
|
||||||
if (start == 0)
|
if (start == 0) {
|
||||||
start = mmap_next_start;
|
start = mmap_next_start;
|
||||||
|
} else {
|
||||||
|
start &= qemu_host_page_mask;
|
||||||
|
}
|
||||||
|
|
||||||
|
size = HOST_PAGE_ALIGN(size);
|
||||||
|
|
||||||
addr = start;
|
addr = start;
|
||||||
|
wrapped = repeat = 0;
|
||||||
|
prev = 0;
|
||||||
|
|
||||||
for(;;) {
|
for (;; prev = ptr) {
|
||||||
/*
|
/*
|
||||||
* Reserve needed memory area to avoid a race.
|
* Reserve needed memory area to avoid a race.
|
||||||
* It should be discarded using:
|
* It should be discarded using:
|
||||||
|
@ -301,31 +309,77 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
|
||||||
* - mremap() with MREMAP_FIXED flag
|
* - mremap() with MREMAP_FIXED flag
|
||||||
* - shmat() with SHM_REMAP flag
|
* - shmat() with SHM_REMAP flag
|
||||||
*/
|
*/
|
||||||
ptr = mmap((void *)(unsigned long)addr, size, PROT_NONE,
|
ptr = mmap(g2h(addr), size, PROT_NONE,
|
||||||
MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
|
MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
|
||||||
|
|
||||||
/* ENOMEM, if host address space has no memory */
|
/* ENOMEM, if host address space has no memory */
|
||||||
if (ptr == MAP_FAILED)
|
if (ptr == MAP_FAILED) {
|
||||||
return (abi_ulong)-1;
|
return (abi_ulong)-1;
|
||||||
|
}
|
||||||
|
|
||||||
/* If address fits target address space we've found what we need */
|
/* Count the number of sequential returns of the same address.
|
||||||
if ((unsigned long)ptr + size - 1 <= (abi_ulong)-1)
|
This is used to modify the search algorithm below. */
|
||||||
break;
|
repeat = (ptr == prev ? repeat + 1 : 0);
|
||||||
|
|
||||||
/* Unmap and try again with new page */
|
if (h2g_valid(ptr + size - 1)) {
|
||||||
|
addr = h2g(ptr);
|
||||||
|
|
||||||
|
if ((addr & ~TARGET_PAGE_MASK) == 0) {
|
||||||
|
/* Success. */
|
||||||
|
if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
|
||||||
|
mmap_next_start = addr + size;
|
||||||
|
}
|
||||||
|
return addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* The address is not properly aligned for the target. */
|
||||||
|
switch (repeat) {
|
||||||
|
case 0:
|
||||||
|
/* Assume the result that the kernel gave us is the
|
||||||
|
first with enough free space, so start again at the
|
||||||
|
next higher target page. */
|
||||||
|
addr = TARGET_PAGE_ALIGN(addr);
|
||||||
|
break;
|
||||||
|
case 1:
|
||||||
|
/* Sometimes the kernel decides to perform the allocation
|
||||||
|
at the top end of memory instead. */
|
||||||
|
addr &= TARGET_PAGE_MASK;
|
||||||
|
break;
|
||||||
|
case 2:
|
||||||
|
/* Start over at low memory. */
|
||||||
|
addr = 0;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
/* Fail. This unaligned block must be the last. */
|
||||||
|
addr = -1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
/* Since the result the kernel gave didn't fit, start
|
||||||
|
again at low memory. If any repetition, fail. */
|
||||||
|
addr = (repeat ? -1 : 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Unmap and try again. */
|
||||||
munmap(ptr, size);
|
munmap(ptr, size);
|
||||||
addr += qemu_host_page_size;
|
|
||||||
|
|
||||||
/* ENOMEM if we check whole of target address space */
|
/* ENOMEM if we checked the whole of the target address space. */
|
||||||
if (addr == start)
|
if (addr == -1ul) {
|
||||||
return (abi_ulong)-1;
|
return (abi_ulong)-1;
|
||||||
|
} else if (addr == 0) {
|
||||||
|
if (wrapped) {
|
||||||
|
return (abi_ulong)-1;
|
||||||
|
}
|
||||||
|
wrapped = 1;
|
||||||
|
/* Don't actually use 0 when wrapping, instead indicate
|
||||||
|
that we'd truly like an allocation in low memory. */
|
||||||
|
addr = (mmap_min_addr > TARGET_PAGE_SIZE
|
||||||
|
? TARGET_PAGE_ALIGN(mmap_min_addr)
|
||||||
|
: TARGET_PAGE_SIZE);
|
||||||
|
} else if (wrapped && addr >= start) {
|
||||||
|
return (abi_ulong)-1;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Update default start address */
|
|
||||||
if (start == mmap_next_start)
|
|
||||||
mmap_next_start = (unsigned long)ptr + size;
|
|
||||||
|
|
||||||
return h2g(ptr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* NOTE: all the constants are the HOST ones */
|
/* NOTE: all the constants are the HOST ones */
|
||||||
|
|
|
@ -133,9 +133,7 @@ void init_task_state(TaskState *ts);
|
||||||
void task_settid(TaskState *);
|
void task_settid(TaskState *);
|
||||||
void stop_all_tasks(void);
|
void stop_all_tasks(void);
|
||||||
extern const char *qemu_uname_release;
|
extern const char *qemu_uname_release;
|
||||||
#if defined(CONFIG_USE_GUEST_BASE)
|
|
||||||
extern unsigned long mmap_min_addr;
|
extern unsigned long mmap_min_addr;
|
||||||
#endif
|
|
||||||
|
|
||||||
/* ??? See if we can avoid exposing so much of the loader internals. */
|
/* ??? See if we can avoid exposing so much of the loader internals. */
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in a new issue