tcg: Fold CPUTLBWindow into CPUTLBDesc

Both structures are allocated once per mmu_idx.
There is no reason for them to be separate.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson
Date:   2019-03-22 08:36:40 -07:00
commit 79e4208506
parent 19735c837a
2 changed files with 16 additions and 25 deletions
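For orientation, here is a rough sketch of CPUTLBDesc after the fold, assembled from the diff below (the multi-line comment describing the large-page region is abbreviated to a single line here):

typedef struct CPUTLBDesc {
    /* large-page region covered by this tlb; see the full comment in the diff */
    target_ulong large_page_addr;
    target_ulong large_page_mask;
    /* host time (in ns) at the beginning of the time window */
    int64_t window_begin_ns;
    /* maximum number of entries observed in the window */
    size_t window_max_entries;
    /* The next index to use in the tlb victim table. */
    size_t vindex;
    size_t n_used_entries;
} CPUTLBDesc;

With the window fields inlined, tlb_window_reset() takes the CPUTLBDesc directly, so callers pass desc rather than &desc->window.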


@@ -79,11 +79,11 @@ static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
     return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS);
 }
 
-static void tlb_window_reset(CPUTLBWindow *window, int64_t ns,
+static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                              size_t max_entries)
 {
-    window->begin_ns = ns;
-    window->max_entries = max_entries;
+    desc->window_begin_ns = ns;
+    desc->window_max_entries = max_entries;
 }
 
 static void tlb_dyn_init(CPUArchState *env)
@@ -94,7 +94,7 @@ static void tlb_dyn_init(CPUArchState *env)
         CPUTLBDesc *desc = &env->tlb_d[i];
         size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
 
-        tlb_window_reset(&desc->window, get_clock_realtime(), 0);
+        tlb_window_reset(desc, get_clock_realtime(), 0);
         desc->n_used_entries = 0;
         env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
         env->tlb_table[i] = g_new(CPUTLBEntry, n_entries);
@@ -151,18 +151,18 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
     int64_t now = get_clock_realtime();
     int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
-    bool window_expired = now > desc->window.begin_ns + window_len_ns;
+    bool window_expired = now > desc->window_begin_ns + window_len_ns;
 
-    if (desc->n_used_entries > desc->window.max_entries) {
-        desc->window.max_entries = desc->n_used_entries;
+    if (desc->n_used_entries > desc->window_max_entries) {
+        desc->window_max_entries = desc->n_used_entries;
     }
-    rate = desc->window.max_entries * 100 / old_size;
+    rate = desc->window_max_entries * 100 / old_size;
 
     if (rate > 70) {
         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
     } else if (rate < 30 && window_expired) {
-        size_t ceil = pow2ceil(desc->window.max_entries);
-        size_t expected_rate = desc->window.max_entries * 100 / ceil;
+        size_t ceil = pow2ceil(desc->window_max_entries);
+        size_t expected_rate = desc->window_max_entries * 100 / ceil;
 
         /*
          * Avoid undersizing when the max number of entries seen is just below
@@ -182,7 +182,7 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
 
     if (new_size == old_size) {
         if (window_expired) {
-            tlb_window_reset(&desc->window, now, desc->n_used_entries);
+            tlb_window_reset(desc, now, desc->n_used_entries);
         }
         return;
     }
@@ -190,7 +190,7 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
     g_free(env->tlb_table[mmu_idx]);
     g_free(env->iotlb[mmu_idx]);
 
-    tlb_window_reset(&desc->window, now, 0);
+    tlb_window_reset(desc, now, 0);
     /* desc->n_used_entries is cleared by the caller */
     env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
     env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);


@@ -127,18 +127,6 @@ typedef struct CPUIOTLBEntry {
     MemTxAttrs attrs;
 } CPUIOTLBEntry;
 
-/**
- * struct CPUTLBWindow
- * @begin_ns: host time (in ns) at the beginning of the time window
- * @max_entries: maximum number of entries observed in the window
- *
- * See also: tlb_mmu_resize_locked()
- */
-typedef struct CPUTLBWindow {
-    int64_t begin_ns;
-    size_t max_entries;
-} CPUTLBWindow;
-
 typedef struct CPUTLBDesc {
     /*
      * Describe a region covering all of the large pages allocated
@@ -148,9 +136,12 @@ typedef struct CPUTLBDesc {
      */
     target_ulong large_page_addr;
     target_ulong large_page_mask;
+    /* host time (in ns) at the beginning of the time window */
+    int64_t window_begin_ns;
+    /* maximum number of entries observed in the window */
+    size_t window_max_entries;
     /* The next index to use in the tlb victim table. */
     size_t vindex;
-    CPUTLBWindow window;
     size_t n_used_entries;
 } CPUTLBDesc;