Merge branch 'icount-update' into HEAD

Merge the original development branch due to breakage caused by the
MTTCG merge.

Conflicts:
	cpu-exec.c
	translate-common.c

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 30f3dda24b (stable-2.9)
Author: Paolo Bonzini
Date:   2017-03-03 16:39:18 +01:00

7 changed files with 79 additions and 100 deletions

--- a/cpu-exec.c
+++ b/cpu-exec.c

@@ -186,12 +186,6 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
             cc->set_pc(cpu, last_tb->pc);
         }
     }
-    if (tb_exit == TB_EXIT_REQUESTED) {
-        /* We were asked to stop executing TBs (probably a pending
-         * interrupt.  We've now stopped, so clear the flag.
-         */
-        atomic_set(&cpu->tcg_exit_req, 0);
-    }
     return ret;
 }
@@ -560,8 +554,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         qemu_mutex_unlock_iothread();
     }
 
-    if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
+    /* Finally, check if we need to exit to the main loop.  */
+    if (unlikely(atomic_read(&cpu->exit_request)
+        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
         atomic_set(&cpu->exit_request, 0);
         cpu->exception_index = EXCP_INTERRUPT;
         return true;
@@ -571,62 +566,54 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
 }
 
 static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
-                                    TranslationBlock **last_tb, int *tb_exit,
-                                    SyncClocks *sc)
+                                    TranslationBlock **last_tb, int *tb_exit)
 {
     uintptr_t ret;
-
-    if (unlikely(atomic_read(&cpu->exit_request))) {
-        return;
-    }
+    int32_t insns_left;
 
     trace_exec_tb(tb, tb->pc);
     ret = cpu_tb_exec(cpu, tb);
     tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
     *tb_exit = ret & TB_EXIT_MASK;
-    switch (*tb_exit) {
-    case TB_EXIT_REQUESTED:
+    if (*tb_exit != TB_EXIT_REQUESTED) {
+        *last_tb = tb;
+        return;
+    }
+
+    *last_tb = NULL;
+    insns_left = atomic_read(&cpu->icount_decr.u32);
+    atomic_set(&cpu->icount_decr.u16.high, 0);
+    if (insns_left < 0) {
         /* Something asked us to stop executing chained TBs; just
          * continue round the main loop. Whatever requested the exit
-         * will also have set something else (eg interrupt_request)
-         * which we will handle next time around the loop.  But we
-         * need to ensure the tcg_exit_req read in generated code
-         * comes before the next read of cpu->exit_request or
-         * cpu->interrupt_request.
+         * will also have set something else (eg exit_request or
+         * interrupt_request) which we will handle next time around
+         * the loop.  But we need to ensure the zeroing of icount_decr
+         * comes before the next read of cpu->exit_request
+         * or cpu->interrupt_request.
          */
         smp_mb();
-        *last_tb = NULL;
-        break;
-    case TB_EXIT_ICOUNT_EXPIRED:
-    {
-        /* Instruction counter expired.  */
-#ifdef CONFIG_USER_ONLY
-        abort();
-#else
-        int insns_left = cpu->icount_decr.u32;
-        *last_tb = NULL;
-        if (cpu->icount_extra && insns_left >= 0) {
-            /* Refill decrementer and continue execution.  */
-            cpu->icount_extra += insns_left;
-            insns_left = MIN(0xffff, cpu->icount_extra);
-            cpu->icount_extra -= insns_left;
-            cpu->icount_decr.u16.low = insns_left;
-        } else {
-            if (insns_left > 0) {
-                /* Execute remaining instructions.  */
-                cpu_exec_nocache(cpu, insns_left, tb, false);
-                align_clocks(sc, cpu);
-            }
-            cpu->exception_index = EXCP_INTERRUPT;
-            cpu_loop_exit(cpu);
-        }
-        break;
-#endif
-    }
-    default:
-        *last_tb = tb;
-        break;
+        return;
     }
+
+    /* Instruction counter expired.  */
+    assert(use_icount);
+#ifndef CONFIG_USER_ONLY
+    if (cpu->icount_extra) {
+        /* Refill decrementer and continue execution.  */
+        cpu->icount_extra += insns_left;
+        insns_left = MIN(0xffff, cpu->icount_extra);
+        cpu->icount_extra -= insns_left;
+        cpu->icount_decr.u16.low = insns_left;
+    } else {
+        /* Execute any remaining instructions, then let the main loop
+         * handle the next event.
+         */
+        if (insns_left > 0) {
+            cpu_exec_nocache(cpu, insns_left, tb, false);
+        }
+    }
+#endif
 }
 
 /* main execution loop */
@@ -635,7 +622,7 @@ int cpu_exec(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
     int ret;
-    SyncClocks sc;
+    SyncClocks sc = { 0 };
 
     /* replay_interrupt may need current_cpu */
     current_cpu = cpu;
@@ -683,7 +670,7 @@ int cpu_exec(CPUState *cpu)
         while (!cpu_handle_interrupt(cpu, &last_tb)) {
             TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
-            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
+            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
             /* Try to align the host and virtual clocks
                if the guest is in advance */
             align_clocks(&sc, cpu);
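
The decrementer refill arithmetic in the hunk above is worth seeing in isolation. A minimal standalone sketch (icount_refill is a hypothetical helper, not QEMU code; MIN and the 0xffff cap are taken from the diff):

    #include <stdint.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Fold the leftover budget back into icount_extra, then hand out at
     * most 0xffff instructions for the next execution slice. */
    static uint16_t icount_refill(int64_t *icount_extra, int32_t insns_left)
    {
        *icount_extra += insns_left;
        insns_left = MIN(0xffff, *icount_extra);
        *icount_extra -= insns_left;
        return (uint16_t)insns_left;
    }

The 0xffff cap exists because only the low 16 bits of icount_decr act as a decrementer; the high 16 bits are reserved for the exit-request flag.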

--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h

@@ -6,58 +6,55 @@
 /* Helpers for instruction counting code generation.  */
 
 static int icount_start_insn_idx;
-static TCGLabel *icount_label;
 static TCGLabel *exitreq_label;
 
 static inline void gen_tb_start(TranslationBlock *tb)
 {
-    TCGv_i32 count, flag, imm;
+    TCGv_i32 count, imm;
 
     exitreq_label = gen_new_label();
-    flag = tcg_temp_new_i32();
-    tcg_gen_ld_i32(flag, cpu_env,
-                   offsetof(CPUState, tcg_exit_req) - ENV_OFFSET);
-    tcg_gen_brcondi_i32(TCG_COND_NE, flag, 0, exitreq_label);
-    tcg_temp_free_i32(flag);
-
-    if (!(tb->cflags & CF_USE_ICOUNT)) {
-        return;
+    if (tb->cflags & CF_USE_ICOUNT) {
+        count = tcg_temp_local_new_i32();
+    } else {
+        count = tcg_temp_new_i32();
     }
 
-    icount_label = gen_new_label();
-    count = tcg_temp_local_new_i32();
     tcg_gen_ld_i32(count, cpu_env,
                    -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
 
-    imm = tcg_temp_new_i32();
-    /* We emit a movi with a dummy immediate argument. Keep the insn index
-     * of the movi so that we later (when we know the actual insn count)
-     * can update the immediate argument with the actual insn count.  */
-    icount_start_insn_idx = tcg_op_buf_count();
-    tcg_gen_movi_i32(imm, 0xdeadbeef);
+    if (tb->cflags & CF_USE_ICOUNT) {
+        imm = tcg_temp_new_i32();
+        /* We emit a movi with a dummy immediate argument. Keep the insn index
+         * of the movi so that we later (when we know the actual insn count)
+         * can update the immediate argument with the actual insn count.  */
+        icount_start_insn_idx = tcg_op_buf_count();
+        tcg_gen_movi_i32(imm, 0xdeadbeef);
 
-    tcg_gen_sub_i32(count, count, imm);
-    tcg_temp_free_i32(imm);
+        tcg_gen_sub_i32(count, count, imm);
+        tcg_temp_free_i32(imm);
+    }
 
-    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
-    tcg_gen_st16_i32(count, cpu_env,
-                     -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
+    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, exitreq_label);
+
+    if (tb->cflags & CF_USE_ICOUNT) {
+        tcg_gen_st16_i32(count, cpu_env,
+                         -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
+    }
+
     tcg_temp_free_i32(count);
 }
 
 static void gen_tb_end(TranslationBlock *tb, int num_insns)
 {
+    gen_set_label(exitreq_label);
+    tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
+
     if (tb->cflags & CF_USE_ICOUNT) {
         /* Update the num_insn immediate parameter now that we know
          * the actual insn count.  */
         tcg_set_insn_param(icount_start_insn_idx, 1, num_insns);
-        gen_set_label(icount_label);
-        tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
     }
 
-    gen_set_label(exitreq_label);
-    tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
-
     /* Terminate the linked list.  */
     tcg_ctx.gen_op_buf[tcg_ctx.gen_op_buf[0].prev].next = 0;
 }
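
After this change the TB prologue tests a single value for both exit requests and icount expiry. Roughly, the ops emitted above amount to the following (a pseudo-C sketch of the generated code, not actual TCG output; NUM_INSNS stands for the patched-in immediate):

    /* prologue, icount mode (CF_USE_ICOUNT): */
    count = cpu->icount_decr.u32;
    count -= NUM_INSNS;                 /* patched later via tcg_set_insn_param */
    if (count < 0) goto exitreq_label;  /* budget expired OR high half == -1 */
    cpu->icount_decr.u16.low = count;

    /* prologue, non-icount mode: */
    count = cpu->icount_decr.u32;       /* only the high half is ever set */
    if (count < 0) goto exitreq_label;

    /* ... TB body ... */

    exitreq_label:
    return (uintptr_t)tb + TB_EXIT_REQUESTED;

Both causes land on the same exitreq_label, which is why TB_EXIT_ICOUNT_EXPIRED can be deleted from tcg.h below.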

--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h

@@ -275,11 +275,11 @@ struct qemu_work_item;
  * @stopped: Indicates the CPU has been artificially stopped.
  * @unplug: Indicates a pending CPU unplug request.
  * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
- * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
- *           CPU and return to its top level loop.
  * @singlestep_enabled: Flags for single-stepping.
  * @icount_extra: Instructions until next timer event.
- * @icount_decr: Number of cycles left, with interrupt flag in high bit.
+ * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
+ * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
+ * CPU and return to its top level loop (even in non-icount mode).
+ * This allows a single read-compare-cbranch-write sequence to test
+ * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
@@ -382,10 +382,6 @@ struct CPUState {
     /* TODO Move common fields from CPUArchState here. */
     int cpu_index; /* used by alpha TCG */
     uint32_t halted; /* used by alpha, cris, ppc TCG */
-    union {
-        uint32_t u32;
-        icount_decr_u16 u16;
-    } icount_decr;
     uint32_t can_do_io;
     int32_t exception_index; /* used by m68k TCG */
@@ -398,7 +394,10 @@ struct CPUState {
        offset from AREG0.  Leave this field at the end so as to make the
        (absolute value) offset as small as possible.  This reduces code
        size, especially for hosts without large memory offsets.  */
-    uint32_t tcg_exit_req;
+    union {
+        uint32_t u32;
+        icount_decr_u16 u16;
+    } icount_decr;
 
     bool hax_vcpu_dirty;
     struct hax_vcpu_state *hax_vcpu;
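
The read-compare-cbranch-write trick documented above is easy to demonstrate in isolation. A minimal standalone sketch (not QEMU code; the field order assumes a little-endian host, whereas QEMU's real icount_decr_u16 swaps low/high on big-endian builds):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint16_t low; uint16_t high; } icount_decr_u16;

    union icount_decr {
        uint32_t u32;
        icount_decr_u16 u16;
    };

    int main(void)
    {
        union icount_decr d = { .u32 = 0 };

        d.u16.low = 100;             /* instruction budget remaining */
        assert((int32_t)d.u32 > 0);  /* single sign test: keep executing */

        d.u16.high = -1;             /* asynchronous exit request */
        assert((int32_t)d.u32 < 0);  /* the same sign test now fires */
        return 0;
    }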

--- a/qom/cpu.c
+++ b/qom/cpu.c

@@ -133,7 +133,7 @@ void cpu_exit(CPUState *cpu)
     atomic_set(&cpu->exit_request, 1);
     /* Ensure cpu_exec will see the exit request after TCG has exited.  */
     smp_wmb();
-    atomic_set(&cpu->tcg_exit_req, 1);
+    atomic_set(&cpu->icount_decr.u16.high, -1);
 }
 
 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
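
The smp_wmb() here pairs with the smp_mb() added to cpu_loop_exec_tb in the cpu-exec.c hunk above. A sketch of the intended ordering (illustration only, not QEMU code):

    /* cpu_exit (requester)              cpu_loop_exec_tb (TCG thread)
     *
     * exit_request = 1;                 insns_left = icount_decr.u32;  // sees -1
     * smp_wmb();                        icount_decr.u16.high = 0;      // ack
     * icount_decr.u16.high = -1;        smp_mb();
     *                                   read exit_request;             // sees 1
     */

Because the high-half store is ordered after the exit_request store, a TCG thread that observed the -1 and then issues the full barrier is guaranteed to also observe the exit request.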

--- a/tcg/tcg.h
+++ b/tcg/tcg.h

@@ -1101,7 +1101,6 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
 #define TB_EXIT_MASK 3
 #define TB_EXIT_IDX0 0
 #define TB_EXIT_IDX1 1
-#define TB_EXIT_ICOUNT_EXPIRED 2
 #define TB_EXIT_REQUESTED 3
 
 #ifdef HAVE_TCG_QEMU_TB_EXEC

--- a/translate-all.c
+++ b/translate-all.c

@@ -1930,7 +1930,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
 {
     g_assert(qemu_mutex_iothread_locked());
     cpu->interrupt_request |= mask;
-    cpu->tcg_exit_req = 1;
+    cpu->icount_decr.u16.high = -1;
 }
 
 /*

--- a/translate-common.c
+++ b/translate-common.c

@@ -43,14 +43,11 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
     } else {
-        if (use_icount) {
-            cpu->icount_decr.u16.high = 0xffff;
-            if (!cpu->can_do_io
-                && (mask & ~old_mask) != 0) {
-                cpu_abort(cpu, "Raised interrupt while not in I/O function");
-            }
-        } else {
-            cpu->tcg_exit_req = 1;
+        cpu->icount_decr.u16.high = -1;
+        if (use_icount &&
+            !cpu->can_do_io
+            && (mask & ~old_mask) != 0) {
+            cpu_abort(cpu, "Raised interrupt while not in I/O function");
         }
     }
 }
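
A note on the simplification above: the old icount-only store of 0xffff and the new unconditional store of -1 are the same 16-bit value, so nothing changes in icount mode; the write simply becomes valid in non-icount mode too. Trivially checkable (standalone, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t high = -1;       /* the new unconditional store */
        assert(high == 0xffff);   /* identical to the old icount-mode value */
        return 0;
    }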