cpu: Move running field to CPUState

Pass CPUState to cpu_exec_{start,end}() functions.

Signed-off-by: Andreas Färber <afaerber@suse.de>
This commit is contained in:
Andreas Färber 2012-12-17 07:34:52 +01:00
parent 0d34282fdd
commit 0315c31cda
3 changed files with 24 additions and 16 deletions

View file

@@ -191,7 +191,6 @@ typedef struct CPUWatchpoint {
int exception_index; \ int exception_index; \
\ \
CPUArchState *next_cpu; /* next CPU sharing TB cache */ \ CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
int running; /* Nonzero if cpu is currently running(usermode). */ \
/* user data */ \ /* user data */ \
void *opaque; \ void *opaque; \
\ \

View file

@@ -66,6 +66,7 @@ struct kvm_run;
* @nr_threads: Number of threads within this CPU. * @nr_threads: Number of threads within this CPU.
* @numa_node: NUMA node this CPU is belonging to. * @numa_node: NUMA node this CPU is belonging to.
* @host_tid: Host thread ID. * @host_tid: Host thread ID.
* @running: #true if CPU is currently running (usermode).
* @created: Indicates whether the CPU thread has been successfully created. * @created: Indicates whether the CPU thread has been successfully created.
* @stop: Indicates a pending stop request. * @stop: Indicates a pending stop request.
* @stopped: Indicates the CPU has been artificially stopped. * @stopped: Indicates the CPU has been artificially stopped.
@@ -88,6 +89,7 @@ struct CPUState {
#endif #endif
int thread_id; int thread_id;
uint32_t host_tid; uint32_t host_tid;
bool running;
struct QemuCond *halt_cond; struct QemuCond *halt_cond;
struct qemu_work_item *queued_work_first, *queued_work_last; struct qemu_work_item *queued_work_first, *queued_work_last;
bool thread_kicked; bool thread_kicked;

View file

@@ -151,13 +151,16 @@ static inline void exclusive_idle(void)
static inline void start_exclusive(void) static inline void start_exclusive(void)
{ {
CPUArchState *other; CPUArchState *other;
CPUState *other_cpu;
pthread_mutex_lock(&exclusive_lock); pthread_mutex_lock(&exclusive_lock);
exclusive_idle(); exclusive_idle();
pending_cpus = 1; pending_cpus = 1;
/* Make all other cpus stop executing. */ /* Make all other cpus stop executing. */
for (other = first_cpu; other; other = other->next_cpu) { for (other = first_cpu; other; other = other->next_cpu) {
if (other->running) { other_cpu = ENV_GET_CPU(other);
if (other_cpu->running) {
pending_cpus++; pending_cpus++;
cpu_exit(other); cpu_exit(other);
} }
@@ -176,19 +179,19 @@ static inline void end_exclusive(void)
} }
/* Wait for exclusive ops to finish, and begin cpu execution. */ /* Wait for exclusive ops to finish, and begin cpu execution. */
static inline void cpu_exec_start(CPUArchState *env) static inline void cpu_exec_start(CPUState *cpu)
{ {
pthread_mutex_lock(&exclusive_lock); pthread_mutex_lock(&exclusive_lock);
exclusive_idle(); exclusive_idle();
env->running = 1; cpu->running = true;
pthread_mutex_unlock(&exclusive_lock); pthread_mutex_unlock(&exclusive_lock);
} }
/* Mark cpu as not executing, and release pending exclusive ops. */ /* Mark cpu as not executing, and release pending exclusive ops. */
static inline void cpu_exec_end(CPUArchState *env) static inline void cpu_exec_end(CPUState *cpu)
{ {
pthread_mutex_lock(&exclusive_lock); pthread_mutex_lock(&exclusive_lock);
env->running = 0; cpu->running = false;
if (pending_cpus > 1) { if (pending_cpus > 1) {
pending_cpus--; pending_cpus--;
if (pending_cpus == 1) { if (pending_cpus == 1) {
@@ -210,11 +213,11 @@ void cpu_list_unlock(void)
} }
#else /* if !CONFIG_USE_NPTL */ #else /* if !CONFIG_USE_NPTL */
/* These are no-ops because we are not threadsafe. */ /* These are no-ops because we are not threadsafe. */
static inline void cpu_exec_start(CPUArchState *env) static inline void cpu_exec_start(CPUState *cpu)
{ {
} }
static inline void cpu_exec_end(CPUArchState *env) static inline void cpu_exec_end(CPUState *cpu)
{ {
} }
@@ -697,15 +700,16 @@ done:
void cpu_loop(CPUARMState *env) void cpu_loop(CPUARMState *env)
{ {
CPUState *cs = CPU(arm_env_get_cpu(env));
int trapnr; int trapnr;
unsigned int n, insn; unsigned int n, insn;
target_siginfo_t info; target_siginfo_t info;
uint32_t addr; uint32_t addr;
for(;;) { for(;;) {
cpu_exec_start(env); cpu_exec_start(cs);
trapnr = cpu_arm_exec(env); trapnr = cpu_arm_exec(env);
cpu_exec_end(env); cpu_exec_end(cs);
switch(trapnr) { switch(trapnr) {
case EXCP_UDEF: case EXCP_UDEF:
{ {
@@ -912,14 +916,15 @@ void cpu_loop(CPUARMState *env)
void cpu_loop(CPUUniCore32State *env) void cpu_loop(CPUUniCore32State *env)
{ {
CPUState *cs = CPU(uc32_env_get_cpu(env));
int trapnr; int trapnr;
unsigned int n, insn; unsigned int n, insn;
target_siginfo_t info; target_siginfo_t info;
for (;;) { for (;;) {
cpu_exec_start(env); cpu_exec_start(cs);
trapnr = uc32_cpu_exec(env); trapnr = uc32_cpu_exec(env);
cpu_exec_end(env); cpu_exec_end(cs);
switch (trapnr) { switch (trapnr) {
case UC32_EXCP_PRIV: case UC32_EXCP_PRIV:
{ {
@@ -1367,14 +1372,15 @@ static int do_store_exclusive(CPUPPCState *env)
void cpu_loop(CPUPPCState *env) void cpu_loop(CPUPPCState *env)
{ {
CPUState *cs = CPU(ppc_env_get_cpu(env));
target_siginfo_t info; target_siginfo_t info;
int trapnr; int trapnr;
target_ulong ret; target_ulong ret;
for(;;) { for(;;) {
cpu_exec_start(env); cpu_exec_start(cs);
trapnr = cpu_ppc_exec(env); trapnr = cpu_ppc_exec(env);
cpu_exec_end(env); cpu_exec_end(cs);
switch(trapnr) { switch(trapnr) {
case POWERPC_EXCP_NONE: case POWERPC_EXCP_NONE:
/* Just go on */ /* Just go on */
@@ -2184,14 +2190,15 @@ static int do_store_exclusive(CPUMIPSState *env)
void cpu_loop(CPUMIPSState *env) void cpu_loop(CPUMIPSState *env)
{ {
CPUState *cs = CPU(mips_env_get_cpu(env));
target_siginfo_t info; target_siginfo_t info;
int trapnr, ret; int trapnr, ret;
unsigned int syscall_num; unsigned int syscall_num;
for(;;) { for(;;) {
cpu_exec_start(env); cpu_exec_start(cs);
trapnr = cpu_mips_exec(env); trapnr = cpu_mips_exec(env);
cpu_exec_end(env); cpu_exec_end(cs);
switch(trapnr) { switch(trapnr) {
case EXCP_SYSCALL: case EXCP_SYSCALL:
syscall_num = env->active_tc.gpr[2] - 4000; syscall_num = env->active_tc.gpr[2] - 4000;