AioContext: take bottom halves into account when computing aio_poll timeout

Right now, QEMU invokes aio_bh_poll before the "poll" phase
of aio_poll.  It is simpler to do it afterwards and skip the
"poll" phase altogether when the OS-dependent parts of AioContext
are invoked from GSource.  This way, AioContext behaves more
similarly when used as a GSource vs. when used as stand-alone.

As a start, take bottom halves into account when computing the
poll timeout.  If a bottom half is ready, do a non-blocking
poll.  As a side effect, this makes idle bottom halves work
with aio_poll; an improvement, but not really an important
one since they are deprecated.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
Paolo Bonzini 2014-07-09 11:53:01 +02:00 committed by Stefan Hajnoczi
parent 3cbbe9fd1f
commit 845ca10dd0
4 changed files with 29 additions and 17 deletions

View file

@@ -249,7 +249,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
         /* wait until next event */
         ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
                            ctx->pollfds->len,
-                           blocking ? timerlistgroup_deadline_ns(&ctx->tlg) : 0);
+                           blocking ? aio_compute_timeout(ctx) : 0);
 
         /* if we have any readable fds, dispatch event */
         if (ret > 0) {

View file

@@ -165,8 +165,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
     while (count > 0) {
         int ret;
 
-        timeout = blocking ?
-            qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)) : 0;
+        timeout = blocking
+            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
 
         /* if we have any signaled events, dispatch event */

async.c
View file

@@ -152,39 +152,43 @@ void qemu_bh_delete(QEMUBH *bh)
     bh->deleted = 1;
 }
 
-static gboolean
-aio_ctx_prepare(GSource *source, gint *timeout)
+int64_t
+aio_compute_timeout(AioContext *ctx)
 {
-    AioContext *ctx = (AioContext *) source;
+    int64_t deadline;
+    int timeout = -1;
     QEMUBH *bh;
-    int deadline;
 
-    /* We assume there is no timeout already supplied */
-    *timeout = -1;
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
-                *timeout = 10;
+                timeout = 10000000;
             } else {
                 /* non-idle bottom halves will be executed
                  * immediately */
-                *timeout = 0;
-                return true;
+                return 0;
             }
         }
     }
 
-    deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg));
+    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
     if (deadline == 0) {
-        *timeout = 0;
-        return true;
+        return 0;
     } else {
-        *timeout = qemu_soonest_timeout(*timeout, deadline);
+        return qemu_soonest_timeout(timeout, deadline);
     }
+}
 
-    return false;
+static gboolean
+aio_ctx_prepare(GSource *source, gint *timeout)
+{
+    AioContext *ctx = (AioContext *) source;
+
+    /* We assume there is no timeout already supplied */
+    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
+    return *timeout == 0;
 }
 
 static gboolean
static gboolean static gboolean

View file

@@ -303,4 +303,12 @@ static inline void aio_timer_init(AioContext *ctx,
     timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque);
 }
 
+/**
+ * aio_compute_timeout:
+ * @ctx: the aio context
+ *
+ * Compute the timeout that a blocking aio_poll should use.
+ */
+int64_t aio_compute_timeout(AioContext *ctx);
+
 #endif