AioContext: export and use aio_dispatch

So far, aio_poll's scheme was dispatch/poll/dispatch, where
the first dispatch phase was used only in the GSource case in
order to avoid a blocking poll.  Earlier patches changed it to
dispatch/prepare/poll/dispatch, where prepare is aio_compute_timeout.

By making aio_dispatch public, we can remove the first dispatch
phase altogether, so that both aio_poll and the GSource use the same
prepare/poll/dispatch scheme.
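
As an illustration, one event-loop iteration in the prepare/poll/dispatch
style looks roughly like this (a self-contained sketch, not QEMU code;
compute_timeout and dispatch_ready are invented stand-ins for
aio_compute_timeout and aio_dispatch):

    #include <poll.h>
    #include <stdbool.h>
    #include <unistd.h>

    /* prepare: decide how long poll() may block.  QEMU derives this
     * from pending timers in aio_compute_timeout(). */
    static int compute_timeout(bool blocking)
    {
        return blocking ? -1 : 0;   /* -1 blocks, 0 only polls */
    }

    /* dispatch: consume whatever became ready and report progress. */
    static bool dispatch_ready(struct pollfd *pfd)
    {
        char buf[256];
        if (pfd->revents & POLLIN) {
            return read(pfd->fd, buf, sizeof(buf)) > 0;
        }
        return false;
    }

    /* One prepare/poll/dispatch iteration over a single fd. */
    static bool iterate(int fd, bool blocking)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        int timeout = compute_timeout(blocking);    /* prepare  */
        poll(&pfd, 1, timeout);                     /* poll     */
        return dispatch_ready(&pfd);                /* dispatch */
    }

    int main(void)
    {
        while (iterate(STDIN_FILENO, false)) {
            /* drain whatever is immediately readable, then stop */
        }
        return 0;
    }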

This patch breaks the invariant that aio_poll(..., true) will not block
the first time it returns false.  This used to be fundamental for
qemu_aio_flush's implementation as "while (qemu_aio_wait()) {}" but
no code in QEMU relies on this invariant anymore.  The return value
of aio_poll() is now comparable with that of g_main_context_iteration.
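
Concretely, the dropped invariant is what made the old flush idiom safe
(reconstructed for illustration; qemu_aio_wait() was essentially a thin
wrapper around aio_poll(ctx, true)):

    /* Old-style flush loop: correct only while aio_poll(ctx, true)
     * was guaranteed not to block once it starts returning false. */
    while (aio_poll(ctx, true)) {
        /* keep going until no more progress is reported */
    }

After this patch the return value only says whether callbacks were
dispatched; a blocking call may now block even on the iteration that
ends up returning false.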

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Author:     Paolo Bonzini <pbonzini@redhat.com>
Date:       2014-07-09 11:53:05 +02:00
Committer:  Stefan Hajnoczi
Commit:     e4c7e2d12d (parent 3672fa5083)
4 changed files with 24 additions and 70 deletions

aio-posix.c

@@ -119,11 +119,20 @@ bool aio_pending(AioContext *ctx)
     return false;
 }
 
-static bool aio_dispatch(AioContext *ctx)
+bool aio_dispatch(AioContext *ctx)
 {
     AioHandler *node;
     bool progress = false;
 
+    /*
+     * If there are callbacks left that have been queued, we need to call them.
+     * Do not call select in this case, because it is possible that the caller
+     * does not need a complete flush (as is the case for aio_poll loops).
+     */
+    if (aio_bh_poll(ctx)) {
+        progress = true;
+    }
+
     /*
      * We have to walk very carefully in case aio_set_fd_handler is
      * called while we're walking.
@@ -184,22 +193,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
-     * be re-evaluated before the next blocking poll().  This happens
-     * in two cases:
-     *
-     * 1) when aio_poll is called with blocking == false
-     *
-     * 2) when we are called after poll().  If we are called before
-     *    poll(), bottom halves will not be re-evaluated and we need
-     *    aio_notify() if blocking == true.
-     *
-     * The first aio_dispatch() only does something when AioContext is
-     * running as a GSource, and in that case aio_poll is used only
-     * with blocking == false, so this optimization is already quite
-     * effective.  However, the code is ugly and should be restructured
-     * to have a single aio_dispatch() call.  To do this, we need to
-     * reorganize aio_poll into a prepare/poll/dispatch model like
-     * glib's.
+     * be re-evaluated before the next blocking poll().  This is
+     * already true when aio_poll is called with blocking == false;
+     * if blocking == true, it is only true after poll() returns.
      *
      * If we're in a nested event loop, ctx->dispatching might be true.
      * In that case we can restore it just before returning, but we
@@ -207,26 +203,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
      */
     aio_set_dispatching(ctx, !blocking);
 
-    /*
-     * If there are callbacks left that have been queued, we need to call them.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for aio_poll loops).
-     */
-    if (aio_bh_poll(ctx)) {
-        blocking = false;
-        progress = true;
-    }
-
-    /* Re-evaluate condition (1) above. */
-    aio_set_dispatching(ctx, !blocking);
-    if (aio_dispatch(ctx)) {
-        progress = true;
-    }
-
-    if (progress && !blocking) {
-        goto out;
-    }
-
     ctx->walking_handlers++;
 
     g_array_set_size(ctx->pollfds, 0);
@@ -264,15 +240,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     /* Run dispatch even if there were no readable fds to run timers */
     aio_set_dispatching(ctx, true);
-    if (aio_bh_poll(ctx)) {
-        progress = true;
-    }
-
     if (aio_dispatch(ctx)) {
         progress = true;
     }
 
-out:
     aio_set_dispatching(ctx, was_dispatching);
     return progress;
 }
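
For context, the optimization that the comment in the second hunk refers
to looks roughly like this (a simplified sketch of aio_notify from this
period of the code; memory barriers omitted):

    void aio_notify(AioContext *ctx)
    {
        /* While a dispatch phase is running, file descriptors, bottom
         * halves and timers will all be re-evaluated before the next
         * blocking poll(), so there is no need to kick the notifier. */
        if (!ctx->dispatching) {
            event_notifier_set(&ctx->notifier);
        }
    }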

aio-win32.c

@@ -130,11 +130,12 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     return progress;
 }
 
-static bool aio_dispatch(AioContext *ctx)
+bool aio_dispatch(AioContext *ctx)
 {
     bool progress;
 
-    progress = aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+    progress = aio_bh_poll(ctx);
+    progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
     return progress;
 }
@@ -149,23 +150,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     progress = false;
 
-    /*
-     * If there are callbacks left that have been queued, we need to call then.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for aio_poll loops).
-     */
-    if (aio_bh_poll(ctx)) {
-        blocking = false;
-        progress = true;
-    }
-
-    /* Dispatch any pending callbacks from the GSource. */
-    progress |= aio_dispatch(ctx);
-
-    if (progress && !blocking) {
-        return true;
-    }
-
     ctx->walking_handlers++;
 
     /* fill fd sets */
@@ -205,14 +189,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
         events[ret - WAIT_OBJECT_0] = events[--count];
     }
 
-    if (blocking) {
-        /* Run the timers a second time. We do this because otherwise aio_wait
-         * will not note progress - and will stop a drain early - if we have
-         * a timer that was not ready to run entering g_poll but is ready
-         * after g_poll. This will only do anything if a timer has expired.
-         */
-        progress |= timerlistgroup_run_timers(&ctx->tlg);
-    }
+    progress |= timerlistgroup_run_timers(&ctx->tlg);
 
     return progress;
 }

async.c

@@ -213,7 +213,7 @@ aio_ctx_dispatch(GSource *source,
     AioContext *ctx = (AioContext *) source;
 
     assert(callback == NULL);
-    aio_poll(ctx, false);
+    aio_dispatch(ctx);
     return true;
 }
 
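
With aio_ctx_dispatch() reduced to a plain aio_dispatch() call, driving
an AioContext through glib dispatches the same work as aio_poll.  A
usage sketch (aio_get_g_source() is QEMU's API; the draining loop is
illustrative):

    #include <glib.h>
    #include "block/aio.h"

    void drain_via_glib(AioContext *ctx)
    {
        GMainContext *gctx = g_main_context_new();
        GSource *source = aio_get_g_source(ctx);   /* takes a reference */

        g_source_attach(source, gctx);
        /* Each iteration is glib's prepare/poll/dispatch; dispatch ends
         * in aio_ctx_dispatch() above.  The return value mirrors what
         * aio_poll() now reports: whether callbacks were dispatched. */
        while (g_main_context_iteration(gctx, FALSE)) {
        }

        g_source_unref(source);
        g_main_context_unref(gctx);
    }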

include/block/aio.h

@@ -211,6 +211,12 @@ void qemu_bh_delete(QEMUBH *bh);
  */
 bool aio_pending(AioContext *ctx);
 
+/* Dispatch any pending callbacks from the GSource attached to the AioContext.
+ *
+ * This is used internally in the implementation of the GSource.
+ */
+bool aio_dispatch(AioContext *ctx);
+
 /* Progress in completing AIO work to occur.  This can issue new pending
  * aio as a result of executing I/O completion or bh callbacks.
  *