Introduce a variant of xcb_poll_for_event for examining the event queue.

In some circumstances using xcb_poll_for_event is suboptimal because it
checks the connection for new events. This may lead to a lot of failed
nonblocking read system calls.

Signed-off-by: Rami Ylimäki <rami.ylimaki@vincit.fi>
Signed-off-by: Jamey Sharp <jamey@minilop.net>
commit 527df3c84b
parent b64cd0df88
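
To illustrate the problem the message describes, here is a minimal, hypothetical sketch (not part of the commit; the function name handle_pending_events is only an illustration). Whenever xcb's internal queue is empty, a call to xcb_poll_for_event also performs a nonblocking read on the socket, so code that polls frequently, for example once per iteration of a render loop, ends up issuing many reads that fail with EAGAIN because nothing new has arrived.

/* Hypothetical sketch: polling frequently with xcb_poll_for_event.
 * Each call that finds xcb's internal queue empty also performs a
 * nonblocking read() on the connection; when no data has arrived,
 * that read simply fails. */
#include <stdlib.h>
#include <xcb/xcb.h>

static void handle_pending_events(xcb_connection_t *c)
{
    xcb_generic_event_t *ev;
    while ((ev = xcb_poll_for_event(c)) != NULL) {
        /* ... dispatch the event ... */
        free(ev);
    }
    /* The call that terminated the loop returned NULL only after a
     * failed attempt to read more data from the connection. */
}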

 src/xcb.h | 16 ++++++++++++++++
--- a/src/xcb.h
+++ b/src/xcb.h
@@ -271,6 +271,22 @@ xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c);
  */
 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c);
 
+/**
+ * @brief Returns the next event without reading from the connection.
+ * @param c: The connection to the X server.
+ * @return The next already queued event from the server.
+ *
+ * This is a version of xcb_poll_for_event that only examines the
+ * event queue for new events. The function doesn't try to read new
+ * events from the connection if no queued events are found.
+ *
+ * This function is useful for callers that know in advance that all
+ * interesting events have already been read from the connection. For
+ * example, callers might use xcb_wait_for_reply and be interested
+ * only in events that preceded a specific reply.
+ */
+xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c);
+
 /**
  * @brief Returns the next event or error that precedes the given request.
  * @param c: The connection to the X server.
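
The use case mentioned in the new documentation comment might look like the following hypothetical sketch (the function name wait_for_reply_and_events is an illustration; the caller is assumed to already have the request's sequence number, e.g. from a request cookie). Once xcb_wait_for_reply has returned, anything that preceded the reply has already been read into xcb's event queue, so the queue can be drained without touching the socket again.

/* Hypothetical sketch: collect the events that arrived while waiting
 * for a reply, using xcb_poll_for_queued_event so that no additional
 * nonblocking reads are issued on the connection. */
#include <stdlib.h>
#include <xcb/xcb.h>

static void *wait_for_reply_and_events(xcb_connection_t *c, unsigned int sequence)
{
    xcb_generic_error_t *err = NULL;
    void *reply = xcb_wait_for_reply(c, sequence, &err);  /* blocks, reads */

    xcb_generic_event_t *ev;
    while ((ev = xcb_poll_for_queued_event(c)) != NULL) {
        /* ... dispatch whatever events are already in the queue ... */
        free(ev);
    }

    free(err);  /* err is NULL unless the request failed */
    return reply;
}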

 src/xcb_in.c | 14 ++++++++++++--
--- a/src/xcb_in.c
+++ b/src/xcb_in.c
@@ -535,7 +535,7 @@ xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
     return ret;
 }
 
-xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
+static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued)
 {
     xcb_generic_event_t *ret = 0;
     if(!c->has_error)
@@ -543,13 +543,23 @@ xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
         pthread_mutex_lock(&c->iolock);
         /* FIXME: follow X meets Z architecture changes. */
         ret = get_event(c);
-        if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
+        if(!ret && !queued && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
             ret = get_event(c);
         pthread_mutex_unlock(&c->iolock);
     }
     return ret;
 }
 
+xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
+{
+    return poll_for_next_event(c, 0);
+}
+
+xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
+{
+    return poll_for_next_event(c, 1);
+}
+
 static xcb_generic_event_t *get_event_until(xcb_connection_t *c, uint64_t request)
 {
     if(c->in.events && XCB_SEQUENCE_COMPARE(c->in.events->sequence, <=, request))
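
A note on the implementation above: both public entry points share poll_for_next_event, and the queued flag only decides whether _xcb_in_read is attempted when the queue turns out to be empty, so xcb_poll_for_queued_event never touches the socket. A hypothetical caller-side pattern that benefits from this (the function name process_pending is an illustration, not part of the commit) is to let a single xcb_poll_for_event call do the one nonblocking read and then empty the queue with xcb_poll_for_queued_event:

/* Hypothetical drain loop: at most one nonblocking read per wakeup.
 * xcb_poll_for_event() may read from the socket once if the queue is
 * empty; the remaining events are taken straight from xcb's queue. */
#include <stdlib.h>
#include <xcb/xcb.h>

static void process_pending(xcb_connection_t *c)
{
    xcb_generic_event_t *ev = xcb_poll_for_event(c);  /* may read() once */
    while (ev) {
        /* ... dispatch the event ... */
        free(ev);
        ev = xcb_poll_for_queued_event(c);             /* queue only, no read() */
    }
}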