Segfault when saving draft: Closes #7366

Imap.Folder is now more paranoid about checking if the ClientSession
is available before performing an operation.  The ReplayQueue does
a better job of flushing pending commands when closing the Folder.
This commit is contained in:
Jim Nelson 2013-09-05 16:37:16 -07:00
parent 9a13769907
commit 3bb0dca6f5
4 changed files with 103 additions and 47 deletions

View file

@ -548,6 +548,21 @@ private class Geary.ImapEngine.GenericFolder : Geary.AbstractFolder, Geary.Folde
cancel_remote_open_timer();
// Close the replay queues; if a "natural" close, flush pending operations so everything
// gets a chance to run; if forced close, drop everything outstanding
try {
if (replay_queue != null) {
debug("Closing replay queue for %s... (flush_pending=%s)", to_string(),
(!remote_reason.is_error()).to_string());
yield replay_queue.close_async(!remote_reason.is_error());
debug("Closed replay queue for %s", to_string());
}
} catch (Error replay_queue_err) {
debug("Error closing %s replay queue: %s", to_string(), replay_queue_err.message);
}
replay_queue = null;
// Notify all callers waiting for the remote folder that it's not coming available
Imap.Folder? closing_remote_folder = remote_folder;
try {
@ -556,13 +571,15 @@ private class Geary.ImapEngine.GenericFolder : Geary.AbstractFolder, Geary.Folde
debug("close_internal_async: Unable to fire remote semaphore: %s", err.message);
}
// disconnect signals; note that there's no yield between clearing the remote folder and
// this, so they can't sneak in
if (closing_remote_folder != null) {
closing_remote_folder.appended.disconnect(on_remote_appended);
closing_remote_folder.removed.disconnect(on_remote_removed);
closing_remote_folder.disconnected.disconnect(on_remote_disconnected);
// to avoid keeping the caller waiting while the remote end closes, close it in the
// background
// to avoid keeping the caller waiting while the remote end closes (i.e. drops the
// connection or performs an IMAP CLOSE operation), close it in the background
//
// TODO: Problem with this is that we cannot effectively signal or report a close error,
// because by the time this operation completes the folder is considered closed. That
@ -578,19 +595,6 @@ private class Geary.ImapEngine.GenericFolder : Geary.AbstractFolder, Geary.Folde
// see above note for why this must be called every time
notify_closed(local_reason);
// Close the replay queues *after* the folder has been closed (in case any final upcalls
// come and can be handled)
try {
if (replay_queue != null) {
debug("Closing replay queue for %s...", to_string());
yield replay_queue.close_async();
debug("Closed replay queue for %s", to_string());
}
} catch (Error replay_queue_err) {
debug("Error closing %s replay queue: %s", to_string(), replay_queue_err.message);
}
replay_queue = null;
remote_opened = false;
notify_closed(CloseReason.FOLDER_CLOSED);

View file

@ -9,10 +9,10 @@ private class Geary.ImapEngine.ReplayQueue : Geary.BaseObject {
// see as high as 250ms
private const int NOTIFICATION_QUEUE_WAIT_MSEC = 1000;
private class ReplayClose : ReplayOperation {
public ReplayClose() {
private class CloseReplayQueue : ReplayOperation {
public CloseReplayQueue() {
// LOCAL_AND_REMOTE to make sure this operation is flushed all the way down the pipe
base ("Close", ReplayOperation.Scope.LOCAL_AND_REMOTE);
base ("CloseReplayQueue", ReplayOperation.Scope.LOCAL_AND_REMOTE);
}
public override void notify_remote_removed_position(Imap.SequenceNumber removed) {
@ -136,7 +136,8 @@ private class Geary.ImapEngine.ReplayQueue : Geary.BaseObject {
* Returns false if the operation was not scheduled (queue already closed).
*/
public bool schedule(ReplayOperation op) {
if (is_closed) {
// ReplayClose is allowed past the velvet ropes even as the hoi polloi are turned away
if (is_closed && !(op is CloseReplayQueue)) {
debug("Unable to schedule replay operation %s on %s: replay queue closed", op.to_string(),
to_string());
@ -248,32 +249,74 @@ private class Geary.ImapEngine.ReplayQueue : Geary.BaseObject {
replay_op.notify_remote_removed_ids(ids);
}
public async void close_async(Cancellable? cancellable = null) throws Error {
/**
* Closes the {@link ReplayQueue}.
*
* If flush_pending is false, outstanding operations are cancelled. If a {@link ReplayOperation}
* has executed locally and is waiting to execute remotely, it will be backed out.
*
* Otherwise, all outstanding operations are permitted to execute, but no new ones may be
* scheduled.
*
* A ReplayQueue cannot be re-opened.
*/
public async void close_async(bool flush_pending, Cancellable? cancellable = null) throws Error {
if (is_closed)
return;
closing();
// cancel timeout and flush what's available
// cancel notification queue timeout
if (notification_timer != null)
notification_timer.cancel();
on_notification_timeout();
// piggyback on the notification timer callback to flush notification operations
if (flush_pending)
on_notification_timeout();
// flush a ReplayClose operation down the pipe so all enqueued operations complete
ReplayClose? close_op = new ReplayClose();
if (!schedule(close_op))
close_op = null;
// mark as closed *after* scheduling, otherwise schedule() will fail
// mark as closed now to prevent further scheduling ... ReplayClose gets special
// consideration in schedule()
is_closed = true;
if (close_op != null)
yield close_op.wait_for_ready_async(cancellable);
closing();
// if not flushing pending, clear out all waiting operations, backing out any that need to
// be backed out
if (!flush_pending)
yield clear_pending_async(cancellable);
// flush a ReplayClose operation down the pipe so all working operations complete
CloseReplayQueue close_op = new CloseReplayQueue();
bool scheduled = schedule(close_op);
assert(scheduled);
yield close_op.wait_for_ready_async(cancellable);
closed();
}
/**
 * Backs out all operations still waiting in the queue when it is closed
 * without flushing (i.e. close_async(flush_pending=false)).
 *
 * Notification and local queues are simply emptied; operations already in
 * the remote queue have executed locally, so each one has
 * backout_local_async() invoked to undo its local effects.  Errors during
 * backout are logged and swallowed so one failure doesn't stop the rest.
 */
private async void clear_pending_async(Cancellable? cancellable) {
// note that this merely clears the queue; disabling the timer is performed in close_async
notification_queue.clear();
// clear the local queue; nothing more to do there
local_queue.clear();
// have to backout elements that have executed locally but not remotely
// clear the remote queue before backing out, otherwise the queue might proceed while
// yielding
// (copy to a snapshot list first so iteration is safe across the yields below)
Gee.List<ReplayOperation> remote_list = new Gee.ArrayList<ReplayOperation>();
remote_list.add_all(remote_queue.get_all());
remote_queue.clear();
foreach (ReplayOperation op in remote_list) {
try {
yield op.backout_local_async();
} catch (Error err) {
// best-effort: log and continue backing out the remaining operations
debug("Error backing out operation %s: %s", op.to_string(), err.message);
}
}
}
private async void do_replay_local_async() {
bool queue_running = true;
while (queue_running) {
@ -288,7 +331,7 @@ private class Geary.ImapEngine.ReplayQueue : Geary.BaseObject {
}
// If this is a Close operation, shut down the queue after processing it
if (op is ReplayClose)
if (op is CloseReplayQueue)
queue_running = false;
bool local_execute = false;
@ -387,7 +430,7 @@ private class Geary.ImapEngine.ReplayQueue : Geary.BaseObject {
// ReplayClose means this queue (and the folder) are closing, so handle errors a little
// differently
bool is_close_op = op is ReplayClose;
bool is_close_op = op is CloseReplayQueue;
// wait until the remote folder is opened (or returns false, in which case closed)
bool folder_opened = true;

View file

@ -129,21 +129,23 @@ private class Geary.Imap.Folder : BaseObject {
if (session == null)
return;
session.exists.disconnect(on_exists);
session.expunge.disconnect(on_expunge);
session.fetch.disconnect(on_fetch);
session.recent.disconnect(on_recent);
session.search.disconnect(on_search);
session.status_response_received.disconnect(on_status_response);
session.disconnected.disconnect(on_disconnected);
// set this.session to null before yielding to ClientSessionManager
ClientSession release_session = session;
session = null;
release_session.exists.disconnect(on_exists);
release_session.expunge.disconnect(on_expunge);
release_session.fetch.disconnect(on_fetch);
release_session.recent.disconnect(on_recent);
release_session.search.disconnect(on_search);
release_session.status_response_received.disconnect(on_status_response);
release_session.disconnected.disconnect(on_disconnected);
try {
yield session_mgr.release_session_async(session, cancellable);
yield session_mgr.release_session_async(release_session, cancellable);
} catch (Error err) {
debug("Unable to release session %s: %s", session.to_string(), err.message);
}
session = null;
}
private void on_exists(int total) {
@ -254,7 +256,7 @@ private class Geary.Imap.Folder : BaseObject {
}
private void check_open() throws Error {
if (!is_open)
if (!is_open || session == null)
throw new EngineError.OPEN_REQUIRED("Imap.Folder %s not open", to_string());
}
@ -267,6 +269,10 @@ private class Geary.Imap.Folder : BaseObject {
// execute commands with mutex locked
Error? err = null;
try {
// check open after acquiring mutex, so that if an error is thrown it's caught and
// mutex can be closed
check_open();
responses = yield session.send_multiple_commands_async(cmds, cancellable);
} catch (Error store_fetch_err) {
err = store_fetch_err;
@ -326,6 +332,8 @@ private class Geary.Imap.Folder : BaseObject {
// TODO: Offer parameter so a SortedSet could be returned (or, the caller must supply the Set)
public async Gee.Set<Imap.UID>? list_uids_async(MessageSet msg_set, Cancellable? cancellable)
throws Error {
check_open();
FetchCommand cmd = new FetchCommand.data_type(msg_set, FetchDataSpecifier.UID);
Gee.HashMap<SequenceNumber, FetchedData>? fetched;

View file

@ -96,11 +96,12 @@ public class Geary.Nonblocking.Mailbox<G> : BaseObject {
}
/**
* Returns a read-only version of the mailbox queue that can be iterated in queue-order.
*
* Since the queue could potentially alter when the main loop runs, it's important to only
* examine the queue when not allowing other operations to process.
*
* This returns a read-only list in queue-order. Altering will not affect the queue. Use
* revoke() to remove enqueued operations.
* Altering will not affect the actual queue. Use {@link revoke} to remove enqueued operations.
*/
public Gee.Collection<G> get_all() {
return queue.read_only_view;