diff --git a/deltachat-ffi/deltachat.h b/deltachat-ffi/deltachat.h
index 638ab74b71..80b208d827 100644
--- a/deltachat-ffi/deltachat.h
+++ b/deltachat-ffi/deltachat.h
@@ -2644,10 +2644,6 @@ void dc_str_unref (char* str);
 /**
  * Creates an object for sending a backup to another device.
  *
- * Before calling this function IO must be stopped using dc_accounts_stop_io()
- * or dc_stop_io() so that no changes to the blobs or database are happening.
- * IO should only be restarted once dc_backup_provider_wait() has returned.
- *
  * The backup is sent to through a peer-to-peer channel which is bootstrapped
  * by a QR-code. The backup contains the entire state of the account
  * including credentials. This can be used to setup a new device.
@@ -2708,9 +2704,7 @@ char* dc_backup_provider_get_qr_svg (const dc_backup_provider_t* backup_provider
 /**
  * Waits for the sending to finish.
  *
- * This is a blocking call and should only be called once. Once this function
- * returns IO can be started again using dc_accounts_start_io() or
- * dc_start_io().
+ * This is a blocking call and should only be called once.
 *
 * @memberof dc_backup_provider_t
 * @param backup_provider The backup provider object as created by
diff --git a/src/imex.rs b/src/imex.rs
index bd965fa3dc..a329b1474e 100644
--- a/src/imex.rs
+++ b/src/imex.rs
@@ -91,7 +91,7 @@ pub async fn imex(
     let cancel = context.alloc_ongoing().await?;
 
     let res = {
-        let mut guard = context.scheduler.pause(context).await;
+        let mut guard = context.scheduler.pause(context.clone()).await;
         let res = imex_inner(context, what, path, passphrase)
             .race(async {
                 cancel.recv().await.ok();
diff --git a/src/imex/transfer.rs b/src/imex/transfer.rs
index a294359e7f..ba334ca492 100644
--- a/src/imex/transfer.rs
+++ b/src/imex/transfer.rs
@@ -91,6 +91,7 @@ impl BackupProvider {
 
         // Acquire global "ongoing" mutex.
         let cancel_token = context.alloc_ongoing().await?;
+        let mut paused_guard = context.scheduler.pause(context.clone()).await;
         let context_dir = context
             .get_blobdir()
             .parent()
@@ -118,15 +119,19 @@ impl BackupProvider {
             Ok((provider, ticket)) => (provider, ticket),
             Err(err) => {
                 context.free_ongoing().await;
+                paused_guard.resume().await;
                 return Err(err);
             }
         };
-        let handle = tokio::spawn(Self::watch_provider(
-            context.clone(),
-            provider,
-            cancel_token,
-            dbfile,
-        ));
+        let handle = {
+            let context = context.clone();
+            tokio::spawn(async move {
+                let res = Self::watch_provider(&context, provider, cancel_token, dbfile).await;
+                context.free_ongoing().await;
+                paused_guard.resume().await;
+                res
+            })
+        };
         let slf = Self { handle, ticket };
         let qr = slf.qr();
         *context.export_provider.lock().expect("poisoned lock") = Some(qr);
@@ -181,7 +186,7 @@ impl BackupProvider {
     /// The *cancel_token* is the handle for the ongoing process mutex, when this completes
     /// we must cancel this operation.
     async fn watch_provider(
-        context: Context,
+        context: &Context,
         mut provider: Provider,
         cancel_token: Receiver<()>,
         _dbfile: TempPathGuard,
@@ -262,7 +267,6 @@ impl BackupProvider {
                 context.emit_event(SendProgress::Failed.into())
             }
         }
-        context.free_ongoing().await;
         res
     }
 
@@ -373,7 +377,7 @@ pub async fn get_backup(context: &Context, qr: Qr) -> Result<()> {
         !context.is_configured().await?,
         "Cannot import backups to accounts in use."
     );
-    let mut guard = context.scheduler.pause(context).await;
+    let mut guard = context.scheduler.pause(context.clone()).await;
 
     // Acquire global "ongoing" mutex.
     let cancel_token = context.alloc_ongoing().await?;
diff --git a/src/scheduler.rs b/src/scheduler.rs
index d5571f8356..c37cb3f86d 100644
--- a/src/scheduler.rs
+++ b/src/scheduler.rs
@@ -95,10 +95,10 @@ impl SchedulerState {
     /// If in the meantime [`SchedulerState::start`] or [`SchedulerState::stop`] is called
     /// resume will do the right thing and restore the scheduler to the state requested by
     /// the last call.
-    pub(crate) async fn pause<'a>(&'_ self, context: &'a Context) -> IoPausedGuard<'a> {
+    pub(crate) async fn pause<'a>(&'_ self, context: Context) -> IoPausedGuard {
         let mut inner = self.inner.write().await;
         inner.paused = true;
-        Self::do_stop(inner, context).await;
+        Self::do_stop(inner, &context).await;
         IoPausedGuard {
             context,
             done: false,
@@ -195,12 +195,12 @@ struct InnerSchedulerState {
 }
 
 #[derive(Debug)]
-pub(crate) struct IoPausedGuard<'a> {
-    context: &'a Context,
+pub(crate) struct IoPausedGuard {
+    context: Context,
     done: bool,
 }
 
-impl<'a> IoPausedGuard<'a> {
+impl IoPausedGuard {
     pub(crate) async fn resume(&mut self) {
         self.done = true;
         let mut inner = self.context.scheduler.inner.write().await;
@@ -211,7 +211,7 @@ impl<'a> IoPausedGuard<'a> {
     }
 }
 
-impl<'a> Drop for IoPausedGuard<'a> {
+impl Drop for IoPausedGuard {
     fn drop(&mut self) {
         if self.done {
             return;
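
Why the lifetime is removed from IoPausedGuard: in BackupProvider::prepare the guard is now
moved into the tokio::spawn'ed task that runs watch_provider, and spawned futures must be
'static, which a guard borrowing &'a Context cannot satisfy. Making the guard own a (cheaply
cloned) Context lifts that restriction. Below is a minimal, self-contained sketch of the same
pattern; Context, PausedGuard, and the `paused` flag are simplified stand-ins invented for
illustration, not the deltachat-core-rust types.

use std::sync::Arc;

use tokio::sync::RwLock;

/// Stand-in for deltachat's `Context`; only tracks a paused flag.
#[derive(Clone, Default)]
struct Context {
    paused: Arc<RwLock<bool>>,
}

/// Owned guard, analogous to the lifetime-free `IoPausedGuard` in the diff.
struct PausedGuard {
    context: Context, // owned, so the guard is `'static` and can move between tasks
    done: bool,
}

impl Context {
    /// Pause "IO" and hand back an owned guard, mirroring `SchedulerState::pause`.
    async fn pause(&self) -> PausedGuard {
        *self.paused.write().await = true;
        PausedGuard {
            context: self.clone(),
            done: false,
        }
    }
}

impl PausedGuard {
    /// Explicit async resume, like `IoPausedGuard::resume`.
    async fn resume(&mut self) {
        self.done = true;
        *self.context.paused.write().await = false;
    }
}

impl Drop for PausedGuard {
    // Best-effort fallback when `resume()` was never called.
    fn drop(&mut self) {
        if self.done {
            return;
        }
        let context = self.context.clone();
        tokio::spawn(async move {
            *context.paused.write().await = false;
        });
    }
}

#[tokio::main]
async fn main() {
    let context = Context::default();
    let mut guard = context.pause().await;
    // With a borrowed `&'a Context` this move would not compile:
    // `tokio::spawn` requires a `'static` future.
    let worker = tokio::spawn(async move {
        // ... send the backup while IO stays paused ...
        guard.resume().await;
    });
    worker.await.unwrap();
    assert!(!*context.paused.read().await); // IO resumed
}

The Drop fallback mirrors the diff's `impl Drop for IoPausedGuard`: if the happy path called
resume(), drop is a no-op; otherwise it restores IO asynchronously so a panic or early return
cannot leave IO paused forever.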