From cb655d465bf89329c8305de9ee0a9d529be2d888 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Mon, 15 Sep 2025 11:51:21 +0200
Subject: [PATCH 1/3] WIP restore backwards compatibility for old publish task
 in API v10

---
 conf/application.conf                        |  4 +-
 .../controllers/LegacyController.scala       | 44 ++++++++++++++++++-
 2 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/conf/application.conf b/conf/application.conf
index 3cbf40806a..77f036fd42 100644
--- a/conf/application.conf
+++ b/conf/application.conf
@@ -156,8 +156,8 @@ features {
   taskReopenAllowedInSeconds = 30
   allowDeleteDatasets = true
   # to enable jobs for local development, use "yarn enable-jobs" to also activate it in the database
-  jobsEnabled = false
-  voxelyticsEnabled = false
+  jobsEnabled = true
+  voxelyticsEnabled = true
   neuronInferralCostPerGVx = 1
   mitochondriaInferralCostPerGVx = 0.5
   alignmentCostPerGVx = 0.5
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/LegacyController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/LegacyController.scala
index 6c094b9b04..12c62528e8 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/LegacyController.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/LegacyController.scala
@@ -12,6 +12,7 @@ import com.scalableminds.webknossos.datastore.models.{
 }
 import com.scalableminds.webknossos.datastore.models.datasource.{DataSourceId, UnusableDataSource, UsableDataSource}
 import com.scalableminds.webknossos.datastore.services.mesh.FullMeshRequest
+import com.scalableminds.webknossos.datastore.services.uploading.ReserveUploadInformation
 import com.scalableminds.webknossos.datastore.services.{
   DSRemoteWebknossosClient,
   DataSourceService,
@@ -19,11 +20,23 @@ import com.scalableminds.webknossos.datastore.services.{
   DatasetCache,
   UserAccessRequest
 }
-import play.api.libs.json.Json
+import play.api.libs.json.{Json, OFormat}
 import play.api.mvc.{Action, AnyContent, PlayBodyParsers, RawBuffer, Result}
 
 import scala.concurrent.ExecutionContext
 
+case class LegacyReserveManualUploadInformation(
+    datasetName: String,
+    organization: String,
+    initialTeamIds: List[String],
+    folderId: Option[String],
+    requireUniqueName: Boolean = false,
+)
+object LegacyReserveManualUploadInformation {
+  implicit val jsonFormat: OFormat[LegacyReserveManualUploadInformation] =
+    Json.format[LegacyReserveManualUploadInformation]
+}
+
 class LegacyController @Inject()(
     accessTokenService: DataStoreAccessTokenService,
     remoteWebknossosClient: DSRemoteWebknossosClient,
@@ -32,6 +45,7 @@
     meshController: DSMeshController,
     dataSourceController: DataSourceController,
     dataSourceService: DataSourceService,
+    dsRemoteWebknossosClient: DSRemoteWebknossosClient,
     datasetCache: DatasetCache
 )(implicit ec: ExecutionContext, bodyParsers: PlayBodyParsers)
     extends Controller
@@ -46,6 +60,34 @@
         "Reserving manual uploads via datastore route /datasets/reserveManualUpload is no longer available in this WEBKNOSSOS server version. This is an exception to the listed API compatibility. Please use a client version that supports API version 11 or newer.")
     }
 
+  // To be called by people with disk access but not DatasetManager role. This way, they can upload a dataset manually on disk,
+  // and it can be put in a webknossos folder where they have access
+  def reserveManualUploadV10(): Action[LegacyReserveManualUploadInformation] =
+    Action.async(validateJson[LegacyReserveManualUploadInformation]) { implicit request =>
+      accessTokenService.validateAccessFromTokenContext(
+        UserAccessRequest.administrateDataSources(request.body.organization)) {
+        for {
+          reservedDatasetInfo <- dsRemoteWebknossosClient.reserveDataSourceUpload(
+            ReserveUploadInformation(
+              "aManualUpload",
+              request.body.datasetName,
+              request.body.organization,
+              0,
+              Some(List.empty),
+              None,
+              None,
+              request.body.initialTeamIds,
+              request.body.folderId,
+              Some(request.body.requireUniqueName)
+            )
+          ) ?~> "dataset.upload.validation.failed"
+        } yield
+          Ok(
+            Json.obj("newDatasetId" -> reservedDatasetInfo.newDatasetId,
+                     "directoryName" -> reservedDatasetInfo.directoryName))
+      }
+    }
+
   def requestViaWebknossosV9(
       organizationId: String,
       datasetDirectoryName: String,

From 1ed35f7e7ec778f697c1a08e6f90ae66b39b8e4b Mon Sep 17 00:00:00 2001
From: Florian M
Date: Mon, 15 Sep 2025 13:49:25 +0200
Subject: [PATCH 2/3] restore reserveManualUpload v10

---
 app/controllers/WKRemoteDataStoreController.scala     |  2 ++
 app/models/dataset/DatasetService.scala               |  6 ++++++
 .../datastore/controllers/LegacyController.scala      | 10 ++--------
 .../datastore/services/uploading/UploadService.scala  |  3 ++-
 4 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala
index bd3bdd539c..3dcda3759a 100644
--- a/app/controllers/WKRemoteDataStoreController.scala
+++ b/app/controllers/WKRemoteDataStoreController.scala
@@ -84,6 +84,8 @@
         layersToLinkWithDirectoryName <- Fox.serialCombined(uploadInfo.layersToLink.getOrElse(List.empty))(l =>
           validateLayerToLink(l, user)) ?~> "dataset.upload.invalidLinkedLayers"
         newDatasetId = ObjectId.generate
+        _ <- Fox.runIf(request.body.requireUniqueName.getOrElse(false))(
+          datasetService.assertNewDatasetNameUnique(request.body.name, organization._id))
         dataset <- datasetService.createPreliminaryDataset(newDatasetId,
                                                            uploadInfo.name,
                                                            datasetService.generateDirectoryName(uploadInfo.name,
diff --git a/app/models/dataset/DatasetService.scala b/app/models/dataset/DatasetService.scala
index a2e833f5fb..e87b8dc265 100644
--- a/app/models/dataset/DatasetService.scala
+++ b/app/models/dataset/DatasetService.scala
@@ -66,6 +66,12 @@ class DatasetService @Inject()(organizationDAO: OrganizationDAO,
       _ <- Fox.fromBool(!name.startsWith(".")) ?~> "dataset.layer.name.invalid.startsWithDot"
     } yield ()
 
+  def assertNewDatasetNameUnique(name: String, organizationId: String): Fox[Unit] =
+    for {
+      exists <- datasetDAO.doesDatasetNameExistInOrganization(name, organizationId)
+      _ <- Fox.fromBool(!exists) ?~> "dataset.name.taken"
+    } yield ()
+
   def checkNameAvailable(organizationId: String, datasetName: String): Fox[Unit] =
     for {
       isDatasetNameAlreadyTaken <- datasetDAO.doesDatasetNameExistInOrganization(datasetName, organizationId)
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/LegacyController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/LegacyController.scala
index 12c62528e8..98c3a8d52e 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/LegacyController.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/LegacyController.scala
@@ -28,8 +28,8 @@ import scala.concurrent.ExecutionContext
 case class LegacyReserveManualUploadInformation(
     datasetName: String,
     organization: String,
-    initialTeamIds: List[String],
-    folderId: Option[String],
+    initialTeamIds: List[ObjectId],
+    folderId: Option[ObjectId],
     requireUniqueName: Boolean = false,
 )
 object LegacyReserveManualUploadInformation {
@@ -54,12 +54,6 @@ class LegacyController @Inject()(
 
   override def allowRemoteOrigin: Boolean = true
 
-  def reserveManualUploadV10: Action[AnyContent] =
-    Action.async { implicit request =>
-      Fox.failure(
-        "Reserving manual uploads via datastore route /datasets/reserveManualUpload is no longer available in this WEBKNOSSOS server version. This is an exception to the listed API compatibility. Please use a client version that supports API version 11 or newer.")
-    }
-
   // To be called by people with disk access but not DatasetManager role. This way, they can upload a dataset manually on disk,
   // and it can be put in a webknossos folder where they have access
   def reserveManualUploadV10(): Action[LegacyReserveManualUploadInformation] =
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
index 4970cb2ec5..32eb409e87 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
@@ -35,7 +35,8 @@ case class ReserveUploadInformation(
     totalFileSizeInBytes: Option[Long],
     layersToLink: Option[List[LegacyLinkedLayerIdentifier]],
     initialTeams: List[ObjectId], // team ids
-    folderId: Option[ObjectId])
+    folderId: Option[ObjectId],
+    requireUniqueName: Option[Boolean])
 object ReserveUploadInformation {
   implicit val reserveUploadInformation: OFormat[ReserveUploadInformation] = Json.format[ReserveUploadInformation]
 }

From 4692d1ba816f9e85f65da5062b8e748359a84d64 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Mon, 15 Sep 2025 13:59:06 +0200
Subject: [PATCH 3/3] undo dev changes

---
 conf/application.conf | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/conf/application.conf b/conf/application.conf
index 77f036fd42..3cbf40806a 100644
--- a/conf/application.conf
+++ b/conf/application.conf
@@ -156,8 +156,8 @@ features {
   taskReopenAllowedInSeconds = 30
   allowDeleteDatasets = true
   # to enable jobs for local development, use "yarn enable-jobs" to also activate it in the database
-  jobsEnabled = true
-  voxelyticsEnabled = true
+  jobsEnabled = false
+  voxelyticsEnabled = false
   neuronInferralCostPerGVx = 1
   mitochondriaInferralCostPerGVx = 0.5
   alignmentCostPerGVx = 0.5
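
For reference, below is a minimal standalone sketch (not part of the patches above) of the JSON body a v10 client would send to the restored /datasets/reserveManualUpload datastore route, and of how it binds to LegacyReserveManualUploadInformation. The dataset and organization values are made up, and the id fields are modelled as plain strings here, whereas after PATCH 2/3 they are ObjectId values (assumed to be encoded as id strings in JSON).

// Standalone sketch; id fields are String here, the real class uses ObjectId after PATCH 2/3.
import play.api.libs.json.{Json, OFormat}

object ReserveManualUploadV10Sketch extends App {

  case class LegacyReserveManualUploadInformation(datasetName: String,
                                                  organization: String,
                                                  initialTeamIds: List[String],
                                                  folderId: Option[String],
                                                  requireUniqueName: Boolean = false)

  implicit val format: OFormat[LegacyReserveManualUploadInformation] =
    Json.format[LegacyReserveManualUploadInformation]

  // Example body for POST /datasets/reserveManualUpload; the concrete values are made up.
  // folderId may be omitted because it is an Option; requireUniqueName is sent explicitly,
  // since a plain Json.format does not fall back to the `= false` default value.
  val body = Json.parse("""
    {
      "datasetName": "my_dataset",
      "organization": "sample_organization",
      "initialTeamIds": [],
      "requireUniqueName": false
    }
  """)

  // Prints a JsSuccess carrying the parsed case class.
  println(body.validate[LegacyReserveManualUploadInformation])
}

As the patches show, the controller then forwards these values into ReserveUploadInformation (uploadId "aManualUpload", file counts zeroed, requireUniqueName wrapped in Some), and WKRemoteDataStoreController only runs the new uniqueness check when requireUniqueName is set.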