Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
4e6c661
split startings_job_modals into separate files
hotzenklotz Aug 11, 2025
59ecc38
rename file names to snake case
hotzenklotz Aug 11, 2025
a35e8fc
move train ai model tab
hotzenklotz Aug 11, 2025
2be6e63
formatting
hotzenklotz Aug 11, 2025
475175e
moved train_ai_model file to the other AI modal tabs
hotzenklotz Aug 12, 2025
bbd0657
formatting
hotzenklotz Aug 12, 2025
0c1dda0
added route for instance AI model training
hotzenklotz Aug 13, 2025
0a8bd90
stuff
hotzenklotz Aug 13, 2025
e4b3441
remove unreliable autocomplete
hotzenklotz Aug 13, 2025
9b7d33e
added some react optimizations
hotzenklotz Aug 13, 2025
15850d0
fetch the model category for ai models from backend
hotzenklotz Aug 13, 2025
f516eb3
adjust explanation/wording for pre-trained models a bit
hotzenklotz Aug 13, 2025
11dfad2
add seed generator distance for instance/nuclei model inference
hotzenklotz Aug 13, 2025
9a4dfde
fixes
hotzenklotz Aug 14, 2025
e10df97
changelog
hotzenklotz Aug 14, 2025
b9d429f
Merge branch 'master' of github.com:scalableminds/webknossos into spl…
hotzenklotz Aug 14, 2025
377ddf6
Update frontend/javascripts/viewer/view/action-bar/ai_job_modals/form…
hotzenklotz Aug 14, 2025
2473f91
Update app/controllers/AiModelController.scala
hotzenklotz Aug 14, 2025
fd71ced
backend formatting
hotzenklotz Aug 14, 2025
a5afc38
apply feedback
hotzenklotz Aug 14, 2025
433a5b5
added more types and react.callbacks for better perf
hotzenklotz Aug 15, 2025
95c78ed
formatting and code rabbit feedback
hotzenklotz Aug 15, 2025
77c0faf
restore application.conf
hotzenklotz Aug 15, 2025
682287f
fix typescript types
hotzenklotz Aug 15, 2025
ed811da
Merge branch 'master' into split_ai_modals
hotzenklotz Aug 18, 2025
461dbb5
Update frontend/javascripts/viewer/view/action-bar/ai_job_modals/form…
hotzenklotz Aug 20, 2025
0f80b11
Update frontend/javascripts/viewer/view/action-bar/ai_job_modals/form…
hotzenklotz Aug 20, 2025
2c2495c
Update frontend/javascripts/viewer/view/action-bar/ai_job_modals/form…
hotzenklotz Aug 20, 2025
ca08d0f
Apply PR feedback
hotzenklotz Aug 20, 2025
5028633
Merge branch 'split_ai_modals' of github.com:scalableminds/webknossos…
hotzenklotz Aug 20, 2025
578f68b
Merge branch 'master' of github.com:scalableminds/webknossos into spl…
hotzenklotz Aug 20, 2025
ffac159
apply PR feedback
hotzenklotz Aug 21, 2025
3c69584
Merge branch 'master' of github.com:scalableminds/webknossos into spl…
hotzenklotz Aug 21, 2025
2fad242
Merge branch 'master' into split_ai_modals
hotzenklotz Aug 21, 2025
0d90fad
Merge branch 'split_ai_modals' of github.com:scalableminds/webknossos…
hotzenklotz Aug 21, 2025
72478d8
Update frontend/javascripts/admin/job/job_list_view.tsx
hotzenklotz Aug 21, 2025
831f0a9
Merge branch 'master' into split_ai_modals
hotzenklotz Aug 25, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
132 changes: 113 additions & 19 deletions app/controllers/AiModelController.scala
Original file line number Diff line number Diff line change
Expand Up @@ -30,14 +30,25 @@ object TrainingAnnotationSpecification {
implicit val jsonFormat: OFormat[TrainingAnnotationSpecification] = Json.format[TrainingAnnotationSpecification]
}

case class RunTrainingParameters(trainingAnnotations: List[TrainingAnnotationSpecification],
name: String,
comment: Option[String],
aiModelCategory: Option[AiModelCategory],
workflowYaml: Option[String])
// Request payload for starting a neuron-model training job.
// Differs from RunInstanceModelTrainingParameters only by the absence of maxDistanceNm;
// kept as a separate case class so the two JSON contracts can evolve independently.
case class RunNeuronModelTrainingParameters(trainingAnnotations: List[TrainingAnnotationSpecification],
name: String,
aiModelCategory: Option[AiModelCategory],
comment: Option[String],
workflowYaml: Option[String])

object RunTrainingParameters {
implicit val jsonFormat: OFormat[RunTrainingParameters] = Json.format[RunTrainingParameters]
object RunNeuronModelTrainingParameters {
implicit val jsonFormat: OFormat[RunNeuronModelTrainingParameters] = Json.format[RunNeuronModelTrainingParameters]
}

// Request payload for starting an instance-model training job.
// maxDistanceNm: optional distance passed through to the worker as "max_distance_nm"
// (see runInstanceTraining); semantics are defined by the training workflow — TODO confirm units.
case class RunInstanceModelTrainingParameters(trainingAnnotations: List[TrainingAnnotationSpecification],
name: String,
aiModelCategory: Option[AiModelCategory],
maxDistanceNm: Option[Double],
comment: Option[String],
workflowYaml: Option[String])

object RunInstanceModelTrainingParameters {
// Play-JSON codec derived from the case-class fields.
implicit val jsonFormat: OFormat[RunInstanceModelTrainingParameters] = Json.format[RunInstanceModelTrainingParameters]
}

case class RunInferenceParameters(annotationId: Option[ObjectId],
Expand All @@ -48,7 +59,8 @@ case class RunInferenceParameters(annotationId: Option[ObjectId],
boundingBox: String,
newDatasetName: String,
maskAnnotationLayerName: Option[String],
workflowYaml: Option[String])
workflowYaml: Option[String],
seedGeneratorDistanceThreshold: Option[Double])

object RunInferenceParameters {
implicit val jsonFormat: OFormat[RunInferenceParameters] = Json.format[RunInferenceParameters]
Expand Down Expand Up @@ -127,21 +139,18 @@ class AiModelController @Inject()(
}
}

def runNeuronTraining: Action[RunTrainingParameters] = sil.SecuredAction.async(validateJson[RunTrainingParameters]) {
implicit request =>
def runNeuronTraining: Action[RunNeuronModelTrainingParameters] =
sil.SecuredAction.async(validateJson[RunNeuronModelTrainingParameters]) { implicit request =>
for {
_ <- userService.assertIsSuperUser(request.identity)
trainingAnnotations = request.body.trainingAnnotations
_ <- Fox
.fromBool(trainingAnnotations.nonEmpty || request.body.workflowYaml.isDefined) ?~> "aiModel.training.zeroAnnotations"
_ <- Fox.fromBool(trainingAnnotations.nonEmpty || request.body.workflowYaml.isDefined) ?~> "aiModel.training.zeroAnnotations"
firstAnnotationId <- trainingAnnotations.headOption.map(_.annotationId).toFox
annotation <- annotationDAO.findOne(firstAnnotationId)
dataset <- datasetDAO.findOne(annotation._dataset)
_ <- Fox
.fromBool(request.identity._organization == dataset._organization) ?~> "job.trainModel.notAllowed.organization" ~> FORBIDDEN
_ <- Fox.fromBool(request.identity._organization == dataset._organization) ?~> "job.trainModel.notAllowed.organization" ~> FORBIDDEN
dataStore <- dataStoreDAO.findOneByName(dataset._dataStore) ?~> "dataStore.notFound"
_ <- Fox
.serialCombined(request.body.trainingAnnotations.map(_.annotationId))(annotationDAO.findOne) ?~> "annotation.notFound"
_ <- Fox.serialCombined(request.body.trainingAnnotations.map(_.annotationId))(annotationDAO.findOne) ?~> "annotation.notFound"
modelId = ObjectId.generate
organization <- organizationDAO.findOne(request.identity._organization)
jobCommand = JobCommand.train_neuron_model
Expand All @@ -154,8 +163,7 @@ class AiModelController @Inject()(
existingAiModelsCount <- aiModelDAO.countByNameAndOrganization(request.body.name,
request.identity._organization)
_ <- Fox.fromBool(existingAiModelsCount == 0) ?~> "aiModel.nameInUse"
newTrainingJob <- jobService
.submitJob(jobCommand, commandArgs, request.identity, dataStore.name) ?~> "job.couldNotRunTrainModel"
newTrainingJob <- jobService.submitJob(jobCommand, commandArgs, request.identity, dataStore.name) ?~> "job.couldNotRunTrainModel"
newAiModel = AiModel(
_id = modelId,
_organization = request.identity._organization,
Expand All @@ -171,7 +179,93 @@ class AiModelController @Inject()(
_ <- aiModelDAO.insertOne(newAiModel)
newAiModelJs <- aiModelService.publicWrites(newAiModel, request.identity)
} yield Ok(newAiModelJs)
}
}

// POST endpoint: submits a train_instance_model job and persists the corresponding
// AiModel row. Super-user only.
// NOTE(review): apart from jobCommand and the extra "max_distance_nm" argument this
// duplicates runNeuronTraining — a shared private helper would remove the drift risk.
def runInstanceTraining: Action[RunInstanceModelTrainingParameters] =
sil.SecuredAction.async(validateJson[RunInstanceModelTrainingParameters]) { implicit request =>
for {
// Training is restricted to super users.
_ <- userService.assertIsSuperUser(request.identity)
trainingAnnotations = request.body.trainingAnnotations
// Either at least one training annotation or a custom workflow must be supplied.
_ <- Fox.fromBool(trainingAnnotations.nonEmpty || request.body.workflowYaml.isDefined) ?~> "aiModel.training.zeroAnnotations"
// The first annotation determines the dataset (and thus the data store) used for the job.
firstAnnotationId <- trainingAnnotations.headOption.map(_.annotationId).toFox
annotation <- annotationDAO.findOne(firstAnnotationId)
dataset <- datasetDAO.findOne(annotation._dataset)
// Requester must belong to the dataset's organization.
_ <- Fox.fromBool(request.identity._organization == dataset._organization) ?~> "job.trainModel.notAllowed.organization" ~> FORBIDDEN
dataStore <- dataStoreDAO.findOneByName(dataset._dataStore) ?~> "dataStore.notFound"
// Every referenced training annotation must exist.
_ <- Fox.serialCombined(request.body.trainingAnnotations.map(_.annotationId))(annotationDAO.findOne) ?~> "annotation.notFound"
modelId = ObjectId.generate
organization <- organizationDAO.findOne(request.identity._organization)
jobCommand = JobCommand.train_instance_model
// Arguments forwarded to the worker; max_distance_nm is specific to instance training.
commandArgs = Json.obj(
"training_annotations" -> Json.toJson(trainingAnnotations),
"organization_id" -> organization._id,
"model_id" -> modelId,
"custom_workflow_provided_by_user" -> request.body.workflowYaml,
"max_distance_nm" -> request.body.maxDistanceNm
)
// Model names are unique per organization.
existingAiModelsCount <- aiModelDAO.countByNameAndOrganization(request.body.name,
request.identity._organization)
_ <- Fox.fromBool(existingAiModelsCount == 0) ?~> "aiModel.nameInUse"
newTrainingJob <- jobService.submitJob(jobCommand, commandArgs, request.identity, dataStore.name) ?~> "job.couldNotRunTrainModel"
// Persist the model row, linking it to the freshly submitted training job.
newAiModel = AiModel(
_id = modelId,
_organization = request.identity._organization,
_sharedOrganizations = List(),
_dataStore = dataStore.name,
_user = request.identity._id,
_trainingJob = Some(newTrainingJob._id),
_trainingAnnotations = trainingAnnotations.map(_.annotationId),
name = request.body.name,
comment = request.body.comment,
category = request.body.aiModelCategory
)
_ <- aiModelDAO.insertOne(newAiModel)
newAiModelJs <- aiModelService.publicWrites(newAiModel, request.identity)
} yield Ok(newAiModelJs)
}

// POST endpoint: submits an infer_instances job using a previously trained custom
// model and persists the corresponding AiInference row. Super-user only.
def runCustomInstanceModelInference: Action[RunInferenceParameters] =
sil.SecuredAction.async(validateJson[RunInferenceParameters]) { implicit request =>
for {
// Inference with custom models is restricted to super users.
_ <- userService.assertIsSuperUser(request.identity)
organization <- organizationDAO.findOne(request.body.organizationId)(GlobalAccessContext) ?~> Messages(
"organization.notFound",
request.body.organizationId)
// Requester must belong to the target organization.
_ <- Fox.fromBool(request.identity._organization == organization._id) ?~> "job.runInference.notAllowed.organization" ~> FORBIDDEN
dataset <- datasetDAO.findOneByDirectoryNameAndOrganization(request.body.datasetDirectoryName, organization._id)
dataStore <- dataStoreDAO.findOneByName(dataset._dataStore) ?~> "dataStore.notFound"
// The referenced model must exist (its id is forwarded to the worker below).
_ <- aiModelDAO.findOne(request.body.aiModelId) ?~> "aiModel.notFound"
_ <- datasetService.assertValidDatasetName(request.body.newDatasetName)
jobCommand = JobCommand.infer_instances
// boundingBox arrives as a literal string; parse it before use.
boundingBox <- BoundingBox.fromLiteral(request.body.boundingBox).toFox
// Arguments forwarded to the worker; seed_generator_distance_threshold is specific
// to instance inference.
commandArgs = Json.obj(
"dataset_id" -> dataset._id,
"organization_id" -> organization._id,
"dataset_name" -> dataset.name,
"layer_name" -> request.body.colorLayerName,
"bbox" -> boundingBox.toLiteral,
"model_id" -> request.body.aiModelId,
"dataset_directory_name" -> request.body.datasetDirectoryName,
"new_dataset_name" -> request.body.newDatasetName,
"custom_workflow_provided_by_user" -> request.body.workflowYaml,
"seed_generator_distance_threshold" -> request.body.seedGeneratorDistanceThreshold
)
newInferenceJob <- jobService.submitJob(jobCommand, commandArgs, request.identity, dataStore.name) ?~> "job.couldNotRunInferWithModel"
// Persist the inference record; the output dataset is attached later by the job.
newAiInference = AiInference(
_id = ObjectId.generate,
_organization = request.identity._organization,
_aiModel = request.body.aiModelId,
_newDataset = None,
_annotation = request.body.annotationId,
boundingBox = boundingBox,
_inferenceJob = newInferenceJob._id,
newSegmentationLayerName = "segmentation",
maskAnnotationLayerName = request.body.maskAnnotationLayerName
)
_ <- aiInferenceDAO.insertOne(newAiInference)
// NOTE(review): the name newAiModelJs is misleading — this serializes the inference, not a model.
newAiModelJs <- aiInferenceService.publicWrites(newAiInference, request.identity)
} yield Ok(newAiModelJs)
}

def runCustomNeuronInference: Action[RunInferenceParameters] =
sil.SecuredAction.async(validateJson[RunInferenceParameters]) { implicit request =>
Expand Down
3 changes: 2 additions & 1 deletion app/models/aimodels/AiModel.scala
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,8 @@ class AiModelService @Inject()(dataStoreDAO: DataStoreDAO,
"comment" -> aiModel.comment,
"trainingJob" -> trainingJobJsOpt,
"created" -> aiModel.created,
"sharedOrganizationIds" -> sharedOrganizationIds
"sharedOrganizationIds" -> sharedOrganizationIds,
"category" -> aiModel.category
)
Comment on lines +74 to 76
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Serialize category as string to match frontend contract

publicWrites currently passes an Option[AiModelCategory]. Unless an implicit Writes exists for AiModelCategory, this will either not compile or produce unexpected JSON. The frontend expects a string union.

Apply:

-        "sharedOrganizationIds" -> sharedOrganizationIds,
-        "category" -> aiModel.category
+        "sharedOrganizationIds" -> sharedOrganizationIds,
+        "category" -> aiModel.category.map(_.toString)
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
"sharedOrganizationIds" -> sharedOrganizationIds,
"category" -> aiModel.category
)
"sharedOrganizationIds" -> sharedOrganizationIds,
"category" -> aiModel.category.map(_.toString)
)
🤖 Prompt for AI Agents
In app/models/aimodels/AiModel.scala around lines 74 to 76, the current
publicWrites is passing an Option[AiModelCategory] directly which relies on an
implicit Writes for AiModelCategory and can produce wrong JSON; change the
serialization to emit the category as an Option[String] (e.g. map the category
to its string name/value) so the JSON field "category" becomes a string union
matching the frontend contract, and remove reliance on an implicit Writes for
the enum.

}

Expand Down
4 changes: 2 additions & 2 deletions app/models/job/JobCommand.scala
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@ object JobCommand extends ExtendedEnumeration {
*/

val compute_mesh_file, compute_segment_index_file, convert_to_wkw, export_tiff, find_largest_segment_id,
globalize_floodfills, infer_nuclei, infer_neurons, materialize_volume_annotation, render_animation,
infer_mitochondria, align_sections, train_model, infer_with_model, train_neuron_model = Value
globalize_floodfills, infer_nuclei, infer_neurons, infer_instances, materialize_volume_annotation, render_animation,
infer_mitochondria, align_sections, train_model, infer_with_model, train_neuron_model, train_instance_model = Value

val highPriorityJobs: Set[Value] = Set(convert_to_wkw, export_tiff)
val lowPriorityJobs: Set[Value] = values.diff(highPriorityJobs)
Expand Down
6 changes: 4 additions & 2 deletions conf/webknossos.latest.routes
Original file line number Diff line number Diff line change
Expand Up @@ -285,8 +285,10 @@ POST /jobs/:id/attachDatasetToInference
GET /jobs/:id/export controllers.JobController.redirectToExport(id: ObjectId)

# AI Models
POST /aiModels/runNeuronTraining controllers.AiModelController.runNeuronTraining
POST /aiModels/inferences/runCustomNeuronInference controllers.AiModelController.runCustomNeuronInference
POST /aiModels/runNeuronModelTraining controllers.AiModelController.runNeuronTraining
POST /aiModels/runInstanceModelTraining controllers.AiModelController.runInstanceTraining
POST /aiModels/inferences/runCustomNeuronModelInference controllers.AiModelController.runCustomNeuronInference
POST /aiModels/inferences/runCustomInstanceModelInference controllers.AiModelController.runCustomInstanceModelInference
GET /aiModels/inferences/:id controllers.AiModelController.readAiInferenceInfo(id: ObjectId)
GET /aiModels/inferences controllers.AiModelController.listAiInferences
GET /aiModels controllers.AiModelController.listAiModels
Expand Down
52 changes: 43 additions & 9 deletions frontend/javascripts/admin/api/jobs.ts
Original file line number Diff line number Diff line change
Expand Up @@ -351,7 +351,11 @@ export function startAlignSectionsJob(
});
}

type AiModelCategory = "em_neurons" | "em_nuclei";
// This enum needs to be kept in sync with the backend/database
// (AiModelCategory on the Scala side); the string values are sent verbatim in API payloads.
export enum APIAiModelCategory {
EM_NEURONS = "em_neurons",
EM_NUCLEI = "em_nuclei",
}

type AiModelTrainingAnnotationSpecification = {
annotationId: string;
Expand All @@ -360,22 +364,38 @@ type AiModelTrainingAnnotationSpecification = {
mag: Vector3;
};

type RunTrainingParameters = {
trainingAnnotations: Array<AiModelTrainingAnnotationSpecification>;
// Request body for POST /api/aiModels/runNeuronModelTraining.
// Mirrors the backend case class RunNeuronModelTrainingParameters.
type RunNeuronModelTrainingParameters = {
trainingAnnotations: AiModelTrainingAnnotationSpecification[];
name: string;
// Fixed to EM_NEURONS so the literal type documents which endpoint this belongs to.
aiModelCategory: APIAiModelCategory.EM_NEURONS;
comment?: string;
workflowYaml?: string;
};

// Starts a neuron-model training job on the backend.
export function runNeuronTraining(params: RunNeuronModelTrainingParameters) {
  const body = JSON.stringify(params);
  return Request.sendJSONReceiveJSON("/api/aiModels/runNeuronModelTraining", {
    method: "POST",
    data: body,
  });
}

// Request body for POST /api/aiModels/runInstanceModelTraining.
// Mirrors the backend case class RunInstanceModelTrainingParameters.
type RunInstanceModelTrainingParameters = {
trainingAnnotations: AiModelTrainingAnnotationSpecification[];
name: string;
// Fixed to EM_NUCLEI so the literal type documents which endpoint this belongs to.
aiModelCategory: APIAiModelCategory.EM_NUCLEI;
// Forwarded to the worker as "max_distance_nm" — units per backend contract; TODO confirm.
maxDistanceNm: number;
comment?: string;
workflowYaml?: string;
};

export function runNeuronTraining(params: RunTrainingParameters) {
return Request.sendJSONReceiveJSON("/api/aiModels/runNeuronTraining", {
// Starts an instance-model training job on the backend.
export function runInstanceModelTraining(params: RunInstanceModelTrainingParameters) {
  const body = JSON.stringify(params);
  return Request.sendJSONReceiveJSON("/api/aiModels/runInstanceModelTraining", {
    method: "POST",
    data: body,
  });
}

type RunInferenceParameters = {
export type BaseModelInferenceParameters = {
annotationId?: string;
aiModelId: string;
datasetDirectoryName: string;
Expand All @@ -386,9 +406,23 @@ type RunInferenceParameters = {
workflowYaml?: string;
// maskAnnotationLayerName?: string | null
};
// Neuron inference needs no extra fields beyond the shared base parameters.
type RunNeuronModelInferenceParameters = BaseModelInferenceParameters;

// Instance inference additionally requires a seed-generator distance threshold,
// forwarded to the backend as "seed_generator_distance_threshold".
type RunInstanceModelInferenceParameters = BaseModelInferenceParameters & {
seedGeneratorDistanceThreshold: number;
};

// Starts a neuron-inference job using a custom trained AI model.
// The bounding box is serialized as a comma-separated literal, as the backend expects.
export function runNeuronModelInferenceWithAiModelJob(params: RunNeuronModelInferenceParameters) {
  const body = { ...params, boundingBox: params.boundingBox.join(",") };
  return Request.sendJSONReceiveJSON("/api/aiModels/inferences/runCustomNeuronModelInference", {
    method: "POST",
    data: JSON.stringify(body),
  });
}

export function runNeuronInferenceWithAiModelJob(params: RunInferenceParameters) {
return Request.sendJSONReceiveJSON("/api/aiModels/inferences/runCustomNeuronInference", {
export function runInstanceModelInferenceWithAiModelJob(
params: RunInstanceModelInferenceParameters,
) {
return Request.sendJSONReceiveJSON("/api/aiModels/inferences/runCustomInstanceModelInference", {
method: "POST",
data: JSON.stringify({ ...params, boundingBox: params.boundingBox.join(",") }),
});
Expand Down
29 changes: 22 additions & 7 deletions frontend/javascripts/admin/job/job_list_view.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ export function JobState({ job }: { job: APIJob }) {

return (
<Tooltip title={tooltip}>
<span className="icon-margin-right">{icon}</span>
<span>{icon}</span>
{jobStateNormalized}
</Tooltip>
);
Expand Down Expand Up @@ -235,7 +235,7 @@ function JobListView() {
) {
return (
<span>
Neuron inferral for layer {job.layerName} of{" "}
AI Neuron inferral for layer <i>{job.layerName}</i> of{" "}
<Link to={linkToDataset}>{job.datasetName}</Link>{" "}
</span>
);
Comment on lines +238 to 241
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

User-visible wording: fix “inferral” and grammar

These UI strings are shown to end-users. “Inferral” is non-standard; “inference” is the correct term. Also, “AI instances segmentation” should be “AI instance segmentation.”

Apply this diff to correct the wording:

-          AI Neuron inferral for layer <i>{job.layerName}</i> of{" "}
+          AI Neuron inference for layer <i>{job.layerName}</i> of{" "}
-          AI Mitochondria inferral for layer <i>{job.layerName}</i> of{" "}
+          AI Mitochondria inference for layer <i>{job.layerName}</i> of{" "}
-          AI instances segmentation for layer <i>{job.layerName}</i> of{" "}
+          AI instance segmentation for layer <i>{job.layerName}</i> of{" "}

Also applies to: 259-262, 263-269

🤖 Prompt for AI Agents
In frontend/javascripts/admin/job/job_list_view.tsx around lines 238-241 (and
also update the similar strings at 259-262 and 263-269), replace user-facing
words: change “inferral” to “inference” and fix plural/grammar “AI instances
segmentation” to “AI instance segmentation”; update the JSX/text nodes
accordingly so they read e.g. “AI Neuron inference for layer
<i>{job.layerName}</i> of <Link to={linkToDataset}>{job.datasetName}</Link>” and
other occurrences to use “AI instance segmentation.”

Expand All @@ -256,14 +256,21 @@ function JobListView() {
) {
return (
<span>
Mitochondria inferral for layer {job.layerName} of{" "}
AI Mitochondria inferral for layer <i>{job.layerName}</i> of{" "}
<Link to={linkToDataset}>{job.datasetName}</Link>{" "}
</span>
);
} else if (job.type === APIJobType.INFER_INSTANCES && linkToDataset != null && job.layerName) {
return (
<span>
AI instance segmentation for layer <i>{job.layerName}</i> of{" "}
<Link to={linkToDataset}>{job.datasetName}</Link>{" "}
</span>
);
} else if (job.type === APIJobType.ALIGN_SECTIONS && linkToDataset != null && job.layerName) {
return (
<span>
Align sections for layer {job.layerName} of{" "}
Align sections for layer <i>{job.layerName}</i> of{" "}
<Link to={linkToDataset}>{job.datasetName}</Link>{" "}
</span>
);
Expand All @@ -277,11 +284,19 @@ function JobListView() {
: null}
</span>
);
} else if (job.type === APIJobType.TRAIN_NEURON_MODEL || APIJobType.DEPRECATED_TRAIN_MODEL) {
const numberOfTrainingAnnotations = job.trainingAnnotations.length;
} else if (
job.type === APIJobType.TRAIN_NEURON_MODEL ||
job.type === APIJobType.TRAIN_INSTANCE_MODEL ||
job.type === APIJobType.DEPRECATED_TRAIN_MODEL
) {
const numberOfTrainingAnnotations = job.trainingAnnotations?.length || 0;
const modelName =
job.type === APIJobType.TRAIN_NEURON_MODEL || job.type === APIJobType.DEPRECATED_TRAIN_MODEL
? "neuron model"
: "instance model";
return (
<span>
{`Train neuron model on ${numberOfTrainingAnnotations} ${Utils.pluralize("annotation", numberOfTrainingAnnotations)}. `}
{`Train ${modelName} on ${numberOfTrainingAnnotations} ${Utils.pluralize("annotation", numberOfTrainingAnnotations)}. `}
{getShowTrainingDataLink(job.trainingAnnotations)}
</span>
Comment on lines +299 to 301
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Guard “Show Training Data” link against empty arrays

If trainingAnnotations is an empty array, getShowTrainingDataLink will access index 0 and throw. Gate the link on count > 0.

Apply this diff:

-          {getShowTrainingDataLink(job.trainingAnnotations)}
+          {numberOfTrainingAnnotations > 0 &&
+            getShowTrainingDataLink(job.trainingAnnotations)}

Alternatively, harden getShowTrainingDataLink itself to return null when length === 0:

export const getShowTrainingDataLink = (trainingAnnotations?: { annotationId: string }[]) => {
  if (!trainingAnnotations || trainingAnnotations.length === 0) return null;
  return trainingAnnotations.length > 1
    ? /* multi-annotation modal link */
    : /* single annotation link */;
};
🤖 Prompt for AI Agents
In frontend/javascripts/admin/job/job_list_view.tsx around lines 299 to 301, the
call to getShowTrainingDataLink(job.trainingAnnotations) can throw when
trainingAnnotations is an empty array because the helper accesses index 0; guard
the link render by only calling getShowTrainingDataLink when
job.trainingAnnotations exists and has length > 0 (e.g. wrap with a conditional
check for length > 0), or alternatively modify getShowTrainingDataLink to return
null when trainingAnnotations is falsy or length === 0 so callers can safely
render its result.

);
Expand Down
Loading