@@ -165,13 +165,13 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
     title="Canny Processor",
     tags=["controlnet", "canny"],
     category="controlnet",
-    version="1.3.2",
+    version="1.3.3",
 )
 class CannyImageProcessorInvocation(ImageProcessorInvocation):
     """Canny edge detection for ControlNet"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
     low_threshold: int = InputField(
         default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
     )
@@ -199,13 +199,13 @@ def run_processor(self, image: Image.Image) -> Image.Image:
     title="HED (softedge) Processor",
     tags=["controlnet", "hed", "softedge"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class HedImageProcessorInvocation(ImageProcessorInvocation):
     """Applies HED edge detection to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
     # safe not supported in controlnet_aux v0.0.3
     # safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
     scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
@@ -228,13 +228,13 @@ def run_processor(self, image: Image.Image) -> Image.Image:
     title="Lineart Processor",
     tags=["controlnet", "lineart"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class LineartImageProcessorInvocation(ImageProcessorInvocation):
     """Applies line art processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
     coarse: bool = InputField(default=False, description="Whether to use coarse mode")

     def run_processor(self, image: Image.Image) -> Image.Image:
@@ -250,13 +250,13 @@ def run_processor(self, image: Image.Image) -> Image.Image:
     title="Lineart Anime Processor",
     tags=["controlnet", "lineart", "anime"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
     """Applies line art anime processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

     def run_processor(self, image: Image.Image) -> Image.Image:
         processor = LineartAnimeProcessor()
@@ -273,15 +273,15 @@ def run_processor(self, image: Image.Image) -> Image.Image:
     title="Midas Depth Processor",
     tags=["controlnet", "midas"],
     category="controlnet",
-    version="1.2.3",
+    version="1.2.4",
 )
 class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
     """Applies Midas depth processing to image"""

     a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
     bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`")
-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
     # depth_and_normal not supported in controlnet_aux v0.0.3
     # depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")

@@ -304,13 +304,13 @@ def run_processor(self, image):
     title="Normal BAE Processor",
     tags=["controlnet"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
     """Applies NormalBae processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

     def run_processor(self, image):
         normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
@@ -321,13 +321,13 @@ def run_processor(self, image):


 @invocation(
-    "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.2"
+    "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.3"
 )
 class MlsdImageProcessorInvocation(ImageProcessorInvocation):
     """Applies MLSD processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
     thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`")
     thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`")

@@ -344,13 +344,13 @@ def run_processor(self, image):


 @invocation(
-    "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.2"
+    "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.3"
 )
 class PidiImageProcessorInvocation(ImageProcessorInvocation):
     """Applies PIDI processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
     safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
     scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)

@@ -371,13 +371,13 @@ def run_processor(self, image):
     title="Content Shuffle Processor",
     tags=["controlnet", "contentshuffle"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
     """Applies content shuffle processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
     h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter")
     w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter")
     f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter")
@@ -401,7 +401,7 @@ def run_processor(self, image):
     title="Zoe (Depth) Processor",
     tags=["controlnet", "zoe", "depth"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
     """Applies Zoe depth processing to image"""
@@ -417,15 +417,15 @@ def run_processor(self, image):
     title="Mediapipe Face Processor",
     tags=["controlnet", "mediapipe", "face"],
     category="controlnet",
-    version="1.2.3",
+    version="1.2.4",
 )
 class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
     """Applies mediapipe face processing to image"""

     max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
     min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

     def run_processor(self, image):
         mediapipe_face_processor = MediapipeFaceDetector()
@@ -444,16 +444,16 @@ def run_processor(self, image):
     title="Leres (Depth) Processor",
     tags=["controlnet", "leres", "depth"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class LeresImageProcessorInvocation(ImageProcessorInvocation):
     """Applies leres processing to image"""

     thr_a: float = InputField(default=0, description="Leres parameter `thr_a`")
     thr_b: float = InputField(default=0, description="Leres parameter `thr_b`")
     boost: bool = InputField(default=False, description="Whether to use boost mode")
-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

     def run_processor(self, image):
         leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
@@ -473,7 +473,7 @@ def run_processor(self, image):
     title="Tile Resample Processor",
     tags=["controlnet", "tile"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class TileResamplerProcessorInvocation(ImageProcessorInvocation):
     """Tile resampler processor"""
@@ -513,13 +513,13 @@ def run_processor(self, img):
     title="Segment Anything Processor",
     tags=["controlnet", "segmentanything"],
     category="controlnet",
-    version="1.2.3",
+    version="1.2.4",
 )
 class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
     """Applies segment anything processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

     def run_processor(self, image):
         # segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
@@ -560,12 +560,12 @@ def show_anns(self, anns: List[Dict]):
     title="Color Map Processor",
     tags=["controlnet"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
     """Generates a color map from the provided image"""

-    color_map_tile_size: int = InputField(default=64, ge=0, description=FieldDescriptions.tile_size)
+    color_map_tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size)

     def run_processor(self, image: Image.Image):
         np_image = np.array(image, dtype=np.uint8)
@@ -592,15 +592,15 @@ def run_processor(self, image: Image.Image):
     title="Depth Anything Processor",
     tags=["controlnet", "depth", "depth anything"],
     category="controlnet",
-    version="1.1.1",
+    version="1.1.2",
 )
 class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
     """Generates a depth map based on the Depth Anything algorithm"""

     model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
         default="small", description="The size of the depth model to use"
     )
-    resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
+    resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

     def run_processor(self, image: Image.Image):
         depth_anything_detector = DepthAnythingDetector()
@@ -615,15 +615,15 @@ def run_processor(self, image: Image.Image):
     title="DW Openpose Image Processor",
     tags=["controlnet", "dwpose", "openpose"],
     category="controlnet",
-    version="1.1.0",
+    version="1.1.1",
 )
 class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
     """Generates an openpose pose from an image using DWPose"""

     draw_body: bool = InputField(default=True)
     draw_face: bool = InputField(default=False)
     draw_hands: bool = InputField(default=False)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

     def run_processor(self, image: Image.Image):
         dw_openpose = DWOpenposeDetector()
@@ -642,15 +642,15 @@ def run_processor(self, image: Image.Image):
     title="Heuristic Resize",
     tags=["image, controlnet"],
     category="image",
-    version="1.0.0",
+    version="1.0.1",
     classification=Classification.Prototype,
 )
 class HeuristicResizeInvocation(BaseInvocation):
     """Resize an image using a heuristic method. Preserves edge maps."""

     image: ImageField = InputField(description="The image to resize")
-    width: int = InputField(default=512, gt=0, description="The width to resize to (px)")
-    height: int = InputField(default=512, gt=0, description="The height to resize to (px)")
+    width: int = InputField(default=512, ge=1, description="The width to resize to (px)")
+    height: int = InputField(default=512, ge=1, description="The height to resize to (px)")

     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.images.get_pil(self.image.image_name, "RGB")