
Commit 1af9930

Merge branch 'main' into small-improvements
2 parents: c9dc27a + c6f9661

File tree: 4 files changed (+16, -0 lines)


Diff for: invokeai/app/invocations/image.py (+6)
@@ -1095,6 +1095,7 @@ class ExpandMaskWithFadeInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Expands a mask with a fade effect. The mask uses black to indicate areas to keep from the generated image and white for areas to discard.
     The mask is thresholded to create a binary mask, and then a distance transform is applied to create a fade effect.
     The fade size is specified in pixels, and the mask is expanded by that amount. The result is a mask with a smooth transition from black to white.
+    If the fade size is 0, the mask is returned as-is.
     """

     mask: ImageField = InputField(description="The mask to expand")
@@ -1104,6 +1105,11 @@ class ExpandMaskWithFadeInvocation(BaseInvocation, WithMetadata, WithBoard):
     def invoke(self, context: InvocationContext) -> ImageOutput:
         pil_mask = context.images.get_pil(self.mask.image_name, mode="L")

+        if self.fade_size_px == 0:
+            # If the fade size is 0, just return the mask as-is.
+            image_dto = context.images.save(image=pil_mask, image_category=ImageCategory.MASK)
+            return ImageOutput.build(image_dto)
+
         np_mask = numpy.array(pil_mask)

         # Threshold the mask to create a binary mask - 0 for black, 255 for white
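For context, here is a minimal sketch of the threshold-plus-distance-transform fade that the docstring describes, assuming OpenCV and numpy. The function name, the 128 threshold, the L2 distance metric, and the ramp direction are illustrative assumptions, not the node's exact math:

import cv2
import numpy

def expand_mask_with_fade_sketch(np_mask: numpy.ndarray, fade_size_px: int) -> numpy.ndarray:
    # Mirror the new early-out: a fade size of 0 returns the mask unchanged.
    if fade_size_px == 0:
        return np_mask
    # Threshold to a binary mask: 0 (black, keep) vs 255 (white, discard).
    binary = numpy.where(np_mask < 128, 0, 255).astype(numpy.uint8)
    # For each white pixel, the distance in pixels to the nearest black pixel.
    dist = cv2.distanceTransform(binary, cv2.DIST_L2, 5)
    # Ramp from black to white over fade_size_px, clamped beyond that distance.
    fade = numpy.clip(dist / fade_size_px, 0.0, 1.0) * 255.0
    return fade.astype(numpy.uint8)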

Diff for: invokeai/backend/llava_onevision_model.py (+7)
@@ -47,3 +47,10 @@ def run(self, prompt: str, images: list[Image], device: torch.device, dtype: tor

     def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         self._vllm_model.to(device=device, dtype=dtype)
+
+    def calc_size(self) -> int:
+        """Get size of the model in memory in bytes."""
+        # HACK(ryand): Fix this issue with circular imports.
+        from invokeai.backend.model_manager.load.model_util import calc_module_size
+
+        return calc_module_size(self._vllm_model)
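The new calc_size defers to calc_module_size from model_util, whose body is not shown in this diff. As a rough sketch under that caveat, sizing a torch module typically means summing the bytes of its parameters and buffers (the name calc_module_size_sketch is hypothetical):

import torch

def calc_module_size_sketch(module: torch.nn.Module) -> int:
    # Approximate resident size: parameter bytes plus buffer bytes.
    param_bytes = sum(p.numel() * p.element_size() for p in module.parameters())
    buffer_bytes = sum(b.numel() * b.element_size() for b in module.buffers())
    return param_bytes + buffer_bytes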

Diff for: invokeai/backend/model_manager/load/model_util.py (+2)
@@ -15,6 +15,7 @@
 from invokeai.backend.image_util.grounding_dino.grounding_dino_pipeline import GroundingDinoPipeline
 from invokeai.backend.image_util.segment_anything.segment_anything_pipeline import SegmentAnythingPipeline
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
+from invokeai.backend.llava_onevision_model import LlavaOnevisionModel
 from invokeai.backend.model_manager.taxonomy import AnyModel
 from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -50,6 +51,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
             SegmentAnythingPipeline,
             DepthAnythingPipeline,
             SigLipPipeline,
+            LlavaOnevisionModel,
         ),
     ):
         return model.calc_size()
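Adding LlavaOnevisionModel to this isinstance tuple is what routes it through the calc_size() path added in llava_onevision_model.py. A simplified sketch of the dispatch pattern, abridged to two branches; the torch.nn.Module fallback is an assumption about the rest of the real function:

import torch

from invokeai.backend.image_util.segment_anything.segment_anything_pipeline import SegmentAnythingPipeline
from invokeai.backend.llava_onevision_model import LlavaOnevisionModel
from invokeai.backend.model_manager.load.model_util import calc_module_size

def calc_model_size_by_data_sketch(model) -> int:
    # Wrapper classes that know how to measure themselves report their own size.
    if isinstance(model, (SegmentAnythingPipeline, LlavaOnevisionModel)):
        return model.calc_size()
    # Plain torch modules can fall back to generic parameter/buffer accounting.
    if isinstance(model, torch.nn.Module):
        return calc_module_size(model)
    return 0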

Diff for: invokeai/frontend/web/src/services/api/schema.ts (+1)
@@ -6451,6 +6451,7 @@ export type components = {
      * @description Expands a mask with a fade effect. The mask uses black to indicate areas to keep from the generated image and white for areas to discard.
      * The mask is thresholded to create a binary mask, and then a distance transform is applied to create a fade effect.
      * The fade size is specified in pixels, and the mask is expanded by that amount. The result is a mask with a smooth transition from black to white.
+     * If the fade size is 0, the mask is returned as-is.
      */
     ExpandMaskWithFadeInvocation: {
       /**
