Commit e61c5a3

Merge: 2 parents 8c63378 + 1f86320

File tree: 122 files changed, +2237 -869 lines changed


.github/workflows/frontend-checks.yml (+6 -1)

@@ -44,7 +44,12 @@ jobs:
       - name: check for changed frontend files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             frontend:
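The same hash pin is applied to each of the workflow files below. As a minimal sketch of how this kind of pinning can be audited (the script is hypothetical and not part of this commit; it only assumes workflows live in the standard .github/workflows directory):

import re
from pathlib import Path

# A reference is treated as safe only when pinned to a full 40-hex commit SHA.
PINNED = re.compile(r"uses:\s*\S+@[0-9a-f]{40}\b")
USES = re.compile(r"uses:\s*([^\s#]+@\S+)")

for workflow in sorted(Path(".github/workflows").glob("*.yml")):
    for lineno, line in enumerate(workflow.read_text().splitlines(), start=1):
        match = USES.search(line)
        if match and not PINNED.search(line):
            # Tag- and branch-pinned actions can be retargeted upstream, which
            # is the supply-chain risk described in the comment above.
            print(f"{workflow}:{lineno}: not SHA-pinned: {match.group(1)}")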

.github/workflows/frontend-tests.yml (+6 -1)

@@ -44,7 +44,12 @@ jobs:
       - name: check for changed frontend files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             frontend:

.github/workflows/python-checks.yml (+6 -1)

@@ -43,7 +43,12 @@ jobs:
       - name: check for changed python files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             python:

.github/workflows/python-tests.yml (+6 -1)

@@ -77,7 +77,12 @@ jobs:
       - name: check for changed python files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             python:

.github/workflows/typegen-checks.yml (+6 -1)

@@ -42,7 +42,12 @@ jobs:
       - name: check for changed files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             src:

invokeai/app/invocations/compel.py (+8 -8)

@@ -40,10 +40,10 @@
 
 @invocation(
     "compel",
-    title="Prompt",
+    title="Prompt - SD1.5",
     tags=["prompt", "compel"],
     category="conditioning",
-    version="1.2.0",
+    version="1.2.1",
 )
 class CompelInvocation(BaseInvocation):
     """Parse prompt using compel package to conditioning."""

@@ -233,10 +233,10 @@ def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
 
 @invocation(
     "sdxl_compel_prompt",
-    title="SDXL Prompt",
+    title="Prompt - SDXL",
     tags=["sdxl", "compel", "prompt"],
     category="conditioning",
-    version="1.2.0",
+    version="1.2.1",
 )
 class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     """Parse prompt using compel package to conditioning."""

@@ -327,10 +327,10 @@ def invoke(self, context: InvocationContext) -> ConditioningOutput:
 
 @invocation(
     "sdxl_refiner_compel_prompt",
-    title="SDXL Refiner Prompt",
+    title="Prompt - SDXL Refiner",
     tags=["sdxl", "compel", "prompt"],
     category="conditioning",
-    version="1.1.1",
+    version="1.1.2",
 )
 class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     """Parse prompt using compel package to conditioning."""

@@ -376,10 +376,10 @@ class CLIPSkipInvocationOutput(BaseInvocationOutput):
 
 @invocation(
     "clip_skip",
-    title="CLIP Skip",
+    title="Apply CLIP Skip - SD1.5, SDXL",
    tags=["clipskip", "clip", "skip"],
     category="conditioning",
-    version="1.1.0",
+    version="1.1.1",
 )
 class CLIPSkipInvocation(BaseInvocation):
     """Skip layers in clip text_encoder model."""

invokeai/app/invocations/controlnet_image_processors.py (+1 -1)

@@ -87,7 +87,7 @@ class ControlOutput(BaseInvocationOutput):
     control: ControlField = OutputField(description=FieldDescriptions.control)
 
 
-@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.2")
+@invocation("controlnet", title="ControlNet - SD1.5, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3")
 class ControlNetInvocation(BaseInvocation):
     """Collects ControlNet info to pass to other nodes"""

invokeai/app/invocations/denoise_latents.py (+2 -2)

@@ -127,10 +127,10 @@ def get_scheduler(
 
 @invocation(
     "denoise_latents",
-    title="Denoise Latents",
+    title="Denoise - SD1.5, SDXL",
     tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
     category="latents",
-    version="1.5.3",
+    version="1.5.4",
 )
 class DenoiseLatentsInvocation(BaseInvocation):
     """Denoises noisy latents to decodable images"""

invokeai/app/invocations/fields.py (+2 -0)

@@ -59,6 +59,7 @@ class UIType(str, Enum, metaclass=MetaEnum):
     ControlLoRAModel = "ControlLoRAModelField"
     SigLipModel = "SigLipModelField"
     FluxReduxModel = "FluxReduxModelField"
+    LlavaOnevisionModel = "LLaVAModelField"
     # endregion
 
     # region Misc Field Types

@@ -205,6 +206,7 @@ class FieldDescriptions:
     freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."
     instantx_control_mode = "The control mode for InstantX ControlNet union models. Ignored for other ControlNet models. The standard mapping is: canny (0), tile (1), depth (2), blur (3), pose (4), gray (5), low quality (6). Negative values will be treated as 'None'."
     flux_redux_conditioning = "FLUX Redux conditioning tensor"
+    vllm_model = "The VLLM model to use"
 
 
 class ImageField(BaseModel):

invokeai/app/invocations/flux_control_lora_loader.py (+2 -2)

@@ -21,10 +21,10 @@ class FluxControlLoRALoaderOutput(BaseInvocationOutput):
 
 @invocation(
     "flux_control_lora_loader",
-    title="Flux Control LoRA",
+    title="Control LoRA - FLUX",
     tags=["lora", "model", "flux"],
     category="model",
-    version="1.1.0",
+    version="1.1.1",
     classification=Classification.Prototype,
 )
 class FluxControlLoRALoaderInvocation(BaseInvocation):

invokeai/app/invocations/flux_model_loader.py (+2 -2)

@@ -37,10 +37,10 @@ class FluxModelLoaderOutput(BaseInvocationOutput):
 
 @invocation(
     "flux_model_loader",
-    title="Flux Main Model",
+    title="Main Model - FLUX",
     tags=["model", "flux"],
     category="model",
-    version="1.0.5",
+    version="1.0.6",
     classification=Classification.Prototype,
 )
 class FluxModelLoaderInvocation(BaseInvocation):

invokeai/app/invocations/flux_text_encoder.py (+2 -2)

@@ -26,10 +26,10 @@
 
 @invocation(
     "flux_text_encoder",
-    title="FLUX Text Encoding",
+    title="Prompt - FLUX",
     tags=["prompt", "conditioning", "flux"],
     category="conditioning",
-    version="1.1.1",
+    version="1.1.2",
     classification=Classification.Prototype,
 )
 class FluxTextEncoderInvocation(BaseInvocation):

invokeai/app/invocations/flux_vae_decode.py (+2 -2)

@@ -22,10 +22,10 @@
 
 @invocation(
     "flux_vae_decode",
-    title="FLUX Latents to Image",
+    title="Latents to Image - FLUX",
     tags=["latents", "image", "vae", "l2i", "flux"],
     category="latents",
-    version="1.0.1",
+    version="1.0.2",
 )
 class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates an image from latents."""

invokeai/app/invocations/flux_vae_encode.py (+2 -2)

@@ -19,10 +19,10 @@
 
 @invocation(
     "flux_vae_encode",
-    title="FLUX Image to Latents",
+    title="Image to Latents - FLUX",
     tags=["latents", "image", "vae", "i2l", "flux"],
     category="latents",
-    version="1.0.0",
+    version="1.0.1",
 )
 class FluxVaeEncodeInvocation(BaseInvocation):
     """Encodes an image into latents."""

invokeai/app/invocations/ideal_size.py (+2 -2)

@@ -19,9 +19,9 @@ class IdealSizeOutput(BaseInvocationOutput):
 
 @invocation(
     "ideal_size",
-    title="Ideal Size",
+    title="Ideal Size - SD1.5, SDXL",
     tags=["latents", "math", "ideal_size"],
-    version="1.0.4",
+    version="1.0.5",
 )
 class IdealSizeInvocation(BaseInvocation):
     """Calculates the ideal size for generation to avoid duplication"""

invokeai/app/invocations/image_to_latents.py (+2 -2)

@@ -31,10 +31,10 @@
 
 @invocation(
     "i2l",
-    title="Image to Latents",
+    title="Image to Latents - SD1.5, SDXL",
     tags=["latents", "image", "vae", "i2l"],
     category="latents",
-    version="1.1.0",
+    version="1.1.1",
 )
 class ImageToLatentsInvocation(BaseInvocation):
     """Encodes an image into latents."""

invokeai/app/invocations/ip_adapter.py (+7 -1)

@@ -69,7 +69,13 @@ class IPAdapterOutput(BaseInvocationOutput):
 }
 
 
-@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.5.0")
+@invocation(
+    "ip_adapter",
+    title="IP-Adapter - SD1.5, SDXL",
+    tags=["ip_adapter", "control"],
+    category="ip_adapter",
+    version="1.5.1",
+)
 class IPAdapterInvocation(BaseInvocation):
     """Collects IP-Adapter info to pass to other nodes."""

invokeai/app/invocations/latents_to_image.py (+2 -2)

@@ -31,10 +31,10 @@
 
 @invocation(
     "l2i",
-    title="Latents to Image",
+    title="Latents to Image - SD1.5, SDXL",
     tags=["latents", "image", "vae", "l2i"],
     category="latents",
-    version="1.3.1",
+    version="1.3.2",
 )
 class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates an image from latents."""
New file (+60 -0)

@@ -0,0 +1,60 @@
+from typing import Any
+
+import torch
+from PIL.Image import Image
+from pydantic import field_validator
+
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent, UIType
+from invokeai.app.invocations.model import ModelIdentifierField
+from invokeai.app.invocations.primitives import StringOutput
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.llava_onevision_model import LlavaOnevisionModel
+from invokeai.backend.util.devices import TorchDevice
+
+
+@invocation("llava_onevision_vllm", title="LLaVA OneVision VLLM", tags=["vllm"], category="vllm", version="1.0.0")
+class LlavaOnevisionVllmInvocation(BaseInvocation):
+    """Run a LLaVA OneVision VLLM model."""
+
+    images: list[ImageField] | ImageField | None = InputField(default=None, max_length=3, description="Input image.")
+    prompt: str = InputField(
+        default="",
+        description="Input text prompt.",
+        ui_component=UIComponent.Textarea,
+    )
+    vllm_model: ModelIdentifierField = InputField(
+        title="LLaVA Model Type",
+        description=FieldDescriptions.vllm_model,
+        ui_type=UIType.LlavaOnevisionModel,
+    )
+
+    @field_validator("images", mode="before")
+    def listify_images(cls, v: Any) -> list:
+        if v is None:
+            return v
+        if not isinstance(v, list):
+            return [v]
+        return v
+
+    def _get_images(self, context: InvocationContext) -> list[Image]:
+        if self.images is None:
+            return []
+
+        image_fields = self.images if isinstance(self.images, list) else [self.images]
+        return [context.images.get_pil(image_field.image_name, "RGB") for image_field in image_fields]
+
+    @torch.no_grad()
+    def invoke(self, context: InvocationContext) -> StringOutput:
+        images = self._get_images(context)
+
+        with context.models.load(self.vllm_model) as vllm_model:
+            assert isinstance(vllm_model, LlavaOnevisionModel)
+            output = vllm_model.run(
+                prompt=self.prompt,
+                images=images,
+                device=TorchDevice.choose_torch_device(),
+                dtype=TorchDevice.choose_torch_dtype(),
+            )
+
+        return StringOutput(value=output)
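The images field of this new node accepts a single ImageField, a list of them, or None, and the before-mode validator normalizes a bare value into a list before pydantic checks the type. A minimal standalone sketch of that coercion (pydantic v2, with a plain string standing in for ImageField):

from typing import Any

from pydantic import BaseModel, field_validator

class Example(BaseModel):
    images: list[str] | str | None = None

    @field_validator("images", mode="before")
    def listify(cls, v: Any) -> Any:
        # Runs before type validation, so a bare value is wrapped into a list.
        if v is None or isinstance(v, list):
            return v
        return [v]

assert Example(images="cat.png").images == ["cat.png"]
assert Example(images=["a.png", "b.png"]).images == ["a.png", "b.png"]
assert Example(images=None).images is None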

invokeai/app/invocations/metadata_linked.py (+4 -4)

@@ -610,10 +610,10 @@ class LatentsMetaOutput(LatentsOutput, MetadataOutput):
 
 @invocation(
     "denoise_latents_meta",
-    title="Denoise Latents + metadata",
+    title=f"{DenoiseLatentsInvocation.UIConfig.title} + Metadata",
     tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
     category="latents",
-    version="1.1.0",
+    version="1.1.1",
 )
 class DenoiseLatentsMetaInvocation(DenoiseLatentsInvocation, WithMetadata):
     def invoke(self, context: InvocationContext) -> LatentsMetaOutput:

@@ -675,10 +675,10 @@ def _loras_to_json(obj: Union[Any, list[Any]]):
 
 @invocation(
     "flux_denoise_meta",
-    title="Flux Denoise + metadata",
+    title=f"{FluxDenoiseInvocation.UIConfig.title} + Metadata",
     tags=["flux", "latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
     category="latents",
-    version="1.0.0",
+    version="1.0.1",
 )
 class FluxDenoiseLatentsMetaInvocation(FluxDenoiseInvocation, WithMetadata):
     """Run denoising process with a FLUX transformer model + metadata."""
