Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
bc82599
Add Qwen Image Edit 2511 model support
lstein Mar 24, 2026
a373e7e
fix: diffusers compat, VL encoder image resize, badge wrapping
lstein Mar 27, 2026
1f248b7
fix: free BnB-quantized encoder after use, suppress int8 cast warning
lstein Mar 27, 2026
5cafafe
chore: ruff & uv lock
lstein Mar 27, 2026
073b08b
Merge branch 'main' into feat/qwen-image-edit-2511
lstein Mar 27, 2026
c9f781e
fix: bypass HfApi in model install test
lstein Mar 27, 2026
ceaf97d
feat(backend): add support for a Comfy LoRA variant
lstein Mar 27, 2026
01d6f09
refactor: rename QwenImageEdit -> QwenImage base type
lstein Mar 28, 2026
c23f9ca
feat: add QwenImage variant type to distinguish edit vs txt2img models
lstein Mar 28, 2026
e13faf5
fix: make QwenImage variant optional to fix model detection tags
lstein Mar 28, 2026
87bda37
fix: rename edit starter model variables to qwen_image_edit_* prefix
lstein Mar 28, 2026
62ccb75
fix: remove unused exports (zQwenImageVariantType, isQwenImageEditMai…
lstein Mar 28, 2026
cbfd804
fix: restore correct GGUF filenames in Qwen Image Edit starter model …
lstein Mar 28, 2026
939f6f9
feat: tag edit starter models with variant, pass variant through install
lstein Mar 28, 2026
4982343
fix: only set zero_cond_t=True for edit-variant GGUF models
lstein Mar 28, 2026
3a39a79
fix: prevent Flux LoRAs from being detected as Qwen Image LoRAs
lstein Mar 28, 2026
60cf18b
test: add Qwen Image LoRA model identification tests
lstein Mar 28, 2026
153f260
chore: ruff
lstein Mar 28, 2026
2d89174
chore: ruff
lstein Mar 28, 2026
2eb2c55
fix: don't force reference image to output aspect ratio in VAE encoding
lstein Mar 28, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions invokeai/app/api/dependencies.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@
CogView4ConditioningInfo,
ConditioningFieldData,
FLUXConditioningInfo,
QwenImageConditioningInfo,
SD3ConditioningInfo,
SDXLConditioningInfo,
ZImageConditioningInfo,
Expand Down Expand Up @@ -140,6 +141,7 @@ def initialize(
SD3ConditioningInfo,
CogView4ConditioningInfo,
ZImageConditioningInfo,
QwenImageConditioningInfo,
],
ephemeral=True,
),
Expand Down
8 changes: 8 additions & 0 deletions invokeai/app/invocations/fields.py
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,8 @@ class FieldDescriptions:
sd3_model = "SD3 model (MMDiTX) to load"
cogview4_model = "CogView4 model (Transformer) to load"
z_image_model = "Z-Image model (Transformer) to load"
qwen_image_model = "Qwen Image Edit model (Transformer) to load"
qwen_vl_encoder = "Qwen2.5-VL tokenizer, processor and text/vision encoder"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
Expand Down Expand Up @@ -340,6 +342,12 @@ class ZImageConditioningField(BaseModel):
)


class QwenImageConditioningField(BaseModel):
    """A Qwen Image Edit conditioning tensor primitive value.

    Holds a reference to a conditioning tensor by name rather than the tensor
    data itself; the tensor is presumably resolved from the invocation
    context's conditioning storage by consumers — TODO confirm against callers.
    """

    # Lookup key for the stored conditioning tensor.
    conditioning_name: str = Field(description="The name of conditioning tensor")


class ConditioningField(BaseModel):
"""A conditioning tensor primitive value"""

Expand Down
4 changes: 4 additions & 0 deletions invokeai/app/invocations/metadata.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,10 @@ def invoke(self, context: InvocationContext) -> MetadataOutput:
"z_image_img2img",
"z_image_inpaint",
"z_image_outpaint",
"qwen_image_txt2img",
"qwen_image_img2img",
"qwen_image_inpaint",
"qwen_image_outpaint",
]


Expand Down
7 changes: 7 additions & 0 deletions invokeai/app/invocations/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,13 @@ class GlmEncoderField(BaseModel):
text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")


class QwenVLEncoderField(BaseModel):
    """Field for Qwen2.5-VL encoder used by Qwen Image Edit models.

    Bundles the model identifiers needed to load the tokenizer and the
    text/vision encoder submodels as a single node field.
    """

    # Identifier used to load the tokenizer submodel.
    tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
    # Identifier used to load the text_encoder submodel.
    text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")


class Qwen3EncoderField(BaseModel):
"""Field for Qwen3 text encoder used by Z-Image models."""

Expand Down
12 changes: 12 additions & 0 deletions invokeai/app/invocations/primitives.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
InputField,
LatentsField,
OutputField,
QwenImageConditioningField,
SD3ConditioningField,
TensorField,
UIComponent,
Expand Down Expand Up @@ -473,6 +474,17 @@ def build(cls, conditioning_name: str) -> "ZImageConditioningOutput":
return cls(conditioning=ZImageConditioningField(conditioning_name=conditioning_name))


@invocation_output("qwen_image_conditioning_output")
class QwenImageConditioningOutput(BaseInvocationOutput):
    """Base class for nodes that output a Qwen Image Edit conditioning tensor."""

    # The conditioning reference emitted by the node.
    conditioning: QwenImageConditioningField = OutputField(description=FieldDescriptions.cond)

    @classmethod
    def build(cls, conditioning_name: str) -> "QwenImageConditioningOutput":
        """Convenience constructor: wrap a stored conditioning tensor's name in an output.

        Args:
            conditioning_name: Lookup key of the conditioning tensor to reference.

        Returns:
            A QwenImageConditioningOutput whose field points at that tensor.
        """
        return cls(conditioning=QwenImageConditioningField(conditioning_name=conditioning_name))


@invocation_output("conditioning_output")
class ConditioningOutput(BaseInvocationOutput):
"""Base class for nodes that output a single conditioning tensor"""
Expand Down
Loading
Loading