In this video we show you how to create a workflow that lets you design new fashion trends in the blink of an eye! Using a combination of ControlNet, IP Adapter and GPT Vision, this mini-app can redesign the outfit in any photo based on a style reference image.
Step by step
- Click Build to start a new glif project.
- Add two Image Input Blocks.
- Label one "Original Image" and the other "Style Image".
- Add a Multipick Input Block.
- List the values 0.0 to 1.0 in increments of 0.1 in the previous block; these become the ControlNet strength options.
- Add a Glif Block.
- Switch the Glif Block to GPT Vision.
- Add the parameters outlined below.
- Add an LLM Block and under "Advanced", select Claude 3.5 Sonnet.
- Adjust the LLM settings to the parameters below.
- (Optional) Add several Text Combine Blocks to set values for the IP Adapter strength (1), mask blur (30), and target (outfit).
- (Optional) Add a Text Combine Block for the prompt: bizarre fashion photograph of person wearing [style description], shot on film.
- Add a ComfyUI Block.
- Paste the JSON below into the ComfyUI Block; its {placeholder} variables are filled by the blocks you created above (see the sketch after this list).
- Publish the glif!
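Before pasting, it helps to know which {placeholder} in the ComfyUI JSON each block feeds. The block labels below come from the steps above; the exact variable names are whatever you assign in the glif builder, so treat this mapping as a sketch rather than fixed names:

```python
# Sketch: how the glif blocks above map to the {placeholders} in the ComfyUI JSON below.
# Keys are the placeholders the workflow expects; values describe which block fills them.
placeholders = {
    "{image-input1}": "Original Image block - the photo whose outfit gets replaced",
    "{image-input2}": "Style Image block - the inspiration image fed to the IP Adapter",
    "{cn_strength}":  "Multipick Block - ControlNet strength, 0.0 to 1.0",
    "{IPAstrength}":  "Text Combine Block - IP Adapter strength (1)",
    "{maskblur}":     "Text Combine Block - mask blur (30)",
    "{target}":       "Text Combine Block - what to segment and replace ('outfit')",
    "{prompt}":       "Text Combine Block - final image prompt built from the style description",
}
```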
Code and Content
GPT Vision Input
Text Prompt
You're a fashion designer, taking inspiration from all sorts of images. Today you want to create a unisex outfit inspired by this image. Write a brief description of the outfit, focus on the style, theme, colors and fabrics and materials (bizarre is good!). This is for a high end weirdo fashion brand, so any and all materials are possible, you can even go sci-fi. Just go, write a 6-7 word sentence, no more, no yapping:
Image Prompt
[Style Image]
maxTokens
100
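The Glif Block runs this vision call for you. Purely as an illustration of what it does, here is a rough stand-alone equivalent using the OpenAI Python SDK; the model name and the image URL are assumptions, not settings taken from the glif:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

style_image_url = "https://example.com/style-image.jpg"  # hypothetical URL of the Style Image

vision_prompt = (
    "You're a fashion designer, taking inspiration from all sorts of images. "
    "Today you want to create a unisex outfit inspired by this image. "
    "Write a brief description of the outfit, focus on the style, theme, colors "
    "and fabrics and materials (bizarre is good!). This is for a high end weirdo "
    "fashion brand, so any and all materials are possible, you can even go sci-fi. "
    "Just go, write a 6-7 word sentence, no more, no yapping:"
)

response = client.chat.completions.create(
    model="gpt-4o",  # assumption: any GPT model with vision support
    max_tokens=100,
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": vision_prompt},
            {"type": "image_url", "image_url": {"url": style_image_url}},
        ],
    }],
)
style_description = response.choices[0].message.content
```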
Claude 3.5 Input
Prompt
Name a fashion trend based on an outfit that is the following: [gpt vision] Only return one or two words, no intro, just go, write it in ALL CAPS - make it very unique, not some lame existing genre (no "Cyberpunk"), it needs to be super leftfield and weird!
Max Tokens
50
System Prompt
You're a fashion editor, picking up on the most insane and cool trends and subcultures. You name trends, often ending in -core.
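As with the vision step, the LLM Block makes this call inside glif. A rough stand-alone equivalent with the Anthropic Python SDK could look like the sketch below; the model id is an assumption, and style_description stands in for the GPT Vision output:

```python
import anthropic

client = anthropic.Anthropic()  # assumes ANTHROPIC_API_KEY is set in the environment

style_description = "<output of the GPT Vision step>"  # placeholder for illustration

message = client.messages.create(
    model="claude-3-5-sonnet-20240620",  # assumption: Claude 3.5 Sonnet model id
    max_tokens=50,
    system=(
        "You're a fashion editor, picking up on the most insane and cool trends "
        "and subcultures. You name trends, often ending in -core."
    ),
    messages=[{
        "role": "user",
        "content": (
            f"Name a fashion trend based on an outfit that is the following: {style_description} "
            "Only return one or two words, no intro, just go, write it in ALL CAPS - "
            "make it very unique, not some lame existing genre (no \"Cyberpunk\"), "
            "it needs to be super leftfield and weird!"
        ),
    }],
)
trend_name = message.content[0].text
```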
ComfyUI JSON
{
"38": {
"inputs": {
"model_name": "sam_vit_h (2.56GB)"
},
"class_type": "SAMModelLoader (segment anything)",
"_meta": {
"title": "SAMModelLoader (segment anything)"
}
},
"40": {
"inputs": {
"image": "{image-input1}"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"41": {
"inputs": {
"model_name": "GroundingDINO_SwinT_OGC (694MB)"
},
"class_type": "GroundingDinoModelLoader (segment anything)",
"_meta": {
"title": "GroundingDinoModelLoader (segment anything)"
}
},
"42": {
"inputs": {
"prompt": "{target}",
"threshold": 0.5,
"sam_model": [
"38",
0
],
"grounding_dino_model": [
"41",
0
],
"image": [
"40",
0
]
},
"class_type": "GroundingDinoSAMSegment (segment anything)",
"_meta": {
"title": "GroundingDinoSAMSegment (segment anything)"
}
},
"47": {
"inputs": {
"samples": [
"52",
0
],
"vae": [
"53",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"52": {
"inputs": {
"seed": 452183510506570,
"steps": 27,
"cfg": 5,
"sampler_name": "euler_ancestral",
"scheduler": "normal",
"denoise": 1,
"model": [
"73",
0
],
"positive": [
"80",
0
],
"negative": [
"74",
1
],
"latent_image": [
"74",
2
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"53": {
"inputs": {
"ckpt_name": "Juggernaut_X_RunDiffusion.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"54": {
"inputs": {
"text": "{prompt}",
"clip": [
"53",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"55": {
"inputs": {
"text": "watermark, blurry, bad quality, illustration, pixelated, boring, ugly, wrong, tiled",
"clip": [
"53",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"67": {
"inputs": {
"expand": [
"88",
1
],
"incremental_expandrate": 0,
"tapered_corners": true,
"flip_input": false,
"blur_radius": 15,
"lerp_alpha": 1,
"decay_factor": 1,
"fill_holes": false,
"mask": [
"42",
1
]
},
"class_type": "GrowMaskWithBlur",
"_meta": {
"title": "Grow Mask With Blur"
}
},
"72": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"47",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"73": {
"inputs": {
"model": [
"90",
0
]
},
"class_type": "DifferentialDiffusion",
"_meta": {
"title": "Differential Diffusion"
}
},
"74": {
"inputs": {
"positive": [
"54",
0
],
"negative": [
"55",
0
],
"vae": [
"53",
2
],
"pixels": [
"40",
0
],
"mask": [
"67",
0
]
},
"class_type": "InpaintModelConditioning",
"_meta": {
"title": "InpaintModelConditioning"
}
},
"76": {
"inputs": {
"preset": "PLUS (high strength)",
"model": [
"53",
0
]
},
"class_type": "IPAdapterUnifiedLoader",
"_meta": {
"title": "IPAdapter Unified Loader"
}
},
"77": {
"inputs": {
"clip_name": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
},
"class_type": "CLIPVisionLoader",
"_meta": {
"title": "Load CLIP Vision"
}
},
"78": {
"inputs": {
"variable": "{IPAstrength}",
"fallback": ""
},
"class_type": "GlifVariable",
"_meta": {
"title": "Glif Variable"
}
},
"79": {
"inputs": {
"control_net_name": "mistoLine_fp16.safetensors"
},
"class_type": "ControlNetLoader",
"_meta": {
"title": "Load ControlNet Model"
}
},
"80": {
"inputs": {
"strength": [
"82",
2
],
"conditioning": [
"74",
0
],
"control_net": [
"79",
0
],
"image": [
"81",
0
]
},
"class_type": "ControlNetApply",
"_meta": {
"title": "Apply ControlNet"
}
},
"81": {
"inputs": {
"image": [
"40",
0
]
},
"class_type": "AnyLinePreprocessor",
"_meta": {
"title": "TheMisto.ai Anyline"
}
},
"82": {
"inputs": {
"variable": "{cn_strength}",
"fallback": ""
},
"class_type": "GlifVariable",
"_meta": {
"title": "Glif Variable"
}
},
"83": {
"inputs": {
"image": "{image-input2}"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"88": {
"inputs": {
"variable": "{maskblur}",
"fallback": "10"
},
"class_type": "GlifVariable",
"_meta": {
"title": "Glif Variable"
}
},
"90": {
"inputs": {
"weight_style": [
"78",
2
],
"weight_composition": 0,
"expand_style": false,
"combine_embeds": "average",
"start_at": 0,
"end_at": 1,
"embeds_scaling": "V only",
"model": [
"76",
0
],
"ipadapter": [
"76",
1
],
"image_style": [
"83",
0
],
"image_composition": [
"83",
0
],
"clip_vision": [
"77",
0
]
},
"class_type": "IPAdapterStyleComposition",
"_meta": {
"title": "IPAdapter Style & Composition SDXL"
}
}
}
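If you customize the workflow, it is easy to drop or rename a variable by accident. As a quick sanity check, the small script below (plain Python; it assumes you saved the JSON above as workflow.json) lists every {placeholder} the workflow still expects and confirms the JSON parses:

```python
import json
import re

# Load the ComfyUI workflow JSON saved from the block above.
with open("workflow.json") as f:
    raw = f.read()

# Glif substitutes {name} placeholders before sending the workflow to ComfyUI,
# so every placeholder found here must correspond to a block or variable in your glif.
found = sorted(set(re.findall(r"\{([A-Za-z0-9_-]+)\}", raw)))
print(found)
# Expected for the workflow above:
# ['IPAstrength', 'cn_strength', 'image-input1', 'image-input2', 'maskblur', 'prompt', 'target']

json.loads(raw)  # raises an error if the JSON itself is not well-formed
```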