Tan committed on
Commit
3ddffde
·
verified ·
1 Parent(s): ff8f12e

Upload ComfyUI workflow configuration

Browse files
Files changed (1) hide show
  1. workflow/workflow_api.json +228 -0
workflow/workflow_api.json ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "6": {
3
+ "inputs": {
4
+ "text": "",
5
+ "clip": [
6
+ "249",
7
+ 1
8
+ ]
9
+ },
10
+ "class_type": "CLIPTextEncode",
11
+ "_meta": {
12
+ "title": "CLIP Text Encode (Positive Prompt)"
13
+ }
14
+ },
15
+ "8": {
16
+ "inputs": {
17
+ "samples": [
18
+ "31",
19
+ 0
20
+ ],
21
+ "vae": [
22
+ "39",
23
+ 0
24
+ ]
25
+ },
26
+ "class_type": "VAEDecode",
27
+ "_meta": {
28
+ "title": "VAE Decode"
29
+ }
30
+ },
31
+ "31": {
32
+ "inputs": {
33
+ "seed": 993480294246291,
34
+ "steps": 20,
35
+ "cfg": 1,
36
+ "sampler_name": "er_sde",
37
+ "scheduler": "kl_optimal",
38
+ "denoise": 1,
39
+ "model": [
40
+ "249",
41
+ 0
42
+ ],
43
+ "positive": [
44
+ "35",
45
+ 0
46
+ ],
47
+ "negative": [
48
+ "135",
49
+ 0
50
+ ],
51
+ "latent_image": [
52
+ "258",
53
+ 0
54
+ ]
55
+ },
56
+ "class_type": "KSampler",
57
+ "_meta": {
58
+ "title": "KSampler"
59
+ }
60
+ },
61
+ "35": {
62
+ "inputs": {
63
+ "guidance": 5,
64
+ "conditioning": [
65
+ "177",
66
+ 0
67
+ ]
68
+ },
69
+ "class_type": "FluxGuidance",
70
+ "_meta": {
71
+ "title": "FluxGuidance"
72
+ }
73
+ },
74
+ "37": {
75
+ "inputs": {
76
+ "unet_name": "flux1-kontext-dev.safetensors",
77
+ "weight_dtype": "default"
78
+ },
79
+ "class_type": "UNETLoader",
80
+ "_meta": {
81
+ "title": "Load Diffusion Model"
82
+ }
83
+ },
84
+ "38": {
85
+ "inputs": {
86
+ "clip_name1": "clip_l.safetensors",
87
+ "clip_name2": "t5xxl_fp8_e4m3fn_scaled.safetensors",
88
+ "type": "flux",
89
+ "device": "default"
90
+ },
91
+ "class_type": "DualCLIPLoader",
92
+ "_meta": {
93
+ "title": "DualCLIPLoader"
94
+ }
95
+ },
96
+ "39": {
97
+ "inputs": {
98
+ "vae_name": "ae.safetensors"
99
+ },
100
+ "class_type": "VAELoader",
101
+ "_meta": {
102
+ "title": "Load VAE"
103
+ }
104
+ },
105
+ "42": {
106
+ "inputs": {
107
+ "image": [
108
+ "248",
109
+ 0
110
+ ]
111
+ },
112
+ "class_type": "FluxKontextImageScale",
113
+ "_meta": {
114
+ "title": "FluxKontextImageScale"
115
+ }
116
+ },
117
+ "124": {
118
+ "inputs": {
119
+ "pixels": [
120
+ "42",
121
+ 0
122
+ ],
123
+ "vae": [
124
+ "39",
125
+ 0
126
+ ]
127
+ },
128
+ "class_type": "VAEEncode",
129
+ "_meta": {
130
+ "title": "VAE Encode"
131
+ }
132
+ },
133
+ "135": {
134
+ "inputs": {
135
+ "conditioning": [
136
+ "261",
137
+ 0
138
+ ]
139
+ },
140
+ "class_type": "ConditioningZeroOut",
141
+ "_meta": {
142
+ "title": "ConditioningZeroOut"
143
+ }
144
+ },
145
+ "177": {
146
+ "inputs": {
147
+ "conditioning": [
148
+ "6",
149
+ 0
150
+ ],
151
+ "latent": [
152
+ "124",
153
+ 0
154
+ ]
155
+ },
156
+ "class_type": "ReferenceLatent",
157
+ "_meta": {
158
+ "title": "ReferenceLatent"
159
+ }
160
+ },
161
+ "247": {
162
+ "inputs": {
163
+ "filename_prefix": "ComfyUI",
164
+ "images": [
165
+ "8",
166
+ 0
167
+ ]
168
+ },
169
+ "class_type": "SaveImage",
170
+ "_meta": {
171
+ "title": "Save Image"
172
+ }
173
+ },
174
+ "248": {
175
+ "inputs": {
176
+ "image": "Screenshot 2025-09-22 at 13.19.59.png"
177
+ },
178
+ "class_type": "LoadImage",
179
+ "_meta": {
180
+ "title": "Load Image"
181
+ }
182
+ },
183
+ "249": {
184
+ "inputs": {
185
+ "lora_name": "flux_kontext_lora_v4_consolidated_000010000.safetensors",
186
+ "strength_model": 1.0,
187
+ "strength_clip": 1.0,
188
+ "model": [
189
+ "37",
190
+ 0
191
+ ],
192
+ "clip": [
193
+ "38",
194
+ 0
195
+ ]
196
+ },
197
+ "class_type": "LoraLoader",
198
+ "_meta": {
199
+ "title": "Load LoRA"
200
+ }
201
+ },
202
+ "258": {
203
+ "inputs": {
204
+ "amount": 1,
205
+ "samples": [
206
+ "124",
207
+ 0
208
+ ]
209
+ },
210
+ "class_type": "RepeatLatentBatch",
211
+ "_meta": {
212
+ "title": "Repeat Latent Batch"
213
+ }
214
+ },
215
+ "261": {
216
+ "inputs": {
217
+ "text": "",
218
+ "clip": [
219
+ "249",
220
+ 1
221
+ ]
222
+ },
223
+ "class_type": "CLIPTextEncode",
224
+ "_meta": {
225
+ "title": "CLIP Text Encode (Negative Prompt)"
226
+ }
227
+ }
228
+ }