Yatheshr committed
Commit b7d62f6 · verified · 1 Parent(s): a3ffc86

Update app.py

Files changed (1)
  1. app.py +51 -40
app.py CHANGED
@@ -2,62 +2,73 @@ import torch
 import gradio as gr
 from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline
 from PIL import Image
-import numpy as np
 from huggingface_hub import login

-# Authenticate (asks only once)
-hf_token = input("🔐 Enter your Hugging Face token: ").strip()
-login(hf_token)
-
-# Check device
+# Globals
+text2img_pipe = None
+inpaint_pipe = None
 device = "cuda" if torch.cuda.is_available() else "cpu"

-# Load models
-text2img_pipe = StableDiffusionPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-2-1",
-    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-    use_auth_token=hf_token
-).to(device)
+# Text-to-image generation
+def generate_image(hf_token, prompt):
+    global text2img_pipe
+    try:
+        if text2img_pipe is None:
+            login(hf_token)
+            text2img_pipe = StableDiffusionPipeline.from_pretrained(
+                "stabilityai/stable-diffusion-2-1",
+                torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+                use_auth_token=hf_token
+            ).to(device)

-inpaint_pipe = StableDiffusionInpaintPipeline.from_pretrained(
-    "stable-diffusion-v1-5/stable-diffusion-inpainting",
-    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-    use_auth_token=hf_token
-).to(device)
+        image = text2img_pipe(prompt).images[0]
+        return "✅ Image generated", image
+    except Exception as e:
+        return f"❌ Error: {str(e)}", None

-# Image generation
-def generate_image(prompt):
-    image = text2img_pipe(prompt).images[0]
-    return image
+# Image manipulation (inpainting)
+def manipulate_image(hf_token, prompt, image, mask):
+    global inpaint_pipe
+    try:
+        if image is None or mask is None:
+            return "⚠️ Please upload both image and mask", None

-# Image inpainting (manipulation)
-def manipulate_image(prompt, image, mask):
-    if image is None or mask is None:
-        return "Please upload both image and mask.", None
+        if inpaint_pipe is None:
+            login(hf_token)
+            inpaint_pipe = StableDiffusionInpaintPipeline.from_pretrained(
+                "stable-diffusion-v1-5/stable-diffusion-inpainting",
+                torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+                use_auth_token=hf_token
+            ).to(device)

-    image = image.convert("RGB").resize((512, 512))
-    mask = mask.convert("L").resize((512, 512))  # grayscale
+        image = image.convert("RGB").resize((512, 512))
+        mask = mask.convert("L").resize((512, 512))

-    result = inpaint_pipe(prompt=prompt, image=image, mask_image=mask).images[0]
-    return "Image updated!", result
+        result = inpaint_pipe(prompt=prompt, image=image, mask_image=mask).images[0]
+        return "✅ Image updated", result
+    except Exception as e:
+        return f"❌ Error: {str(e)}", None

 # Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("## 🎨 AI Image Generator & Editor")
+    gr.Markdown("## 🎨 AI Image Generator & Editor (Stable Diffusion)")

     with gr.Tab("🖼️ Generate Image from Text"):
-        prompt = gr.Textbox(label="Describe your image")
-        gen_btn = gr.Button("Generate")
+        token1 = gr.Textbox(label="🔐 Hugging Face Token", type="password")
+        prompt1 = gr.Textbox(label="Prompt (e.g., 'a sunset over mountains')")
+        status1 = gr.Textbox(label="Status")
         gen_output = gr.Image(label="Generated Image")
-        gen_btn.click(fn=generate_image, inputs=prompt, outputs=gen_output)
+        gen_btn = gr.Button("Generate Image")
+        gen_btn.click(fn=generate_image, inputs=[token1, prompt1], outputs=[status1, gen_output])

-    with gr.Tab("🪄 Manipulate Existing Image"):
-        prompt2 = gr.Textbox(label="Describe the edit (e.g., 'remove the tree')")
+    with gr.Tab("🪄 Manipulate Uploaded Image"):
+        token2 = gr.Textbox(label="🔐 Hugging Face Token", type="password")
+        prompt2 = gr.Textbox(label="Edit Prompt (e.g., 'remove the background')")
         img_input = gr.Image(label="Upload Image", type="pil", tool="editor")
-        mask_input = gr.Image(label="Draw Mask on Area to Edit", tool="sketch", type="pil")
-        edit_btn = gr.Button("Apply Edit")
-        status = gr.Textbox(label="Status")
+        mask_input = gr.Image(label="Draw Mask", type="pil", tool="sketch")
+        status2 = gr.Textbox(label="Status")
         edited_img = gr.Image(label="Edited Image")
-        edit_btn.click(fn=manipulate_image, inputs=[prompt2, img_input, mask_input], outputs=[status, edited_img])
+        edit_btn = gr.Button("Apply Edit")
+        edit_btn.click(fn=manipulate_image, inputs=[token2, prompt2, img_input, mask_input], outputs=[status2, edited_img])

-demo.launch(server_name="0.0.0.0", server_port=7860)
+demo.launch()
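A note on token handling in this version: each handler now logs in lazily and builds its pipeline on first use, with the token typed into the UI on every visit. On a Space it is more common to read the token from a repository secret and pass it through the newer `token=` argument (recent diffusers releases deprecate `use_auth_token=`). A minimal sketch of that variant, assuming a secret named `HF_TOKEN` and a recent diffusers version; the helper name is illustrative and not part of this commit:

```python
import os

import torch
from diffusers import StableDiffusionPipeline
from huggingface_hub import login

device = "cuda" if torch.cuda.is_available() else "cpu"
_text2img_pipe = None  # cached across requests, mirroring the commit's global


def get_text2img_pipe():
    """Lazily build and cache the text-to-image pipeline on first use."""
    global _text2img_pipe
    if _text2img_pipe is None:
        token = os.environ.get("HF_TOKEN")  # Space secret; assumed name, not part of the commit
        if token:
            login(token=token)
        _text2img_pipe = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1",
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            token=token,  # newer spelling of use_auth_token in recent diffusers
        ).to(device)
    return _text2img_pipe
```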
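The UI still passes `tool="editor"` and `tool="sketch"` to `gr.Image`, which only works on Gradio 3.x; Gradio 4 removed the `tool` argument. A hedged sketch of how the mask input could be adapted, assuming Gradio 4's `gr.ImageMask` (an `ImageEditor` variant whose value, with `type="pil"`, is a dict of PIL images under "background" and "layers"); the mask is taken from the alpha channel of the drawn layer:

```python
import gradio as gr


def editor_value_to_mask(editor_value):
    """Turn a gr.ImageMask value (dict with 'background'/'layers') into an L-mode mask."""
    if not editor_value or not editor_value.get("layers"):
        return None
    # Strokes live on a transparent RGBA layer; anything with alpha > 0 was painted.
    alpha = editor_value["layers"][0].getchannel("A")
    return alpha.point(lambda a: 255 if a > 0 else 0)


with gr.Blocks() as demo:
    img_input = gr.Image(label="Upload Image", type="pil")    # no tool= in Gradio 4
    mask_input = gr.ImageMask(label="Draw Mask", type="pil")  # replaces tool="sketch"
    mask_preview = gr.Image(label="Mask Preview")
    gr.Button("Show Mask").click(editor_value_to_mask, inputs=mask_input, outputs=mask_preview)

demo.launch()
```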
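For testing `manipulate_image` without a drawing tool, note that the diffusers inpainting pipelines treat `mask_image` as white = repaint, black = keep. A small PIL-only example that builds such a mask at the 512x512 size the handler expects (the rectangle coordinates are arbitrary):

```python
from PIL import Image, ImageDraw

# White marks the region to repaint, black the region to keep.
mask = Image.new("L", (512, 512), 0)
ImageDraw.Draw(mask).rectangle([128, 128, 384, 384], fill=255)
mask.save("mask.png")
```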