alexander-potemkin committed on
Commit
b48aabb
·
verified ·
1 Parent(s): fa8f9a3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -12
app.py CHANGED
@@ -1,18 +1,53 @@
1
  import gradio as gr
2
- from transformers import pipeline
 
 
 
 
3
 
4
- pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
5
 
6
- def predict(input_img):
7
- predictions = pipeline(input_img)
8
- return input_img, {p["label"]: p["score"] for p in predictions}
 
 
 
9
 
10
- gradio_app = gr.Interface(
11
- predict,
12
- inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
13
- outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
14
- title="Hot Dog? Or Not?",
15
  )
16
 
17
- if __name__ == "__main__":
18
- gradio_app.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import spaces
import torch
import os
from compel import Compel, ReturnedEmbeddingsType
from diffusers import DiffusionPipeline


# Checkpoint is configurable via the MODEL_NAME env var; defaults to an
# SDXL-style model (it exposes tokenizer_2/text_encoder_2 below — TODO confirm).
model_name = os.environ.get('MODEL_NAME', 'UnfilteredAI/NSFW-gen-v2')
pipe = DiffusionPipeline.from_pretrained(
    model_name,
    torch_dtype=torch.float16  # half precision to reduce GPU memory use
)
pipe.to('cuda')

# Compel builds prompt embeddings across the pipeline's two text encoders.
# requires_pooled=[False, True]: only the second encoder contributes the
# pooled embedding that SDXL pipelines expect.
compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True]
)
22
 
23
+
24
@spaces.GPU(duration=120)
def generate(prompt, negative_prompt, num_inference_steps, guidance_scale, width, height, num_samples):
    """Generate images from a weighted prompt via the global diffusion pipe.

    Args:
        prompt: Positive prompt; Compel weighting syntax is supported.
        negative_prompt: Negative prompt (may be empty).
        num_inference_steps: Denoising step count (coerced to int).
        guidance_scale: Classifier-free guidance strength.
        width, height: Output image size in pixels (coerced to int).
        num_samples: Number of images to generate (coerced to int).

    Returns:
        The list of generated PIL images from the pipeline output.
    """
    # Compel converts the prompt text into SDXL embeddings + pooled embeddings.
    embeds, pooled = compel(prompt)
    neg_embeds, neg_pooled = compel(negative_prompt)
    # gr.Number delivers floats by default; diffusers requires integer
    # step counts, pixel dimensions, and per-prompt image counts, so cast.
    return pipe(
        prompt_embeds=embeds,
        pooled_prompt_embeds=pooled,
        negative_prompt_embeds=neg_embeds,
        negative_pooled_prompt_embeds=neg_pooled,
        num_inference_steps=int(num_inference_steps),
        guidance_scale=guidance_scale,
        width=int(width),
        height=int(height),
        num_images_per_prompt=int(num_samples)
    ).images
39
+
40
+
41
# Wire the generator into a simple form UI and start the app server.
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Text(label="Prompt"),
        gr.Text("", label="Negative Prompt"),
        gr.Number(7, label="Number inference steps"),
        gr.Number(3, label="Guidance scale"),
        gr.Number(512, label="Width"),
        gr.Number(512, label="Height"),
        gr.Number(1, label="# images"),
    ],
    outputs=gr.Gallery(),
)
demo.launch()