Blood076 committed
Commit 9d21122 · verified
1 Parent(s): 7530482

Update app.py

Files changed (1):
  app.py  +138 -1
app.py CHANGED
@@ -1,4 +1,141 @@
 import gradio as gr
+import numpy as np
+import random
+from diffusers import DiffusionPipeline
+import torch
 
 
-gr.load("models/city96/FLUX.1-schnell-gguf").launch()
+gr.load("models/city96/FLUX.1-schnell-gguf").launch()
+
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 1024
+
+
+# @spaces.GPU #[uncomment to use ZeroGPU]
+def infer(
+    prompt,
+    negative_prompt,
+    seed,
+    randomize_seed,
+    width,
+    height,
+    guidance_scale,
+    num_inference_steps,
+    progress=gr.Progress(track_tqdm=True),
+):
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+
+    generator = torch.Generator().manual_seed(seed)
+
+    image = pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        width=width,
+        height=height,
+        generator=generator,
+    ).images[0]
+
+    return image, seed
+
+
+examples = [
+    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+    "An astronaut riding a green horse",
+    "A delicious ceviche cheesecake slice",
+]
+
+css = """
+#col-container {
+    margin: 0 auto;
+    max-width: 640px;
+}
+"""
+
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown(" # Text-to-Image Gradio Template")
+
+        with gr.Row():
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt",
+                container=False,
+            )
+
+            run_button = gr.Button("Run", scale=0, variant="primary")
+
+        result = gr.Image(label="Result", show_label=False)
+
+        with gr.Accordion("Advanced Settings", open=False):
+            negative_prompt = gr.Text(
+                label="Negative prompt",
+                max_lines=1,
+                placeholder="Enter a negative prompt",
+                visible=False,
+            )
+
+            seed = gr.Slider(
+                label="Seed",
+                minimum=0,
+                maximum=MAX_SEED,
+                step=1,
+                value=0,
+            )
+
+            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+            with gr.Row():
+                width = gr.Slider(
+                    label="Width",
+                    minimum=256,
+                    maximum=MAX_IMAGE_SIZE,
+                    step=32,
+                    value=1024,  # Replace with defaults that work for your model
+                )
+
+                height = gr.Slider(
+                    label="Height",
+                    minimum=256,
+                    maximum=MAX_IMAGE_SIZE,
+                    step=32,
+                    value=1024,  # Replace with defaults that work for your model
+                )
+
+            with gr.Row():
+                guidance_scale = gr.Slider(
+                    label="Guidance scale",
+                    minimum=0.0,
+                    maximum=10.0,
+                    step=0.1,
+                    value=0.0,  # Replace with defaults that work for your model
+                )
+
+                num_inference_steps = gr.Slider(
+                    label="Number of inference steps",
+                    minimum=1,
+                    maximum=50,
+                    step=1,
+                    value=1,  # Replace with defaults that work for your model
+                )
+
+        gr.Examples(examples=examples, inputs=[prompt])
+    gr.on(
+        triggers=[run_button.click, prompt.submit],
+        fn=infer,
+        inputs=[
+            prompt,
+            negative_prompt,
+            seed,
+            randomize_seed,
+            width,
+            height,
+            guidance_scale,
+            num_inference_steps,
+        ],
+        outputs=[result, seed],
+    )
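
A note on the committed code: the new infer function calls a pipe object that is never created anywhere in the file, the Blocks app bound to demo is never launched, and the pre-existing gr.load("models/city96/FLUX.1-schnell-gguf").launch() call is kept near the top, where it blocks before the Blocks UI below it is even defined. The sketch below shows one way the missing pieces could be filled in. It is an assumption-laden illustration, not part of the commit: it relies on diffusers' GGUF loading support (GGUFQuantizationConfig and FluxTransformer2DModel.from_single_file), on black-forest-labs/FLUX.1-schnell as the base repo, and on a quantized file name such as flux1-schnell-Q4_K_S.gguf existing in city96/FLUX.1-schnell-gguf.

# Hypothetical top-of-file replacement for the gr.load(...).launch() call:
# build the `pipe` that infer() expects from a GGUF-quantized FLUX transformer.
import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, GGUFQuantizationConfig

# Assumed checkpoint; pick a quant file that actually exists in the repo.
ckpt_url = (
    "https://huggingface.co/city96/FLUX.1-schnell-gguf"
    "/blob/main/flux1-schnell-Q4_K_S.gguf"
)

transformer = FluxTransformer2DModel.from_single_file(
    ckpt_url,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",  # assumed base repo for the remaining components
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)
pipe.enable_model_cpu_offload()  # optional; trades speed for lower VRAM use

# ... the Blocks UI from the commit goes here ...

# Hypothetical end-of-file addition so the demo actually starts:
if __name__ == "__main__":
    demo.launch()

Note also that, depending on the diffusers version, FluxPipeline may not accept a negative_prompt argument, so the pipe(...) call inside infer might need to drop it; FLUX.1-schnell is typically run with guidance_scale around 0.0 and very few steps, which matches the slider defaults the commit keeps from the template.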