app.py
import os
from datetime import datetime

import gradio as gr
import torch
from PIL import Image
from diffusers.utils import load_image
from diffusers import EulerDiscreteScheduler

from pipline_StableDiffusion_ConsistentID import ConsistentIDStableDiffusionPipeline
# Absolute path of the directory containing this script
script_directory = os.path.dirname(os.path.realpath(__file__))

# Peak GPU memory consumption is about 6 GB.
def process(inputImage, prompt, negative_prompt):
    device = "cuda"

    ### Download the models from Hugging Face, or replace these paths with local copies.
    base_model_path = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
    consistentID_path = "JackAILab/ConsistentID/ConsistentID-v1.bin"
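    # Optional, illustrative snippet (not part of the original script): the checkpoint can be
    # pre-downloaded with `huggingface_hub`, assuming load_ConsistentID_model also accepts a
    # local directory in place of the Hub repo id:
    #   from huggingface_hub import hf_hub_download
    #   consistentID_path = hf_hub_download(repo_id="JackAILab/ConsistentID", filename="ConsistentID-v1.bin")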
    ### Load the base model
    pipe = ConsistentIDStableDiffusionPipeline.from_pretrained(
        base_model_path,
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
    ).to(device)

    ### Load the ConsistentID checkpoint
    pipe.load_ConsistentID_model(
        os.path.dirname(consistentID_path),
        subfolder="",
        weight_name=os.path.basename(consistentID_path),
        trigger_word="img",
    )
    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
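    # Optional: if the ~6 GB peak is too high, diffusers' model CPU offload can lower it.
    # This assumes the custom pipeline inherits DiffusionPipeline's offload support
    # (requires `accelerate`); left commented out here as a sketch:
    #   pipe.enable_model_cpu_offload()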
    # Hyper-parameters
    select_images = load_image(Image.fromarray(inputImage))
    num_steps = 50
    merge_steps = 30

    if prompt == "":
        prompt = "A man, in a forest, adventuring"
    if negative_prompt == "":
        negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality, blurry"

    # Extend the prompts
    prompt = "cinematic photo," + prompt + ", 50mm photograph, half-length portrait, film, bokeh, professional, 4k, highly detailed"
    negative_prompt_group = "((((ugly)))), (((duplicate))), ((morbid)), ((mutilated)), [out of frame], extra fingers, mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), ((ugly)), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, (((disfigured))). out of frame, ugly, extra limbs, (bad anatomy), gross proportions, (malformed limbs), ((missing arms)), ((missing legs)), (((extra arms))), (((extra legs))), mutated hands, (fused fingers), (too many fingers), (((long neck)))"
    negative_prompt = negative_prompt + negative_prompt_group

    seed = torch.randint(0, 1000, (1,)).item()
    generator = torch.Generator(device=device).manual_seed(seed)
    images = pipe(
        prompt=prompt,
        width=512,
        height=512,
        input_id_images=select_images,
        negative_prompt=negative_prompt,
        num_images_per_prompt=1,
        num_inference_steps=num_steps,
        start_merge_step=merge_steps,
        generator=generator,
    ).images[0]

    # Save the result under images/gradio_outputs, with the timestamp and seed in the filename
    current_date = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
    output_dir = os.path.join(script_directory, "images", "gradio_outputs")
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, f"{current_date}-{seed}.jpg")
    images.save(output_path)
    return output_path
iface = gr.Interface(
    fn=process,
    inputs=[
        gr.Image(label="Upload Image"),
        gr.Textbox(label="Prompt", placeholder="A man, in a forest, adventuring"),
        gr.Textbox(label="Negative Prompt", placeholder="monochrome, lowres, bad anatomy, worst quality, low quality, blurry"),
    ],
    outputs=[
        gr.Image(label="Output"),
    ],
    title="ConsistentID Demo",
    description="Upload a reference portrait and describe the desired scene.",
)

iface.launch(share=True)
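# A minimal sketch of alternative launch options (standard gr.Interface.launch arguments),
# e.g. serving on a fixed port on the local network instead of creating a public share link:
#   iface.launch(server_name="0.0.0.0", server_port=7860)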