-
Notifications
You must be signed in to change notification settings - Fork 14
/
nodes.py
136 lines (113 loc) · 5.06 KB
/
nodes.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
import argparse
import os
import subprocess
import sys
import time

import yaml

import folder_paths
from Hallo.scripts.inference import inference_process
# Directory containing this node file; used to locate the bundled Hallo repo
# and the downloaded model weights.
now_dir = os.path.dirname(os.path.abspath(__file__))
# ComfyUI-managed input directory (uploaded images/audio live here).
input_dir = folder_paths.get_input_directory()
# ComfyUI-managed output directory (generated videos are written here).
output_dir = folder_paths.get_output_directory()
# Expected location of Hallo pretrained weights, relative to this node.
ckpt_dir = os.path.join(now_dir,"pretrained_models")
# Printed at import time so users can confirm where weights are expected.
print(ckpt_dir)
class HalloNode:
    """ComfyUI node that runs Hallo audio-driven portrait animation.

    Takes a source image path and a driving audio path, writes a temporary
    inference YAML derived from the bundled default config, invokes
    ``Hallo/scripts/inference.py`` in a subprocess, and returns the path of
    the generated MP4.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "source_image": ("IMAGEPATH",),
                "driving_audio": ("AUDIOPATH",),
                # Weights below steer the relative influence of pose, face
                # and lip motion during generation (Hallo CLI arguments).
                "pose_weight": ("FLOAT", {
                    "default": 1.
                }),
                "face_weight": ("FLOAT", {
                    "default": 1.
                }),
                "lip_weight": ("FLOAT", {
                    "default": 1.
                }),
                "face_expand_ratio": ("FLOAT", {
                    "default": 1.2
                })
            },
            "optional": {
                "sd_model": (folder_paths.get_filename_list("diffusers"),),
            }
        }

    RETURN_TYPES = ("VIDEO",)
    #RETURN_NAMES = ("image_output_name",)
    FUNCTION = "generate"
    #OUTPUT_NODE = False
    CATEGORY = "AIFSH_Hallo"

    def generate(self, source_image, driving_audio, pose_weight, face_weight,
                 lip_weight, face_expand_ratio, sd_model=None):
        """Run Hallo inference and return a 1-tuple with the output MP4 path.

        Args:
            source_image: path to the portrait image.
            driving_audio: path to the driving audio file.
            pose_weight / face_weight / lip_weight / face_expand_ratio:
                float weights forwarded to the Hallo CLI.
            sd_model: optional diffusers model name from ComfyUI's model list;
                falls back to the bundled stable-diffusion-v1-5 weights.
        """
        python_exec = sys.executable or "python"
        infer_py = os.path.join(now_dir, "Hallo", "scripts", "inference.py")
        default_yaml_path = os.path.join(now_dir, "Hallo", "configs", "inference", "default.yaml")
        with open(default_yaml_path, 'r', encoding="utf-8") as f:
            yaml_data = yaml.load(f.read(), Loader=yaml.SafeLoader)
        # source_image / driving_audio are passed on the command line instead
        # of via the YAML, so only paths that are stable per-install go here.
        yaml_data['save_path'] = os.path.join(output_dir, "hallo")
        yaml_data['audio_ckpt_dir'] = os.path.join(ckpt_dir, "hallo")
        # NOTE(review): this picks the user-selected diffusers model only when
        # its name does NOT contain "safetensors" — looks inverted at first
        # glance, but preserved as-is; confirm intent against the Hallo CLI.
        if sd_model is not None and "safetensors" not in sd_model:
            base_model_path = folder_paths.get_full_path("diffusers", sd_model)
        else:
            base_model_path = os.path.join(ckpt_dir, "stable-diffusion-v1-5")
        yaml_data['base_model_path'] = base_model_path
        print(yaml_data['base_model_path'])
        yaml_data['motion_module_path'] = os.path.join(ckpt_dir, "motion_module", "mm_sd_v15_v2.ckpt")
        yaml_data['face_analysis']['model_path'] = os.path.join(ckpt_dir, "face_analysis")
        yaml_data['wav2vec']['model_path'] = os.path.join(ckpt_dir, "wav2vec", "wav2vec2-base-960h")
        yaml_data['audio_separator']['model_path'] = os.path.join(ckpt_dir, "audio_separator", "Kim_Vocal_2.onnx")
        yaml_data['vae']['model_path'] = os.path.join(ckpt_dir, "sd-vae-ft-mse")
        tmp_yaml_path = os.path.join(now_dir, 'tmp.yaml')
        with open(tmp_yaml_path, 'w', encoding="utf-8") as f:
            yaml.dump(data=yaml_data, stream=f, Dumper=yaml.Dumper)
        # Unique output name so repeated runs never overwrite each other.
        outfile = os.path.join(output_dir, f"hallo_{time.time_ns()}.mp4")
        os.environ["face_landmarker"] = os.path.join(
            ckpt_dir, "face_analysis", "models", "face_landmarker_v2_with_blendshapes.task")
        # Use an argument list (no shell) instead of os.system: this quotes
        # every path correctly (the old f-string left `outfile` unquoted) and
        # avoids shell-injection via file names.
        cmd = [
            python_exec, infer_py,
            "--config", tmp_yaml_path,
            "--source_image", source_image,
            "--driving_audio", driving_audio,
            "--output", outfile,
            "--pose_weight", str(pose_weight),
            "--face_weight", str(face_weight),
            "--lip_weight", str(lip_weight),
            "--face_expand_ratio", str(face_expand_ratio),
        ]
        print(cmd)
        try:
            # check=False mirrors os.system, which ignored the exit status.
            subprocess.run(cmd, check=False)
        finally:
            # Always clean up the temp config, even if the subprocess errors.
            os.remove(tmp_yaml_path)
        return (outfile,)
class LoadAudioPath:
    """Node that lists audio files in ComfyUI's input directory and returns
    the chosen file's annotated path as an AUDIOPATH value."""

    @classmethod
    def INPUT_TYPES(s):
        # Re-query the input directory on every call (consistent with
        # LoadImagePath) instead of using the module-level snapshot taken at
        # import time, so a changed input dir is picked up immediately.
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir)
                 if os.path.isfile(os.path.join(input_dir, f))
                 and f.split('.')[-1].lower() in ["wav", "mp3", "flac", "m4a"]]
        return {"required":
                    {"audio": (sorted(files),)},
                }

    CATEGORY = "AIFSH_Hallo"

    RETURN_TYPES = ("AUDIOPATH",)
    FUNCTION = "load_audio"

    def load_audio(self, audio):
        """Resolve the selected audio entry to a filesystem path."""
        audio_path = folder_paths.get_annotated_filepath(audio)
        return (audio_path,)
class LoadImagePath:
    """Node that lists image files in ComfyUI's input directory and returns
    the chosen file's annotated path as an IMAGEPATH value."""

    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        candidates = sorted(
            name for name in os.listdir(input_dir)
            if os.path.isfile(os.path.join(input_dir, name))
            and name.split('.')[-1].lower() in ['bmp', 'jpg', 'png', 'webp', 'jpeg']
        )
        return {"required":
                    {"image": (candidates, {"image_upload": True})},
                }

    CATEGORY = "AIFSH_Hallo"

    RETURN_TYPES = ("IMAGEPATH",)
    FUNCTION = "load_image"

    def load_image(self, image):
        """Resolve the selected image entry to a filesystem path."""
        return (folder_paths.get_annotated_filepath(image),)
class PreViewVideo:
    """Output node that hands a generated video to the UI for preview."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "video": ("VIDEO",),
        }}

    CATEGORY = "AIFSH_Hallo"
    DESCRIPTION = "hello world!"
    RETURN_TYPES = ()
    OUTPUT_NODE = True
    FUNCTION = "load_video"

    def load_video(self, video):
        """Return a UI payload of [file name, parent directory name]."""
        parent_dir = os.path.dirname(video)
        payload = [os.path.basename(video), os.path.basename(parent_dir)]
        return {"ui": {"video": payload}}