Merge pull request #3433 from lllyasviel/develop

Release 2.5.3

mashb1t committed Aug 3, 2024
2 parents c2dc17e + c4d5b16 commit f0dcf5a

Showing 20 changed files with 85 additions and 38 deletions.
4 changes: 2 additions & 2 deletions extras/BLIP/models/blip.py
@@ -216,9 +216,9 @@ def is_url(url_or_filename):
 def load_checkpoint(model,url_or_filename):
     if is_url(url_or_filename):
         cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
-        checkpoint = torch.load(cached_file, map_location='cpu')
+        checkpoint = torch.load(cached_file, map_location='cpu', weights_only=True)
     elif os.path.isfile(url_or_filename):
-        checkpoint = torch.load(url_or_filename, map_location='cpu')
+        checkpoint = torch.load(url_or_filename, map_location='cpu', weights_only=True)
     else:
         raise RuntimeError('checkpoint url or path is invalid')

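The one-line change above, adding weights_only=True to torch.load, recurs across every checkpoint-loading call in this commit; it is what the changelog below means by "only load weights from non-safetensors files". A minimal sketch of the behavior difference (not part of the commit; assumes PyTorch 1.13 or newer, where torch.load accepts weights_only):

```python
import torch

# A checkpoint that really is just tensors loads fine under the restriction.
torch.save({'weight': torch.zeros(2, 2)}, 'ok.pt')
print(torch.load('ok.pt', weights_only=True))

class Payload:
    # Stands in for attacker-controlled code hidden in a pickled checkpoint.
    def __reduce__(self):
        return (print, ('arbitrary code would run here',))

# Plain torch.load would unpickle this object and execute the payload;
# weights_only=True limits deserialization to tensors and primitive
# containers, so the hostile checkpoint is rejected instead.
torch.save({'weight': Payload()}, 'evil.pt')
try:
    torch.load('evil.pt', weights_only=True)
except Exception as e:
    print('rejected:', type(e).__name__)  # pickle.UnpicklingError
```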
4 changes: 2 additions & 2 deletions extras/BLIP/models/blip_nlvr.py
@@ -78,9 +78,9 @@ def blip_nlvr(pretrained='',**kwargs):
 def load_checkpoint(model,url_or_filename):
     if is_url(url_or_filename):
         cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
-        checkpoint = torch.load(cached_file, map_location='cpu')
+        checkpoint = torch.load(cached_file, map_location='cpu', weights_only=True)
     elif os.path.isfile(url_or_filename):
-        checkpoint = torch.load(url_or_filename, map_location='cpu')
+        checkpoint = torch.load(url_or_filename, map_location='cpu', weights_only=True)
     else:
         raise RuntimeError('checkpoint url or path is invalid')
     state_dict = checkpoint['model']
2 changes: 1 addition & 1 deletion extras/facexlib/detection/__init__.py
@@ -19,7 +19,7 @@ def init_detection_model(model_name, half=False, device='cuda', model_rootpath=N
         url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)

     # TODO: clean pretrained model
-    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
+    load_net = torch.load(model_path, map_location=lambda storage, loc: storage, weights_only=True)
     # remove unnecessary 'module.'
     for k, v in deepcopy(load_net).items():
         if k.startswith('module.'):
2 changes: 1 addition & 1 deletion extras/facexlib/parsing/__init__.py
@@ -17,7 +17,7 @@ def init_parsing_model(model_name='bisenet', half=False, device='cuda', model_ro

     model_path = load_file_from_url(
         url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
-    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
+    load_net = torch.load(model_path, map_location=lambda storage, loc: storage, weights_only=True)
     model.load_state_dict(load_net, strict=True)
     model.eval()
     model = model.to(device)
2 changes: 1 addition & 1 deletion extras/ip_adapter.py
@@ -104,7 +104,7 @@ def load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path):
     offload_device = torch.device('cpu')

     use_fp16 = model_management.should_use_fp16(device=load_device)
-    ip_state_dict = torch.load(ip_adapter_path, map_location="cpu")
+    ip_state_dict = torch.load(ip_adapter_path, map_location="cpu", weights_only=True)
     plus = "latents" in ip_state_dict["image_proj"]
     cross_attention_dim = ip_state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[1]
     sdxl = cross_attention_dim == 2048
2 changes: 1 addition & 1 deletion fooocus_version.py
@@ -1 +1 @@
-version = '2.5.2'
+version = '2.5.3'
1 change: 1 addition & 0 deletions language/en.json
@@ -17,6 +17,7 @@
     "Content Type": "Content Type",
     "Photograph": "Photograph",
     "Art/Anime": "Art/Anime",
+    "Apply Styles": "Apply Styles",
     "Describe this Image into Prompt": "Describe this Image into Prompt",
     "Image Size and Recommended Size": "Image Size and Recommended Size",
     "Upscale or Variation:": "Upscale or Variation:",
2 changes: 1 addition & 1 deletion ldm_patched/ldm/modules/encoders/noise_aug_modules.py
@@ -8,7 +8,7 @@ def __init__(self, *args, clip_stats_path=None, timestep_dim=256, **kwargs):
         if clip_stats_path is None:
             clip_mean, clip_std = torch.zeros(timestep_dim), torch.ones(timestep_dim)
         else:
-            clip_mean, clip_std = torch.load(clip_stats_path, map_location="cpu")
+            clip_mean, clip_std = torch.load(clip_stats_path, map_location="cpu", weights_only=True)
         self.register_buffer("data_mean", clip_mean[None, :], persistent=False)
         self.register_buffer("data_std", clip_std[None, :], persistent=False)
         self.time_embed = Timestep(timestep_dim)
2 changes: 1 addition & 1 deletion ldm_patched/modules/sd1_clip.py
@@ -326,7 +326,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
                 except:
                     embed_out = safe_load_embed_zip(embed_path)
             else:
-                embed = torch.load(embed_path, map_location="cpu")
+                embed = torch.load(embed_path, map_location="cpu", weights_only=True)
     except Exception as e:
         print(traceback.format_exc())
         print()
6 changes: 3 additions & 3 deletions ldm_patched/pfn/architecture/face/codeformer.py
@@ -377,15 +377,15 @@ def __init__(
         )

         if model_path is not None:
-            chkpt = torch.load(model_path, map_location="cpu")
+            chkpt = torch.load(model_path, map_location="cpu", weights_only=True)
             if "params_ema" in chkpt:
                 self.load_state_dict(
-                    torch.load(model_path, map_location="cpu")["params_ema"]
+                    torch.load(model_path, map_location="cpu", weights_only=True)["params_ema"]
                 )
                 logger.info(f"vqgan is loaded from: {model_path} [params_ema]")
             elif "params" in chkpt:
                 self.load_state_dict(
-                    torch.load(model_path, map_location="cpu")["params"]
+                    torch.load(model_path, map_location="cpu", weights_only=True)["params"]
                 )
                 logger.info(f"vqgan is loaded from: {model_path} [params]")
             else:
4 changes: 2 additions & 2 deletions ldm_patched/pfn/architecture/face/gfpgan_bilinear_arch.py
@@ -273,8 +273,8 @@ def __init__(
         if decoder_load_path:
             self.stylegan_decoder.load_state_dict(
                 torch.load(
-                    decoder_load_path, map_location=lambda storage, loc: storage
-                )["params_ema"]
+                    decoder_load_path, map_location=lambda storage, loc: storage,
+                    weights_only=True)["params_ema"]
             )
         # fix decoder without updating params
         if fix_decoder:
4 changes: 2 additions & 2 deletions ldm_patched/pfn/architecture/face/gfpganv1_arch.py
@@ -373,8 +373,8 @@ def __init__(
         if decoder_load_path:
             self.stylegan_decoder.load_state_dict(
                 torch.load(
-                    decoder_load_path, map_location=lambda storage, loc: storage
-                )["params_ema"]
+                    decoder_load_path, map_location=lambda storage, loc: storage,
+                    weights_only=True)["params_ema"]
             )
         # fix decoder without updating params
         if fix_decoder:
4 changes: 2 additions & 2 deletions ldm_patched/pfn/architecture/face/gfpganv1_clean_arch.py
@@ -284,8 +284,8 @@ def __init__(
         if decoder_load_path:
             self.stylegan_decoder.load_state_dict(
                 torch.load(
-                    decoder_load_path, map_location=lambda storage, loc: storage
-                )["params_ema"]
+                    decoder_load_path, map_location=lambda storage, loc: storage,
+                    weights_only=True)["params_ema"]
             )
         # fix decoder without updating params
         if fix_decoder:
13 changes: 13 additions & 0 deletions modules/config.py
@@ -702,6 +702,19 @@ def init_temp_path(path: str | None, default_path: str) -> str:
     expected_type=str
 )

+default_describe_apply_prompts_checkbox = get_config_item_or_set_default(
+    key='default_describe_apply_prompts_checkbox',
+    default_value=True,
+    validator=lambda x: isinstance(x, bool),
+    expected_type=bool
+)
+default_describe_content_type = get_config_item_or_set_default(
+    key='default_describe_content_type',
+    default_value=[modules.flags.describe_type_photo],
+    validator=lambda x: all(k in modules.flags.describe_types for k in x),
+    expected_type=list
+)
+
 config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [[True, 'None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]

 # mapping config to meta parameter
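The two entries above follow the existing get_config_item_or_set_default pattern. As a self-contained restatement (simplified stand-ins, not the actual Fooocus helpers), this is what the new validators accept:

```python
# Mirrors modules.flags.describe_types from this commit.
describe_types = ['Photograph', 'Art/Anime']

# Stand-ins for the two validator lambdas added to modules/config.py.
apply_prompts_validator = lambda x: isinstance(x, bool)
content_type_validator = lambda x: all(k in describe_types for k in x)

assert apply_prompts_validator(True)              # booleans pass
assert not apply_prompts_validator('yes')         # strings are rejected
assert content_type_validator(['Photograph'])     # the shipped default
assert content_type_validator(describe_types)     # both types at once
assert not content_type_validator(['Sketch'])     # unknown type fails
```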
2 changes: 1 addition & 1 deletion modules/core.py
@@ -231,7 +231,7 @@ def get_previewer(model):
     if vae_approx_filename in VAE_approx_models:
         VAE_approx_model = VAE_approx_models[vae_approx_filename]
     else:
-        sd = torch.load(vae_approx_filename, map_location='cpu')
+        sd = torch.load(vae_approx_filename, map_location='cpu', weights_only=True)
         VAE_approx_model = VAEApprox()
         VAE_approx_model.load_state_dict(sd)
         del sd
1 change: 1 addition & 0 deletions modules/flags.py
@@ -96,6 +96,7 @@

 describe_type_photo = 'Photograph'
 describe_type_anime = 'Art/Anime'
+describe_types = [describe_type_photo, describe_type_anime]

 sdxl_aspect_ratios = [
     '704*1408', '704*1344', '768*1344', '768*1280', '832*1216', '832*1152',
2 changes: 1 addition & 1 deletion modules/inpaint_worker.py
@@ -196,7 +196,7 @@ def patch(self, inpaint_head_model_path, inpaint_latent, inpaint_latent_mask, mo

         if inpaint_head_model is None:
             inpaint_head_model = InpaintHead()
-            sd = torch.load(inpaint_head_model_path, map_location='cpu')
+            sd = torch.load(inpaint_head_model_path, map_location='cpu', weights_only=True)
             inpaint_head_model.load_state_dict(sd)

         feed = torch.cat([
2 changes: 1 addition & 1 deletion modules/upscaler.py
@@ -17,7 +17,7 @@ def perform_upscale(img):

     if model is None:
         model_filename = downloading_upscale_model()
-        sd = torch.load(model_filename)
+        sd = torch.load(model_filename, weights_only=True)
         sdo = OrderedDict()
         for k, v in sd.items():
             sdo[k.replace('residual_block_', 'RDB')] = v
7 changes: 7 additions & 0 deletions update_log.md
@@ -1,3 +1,10 @@
+# [2.5.3](https://github.com/lllyasviel/Fooocus/releases/tag/v2.5.3)
+
+* Fix prompt bug when only negative styles are selected
+* Add various config settings for image input, see list in [PR](https://github.com/lllyasviel/Fooocus/pull/3382)
+* Only load weights from non-safetensors files, preventing harmful code injection
+* Add checkbox for applying/resetting styles when describing images, also allowing multiple describe content types
+
 # [2.5.2](https://github.com/lllyasviel/Fooocus/releases/tag/v2.5.2)

 * Fix not adding positive prompt when styles didn't have a {prompt} placeholder in the positive prompt
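The last entry above is implemented in the webui.py hunk further down: each selected content type contributes one prompt fragment and one set of style presets, which are comma-joined and unioned respectively. A minimal sketch of that merging logic (illustrative names and stand-in interrogators, not the actual Fooocus API):

```python
# Illustrative reduction of trigger_describe: merge the output of several
# interrogators into one prompt string and one de-duplicated style list.
def combine_describe(modes, interrogators, apply_styles):
    prompts, styles = [], set()
    for mode, (interrogate, mode_styles) in interrogators.items():
        if mode in modes:
            prompts.append(interrogate())
            styles.update(mode_styles)
    prompt = ', '.join(prompts) if prompts else None     # None: leave field unchanged
    return prompt, (sorted(styles) if styles and apply_styles else None)

# Hypothetical stand-ins for the photo and anime interrogators.
demo = {
    'Photograph': (lambda: 'a photo of a cat', ['Fooocus V2', 'Fooocus Enhance', 'Fooocus Sharp']),
    'Art/Anime': (lambda: '1girl, masterpiece', ['Fooocus V2', 'Fooocus Masterpiece']),
}
print(combine_describe(['Photograph', 'Art/Anime'], demo, apply_styles=True))
# ('a photo of a cat, 1girl, masterpiece',
#  ['Fooocus Enhance', 'Fooocus Masterpiece', 'Fooocus Sharp', 'Fooocus V2'])
```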
57 changes: 41 additions & 16 deletions webui.py
@@ -337,10 +337,11 @@ def generate_mask(image, mask_model, cloth_category, dino_prompt_text, sam_model
                         with gr.Column():
                             describe_input_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False)
                         with gr.Column():
-                            describe_method = gr.Radio(
+                            describe_methods = gr.CheckboxGroup(
                                 label='Content Type',
-                                choices=[flags.describe_type_photo, flags.describe_type_anime],
-                                value=flags.describe_type_photo)
+                                choices=flags.describe_types,
+                                value=modules.config.default_describe_content_type)
+                            describe_apply_styles = gr.Checkbox(label='Apply Styles', value=modules.config.default_describe_apply_prompts_checkbox)
                             describe_btn = gr.Button(value='Describe this Image into Prompt')
                             describe_image_size = gr.Textbox(label='Image Size and Recommended Size', elem_id='describe_image_size', visible=False)
                             gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/1363" target="_blank">\U0001F4D4 Documentation</a>')
@@ -1060,30 +1061,54 @@ def trigger_metadata_import(file, state_is_generating):
                 gr.Audio(interactive=False, value=notification_file, elem_id='audio_notification', visible=False)
                 break

-        def trigger_describe(mode, img):
-            if mode == flags.describe_type_photo:
+        def trigger_describe(modes, img, apply_styles):
+            describe_prompts = []
+            styles = set()
+
+            if flags.describe_type_photo in modes:
                 from extras.interrogate import default_interrogator as default_interrogator_photo
-                return default_interrogator_photo(img), ["Fooocus V2", "Fooocus Enhance", "Fooocus Sharp"]
-            if mode == flags.describe_type_anime:
+                describe_prompts.append(default_interrogator_photo(img))
+                styles.update(["Fooocus V2", "Fooocus Enhance", "Fooocus Sharp"])
+
+            if flags.describe_type_anime in modes:
                 from extras.wd14tagger import default_interrogator as default_interrogator_anime
-                return default_interrogator_anime(img), ["Fooocus V2", "Fooocus Masterpiece"]
-            return mode, ["Fooocus V2"]
+                describe_prompts.append(default_interrogator_anime(img))
+                styles.update(["Fooocus V2", "Fooocus Masterpiece"])
+
+            if len(styles) == 0 or not apply_styles:
+                styles = gr.update()
+            else:
+                styles = list(styles)
+
+            if len(describe_prompts) == 0:
+                describe_prompt = gr.update()
+            else:
+                describe_prompt = ', '.join(describe_prompts)
+
+            return describe_prompt, styles

-        describe_btn.click(trigger_describe, inputs=[describe_method, describe_input_image],
-                           outputs=[prompt, style_selections], show_progress=True, queue=True)
+        describe_btn.click(trigger_describe, inputs=[describe_methods, describe_input_image, describe_apply_styles],
+                           outputs=[prompt, style_selections], show_progress=True, queue=True) \
+            .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
+            .then(lambda: None, _js='()=>{refresh_style_localization();}')

         if args_manager.args.enable_auto_describe_image:
-            def trigger_auto_describe(mode, img, prompt):
+            def trigger_auto_describe(mode, img, prompt, apply_styles):
                 # keep prompt if not empty
                 if prompt == '':
-                    return trigger_describe(mode, img)
+                    return trigger_describe(mode, img, apply_styles)
                 return gr.update(), gr.update()

-            uov_input_image.upload(trigger_auto_describe, inputs=[describe_method, uov_input_image, prompt],
-                                   outputs=[prompt, style_selections], show_progress=True, queue=True)
+            uov_input_image.upload(trigger_auto_describe, inputs=[describe_methods, uov_input_image, prompt, describe_apply_styles],
+                                   outputs=[prompt, style_selections], show_progress=True, queue=True) \
+                .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
+                .then(lambda: None, _js='()=>{refresh_style_localization();}')

             enhance_input_image.upload(lambda: gr.update(value=True), outputs=enhance_checkbox, queue=False, show_progress=False) \
-                .then(trigger_auto_describe, inputs=[describe_method, enhance_input_image, prompt], outputs=[prompt, style_selections], show_progress=True, queue=True)
+                .then(trigger_auto_describe, inputs=[describe_methods, enhance_input_image, prompt, describe_apply_styles],
+                      outputs=[prompt, style_selections], show_progress=True, queue=True) \
+                .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
+                .then(lambda: None, _js='()=>{refresh_style_localization();}')

 def dump_default_english_config():
     from modules.localization import dump_english_config
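All three event wirings above chain .then() steps so that style sorting and a localization refresh run after the describe outputs land. A minimal self-contained sketch of the same Gradio chaining pattern (hypothetical components, not the Fooocus UI; assumes the gradio package is installed):

```python
import gradio as gr

# The click handler fills the textbox first; the chained .then() step then
# post-processes its value, mirroring how describe_btn.click feeds prompt
# and style_selections before the follow-up sorting step runs.
with gr.Blocks() as demo:
    prompt = gr.Textbox(label='Prompt')
    btn = gr.Button('Describe')
    btn.click(lambda: 'a photo of a cat', outputs=prompt, queue=True) \
       .then(lambda p: p.upper(), inputs=prompt, outputs=prompt, queue=False)

demo.launch()
```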
