From 56dc761ad32c26d9619dff083b6a8036bc673f74 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sun, 20 Oct 2024 10:59:22 +0900
Subject: [PATCH] fix formating

---
 modules/extras.py | 58 +++++++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 27 deletions(-)

diff --git a/modules/extras.py b/modules/extras.py
index 32da3d3ec91..297487ba39f 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -12,17 +12,21 @@ import gradio as gr
 import safetensors.torch
 
+
 def pnginfo_format_string(plain_text):
     content = "\n".join(html.escape(x) for x in str(plain_text).split('\n'))
     return content
 
+
 def pnginfo_format_setting(name, value):
     cls_name = 'geninfo-setting-string' if value.startswith('"') else 'geninfo-setting-value'
     return f"{html.escape(name)}: {html.escape(value)}"
 
+
 def pnginfo_format_quicklink(name):
     return f"[{html.escape(name)}]"
 
+
 def run_pnginfo(image):
     if image is None:
         return '', '', ''
 
@@ -32,38 +36,38 @@ def run_pnginfo(image):
     info = ''
     parser = png_parser.PngParser(geninfo)
     if parser.valid:
-        info += f"""
+        info += f"""
 
 parameters
 {pnginfo_format_quicklink("Copy")} {pnginfo_format_quicklink("Prompt")}"""
-        if parser.negative is not None:
-            info += f' {pnginfo_format_quicklink("Negative")}'
-        info += f""" {pnginfo_format_quicklink("Settings")}
+        if parser.negative is not None:
+            info += f' {pnginfo_format_quicklink("Negative")}'
+        info += f""" {pnginfo_format_quicklink("Settings")}
 
 {pnginfo_format_string(parser.positive)}
 
 """
-        if parser.negative is not None:
-            info += f"""
+        if parser.negative is not None:
+            info += f"""
 
 Negative prompt:
 {pnginfo_format_string(parser.negative)}
 
 """
-        if parser.settings is None:
-            info += f"{plaintext_to_html(str(parser.parameters))}"
-        else:
-            info += ""
-            first = True
-            for setting in parser.settings:
-                if first:
-                    first = False
-                else:
-                    info += ", "
-                info += pnginfo_format_setting(str(setting[0]), str(setting[1])+str(setting[2]))
-            info += ""
-
-        if parser.extra is not None:
-            info += f"{pnginfo_format_string(parser.extra)}"
-
-        info += "\n"
+        if parser.settings is None:
+            info += f"{plaintext_to_html(str(parser.parameters))}"
+        else:
+            info += ""
+            first = True
+            for setting in parser.settings:
+                if first:
+                    first = False
+                else:
+                    info += ", "
+                info += pnginfo_format_setting(str(setting[0]), str(setting[1])+str(setting[2]))
+            info += ""
+
+        if parser.extra is not None:
+            info += f"{pnginfo_format_string(parser.extra)}"
+
+        info += "\n"
     else:
         items = {**{'parameters': geninfo}, **items}
@@ -248,8 +252,8 @@ def filename_nothing():
                 if a.shape[1] == 4 and b.shape[1] == 8:
                     raise RuntimeError("When merging instruct-pix2pix model with a normal one, A must be the instruct-pix2pix model.")
 
-                if a.shape[1] == 8 and b.shape[1] == 4:#If we have an Instruct-Pix2Pix model...
-                    theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)#Merge only the vectors the models have in common. Otherwise we get an error due to dimension mismatch.
+                if a.shape[1] == 8 and b.shape[1] == 4:  # If we have an Instruct-Pix2Pix model...
+                    theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)  # Merge only the vectors the models have in common. Otherwise we get an error due to dimension mismatch.
                     result_is_instruct_pix2pix_model = True
                 else:
                     assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
@@ -320,7 +324,7 @@ def filename_nothing():
 
     if save_metadata and add_merge_recipe:
         merge_recipe = {
-            "type": "webui", # indicate this model was merged with webui's built-in merger
+            "type": "webui",  # indicate this model was merged with webui's built-in merger
             "primary_model_hash": primary_model_info.sha256,
             "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None,
             "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None,
@@ -358,7 +362,7 @@ def add_model_metadata(checkpoint_info):
 
     _, extension = os.path.splitext(output_modelname)
     if extension.lower() == ".safetensors":
-        safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata if len(metadata)>0 else None)
+        safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata if len(metadata) > 0 else None)
     else:
         torch.save(theta_0, output_modelname)