Merge branch 'dev-a1111' into dev-local-202406
* dev-a1111:
  fix passing of literal backslash (AUTOMATIC1111#16671)
  fix prompt-bracket-checker miscounting of literal tokens (AUTOMATIC1111#16669)
  Bump safetensors to v0.4.5
  Honor lossless webp compression option in API
  use shared.hf_endpoint (AUTOMATIC1111#16611)
  Warn if WebUI is installed under a dot directory (AUTOMATIC1111#16584)
  sd_xl_v.yaml: use_checkpoint = False
  XYZ option to disable grid (AUTOMATIC1111#16416)
  Fix Default system None filter logic (AUTOMATIC1111#16309)
  Fix weighting config for SDXL v-pred
  InputAccordion duplicate elem_id handling (AUTOMATIC1111#16381)
  addEventListener {passive: false} (AUTOMATIC1111#16575)
  pyenv-win compatibility - another approach (AUTOMATIC1111#16287)
  Disable Hires checkpoint if same as First pass checkpoint (AUTOMATIC1111#16269)
  Fix postprocessing_enable_in_main_ui ScriptPostprocessing elem_id (AUTOMATIC1111#16373)
  extra_only / main_ui_only ScriptPostprocessing (AUTOMATIC1111#16374)
  Allow newline in Extra Network activation text (AUTOMATIC1111#16428)
  image embedding data cache (AUTOMATIC1111#16556)
  Fix typo
bluelovers committed Nov 26, 2024
2 parents 06544fc + 023454b commit 8da9861
Showing 31 changed files with 239 additions and 118 deletions.
4 changes: 2 additions & 2 deletions configs/sd_xl_v.yaml
@@ -10,7 +10,7 @@ model:
num_idx: 1000

weighting_config:
- target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
+ target: sgm.modules.diffusionmodules.denoiser_weighting.VWeighting
scaling_config:
target: sgm.modules.diffusionmodules.denoiser_scaling.VScaling
discretization_config:
@@ -21,7 +21,7 @@ model:
params:
adm_in_channels: 2816
num_classes: sequential
- use_checkpoint: True
+ use_checkpoint: False
in_channels: 4
out_channels: 4
model_channels: 320
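
For context: switching the weighting target from EpsWeighting to VWeighting matches the loss weighting to the v-prediction objective, while use_checkpoint only toggles gradient checkpointing in the U-Net (a training-time memory optimization that is unnecessary for inference). Below is a minimal Python sketch of what the two weightings compute per noise level sigma, following the usual sgm/EDM conventions with sigma_data = 1.0; it is illustrative, not the sgm source.

    import torch

    def eps_weighting(sigma: torch.Tensor) -> torch.Tensor:
        # eps-objective loss weighting: w(sigma) = sigma^-2
        return sigma ** -2.0

    def v_weighting(sigma: torch.Tensor, sigma_data: float = 1.0) -> torch.Tensor:
        # v-objective loss weighting (EDM form with sigma_data = 1.0):
        # w(sigma) = (sigma^2 + sigma_data^2) / (sigma * sigma_data)^2
        return (sigma ** 2 + sigma_data ** 2) / (sigma * sigma_data) ** 2

    sigmas = torch.tensor([0.1, 1.0, 10.0])
    print(eps_weighting(sigmas))  # sigma^-2            -> 100.0, 1.0, 0.01
    print(v_weighting(sigmas))    # 1 + sigma^-2        -> 101.0, 2.0, 1.01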
2 changes: 1 addition & 1 deletion extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -816,7 +816,7 @@ onUiLoaded(async() => {
// Increase or decrease brush size based on scroll direction
adjustBrushSize(elemId, e.deltaY);
}
- });
+ }, {passive: false});

// Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
function handleMoveKeyDown(e) {
2 changes: 1 addition & 1 deletion extensions-builtin/hypertile/hypertile.py
@@ -1,7 +1,7 @@
"""
Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE
Warn: The patch works well only if the input image has a width and height that are multiples of 128
- Original author: @tfernd Github: https://github.com/tfernd/HyperTile
+ Original author: @tfernd GitHub: https://github.com/tfernd/HyperTile
"""

from __future__ import annotations
(changed file, name not captured)
@@ -34,14 +34,14 @@ def ui(self):
with ui_components.InputAccordion(False, label="Auto-sized crop") as enable:
gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
with gr.Row():
- mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="postprocess_multicrop_mindim")
- maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="postprocess_multicrop_maxdim")
+ mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id=self.elem_id_suffix("postprocess_multicrop_mindim"))
+ maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id=self.elem_id_suffix("postprocess_multicrop_maxdim"))
with gr.Row():
- minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id="postprocess_multicrop_minarea")
- maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id="postprocess_multicrop_maxarea")
+ minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id=self.elem_id_suffix("postprocess_multicrop_minarea"))
+ maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id=self.elem_id_suffix("postprocess_multicrop_maxarea"))
with gr.Row():
- objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="postprocess_multicrop_objective")
- threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="postprocess_multicrop_threshold")
+ objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id=self.elem_id_suffix("postprocess_multicrop_objective"))
+ threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id=self.elem_id_suffix("postprocess_multicrop_threshold"))

return {
"enable": enable,
(changed file, name not captured)
@@ -11,10 +11,10 @@ class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing)

def ui(self):
with ui_components.InputAccordion(False, label="Auto focal point crop") as enable:
- face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_face_weight")
- entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_entropy_weight")
- edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_edges_weight")
- debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
+ face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_focal_crop_face_weight"))
+ entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_focal_crop_entropy_weight"))
+ edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_focal_crop_edges_weight"))
+ debug = gr.Checkbox(label='Create debug image', elem_id=self.elem_id_suffix("train_process_focal_crop_debug"))

return {
"enable": enable,
(changed file, name not captured)
@@ -35,8 +35,8 @@ class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostproces
def ui(self):
with ui_components.InputAccordion(False, label="Split oversized images") as enable:
with gr.Row():
- split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_split_threshold")
- overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="postprocess_overlap_ratio")
+ split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_split_threshold"))
+ overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id=self.elem_id_suffix("postprocess_overlap_ratio"))

return {
"enable": enable,
(changed file, name not captured)
@@ -4,11 +4,11 @@
// If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong.

function checkBrackets(textArea, counterElt) {
- var counts = {};
- (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => {
- counts[bracket] = (counts[bracket] || 0) + 1;
+ const counts = {};
+ textArea.value.matchAll(/(?<!\\)(?:\\\\)*?([(){}[\]])/g).forEach(bracket => {
+ counts[bracket[1]] = (counts[bracket[1]] || 0) + 1;
});
- var errors = [];
+ const errors = [];

function checkPair(open, close, kind) {
if (counts[open] !== counts[close]) {
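
The new pattern counts only brackets that are not escaped, while still counting a bracket that follows a literal, double-escaped backslash. A quick Python analogue of the same regex, for illustration only since the production code above is JavaScript:

    import re

    # Same idea as the JS pattern: ignore brackets preceded by an odd number of backslashes.
    BRACKET_RE = re.compile(r'(?<!\\)(?:\\\\)*?([(){}\[\]])')

    def count_unescaped_brackets(prompt: str) -> dict:
        counts = {}
        for match in BRACKET_RE.finditer(prompt):
            bracket = match.group(1)
            counts[bracket] = counts.get(bracket, 0) + 1
        return counts

    print(count_unescaped_brackets(r'(masterpiece) \(literal\) [tag]'))
    # {'(': 1, ')': 1, '[': 1, ']': 1} -- the escaped pair is not counted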
2 changes: 1 addition & 1 deletion html/footer.html
@@ -1,7 +1,7 @@
<div>
<a href="{api_docs}">API</a>
 • 
<a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">Github</a>
<a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">GitHub</a>
 • 
<a href="https://gradio.app">Gradio</a>
 • 
2 changes: 1 addition & 1 deletion javascript/contextMenus.js
@@ -104,7 +104,7 @@ var contextMenuInit = function() {
e.preventDefault();
}
});
- });
+ }, {passive: false});
});
eventListenerApplied = true;

2 changes: 1 addition & 1 deletion javascript/extraNetworks.js
@@ -201,7 +201,7 @@ function setupExtraNetworks() {
setupExtraNetworksForTab('img2img');
}

- var re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/;
+ var re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/s;
var re_extranet_g = /<([^:^>]+:[^:]+):[\d.]+>/g;

var re_extranet_neg = /\(([^:^>]+:[\d.]+)\)/;
2 changes: 1 addition & 1 deletion javascript/resizeHandle.js
@@ -124,7 +124,7 @@
} else {
R.screenX = evt.changedTouches[0].screenX;
}
- });
+ }, {passive: false});
});

resizeHandle.addEventListener('dblclick', onDoubleClick);
2 changes: 1 addition & 1 deletion modules/api/api.py
@@ -122,7 +122,7 @@ def encode_pil_to_base64(image):
if opts.samples_format.lower() in ("jpg", "jpeg"):
image.save(output_bytes, format="JPEG", exif = exif_bytes, quality=opts.jpeg_quality)
else:
image.save(output_bytes, format="WEBP", exif = exif_bytes, quality=opts.jpeg_quality)
image.save(output_bytes, format="WEBP", exif = exif_bytes, quality=opts.jpeg_quality, lossless=opts.webp_lossless)

else:
raise HTTPException(status_code=500, detail="Invalid image format")
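
For reference, Pillow's WEBP encoder switches to lossless mode when save() receives lossless=True, which is what the webp_lossless option now controls for API responses. A standalone sketch (not webui code):

    import io
    from PIL import Image

    image = Image.new("RGB", (64, 64), "red")

    lossy = io.BytesIO()
    image.save(lossy, format="WEBP", quality=80)                    # default lossy encoding

    lossless = io.BytesIO()
    image.save(lossless, format="WEBP", quality=80, lossless=True)  # what the option enables

    print(len(lossy.getvalue()), len(lossless.getvalue()))  # the two sizes will differ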
8 changes: 4 additions & 4 deletions modules/dat_model.py
@@ -1,7 +1,7 @@
import os

from modules import modelloader, errors
- from modules.shared import cmd_opts, opts
+ from modules.shared import cmd_opts, opts, hf_endpoint
from modules.upscaler import Upscaler, UpscalerData
from modules.upscaler_utils import upscale_with_model

@@ -71,21 +71,21 @@ def get_dat_models(scaler):
return [
UpscalerData(
name="DAT x2",
path="https://huggingface.co/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x2.pth",
path=f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x2.pth",
scale=2,
upscaler=scaler,
sha256='7760aa96e4ee77e29d4f89c3a4486200042e019461fdb8aa286f49aa00b89b51',
),
UpscalerData(
name="DAT x3",
path="https://huggingface.co/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x3.pth",
path=f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x3.pth",
scale=3,
upscaler=scaler,
sha256='581973e02c06f90d4eb90acf743ec9604f56f3c2c6f9e1e2c2b38ded1f80d197',
),
UpscalerData(
name="DAT x4",
path="https://huggingface.co/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x4.pth",
path=f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x4.pth",
scale=4,
upscaler=scaler,
sha256='391a6ce69899dff5ea3214557e9d585608254579217169faf3d4c353caff049e',
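
The hf_endpoint value interpolated into these download URLs is presumably resolved from the HF_ENDPOINT environment variable, with https://huggingface.co as the default; the exact line lives in modules/shared.py, so treat this sketch as an assumption rather than the actual implementation:

    import os

    # Assumed resolution of shared.hf_endpoint, following the HF_ENDPOINT convention.
    hf_endpoint = os.environ.get("HF_ENDPOINT") or "https://huggingface.co"

    # With HF_ENDPOINT=https://hf-mirror.com set, model downloads go through the mirror:
    print(f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x2.pth")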
6 changes: 3 additions & 3 deletions modules/models/sd3/sd3_cond.py
@@ -24,7 +24,7 @@ def __getitem__(self, key):
return self.file.get_tensor(key)


CLIPL_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_l.safetensors"
CLIPL_URL = f"{shared.hf_endpoint}/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_l.safetensors"
CLIPL_CONFIG = {
"hidden_act": "quick_gelu",
"hidden_size": 768,
@@ -33,7 +33,7 @@ def __getitem__(self, key):
"num_hidden_layers": 12,
}

CLIPG_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_g.safetensors"
CLIPG_URL = f"{shared.hf_endpoint}/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_g.safetensors"
CLIPG_CONFIG = {
"hidden_act": "gelu",
"hidden_size": 1280,
@@ -43,7 +43,7 @@ def __getitem__(self, key):
"textual_inversion_key": "clip_g",
}

T5_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/t5xxl_fp16.safetensors"
T5_URL = f"{shared.hf_endpoint}/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/t5xxl_fp16.safetensors"
T5_CONFIG = {
"d_ff": 10240,
"d_model": 4096,
5 changes: 4 additions & 1 deletion modules/processing.py
@@ -1261,7 +1261,10 @@ def init(self, all_prompts, all_seeds, all_subseeds):
if self.hr_checkpoint_info is None:
raise Exception(f'Could not find checkpoint with name {self.hr_checkpoint_name}')

self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title
if shared.sd_model.sd_checkpoint_info == self.hr_checkpoint_info:
self.hr_checkpoint_info = None
else:
self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title

if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
3 changes: 2 additions & 1 deletion modules/scripts_auto_postprocessing.py
@@ -13,6 +13,7 @@ def show(self, is_img2img):
return scripts.AlwaysVisible

def ui(self, is_img2img):
+ self.script.tab_name = '_img2img' if is_img2img else '_txt2img'
self.postprocessing_controls = self.script.ui()
return self.postprocessing_controls.values()

@@ -33,7 +34,7 @@ def create_auto_preprocessing_script_data():

for name in shared.opts.postprocessing_enable_in_main_ui:
script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None)
- if script is None:
+ if script is None or script.script_class.extra_only:
continue

constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class())
36 changes: 31 additions & 5 deletions modules/scripts_postprocessing.py
@@ -1,3 +1,4 @@
+ import re
import dataclasses
import os
import gradio as gr
@@ -59,6 +60,10 @@ class ScriptPostprocessing:
args_from = None
args_to = None

+ # define if the script should be used only in the extras tab or only in the main UI
+ extra_only = None
+ main_ui_only = None

order = 1000
"""scripts will be ordred by this value in postprocessing UI"""

@@ -97,6 +102,31 @@ def process_firstpass(self, pp: PostprocessedImage, **args):
def image_changed(self):
pass

+ tab_name = '' # used by ScriptPostprocessingForMainUI
+ replace_pattern = re.compile(r'\s')
+ rm_pattern = re.compile(r'[^a-z_0-9]')
+
+ def elem_id(self, item_id):
+ """
+ Helper function to generate an id for an HTML element;
+ constructs the final id out of the script name and the user-supplied item_id:
+ 'extras_{self.name.lower()}_{item_id}'
+ {tab_name} is appended to the end of the id if set
+ tab_name is set to '_img2img' or '_txt2img' if used by ScriptPostprocessingForMainUI
+ Extensions should use this function to generate element IDs
+ """
+ return self.elem_id_suffix(f'extras_{self.name.lower()}_{item_id}')
+
+ def elem_id_suffix(self, base_id):
+ """
+ Append tab_name to the base_id
+ Extensions that already have specific element IDs and wish to keep them unchanged where possible should use this function
+ """
+ base_id = self.rm_pattern.sub('', self.replace_pattern.sub('_', base_id))
+ return f'{base_id}{self.tab_name}'


def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
@@ -119,10 +149,6 @@ def initialize_scripts(self, scripts_data):
for script_data in scripts_data:
script: ScriptPostprocessing = script_data.script_class()
script.filename = script_data.path

if script.name == "Simple Upscale":
continue

self.scripts.append(script)

def create_script_ui(self, script, inputs):
@@ -152,7 +178,7 @@ def script_score(name):

return len(self.scripts)

- filtered_scripts = [script for script in self.scripts if script.name not in scripts_filter_out]
+ filtered_scripts = [script for script in self.scripts if script.name not in scripts_filter_out and not script.main_ui_only]
script_scores = {script.name: (script_score(script.name), script.order, script.name, original_index) for original_index, script in enumerate(filtered_scripts)}

return sorted(filtered_scripts, key=lambda x: script_scores[x.name])
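
To illustrate the new helpers: a hypothetical extension script (the class, name, and IDs below are invented for this example and are not part of the commit) gets tab-suffixed element IDs like this:

    import gradio as gr
    from modules import scripts_postprocessing, ui_components


    class ScriptPostprocessingMyFilter(scripts_postprocessing.ScriptPostprocessing):
        name = "My Filter"
        order = 4000

        def ui(self):
            with ui_components.InputAccordion(False, label="My filter") as enable:
                # In the extras tab tab_name is '', so this id is "extras_my_filter_strength";
                # wrapped by ScriptPostprocessingForMainUI it becomes
                # "extras_my_filter_strength_txt2img" or "..._img2img".
                strength = gr.Slider(0.0, 1.0, value=0.5, label="Strength",
                                     elem_id=self.elem_id("strength"))
            return {"enable": enable, "strength": strength}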
2 changes: 1 addition & 1 deletion modules/sd_disable_initialization.py
@@ -76,7 +76,7 @@ def transformers_modeling_utils_load_pretrained_model(*args, **kwargs):
def transformers_utils_hub_get_file_from_cache(original, url, *args, **kwargs):

# this file is always 404, prevent making request
- if url == 'https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json':
+ if url == f'{shared.hf_endpoint}/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json':
return None

try:
8 changes: 5 additions & 3 deletions modules/shared_items.py
@@ -16,10 +16,12 @@ def dat_models_names():
return [x.name for x in modules.dat_model.get_dat_models(None)]


- def postprocessing_scripts():
+ def postprocessing_scripts(filter_out_extra_only=False, filter_out_main_ui_only=False):
import modules.scripts

- return modules.scripts.scripts_postproc.scripts
+ return list(filter(
+ lambda s: (not filter_out_extra_only or not s.extra_only) and (not filter_out_main_ui_only or not s.main_ui_only),
+ modules.scripts.scripts_postproc.scripts,
+ ))


def sd_vae_items():
7 changes: 4 additions & 3 deletions modules/shared_options.py
@@ -291,6 +291,7 @@
"textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
"textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks),
"textual_inversion_image_embedding_data_cache": OptionInfo(False, 'Cache the data of image embeddings').info('potentially increase TI load time at the cost some disk space'),
}))

options_templates.update(options_section(('ui_prompt_editing', "Prompt editing", "ui"), {
@@ -410,9 +411,9 @@
}))

options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), {
'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_disable_in_extras': OptionInfo([], "Disable postprocessing operations in extras tab", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts(filter_out_extra_only=True)]}),
'postprocessing_disable_in_extras': OptionInfo([], "Disable postprocessing operations in extras tab", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts(filter_out_main_ui_only=True)]}),
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts(filter_out_main_ui_only=True)]}),
'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
'postprocessing_existing_caption_action': OptionInfo("Ignore", "Action for existing captions", gr.Radio, {"choices": ["Ignore", "Keep", "Prepend", "Append"]}).info("when generating captions using postprocessing; Ignore = use generated; Keep = use original; Prepend/Append = combine both"),
}))
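
A quick sketch of how the new extra_only / main_ui_only flags, the filter in shared_items.postprocessing_scripts, and these option choices fit together; the script objects below are stand-ins invented for the example:

    # Illustrative stand-ins for ScriptPostprocessing instances.
    class FakeScript:
        def __init__(self, name, extra_only=None, main_ui_only=None):
            self.name = name
            self.extra_only = extra_only
            self.main_ui_only = main_ui_only

    scripts = [
        FakeScript("Example upscaler"),                      # available everywhere
        FakeScript("Extras-only step", extra_only=True),     # hidden from the main-UI option list
        FakeScript("Main-UI-only step", main_ui_only=True),  # hidden from the extras tab
    ]

    def postprocessing_scripts(filter_out_extra_only=False, filter_out_main_ui_only=False):
        return [
            s for s in scripts
            if (not filter_out_extra_only or not s.extra_only)
            and (not filter_out_main_ui_only or not s.main_ui_only)
        ]

    print([s.name for s in postprocessing_scripts(filter_out_extra_only=True)])
    # ['Example upscaler', 'Main-UI-only step'] -> choices for "enable in main UI"
    print([s.name for s in postprocessing_scripts(filter_out_main_ui_only=True)])
    # ['Example upscaler', 'Extras-only step'] -> choices for the extras-tab options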
(diffs for the remaining changed files are not shown)
