Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Sd3 dev: Maintaining Project Compatibility for Python 3.9 Users Without Upgrade Requirements. #16087

Closed
wants to merge 13 commits into from
Closed
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ For the purposes of getting Google and other search engines to crawl the wiki, h
## Credits
Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file.

- Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers
- Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers, https://github.com/mcmonkey4eva/sd3-ref
- k-diffusion - https://github.com/crowsonkb/k-diffusion.git
- Spandrel - https://github.com/chaiNNer-org/spandrel implementing
- GFPGAN - https://github.com/TencentARC/GFPGAN.git
Expand Down
5 changes: 5 additions & 0 deletions configs/sd3-inference.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Inference configuration for Stable Diffusion 3 checkpoints.
model:
  # Class instantiated by the model loader to run SD3 inference.
  target: modules.models.sd3.sd3_model.SD3Inferencer
  params:
    shift: 3  # presumably the SD3 timestep/sigma shift for sampling — confirm against SD3Inferencer
    state_dict: null  # no weights embedded in the config; checkpoint is loaded separately at runtime
4 changes: 3 additions & 1 deletion extensions-builtin/Lora/networks.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,9 @@ def assign_network_names_to_compvis_modules(sd_model):
network_layer_mapping[network_name] = module
module.network_layer_name = network_name
else:
for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules():
cond_stage_model = getattr(shared.sd_model.cond_stage_model, 'wrapped', shared.sd_model.cond_stage_model)

for name, module in cond_stage_model.named_modules():
network_name = name.replace(".", "_")
network_layer_mapping[network_name] = module
module.network_layer_name = network_name
Expand Down
28 changes: 23 additions & 5 deletions modules/lowvram.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
from collections import namedtuple

import torch
from modules import devices, shared

module_in_gpu = None
cpu = torch.device("cpu")

# Pairs a module with an optional parent whose hook should fire the child's GPU
# transfer. `parent` defaults to None (the original `defaults=['None']` made the
# default the truthy *string* 'None', which would wrongly pass `if parent:` checks).
ModuleWithParent = namedtuple('ModuleWithParent', ['module', 'parent'], defaults=[None])

def send_everything_to_cpu():
global module_in_gpu
Expand Down Expand Up @@ -75,13 +78,14 @@ def first_stage_model_decode_wrap(z):
(sd_model, 'depth_model'),
(sd_model, 'embedder'),
(sd_model, 'model'),
(sd_model, 'embedder'),
]

is_sdxl = hasattr(sd_model, 'conditioner')
is_sd2 = not is_sdxl and hasattr(sd_model.cond_stage_model, 'model')

if is_sdxl:
if hasattr(sd_model, 'medvram_fields'):
to_remain_in_cpu = sd_model.medvram_fields()
elif is_sdxl:
to_remain_in_cpu.append((sd_model, 'conditioner'))
elif is_sd2:
to_remain_in_cpu.append((sd_model.cond_stage_model, 'model'))
Expand All @@ -103,7 +107,21 @@ def first_stage_model_decode_wrap(z):
setattr(obj, field, module)

# register hooks for the first three models
if is_sdxl:
if hasattr(sd_model.cond_stage_model, "medvram_modules"):
for module in sd_model.cond_stage_model.medvram_modules():
if isinstance(module, ModuleWithParent):
parent = module.parent
module = module.module
else:
parent = None

if module:
module.register_forward_pre_hook(send_me_to_gpu)

if parent:
parents[module] = parent

elif is_sdxl:
sd_model.conditioner.register_forward_pre_hook(send_me_to_gpu)
elif is_sd2:
sd_model.cond_stage_model.model.register_forward_pre_hook(send_me_to_gpu)
Expand All @@ -117,9 +135,9 @@ def first_stage_model_decode_wrap(z):
sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
sd_model.first_stage_model.encode = first_stage_model_encode_wrap
sd_model.first_stage_model.decode = first_stage_model_decode_wrap
if sd_model.depth_model:
if hasattr(sd_model, 'depth_model'):
sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
if sd_model.embedder:
if hasattr(sd_model, 'embedder'):
sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)

if use_medvram:
Expand Down
Loading
Loading