Files changed (9)
  1. .gitignore +0 -207
  2. README.md +1 -1
  3. app.py +191 -346
  4. constants.py +33 -88
  5. image_processor.py +2 -2
  6. packages.txt +1 -1
  7. pre-requirements.txt +0 -1
  8. requirements.txt +3 -11
  9. utils.py +485 -714
.gitignore DELETED
@@ -1,207 +0,0 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[codz]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py.cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# UV
-# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-#uv.lock
-
-# poetry
-# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-#poetry.toml
-
-# pdm
-# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
-# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
-#pdm.lock
-#pdm.toml
-.pdm-python
-.pdm-build/
-
-# pixi
-# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
-#pixi.lock
-# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
-# in the .venv directory. It is recommended not to include this directory in version control.
-.pixi
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.envrc
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# PyCharm
-# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
-
-# Abstra
-# Abstra is an AI-powered process automation framework.
-# Ignore directories containing user credentials, local state, and settings.
-# Learn more at https://abstra.io/docs
-.abstra/
-
-# Visual Studio Code
-# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
-# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
-# and can be added to the global gitignore or merged into this file. However, if you prefer,
-# you could uncomment the following to ignore the entire vscode folder
-# .vscode/
-
-# Ruff stuff:
-.ruff_cache/
-
-# PyPI configuration file
-.pypirc
-
-# Cursor
-# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
-# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
-# refer to https://docs.cursor.com/context/ignore-files
-.cursorignore
-.cursorindexingignore
-
-# Marimo
-marimo/_static/
-marimo/_lsp/
-__marimo__/
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🧩🖼️
 colorFrom: red
 colorTo: pink
 sdk: gradio
-sdk_version: 5.44.1
+sdk_version: 4.31.3
 app_file: app.py
 pinned: true
 license: mit
app.py CHANGED
@@ -1,6 +1,5 @@
 import spaces
 import os
-from argparse import ArgumentParser
 from stablepy import (
     Model_Diffusers,
     SCHEDULE_TYPE_OPTIONS,
@@ -8,7 +7,6 @@ from stablepy import (
     check_scheduler_compatibility,
     TASK_AND_PREPROCESSORS,
     FACE_RESTORATION_MODELS,
-    PROMPT_WEIGHT_OPTIONS_PRIORITY,
     scheduler_names,
 )
 from constants import (
@@ -42,8 +40,6 @@ from constants import (
     DIFFUSERS_CONTROLNET_MODEL,
     IP_MODELS,
     MODE_IP_OPTIONS,
-    CACHE_HF_ROOT,
-    CACHE_HF,
 )
 from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
 import torch
@@ -64,7 +60,6 @@ from utils import (
     progress_step_bar,
     html_template_message,
     escape_html,
-    clear_hf_cache,
 )
 from image_processor import preprocessor_tab
 from datetime import datetime
@@ -77,18 +72,11 @@ from diffusers import FluxPipeline
 # import urllib.parse
 import subprocess
 
-IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
-HIDE_API = bool(os.getenv("HIDE_API"))
-if IS_ZERO_GPU:
-    subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
-IS_GPU_MODE = True if IS_ZERO_GPU else (True if torch.cuda.is_available() else False)
-img_path = "./images/"
-allowed_path = os.path.abspath(img_path)
-delete_cache_time = (9600, 9600) if IS_ZERO_GPU else (86400, 86400)
-
+subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 torch.backends.cuda.matmul.allow_tf32 = True
 # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
+print(os.getenv("SPACES_ZERO_GPU"))
 
 directories = [DIRECTORY_MODELS, DIRECTORY_LORAS, DIRECTORY_VAES, DIRECTORY_EMBEDS, DIRECTORY_UPSCALERS]
 for directory in directories:
@@ -96,15 +84,19 @@ for directory in directories:
 
 # Download stuffs
 for url in [url.strip() for url in DOWNLOAD_MODEL.split(',')]:
-    download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
+    if not os.path.exists(f"./models/{url.split('/')[-1]}"):
+        download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
 for url in [url.strip() for url in DOWNLOAD_VAE.split(',')]:
-    download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
+    if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
+        download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
 for url in [url.strip() for url in DOWNLOAD_LORA.split(',')]:
-    download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
+    if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
+        download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
 
 # Download Embeddings
 for url_embed in DOWNLOAD_EMBEDS:
-    download_things(DIRECTORY_EMBEDS, url_embed, HF_TOKEN, CIVITAI_API_KEY)
+    if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
+        download_things(DIRECTORY_EMBEDS, url_embed, HF_TOKEN, CIVITAI_API_KEY)
 
 # Build list models
 embed_list = get_model_list(DIRECTORY_EMBEDS)
@@ -122,16 +114,15 @@ vae_model_list.insert(0, "None")
 
 print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
 
-components = None
-if IS_ZERO_GPU:
-    flux_repo = "camenduru/FLUX.1-dev-diffusers"
-    flux_pipe = FluxPipeline.from_pretrained(
-        flux_repo,
-        transformer=None,
-        torch_dtype=torch.bfloat16,
-    ).to("cuda")
-    components = flux_pipe.components
-    delete_model(flux_repo)
+flux_repo = "camenduru/FLUX.1-dev-diffusers"
+flux_pipe = FluxPipeline.from_pretrained(
+    flux_repo,
+    transformer=None,
+    torch_dtype=torch.bfloat16,
+).to("cuda")
+components = flux_pipe.components
+delete_model(flux_repo)
+# components = None
 
 #######################
 # GUI
@@ -141,17 +132,7 @@ diffusers.utils.logging.set_verbosity(40)
 warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
 warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
 warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
-
-parser = ArgumentParser(description='DiffuseCraft: Create images from text prompts.', add_help=True)
-parser.add_argument("--share", action="store_true", dest="share_enabled", default=False, help="Enable sharing")
-parser.add_argument('--theme', type=str, default="NoCrypt/miku", help='Set the theme (default: NoCrypt/miku)')
-parser.add_argument("--ssr", action="store_true", help="Enable SSR (Server-Side Rendering)")
-parser.add_argument("--log-level", type=str, default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], help="Set logging level (default: INFO)")
-args = parser.parse_args()
-
-logger.setLevel(
-    "INFO" if IS_ZERO_GPU else getattr(logging, args.log_level.upper())
-)
+logger.setLevel(logging.DEBUG)
 
 CSS = """
 .contain { display: flex; flex-direction: column; }
@@ -161,12 +142,6 @@ CSS = """
 """
 
 
-def lora_chk(lora_):
-    if isinstance(lora_, str) and lora_.strip() not in ["", "None"]:
-        return lora_
-    return None
-
-
 class GuiSD:
     def __init__(self, stream=True):
         self.model = None
@@ -175,22 +150,13 @@ class GuiSD:
         self.last_load = datetime.now()
         self.inventory = []
 
-    def update_storage_models(self, storage_floor_gb=30, required_inventory_for_purge=3):
+    def update_storage_models(self, storage_floor_gb=24, required_inventory_for_purge=3):
         while get_used_storage_gb() > storage_floor_gb:
             if len(self.inventory) < required_inventory_for_purge:
                 break
             removal_candidate = self.inventory.pop(0)
            delete_model(removal_candidate)
 
-        # Cleanup after 60 seconds of inactivity
-        lowPrioCleanup = max((datetime.now() - self.last_load).total_seconds(), 0) > 60
-        if lowPrioCleanup and (len(self.inventory) >= required_inventory_for_purge - 1) and not self.status_loading and get_used_storage_gb(CACHE_HF_ROOT) > (storage_floor_gb * 2):
-            print("Cleaning up Hugging Face cache...")
-            clear_hf_cache()
-            self.inventory = [
-                m for m in self.inventory if os.path.exists(m)
-            ]
-
     def update_inventory(self, model_name):
         if model_name not in single_file_model_list:
             self.inventory = [
@@ -201,21 +167,14 @@ class GuiSD:
     def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):
 
         # download link model > model_name
-        if model_name.startswith("http"):
-            yield f"Downloading model: {model_name}"
-            model_name = download_things(DIRECTORY_MODELS, model_name, HF_TOKEN, CIVITAI_API_KEY)
-            if not model_name:
-                raise ValueError("Error retrieving model information from URL")
 
-        if IS_ZERO_GPU:
-            self.update_storage_models()
+        self.update_storage_models()
 
         vae_model = vae_model if vae_model != "None" else None
         model_type = get_model_type(model_name)
         dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
 
         if not os.path.exists(model_name):
-            logger.debug(f"model_name={model_name}, vae_model={vae_model}, task={task}, controlnet_model={controlnet_model}")
             _ = download_diffuser_repo(
                 repo_name=model_name,
                 model_type=model_type,
@@ -260,10 +219,10 @@ class GuiSD:
                 type_model_precision=dtype_model,
                 retain_task_model_in_cache=False,
                 controlnet_model=controlnet_model,
-                device="cpu" if IS_ZERO_GPU else None,
+                device="cpu",
                 env_components=components,
             )
-            self.model.advanced_params(image_preprocessor_cuda_active=IS_GPU_MODE)
+            self.model.advanced_params(image_preprocessor_cuda_active=True)
         else:
             if self.model.base_model_id != model_name:
                 load_now_time = datetime.now()
@@ -273,8 +232,7 @@ class GuiSD:
                     print("Waiting for the previous model's time ops...")
                    time.sleep(9 - elapsed_time)
 
-                if IS_ZERO_GPU:
-                    self.model.device = torch.device("cpu")
+                self.model.device = torch.device("cpu")
             self.model.load_pipe(
                 model_name,
                 task_name=TASK_STABLEPY[task],
@@ -377,7 +335,7 @@ class GuiSD:
         t2i_adapter_preprocessor,
         t2i_adapter_conditioning_scale,
         t2i_adapter_conditioning_factor,
-        enable_live_preview,
+        xformers_memory_efficient_attention,
         freeu,
         generator_in_cpu,
         adetailer_inpaint_only,
@@ -428,7 +386,7 @@ class GuiSD:
         vae_msg = f"VAE: {vae_model}" if vae_model else ""
         msg_lora = ""
 
-        logger.debug(f"Config model: {model_name}, {vae_model}, {loras_list}")
+        print("Config model:", model_name, vae_model, loras_list)
 
         task = TASK_STABLEPY[task]
 
@@ -526,26 +484,26 @@ class GuiSD:
             "distance_threshold": distance_threshold,
             "recolor_gamma_correction": float(recolor_gamma_correction),
             "tile_blur_sigma": int(tile_blur_sigma),
-            "lora_A": lora_chk(lora1),
+            "lora_A": lora1 if lora1 != "None" else None,
             "lora_scale_A": lora_scale1,
-            "lora_B": lora_chk(lora2),
+            "lora_B": lora2 if lora2 != "None" else None,
             "lora_scale_B": lora_scale2,
-            "lora_C": lora_chk(lora3),
+            "lora_C": lora3 if lora3 != "None" else None,
             "lora_scale_C": lora_scale3,
-            "lora_D": lora_chk(lora4),
+            "lora_D": lora4 if lora4 != "None" else None,
             "lora_scale_D": lora_scale4,
-            "lora_E": lora_chk(lora5),
+            "lora_E": lora5 if lora5 != "None" else None,
             "lora_scale_E": lora_scale5,
-            "lora_F": lora_chk(lora6),
+            "lora_F": lora6 if lora6 != "None" else None,
             "lora_scale_F": lora_scale6,
-            "lora_G": lora_chk(lora7),
+            "lora_G": lora7 if lora7 != "None" else None,
             "lora_scale_G": lora_scale7,
             "textual_inversion": embed_list if textual_inversion else [],
             "syntax_weights": syntax_weights,  # "Classic"
             "sampler": sampler,
             "schedule_type": schedule_type,
             "schedule_prediction_type": schedule_prediction_type,
-            "xformers_memory_efficient_attention": False,
+            "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
             "gui_active": True,
             "loop_generation": loop_generation,
             "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
@@ -560,7 +518,7 @@ class GuiSD:
             "leave_progress_bar": leave_progress_bar,
             "disable_progress_bar": disable_progress_bar,
             "image_previews": image_previews,
-            "display_images": False,
+            "display_images": display_images,
             "save_generated_images": save_generated_images,
             "filename_pattern": filename_pattern,
             "image_storage_location": image_storage_location,
@@ -596,11 +554,11 @@ class GuiSD:
         # kwargs for diffusers pipeline
         if guidance_rescale:
             pipe_params["guidance_rescale"] = guidance_rescale
-        if IS_ZERO_GPU:
-            self.model.device = torch.device("cuda:0")
-            if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
-                self.model.pipe.transformer.to(self.model.device)
-                logger.debug("transformer to cuda")
+
+        self.model.device = torch.device("cuda:0")
+        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
+            self.model.pipe.transformer.to(self.model.device)
+            print("transformer to cuda")
 
         actual_progress = 0
         info_images = gr.update()
@@ -630,20 +588,15 @@ class GuiSD:
 
         download_links = "<br>".join(
             [
-                f'<a href="{path.replace("/images/", f"/gradio_api/file={allowed_path}/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
+                f'<a href="{path.replace("/images/", "/file=/home/user/app/images/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
                 for i, path in enumerate(image_path)
             ]
         )
         if save_generated_images:
            info_images += f"<br>{download_links}"
 
-        if not display_images:
-            img = gr.update()
         info_state = "COMPLETE"
 
-        elif not enable_live_preview:
-            img = gr.update()
-
         yield info_state, img, info_images
@@ -744,8 +697,7 @@ def sd_gen_generate_pipeline(*args):
 
 @spaces.GPU(duration=15)
 def process_upscale(image, upscaler_name, upscaler_size):
-    if image is None:
-        return None
+    if image is None: return None
 
     from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
     from stablepy import load_upscaler_model
@@ -762,7 +714,7 @@ def process_upscale(image, upscaler_name, upscaler_size):
 
     name_upscaler = f"./{DIRECTORY_UPSCALERS}/{name_upscaler.split('/')[-1]}"
 
-    scaler_beta = load_upscaler_model(model=name_upscaler, tile=(0 if IS_ZERO_GPU else 192), tile_overlap=8, device=("cuda" if IS_GPU_MODE else "cpu"), half=IS_GPU_MODE)
+    scaler_beta = load_upscaler_model(model=name_upscaler, tile=0, tile_overlap=8, device="cuda", half=True)
    image_up = scaler_beta.upscale(image, upscaler_size, True)
 
     image_path = save_pil_image_with_metadata(image_up, f'{os.getcwd()}/up_images', exif_image)
@@ -771,11 +723,11 @@ def process_upscale(image, upscaler_name, upscaler_size):
 
 
 # https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
-# dynamic_gpu_duration.zerogpu = True
-# sd_gen_generate_pipeline.zerogpu = True
+dynamic_gpu_duration.zerogpu = True
+sd_gen_generate_pipeline.zerogpu = True
 sd_gen = GuiSD()
 
-with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
+with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
     gr.Markdown("# 🧩 DiffuseCraft")
     gr.Markdown(SUBTITLE_GUI)
     with gr.Tab("Generation"):
@@ -791,14 +743,10 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
 
             return gr.update(value=task_name, choices=new_choices)
 
-        with gr.Accordion("Model and Task", open=True, visible=True):
-            task_gui = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
-            model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
+        task_gui = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
+        model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
         prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
-
-        with gr.Accordion("Negative prompt", open=False, visible=True):
-            neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt", value="bad anatomy, ((many hands, bad hands, missing fingers)), anatomical nonsense, ugly, deformed, bad proportions, bad shadow, extra limbs, missing limbs, floating limbs, disconnected limbs, malformed hands, poorly drawn, mutation, mutated hands and fingers, extra legs, interlocked fingers, extra arms, disfigured face, long neck, asymmetrical eyes, lowres, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry, duplicate, bad composition, text, worst quality, normal quality, low quality, very displeasing, monochrome, grayscale, black and white, desaturated, low contrast, muted tones, washed out, unfinished, incomplete, draft, logo, backlighting")
-
+        neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt", value="lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, worst quality, low quality, very displeasing, (bad)")
         with gr.Row(equal_height=False):
             set_params_gui = gr.Button(value="↙️", variant="secondary", size="sm")
             clear_prompt_gui = gr.Button(value="🗑️", variant="secondary", size="sm")
@@ -828,179 +776,137 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
 
         actual_task_info = gr.HTML()
 
-        with gr.Row(equal_height=False, variant="default", visible=IS_ZERO_GPU):
+        with gr.Row(equal_height=False, variant="default"):
            gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
             with gr.Column():
                 verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
                 load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU")
 
         with gr.Column(scale=1):
-            with gr.Accordion("Generation settings", open=True, visible=True):
-                steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
-                cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7., label="CFG")
-                sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler")
-                schedule_type_gui = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
-                img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=832, label="Img Width")
-                img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1216, label="Img Height")
-                seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
-                pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
-                with gr.Row():
-                    clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
-                    free_u_gui = gr.Checkbox(value=True, label="FreeU")
-
-                with gr.Row(equal_height=False):
-                    num_images_gui = gr.Slider(minimum=1, maximum=(16 if IS_ZERO_GPU else 20), step=1, value=1, label="Images")
-                    prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[2][1])
-                    vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
-
-
-            def run_set_params_gui(base_prompt, name_model):
-                valid_receptors = {  # default values
-                    "prompt": gr.update(value=base_prompt),
-                    "neg_prompt": gr.update(value=""),
-                    "Steps": gr.update(value=30),
-                    "width": gr.update(value=1024),
-                    "height": gr.update(value=1024),
-                    "Seed": gr.update(value=-1),
-                    "Sampler": gr.update(value="Euler"),
-                    "CFG scale": gr.update(value=7.),  # cfg
-                    "Clip skip": gr.update(value=True),
-                    "Model": gr.update(value=name_model),
-                    "Schedule type": gr.update(value="Automatic"),
-                    "PAG": gr.update(value=.0),
-                    "FreeU": gr.update(value=False),
-                    "Hires upscaler": gr.update(),
-                    "Hires upscale": gr.update(),
-                    "Hires steps": gr.update(),
-                    "Hires denoising strength": gr.update(),
-                    "Hires CFG": gr.update(),
-                    "Hires sampler": gr.update(),
-                    "Hires schedule type": gr.update(),
-                    "Image resolution": gr.update(value=1024),
-                    "Strength": gr.update(),
-                    "Prompt emphasis": gr.update(),
-                }
-
-                # Generate up to 7 LoRAs
-                for i in range(1, 8):
-                    valid_receptors[f"Lora_{i}"] = gr.update()
-                    valid_receptors[f"Lora_scale_{i}"] = gr.update()
-
-                valid_keys = list(valid_receptors.keys())
-
-                parameters = extract_parameters(base_prompt)
-                # print(parameters)
-
-                if "Sampler" in parameters:
-                    value_sampler = parameters["Sampler"]
-                    for s_type in SCHEDULE_TYPE_OPTIONS:
-                        if s_type in value_sampler:
-                            value_sampler = value_sampler.replace(s_type, "").strip()
-                            parameters["Sampler"] = value_sampler
-                            parameters["Schedule type"] = s_type
-
-                params_lora = []
-                if ">" in parameters["prompt"] and "<" in parameters["prompt"]:
-                    params_lora = re.findall(r'<lora:[^>]+>', parameters["prompt"])
-                if "Loras" in parameters:
-                    params_lora += re.findall(r'<lora:[^>]+>', parameters["Loras"])
-
-                if params_lora:
-                    parsed_params = []
-                    for tag_l in params_lora:
-                        try:
-                            inner = tag_l.strip("<>")  # remove < >
-                            _, data_l = inner.split(":", 1)  # remove the "lora:" part
-                            parts_l = data_l.split(":")
-
-                            name_l = parts_l[0]
-                            weight_l = float(parts_l[1]) if len(parts_l) > 1 else 1.0  # default weight = 1.0
-
-                            parsed_params.append((name_l, weight_l))
-                        except Exception as e:
-                            print(f"Error parsing LoRA tag {tag_l}: {e}")
-
-                    new_lora_model_list = get_model_list(DIRECTORY_LORAS)
-                    new_lora_model_list.insert(0, "None")
-
-                    num_lora = 1
-                    for parsed_l, parsed_s in parsed_params:
-                        filtered_loras = [m for m in new_lora_model_list if parsed_l in m]
-                        if filtered_loras:
-                            parameters[f"Lora_{num_lora}"] = filtered_loras[0]
-                            parameters[f"Lora_scale_{num_lora}"] = parsed_s
-                            num_lora += 1
-
-                # continue = discard new value
-                for key, val in parameters.items():
-                    # print(val)
-                    if key in valid_keys:
-                        try:
-                            if key == "Sampler":
-                                if val not in scheduler_names:
-                                    continue
-                            if key in ["Schedule type", "Hires schedule type"]:
-                                if val not in SCHEDULE_TYPE_OPTIONS:
-                                    continue
-                            if key == "Hires sampler":
-                                if val not in POST_PROCESSING_SAMPLER:
-                                    continue
-                            if key == "Prompt emphasis":
-                                if val not in PROMPT_WEIGHT_OPTIONS_PRIORITY:
-                                    continue
-                            elif key == "Clip skip":
-                                if "," in str(val):
-                                    val = val.replace(",", "")
-                                if int(val) >= 2:
-                                    val = True
-                            if key == "prompt":
-                                if ">" in val and "<" in val:
-                                    val = re.sub(r'<[^>]+>', '', val)  # Delete html and loras
-                                    print("Removed LoRA written in the prompt")
-                            if key in ["prompt", "neg_prompt"]:
-                                val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
-                            if key in ["Steps", "width", "height", "Seed", "Hires steps", "Image resolution"]:
-                                val = int(val)
-                            if key == "FreeU":
-                                val = True
-                            if key in ["CFG scale", "PAG", "Hires upscale", "Hires denoising strength", "Hires CFG", "Strength"]:
-                                val = float(val)
-                            if key == "Model":
-                                filtered_models = [m for m in model_list if val in m]
-                                if filtered_models:
-                                    val = filtered_models[0]
-                                else:
-                                    val = name_model
-                            if key == "Hires upscaler":
-                                if val not in UPSCALER_KEYS:
-                                    continue
-                            if key == "Seed":
-                                continue
-
-                            valid_receptors[key] = gr.update(value=val)
-                            # print(val, type(val))
-                            # print(valid_receptors)
-                        except Exception as e:
-                            print(str(e))
-                return [value for value in valid_receptors.values()]
-
-            def run_clear_prompt_gui():
-                return gr.update(value=""), gr.update(value="")
-            clear_prompt_gui.click(
-                run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
-            )
-
-            def run_set_random_seed():
-                return -1
-            set_random_seed.click(
-                run_set_random_seed, [], seed_gui
-            )
+            steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=28, label="Steps")
+            cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7., label="CFG")
+            sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler")
+            schedule_type_gui = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
+            img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Width")
+            img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Height")
+            seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
+            pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
+            with gr.Row():
+                clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
+                free_u_gui = gr.Checkbox(value=False, label="FreeU")
+
+            with gr.Row(equal_height=False):
+
+                def run_set_params_gui(base_prompt, name_model):
+                    valid_receptors = {  # default values
+                        "prompt": gr.update(value=base_prompt),
+                        "neg_prompt": gr.update(value=""),
+                        "Steps": gr.update(value=30),
+                        "width": gr.update(value=1024),
+                        "height": gr.update(value=1024),
+                        "Seed": gr.update(value=-1),
+                        "Sampler": gr.update(value="Euler"),
+                        "CFG scale": gr.update(value=7.),  # cfg
+                        "Clip skip": gr.update(value=True),
+                        "Model": gr.update(value=name_model),
+                        "Schedule type": gr.update(value="Automatic"),
+                        "PAG": gr.update(value=.0),
+                        "FreeU": gr.update(value=False),
+                    }
+                    valid_keys = list(valid_receptors.keys())
+
+                    parameters = extract_parameters(base_prompt)
+                    # print(parameters)
+
+                    if "Sampler" in parameters:
+                        value_sampler = parameters["Sampler"]
+                        for s_type in SCHEDULE_TYPE_OPTIONS:
+                            if s_type in value_sampler:
+                                value_sampler = value_sampler.replace(s_type, "").strip()
+                                parameters["Sampler"] = value_sampler
+                                parameters["Schedule type"] = s_type
+
+                    for key, val in parameters.items():
+                        # print(val)
+                        if key in valid_keys:
+                            try:
+                                if key == "Sampler":
+                                    if val not in scheduler_names:
+                                        continue
+                                if key == "Schedule type":
+                                    if val not in SCHEDULE_TYPE_OPTIONS:
+                                        val = "Automatic"
+                                elif key == "Clip skip":
+                                    if "," in str(val):
+                                        val = val.replace(",", "")
+                                    if int(val) >= 2:
+                                        val = True
+                                if key == "prompt":
+                                    if ">" in val and "<" in val:
+                                        val = re.sub(r'<[^>]+>', '', val)
+                                        print("Removed LoRA written in the prompt")
+                                if key in ["prompt", "neg_prompt"]:
+                                    val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
+                                if key in ["Steps", "width", "height", "Seed"]:
+                                    val = int(val)
+                                if key == "FreeU":
+                                    val = True
+                                if key in ["CFG scale", "PAG"]:
+                                    val = float(val)
+                                if key == "Model":
+                                    filtered_models = [m for m in model_list if val in m]
+                                    if filtered_models:
+                                        val = filtered_models[0]
+                                    else:
+                                        val = name_model
+                                if key == "Seed":
+                                    continue
+                                valid_receptors[key] = gr.update(value=val)
+                                # print(val, type(val))
+                                # print(valid_receptors)
+                            except Exception as e:
+                                print(str(e))
+                    return [value for value in valid_receptors.values()]
+
+                set_params_gui.click(
+                    run_set_params_gui, [prompt_gui, model_name_gui], [
+                        prompt_gui,
+                        neg_prompt_gui,
+                        steps_gui,
+                        img_width_gui,
+                        img_height_gui,
+                        seed_gui,
+                        sampler_gui,
+                        cfg_gui,
+                        clip_skip_gui,
+                        model_name_gui,
+                        schedule_type_gui,
+                        pag_scale_gui,
+                        free_u_gui,
+                    ],
+                )
+
+                def run_clear_prompt_gui():
+                    return gr.update(value=""), gr.update(value="")
+                clear_prompt_gui.click(
+                    run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
+                )
+
+                def run_set_random_seed():
+                    return -1
+                set_random_seed.click(
+                    run_set_random_seed, [], seed_gui
+                )
+
+                num_images_gui = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Images")
+                prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])
+                vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
 
         with gr.Accordion("Hires fix", open=False, visible=True):
 
             upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
             upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=4., step=0.1, value=1.2, label="Upscale by")
-            upscaler_tile_size_gui = gr.Slider(minimum=0, maximum=512, step=16, value=(0 if IS_ZERO_GPU else 192), label="Upscaler Tile Size", info="0 = no tiling")
+            upscaler_tile_size_gui = gr.Slider(minimum=0, maximum=512, step=16, value=0, label="Upscaler Tile Size", info="0 = no tiling")
             upscaler_tile_overlap_gui = gr.Slider(minimum=0, maximum=48, step=1, value=8, label="Upscaler Tile Overlap")
             hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
             hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
@@ -1017,8 +923,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
             return gr.Dropdown(label=label, choices=lora_model_list, value="None", allow_custom_value=True, visible=visible)
 
         def lora_scale_slider(label, visible=True):
-            val_lora = 8 if IS_ZERO_GPU else 10
-            return gr.Slider(minimum=-val_lora, maximum=val_lora, step=0.01, value=0.33, label=label, visible=visible)
+            return gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label=label, visible=visible)
 
         lora1_gui = lora_dropdown("Lora1")
         lora_scale_1_gui = lora_scale_slider("Lora Scale 1")
@@ -1030,10 +935,10 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
         lora_scale_4_gui = lora_scale_slider("Lora Scale 4")
         lora5_gui = lora_dropdown("Lora5")
         lora_scale_5_gui = lora_scale_slider("Lora Scale 5")
-        lora6_gui = lora_dropdown("Lora6")
-        lora_scale_6_gui = lora_scale_slider("Lora Scale 6")
-        lora7_gui = lora_dropdown("Lora7")
-        lora_scale_7_gui = lora_scale_slider("Lora Scale 7")
+        lora6_gui = lora_dropdown("Lora6", visible=False)
+        lora_scale_6_gui = lora_scale_slider("Lora Scale 6", visible=False)
+        lora7_gui = lora_dropdown("Lora7", visible=False)
+        lora_scale_7_gui = lora_scale_slider("Lora Scale 7", visible=False)
 
         with gr.Accordion("From URL", open=False, visible=True):
             text_lora = gr.Textbox(
@@ -1042,7 +947,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
                 lines=1,
                 info="It has to be .safetensors files, and you can also download them from Hugging Face.",
             )
-            romanize_text = gr.Checkbox(value=False, label="Transliterate name", visible=(not IS_ZERO_GPU))
+            romanize_text = gr.Checkbox(value=False, label="Transliterate name", visible=False)
            button_lora = gr.Button("Get and Refresh the LoRA Lists")
            new_lora_status = gr.HTML()
            button_lora.click(
@@ -1107,8 +1012,8 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
             preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
             low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
             high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
-            value_threshold_gui = gr.Slider(minimum=0.0, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
-            distance_threshold_gui = gr.Slider(minimum=0.0, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
+            value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
+            distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
             recolor_gamma_correction_gui = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
             tile_blur_sigma_gui = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
@@ -1143,7 +1048,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
             gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
             return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
 
-        style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
+        style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
 
         with gr.Accordion("Textual inversion", open=False, visible=False):
             active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
@@ -1189,67 +1094,23 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
             schedule_prediction_type_gui = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
             guidance_rescale_gui = gr.Number(label="CFG rescale:", value=0., step=0.01, minimum=0., maximum=1.5)
             save_generated_images_gui = gr.Checkbox(value=True, label="Create a download link for the images")
-            enable_live_preview_gui = gr.Checkbox(value=True, label="Enable live previews")
-            display_images_gui = gr.Checkbox(value=True, label="Show final results")
             filename_pattern_gui = gr.Textbox(label="Filename pattern", value="model,seed", placeholder="model,seed,sampler,schedule_type,img_width,img_height,guidance_scale,num_steps,vae,prompt_section,neg_prompt_section", lines=1)
             hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
             hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
             generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
-            with gr.Column(visible=(not IS_ZERO_GPU)):
-                image_storage_location_gui = gr.Textbox(value=img_path, label="Image Storage Location")
-                disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
-                leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
 
         with gr.Accordion("More settings", open=False, visible=False):
             loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
             retain_task_cache_gui = gr.Checkbox(value=False, label="Retain task model in cache")
-
-            image_previews_gui = gr.Checkbox(value=True, label="Image Previews (alt)")
+            leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
+            disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
+            display_images_gui = gr.Checkbox(value=False, label="Display Images")
+            image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
+            image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
             retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
             retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
             retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
-
-            set_params_gui.click(
-                run_set_params_gui, [prompt_gui, model_name_gui], [
-                    prompt_gui,
-                    neg_prompt_gui,
-                    steps_gui,
-                    img_width_gui,
-                    img_height_gui,
-                    seed_gui,
-                    sampler_gui,
-                    cfg_gui,
-                    clip_skip_gui,
-                    model_name_gui,
-                    schedule_type_gui,
-                    pag_scale_gui,
-                    free_u_gui,
-                    upscaler_model_path_gui,
-                    upscaler_increases_size_gui,
-                    hires_steps_gui,
-                    hires_denoising_strength_gui,
-                    hires_guidance_scale_gui,
-                    hires_sampler_gui,
-                    hires_schedule_type_gui,
-                    image_resolution_gui,
-                    strength_gui,
-                    prompt_syntax_gui,
-                    lora1_gui,
-                    lora_scale_1_gui,
-                    lora2_gui,
-                    lora_scale_2_gui,
-                    lora3_gui,
-                    lora_scale_3_gui,
-                    lora4_gui,
-                    lora_scale_4_gui,
-                    lora5_gui,
-                    lora_scale_5_gui,
-                    lora6_gui,
-                    lora_scale_6_gui,
-                    lora7_gui,
-                    lora_scale_7_gui,
-                ],
-            )
+            xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
 
         with gr.Accordion("Examples and help", open=False, visible=True):
             gr.Markdown(HELP_GUI)
@@ -1306,21 +1167,10 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
                     # "hsl(360, 120, 120)"  # in fact any valid colorstring
                 ]
             ),
-            eraser=gr.Eraser(default_size="16"),
-            render=True,
-            visible=False,
-            interactive=False,
+            eraser=gr.Eraser(default_size="16")
         )
-
-        show_canvas = gr.Button("SHOW INPAINT CANVAS")
-
-        def change_visibility_canvas():
-            return gr.update(visible=True, interactive=True), gr.update(visible=False)
-        show_canvas.click(change_visibility_canvas, [], [image_base, show_canvas])
-
         invert_mask = gr.Checkbox(value=False, label="Invert mask")
         btn = gr.Button("Create mask")
-
         with gr.Column(scale=1):
             img_source = gr.Image(interactive=False)
             img_result = gr.Image(label="Mask image", show_label=True, interactive=False)
@@ -1382,7 +1232,6 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
         outputs=[load_model_gui],
         queue=True,
         show_progress="minimal",
-        api_name=(False if HIDE_API else None),
     ).success(
         fn=sd_gen_generate_pipeline,  # fn=sd_gen.generate_pipeline,
         inputs=[
@@ -1462,7 +1311,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
            t2i_adapter_preprocessor_gui,
            adapter_conditioning_scale_gui,
            adapter_conditioning_factor_gui,
-            enable_live_preview_gui,
+            xformers_memory_efficient_attention_gui,
            free_u_gui,
            generator_in_cpu_gui,
            adetailer_inpaint_only_gui,
@@ -1511,16 +1360,12 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
         outputs=[load_model_gui, result_images, actual_task_info],
         queue=True,
         show_progress="minimal",
-        # api_name=(False if HIDE_API else None),
     )
 
-if __name__ == "__main__":
-    app.queue()
-    app.launch(
-        show_error=True,
-        share=args.share_enabled,
-        debug=True,
-        ssr_mode=args.ssr,
-        allowed_paths=[allowed_path],
-        show_api=(not HIDE_API),
-    )
+app.queue()
+
+app.launch(
+    show_error=True,
+    debug=True,
+    allowed_paths=["./images/"],
+)
 
 
constants.py CHANGED
@@ -7,34 +7,28 @@ from stablepy import (
7
  ALL_BUILTIN_UPSCALERS,
8
  IP_ADAPTERS_SD,
9
  IP_ADAPTERS_SDXL,
10
- PROMPT_WEIGHT_OPTIONS_PRIORITY,
11
  )
12
 
13
- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
14
-
15
  # - **Download Models**
16
- DOWNLOAD_MODEL = "https://huggingface.co/zuv0/test/resolve/main/milkyWonderland_v40.safetensors"
17
 
18
  # - **Download VAEs**
19
- DOWNLOAD_VAE = "https://huggingface.co/Anzhc/Anzhcs-VAEs/resolve/main/SDXL%20Anime%20VAE%20Dec-only%20B3.safetensors, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
20
 
21
  # - **Download LoRAs**
22
  DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
23
 
24
  LOAD_DIFFUSERS_FORMAT_MODEL = [
25
- 'TestOrganizationPleaseIgnore/potato_quality_anime_plzwork_sdxl',
26
- 'TestOrganizationPleaseIgnore/rinAnim8drawIllustriousXL_v20_sdxl',
27
- 'TestOrganizationPleaseIgnore/perfectrsbmixIllustrious_definitiveiota_sdxl',
28
  'stabilityai/stable-diffusion-xl-base-1.0',
29
  'Laxhar/noobai-XL-1.1',
30
  'Laxhar/noobai-XL-Vpred-1.0',
31
  'black-forest-labs/FLUX.1-dev',
32
- 'black-forest-labs/FLUX.1-Krea-dev',
33
  'John6666/blue-pencil-flux1-v021-fp8-flux',
34
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
35
  'John6666/xe-anime-flux-v04-fp8-flux',
36
  'John6666/lyh-anime-flux-v2a1-fp8-flux',
37
  'John6666/carnival-unchained-v10-fp8-flux',
 
38
  'Freepik/flux.1-lite-8B-alpha',
39
  'shauray/FluxDev-HyperSD-merged',
40
  'mikeyandfriends/PixelWave_FLUX.1-dev_03',
@@ -43,21 +37,23 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
43
  # 'ostris/OpenFLUX.1',
44
  'shuttleai/shuttle-3-diffusion',
45
  'Laxhar/noobai-XL-1.0',
 
46
  'Laxhar/noobai-XL-0.77',
47
  'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
48
  'Laxhar/noobai-XL-0.6',
49
  'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
50
  'John6666/noobai-cyberfix-v10-sdxl',
51
  'John6666/noobaiiter-xl-vpred-v075-sdxl',
52
- 'John6666/ripplemix-noob-vpred10-illustrious01-v14-sdxl',
53
- 'John6666/sigmaih-15-sdxl',
 
 
54
  'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
55
  'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
56
  'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
57
- 'martineux/nova-unreal10',
58
  'John6666/mistoon-anime-v10illustrious-sdxl',
59
- 'John6666/hassaku-xl-illustrious-v22-sdxl',
60
- 'John6666/hassaku-xl-illustrious-v31-sdxl',
61
  'John6666/haruki-mix-illustrious-v10-sdxl',
62
  'John6666/noobreal-v10-sdxl',
63
  'John6666/complicated-noobai-merge-vprediction-sdxl',
@@ -68,8 +64,6 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
68
  'Laxhar/noobai-XL-Vpred-0.65',
69
  'Laxhar/noobai-XL-Vpred-0.6',
70
  'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
71
- 'John6666/cat-tower-noobai-xl-checkpoint-v15vpred-sdxl',
72
- 'John6666/cat-tower-noobai-xl-checkpoint-v20-vpred-sdxl',
73
  'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
74
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
75
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
@@ -80,46 +74,19 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
80
  'John6666/obsession-illustriousxl-v21-sdxl',
81
  'John6666/obsession-illustriousxl-v30-sdxl',
82
  'John6666/obsession-illustriousxl-v31-sdxl',
83
- 'John6666/one-obsession-13-sdxl',
84
- 'John6666/one-obsession-14-24d-sdxl',
85
- 'John6666/one-obsession-15-noobai-sdxl',
86
- 'John6666/one-obsession-v16-noobai-sdxl',
87
- 'John6666/one-obsession-17-red-sdxl',
88
- 'martineux/oneobs18',
89
- 'martineux/oneobsession19',
90
- 'John6666/cat-tower-noobai-xl-checkpoint-v14-epsilon-pred-sdxl',
91
- 'martineux/cattower-chenkin-xl',
92
- 'John6666/prefect-illustrious-xl-v3-sdxl',
93
- 'martineux/perfect4',
94
- 'martineux/prefectIllustriousXL_v5',
95
  'John6666/wai-nsfw-illustrious-v70-sdxl',
96
- 'John6666/wai-nsfw-illustrious-sdxl-v140-sdxl',
97
- 'martineux/waiIllustriousSDXL_v160',
98
  'John6666/illustrious-pony-mix-v3-sdxl',
99
- 'John6666/nova-anime-xl-il-v90-sdxl',
100
- 'John6666/nova-anime-xl-il-v110-sdxl',
101
- 'frankjoshua/novaAnimeXL_ilV140',
102
- 'John6666/nova-orange-xl-re-v10-sdxl',
103
- 'John6666/nova-orange-xl-v110-sdxl',
104
- 'John6666/nova-orange-xl-re-v20-sdxl',
105
- 'John6666/nova-unreal-xl-v60-sdxl',
106
- 'John6666/nova-unreal-xl-v70-sdxl',
107
- 'John6666/nova-unreal-xl-v80-sdxl',
108
- 'martineux/nova-unreal10',
109
- 'John6666/nova-cartoon-xl-v40-sdxl',
110
- 'martineux/novacartoon6',
111
- 'martineux/novareal8',
112
  'John6666/silvermoon-mix03-illustrious-v10-sdxl',
113
  'eienmojiki/Anything-XL',
114
  'eienmojiki/Starry-XL-v5.2',
115
- 'votepurchase/plantMilkModelSuite_walnut',
116
  'John6666/meinaxl-v2-sdxl',
117
  'Eugeoter/artiwaifu-diffusion-2.0',
118
  'comin/IterComp',
119
- 'John6666/epicrealism-xl-v8kiss-sdxl',
120
- 'John6666/epicrealism-xl-v10kiss2-sdxl',
121
  'John6666/epicrealism-xl-vxiabeast-sdxl',
122
- 'John6666/epicrealism-xl-vxvii-crystal-clear-realism-sdxl',
 
123
  'misri/zavychromaxl_v80',
124
  'SG161222/RealVisXL_V4.0',
125
  'SG161222/RealVisXL_V5.0',
@@ -135,10 +102,8 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
135
  'John6666/ebara-mfcg-pony-mix-v12-sdxl',
136
  'John6666/t-ponynai3-v51-sdxl',
137
  'John6666/t-ponynai3-v65-sdxl',
138
- 'John6666/t-ponynai3-v7-sdxl',
139
  'John6666/prefect-pony-xl-v3-sdxl',
140
  'John6666/prefect-pony-xl-v4-sdxl',
141
- 'John6666/prefect-pony-xl-v50-sdxl',
142
  'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
143
  'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
144
  'John6666/wai-real-mix-v11-sdxl',
@@ -146,14 +111,13 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
146
  'John6666/wai-c-v6-sdxl',
147
  'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
148
  'John6666/sifw-annihilation-xl-v2-sdxl',
149
- 'John6666/sifw-annihilation-xl-v305illustrious-beta-sdxl',
150
  'John6666/photo-realistic-pony-v5-sdxl',
151
  'John6666/pony-realism-v21main-sdxl',
152
  'John6666/pony-realism-v22main-sdxl',
153
- 'John6666/pony-realism-v23-ultra-sdxl',
 
154
  'John6666/cyberrealistic-pony-v65-sdxl',
155
  'John6666/cyberrealistic-pony-v7-sdxl',
156
- 'John6666/cyberrealistic-pony-v127-alternative-sdxl',
157
  'GraydientPlatformAPI/realcartoon-pony-diffusion',
158
  'John6666/nova-anime-xl-pony-v5-sdxl',
159
  'John6666/autismmix-sdxl-autismmix-pony-sdxl',
@@ -163,31 +127,13 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
163
  'John6666/duchaiten-pony-real-v11fix-sdxl',
164
  'John6666/duchaiten-pony-real-v20-sdxl',
165
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
 
 
166
  'KBlueLeaf/Kohaku-XL-Zeta',
167
  'cagliostrolab/animagine-xl-3.1',
168
- 'cagliostrolab/animagine-xl-4.0',
169
  'yodayo-ai/kivotos-xl-2.0',
170
  'yodayo-ai/holodayo-xl-2.1',
171
  'yodayo-ai/clandestine-xl-1.0',
172
- 'Raelina/Raehoshi-illust-XL-8',
173
- 'johnkillington/chenkinxmilfynoobai_v20-MLX',
174
- 'martineux/unholydesire5-xl',
175
- 'abacaxthebrave/Unholy_Desire_Mix_ILXL',
176
- 'martineux/diving5',
177
- 'martineux/diving7',
178
- 'martineux/mergestein-animuplus-xl',
179
- 'martineux/mergestein-uncannyr2-xl',
180
- 'martineux/steincustom_V12',
181
- 'martineux/miaomiao-realskin1p25-xl',
182
- 'martineux/miaov18',
183
- 'John6666/garage-mix-noob-vpred-eps-v10-vpred-sdxl',
184
- 'TestOrganizationPleaseIgnore/perfectrsbmixIllustrious_definitivelambda_sdxl',
185
- 'TestOrganizationPleaseIgnore/rinFlanimeIllustrious_v27_sdxl',
186
- 'TestOrganizationPleaseIgnore/rinAnimepopcute_v30_sdxl',
187
- 'TestOrganizationPleaseIgnore/potato_quality_anime_zzz_sdxl',
188
- 'https://huggingface.co/chemwolf/Karmix-XL-v0/resolve/main/Karmix-XL-v0.safetensors?download=true',
189
- 'https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16',
190
- 'https://civitai.com/models/30240?modelVersionId=125771',
191
  'digiplay/majicMIX_sombre_v2',
192
  'digiplay/majicMIX_realistic_v6',
193
  'digiplay/majicMIX_realistic_v7',
@@ -213,9 +159,9 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
213
  'GraydientPlatformAPI/realcartoon3d-17',
214
  'GraydientPlatformAPI/realcartoon-pixar11',
215
  'GraydientPlatformAPI/realcartoon-real17',
 
216
  ]
217
 
218
-
219
  DIFFUSERS_FORMAT_LORAS = [
220
  "nerijs/animation2k-flux",
221
  "XLabs-AI/flux-RealismLora",
@@ -237,11 +183,8 @@ DIRECTORY_VAES = 'vaes'
237
  DIRECTORY_EMBEDS = 'embedings'
238
  DIRECTORY_UPSCALERS = 'upscalers'
239
 
 
240
  STORAGE_ROOT = "/home/user/"
241
- CACHE_HF_ROOT = os.path.expanduser("~/.cache/huggingface")
242
- CACHE_HF = os.path.join(CACHE_HF_ROOT, "hub")
243
- if IS_ZERO_GPU:
244
- os.environ["HF_HOME"] = CACHE_HF
245
 
246
  TASK_STABLEPY = {
247
  'txt2img': 'txt2img',
@@ -283,7 +226,6 @@ UPSCALER_DICT_GUI = {
283
  # "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
284
  # "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
285
  "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
286
- "Real-ESRGAN-Anime-finetuning": "https://huggingface.co/danhtran2mind/Real-ESRGAN-Anime-finetuning/resolve/main/Real-ESRGAN-Anime-finetuning.pth",
287
  "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
288
  "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
289
  "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
@@ -356,9 +298,15 @@ DIFFUSERS_CONTROLNET_MODEL = [
356
  # "InstantX/FLUX.1-dev-Controlnet-Canny",
357
  ]
358
 
359
- PROMPT_W_OPTIONS = [(pwf, pwf) for pwf in PROMPT_WEIGHT_OPTIONS_PRIORITY]
360
- PROMPT_W_OPTIONS[0] = ("Classic format: (word:weight)", "Classic")
361
- PROMPT_W_OPTIONS[1] = ("Compel format: (word)weight", "Compel")
 
 
 
 
 
 
362
 
363
  WARNING_MSG_VAE = (
364
  "Use the right VAE for your model to maintain image quality. The wrong"
@@ -411,11 +359,9 @@ SUBTITLE_GUI = (
411
  " to perform different tasks in image generation."
412
  )
413
 
414
- msg_zero = "" if not IS_ZERO_GPU else "- The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'."
415
-
416
  HELP_GUI = (
417
- f"""### Help:
418
- {msg_zero}
419
  - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
420
  - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
421
  """
@@ -539,7 +485,7 @@ EXAMPLES_GUI = [
539
  20,
540
  4.0,
541
  -1,
542
- ("loras/Coloring_book_-_LineArt.safetensors" if os.path.exists("loras/Coloring_book_-_LineArt.safetensors") else "None"),
543
  1.0,
544
  "DPM++ 2M SDE",
545
  1024,
@@ -634,7 +580,6 @@ EXAMPLES_GUI = [
634
  RESOURCES = (
635
  """### Resources
636
  - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
637
- - Try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
638
- - `DiffuseCraft` in Colab:[link](https://github.com/R3gm/DiffuseCraft?tab=readme-ov-file#diffusecraft).
639
  """
640
  )
 
7
  ALL_BUILTIN_UPSCALERS,
8
  IP_ADAPTERS_SD,
9
  IP_ADAPTERS_SDXL,
 
10
  )
11
 
 
 
12
  # - **Download Models**
13
+ DOWNLOAD_MODEL = "https://huggingface.co/TechnoByte/MilkyWonderland/resolve/main/milkyWonderland_v40.safetensors"
14
 
15
  # - **Download VAEs**
16
+ DOWNLOAD_VAE = "https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
17
 
18
  # - **Download LoRAs**
19
  DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
20
 
21
  LOAD_DIFFUSERS_FORMAT_MODEL = [
 
 
 
22
  'stabilityai/stable-diffusion-xl-base-1.0',
23
  'Laxhar/noobai-XL-1.1',
24
  'Laxhar/noobai-XL-Vpred-1.0',
25
  'black-forest-labs/FLUX.1-dev',
 
26
  'John6666/blue-pencil-flux1-v021-fp8-flux',
27
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
28
  'John6666/xe-anime-flux-v04-fp8-flux',
29
  'John6666/lyh-anime-flux-v2a1-fp8-flux',
30
  'John6666/carnival-unchained-v10-fp8-flux',
31
+ 'John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux',
32
  'Freepik/flux.1-lite-8B-alpha',
33
  'shauray/FluxDev-HyperSD-merged',
34
  'mikeyandfriends/PixelWave_FLUX.1-dev_03',
 
37
  # 'ostris/OpenFLUX.1',
38
  'shuttleai/shuttle-3-diffusion',
39
  'Laxhar/noobai-XL-1.0',
40
+ 'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
41
  'Laxhar/noobai-XL-0.77',
42
  'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
43
  'Laxhar/noobai-XL-0.6',
44
  'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
45
  'John6666/noobai-cyberfix-v10-sdxl',
46
  'John6666/noobaiiter-xl-vpred-v075-sdxl',
47
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v40-sdxl',
48
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
49
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
50
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
51
  'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
52
  'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
53
  'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
 
54
  'John6666/mistoon-anime-v10illustrious-sdxl',
55
+ 'John6666/hassaku-xl-illustrious-v10-sdxl',
56
+ 'John6666/hassaku-xl-illustrious-v10style-sdxl',
57
  'John6666/haruki-mix-illustrious-v10-sdxl',
58
  'John6666/noobreal-v10-sdxl',
59
  'John6666/complicated-noobai-merge-vprediction-sdxl',
 
64
  'Laxhar/noobai-XL-Vpred-0.65',
65
  'Laxhar/noobai-XL-Vpred-0.6',
66
  'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
 
 
67
  'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
68
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
69
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
 
74
  'John6666/obsession-illustriousxl-v21-sdxl',
75
  'John6666/obsession-illustriousxl-v30-sdxl',
76
  'John6666/obsession-illustriousxl-v31-sdxl',
 
 
 
 
 
 
 
 
 
 
 
 
77
  'John6666/wai-nsfw-illustrious-v70-sdxl',
 
 
78
  'John6666/illustrious-pony-mix-v3-sdxl',
79
+ 'John6666/nova-anime-xl-illustriousv10-sdxl',
80
+ 'John6666/nova-orange-xl-v30-sdxl',
 
 
 
 
 
 
 
 
 
 
 
81
  'John6666/silvermoon-mix03-illustrious-v10-sdxl',
82
  'eienmojiki/Anything-XL',
83
  'eienmojiki/Starry-XL-v5.2',
 
84
  'John6666/meinaxl-v2-sdxl',
85
  'Eugeoter/artiwaifu-diffusion-2.0',
86
  'comin/IterComp',
 
 
87
  'John6666/epicrealism-xl-vxiabeast-sdxl',
88
+ 'John6666/epicrealism-xl-v10kiss2-sdxl',
89
+ 'John6666/epicrealism-xl-v8kiss-sdxl',
90
  'misri/zavychromaxl_v80',
91
  'SG161222/RealVisXL_V4.0',
92
  'SG161222/RealVisXL_V5.0',
 
102
  'John6666/ebara-mfcg-pony-mix-v12-sdxl',
103
  'John6666/t-ponynai3-v51-sdxl',
104
  'John6666/t-ponynai3-v65-sdxl',
 
105
  'John6666/prefect-pony-xl-v3-sdxl',
106
  'John6666/prefect-pony-xl-v4-sdxl',
 
107
  'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
108
  'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
109
  'John6666/wai-real-mix-v11-sdxl',
 
111
  'John6666/wai-c-v6-sdxl',
112
  'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
113
  'John6666/sifw-annihilation-xl-v2-sdxl',
 
114
  'John6666/photo-realistic-pony-v5-sdxl',
115
  'John6666/pony-realism-v21main-sdxl',
116
  'John6666/pony-realism-v22main-sdxl',
117
+ 'John6666/cyberrealistic-pony-v63-sdxl',
118
+ 'John6666/cyberrealistic-pony-v64-sdxl',
119
  'John6666/cyberrealistic-pony-v65-sdxl',
120
  'John6666/cyberrealistic-pony-v7-sdxl',
 
121
  'GraydientPlatformAPI/realcartoon-pony-diffusion',
122
  'John6666/nova-anime-xl-pony-v5-sdxl',
123
  'John6666/autismmix-sdxl-autismmix-pony-sdxl',
 
127
  'John6666/duchaiten-pony-real-v11fix-sdxl',
128
  'John6666/duchaiten-pony-real-v20-sdxl',
129
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
130
+ 'odyssey-labs/OdysseyXL-3.0',
131
+ 'odyssey-labs/OdysseyXL-4.0',
132
  'KBlueLeaf/Kohaku-XL-Zeta',
133
  'cagliostrolab/animagine-xl-3.1',
 
134
  'yodayo-ai/kivotos-xl-2.0',
135
  'yodayo-ai/holodayo-xl-2.1',
136
  'yodayo-ai/clandestine-xl-1.0',
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  'digiplay/majicMIX_sombre_v2',
138
  'digiplay/majicMIX_realistic_v6',
139
  'digiplay/majicMIX_realistic_v7',
 
159
  'GraydientPlatformAPI/realcartoon3d-17',
160
  'GraydientPlatformAPI/realcartoon-pixar11',
161
  'GraydientPlatformAPI/realcartoon-real17',
162
+ 'nitrosocke/Ghibli-Diffusion',
163
  ]
164
 
 
165
  DIFFUSERS_FORMAT_LORAS = [
166
  "nerijs/animation2k-flux",
167
  "XLabs-AI/flux-RealismLora",
 
183
  DIRECTORY_EMBEDS = 'embedings'
184
  DIRECTORY_UPSCALERS = 'upscalers'
185
 
186
+ CACHE_HF = "/home/user/.cache/huggingface/hub/"
187
  STORAGE_ROOT = "/home/user/"
 
 
 
 
188
 
189
  TASK_STABLEPY = {
190
  'txt2img': 'txt2img',
 
226
  # "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
227
  # "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
228
  "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
 
229
  "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
230
  "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
231
  "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
 
298
  # "InstantX/FLUX.1-dev-Controlnet-Canny",
299
  ]
300
 
301
+ PROMPT_W_OPTIONS = [
302
+ ("Compel format: (word)weight", "Compel"),
303
+ ("Classic format: (word:weight)", "Classic"),
304
+ ("Classic-original format: (word:weight)", "Classic-original"),
305
+ ("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
306
+ ("Classic-sd_embed format: (word:weight)", "Classic-sd_embed"),
307
+ ("Classic-ignore", "Classic-ignore"),
308
+ ("None", "None"),
309
+ ]
310
 
311
  WARNING_MSG_VAE = (
312
  "Use the right VAE for your model to maintain image quality. The wrong"
 
359
  " to perform different tasks in image generation."
360
  )
361
 
 
 
362
  HELP_GUI = (
363
+ """### Help:
364
+ - The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'.
365
  - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
366
  - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
367
  """
 
485
  20,
486
  4.0,
487
  -1,
488
+ "loras/Coloring_book_-_LineArt.safetensors",
489
  1.0,
490
  "DPM++ 2M SDE",
491
  1024,
 
580
  RESOURCES = (
581
  """### Resources
582
  - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
583
+ - You can also try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
 
584
  """
585
  )
image_processor.py CHANGED
@@ -92,8 +92,8 @@ def preprocessor_tab():
92
  pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
93
  pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
94
  pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
95
- pre_value_threshold = gr.Slider(minimum=0., maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
96
- pre_distance_threshold = gr.Slider(minimum=0., maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
97
  pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
98
  pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
99
  pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
 
92
  pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
93
  pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
94
  pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
95
+ pre_value_threshold = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
96
+ pre_distance_threshold = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
97
  pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
98
  pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
99
  pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
packages.txt CHANGED
@@ -1,3 +1,3 @@
1
  git-lfs
2
- aria2
3
  ffmpeg
 
1
  git-lfs
2
+ aria2 -y
3
  ffmpeg
pre-requirements.txt DELETED
@@ -1 +0,0 @@
1
- pip>=23.0.0
 
 
requirements.txt CHANGED
@@ -1,13 +1,5 @@
1
- stablepy==0.6.5
2
- torch==2.5.1
3
- diffusers
4
  gdown
5
  opencv-python
6
- unidecode
7
- pydantic==2.10.6
8
- huggingface_hub
9
- hf_transfer
10
- hf_xet
11
- spaces
12
- gradio==5.44.1
13
- matplotlib-inline
 
1
+ stablepy==0.6.0
2
+ torch==2.2.0
 
3
  gdown
4
  opencv-python
5
+ unidecode
 
 
 
 
 
 
 
utils.py CHANGED
@@ -1,714 +1,485 @@
1
- import os
2
- import re
3
- import gradio as gr
4
- from constants import (
5
- DIFFUSERS_FORMAT_LORAS,
6
- CIVITAI_API_KEY,
7
- HF_TOKEN,
8
- MODEL_TYPE_CLASS,
9
- DIRECTORY_LORAS,
10
- DIRECTORY_MODELS,
11
- DIFFUSECRAFT_CHECKPOINT_NAME,
12
- CACHE_HF_ROOT,
13
- CACHE_HF,
14
- STORAGE_ROOT,
15
- )
16
- from huggingface_hub import HfApi, get_hf_file_metadata, snapshot_download
17
- from diffusers import DiffusionPipeline
18
- from huggingface_hub import model_info as model_info_data
19
- from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
20
- from stablepy.diffusers_vanilla.utils import checkpoint_model_type
21
- from pathlib import PosixPath
22
- from unidecode import unidecode
23
- import urllib.parse
24
- import copy
25
- import requests
26
- from requests.adapters import HTTPAdapter
27
- from urllib3.util import Retry
28
- import shutil
29
- import subprocess
30
- import json
31
- import html as _html
32
-
33
- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
34
- USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
35
- MODEL_ARCH = {
36
- 'stable-diffusion-xl-v1-base/lora': "Stable Diffusion XL (Illustrious, Pony, NoobAI)",
37
- 'stable-diffusion-v1/lora': "Stable Diffusion 1.5",
38
- 'flux-1-dev/lora': "Flux",
39
- }
40
-
41
-
42
- def read_safetensors_header_from_url(url: str):
43
- """Read safetensors header from a remote Hugging Face file."""
44
- meta = get_hf_file_metadata(url)
45
-
46
- # Step 1: first 8 bytes → header length
47
- resp = requests.get(meta.location, headers={"Range": "bytes=0-7"})
48
- resp.raise_for_status()
49
- header_len = int.from_bytes(resp.content, "little")
50
-
51
- # Step 2: fetch full header JSON
52
- end = 8 + header_len - 1
53
- resp = requests.get(meta.location, headers={"Range": f"bytes=8-{end}"})
54
- resp.raise_for_status()
55
- header_json = resp.content.decode("utf-8")
56
-
57
- return json.loads(header_json)
58
-
59
-
60
- def read_safetensors_header_from_file(path: str):
61
- """Read safetensors header from a local file."""
62
- with open(path, "rb") as f:
63
- # Step 1: first 8 bytes → header length
64
- header_len = int.from_bytes(f.read(8), "little")
65
-
66
- # Step 2: read header JSON
67
- header_json = f.read(header_len).decode("utf-8")
68
-
69
- return json.loads(header_json)
70
-
71
-
72
- class LoraHeaderInformation:
73
- """
74
- Encapsulates parsed info from a LoRA JSON header and provides
75
- a compact HTML summary via .to_html().
76
- """
77
-
78
- def __init__(self, json_data):
79
- self.original_json = copy.deepcopy(json_data or {})
80
-
81
- # Check if text encoder was trained
82
- # guard for json_data being a mapping
83
- try:
84
- self.text_encoder_trained = any("text_model" in ln for ln in json_data)
85
- except Exception:
86
- self.text_encoder_trained = False
87
-
88
- # Metadata (may be None)
89
- metadata = (json_data or {}).get("__metadata__", None)
90
- self.metadata = metadata
91
-
92
- # Default values
93
- self.architecture = "undefined"
94
- self.prediction_type = "undefined"
95
- self.base_model = "undefined"
96
- self.author = "undefined"
97
- self.title = "undefined"
98
- self.common_tags_list = []
99
-
100
- if metadata:
101
- self.architecture = MODEL_ARCH.get(
102
- metadata.get('modelspec.architecture', None),
103
- "undefined"
104
- )
105
-
106
- self.prediction_type = metadata.get('modelspec.prediction_type', "undefined")
107
- self.base_model = metadata.get('ss_sd_model_name', "undefined")
108
- self.author = metadata.get('modelspec.author', "undefined")
109
- self.title = metadata.get('modelspec.title', "undefined")
110
-
111
- base_model_hash = metadata.get('ss_new_sd_model_hash', None) # SHA256
112
- # AUTOV1 ss_sd_model_hash
113
- # https://civitai.com/api/v1/model-versions/by-hash/{base_model_hash} # Info
114
- if base_model_hash:
115
- self.base_model += f" hash={base_model_hash}"
116
-
117
- # Extract tags
118
- try:
119
- tags = metadata.get('ss_tag_frequency') if "ss_tag_frequency" in metadata else metadata.get('ss_datasets', "")
120
- tags = json.loads(tags) if tags else ""
121
-
122
- if isinstance(tags, list):
123
- tags = tags[0].get("tag_frequency", {})
124
-
125
- if tags:
126
- self.common_tags_list = list(tags[list(tags.keys())[0]].keys())
127
- except Exception:
128
- self.common_tags_list = []
129
-
130
- def to_dict(self):
131
- """Return a plain dict summary of parsed fields."""
132
- return {
133
- "architecture": self.architecture,
134
- "prediction_type": self.prediction_type,
135
- "base_model": self.base_model,
136
- "author": self.author,
137
- "title": self.title,
138
- "text_encoder_trained": bool(self.text_encoder_trained),
139
- "common_tags": self.common_tags_list,
140
- }
141
-
142
- def to_html(self, limit_tags=20):
143
- """
144
- Return a compact HTML snippet (string) showing the parsed info
145
- in a small font. Values are HTML-escaped.
146
- """
147
- # helper to escape
148
- esc = _html.escape
149
-
150
- rows = [
151
- ("Title", esc(str(self.title))),
152
- ("Author", esc(str(self.author))),
153
- ("Architecture", esc(str(self.architecture))),
154
- ("Base model", esc(str(self.base_model))),
155
- ("Prediction type", esc(str(self.prediction_type))),
156
- ("Text encoder trained", esc(str(self.text_encoder_trained))),
157
- ("Reference tags", esc(str(", ".join(self.common_tags_list[:limit_tags])))),
158
- ]
159
-
160
- # small, compact table with inline styling (small font)
161
- html_rows = "".join(
162
- f"<tr><th style='text-align:left;padding:2px 6px;white-space:nowrap'>{k}</th>"
163
- f"<td style='padding:2px 6px'>{v}</td></tr>"
164
- for k, v in rows
165
- )
166
-
167
- html_snippet = (
168
- "<div style='font-family:system-ui, -apple-system, \"Segoe UI\", Roboto, "
169
- "Helvetica, Arial, \"Noto Sans\", sans-serif; font-size:12px; line-height:1.2; "
170
- "'>"
171
- f"<table style='border-collapse:collapse; font-size:12px;'>"
172
- f"{html_rows}"
173
- "</table>"
174
- "</div>"
175
- )
176
-
177
- return html_snippet
178
-
179
-
180
- def request_json_data(url):
181
- model_version_id = url.split('/')[-1]
182
- if "?modelVersionId=" in model_version_id:
183
- match = re.search(r'modelVersionId=(\d+)', url)
184
- model_version_id = match.group(1)
185
-
186
- endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_version_id}"
187
-
188
- params = {}
189
- headers = {'User-Agent': USER_AGENT, 'content-type': 'application/json'}
190
- session = requests.Session()
191
- retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
192
- session.mount("https://", HTTPAdapter(max_retries=retries))
193
-
194
- try:
195
- result = session.get(endpoint_url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
196
- result.raise_for_status()
197
- json_data = result.json()
198
- return json_data if json_data else None
199
- except Exception as e:
200
- print(f"Error: {e}")
201
- return None
202
-
203
-
204
- class ModelInformation:
205
- def __init__(self, json_data):
206
- self.model_version_id = json_data.get("id", "")
207
- self.model_id = json_data.get("modelId", "")
208
- self.download_url = json_data.get("downloadUrl", "")
209
- self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
210
- self.filename_url = next(
211
- (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "") and v.get("type", "Model") == "Model"), ""
212
- )
213
- self.filename_url = self.filename_url if self.filename_url else ""
214
- self.description = json_data.get("description", "")
215
- if self.description is None:
216
- self.description = ""
217
- self.model_name = json_data.get("model", {}).get("name", "")
218
- self.model_type = json_data.get("model", {}).get("type", "")
219
- self.nsfw = json_data.get("model", {}).get("nsfw", False)
220
- self.poi = json_data.get("model", {}).get("poi", False)
221
- self.images = [img.get("url", "") for img in json_data.get("images", [])]
222
- self.example_prompt = json_data.get("trainedWords", [""])[0] if json_data.get("trainedWords") else ""
223
- self.original_json = copy.deepcopy(json_data)
224
-
225
-
226
- def get_civit_params(url):
227
- try:
228
- json_data = request_json_data(url)
229
- mdc = ModelInformation(json_data)
230
- if mdc.download_url and mdc.filename_url:
231
- return mdc.download_url, mdc.filename_url, mdc.model_url
232
- else:
233
- ValueError("Invalid Civitai model URL")
234
- except Exception as e:
235
- print(f"Error retrieving Civitai metadata: {e} — fallback to direct download")
236
- return url, None, None
237
-
238
-
239
- def civ_redirect_down(url, dir_, civitai_api_key, romanize, alternative_name):
240
- filename_base = filename = None
241
-
242
- if alternative_name:
243
- output_path = os.path.join(dir_, alternative_name)
244
- if os.path.exists(output_path):
245
- return output_path, alternative_name
246
-
247
- # Follow the redirect to get the actual download URL
248
- curl_command = (
249
- f'curl -L -sI --connect-timeout 5 --max-time 5 '
250
- f'-H "Content-Type: application/json" '
251
- f'-H "Authorization: Bearer {civitai_api_key}" "{url}"'
252
- )
253
-
254
- headers = os.popen(curl_command).read()
255
-
256
- # Look for the redirected "Location" URL
257
- location_match = re.search(r'location: (.+)', headers, re.IGNORECASE)
258
-
259
- if location_match:
260
- redirect_url = location_match.group(1).strip()
261
-
262
- # Extract the filename from the redirect URL's "Content-Disposition"
263
- filename_match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
264
- if filename_match:
265
- encoded_filename = filename_match.group(1)
266
- # Decode the URL-encoded filename
267
- decoded_filename = urllib.parse.unquote(encoded_filename)
268
-
269
- filename = unidecode(decoded_filename) if romanize else decoded_filename
270
- # print(f"Filename redirect: {filename}")
271
-
272
- filename_base = alternative_name if alternative_name else filename
273
- if not filename_base:
274
- return None, None
275
- elif os.path.exists(os.path.join(dir_, filename_base)):
276
- return os.path.join(dir_, filename_base), filename_base
277
-
278
- aria2_command = (
279
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
280
- f'-k 1M -s 16 -d "{dir_}" -o "{filename_base}" "{redirect_url}"'
281
- )
282
- r_code = os.system(aria2_command) # noqa
283
-
284
- # if r_code != 0:
285
- # raise RuntimeError(f"Failed to download file: {filename_base}. Error code: {r_code}")
286
-
287
- output_path = os.path.join(dir_, filename_base)
288
- if not os.path.exists(output_path):
289
- return None, filename_base
290
-
291
- return output_path, filename_base
292
-
293
-
294
- def civ_api_down(url, dir_, civitai_api_key, civ_filename):
295
- """
296
- This method is susceptible to being blocked because it generates a lot of temp redirect links with aria2c.
297
- If an API key limit is reached, generating a new API key and using it can fix the issue.
298
- """
299
- output_path = None
300
-
301
- url_dl = url + f"?token={civitai_api_key}"
302
- if not civ_filename:
303
- aria2_command = f'aria2c -c -x 1 -s 1 -d "{dir_}" "{url_dl}"'
304
- os.system(aria2_command)
305
- else:
306
- output_path = os.path.join(dir_, civ_filename)
307
- if not os.path.exists(output_path):
308
- aria2_command = (
309
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
310
- f'-k 1M -s 16 -d "{dir_}" -o "{civ_filename}" "{url_dl}"'
311
- )
312
- os.system(aria2_command)
313
-
314
- return output_path
315
-
316
-
317
- def drive_down(url, dir_):
318
- import gdown
319
-
320
- output_path = None
321
-
322
- drive_id, _ = gdown.parse_url.parse_url(url, warning=False)
323
- dir_files = os.listdir(dir_)
324
-
325
- for dfile in dir_files:
326
- if drive_id in dfile:
327
- output_path = os.path.join(dir_, dfile)
328
- break
329
-
330
- if not output_path:
331
- original_path = gdown.download(url, f"{dir_}/", fuzzy=True)
332
-
333
- dir_name, base_name = os.path.split(original_path)
334
- name, ext = base_name.rsplit(".", 1)
335
- new_name = f"{name}_{drive_id}.{ext}"
336
- output_path = os.path.join(dir_name, new_name)
337
-
338
- os.rename(original_path, output_path)
339
-
340
- return output_path
341
-
342
-
343
- def hf_down(url, dir_, hf_token, romanize):
344
- url = url.replace("?download=true", "")
345
- # url = urllib.parse.quote(url, safe=':/') # fix encoding
346
-
347
- filename = unidecode(url.split('/')[-1]) if romanize else url.split('/')[-1]
348
- output_path = os.path.join(dir_, filename)
349
-
350
- if os.path.exists(output_path):
351
- return output_path
352
-
353
- if "/blob/" in url:
354
- url = url.replace("/blob/", "/resolve/")
355
-
356
- if hf_token:
357
- user_header = f'"Authorization: Bearer {hf_token}"'
358
- os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
359
- else:
360
- os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
361
-
362
- return output_path
363
-
364
-
365
- def download_things(directory, url, hf_token="", civitai_api_key="", romanize=False):
366
- url = url.strip()
367
- downloaded_file_path = None
368
-
369
- if "drive.google.com" in url:
370
- downloaded_file_path = drive_down(url, directory)
371
- elif "huggingface.co" in url:
372
- downloaded_file_path = hf_down(url, directory, hf_token, romanize)
373
- elif "civitai.com" in url:
374
- if not civitai_api_key:
375
- msg = "You need an API key to download Civitai models."
376
- print(f"\033[91m{msg}\033[0m")
377
- gr.Warning(msg)
378
- return None
379
-
380
- url, civ_filename, civ_page = get_civit_params(url)
381
- if civ_page and not IS_ZERO_GPU:
382
- print(f"\033[92mCivitai model: {civ_filename} [page: {civ_page}]\033[0m")
383
-
384
- downloaded_file_path, civ_filename = civ_redirect_down(url, directory, civitai_api_key, romanize, civ_filename)
385
-
386
- if not downloaded_file_path:
387
- msg = (
388
- "Download failed.\n"
389
- "If this is due to an API limit, generating a new API key may resolve the issue.\n"
390
- "Attempting to download using the old method..."
391
- )
392
- print(msg)
393
- gr.Warning(msg)
394
- downloaded_file_path = civ_api_down(url, directory, civitai_api_key, civ_filename)
395
- else:
396
- os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
397
-
398
- return downloaded_file_path
399
-
400
-
401
- def get_model_list(directory_path):
402
- model_list = []
403
- valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
404
-
405
- for filename in os.listdir(directory_path):
406
- if os.path.splitext(filename)[1] in valid_extensions:
407
- # name_without_extension = os.path.splitext(filename)[0]
408
- file_path = os.path.join(directory_path, filename)
409
- # model_list.append((name_without_extension, file_path))
410
- model_list.append(file_path)
411
- print('\033[34mFILE: ' + file_path + '\033[0m')
412
- return model_list
413
-
414
-
415
- def extract_parameters(input_string):
416
- parameters = {}
417
- input_string = input_string.replace("\n", "")
418
-
419
- if "Negative prompt:" not in input_string:
420
- if "Steps:" in input_string:
421
- input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
422
- else:
423
- msg = "Generation data is invalid."
424
- gr.Warning(msg)
425
- print(msg)
426
- parameters["prompt"] = input_string
427
- return parameters
428
-
429
- parm = input_string.split("Negative prompt:")
430
- parameters["prompt"] = parm[0].strip()
431
- if "Steps:" not in parm[1]:
432
- parameters["neg_prompt"] = parm[1].strip()
433
- return parameters
434
- parm = parm[1].split("Steps:")
435
- parameters["neg_prompt"] = parm[0].strip()
436
- input_string = "Steps:" + parm[1]
437
-
438
- # Extracting Steps
439
- steps_match = re.search(r'Steps: (\d+)', input_string)
440
- if steps_match:
441
- parameters['Steps'] = int(steps_match.group(1))
442
-
443
- # Extracting Size
444
- size_match = re.search(r'Size: (\d+x\d+)', input_string)
445
- if size_match:
446
- parameters['Size'] = size_match.group(1)
447
- width, height = map(int, parameters['Size'].split('x'))
448
- parameters['width'] = width
449
- parameters['height'] = height
450
-
451
- # Extracting other parameters
452
- other_parameters = re.findall(r'([^,:]+): (.*?)(?=, [^,:]+:|$)', input_string)
453
- for param in other_parameters:
454
- parameters[param[0].strip()] = param[1].strip('"')
455
-
456
- return parameters
457
-
458
-
459
- def get_my_lora(link_url, romanize):
460
- l_name = ""
461
- for url in [url.strip() for url in link_url.split(',')]:
462
- if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
463
- l_name = download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY, romanize)
464
- new_lora_model_list = get_model_list(DIRECTORY_LORAS)
465
- new_lora_model_list.insert(0, "None")
466
- new_lora_model_list = new_lora_model_list + DIFFUSERS_FORMAT_LORAS
467
- msg_lora = "Downloaded"
468
- if l_name:
469
- msg_lora += f": <b>{l_name}</b>"
470
- print(msg_lora)
471
-
472
- try:
473
- # Works with non-Civitai loras.
474
- json_data = read_safetensors_header_from_file(l_name)
475
- metadata_lora = LoraHeaderInformation(json_data)
476
- msg_lora += "<br>" + metadata_lora.to_html()
477
- except Exception:
478
- pass
479
-
480
- return gr.update(
481
- choices=new_lora_model_list
482
- ), gr.update(
483
- choices=new_lora_model_list
484
- ), gr.update(
485
- choices=new_lora_model_list
486
- ), gr.update(
487
- choices=new_lora_model_list
488
- ), gr.update(
489
- choices=new_lora_model_list
490
- ), gr.update(
491
- choices=new_lora_model_list
492
- ), gr.update(
493
- choices=new_lora_model_list
494
- ), gr.update(
495
- value=msg_lora
496
- )
497
-
498
-
499
- def info_html(json_data, title, subtitle):
500
- return f"""
501
- <div style='padding: 0; border-radius: 10px;'>
502
- <p style='margin: 0; font-weight: bold;'>{title}</p>
503
- <details>
504
- <summary>Details</summary>
505
- <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
506
- </details>
507
- </div>
508
- """
509
-
510
-
511
- def get_model_type(repo_id: str):
512
- api = HfApi(token=os.environ.get("HF_TOKEN")) # if use private or gated model
513
- default = "SD 1.5"
514
- try:
515
- if os.path.exists(repo_id):
516
- tag, _, _, _ = checkpoint_model_type(repo_id)
517
- return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
518
- else:
519
- model = api.model_info(repo_id=repo_id, timeout=5.0)
520
- tags = model.tags
521
- for tag in tags:
522
- if tag in MODEL_TYPE_CLASS.keys():
523
- return MODEL_TYPE_CLASS.get(tag, default)
524
-
525
- except Exception:
526
- return default
527
- return default
528
-
529
-
530
- def restart_space(repo_id: str, factory_reboot: bool):
531
- api = HfApi(token=os.environ.get("HF_TOKEN"))
532
- try:
533
- runtime = api.get_space_runtime(repo_id=repo_id)
534
- if runtime.stage == "RUNNING":
535
- api.restart_space(repo_id=repo_id, factory_reboot=factory_reboot)
536
- print(f"Restarting space: {repo_id}")
537
- else:
538
- print(f"Space {repo_id} is in stage: {runtime.stage}")
539
- except Exception as e:
540
- print(e)
541
-
542
-
543
- def extract_exif_data(image):
544
- if image is None:
545
- return ""
546
-
547
- try:
548
- metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
549
-
550
- for key in metadata_keys:
551
- if key in image.info:
552
- return image.info[key]
553
-
554
- return str(image.info)
555
-
556
- except Exception as e:
557
- return f"Error extracting metadata: {str(e)}"
558
-
559
-
560
- def create_mask_now(img, invert):
561
- import numpy as np
562
- import time
563
-
564
- time.sleep(0.5)
565
-
566
- transparent_image = img["layers"][0]
567
-
568
- # Extract the alpha channel
569
- alpha_channel = np.array(transparent_image)[:, :, 3]
570
-
571
- # Create a binary mask by thresholding the alpha channel
572
- binary_mask = alpha_channel > 1
573
-
574
- if invert:
575
- print("Invert")
576
- # Invert the binary mask so that the drawn shape is white and the rest is black
577
- binary_mask = np.invert(binary_mask)
578
-
579
- # Convert the binary mask to a 3-channel RGB mask
580
- rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
581
-
582
- # Convert the mask to uint8
583
- rgb_mask = rgb_mask.astype(np.uint8) * 255
584
-
585
- return img["background"], rgb_mask
586
-
587
-
588
- def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "main", token=True):
589
-
590
- variant = None
591
- if token is True and not os.environ.get("HF_TOKEN"):
592
- token = None
593
-
594
- if model_type == "SDXL":
595
- info = model_info_data(
596
- repo_name,
597
- token=token,
598
- revision=revision,
599
- timeout=5.0,
600
- )
601
-
602
- filenames = {sibling.rfilename for sibling in info.siblings}
603
- model_filenames, variant_filenames = variant_compatible_siblings(
604
- filenames, variant="fp16"
605
- )
606
-
607
- if len(variant_filenames):
608
- variant = "fp16"
609
-
610
- if model_type == "FLUX":
611
- cached_folder = snapshot_download(
612
- repo_id=repo_name,
613
- allow_patterns="transformer/*"
614
- )
615
- else:
616
- cached_folder = DiffusionPipeline.download(
617
- pretrained_model_name=repo_name,
618
- force_download=False,
619
- token=token,
620
- revision=revision,
621
- # mirror="https://hf-mirror.com",
622
- variant=variant,
623
- use_safetensors=True,
624
- trust_remote_code=False,
625
- timeout=5.0,
626
- )
627
-
628
- if isinstance(cached_folder, PosixPath):
629
- cached_folder = cached_folder.as_posix()
630
-
631
- # Task model
632
- # from huggingface_hub import hf_hub_download
633
- # hf_hub_download(
634
- # task_model,
635
- # filename="diffusion_pytorch_model.safetensors", # fix fp16 variant
636
- # )
637
-
638
- return cached_folder
639
-
640
-
641
- def get_folder_size_gb(folder_path):
642
- result = subprocess.run(["du", "-s", folder_path], capture_output=True, text=True)
643
-
644
- total_size_kb = int(result.stdout.split()[0])
645
- total_size_gb = total_size_kb / (1024 ** 2)
646
-
647
- return total_size_gb
648
-
649
-
650
- def get_used_storage_gb(path_storage=STORAGE_ROOT):
651
- try:
652
- used_gb = get_folder_size_gb(path_storage)
653
- print(f"Used Storage: {used_gb:.2f} GB")
654
- except Exception as e:
655
- used_gb = 999
656
- print(f"Error while retrieving the used storage: {e}.")
657
-
658
- return used_gb
659
-
660
-
661
- def delete_model(removal_candidate):
662
- print(f"Removing: {removal_candidate}")
663
-
664
- if os.path.exists(removal_candidate):
665
- os.remove(removal_candidate)
666
- else:
667
- diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
668
- if os.path.isdir(diffusers_model):
669
- shutil.rmtree(diffusers_model)
670
-
671
-
672
- def clear_hf_cache():
673
- """
674
- Clears the entire Hugging Face cache at ~/.cache/huggingface.
675
- Hugging Face will re-download models as needed later.
676
- """
677
- try:
678
- if os.path.exists(CACHE_HF):
679
- shutil.rmtree(CACHE_HF, ignore_errors=True)
680
- print(f"Hugging Face cache cleared: {CACHE_HF}")
681
- else:
682
- print(f"No Hugging Face cache found at: {CACHE_HF}")
683
- except Exception as e:
684
- print(f"Error clearing Hugging Face cache: {e}")
685
-
686
-
687
- def progress_step_bar(step, total):
688
- # Calculate the percentage for the progress bar width
689
- percentage = min(100, ((step / total) * 100))
690
-
691
- return f"""
692
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
693
- <div style="width: {percentage}%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
694
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 13px;">
695
- {int(percentage)}%
696
- </div>
697
- </div>
698
- """
699
-
700
-
701
- def html_template_message(msg):
702
- return f"""
703
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
704
- <div style="width: 0%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
705
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 14px; font-weight: bold; text-shadow: 1px 1px 2px black;">
706
- {msg}
707
- </div>
708
- </div>
709
- """
710
-
711
-
712
- def escape_html(text):
713
- """Escapes HTML special characters in the input text."""
714
- return text.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")
 
1
+ import os
2
+ import re
3
+ import gradio as gr
4
+ from constants import (
5
+ DIFFUSERS_FORMAT_LORAS,
6
+ CIVITAI_API_KEY,
7
+ HF_TOKEN,
8
+ MODEL_TYPE_CLASS,
9
+ DIRECTORY_LORAS,
10
+ DIRECTORY_MODELS,
11
+ DIFFUSECRAFT_CHECKPOINT_NAME,
12
+ CACHE_HF,
13
+ STORAGE_ROOT,
14
+ )
15
+ from huggingface_hub import HfApi
16
+ from huggingface_hub import snapshot_download
17
+ from diffusers import DiffusionPipeline
18
+ from huggingface_hub import model_info as model_info_data
19
+ from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
20
+ from stablepy.diffusers_vanilla.utils import checkpoint_model_type
21
+ from pathlib import PosixPath
22
+ from unidecode import unidecode
23
+ import urllib.parse
24
+ import copy
25
+ import requests
26
+ from requests.adapters import HTTPAdapter
27
+ from urllib3.util import Retry
28
+ import shutil
29
+ import subprocess
30
+
31
+ USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
32
+
33
+
34
+ def request_json_data(url):
35
+ model_version_id = url.split('/')[-1]
36
+ if "?modelVersionId=" in model_version_id:
37
+ match = re.search(r'modelVersionId=(\d+)', url)
38
+ model_version_id = match.group(1)
39
+
40
+ endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_version_id}"
41
+
42
+ params = {}
43
+ headers = {'User-Agent': USER_AGENT, 'content-type': 'application/json'}
44
+ session = requests.Session()
45
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
46
+ session.mount("https://", HTTPAdapter(max_retries=retries))
47
+
48
+ try:
49
+ result = session.get(endpoint_url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
50
+ result.raise_for_status()
51
+ json_data = result.json()
52
+ return json_data if json_data else None
53
+ except Exception as e:
54
+ print(f"Error: {e}")
55
+ return None
56
+
57
+
58
+ class ModelInformation:
59
+ def __init__(self, json_data):
60
+ self.model_version_id = json_data.get("id", "")
61
+ self.model_id = json_data.get("modelId", "")
62
+ self.download_url = json_data.get("downloadUrl", "")
63
+ self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
64
+ self.filename_url = next(
65
+ (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "") and v.get("type", "Model") == "Model"), ""
66
+ )
67
+ self.filename_url = self.filename_url if self.filename_url else ""
68
+ self.description = json_data.get("description", "")
69
+ if self.description is None: self.description = ""
70
+ self.model_name = json_data.get("model", {}).get("name", "")
71
+ self.model_type = json_data.get("model", {}).get("type", "")
72
+ self.nsfw = json_data.get("model", {}).get("nsfw", False)
73
+ self.poi = json_data.get("model", {}).get("poi", False)
74
+ self.images = [img.get("url", "") for img in json_data.get("images", [])]
75
+ self.example_prompt = json_data.get("trainedWords", [""])[0] if json_data.get("trainedWords") else ""
76
+ self.original_json = copy.deepcopy(json_data)
77
+
78
+
79
+ def retrieve_model_info(url):
80
+ json_data = request_json_data(url)
81
+ if not json_data:
82
+ return None
83
+ model_descriptor = ModelInformation(json_data)
84
+ return model_descriptor
85
+
86
+
87
+ def download_things(directory, url, hf_token="", civitai_api_key="", romanize=False):
88
+ url = url.strip()
89
+ downloaded_file_path = None
90
+
91
+ if "drive.google.com" in url:
92
+ original_dir = os.getcwd()
93
+ os.chdir(directory)
94
+ os.system(f"gdown --fuzzy {url}")
95
+ os.chdir(original_dir)
96
+ elif "huggingface.co" in url:
97
+ url = url.replace("?download=true", "")
98
+ # url = urllib.parse.quote(url, safe=':/') # fix encoding
99
+ if "/blob/" in url:
100
+ url = url.replace("/blob/", "/resolve/")
101
+ user_header = f'"Authorization: Bearer {hf_token}"'
102
+
103
+ filename = unidecode(url.split('/')[-1]) if romanize else url.split('/')[-1]
104
+
105
+ if hf_token:
106
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {filename}")
107
+ else:
108
+ os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {filename}")
109
+
110
+ downloaded_file_path = os.path.join(directory, filename)
111
+
112
+ elif "civitai.com" in url:
113
+
114
+ if not civitai_api_key:
115
+ print("\033[91mYou need an API key to download Civitai models.\033[0m")
116
+
117
+ model_profile = retrieve_model_info(url)
118
+ if (
119
+ model_profile is not None
120
+ and model_profile.download_url
121
+ and model_profile.filename_url
122
+ ):
123
+ url = model_profile.download_url
124
+ filename = unidecode(model_profile.filename_url) if romanize else model_profile.filename_url
125
+ else:
126
+ if "?" in url:
127
+ url = url.split("?")[0]
128
+ filename = ""
129
+
130
+ url_dl = url + f"?token={civitai_api_key}"
131
+ print(f"Filename: {filename}")
132
+
133
+ param_filename = ""
134
+ if filename:
135
+ param_filename = f"-o '{filename}'"
136
+
137
+ aria2_command = (
138
+ f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
139
+ f'-k 1M -s 16 -d "{directory}" {param_filename} "{url_dl}"'
140
+ )
141
+ os.system(aria2_command)
142
+
143
+ if param_filename and os.path.exists(os.path.join(directory, filename)):
144
+ downloaded_file_path = os.path.join(directory, filename)
145
+
146
+ # # PLAN B
147
+ # # Follow the redirect to get the actual download URL
148
+ # curl_command = (
149
+ # f'curl -L -sI --connect-timeout 5 --max-time 5 '
150
+ # f'-H "Content-Type: application/json" '
151
+ # f'-H "Authorization: Bearer {civitai_api_key}" "{url}"'
152
+ # )
153
+
154
+ # headers = os.popen(curl_command).read()
155
+
156
+ # # Look for the redirected "Location" URL
157
+ # location_match = re.search(r'location: (.+)', headers, re.IGNORECASE)
158
+
159
+ # if location_match:
160
+ # redirect_url = location_match.group(1).strip()
161
+
162
+ # # Extract the filename from the redirect URL's "Content-Disposition"
163
+ # filename_match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
164
+ # if filename_match:
165
+ # encoded_filename = filename_match.group(1)
166
+ # # Decode the URL-encoded filename
167
+ # decoded_filename = urllib.parse.unquote(encoded_filename)
168
+
169
+ # filename = unidecode(decoded_filename) if romanize else decoded_filename
170
+ # print(f"Filename: {filename}")
171
+
172
+ # aria2_command = (
173
+ # f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
174
+ # f'-k 1M -s 16 -d "{directory}" -o "{filename}" "{redirect_url}"'
175
+ # )
176
+ # return_code = os.system(aria2_command)
177
+
178
+ # # if return_code != 0:
179
+ # # raise RuntimeError(f"Failed to download file: {filename}. Error code: {return_code}")
180
+ # downloaded_file_path = os.path.join(directory, filename)
181
+ # if not os.path.exists(downloaded_file_path):
182
+ # downloaded_file_path = None
183
+
184
+ # if not downloaded_file_path:
185
+ # # Old method
186
+ # if "?" in url:
187
+ # url = url.split("?")[0]
188
+ # url = url + f"?token={civitai_api_key}"
189
+ # os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
190
+
191
+ else:
192
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
193
+
194
+ return downloaded_file_path
195
+
196
+
197
+ def get_model_list(directory_path):
198
+ model_list = []
199
+ valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
200
+
201
+ for filename in os.listdir(directory_path):
202
+ if os.path.splitext(filename)[1] in valid_extensions:
203
+ # name_without_extension = os.path.splitext(filename)[0]
204
+ file_path = os.path.join(directory_path, filename)
205
+ # model_list.append((name_without_extension, file_path))
206
+ model_list.append(file_path)
207
+ print('\033[34mFILE: ' + file_path + '\033[0m')
208
+ return model_list
209
+
210
+
211
+ def extract_parameters(input_string):
+     parameters = {}
+     input_string = input_string.replace("\n", "")
+
+     if "Negative prompt:" not in input_string:
+         if "Steps:" in input_string:
+             input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
+         else:
+             print("Invalid metadata")
+             parameters["prompt"] = input_string
+             return parameters
+
+     parm = input_string.split("Negative prompt:")
+     parameters["prompt"] = parm[0].strip()
+     if "Steps:" not in parm[1]:
+         print("Steps not detected")
+         parameters["neg_prompt"] = parm[1].strip()
+         return parameters
+     parm = parm[1].split("Steps:")
+     parameters["neg_prompt"] = parm[0].strip()
+     input_string = "Steps:" + parm[1]
+
+     # Extracting Steps
+     steps_match = re.search(r'Steps: (\d+)', input_string)
+     if steps_match:
+         parameters['Steps'] = int(steps_match.group(1))
+
+     # Extracting Size
+     size_match = re.search(r'Size: (\d+x\d+)', input_string)
+     if size_match:
+         parameters['Size'] = size_match.group(1)
+         width, height = map(int, parameters['Size'].split('x'))
+         parameters['width'] = width
+         parameters['height'] = height
+
+     # Extracting other parameters
+     other_parameters = re.findall(r'([^,:]+): (.*?)(?=, [^,:]+:|$)', input_string)
+     for param in other_parameters:
+         parameters[param[0].strip()] = param[1].strip('"')
+
+     return parameters
+
+
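A worked example of the parsing above, using an illustrative A1111-style metadata string (the values are made up, not from this repo):

```python
metadata = (
    "1girl, masterpiece\n"
    "Negative prompt: lowres, bad anatomy\n"
    "Steps: 28, Sampler: Euler a, CFG scale: 7, Seed: 12345, Size: 832x1216"
)

params = extract_parameters(metadata)
# params["prompt"]     -> "1girl, masterpiece"
# params["neg_prompt"] -> "lowres, bad anatomy"
# params["Steps"]      -> 28 (int)
# params["width"], params["height"] -> 832, 1216
# "Sampler", "CFG scale", "Seed" are picked up by the generic key: value regex.
```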
+ def get_my_lora(link_url, romanize):
+     l_name = ""
+     for url in [url.strip() for url in link_url.split(',')]:
+         if not os.path.exists(f"./loras/{url.split('/')[-1]}"):  # "./loras" should match DIRECTORY_LORAS
+             l_name = download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY, romanize)
+     new_lora_model_list = get_model_list(DIRECTORY_LORAS)
+     new_lora_model_list.insert(0, "None")
+     new_lora_model_list = new_lora_model_list + DIFFUSERS_FORMAT_LORAS
+     msg_lora = "Downloaded"
+     if l_name:
+         msg_lora += f": <b>{l_name}</b>"
+     print(msg_lora)
+
+     # Seven LoRA dropdowns share the same refreshed choices; the eighth output is the status message.
+     choice_updates = [gr.update(choices=new_lora_model_list) for _ in range(7)]
+     return (*choice_updates, gr.update(value=msg_lora))
+
+
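The eight returned updates must line up with eight output components on the Gradio side. A hedged wiring sketch; the component names (`lora1_dd` … `lora7_dd`, `lora_status`, `lora_download_btn`, and the inputs) are placeholders, the real ones live in app.py:

```python
lora_download_btn.click(
    fn=get_my_lora,
    inputs=[lora_url_textbox, romanize_checkbox],
    outputs=[lora1_dd, lora2_dd, lora3_dd, lora4_dd,
             lora5_dd, lora6_dd, lora7_dd, lora_status],
)
```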
+ def info_html(json_data, title, subtitle):
+     # json_data is accepted for call-site compatibility but not rendered here.
+     return f"""
+     <div style='padding: 0; border-radius: 10px;'>
+         <p style='margin: 0; font-weight: bold;'>{title}</p>
+         <details>
+             <summary>Details</summary>
+             <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
+         </details>
+     </div>
+     """
+
+
+ def get_model_type(repo_id: str):
+     api = HfApi(token=os.environ.get("HF_TOKEN"))  # token needed for private or gated models
+     default = "SD 1.5"
+     try:
+         if os.path.exists(repo_id):
+             tag, _, _, _ = checkpoint_model_type(repo_id)
+             return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
+         else:
+             model = api.model_info(repo_id=repo_id, timeout=5.0)
+             for tag in model.tags:
+                 if tag in MODEL_TYPE_CLASS:
+                     return MODEL_TYPE_CLASS[tag]
+     except Exception:
+         return default
+     return default
+
+
+ def restart_space(repo_id: str, factory_reboot: bool):
+     api = HfApi(token=os.environ.get("HF_TOKEN"))
+     try:
+         runtime = api.get_space_runtime(repo_id=repo_id)
+         if runtime.stage == "RUNNING":
+             api.restart_space(repo_id=repo_id, factory_reboot=factory_reboot)
+             print(f"Restarting space: {repo_id}")
+         else:
+             print(f"Space {repo_id} is in stage: {runtime.stage}")
+     except Exception as e:
+         print(e)
+
+
+ def extract_exif_data(image):
+     if image is None:
+         return ""
+
+     try:
+         metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
+
+         for key in metadata_keys:
+             if key in image.info:
+                 return image.info[key]
+
+         return str(image.info)
+
+     except Exception as e:
+         return f"Error extracting metadata: {str(e)}"
+
+
+ def create_mask_now(img, invert):
+     import numpy as np
+     import time
+
+     time.sleep(0.5)
+
+     transparent_image = img["layers"][0]
+
+     # Extract the alpha channel
+     alpha_channel = np.array(transparent_image)[:, :, 3]
+
+     # Create a binary mask by thresholding the alpha channel
+     binary_mask = alpha_channel > 1
+
+     if invert:
+         print("Invert")
+         # Invert the binary mask so that the drawn shape is white and the rest is black
+         binary_mask = np.invert(binary_mask)
+
+     # Convert the binary mask to a 3-channel RGB mask
+     rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
+
+     # Convert the mask to uint8
+     rgb_mask = rgb_mask.astype(np.uint8) * 255
+
+     return img["background"], rgb_mask
+
+
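The mask construction is plain alpha-channel thresholding; a self-contained illustration with a dummy 4x4 RGBA layer:

```python
import numpy as np

# Only the top-left 2x2 block has been "painted" (alpha = 255).
layer = np.zeros((4, 4, 4), dtype=np.uint8)
layer[:2, :2, 3] = 255

binary_mask = layer[:, :, 3] > 1                  # True where something was drawn
rgb_mask = np.stack((binary_mask,) * 3, axis=-1)  # replicate to 3 channels
rgb_mask = rgb_mask.astype(np.uint8) * 255        # 0 or 255 per channel

print(rgb_mask[:, :, 0])
# [[255 255   0   0]
#  [255 255   0   0]
#  [  0   0   0   0]
#  [  0   0   0   0]]
```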
+ def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "main", token=True):
+
+     variant = None
+     if token is True and not os.environ.get("HF_TOKEN"):
+         token = None
+
+     if model_type == "SDXL":
+         info = model_info_data(
+             repo_name,
+             token=token,
+             revision=revision,
+             timeout=5.0,
+         )
+
+         filenames = {sibling.rfilename for sibling in info.siblings}
+         model_filenames, variant_filenames = variant_compatible_siblings(
+             filenames, variant="fp16"
+         )
+
+         if len(variant_filenames):
+             variant = "fp16"
+
+     if model_type == "FLUX":
+         cached_folder = snapshot_download(
+             repo_id=repo_name,
+             allow_patterns="transformer/*"
+         )
+     else:
+         cached_folder = DiffusionPipeline.download(
+             pretrained_model_name=repo_name,
+             force_download=False,
+             token=token,
+             revision=revision,
+             # mirror="https://hf-mirror.com",
+             variant=variant,
+             use_safetensors=True,
+             trust_remote_code=False,
+             timeout=5.0,
+         )
+
+     if isinstance(cached_folder, PosixPath):
+         cached_folder = cached_folder.as_posix()
+
+     # Task model
+     # from huggingface_hub import hf_hub_download
+     # hf_hub_download(
+     #     task_model,
+     #     filename="diffusion_pytorch_model.safetensors",  # fix fp16 variant
+     # )
+
+     return cached_folder
+
+
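A hedged usage sketch of the two download paths (the repo ids are public examples, not taken from this diff; FLUX.1-dev is gated and needs an HF token):

```python
# SDXL: probes the repo for an fp16 variant, then downloads the pipeline files.
sdxl_folder = download_diffuser_repo("stabilityai/stable-diffusion-xl-base-1.0", "SDXL")

# FLUX: fetches only transformer/* through snapshot_download.
flux_folder = download_diffuser_repo("black-forest-labs/FLUX.1-dev", "FLUX")
print(sdxl_folder, flux_folder)  # local cache paths used to load the pipelines later
```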
+ def get_folder_size_gb(folder_path):
+     # "-k" pins du's block size to KiB; the plain "-s" default varies by platform (512 B on BSD/macOS).
+     result = subprocess.run(["du", "-sk", folder_path], capture_output=True, text=True)
+
+     total_size_kb = int(result.stdout.split()[0])
+     total_size_gb = total_size_kb / (1024 ** 2)
+
+     return total_size_gb
+
+
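Shelling out to `du` assumes a Unix environment; a hedged pure-Python equivalent using `os.walk` (slower on large caches, and it sums apparent file sizes rather than allocated blocks):

```python
import os

def get_folder_size_gb_py(folder_path: str) -> float:
    total_bytes = 0
    for root, _, files in os.walk(folder_path):
        for name in files:
            fp = os.path.join(root, name)
            if not os.path.islink(fp):  # skip symlinks, as du does by default
                total_bytes += os.path.getsize(fp)
    return total_bytes / (1024 ** 3)
```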
+ def get_used_storage_gb():
+     try:
+         used_gb = get_folder_size_gb(STORAGE_ROOT)
+         print(f"Used Storage: {used_gb:.2f} GB")
+     except Exception as e:
+         used_gb = 999  # sentinel: report storage as effectively full if measurement fails
+         print(f"Error while retrieving the used storage: {e}.")
+
+     return used_gb
+
+
+ def delete_model(removal_candidate):
+     print(f"Removing: {removal_candidate}")
+
+     if os.path.exists(removal_candidate):
+         os.remove(removal_candidate)
+     else:
+         # Diffusers-format repos are cached as "models--org--name" directories.
+         diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
+         if os.path.isdir(diffusers_model):
+             shutil.rmtree(diffusers_model)
+
+
+ def progress_step_bar(step, total):
+     # Calculate the percentage for the progress bar width
+     percentage = min(100, ((step / total) * 100))
+
+     return f"""
+     <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
+         <div style="width: {percentage}%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
+         <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 13px;">
+             {int(percentage)}%
+         </div>
+     </div>
+     """
+
+
+ def html_template_message(msg):
+     return f"""
+     <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
+         <div style="width: 0%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
+         <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 14px; font-weight: bold; text-shadow: 1px 1px 2px black;">
+             {msg}
+         </div>
+     </div>
+     """
+
+
+ def escape_html(text):
+     """Escapes HTML special characters in the input text."""
+     # "&" must be escaped first so the entities produced below aren't double-escaped.
+     return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")
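A quick check of why the `&` replacement has to run first:

```python
print(escape_html("a < b & c"))
# -> "a &lt; b &amp; c"
# If "&" were replaced last, the "&lt;" produced earlier would become "&amp;lt;".
```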