add persistent storage
app.py
CHANGED
@@ -15,8 +15,17 @@ import numpy as np
 os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
 CONVERSION_SCRIPT = "./llama.cpp/convert_hf_to_gguf.py"
 
+# Set up persistent storage paths
 log_dir = "/data/logs"
+downloads_dir = "/data/downloads"
+outputs_dir = "/data/outputs"
+models_dir = "/data/models"
+
+# Create directories if they don't exist
 os.makedirs(log_dir, exist_ok=True)
+os.makedirs(downloads_dir, exist_ok=True)
+os.makedirs(outputs_dir, exist_ok=True)
+os.makedirs(models_dir, exist_ok=True)
 
 logging.basicConfig(
     filename=os.path.join(log_dir, "app.log"),
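On Hugging Face Spaces, persistent storage (when enabled) is mounted at `/data`, which is why the new directories all live under that path. As a minimal sketch of the same setup, here is a variant with a local fallback so the code also runs outside a Space; the `PERSISTENT_ROOT` name and the `./data` fallback are illustrative assumptions, not part of this commit:

```python
import os

# /data exists only when the Space has persistent storage attached;
# fall back to a relative directory elsewhere (illustrative, not in the commit).
PERSISTENT_ROOT = "/data" if os.path.isdir("/data") else "./data"

log_dir = os.path.join(PERSISTENT_ROOT, "logs")
downloads_dir = os.path.join(PERSISTENT_ROOT, "downloads")
outputs_dir = os.path.join(PERSISTENT_ROOT, "outputs")
models_dir = os.path.join(PERSISTENT_ROOT, "models")

# exist_ok=True makes the setup idempotent across restarts
for d in (log_dir, downloads_dir, outputs_dir, models_dir):
    os.makedirs(d, exist_ok=True)
```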
@@ -50,7 +59,8 @@ Original model: https://huggingface.co/{model_id}
 Run them directly with [llama.cpp](https://github.com/ggml-org/llama.cpp), or any other llama.cpp based project
 ## Prompt format
 ```
-
+{{system_prompt}}
+{{prompt}}
 ```
 ## Download a file (not the whole branch) from below:
 | Filename | Quant type | File Size | Split |
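This hunk fills the previously empty prompt-format block of the generated model card with `{{system_prompt}}` and `{{prompt}}` placeholders. Whether the app substitutes them or leaves them as documentation for users is not visible in this diff; if substitution were wanted, a hypothetical sketch (the `render_card` helper and its arguments are illustrative, not the app's actual templating):

```python
def render_card(template: str, system_prompt: str, prompt: str) -> str:
    # Plain string substitution of the two placeholders added above;
    # the app's real card generation may work differently.
    return (template
            .replace("{{system_prompt}}", system_prompt)
            .replace("{{prompt}}", prompt))
```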
@@ -278,13 +288,14 @@ def process_model(
     )
     dl_pattern += [pattern]
 
-
-    os.
+    # Use persistent directories
+    model_download_dir = os.path.join(downloads_dir, model_name)
+    os.makedirs(model_download_dir, exist_ok=True)
 
-    with tempfile.TemporaryDirectory(dir=
+    with tempfile.TemporaryDirectory(dir=outputs_dir) as outdir:
         fp16 = str(Path(outdir) / f"{model_name}.fp16.gguf")
 
-        with tempfile.TemporaryDirectory(dir=
+        with tempfile.TemporaryDirectory(dir=model_download_dir) as tmpdir:
             local_dir = Path(tmpdir) / model_name
             api.snapshot_download(
                 repo_id=model_id,
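The key pattern in this hunk is passing `dir=` to `tempfile.TemporaryDirectory` so the scratch space is created inside the persistent mount rather than on the default (ephemeral, usually small) temp filesystem, while still being cleaned up automatically. A self-contained sketch of the behavior, with a placeholder file name:

```python
import tempfile
from pathlib import Path

outputs_dir = Path("/data/outputs")  # persistent mount, from the first hunk
outputs_dir.mkdir(parents=True, exist_ok=True)

# The temp dir is created *inside* outputs_dir, so large intermediates
# land on the big /data volume instead of the ephemeral disk.
with tempfile.TemporaryDirectory(dir=outputs_dir) as outdir:
    fp16 = Path(outdir) / "model.fp16.gguf"  # placeholder name
    fp16.write_bytes(b"")  # stand-in for the real conversion step
# On exit the temp dir and its contents are deleted; /data/outputs remains.
```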
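The `api.snapshot_download(` call is truncated by the diff; only `repo_id=model_id,` is visible. For orientation, a hedged sketch of a typical `HfApi.snapshot_download` invocation with a pattern filter (all values below are placeholders; every argument other than `repo_id` is an assumption based on `huggingface_hub`'s documented parameters, not this diff):

```python
from huggingface_hub import HfApi

api = HfApi()
api.snapshot_download(
    repo_id="author/model",                      # placeholder repo id
    local_dir="downloads/author_model",          # e.g. the tmpdir-based local_dir
    allow_patterns=["*.safetensors", "*.json"],  # e.g. the dl_pattern built above
)
```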