ggerganov committed on
Commit
92e3777
·
unverified ·
1 Parent(s): 41e000d

models : change HF hosting from dataset to model

Browse files
README.md CHANGED
@@ -466,7 +466,7 @@ The original models are converted to a custom binary format. This allows to pack
466
  You can download the converted models using the [models/download-ggml-model.sh](models/download-ggml-model.sh) script
467
  or manually from here:
468
 
469
- - https://huggingface.co/datasets/ggerganov/whisper.cpp
470
  - https://ggml.ggerganov.com
471
 
472
  For more details, see the conversion script [models/convert-pt-to-ggml.py](models/convert-pt-to-ggml.py) or the README
 
466
  You can download the converted models using the [models/download-ggml-model.sh](models/download-ggml-model.sh) script
467
  or manually from here:
468
 
469
+ - https://huggingface.co/ggerganov/whisper.cpp
470
  - https://ggml.ggerganov.com
471
 
472
  For more details, see the conversion script [models/convert-pt-to-ggml.py](models/convert-pt-to-ggml.py) or the README
bindings/go/examples/go-model-download/main.go CHANGED
@@ -17,9 +17,9 @@ import (
17
  // CONSTANTS
18
 
19
  const (
20
- srcUrl = "https://huggingface.co/datasets/ggerganov/whisper.cpp/resolve/main" // The location of the models
21
- srcExt = ".bin" // Filename extension
22
- bufSize = 1024 * 64 // Size of the buffer used for downloading the model
23
  )
24
 
25
  var (
 
17
  // CONSTANTS
18
 
19
  const (
20
+ srcUrl = "https://huggingface.co/ggerganov/whisper.cpp/resolve/main" // The location of the models
21
+ srcExt = ".bin" // Filename extension
22
+ bufSize = 1024 * 64 // Size of the buffer used for downloading the model
23
  )
24
 
25
  var (
examples/talk/README.md CHANGED
@@ -31,7 +31,7 @@ To run this, you will need a ggml GPT-2 model: [instructions](https://github.com
31
  Alternatively, you can simply download the smallest ggml GPT-2 117M model (240 MB) like this:
32
 
33
  ```
34
- wget --quiet --show-progress -O models/ggml-gpt-2-117M.bin https://huggingface.co/datasets/ggerganov/ggml/raw/main/ggml-model-gpt-2-117M.bin
35
  ```
36
 
37
  ## TTS
 
31
  Alternatively, you can simply download the smallest ggml GPT-2 117M model (240 MB) like this:
32
 
33
  ```
34
+ wget --quiet --show-progress -O models/ggml-gpt-2-117M.bin https://huggingface.co/ggerganov/ggml/raw/main/ggml-model-gpt-2-117M.bin
35
  ```
36
 
37
  ## TTS
models/README.md CHANGED
@@ -6,7 +6,7 @@ using the [convert-pt-to-ggml.py](convert-pt-to-ggml.py) script. You can either
6
  the `ggml` files yourself using the conversion script, or you can use the [download-ggml-model.sh](download-ggml-model.sh)
7
  script to download the already converted models. Currently, they are hosted on the following locations:
8
 
9
- - https://huggingface.co/datasets/ggerganov/whisper.cpp
10
  - https://ggml.ggerganov.com
11
 
12
  Sample usage:
@@ -23,7 +23,7 @@ You can now use it like this:
23
 
24
  A third option to obtain the model files is to download them from Hugging Face:
25
 
26
- https://huggingface.co/datasets/ggerganov/whisper.cpp/tree/main
27
 
28
  ## Available models
29
 
 
6
  the `ggml` files yourself using the conversion script, or you can use the [download-ggml-model.sh](download-ggml-model.sh)
7
  script to download the already converted models. Currently, they are hosted on the following locations:
8
 
9
+ - https://huggingface.co/ggerganov/whisper.cpp
10
  - https://ggml.ggerganov.com
11
 
12
  Sample usage:
 
23
 
24
  A third option to obtain the model files is to download them from Hugging Face:
25
 
26
+ https://huggingface.co/ggerganov/whisper.cpp/tree/main
27
 
28
  ## Available models
29
 
models/download-ggml-model.cmd CHANGED
@@ -40,7 +40,7 @@ if exist "ggml-%model%.bin" (
40
  goto :eof
41
  )
42
 
43
- PowerShell -NoProfile -ExecutionPolicy Bypass -Command "Invoke-WebRequest -Uri https://huggingface.co/datasets/ggerganov/whisper.cpp/resolve/main/ggml-%model%.bin -OutFile ggml-%model%.bin"
44
 
45
  if %ERRORLEVEL% neq 0 (
46
  echo Failed to download ggml model %model%
 
40
  goto :eof
41
  )
42
 
43
+ PowerShell -NoProfile -ExecutionPolicy Bypass -Command "Invoke-WebRequest -Uri https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-%model%.bin -OutFile ggml-%model%.bin"
44
 
45
  if %ERRORLEVEL% neq 0 (
46
  echo Failed to download ggml model %model%
models/download-ggml-model.sh CHANGED
@@ -6,7 +6,7 @@
6
  #src="https://ggml.ggerganov.com"
7
  #pfx="ggml-model-whisper"
8
 
9
- src="https://huggingface.co/datasets/ggerganov/whisper.cpp"
10
  pfx="resolve/main/ggml"
11
 
12
  # get the path of this script
 
6
  #src="https://ggml.ggerganov.com"
7
  #pfx="ggml-model-whisper"
8
 
9
+ src="https://huggingface.co/ggerganov/whisper.cpp"
10
  pfx="resolve/main/ggml"
11
 
12
  # get the path of this script
whisper.cpp CHANGED
@@ -631,7 +631,6 @@ struct whisper_context {
631
  int64_t t_load_us = 0;
632
  int64_t t_start_us = 0;
633
 
634
-
635
  ggml_type wtype = ggml_type::GGML_TYPE_F16; // weight type (FP32 or FP16)
636
 
637
  whisper_model model;
 
631
  int64_t t_load_us = 0;
632
  int64_t t_start_us = 0;
633
 
 
634
  ggml_type wtype = ggml_type::GGML_TYPE_F16; // weight type (FP32 or FP16)
635
 
636
  whisper_model model;