
Commit

#60 updated requirements.txt, test cases, audio services, added API versioning
sethu committed Dec 28, 2024
1 parent 4808cba commit 448645d
Showing 16 changed files with 111 additions and 449 deletions.
112 changes: 0 additions & 112 deletions app.py

This file was deleted.

9 changes: 0 additions & 9 deletions config.py

This file was deleted.

25 changes: 0 additions & 25 deletions download_model.py

This file was deleted.

9 changes: 0 additions & 9 deletions download_whisper.py

This file was deleted.

84 changes: 0 additions & 84 deletions extract_entities.py

This file was deleted.

18 changes: 0 additions & 18 deletions load_model.py

This file was deleted.

16 changes: 0 additions & 16 deletions logger.py

This file was deleted.

27 changes: 0 additions & 27 deletions logging.connf

This file was deleted.

1 change: 1 addition & 0 deletions requirements.txt
@@ -20,3 +20,4 @@ google-api-python-client
python-dotenv
whisper-timestamped
silero-vad
fastapi_versionizer
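
The new fastapi_versionizer dependency matches the "added API versioning" part of the commit message. As a minimal sketch of how this package's decorator-based versioning is typically wired into a FastAPI app (the route name and version numbers below are illustrative assumptions, not taken from this repository, and the exact import path and constructor options can differ between fastapi_versionizer releases):

```python
from fastapi import FastAPI
from fastapi_versionizer.versionizer import Versionizer, api_version

app = FastAPI()

# Hypothetical endpoint; the real routes live elsewhere in this repo.
@api_version(1)
@app.get("/transcribe")
def transcribe_v1():
    return {"api_version": 1}

@api_version(2)
@app.get("/transcribe")
def transcribe_v2():
    return {"api_version": 2}

# Rewrites the registered routes into versioned prefixes such as /v1 and /v2.
Versionizer(app=app).versionize()
```

With something like this in place, clients pin a version by calling /v1/transcribe or /v2/transcribe rather than an unversioned path.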
2 changes: 1 addition & 1 deletion service/audio_service.py
@@ -21,7 +21,7 @@
# journal={arXiv preprint arXiv:2212.04356},
# year={2022}
# }
# @article{JSSv031i07,
# @article{JSSv031i07,\
# title={Computing and Visualizing Dynamic Time Warping Alignments in R: The dtw Package},
# author={Giorgino, Toni},
# journal={Journal of Statistical Software},
6 changes: 5 additions & 1 deletion service/load_model.py
@@ -1,12 +1,16 @@
import torch
import whisper
import whisper_timestamped

# load the Whisper model from the net if it isn't stored locally
def load_model(model_id, model_path, is_ts):
    # check whether a GPU is available
    device = "cuda" if torch.cuda.is_available() else "cpu"
    #device = "cpu"
    model = whisper.load_model(model_id, device=device, download_root=model_path,)
    if (is_ts):
        model = whisper_timestamped.load_model(model_id, device=device, download_root=model_path)
    else:
        model = whisper.load_model(model_id, device=device, download_root=model_path)
    print(
        f"Model will be run on {device}\n"
        f"Model is {'multilingual' if model.is_multilingual else 'English-only'} "
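
Taken together, the updated loader now branches on is_ts to choose between whisper_timestamped (word-level timestamps) and plain whisper. A cleaned-up sketch of the resulting function, assuming a default of is_ts=False and a trailing return that the visible hunk cuts off before:

```python
import torch
import whisper
import whisper_timestamped


def load_model(model_id, model_path, is_ts=False):
    """Load a Whisper model, downloading it into model_path if it is not cached locally."""
    # Prefer the GPU when one is available.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if is_ts:
        # whisper_timestamped wraps Whisper and adds word-level timestamps.
        model = whisper_timestamped.load_model(model_id, device=device, download_root=model_path)
    else:
        model = whisper.load_model(model_id, device=device, download_root=model_path)
    print(
        f"Model will be run on {device}\n"
        f"Model is {'multilingual' if model.is_multilingual else 'English-only'}"
    )
    return model
```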
