Commit cc419f7

Merge pull request #15 from ColoredCow/fix/using-cuda-if-avaialble
Use cuda if available
pankaj-ag authored Nov 12, 2024
2 parents b82a8af + 29372e9 commit cc419f7
Showing 1 changed file with 4 additions and 3 deletions.
7 changes: 4 additions & 3 deletions transcription.py
@@ -4,11 +4,12 @@

 import whisper

+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 # Load the Whisper model and processor from Hugging Face
 def load_asr_model(modelName):
     processor = WhisperProcessor.from_pretrained(modelName)
     model = WhisperForConditionalGeneration.from_pretrained(modelName)
-    device = "cuda" if torch.cuda.is_available() else "cpu"
     model = model.to(device)
     return processor, model

@@ -18,7 +19,7 @@ def transcribe_audio(file_path, model, processor, language):

     # Preprocess audio with WhisperProcessor
     inputs = processor(audio_array, sampling_rate=sampling_rate, return_tensors="pt")
-    input_features = inputs.input_features
+    input_features = inputs.input_features.to(device)

     # Generate transcription using the fine-tuned model
     with torch.no_grad():

@@ -33,7 +34,7 @@ def translate_audio(file_path, model, processor, language):

     # Preprocess audio with WhisperProcessor
     inputs = processor(audio_array, sampling_rate=sampling_rate, return_tensors="pt")
-    input_features = inputs.input_features
+    input_features = inputs.input_features.to(device)

     # Generate transcription using the fine-tuned model
     with torch.no_grad():
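Taken together, the change hoists device selection out of load_asr_model to module scope and moves the preprocessed input features onto the same device as the model in both transcribe_audio and translate_audio. For context, here is a minimal sketch of how the updated transcription.py plausibly fits together. Only the device handling mirrors the diff; the imports, the audio loading via whisper.load_audio, the 16 kHz sampling rate, and the exact generate() arguments are assumptions, since those lines fall outside the hunks shown above.

import torch
import whisper
from transformers import WhisperProcessor, WhisperForConditionalGeneration

# Module-level device selection, as introduced by this commit
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the Whisper model and processor from Hugging Face
def load_asr_model(modelName):
    processor = WhisperProcessor.from_pretrained(modelName)
    model = WhisperForConditionalGeneration.from_pretrained(modelName)
    model = model.to(device)  # move the weights to the GPU when one is available
    return processor, model

def transcribe_audio(file_path, model, processor, language):
    # Assumed audio loading; the diff does not show this code
    audio_array = whisper.load_audio(file_path)
    sampling_rate = 16000  # Whisper models expect 16 kHz input

    # Preprocess audio with WhisperProcessor
    inputs = processor(audio_array, sampling_rate=sampling_rate, return_tensors="pt")
    input_features = inputs.input_features.to(device)  # keep inputs on the model's device

    # Generate transcription; the exact arguments here are assumed
    with torch.no_grad():
        predicted_ids = model.generate(
            input_features,
            forced_decoder_ids=processor.get_decoder_prompt_ids(
                language=language, task="transcribe"
            ),
        )
    return processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]

# translate_audio is presumably analogous, with task="translate"

if __name__ == "__main__":
    # Hypothetical model name and file path, for illustration only
    processor, model = load_asr_model("openai/whisper-small")
    print(transcribe_audio("sample.wav", model, processor, language="en"))

Selecting the device once at module scope means every function sees the same value, and the .to(device) calls keep the input tensors on the same device as the model weights; PyTorch raises a RuntimeError when a CUDA model receives CPU tensors.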
