from faster_whisper import WhisperModel
import os

AUDIO_FILE = "/sda-disk/www/whisper/sample/BeinSample.wav"


def main() -> None:
    """Transcribe AUDIO_FILE with faster-whisper and print timed segments.

    Loads the large-v3 Whisper checkpoint on CUDA, runs transcription with
    VAD filtering, then prints the detected language followed by each
    segment as ``[start -> end] text``.

    Raises:
        FileNotFoundError: if AUDIO_FILE does not exist on disk.
    """
    if not os.path.isfile(AUDIO_FILE):
        raise FileNotFoundError(f"Audio file not found: {AUDIO_FILE}")

    # Load model
    model = WhisperModel(
        "large-v3",              # change to "medium" if you want faster
        device="cuda",           # use "cpu" if no GPU
        compute_type="float16"   # best for NVIDIA GPUs
    )

    # Transcribe.
    # NOTE: per faster-whisper's API, `segments` is a lazy generator —
    # decoding actually happens while iterating the loop below, so any
    # decode-time errors surface there, not at this call.
    segments, info = model.transcribe(
        AUDIO_FILE,
        beam_size=5,
        vad_filter=True,         # very important for long audio / broadcasts
        condition_on_previous_text=False,  # avoids repetition loops on long files
        temperature=0.0,         # deterministic decoding (no sampling)
        language=None            # set None for auto-detect
    )

    print("Detected language:", info.language)
    print("Probability:", info.language_probability)
    print("-" * 50)

    # Print each segment with start/end timestamps in seconds.
    for segment in segments:
        print(
            f"[{segment.start:8.2f}s -> {segment.end:8.2f}s] "
            f"{segment.text}"
        )


# Guard the entry point so importing this module no longer triggers model
# loading and transcription as a side effect; `python <script>.py` behaves
# exactly as before.
if __name__ == "__main__":
    main()
