fix: plug undropped-semaphore memory leaks on shutdown.

This commit is contained in:
syntaxbullet
2026-02-18 14:52:21 +01:00
parent c73fe3c2c4
commit e429adca48
3 changed files with 16 additions and 4 deletions

View File

@@ -6,8 +6,6 @@ import threading
import time
from typing import Any
# Disable tokenizers parallelism to avoid leaked semaphore warnings on shutdown.
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
# Run offline — models are downloaded during setup, no need to hit HuggingFace on every launch.
os.environ.setdefault("HF_HUB_OFFLINE", "1")
@@ -575,9 +573,13 @@ class CalliopeApp(rumps.App):
self.postprocessor = None
def _on_quit(self, sender) -> None:
    """Quit handler: tear down subsystems in a safe order, then exit the app.

    Order matters here: audio capture and hotkeys are stopped first, then we
    block until any in-flight transcription finishes before releasing the
    model-holding components, and only then hand control back to rumps.
    """
    # NOTE(review): the postprocessor is released here AND again after the
    # wait below. The second call appears idempotent (the helper just nulls
    # the reference), but releasing it here — before the in-flight
    # transcription has finished — looks suspect; confirm the intent.
    self._release_postprocessor()
    self.hotkeys.stop()
    self.recorder.stop()
    # Wait for any in-flight transcription so PyTorch isn't killed mid-operation,
    # which would cause a SIGTRAP from native threads being torn down uncleanly.
    # NOTE(review): wait() returns False on a 10s timeout and we proceed
    # regardless — teardown is best-effort, not guaranteed-clean.
    self._transcribe_done.wait(timeout=10)
    self._release_transcriber()
    self._release_postprocessor()
    # Stop overlay timers synchronously to avoid retain cycles on quit.
    self.overlay.cleanup()
    rumps.quit_application()

View File

@@ -1,6 +1,15 @@
"""CLI entry point using click."""
import logging
import os
# Set these before any library import so tokenizer/OpenMP threads are never spawned.
# TOKENIZERS_PARALLELISM=false prevents the HF fast-tokenizer from creating a Rust
# thread-pool backed by OS semaphores (which leak on unclean shutdown → trace trap).
# OMP_NUM_THREADS / MKL_NUM_THREADS prevent OpenMP/MKL from forking worker threads.
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
os.environ.setdefault("OMP_NUM_THREADS", "1")
os.environ.setdefault("MKL_NUM_THREADS", "1")
import click

View File

@@ -71,7 +71,8 @@ class Transcriber:
generate_kwargs = {}
if self._context:
if self._cached_prompt_ids is None:
self._cached_prompt_ids = self._tokenizer.get_prompt_ids(self._context)
device = self._pipe.model.device
self._cached_prompt_ids = torch.tensor(self._tokenizer.get_prompt_ids(self._context), device=device)
generate_kwargs["prompt_ids"] = self._cached_prompt_ids
pipe_kwargs = {