fix: disable SSL verify for self-signed OpenAI proxy + handle no-user-msg

- Pass httpx.AsyncClient(verify=False) to OpenAI STT/TTS to support
  self-signed certificate on OPENAI_BASE_URL proxy
- Handle generate_reply calls with no user message by falling back to
  system/developer instructions

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
hailin 2026-02-28 21:39:49 -08:00
parent 4d47c6a955
commit e302891f16
2 changed files with 23 additions and 0 deletions

View File

@@ -137,10 +137,17 @@ async def entrypoint(ctx: JobContext) -> None:
     # Build STT
     if settings.stt_provider == "openai":
         from livekit.plugins import openai as openai_plugin
+        import httpx as _httpx
+        import openai as _openai
+
+        # OPENAI_BASE_URL may use a self-signed certificate (e.g. proxy)
+        _http_client = _httpx.AsyncClient(verify=False)
+        _oai_client = _openai.AsyncOpenAI(http_client=_http_client)
         stt = openai_plugin.STT(
             model=settings.openai_stt_model,
             language=settings.whisper_language,
+            client=_oai_client,
         )
     else:
         stt = LocalWhisperSTT(
@@ -151,10 +158,16 @@ async def entrypoint(ctx: JobContext) -> None:
     # Build TTS
     if settings.tts_provider == "openai":
         from livekit.plugins import openai as openai_plugin
+        import httpx as _httpx
+        import openai as _openai
+
+        _http_client_tts = _httpx.AsyncClient(verify=False)
+        _oai_client_tts = _openai.AsyncOpenAI(http_client=_http_client_tts)
         tts = openai_plugin.TTS(
             model=settings.openai_tts_model,
             voice=settings.openai_tts_voice,
+            client=_oai_client_tts,
        )
    else:
        tts = LocalKokoroTTS(

View File

@@ -100,6 +100,16 @@ class AgentServiceLLMStream(llm.LLMStream):
                 user_text = item.text_content
                 break
+        if not user_text:
+            # on_enter/generate_reply may call LLM without a user message;
+            # look for the developer/system instruction to use as prompt
+            for item in self._chat_ctx.items:
+                if getattr(item, "type", None) != "message":
+                    continue
+                if item.role in ("developer", "system"):
+                    user_text = item.text_content
+                    break
+
         if not user_text:
             logger.warning("No user message found in chat context")
             return