import asyncio
import threading
import sys
import traceback  # 상세 에러 출력을 위해 추가
from fastapi import APIRouter, WebSocket, Depends, WebSocketDisconnect, FastAPI
from fastapi.security import OAuth2PasswordBearer
from google.cloud.speech import RecognitionConfig, StreamingRecognitionConfig
from google.cloud import speech
import janus
import queue
import os
import logging

# [IMPORTANT] Double-check that this path is correct (absolute path recommended).
# e.g. os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/home/user/project/medical_memo.json'
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'medical_memo.json'

app = FastAPI()

# Verbose logging format to make debugging easier.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Audio sample rate (Hz); CHUNK is 100 ms of audio per chunk.
RATE = 16000
CHUNK = int(RATE / 10)

# Google STT configuration: 16 kHz mono LINEAR16 PCM, Japanese,
# enhanced "command_and_search" model with spoken punctuation enabled.
config = RecognitionConfig(
    encoding=RecognitionConfig.AudioEncoding.LINEAR16,
    sample_rate_hertz=16000,
    audio_channel_count=1,
    use_enhanced=True,
    language_code="ja-JP",
    model="command_and_search",
    enable_spoken_punctuation=True
)
# interim_results=True streams partial hypotheses before each final result.
streaming_config = StreamingRecognitionConfig(config=config, interim_results=True)

class SpeechClientBridge:
    """Bridge between a websocket audio stream and Google Cloud
    Speech-to-Text streaming recognition.

    Runs in a dedicated worker thread (see start()). Audio chunks are fed
    in from the event loop via add_request(); generator() drains the
    internal queue and batches chunks into streaming requests; each API
    response is handed to the on_response callback together with the sync
    queue and the websocket handle given to start().
    """

    def __init__(self, streaming_config, on_response):
        # on_response: callable(response, q, thread) invoked per API response.
        self._on_response = on_response
        # Unbounded queue of raw audio chunks (bytes).
        # None is the end-of-stream sentinel.
        self._queue = queue.Queue()
        self._ended = False
        self.streaming_config = streaming_config

    def start(self, q, thread):
        """Blocking entry point: open the STT stream and pump responses.

        q: sync side of a janus queue used to ship results to the client.
        thread: opaque handle (the websocket) forwarded to the callback.
        On any failure a final error transcript is pushed onto q.
        """
        print("[DEBUG] SpeechClientBridge 스레드 시작됨")
        self._ended = False
        self.q = q
        self.thread = thread

        try:
            # Client creation fails here if the credentials file is
            # missing or invalid.
            client = speech.SpeechClient()
            print("[DEBUG] Google SpeechClient 생성 성공")

            stream = self.generator()
            requests = (
                speech.StreamingRecognizeRequest(audio_content=content)
                for content in stream
            )

            # Open the bidirectional streaming recognition call.
            responses = client.streaming_recognize(self.streaming_config, requests)
            print("[DEBUG] Google API 스트리밍 연결 성공, 응답 대기 중...")

            self.process_responses_loop(responses)

        except Exception as e:
            print(f"\n[CRITICAL ERROR] Bridge start 내부 에러: {e}")
            traceback.print_exc()  # show the exact failure location
            # Surface the error to the client as a final transcript.
            self.q.put({"is_final": True, "transcript": f"Error: {str(e)}"})

        print("[DEBUG] SpeechClientBridge 스레드 종료")

    def terminate(self):
        """Stop the bridge.

        Sets the end flag AND pushes the None sentinel so a generator()
        blocked inside Queue.get() wakes up and terminates. Without the
        sentinel the worker thread hangs forever waiting for audio that
        will never arrive (thread leak on client disconnect).
        """
        self._ended = True
        self._queue.put(None)

    def add_request(self, buffer):
        """Queue one chunk of raw audio bytes for recognition."""
        self._queue.put(bytes(buffer), block=False)

    def process_responses_loop(self, responses):
        """Iterate API responses, dispatching each to the callback until
        the stream ends or terminate() is called."""
        try:
            for response in responses:
                self._on_response(response, self.q, self.thread)
                if self._ended:
                    break
        except Exception as e:
            # Errors here typically mean audio-format mismatch, quota
            # exhaustion, or a dropped connection.
            print(f"\n[CRITICAL ERROR] Google API 응답 처리 중 에러 발생: {e}")
            traceback.print_exc()

    def generator(self):
        """Yield concatenated audio chunks; the None sentinel ends the stream.

        Blocks for the first chunk, then greedily drains whatever else is
        already queued so each request carries as much audio as possible.
        """
        while not self._ended:
            chunk = self._queue.get()
            if chunk is None:
                return
            data = [chunk]
            while True:
                try:
                    chunk = self._queue.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b"".join(data)


def on_transcription_response(response, q, websocket):
    """Per-response STT callback: log the top transcript and push a
    {"is_final", "transcript"} dict onto the sync queue for the sender
    task. The websocket argument is accepted but not used here."""
    results = response.results
    if not results:
        return
    top = results[0]
    if not top.alternatives:
        return

    text = top.alternatives[0].transcript
    final = bool(top.is_final)

    # Mirror what is queued for the client in the terminal log.
    label = "[Final]" if final else "[Partial]"
    print(f"{label} {text}")
    q.put({"is_final": final, "transcript": text})


async def send_transcription(websocket: WebSocket, async_q: janus._AsyncQueueProxy):
    """Forward transcription results from the async queue to the client
    as JSON until cancelled. Cancellation is swallowed silently; any
    other failure is logged and ends the loop."""
    try:
        while True:
            item = await async_q.get()
            payload = {
                "is_final": item['is_final'],
                "transcript": item['transcript']
            }
            await websocket.send_json(payload)
    except asyncio.CancelledError:
        pass
    except Exception as e:
        print(f"[ERROR] 클라이언트로 전송 실패: {e}")


@app.websocket("/wslang/ja/")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    print(">> 클라이언트 웹소켓 연결됨")
    
    q = janus.Queue()
    bridge = SpeechClientBridge(streaming_config, on_transcription_response)
    
    # 별도 스레드에서 Bridge 시작
    bridge_task = asyncio.create_task(asyncio.to_thread(bridge.start, q.sync_q, websocket))
    send_task = asyncio.create_task(send_transcription(websocket, q.async_q))
    
    try:
        while True:
            data = await websocket.receive()
            
            if 'text' in data:
                message = data['text']
                print(f"[Received Text] {message}")
                await websocket.send_text(message)
                
            elif 'bytes' in data:
                message = data['bytes']
                # 오디오 데이터가 들어오는지 로그로 확인 (너무 많으면 주석 처리)
                # print(f"[Audio] {len(message)} bytes 수신", end='\r') 
                if message:
                    bridge.add_request(message)
            else:
                pass
                
    except WebSocketDisconnect:
        print(">> 클라이언트 연결 끊김")
        bridge.terminate()
        send_task.cancel()
    except Exception as e:
        print(f"[ERROR] 웹소켓 엔드포인트 에러: {e}")
        traceback.print_exc()
        bridge.terminate()
        send_task.cancel()