| .. | .. |
|---|
| 26 | 26 | |
|---|
| 27 | 27 | const MESSAGES_DIR = "pailot-messages"; |
|---|
| 28 | 28 | |
|---|
| 29 | | -/** Strip heavy fields (base64 images, audio URIs) before persisting. */ |
|---|
| 29 | +/** Strip heavy fields (base64 images, audio URIs) before persisting. |
|---|
| 30 | + * Voice messages keep their content (transcript) but lose audioUri |
|---|
| 31 | + * since cache files won't survive app restarts. */ |
|---|
| 30 | 32 | function lightMessage(m: Message): Message { |
|---|
| 31 | 33 | const light = { ...m }; |
|---|
| 32 | 34 | if (light.imageBase64) light.imageBase64 = undefined; |
|---|
| .. | .. |
|---|
| 63 | 65 | if (!file.endsWith(".json")) continue; |
|---|
| 64 | 66 | const sessionId = file.replace(".json", ""); |
|---|
| 65 | 67 | const content = await fs.readAsStringAsync(`${dir}${file}`); |
|---|
| 66 | | - result[sessionId] = JSON.parse(content) as Message[]; |
|---|
| 68 | + result[sessionId] = (JSON.parse(content) as Message[]) |
|---|
| 69 | + // Drop voice messages with no audio and no content (empty chunks) |
|---|
| 70 | + .filter((m) => !(m.type === "voice" && !m.audioUri && !m.content)) |
|---|
| 71 | + .map((m) => { |
|---|
| 72 | + // Voice messages without audio but with transcript → show as text |
|---|
| 73 | + if (m.type === "voice" && !m.audioUri && m.content) { |
|---|
| 74 | +            return { ...m, type: "text" as const }; |
|---|
| 75 | + } |
|---|
| 76 | + return m; |
|---|
| 77 | + }); |
|---|
| 67 | 78 | } |
|---|
| 68 | 79 | return result; |
|---|
| 69 | 80 | } catch { |
|---|
| .. | .. |
|---|
| 179 | 190 | } |
|---|
| 180 | 191 | }, [messages]); |
|---|
| 181 | 192 | |
|---|
| 182 | | - // On connect: ask gateway to detect the focused iTerm2 session and sync |
|---|
| 193 | + // On connect: ask gateway to sync sessions. If we already had a session |
|---|
| 194 | + // selected, tell the gateway so it preserves our selection instead of |
|---|
| 195 | + // jumping to whatever iTerm has focused on the Mac. |
|---|
| 183 | 196 | useEffect(() => { |
|---|
| 184 | 197 | if (status === "connected") { |
|---|
| 185 | 198 | needsSync.current = true; |
|---|
| 186 | | - sendCommand("sync"); |
|---|
| 199 | + sendCommand("sync", activeSessionId ? { activeSessionId } : undefined); |
|---|
| 187 | 200 | } |
|---|
| 201 | +    // eslint-disable-next-line react-hooks/exhaustive-deps -- only fire on status change |
|---|
| 188 | 202 | }, [status, sendCommand]); |
|---|
| 189 | 203 | |
|---|
| 190 | 204 | // Helper: add a message to the active session |
|---|
| .. | .. |
|---|
| 233 | 247 | }, |
|---|
| 234 | 248 | [] |
|---|
| 235 | 249 | ); |
|---|
| 250 | + |
|---|
| 251 | + // Update a message's content (e.g., voice transcript reflection) |
|---|
| 252 | + const updateMessageContent = useCallback((id: string, content: string) => { |
|---|
| 253 | + setMessages((prev) => { |
|---|
| 254 | + const next = prev.map((m) => |
|---|
| 255 | + m.id === id ? { ...m, content } : m |
|---|
| 256 | + ); |
|---|
| 257 | + setActiveSessionId((sessId) => { |
|---|
| 258 | + if (sessId) { |
|---|
| 259 | + messagesMapRef.current[sessId] = next; |
|---|
| 260 | + debouncedSave(messagesMapRef.current); |
|---|
| 261 | + } |
|---|
| 262 | + return sessId; |
|---|
| 263 | + }); |
|---|
| 264 | + return next; |
|---|
| 265 | + }); |
|---|
| 266 | + }, []); |
|---|
| 236 | 267 | |
|---|
| 237 | 268 | // Handle incoming WebSocket messages |
|---|
| 238 | 269 | useEffect(() => { |
|---|
| .. | .. |
|---|
| 322 | 353 | sendCommand("sessions"); |
|---|
| 323 | 354 | break; |
|---|
| 324 | 355 | } |
|---|
| 356 | + case "transcript": { |
|---|
| 357 | +        // Voice → text reflection: update the voice bubble's content with the transcribed text |
|---|
| 358 | + updateMessageContent(data.messageId, data.content); |
|---|
| 359 | + break; |
|---|
| 360 | + } |
|---|
| 325 | 361 | case "error": { |
|---|
| 326 | 362 | const msg: Message = { |
|---|
| 327 | 363 | id: generateId(), |
|---|
| .. | .. |
|---|
| 339 | 375 | return () => { |
|---|
| 340 | 376 | onMessageReceived.current = null; |
|---|
| 341 | 377 | }; |
|---|
| 342 | | - }, [onMessageReceived, sendCommand, addMessageToActive, syncActiveFromSessions]); |
|---|
| 378 | + }, [onMessageReceived, sendCommand, addMessageToActive, updateMessageContent, syncActiveFromSessions]); |
|---|
| 343 | 379 | |
|---|
| 344 | 380 | const sendTextMessage = useCallback( |
|---|
| 345 | 381 | (text: string) => { |
|---|
| .. | .. |
|---|
| 375 | 411 | addMessageToActive(msg); |
|---|
| 376 | 412 | try { |
|---|
| 377 | 413 | const base64 = await encodeAudioToBase64(audioUri); |
|---|
| 378 | | - const sent = wsVoice(base64); |
|---|
| 414 | + const sent = wsVoice(base64, "", id); |
|---|
| 379 | 415 | updateMessageStatus(id, sent ? "sent" : "error"); |
|---|
| 380 | 416 | } catch (err) { |
|---|
| 381 | 417 | console.error("Failed to encode audio:", err); |
|---|