From af1543135d42adc2e97dc5243aeef7418cd3b00d Mon Sep 17 00:00:00 2001
From: Matthias Nott <mnott@mnsoft.org>
Date: Sat, 07 Mar 2026 08:39:26 +0100
Subject: [PATCH] feat: switch VoiceButton from on-device speech recognition to expo-audio recording
---
components/chat/VoiceButton.tsx | 158 +++++++++++++++++++++++++---------------------------
1 file changed, 76 insertions(+), 82 deletions(-)
diff --git a/components/chat/VoiceButton.tsx b/components/chat/VoiceButton.tsx
index 9ebaa82..93b357a 100644
--- a/components/chat/VoiceButton.tsx
+++ b/components/chat/VoiceButton.tsx
@@ -1,59 +1,34 @@
-import React, { useCallback, useEffect, useRef, useState } from "react";
+import React, { useCallback, useRef, useState } from "react";
import { Animated, Pressable, Text, View } from "react-native";
import * as Haptics from "expo-haptics";
import {
- ExpoSpeechRecognitionModule,
- useSpeechRecognitionEvent,
-} from "expo-speech-recognition";
+ useAudioRecorder,
+ RecordingPresets,
+ requestRecordingPermissionsAsync,
+ setAudioModeAsync,
+} from "expo-audio";
+import { stopPlayback } from "../../services/audio";
interface VoiceButtonProps {
- onTranscript: (text: string) => void;
+ onVoiceRecorded: (uri: string) => void;
}
const VOICE_BUTTON_SIZE = 72;
/**
- * Tap-to-toggle voice button using on-device speech recognition.
- * - Tap once: start listening
- * - Tap again: stop and send transcript
- * - Long-press while listening: cancel (discard)
+ * Tap-to-toggle voice button using expo-audio recording.
+ * Records audio and returns the file URI for the caller to send.
+ * - Tap once: start recording
+ * - Tap again: stop and send
+ * - Long-press while recording: cancel (discard)
*/
-export function VoiceButton({ onTranscript }: VoiceButtonProps) {
- const [isListening, setIsListening] = useState(false);
- const [transcript, setTranscript] = useState("");
+export function VoiceButton({ onVoiceRecorded }: VoiceButtonProps) {
+ const [isRecording, setIsRecording] = useState(false);
const pulseAnim = useRef(new Animated.Value(1)).current;
const glowAnim = useRef(new Animated.Value(0)).current;
const pulseLoop = useRef<Animated.CompositeAnimation | null>(null);
- const cancelledRef = useRef(false);
- // Speech recognition events
- useSpeechRecognitionEvent("start", () => {
- setIsListening(true);
- });
-
- useSpeechRecognitionEvent("end", () => {
- setIsListening(false);
- stopPulse();
-
- // Send transcript if we have one and weren't cancelled
- if (!cancelledRef.current && transcript.trim()) {
- onTranscript(transcript.trim());
- }
- setTranscript("");
- cancelledRef.current = false;
- });
-
- useSpeechRecognitionEvent("result", (event) => {
- const text = event.results[0]?.transcript ?? "";
- setTranscript(text);
- });
-
- useSpeechRecognitionEvent("error", (event) => {
- console.error("Speech recognition error:", event.error, event.message);
- setIsListening(false);
- stopPulse();
- setTranscript("");
- });
+ const recorder = useAudioRecorder(RecordingPresets.HIGH_QUALITY);
const startPulse = useCallback(() => {
pulseLoop.current = Animated.loop(
@@ -88,49 +63,73 @@
}).start();
}, [pulseAnim, glowAnim]);
- const startListening = useCallback(async () => {
- const result = await ExpoSpeechRecognitionModule.requestPermissionsAsync();
- if (!result.granted) return;
+ const startRecording = useCallback(async () => {
+ try {
+ await stopPlayback();
- cancelledRef.current = false;
- setTranscript("");
- startPulse();
+ const { granted } = await requestRecordingPermissionsAsync();
+ if (!granted) return;
- ExpoSpeechRecognitionModule.start({
- lang: "en-US",
- interimResults: true,
- continuous: true,
- });
- }, [startPulse]);
+ await setAudioModeAsync({
+ allowsRecording: true,
+ playsInSilentMode: true,
+ });
- const stopAndSend = useCallback(() => {
+ startPulse();
+ await recorder.prepareToRecordAsync();
+ recorder.record();
+ setIsRecording(true);
+ } catch (err) {
+ console.error("Failed to start recording:", err);
+ stopPulse();
+ setIsRecording(false);
+ }
+ }, [recorder, startPulse, stopPulse]);
+
+ const stopAndSend = useCallback(async () => {
stopPulse();
- cancelledRef.current = false;
- ExpoSpeechRecognitionModule.stop();
- }, [stopPulse]);
+ setIsRecording(false);
+ try {
+ await recorder.stop();
+ // Reset audio mode for playback
+ await setAudioModeAsync({
+ allowsRecording: false,
+ playsInSilentMode: true,
+ });
+ const uri = recorder.uri;
+ if (uri) {
+ onVoiceRecorded(uri);
+ }
+ } catch (err) {
+ console.error("Failed to stop recording:", err);
+ }
+ }, [recorder, stopPulse, onVoiceRecorded]);
- const cancelListening = useCallback(() => {
+ const cancelRecording = useCallback(async () => {
Haptics.notificationAsync(Haptics.NotificationFeedbackType.Warning);
stopPulse();
- cancelledRef.current = true;
- setTranscript("");
- ExpoSpeechRecognitionModule.abort();
- }, [stopPulse]);
+ setIsRecording(false);
+ try {
+ await recorder.stop();
+ } catch {
+ // ignore
+ }
+ }, [recorder, stopPulse]);
const handleTap = useCallback(async () => {
Haptics.impactAsync(Haptics.ImpactFeedbackStyle.Medium);
- if (isListening) {
- stopAndSend();
+ if (isRecording) {
+ await stopAndSend();
} else {
- await startListening();
+ await startRecording();
}
- }, [isListening, stopAndSend, startListening]);
+ }, [isRecording, stopAndSend, startRecording]);
const handleLongPress = useCallback(() => {
- if (isListening) {
- cancelListening();
+ if (isRecording) {
+ cancelRecording();
}
- }, [isListening, cancelListening]);
+ }, [isRecording, cancelRecording]);
return (
<View style={{ alignItems: "center", justifyContent: "center" }}>
@@ -141,7 +140,7 @@
width: VOICE_BUTTON_SIZE + 24,
height: VOICE_BUTTON_SIZE + 24,
borderRadius: (VOICE_BUTTON_SIZE + 24) / 2,
- backgroundColor: isListening ? "rgba(255, 159, 67, 0.12)" : "transparent",
+ backgroundColor: isRecording ? "rgba(255, 159, 67, 0.12)" : "transparent",
transform: [{ scale: pulseAnim }],
opacity: glowAnim,
}}
@@ -158,35 +157,30 @@
width: VOICE_BUTTON_SIZE,
height: VOICE_BUTTON_SIZE,
borderRadius: VOICE_BUTTON_SIZE / 2,
- backgroundColor: isListening ? "#FF9F43" : "#4A9EFF",
+ backgroundColor: isRecording ? "#FF9F43" : "#4A9EFF",
alignItems: "center",
justifyContent: "center",
- shadowColor: isListening ? "#FF9F43" : "#4A9EFF",
+ shadowColor: isRecording ? "#FF9F43" : "#4A9EFF",
shadowOffset: { width: 0, height: 4 },
shadowOpacity: 0.4,
shadowRadius: 12,
elevation: 8,
}}
>
- <Text style={{ fontSize: 28 }}>{isListening ? "⏹" : "🎤"}</Text>
+ <Text style={{ fontSize: 28 }}>{isRecording ? "⏹" : "🎤"}</Text>
</View>
</Pressable>
- {/* Label / transcript preview */}
+ {/* Label */}
<Text
style={{
- color: isListening ? "#FF9F43" : "#5A5A78",
+ color: isRecording ? "#FF9F43" : "#5A5A78",
fontSize: 11,
marginTop: 4,
- fontWeight: isListening ? "600" : "400",
- maxWidth: 200,
- textAlign: "center",
+ fontWeight: isRecording ? "600" : "400",
}}
- numberOfLines={2}
>
- {isListening
- ? transcript || "Listening..."
- : "Tap to talk"}
+ {isRecording ? "Recording..." : "Tap to talk"}
</Text>
</View>
);
--
Gitblit v1.3.1