Include full contents of all nested repositories
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2153
openclaw/apps/ios/Sources/Voice/TalkModeManager.swift
Normal file
File diff suppressed because it is too large
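
The collapsed diff above is the largest piece of this commit. The manager's surface can still be read off its call sites in the files below: TalkOrbOverlay.swift uses statusText, micLevel, and isListening, and calls userTappedOrb(). A sketch of that observed interface, for orientation only — this is inferred from the call sites, not the actual 2153-line file, and the property types are assumptions:

    import Observation

    // Inferred from call sites in TalkOrbOverlay.swift; not the real type.
    @MainActor
    @Observable
    final class TalkModeManagerSketch {
        var statusText: String = "Off"   // shown in the orb's status capsule
        var micLevel: Double = 0         // clamped to 0…1 by the orb; drives scale and level bar
        var isListening: Bool = false    // shows/hides the mic level bar

        func userTappedOrb() {}          // invoked by the orb's tap gesture
    }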
87
openclaw/apps/ios/Sources/Voice/TalkOrbOverlay.swift
Normal file
@@ -0,0 +1,87 @@
import SwiftUI

struct TalkOrbOverlay: View {
    @Environment(NodeAppModel.self) private var appModel
    @State private var pulse: Bool = false

    var body: some View {
        let seam = self.appModel.seamColor
        let status = self.appModel.talkMode.statusText.trimmingCharacters(in: .whitespacesAndNewlines)
        let mic = min(max(self.appModel.talkMode.micLevel, 0), 1)

        VStack(spacing: 14) {
            ZStack {
                Circle()
                    .stroke(seam.opacity(0.26), lineWidth: 2)
                    .frame(width: 320, height: 320)
                    .scaleEffect(self.pulse ? 1.15 : 0.96)
                    .opacity(self.pulse ? 0.0 : 1.0)
                    .animation(.easeOut(duration: 1.3).repeatForever(autoreverses: false), value: self.pulse)

                Circle()
                    .stroke(seam.opacity(0.18), lineWidth: 2)
                    .frame(width: 320, height: 320)
                    .scaleEffect(self.pulse ? 1.45 : 1.02)
                    .opacity(self.pulse ? 0.0 : 0.9)
                    .animation(.easeOut(duration: 1.9).repeatForever(autoreverses: false).delay(0.2), value: self.pulse)

                Circle()
                    .fill(
                        RadialGradient(
                            colors: [
                                seam.opacity(0.75 + (0.20 * mic)),
                                seam.opacity(0.40),
                                Color.black.opacity(0.55),
                            ],
                            center: .center,
                            startRadius: 1,
                            endRadius: 112))
                    .frame(width: 190, height: 190)
                    .scaleEffect(1.0 + (0.12 * mic))
                    .overlay(
                        Circle()
                            .stroke(seam.opacity(0.35), lineWidth: 1))
                    .shadow(color: seam.opacity(0.32), radius: 26, x: 0, y: 0)
                    .shadow(color: Color.black.opacity(0.50), radius: 22, x: 0, y: 10)
            }
            .contentShape(Circle())
            .onTapGesture {
                self.appModel.talkMode.userTappedOrb()
            }

            let agentName = self.appModel.activeAgentName.trimmingCharacters(in: .whitespacesAndNewlines)
            if !agentName.isEmpty {
                Text("Bot: \(agentName)")
                    .font(.system(.caption, design: .rounded).weight(.semibold))
                    .foregroundStyle(Color.white.opacity(0.70))
            }

            if !status.isEmpty, status != "Off" {
                Text(status)
                    .font(.system(.footnote, design: .rounded).weight(.semibold))
                    .foregroundStyle(Color.white.opacity(0.92))
                    .padding(.horizontal, 12)
                    .padding(.vertical, 8)
                    .background(
                        Capsule()
                            .fill(Color.black.opacity(0.40))
                            .overlay(
                                Capsule().stroke(seam.opacity(0.22), lineWidth: 1)))
            }

            if self.appModel.talkMode.isListening {
                Capsule()
                    .fill(seam.opacity(0.90))
                    .frame(width: max(18, 180 * mic), height: 6)
                    .animation(.easeOut(duration: 0.12), value: mic)
                    .accessibilityLabel("Microphone level")
            }
        }
        .padding(28)
        .onAppear {
            self.pulse = true
        }
        .accessibilityElement(children: .combine)
        .accessibilityLabel("Talk Mode \(status)")
    }
}
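
TalkOrbOverlay reads NodeAppModel from the SwiftUI environment, so whatever presents it must inject the model. A minimal hosting sketch — RootView, MainContent, and the listening gate are hypothetical; the only hard requirement the overlay imposes is .environment(appModel):

    import SwiftUI

    struct RootView: View {
        let appModel: NodeAppModel   // created and owned elsewhere in the app

        var body: some View {
            ZStack {
                MainContent()                         // placeholder for the app's real content
                if appModel.talkMode.isListening {    // any gating condition works here
                    TalkOrbOverlay()
                }
            }
            .environment(appModel)
        }
    }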
46
openclaw/apps/ios/Sources/Voice/VoiceTab.swift
Normal file
@@ -0,0 +1,46 @@
import SwiftUI

struct VoiceTab: View {
    @Environment(NodeAppModel.self) private var appModel
    @Environment(VoiceWakeManager.self) private var voiceWake
    @AppStorage("voiceWake.enabled") private var voiceWakeEnabled: Bool = false
    @AppStorage("talk.enabled") private var talkEnabled: Bool = false

    var body: some View {
        NavigationStack {
            List {
                Section("Status") {
                    LabeledContent("Voice Wake", value: self.voiceWakeEnabled ? "Enabled" : "Disabled")
                    LabeledContent("Listener", value: self.voiceWake.isListening ? "Listening" : "Idle")
                    Text(self.voiceWake.statusText)
                        .font(.footnote)
                        .foregroundStyle(.secondary)
                    LabeledContent("Talk Mode", value: self.talkEnabled ? "Enabled" : "Disabled")
                }

                Section("Notes") {
                    let triggers = self.voiceWake.activeTriggerWords
                    Group {
                        if triggers.isEmpty {
                            Text("Add wake words in Settings.")
                        } else if triggers.count == 1 {
                            Text("Say “\(triggers[0]) …” to trigger.")
                        } else if triggers.count == 2 {
                            Text("Say “\(triggers[0]) …” or “\(triggers[1]) …” to trigger.")
                        } else {
                            Text("Say “\(triggers.joined(separator: " …”, “")) …” to trigger.")
                        }
                    }
                    .foregroundStyle(.secondary)
                }
            }
            .navigationTitle("Voice")
            .onChange(of: self.voiceWakeEnabled) { _, newValue in
                self.appModel.setVoiceWakeEnabled(newValue)
            }
            .onChange(of: self.talkEnabled) { _, newValue in
                self.appModel.setTalkEnabled(newValue)
            }
        }
    }
}
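
VoiceTab only reflects state: both @AppStorage keys ("voiceWake.enabled", "talk.enabled") are written by toggles elsewhere, and the onChange handlers forward changes to the app model. Any control that writes the same defaults key will drive this screen — a hypothetical settings row, for illustration:

    import SwiftUI

    // Flipping this toggle updates VoiceTab's "Voice Wake" row and fires its
    // onChange handler, because both views share the same UserDefaults key.
    struct VoiceWakeToggleRow: View {
        @AppStorage(VoiceWakePreferences.enabledKey) private var voiceWakeEnabled = false

        var body: some View {
            Toggle("Voice Wake", isOn: self.$voiceWakeEnabled)
        }
    }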
495
openclaw/apps/ios/Sources/Voice/VoiceWakeManager.swift
Normal file
@@ -0,0 +1,495 @@
import AVFAudio
import Foundation
import Observation
import OpenClawKit
import Speech
import SwabbleKit

private func makeAudioTapEnqueueCallback(queue: AudioBufferQueue) -> @Sendable (AVAudioPCMBuffer, AVAudioTime) -> Void {
    { buffer, _ in
        // This callback is invoked on a realtime audio thread/queue. Keep it tiny and nonisolated.
        queue.enqueueCopy(of: buffer)
    }
}

/// A lock-guarded FIFO that hands audio buffers from the realtime tap to a
/// non-realtime consumer. Copies are deep so the engine can recycle its tap
/// buffer immediately.
private final class AudioBufferQueue: @unchecked Sendable {
    private let lock = NSLock()
    private var buffers: [AVAudioPCMBuffer] = []

    func enqueueCopy(of buffer: AVAudioPCMBuffer) {
        guard let copy = buffer.deepCopy() else { return }
        self.lock.lock()
        self.buffers.append(copy)
        self.lock.unlock()
    }

    func drain() -> [AVAudioPCMBuffer] {
        self.lock.lock()
        let drained = self.buffers
        self.buffers.removeAll(keepingCapacity: true)
        self.lock.unlock()
        return drained
    }

    func clear() {
        self.lock.lock()
        self.buffers.removeAll(keepingCapacity: false)
        self.lock.unlock()
    }
}
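
// Producer/consumer sketch (names from this file): the tap enqueues on the
// realtime audio thread, and the ~40 ms drain loop in startRecognition() below
// consumes on the main actor — no actor hop ever happens in the tap itself.
//
//     let queue = AudioBufferQueue()
//     inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat,
//                          block: makeAudioTapEnqueueCallback(queue: queue))
//     // later, off the audio thread:
//     for buffer in queue.drain() { request.append(buffer) }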

extension AVAudioPCMBuffer {
    fileprivate func deepCopy() -> AVAudioPCMBuffer? {
        let format = self.format
        let frameLength = self.frameLength
        guard let copy = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: frameLength) else {
            return nil
        }
        copy.frameLength = frameLength

        if let src = self.floatChannelData, let dst = copy.floatChannelData {
            let channels = Int(format.channelCount)
            let frames = Int(frameLength)
            for ch in 0..<channels {
                dst[ch].update(from: src[ch], count: frames)
            }
            return copy
        }

        if let src = self.int16ChannelData, let dst = copy.int16ChannelData {
            let channels = Int(format.channelCount)
            let frames = Int(frameLength)
            for ch in 0..<channels {
                dst[ch].update(from: src[ch], count: frames)
            }
            return copy
        }

        if let src = self.int32ChannelData, let dst = copy.int32ChannelData {
            let channels = Int(format.channelCount)
            let frames = Int(frameLength)
            for ch in 0..<channels {
                dst[ch].update(from: src[ch], count: frames)
            }
            return copy
        }

        return nil
    }
}

@MainActor
@Observable
final class VoiceWakeManager: NSObject {
    var isEnabled: Bool = false
    var isListening: Bool = false
    var statusText: String = "Off"
    var triggerWords: [String] = VoiceWakePreferences.loadTriggerWords()
    var lastTriggeredCommand: String?

    private let audioEngine = AVAudioEngine()
    private var speechRecognizer: SFSpeechRecognizer?
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private var tapQueue: AudioBufferQueue?
    private var tapDrainTask: Task<Void, Never>?

    private var lastDispatched: String?
    private var onCommand: (@Sendable (String) async -> Void)?
    private var userDefaultsObserver: NSObjectProtocol?
    private var suppressedByTalk: Bool = false

    override init() {
        super.init()
        self.triggerWords = VoiceWakePreferences.loadTriggerWords()
        self.userDefaultsObserver = NotificationCenter.default.addObserver(
            forName: UserDefaults.didChangeNotification,
            object: UserDefaults.standard,
            queue: .main,
            using: { [weak self] _ in
                Task { @MainActor in
                    self?.handleUserDefaultsDidChange()
                }
            })
    }

    @MainActor deinit {
        if let userDefaultsObserver = self.userDefaultsObserver {
            NotificationCenter.default.removeObserver(userDefaultsObserver)
        }
    }

    var activeTriggerWords: [String] {
        VoiceWakePreferences.sanitizeTriggerWords(self.triggerWords)
    }

    private func handleUserDefaultsDidChange() {
        let updated = VoiceWakePreferences.loadTriggerWords()
        if updated != self.triggerWords {
            self.triggerWords = updated
        }
    }

    func configure(onCommand: @escaping @Sendable (String) async -> Void) {
        self.onCommand = onCommand
    }

    func setEnabled(_ enabled: Bool) {
        self.isEnabled = enabled
        if enabled {
            Task { await self.start() }
        } else {
            self.stop()
        }
    }

    func setSuppressedByTalk(_ suppressed: Bool) {
        self.suppressedByTalk = suppressed
        if suppressed {
            _ = self.suspendForExternalAudioCapture()
            if self.isEnabled {
                self.statusText = "Paused"
            }
        } else {
            if self.isEnabled {
                Task { await self.start() }
            }
        }
    }

    func start() async {
        guard self.isEnabled else { return }
        if self.isListening { return }
        guard !self.suppressedByTalk else {
            self.isListening = false
            self.statusText = "Paused"
            return
        }

        if ProcessInfo.processInfo.environment["SIMULATOR_DEVICE_NAME"] != nil ||
            ProcessInfo.processInfo.environment["SIMULATOR_UDID"] != nil
        {
            // The iOS Simulator’s audio stack is unreliable for long-running microphone capture.
            // (We’ve observed CoreAudio deadlocks after TCC permission prompts.)
            self.isListening = false
            self.statusText = "Voice Wake isn’t supported on Simulator"
            return
        }

        self.statusText = "Requesting permissions…"

        let micOk = await Self.requestMicrophonePermission()
        guard micOk else {
            self.statusText = Self.permissionMessage(
                kind: "Microphone",
                status: AVAudioSession.sharedInstance().recordPermission)
            self.isListening = false
            return
        }

        let speechOk = await Self.requestSpeechPermission()
        guard speechOk else {
            self.statusText = Self.permissionMessage(
                kind: "Speech recognition",
                status: SFSpeechRecognizer.authorizationStatus())
            self.isListening = false
            return
        }

        self.speechRecognizer = SFSpeechRecognizer()
        guard self.speechRecognizer != nil else {
            self.statusText = "Speech recognizer unavailable"
            self.isListening = false
            return
        }

        do {
            try Self.configureAudioSession()
            try self.startRecognition()
            self.isListening = true
            self.statusText = "Listening"
        } catch {
            self.isListening = false
            self.statusText = "Start failed: \(error.localizedDescription)"
        }
    }

    func stop() {
        self.isEnabled = false
        self.isListening = false
        self.statusText = "Off"

        self.tapDrainTask?.cancel()
        self.tapDrainTask = nil
        self.tapQueue?.clear()
        self.tapQueue = nil

        self.recognitionTask?.cancel()
        self.recognitionTask = nil
        self.recognitionRequest = nil

        if self.audioEngine.isRunning {
            self.audioEngine.stop()
            self.audioEngine.inputNode.removeTap(onBus: 0)
        }

        try? AVAudioSession.sharedInstance().setActive(false, options: .notifyOthersOnDeactivation)
    }

    /// Temporarily releases the microphone so other subsystems (e.g. camera video capture) can record audio.
    /// Returns `true` when listening was active and was suspended.
    func suspendForExternalAudioCapture() -> Bool {
        guard self.isEnabled, self.isListening else { return false }

        self.isListening = false
        self.statusText = "Paused"

        self.tapDrainTask?.cancel()
        self.tapDrainTask = nil
        self.tapQueue?.clear()
        self.tapQueue = nil

        self.recognitionTask?.cancel()
        self.recognitionTask = nil
        self.recognitionRequest = nil

        if self.audioEngine.isRunning {
            self.audioEngine.stop()
            self.audioEngine.inputNode.removeTap(onBus: 0)
        }

        try? AVAudioSession.sharedInstance().setActive(false, options: .notifyOthersOnDeactivation)
        return true
    }

    func resumeAfterExternalAudioCapture(wasSuspended: Bool) {
        guard wasSuspended else { return }
        Task { await self.start() }
    }

    private func startRecognition() throws {
        self.recognitionTask?.cancel()
        self.recognitionTask = nil
        self.tapDrainTask?.cancel()
        self.tapDrainTask = nil
        self.tapQueue?.clear()
        self.tapQueue = nil

        let request = SFSpeechAudioBufferRecognitionRequest()
        request.shouldReportPartialResults = true
        self.recognitionRequest = request

        let inputNode = self.audioEngine.inputNode
        inputNode.removeTap(onBus: 0)

        let recordingFormat = inputNode.outputFormat(forBus: 0)

        let queue = AudioBufferQueue()
        self.tapQueue = queue
        let tapBlock: @Sendable (AVAudioPCMBuffer, AVAudioTime) -> Void = makeAudioTapEnqueueCallback(queue: queue)
        inputNode.installTap(
            onBus: 0,
            bufferSize: 1024,
            format: recordingFormat,
            block: tapBlock)

        self.audioEngine.prepare()
        try self.audioEngine.start()

        let handler = self.makeRecognitionResultHandler()
        self.recognitionTask = self.speechRecognizer?.recognitionTask(with: request, resultHandler: handler)

        // Drain the tap queue roughly every 40 ms and feed the recognizer from
        // the main actor, keeping recognizer traffic off the audio thread.
        self.tapDrainTask = Task { [weak self] in
            guard let self, let queue = self.tapQueue else { return }
            while !Task.isCancelled {
                try? await Task.sleep(nanoseconds: 40_000_000)
                let drained = queue.drain()
                if drained.isEmpty { continue }
                for buf in drained {
                    request.append(buf)
                }
            }
        }
    }

    private nonisolated func makeRecognitionResultHandler() -> @Sendable (SFSpeechRecognitionResult?, Error?) -> Void {
        { [weak self] result, error in
            let transcript = result?.bestTranscription.formattedString
            let segments = result.flatMap { result in
                transcript.map { WakeWordSpeechSegments.from(transcription: result.bestTranscription, transcript: $0) }
            } ?? []
            let errorText = error?.localizedDescription

            Task { @MainActor in
                self?.handleRecognitionCallback(transcript: transcript, segments: segments, errorText: errorText)
            }
        }
    }

    private func handleRecognitionCallback(transcript: String?, segments: [WakeWordSegment], errorText: String?) {
        if let errorText {
            self.statusText = "Recognizer error: \(errorText)"
            self.isListening = false

            let shouldRestart = self.isEnabled
            if shouldRestart {
                Task {
                    try? await Task.sleep(nanoseconds: 700_000_000)
                    await self.start()
                }
            }
            return
        }

        guard let transcript else { return }
        guard let cmd = self.extractCommand(from: transcript, segments: segments) else { return }

        // Partial results repeat the same match many times; dispatch each command once.
        if cmd == self.lastDispatched { return }
        self.lastDispatched = cmd
        self.lastTriggeredCommand = cmd
        self.statusText = "Triggered"

        Task { [weak self] in
            guard let self else { return }
            await self.onCommand?(cmd)
            await self.startIfEnabled()
        }
    }

    private func startIfEnabled() async {
        let shouldRestart = self.isEnabled
        if shouldRestart {
            await self.start()
        }
    }

    private func extractCommand(from transcript: String, segments: [WakeWordSegment]) -> String? {
        Self.extractCommand(from: transcript, segments: segments, triggers: self.activeTriggerWords)
    }

    nonisolated static func extractCommand(
        from transcript: String,
        segments: [WakeWordSegment],
        triggers: [String],
        minPostTriggerGap: TimeInterval = 0.45) -> String?
    {
        let config = WakeWordGateConfig(triggers: triggers, minPostTriggerGap: minPostTriggerGap)
        return WakeWordGate.match(transcript: transcript, segments: segments, config: config)?.command
    }
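
    // Illustrative call with a hypothetical transcript (the actual matching
    // lives in SwabbleKit's WakeWordGate):
    //
    //     let cmd = VoiceWakeManager.extractCommand(
    //         from: "openclaw turn on the lights",
    //         segments: segments,
    //         triggers: ["openclaw"])
    //     // On a match, cmd is the text after the wake word, e.g.
    //     // "turn on the lights"; otherwise nil.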

    private static func configureAudioSession() throws {
        let session = AVAudioSession.sharedInstance()
        try session.setCategory(.playAndRecord, mode: .measurement, options: [
            .duckOthers,
            .mixWithOthers,
            .allowBluetoothHFP,
            .defaultToSpeaker,
        ])
        try session.setActive(true, options: [])
    }

    private nonisolated static func requestMicrophonePermission() async -> Bool {
        let session = AVAudioSession.sharedInstance()
        switch session.recordPermission {
        case .granted:
            return true
        case .denied:
            return false
        case .undetermined:
            break
        @unknown default:
            return false
        }

        return await self.requestPermissionWithTimeout { completion in
            AVAudioSession.sharedInstance().requestRecordPermission { ok in
                completion(ok)
            }
        }
    }

    private nonisolated static func requestSpeechPermission() async -> Bool {
        let status = SFSpeechRecognizer.authorizationStatus()
        switch status {
        case .authorized:
            return true
        case .denied, .restricted:
            return false
        case .notDetermined:
            break
        @unknown default:
            return false
        }

        return await self.requestPermissionWithTimeout { completion in
            SFSpeechRecognizer.requestAuthorization { authStatus in
                completion(authStatus == .authorized)
            }
        }
    }

    private nonisolated static func requestPermissionWithTimeout(
        _ operation: @escaping @Sendable (@escaping (Bool) -> Void) -> Void) async -> Bool
    {
        do {
            return try await AsyncTimeout.withTimeout(
                seconds: 8,
                onTimeout: { NSError(domain: "VoiceWake", code: 6, userInfo: [
                    NSLocalizedDescriptionKey: "permission request timed out",
                ]) },
                operation: {
                    await withCheckedContinuation(isolation: nil) { cont in
                        Task { @MainActor in
                            operation { ok in
                                cont.resume(returning: ok)
                            }
                        }
                    }
                })
        } catch {
            return false
        }
    }
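
    // AsyncTimeout.withTimeout comes from the app's support libraries and is
    // not part of this diff. A plausible shape for such a helper, assuming a
    // standard task-group race (sketch, not the real implementation):
    //
    //     enum AsyncTimeout {
    //         static func withTimeout<T: Sendable>(
    //             seconds: TimeInterval,
    //             onTimeout: @escaping @Sendable () -> Error,
    //             operation: @escaping @Sendable () async throws -> T) async throws -> T
    //         {
    //             try await withThrowingTaskGroup(of: T.self) { group in
    //                 group.addTask { try await operation() }
    //                 group.addTask {
    //                     try await Task.sleep(nanoseconds: UInt64(seconds * 1_000_000_000))
    //                     throw onTimeout()
    //                 }
    //                 let result = try await group.next()!
    //                 group.cancelAll()
    //                 return result
    //             }
    //         }
    //     }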

    private static func permissionMessage(
        kind: String,
        status: AVAudioSession.RecordPermission) -> String
    {
        switch status {
        case .denied:
            return "\(kind) permission denied"
        case .undetermined:
            return "\(kind) permission not granted"
        case .granted:
            // Reached only when the request still failed (e.g. timed out); keep the generic message.
            return "\(kind) permission denied"
        @unknown default:
            return "\(kind) permission denied"
        }
    }

    private static func permissionMessage(
        kind: String,
        status: SFSpeechRecognizerAuthorizationStatus) -> String
    {
        switch status {
        case .denied:
            return "\(kind) permission denied"
        case .restricted:
            return "\(kind) permission restricted"
        case .notDetermined:
            return "\(kind) permission not granted"
        case .authorized:
            // Reached only when the request still failed (e.g. timed out); keep the generic message.
            return "\(kind) permission denied"
        @unknown default:
            return "\(kind) permission denied"
        }
    }
}

#if DEBUG
extension VoiceWakeManager {
    func _test_handleRecognitionCallback(transcript: String?, segments: [WakeWordSegment], errorText: String?) {
        self.handleRecognitionCallback(transcript: transcript, segments: segments, errorText: errorText)
    }
}
#endif
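
A wiring sketch for the manager as a whole — handleVoiceCommand on NodeAppModel is an assumption; the VoiceWakeManager calls are the ones defined above:

    @MainActor
    func wireVoiceWake(appModel: NodeAppModel, voiceWake: VoiceWakeManager) {
        voiceWake.configure { command in
            await appModel.handleVoiceCommand(command)   // assumed app-model entry point
        }
        voiceWake.setEnabled(true)

        // Around any other audio capture, pair suspend with resume:
        let wasSuspended = voiceWake.suspendForExternalAudioCapture()
        // ... run the external capture ...
        voiceWake.resumeAfterExternalAudioCapture(wasSuspended: wasSuspended)
    }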
44
openclaw/apps/ios/Sources/Voice/VoiceWakePreferences.swift
Normal file
@@ -0,0 +1,44 @@
import Foundation

enum VoiceWakePreferences {
    static let enabledKey = "voiceWake.enabled"
    static let triggerWordsKey = "voiceWake.triggerWords"

    // Keep defaults aligned with the mac app.
    static let defaultTriggerWords: [String] = ["openclaw", "claude"]
    static let maxWords = 32
    static let maxWordLength = 64

    static func decodeGatewayTriggers(from payloadJSON: String) -> [String]? {
        guard let data = payloadJSON.data(using: .utf8) else { return nil }
        return self.decodeGatewayTriggers(from: data)
    }

    static func decodeGatewayTriggers(from data: Data) -> [String]? {
        struct Payload: Decodable { var triggers: [String] }
        guard let decoded = try? JSONDecoder().decode(Payload.self, from: data) else { return nil }
        return self.sanitizeTriggerWords(decoded.triggers)
    }

    static func loadTriggerWords(defaults: UserDefaults = .standard) -> [String] {
        defaults.stringArray(forKey: self.triggerWordsKey) ?? self.defaultTriggerWords
    }

    static func saveTriggerWords(_ words: [String], defaults: UserDefaults = .standard) {
        defaults.set(words, forKey: self.triggerWordsKey)
    }

    static func sanitizeTriggerWords(_ words: [String]) -> [String] {
        let cleaned = words
            .map { $0.trimmingCharacters(in: .whitespacesAndNewlines) }
            .filter { !$0.isEmpty }
            .prefix(Self.maxWords)
            .map { String($0.prefix(Self.maxWordLength)) }
        return cleaned.isEmpty ? Self.defaultTriggerWords : cleaned
    }

    static func displayString(for words: [String]) -> String {
        let sanitized = self.sanitizeTriggerWords(words)
        return sanitized.joined(separator: ", ")
    }
}
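
The sanitizer's behavior follows directly from the chain above — trim, drop empties, cap at 32 words of 64 characters each, and fall back to the defaults when nothing survives:

    let cleaned = VoiceWakePreferences.sanitizeTriggerWords(["  openclaw  ", "", "claude\n"])
    // cleaned == ["openclaw", "claude"]

    let fallback = VoiceWakePreferences.sanitizeTriggerWords(["   ", ""])
    // fallback == VoiceWakePreferences.defaultTriggerWords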