Hi,
I am creating an app that can include videos or images in its data. While
@Attribute(.externalStorage)
helps with images, with AVAssets I would actually like access to the URL behind that data (it would be wasteful to load the data and then save it again just to obtain a URL).
One key component is to keep all of this clean enough so that I can use (private) CloudKit syncing with the resulting model.
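To illustrate, here is a minimal sketch of the kind of model I mean (names are placeholders, and whether the video half stays CloudKit-friendly is exactly my question):

import SwiftData

@Model
final class MediaItem {
    // Images: .externalStorage keeps large blobs out of the store itself.
    @Attribute(.externalStorage) var imageData: Data?

    // Videos: ideally only a reference lives here, so the file's URL can be
    // handed straight to AVAsset instead of round-tripping the bytes
    // through a Data property.
    var videoFileName: String?

    init(imageData: Data? = nil, videoFileName: String? = nil) {
        self.imageData = imageData
        self.videoFileName = videoFileName
    }
}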
All the best
Christoph
Audio
Dive into the technical aspects of audio on your device, including codecs, format support, and customization options.
Good day, ladies and gents.
I have an application that reads audio from the microphone. I'd like it to also be able to read from the Mac's audio output stream. (A bonus would be if it could detect when the Mac is playing music.)
I could eventually figure it out by reading the docs, but if someone can give me a hint, I'd be very grateful and would owe you the libation of your choice.
Here's the code used to set up the AudioUnit:
-(NSString*) configureAU
{
    AudioComponent component = NULL;
    AudioComponentDescription description;
    OSStatus err = noErr;
    UInt32 param;
    AURenderCallbackStruct callback;

    if( audioUnit ) { AudioComponentInstanceDispose( audioUnit ); audioUnit = NULL; } // was CloseComponent

    // Open the AudioOutputUnit
    description.componentType = kAudioUnitType_Output;
    description.componentSubType = kAudioUnitSubType_HALOutput;
    description.componentManufacturer = kAudioUnitManufacturer_Apple;
    description.componentFlags = 0;
    description.componentFlagsMask = 0;
    if( (component = AudioComponentFindNext( NULL, &description )) )
    {
        err = AudioComponentInstanceNew( component, &audioUnit );
        if( err != noErr ) { audioUnit = NULL; return [NSString stringWithFormat: @"Couldn't open AudioUnit component (ID=%d)", err]; }
    }

    // Configure the AudioOutputUnit:
    // You must enable the Audio Unit (AUHAL) for input and output for the same device.
    // When using AudioUnitSetProperty the 4th parameter in the method refers to an AudioUnitElement.
    // When using an AudioOutputUnit for input the element will be '1' and the output element will be '0'.
    param = 1; // Enable input on the AUHAL
    err = AudioUnitSetProperty( audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &param, sizeof(UInt32) );
    chkerr("Couldn't set first EnableIO prop (enable input) (ID=%d)");
    param = 0; // Disable output on the AUHAL
    err = AudioUnitSetProperty( audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &param, sizeof(UInt32) );
    chkerr("Couldn't set second EnableIO property on the audio unit (disable output) (ID=%d)");

    param = sizeof(AudioDeviceID); // Select the default input device
    AudioObjectPropertyAddress OutputAddr = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
    err = AudioObjectGetPropertyData( kAudioObjectSystemObject, &OutputAddr, 0, NULL, &param, &inputDeviceID );
    chkerr("Couldn't get default input device (ID=%d)");

    // Set the current device to the default input unit
    err = AudioUnitSetProperty( audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDeviceID, sizeof(AudioDeviceID) );
    chkerr("Failed to hook up input device to our AudioUnit (ID=%d)");

    callback.inputProc = AudioInputProc; // Setup render callback, to be called when the AUHAL has input data
    callback.inputProcRefCon = self;
    err = AudioUnitSetProperty( audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(AURenderCallbackStruct) );
    chkerr("Could not install render callback on our AudioUnit (ID=%d)");

    param = sizeof(AudioStreamBasicDescription); // get hardware device format
    err = AudioUnitGetProperty( audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &deviceFormat, &param );
    chkerr("Could not get the hardware device's stream format (ID=%d)");

    audioChannels = MAX( deviceFormat.mChannelsPerFrame, 2 ); // Twiddle the format to our liking
    actualOutputFormat.mChannelsPerFrame = audioChannels;
    actualOutputFormat.mSampleRate = deviceFormat.mSampleRate;
    actualOutputFormat.mFormatID = kAudioFormatLinearPCM;
    actualOutputFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved;
    if( actualOutputFormat.mFormatID == kAudioFormatLinearPCM && audioChannels == 1 )
        actualOutputFormat.mFormatFlags &= ~kLinearPCMFormatFlagIsNonInterleaved;
#if __BIG_ENDIAN__
    actualOutputFormat.mFormatFlags |= kAudioFormatFlagIsBigEndian;
#endif
    actualOutputFormat.mBitsPerChannel = sizeof(Float32) * 8;
    actualOutputFormat.mBytesPerFrame = actualOutputFormat.mBitsPerChannel / 8;
    actualOutputFormat.mFramesPerPacket = 1;
    actualOutputFormat.mBytesPerPacket = actualOutputFormat.mBytesPerFrame;

    // Set the AudioOutputUnit output data format
    err = AudioUnitSetProperty( audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &actualOutputFormat, sizeof(AudioStreamBasicDescription) );
    chkerr("Could not change the stream format of the output device (ID=%d)");

    param = sizeof(UInt32); // Get the number of frames in the IO buffer(s)
    err = AudioUnitGetProperty( audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &audioSamples, &param );
    chkerr("Could not determine audio sample size (ID=%d)");

    err = AudioUnitInitialize( audioUnit ); // Initialize the AU
    chkerr("Could not initialize the AudioUnit (ID=%d)");

    // Allocate our audio buffers
    audioBuffer = [self allocateAudioBufferListWithNumChannels: actualOutputFormat.mChannelsPerFrame size: audioSamples * actualOutputFormat.mBytesPerFrame];
    if( audioBuffer == NULL ) { [self cleanUp]; return [NSString stringWithFormat: @"Could not allocate buffers for recording (ID=%d)", err]; }

    return nil;
}
(...again, it would be nice to know if audio output is active and thereby choose the clean output stream over the noisy mic, but that would be a different chunk of code, and my main question may just be a quick edit to this chunk.)
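(For what it's worth to later readers: on recent macOS, one way to reach the system output stream without a loopback device is ScreenCaptureKit's audio capture. The sketch below is only a starting point, assuming macOS 13 or later; the class itself is invented for illustration:)

import ScreenCaptureKit
import CoreMedia

// Sketch: capture system audio via ScreenCaptureKit. Nonzero levels in the
// incoming buffers also answer the "is the Mac playing anything?" question.
final class SystemAudioCapturer: NSObject, SCStreamOutput {
    private var stream: SCStream?

    func start() async throws {
        // A content filter is required even when only audio is wanted.
        let content = try await SCShareableContent.excludingDesktopWindows(false, onScreenWindowsOnly: true)
        guard let display = content.displays.first else { return }
        let filter = SCContentFilter(display: display, excludingWindows: [])

        let config = SCStreamConfiguration()
        config.capturesAudio = true               // system audio
        config.excludesCurrentProcessAudio = true // don't record ourselves

        let stream = SCStream(filter: filter, configuration: config, delegate: nil)
        try stream.addStreamOutput(self, type: .audio, sampleHandlerQueue: .global())
        try await stream.startCapture()
        self.stream = stream
    }

    func stream(_ stream: SCStream, didOutputSampleBuffer sampleBuffer: CMSampleBuffer, of type: SCStreamOutputType) {
        guard type == .audio else { return }
        // Inspect or record the audio here.
    }
}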
Thanks for your attention! ==Dave
[p.s. if i get more than one useful answer, can i "Accept" more than one, to spread the credit around?]
{pps: of course, the code lines up prettier in a monospaced font!}
I might have misunderstood the docs, but is Call Translation going to be available for VoIP applications? E.g., in an already connected VoIP call, would it be possible for Call Translation to be enabled on an iOS 26 and Apple Intelligence supported device?
I have personally tried it, and it doesn't look like VoIP is supported, but I would love to confirm this.
reference: https://developer.apple.com/documentation/callkit/cxsettranslatingcallaction/
Hello! I'm using AVFoundation to preview video and audio from a selected device, and I'm trying to use AVAudioEngine to preview the audio in real time, but I can't work out how to select the input device; I only ever hear my own microphone in real time.
So far I'm using AVCaptureAudioPreviewOutput to hear the audio in real time, but I think it has a delay.
On iOS this works easily with AVAudioEngine, but on macOS... bruh.
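For reference, one commonly suggested approach on macOS is to point the input node's underlying AUHAL at a specific device before starting the engine. This is a sketch under my own assumptions (the AudioDeviceID comes from Core Audio device enumeration elsewhere):

import AVFoundation
import AudioToolbox

// Sketch: select a capture device for AVAudioEngine on macOS by setting
// kAudioOutputUnitProperty_CurrentDevice on the input node's audio unit.
func setInputDevice(_ deviceID: AudioDeviceID, on engine: AVAudioEngine) -> OSStatus {
    guard let audioUnit = engine.inputNode.audioUnit else {
        return kAudioUnitErr_Uninitialized
    }
    var device = deviceID
    return AudioUnitSetProperty(audioUnit,
                                kAudioOutputUnitProperty_CurrentDevice,
                                kAudioUnitScope_Global,
                                0,
                                &device,
                                UInt32(MemoryLayout<AudioDeviceID>.size))
}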
I’m working on a macOS app, written in Swift. My goal is to record audio from an external microphone, e.g., one connected via USB.
For this, I’m using an AVCaptureSession and recording its output with an AVAssetWriter. This works perfectly in principle (and reliably with internal microphones, for example).
The problem occurs after my app has successfully completed the first recording and I then want to make additional recordings (which makes me think it might be process-dependent, because it works again after restarting the app).
The problem: Noisy or distorted-sounding audio files. In addition, the following error message appears in the Console from CoreAudio / its AudioConverter:
Input data proc returned inconsistent 512 packets for 2048 bytes; at 3 bytes per packet, that is actually 682 packets
It is easy to reproduce. This problem is reproducible even if I don’t configure the AVAssetWriter manually and instead let it receive its audioSettings using a preset from an AVOutputSettingsAssistant. I’m running on macOS 15.0 (24A335).
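For context, the pipeline is essentially the following. This is a compressed sketch with assumed details (device discovery, error handling, and teardown omitted), not code copied from the demo project:

import AVFoundation

// Sketch: external mic -> AVCaptureSession -> AVCaptureAudioDataOutput
// -> AVAssetWriter, with audio settings taken from an
// AVOutputSettingsAssistant preset as described above.
final class Recorder: NSObject, AVCaptureAudioDataOutputSampleBufferDelegate {
    let session = AVCaptureSession()
    private var writer: AVAssetWriter?
    private var writerInput: AVAssetWriterInput?

    func start(device: AVCaptureDevice, to url: URL) throws {
        session.addInput(try AVCaptureDeviceInput(device: device))
        let output = AVCaptureAudioDataOutput()
        output.setSampleBufferDelegate(self, queue: DispatchQueue(label: "audio.capture"))
        session.addOutput(output)

        let writer = try AVAssetWriter(outputURL: url, fileType: .m4a)
        let assistant = AVOutputSettingsAssistant(preset: .preset960x540)
        let input = AVAssetWriterInput(mediaType: .audio, outputSettings: assistant?.audioSettings)
        input.expectsMediaDataInRealTime = true
        writer.add(input)
        (self.writer, self.writerInput) = (writer, input)
        session.startRunning()
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let writer, let writerInput else { return }
        if writer.status == .unknown {
            writer.startWriting()
            writer.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
        }
        if writerInput.isReadyForMoreMediaData {
            writerInput.append(sampleBuffer)
        }
    }
}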
I’ve filed a feedback including a demo project → FB15333298 🎟️
I would greatly appreciate any help!
Have a great day,
Martin
Bug Report: ScreenCaptureKit System Audio Capture Crashes with EXC_BAD_ACCESS
Summary
When using ScreenCaptureKit to capture system audio for extended periods, the application crashes with EXC_BAD_ACCESS in Swift's error handling runtime. The crash occurs in swift_getErrorValue when trying to process an error from the SCStream delegate method didStopWithError. This appears to be a framework-level issue in ScreenCaptureKit or its underlying ReplayKit implementation.
Environment
macOS Sonoma 14.6.1
Swift 5.8
ScreenCaptureKit framework
Detailed Description
Our application captures system audio using ScreenCaptureKit's audio capture capabilities. After successfully capturing for several minutes (typically after 3-4 segments of 60-second recordings), the application crashes with an EXC_BAD_ACCESS error. The crash happens when the Swift runtime attempts to process an error in the SCStreamDelegate.stream(_:didStopWithError:) method.
The crash consistently occurs in swift_getErrorValue when attempting to access the class of what appears to be a null object. This suggests that the error being passed from the system framework to our delegate method is malformed or contains invalid memory.
Steps to Reproduce
Create an SCStream with audio capture enabled
Add audio output to the stream
Start capture and write audio data to disk
Allow the capture to run for several minutes (3-5 minutes typically triggers the issue)
The app will crash with EXC_BAD_ACCESS in swift_getErrorValue
Code Sample
func stream(_ stream: SCStream, didStopWithError error: Error) {
print("Stream stopped with error: \(error)") // Crash occurs before this line executes
}
func stream(_ stream: SCStream, didOutputSampleBuffer sampleBuffer: CMSampleBuffer, of type: SCStreamOutputType) {
guard type == .audio, sampleBuffer.isValid else { return }
// Process audio data...
}
Expected Behavior
The error should be properly propagated to the delegate method, allowing for graceful error handling and recovery.
Actual Behavior
The application crashes with EXC_BAD_ACCESS when the Swift runtime attempts to process the error in swift_getErrorValue.
Crash Log Details
Thread #35, queue = 'com.apple.NSXPCConnection.m-user.com.apple.replayd', stop reason = EXC_BAD_ACCESS (code=1, address=0x0)
frame #0: 0x0000000194c3088c libswiftCore.dylib`swift::_swift_getClass(void const*) + 8
frame #1: 0x0000000194c30104 libswiftCore.dylib`swift_getErrorValue + 40
frame #2: 0x00000001057fba30 shadow`NewScreenCaptureService.stream(stream=0x0000600002de6700, error=Swift.Error @ 0x000000016b7b5e30) at NEW+ScreenCaptureService.swift:365:15
frame #3: 0x00000001057fc050 shadow`@objc NewScreenCaptureService.stream(_:didStopWithError:) at <compiler-generated>:0
frame #4: 0x0000000219ec5ca0 ScreenCaptureKit`-[SCStreamManager stream:didStopWithError:] + 456
frame #5: 0x00000001ca68a5cc ReplayKit`-[RPScreenRecorder stream:didStopWithError:] + 84
frame #6: 0x00000001ca696ff8 ReplayKit`-[RPDaemonProxy stream:didStopWithError:] + 224
Printing description of stream._streamQueue:
error: ObjectiveC.id:4294967281:18: note: 'id' has been explicitly marked unavailable here
public typealias id = AnyObject
^
error: /var/folders/v4/3xg1hmp93gjd8_xlzmryf_wm0000gn/T/expr23-dfa421..cpp:1:65: 'id' is unavailable in Swift: 'id' is not available in Swift; use 'Any'
Swift._DebuggerSupport.stringForPrintObject(Swift.UnsafePointer<id>(bitPattern: 0x104ae08c0)!.pointee)
^~
ObjectiveC.id:2:18: note: 'id' has been explicitly marked unavailable here
public typealias id = AnyObject
^
warning: /var/folders/v4/3xg1hmp93gjd8_xlzmryf_wm0000gn/T/expr23-dfa421..cpp:5:7: initialization of variable '$__lldb_error_result' was never used; consider replacing with assignment to '_' or removing it
var $__lldb_error_result = __lldb_tmp_error
~~~~^~~~~~~~~~~~~~~~~~~~
_
Before the crash, we observed this error message in the console:
[ERROR] *****SCStream*****RemoteAudioQueueOperationHandlerWithError:1015 Error received from the remote queue -16665
Additional Context
The issue occurs consistently after approximately 3-4 successful audio segment recordings of 60 seconds each
Commenting out custom segment rotation logic does not prevent the crash
The crash involves XPC communication with Apple's ReplayKit daemon
The error appears to be corrupted or malformed when crossing the XPC boundary
Workarounds Attempted
Added proper thread safety for all published properties using DispatchQueue.main.async
Implemented more robust error handling in the delegate methods
None of these approaches prevented the crash since it occurs at the Swift runtime level before our code executes.
Impact
This issue prevents reliable long-duration audio capture using ScreenCaptureKit.
This bug significantly limits the usefulness of ScreenCaptureKit for any application requiring continuous system audio capture for more than a few minutes.
This issue might be related to a macOS bug where the system dialog indicates that the screen is being shared even though nothing is actually being shared; moreover, attempting to stop the sharing does nothing.
Here is the demo from Apple's site
This issue is specific to iOS 18.
When running this demo, after a gap in speaking, recognitionTask(with:resultHandler:) provides only the text spoken after the gap, not the concatenation of the old text and the newly spoken text.
I work on an iOS app that records video and audio. We've been getting reports for a while from users who are experiencing their video recordings being cut off. After investigating, I found that many users are receiving the AVAudioSessionMediaServicesWereResetNotification (.mediaServicesWereResetNotification) notification while recording. It's associated with the AVFoundationErrorDomain[-11819] error, which seems to indicate that the system audio daemon crashed. We have a handler registered to end the recording, show the user a prompt, and restart our AV sessions. However, from our logs this looks to be happening to hundreds of users every day and it's not an ideal user experience, so I would like to figure out why this is happening and if it's due to something that we're doing wrong.
The debug menu option to trigger the audio session reset is not of much use, because it can't be triggered unless you leave the app and go to system settings. So our app can't be recording video when the debug reset is triggered. So far I haven't found a way to reproduce the issue locally, but I can see from logs that it's happening to users.
I've found some posts online from developers experiencing similar issues, but none of them seem to directly address our issue. The system error doesn't include a userInfo dictionary, and as far as I can tell it's a system daemon crash so any logs would need to be captured from the OS.
Is there any way that I could get more information about what may be causing this error that I may have missed?
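For reference, our handler is conceptually similar to this sketch (simplified, with the recovery steps reduced to comments, not our real code):

import AVFoundation

// Sketch: observe the reset notification; after it fires, every audio
// object (players, recorders, engine nodes) is orphaned and must be rebuilt.
let resetObserver = NotificationCenter.default.addObserver(
    forName: AVAudioSession.mediaServicesWereResetNotification,
    object: AVAudioSession.sharedInstance(),
    queue: .main
) { _ in
    // 1. End the in-progress recording and prompt the user (app-specific).
    // 2. Reconfigure the AVAudioSession category/mode from scratch.
    // 3. Rebuild and restart the AV capture sessions.
}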
Does anyone know how to play a specific instrument sound on iPhone or iPad when a button on the screen is tapped?
I'm in the middle of building a music-learning app, and I'd like to assign single notes or chords to button-like frames on an on-screen keyboard or fretboard and have them sound when tapped.
Can this be done with SwiftUI code alone?
I remember there once being a General MIDI Level 1 instrument playback facility, but current OS versions don't seem to implement the same feature.
I'd appreciate any advice you can offer.
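One possible direction, offered as a sketch rather than a confirmed recommendation (the SoundFont file name is a placeholder, and the SwiftUI view layer is omitted): AVAudioEngine with AVAudioUnitSampler can play General MIDI-style instrument notes and can be driven from a SwiftUI Button action.

import AVFoundation
import AudioToolbox

// Sketch: play MIDI notes through a sampler loaded from a bundled SoundFont.
final class NotePlayer {
    private let engine = AVAudioEngine()
    private let sampler = AVAudioUnitSampler()

    init() throws {
        engine.attach(sampler)
        engine.connect(sampler, to: engine.mainMixerNode, format: nil)
        // "GeneralMIDI.sf2" is a hypothetical bundled file; program 0 = piano.
        if let url = Bundle.main.url(forResource: "GeneralMIDI", withExtension: "sf2") {
            try sampler.loadSoundBankInstrument(at: url,
                                                program: 0,
                                                bankMSB: UInt8(kAUSampler_DefaultMelodicBankMSB),
                                                bankLSB: UInt8(kAUSampler_DefaultBankLSB))
        }
        try engine.start()
    }

    // Call from a Button action; MIDI note 60 is middle C.
    func play(note: UInt8) { sampler.startNote(note, withVelocity: 100, onChannel: 0) }
    func stop(note: UInt8) { sampler.stopNote(note, onChannel: 0) }
}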
Since many users like me use Apple Music on Android, where the app is almost as feature-rich as on iOS, it would be fantastic if the developers could add the new iOS 26 features to the Android app, along with a minor UI change. I know it's challenging to implement Liquid Glass on Android hardware and design, but features like AutoMix, pronunciation, and translation could be added.
Kindly consider this request!
Sequoia 15.4.1 (24E263)
Xcode: 16.3 (16E140)
Logic Pro: 11.2.1
I’ve been developing a complex audio unit for macOS that works perfectly well in its own bespoke host app and is now well into its beta-testing stage.
It did take some effort to get it to work well in Logic Pro, however, and all was fine and working well until the following came up:
The AU part is an empty app extension with a framework containing its code.
The framework contains Swift code for the UI and C code for the DSP parts.
When the framework is compiled using the Swift 5 compiler the AU will run in Logic with no problems.
(I should also mention that the AU passes the strictest auval tests.)
But… when the framework is compiled with Swift 6 Logic Pro cannot load it.
Logic displays a message saying the audio unit could not be loaded and to contact the developer.
My own host app loads the AU perfectly well with the Swift 6 version, so I know there’s nothing wrong with the audio unit.
I cannot find any differences in any of the built output files except, of course, the actual binary code in the framework.
I’ve worked for hours on this and cannot find a solution other than to build the framework in Swift 5.
(I worked hard to get all the async code updated and working with Swift 6! so I feel a little cheated!)
What is happening?
Is this a bug in Logic?
Is this a bug in Swift 6 compiler/linker?
I’m at the Duh!, hands-in-the-air, tearing-out-hair stage! (Once again!)
Hi all,
I'm working on an audio visualizer app that plays files from the user's music library utilizing MediaPlayer and AVAudioEngine. I'm working on getting the music library functionality working before the visualizer aspect.
After setting up the engine for file playback, my app inexplicably crashes with an EXC_BREAKPOINT with code = 1. Usually this means I'm unwrapping a nil value, but I think I'm handling the optionals correctly with guard statements. I'm not able to pinpoint where it's crashing. I think it's either in the play function or the setupAudioEngine function. I removed the processAudioBuffer function and my code still crashes the same way, so it's not that. The device that I'm testing this on is running iOS 26 beta 3, although my app is designed for iOS 18 and above.
After commenting out code, it seems that the app crashes at the scheduleFile call in the play function, but I'm not fully sure.
Here is the setupAudioEngine function:
private func setupAudioEngine() {
    do {
        try AVAudioSession.sharedInstance().setCategory(.playback, mode: .default)
        try AVAudioSession.sharedInstance().setActive(true)
    } catch {
        print("Audio session error: \(error)")
    }

    engine.attach(playerNode)
    engine.attach(analyzer)
    engine.connect(playerNode, to: analyzer, format: nil)
    engine.connect(analyzer, to: engine.mainMixerNode, format: nil)

    analyzer.installTap(onBus: 0, bufferSize: 1024, format: nil) { [weak self] buffer, _ in
        self?.processAudioBuffer(buffer)
    }
}
Here is the play function:
func play(_ mediaItem: MPMediaItem) {
    guard let assetURL = mediaItem.assetURL else {
        print("No asset URL for media item")
        return
    }
    stop()
    do {
        audioFile = try AVAudioFile(forReading: assetURL)
        guard let audioFile else {
            print("Failed to create audio file")
            return
        }
        duration = Double(audioFile.length) / audioFile.fileFormat.sampleRate
        if !engine.isRunning {
            try engine.start()
        }
        playerNode.scheduleFile(audioFile, at: nil)
        playerNode.play()
        DispatchQueue.main.async { [weak self] in
            self?.isPlaying = true
            self?.startDisplayLink()
        }
    } catch {
        print("Error playing audio: \(error)")
        DispatchQueue.main.async { [weak self] in
            self?.isPlaying = false
            self?.stopDisplayLink()
        }
    }
}
Here is a link to my test project if you want to try it out for yourself:
https://github.com/aabagdi/VisualMan-example
Thanks!
I've got a problem with my app that I'm testing on my own phone.
I'm using AudioKit to generate tones as part of the app. Everything seems to work fine: sounds start, stop, etc. They play when the app is closed and when the phone is locked, so background audio is working.
However, I'm seeing an issue where, even when Stop has been pressed and the application exited, the app's base tone starts to play if I get a notification such as a text message.
If I then open the app and check the Start/Stop button, it says Start, so that hasn't been activated. If I tap Start, a second tone starts, and this one stops with the Stop button. However, the original tone that was set off by the incoming message carries on playing,
until I go to the open-apps view on the phone and slide the application upwards.
For the life of me, I can't figure out what's happening here.
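A hedged aside for anyone comparing notes: one common suspect in situations like this is an audio session left active after Stop, which leaves the system free to resume the app's audio after an interruption such as a notification. A minimal sketch of explicit deactivation (assuming the tone generator has already been stopped):

import AVFoundation

// Sketch: after stopping all tones, also relinquish the audio session so
// the system has no reason to restart this app's audio later.
func deactivateAudioSession() {
    do {
        try AVAudioSession.sharedInstance().setActive(false, options: .notifyOthersOnDeactivation)
    } catch {
        print("Failed to deactivate audio session: \(error)")
    }
}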
I have a PCM audio buffer (AVAudioPCMFormatInt16). When I try to play it using AVAudioPlayerNode / AVAudioEngine, an exception is thrown:
"[[busArray objectAtIndexedSubscript:(NSUInteger)element] setFormat:format error:&nsErr]: returned false, error Error Domain=NSOSStatusErrorDomain Code=-10868
(related thread https://forums.developer.apple.com/forums/thread/700497?answerId=780530022#780530022)
If I convert the buffer to AVAudioPCMFormatFloat32 playback works.
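For reference, that conversion looks roughly like this. A sketch with assumed details (non-interleaved output, a single input buffer), not the code from my app:

import AVFoundation

// Sketch: convert an Int16 PCM buffer to the Float32 format the engine accepts.
func makeFloat32(from int16Buffer: AVAudioPCMBuffer) -> AVAudioPCMBuffer? {
    let srcFormat = int16Buffer.format
    guard let dstFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32,
                                        sampleRate: srcFormat.sampleRate,
                                        channels: srcFormat.channelCount,
                                        interleaved: false),
          let converter = AVAudioConverter(from: srcFormat, to: dstFormat),
          let out = AVAudioPCMBuffer(pcmFormat: dstFormat,
                                     frameCapacity: int16Buffer.frameLength)
    else { return nil }

    var fed = false
    let status = converter.convert(to: out, error: nil) { _, outStatus in
        // Feed the single source buffer once, then report end of stream.
        if fed { outStatus.pointee = .endOfStream; return nil }
        fed = true
        outStatus.pointee = .haveData
        return int16Buffer
    }
    return status == .error ? nil : out
}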
My questions are:
1. Does AVAudioEngine / AVAudioPlayerNode require AVAudioPCMBuffer to be in the Float32 format? Is there a way I can configure it to accept another format instead for my application?
2. If 1 is yes, is this documented anywhere?
3. If 1 is yes, is this required format subject to change at any point?
Thanks!
I was looking to watch the "AVAudioEngine in Practice" session video from WWDC 2014 but I can't find it anywhere (https://forums.developer.apple.com/forums/thread/747008).
So experimenting with the new SpeechTranscriber, if I do:
let transcriber = SpeechTranscriber(
locale: locale,
transcriptionOptions: [],
reportingOptions: [.volatileResults],
attributeOptions: [.audioTimeRange]
)
only the final result has audio time ranges, not the volatile results.
Is this a performance consideration? If there is no performance problem, it would be nice to have the option to also get speech time ranges for volatile responses.
I'm not presenting the volatile text in the UI at all; I was just trying to keep statistics about the non-speech and speech noise levels, so that I can determine when the noise level falls below the noise floor for a while.
The goal here was to finalize the recording automatically when the noise level indicates that the user has finished speaking.
I have a simple AVAudioEngine graph as follows:
AVAudioPlayerNode -> AVAudioUnitEQ -> AVAudioUnitTimePitch -> AVAudioUnitReverb -> Main mixer node of AVAudioEngine.
Whenever I have AVAudioUnitTimePitch or AVAudioUnitVarispeed in the graph, I notice a very distinct crackling/popping sound in my AirPods Pro 2 when starting up the engine and playing the AVAudioPlayerNode, and I am unable to find the reason why. When I remove the node, the crackling completely goes away. How do I fix this problem, since I need the user to be able to control the pitch and rate of the audio during playback?
import AVKit
import SwiftUI

@Observable @MainActor
class AudioEngineManager {
    nonisolated private let engine = AVAudioEngine()
    private let playerNode = AVAudioPlayerNode()
    private let reverb = AVAudioUnitReverb()
    private let pitch = AVAudioUnitTimePitch()
    private let eq = AVAudioUnitEQ(numberOfBands: 10)

    private var audioFile: AVAudioFile?
    private var fadePlayPauseTask: Task<Void, Error>?
    private var playPauseCurrentFadeTime: Double = 0

    init() {
        setupAudioEngine()
    }

    private func setupAudioEngine() {
        guard let url = Bundle.main.url(forResource: "Song name goes here", withExtension: "mp3") else {
            print("Audio file not found")
            return
        }
        do {
            audioFile = try AVAudioFile(forReading: url)
        } catch {
            print("Failed to load audio file: \(error)")
            return
        }

        reverb.loadFactoryPreset(.mediumHall)
        reverb.wetDryMix = 50
        pitch.pitch = 0 // Pitch shift in cents; 0 leaves the pitch unchanged

        engine.attach(playerNode)
        engine.attach(pitch)
        engine.attach(reverb)
        engine.attach(eq)

        // Connect: player -> eq -> pitch -> reverb -> main mixer
        engine.connect(playerNode, to: eq, format: audioFile?.processingFormat)
        engine.connect(eq, to: pitch, format: audioFile?.processingFormat)
        engine.connect(pitch, to: reverb, format: audioFile?.processingFormat)
        engine.connect(reverb, to: engine.mainMixerNode, format: audioFile?.processingFormat)
    }

    func prepare() {
        guard let audioFile else { return }
        playerNode.scheduleFile(audioFile, at: nil)
    }

    func play() {
        DispatchQueue.global().async { [weak self] in
            guard let self else { return }
            engine.prepare()
            try? engine.start()

            DispatchQueue.main.async { [weak self] in
                guard let self else { return }
                playerNode.play()

                fadePlayPauseTask?.cancel()
                playPauseCurrentFadeTime = 0
                fadePlayPauseTask = Task { [weak self] in
                    guard let self else { return }
                    while true {
                        let volume = updateVolume(for: playPauseCurrentFadeTime / 0.1, rising: true)
                        // Ramp up volume until 1 is reached
                        if volume >= 1 { break }
                        engine.mainMixerNode.outputVolume = volume
                        try await Task.sleep(for: .milliseconds(10))
                        playPauseCurrentFadeTime += 0.01
                    }
                    engine.mainMixerNode.outputVolume = 1
                }
            }
        }
    }

    func pause() {
        fadePlayPauseTask?.cancel()
        playPauseCurrentFadeTime = 0
        fadePlayPauseTask = Task { [weak self] in
            guard let self else { return }
            while true {
                let volume = updateVolume(for: playPauseCurrentFadeTime / 0.1, rising: false)
                // Ramp down volume until 0 is reached
                if volume <= 0 { break }
                engine.mainMixerNode.outputVolume = volume
                try await Task.sleep(for: .milliseconds(10))
                playPauseCurrentFadeTime += 0.01
            }
            engine.mainMixerNode.outputVolume = 0
            playerNode.pause()

            // Shut down engine once ramp down completes
            DispatchQueue.global().async { [weak self] in
                guard let self else { return }
                engine.pause()
            }
        }
    }

    private func updateVolume(for x: Double, rising: Bool) -> Float {
        if rising {
            // Fade in: smoothstep from 0 to 1
            return Float(pow(x, 2) * (3.0 - 2.0 * x))
        } else {
            // Fade out: inverted smoothstep from 1 to 0
            return Float(1 - (pow(x, 2) * (3.0 - 2.0 * x)))
        }
    }

    func setPitch(_ value: Float) {
        pitch.pitch = value
    }

    func setReverbMix(_ value: Float) {
        reverb.wetDryMix = value
    }
}

struct ContentView: View {
    @State private var audioManager = AudioEngineManager()
    @State private var pitch: Float = 0
    @State private var reverb: Float = 0

    var body: some View {
        VStack(spacing: 20) {
            Text("🎵 Audio Player with Reverb & Pitch")
                .font(.title2)

            HStack {
                Button("Prepare") {
                    audioManager.prepare()
                }
                Button("Play") {
                    audioManager.play()
                }
                .padding()
                .background(Color.green)
                .foregroundColor(.white)
                .cornerRadius(10)
                Button("Pause") {
                    audioManager.pause()
                }
                .padding()
                .background(Color.red)
                .foregroundColor(.white)
                .cornerRadius(10)
            }

            VStack {
                Text("Pitch: \(Int(pitch)) cents")
                Slider(value: $pitch, in: -2400...2400, step: 100) { _ in
                    audioManager.setPitch(pitch)
                }
            }

            VStack {
                Text("Reverb Mix: \(Int(reverb))%")
                Slider(value: $reverb, in: 0...100, step: 1) { _ in
                    audioManager.setReverbMix(reverb)
                }
            }
        }
        .padding()
    }
}
Hello Apple Developer Community,
I am seeking clarification on the intended display behavior of HLS audio tracks within the iOS 26 (or current beta) native player, specifically concerning the NAME and LANGUAGE attributes of the EXT-X-MEDIA tag.
In our HLS manifests, we define alternative audio tracks using EXT-X-MEDIA tags, like so:
#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="ja",NAME="AUDIO-1",DEFAULT=YES,AUTOSELECT=YES,URI="audio_ja.m3u8"
#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="ja",NAME="AUDIO-2",URI="audio_en.m3u8"
Our observation is that when an audio track is selected and its name is displayed in the native iOS media controls (e.g., Control Center or within a full-screen video player's UI), the value specified in the NAME attribute ("AUDIO-1", "AUDIO-2") does not seem to be used. Instead, the display appears to derive from the LANGUAGE attribute ("ja", "en"), often showing the system's localized string for that language (e.g., "Japanese", "English").
We would like to understand the official or intended behavior regarding this.
Is it the expected behavior for the iOS native player to prioritize the LANGUAGE attribute (or its localized equivalent) over the NAME attribute for displaying the selected audio track's label?
If this is the intended design, what is the recommended best practice for developers who wish to present a custom, human-readable name for audio tracks (beyond the standard language name) in the native iOS UI?
Are there any specific AVPlayer properties or AVMediaSelectionOption considerations that would allow more granular control over this display, or is this entirely managed by the system based on the LANGUAGE attribute?
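For what it's worth, here is a sketch of how one might inspect what AVFoundation exposes per option (the calls are standard AVFoundation API, but whether the NAME attribute actually surfaces in commonMetadata here is precisely what we are unsure of):

import AVFoundation

// Sketch: log what each audible media selection option reports.
func logAudioOptions(for item: AVPlayerItem) async throws {
    guard let group = try await item.asset.loadMediaSelectionGroup(for: .audible) else { return }
    for option in group.options {
        // displayName is typically the localized language name.
        let titles = AVMetadataItem.metadataItems(from: option.commonMetadata,
                                                  filteredByIdentifier: .commonIdentifierTitle)
        print(option.displayName, option.extendedLanguageTag ?? "-", titles)
    }
}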
Any insights or official guidance on this behavior in iOS 26 (and potentially previous versions) would be greatly appreciated.
Thank you for your time and assistance.
So,
I've been wondering how fast an offline STT -> ML Prompt -> TTS round trip would be.
Interestingly, in many tests the SpeechTranscriber (STT) takes the bulk of the time, compared to generating a FoundationModel response and creating the audio using TTS.
E.g.
InteractionStatistics:
- listeningStarted: 21:24:23 4480 2423
- timeTillFirstAboveNoiseFloor: 01.794
- timeTillLastNoiseAboveFloor: 02.383
- timeTillFirstSpeechDetected: 02.399
- timeTillTranscriptFinalized: 04.510
- timeTillFirstMLModelResponse: 04.938
- timeTillMLModelResponse: 05.379
- timeTillTTSStarted: 04.962
- timeTillTTSFinished: 11.016
- speechLength: 06.054
- timeToResponse: 02.578
- transcript: This is a test.
- mlModelResponse: Sure! I'm ready to help with your test. What do you need help with?
Here, between my audio input ending and the text-to-speech starting to play (using AVSpeechUtterance), the total response time was 2.5s.
Of that time, the SpeechAnalyzer took 2.1s to finalize the transcript; the FoundationModel took only 0.4s to respond (and TTS started playing nearly instantly).
I'm already using reportingOptions: [.volatileResults, .fastResults], so it's probably as fast as possible right now?
I'm just surprised the STT takes so much longer compared to the other parts (they're all Core ML based, aren't they?)
I am having issues deploying my iOS app, which uses ShazamKit, so that it works on a Mac with Apple silicon.
When uploading the archive to App Store Connect I get:
ITMS-90863: Macs with Apple silicon support issue - The app links with libraries that aren’t present in macOS:
/usr/lib/swift/libswiftShazamKit.dylib
Is ShazamKit not supported for iOS apps that can run on Macs with Apple silicon? Or is there something I should fix in my setup or deployment?
Context:
I am currently developing an app using the Push-to-Talk (PTT) framework. I have reviewed both the PTT framework documentation and the CallKit demo project to better understand how to properly manage audio session activation and AVAudioEngine setup.
I am not activating the audio session manually. The audio session configuration is handled in the incomingPushResult or didBeginTransmitting callbacks from the PTChannelManagerDelegate.
I am using a single AVAudioEngine instance for both input and playback. The engine is started in the didActivate callback from the PTChannelManagerDelegate. When I receive a push in full duplex mode, I set the active participant to the user who is speaking.
Issue
When I attempt to talk while the other participant is already speaking, my input tap on the input node takes a few seconds to return valid PCM audio data. Initially, it returns an empty PCM audio block.
Details:
The audio session is already active and configured with .playAndRecord.
The input tap is already installed when the engine is started.
When I talk from a neutral state (no one is speaking), the system plays the standard "microphone activation" tone, which covers this initial delay. However, this does not happen when I am already receiving audio.
Assumptions / Current Setup
Because the audio session is active in play and record, I assumed that microphone input would be available immediately, even while receiving audio.
However, there seems to be a delay before valid input is delivered to the tap, only occurring when switching from a receive state to simultaneously talking.
Questions
Is this expected behavior when using the PTT framework in full duplex mode with a shared AVAudioEngine?
Should I be restarting or reconfiguring the engine or audio session when beginning to talk while receiving audio?
Is there a recommended pattern for managing microphone readiness in this scenario to avoid the initial empty PCM buffer?
Would using separate engines for input and output improve responsiveness?
I would like to confirm the correct approach to handling simultaneous talk and receive in full duplex mode using PTT framework and AVAudioEngine. Specifically, I need guidance on ensuring the microphone is ready to capture audio immediately without the delay seen in my current implementation.
Relevant Code Snippets
Engine Setup
func setup() {
    let input = audioEngine.inputNode
    do {
        try input.setVoiceProcessingEnabled(true)
    } catch {
        print("Could not enable voice processing \(error)")
        return
    }
    input.isVoiceProcessingAGCEnabled = false

    let output = audioEngine.outputNode
    let mainMixer = audioEngine.mainMixerNode

    audioEngine.connect(pttPlayerNode, to: mainMixer, format: outputFormat)
    audioEngine.connect(beepNode, to: mainMixer, format: outputFormat)
    audioEngine.connect(mainMixer, to: output, format: outputFormat)

    // Initialize converters
    converter = AVAudioConverter(from: inputFormat, to: outputFormat)!
    f32ToInt16Converter = AVAudioConverter(from: outputFormat, to: inputFormat)!

    audioEngine.prepare()
}
Input Tap Installation
func installTap() {
    guard AudioHandler.shared.checkMicrophonePermission() else {
        print("Microphone not granted for recording")
        return
    }
    guard !isInputTapped else {
        print("[AudioEngine] Input is already tapped!")
        return
    }

    let input = audioEngine.inputNode
    let microphoneFormat = input.inputFormat(forBus: 0)
    let microphoneDownsampler = AVAudioConverter(from: microphoneFormat, to: outputFormat)!
    let desiredFormat = outputFormat
    let inputFramesNeeded = AVAudioFrameCount((Double(OpusCodec.DECODED_PACKET_NUM_SAMPLES) * microphoneFormat.sampleRate) / desiredFormat.sampleRate)

    input.installTap(onBus: 0, bufferSize: inputFramesNeeded, format: input.inputFormat(forBus: 0)) { [weak self] buffer, when in
        guard let self = self else { return }

        // Output buffer: 1920 frames at 16kHz
        guard let outputBuffer = AVAudioPCMBuffer(pcmFormat: desiredFormat, frameCapacity: AVAudioFrameCount(OpusCodec.DECODED_PACKET_NUM_SAMPLES)) else { return }
        outputBuffer.frameLength = outputBuffer.frameCapacity

        let inputBlock: AVAudioConverterInputBlock = { inNumPackets, outStatus in
            outStatus.pointee = .haveData
            return buffer
        }

        var error: NSError?
        let converterResult = microphoneDownsampler.convert(to: outputBuffer, error: &error, withInputFrom: inputBlock)
        if converterResult != .haveData {
            DebugLogger.shared.print("Downsample error \(converterResult)")
        } else {
            self.handleDownsampledBuffer(outputBuffer)
        }
    }
    isInputTapped = true
}