I'm building a macOS app using SwiftUI, and I want to create a draggable floating webcam preview window.
Right now, I have something like this:
import SwiftUI
import AVFoundation
struct WebcamPreviewView: View {
let captureSession: AVCaptureSession?
var body: some View {
ZStack {
if let session = captureSession {
CameraPreviewLayer(session: session)
.clipShape(RoundedRectangle(cornerRadius: 50))
.overlay(
RoundedRectangle(cornerRadius: 50)
.strokeBorder(Color.white.opacity(0.2), lineWidth: 2)
)
} else {
VStack(spacing: 8) {
Image(systemName: "video.slash.fill")
.font(.system(size: 40))
.foregroundColor(.white.opacity(0.6))
Text("No Camera")
.font(.caption)
.foregroundColor(.white.opacity(0.6))
}
}
}
.shadow(color: .black.opacity(0.3), radius: 10, x: 0, y: 5)
}
}
struct CameraPreviewLayer: NSViewRepresentable {
let session: AVCaptureSession
func makeNSView(context: Context) -> NSView {
let view = NSView()
view.wantsLayer = true
let previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.frame = view.bounds
view.layer = previewLayer
return view
}
func updateNSView(_ nsView: NSView, context: Context) {
if let previewLayer = nsView.layer as? AVCaptureVideoPreviewLayer {
previewLayer.frame = nsView.bounds
}
}
}
This is my SwiftUI-side code for showing the webcam. I am trying to present it as a floating window that appears on top of all other apps' windows. However, even when the webcam preview is clicked, it should not steal focus from other apps; they should keep functioning exactly as they were.
import Cocoa
import SwiftUI
class WebcamPreviewWindow: NSPanel {
private static let defaultSize = CGSize(width: 200, height: 200)
private var initialClickLocation: NSPoint = .zero
init() {
let screenFrame = NSScreen.main?.visibleFrame ?? .zero
let origin = CGPoint(
x: screenFrame.maxX - Self.defaultSize.width - 20,
y: screenFrame.minY + 20
)
super.init(
contentRect: CGRect(origin: origin, size: Self.defaultSize),
styleMask: [.borderless],
backing: .buffered,
defer: false
)
isOpaque = false
backgroundColor = .clear
hasShadow = false
level = .screenSaver
collectionBehavior = [
.canJoinAllSpaces,
.fullScreenAuxiliary,
.stationary,
.ignoresCycle
]
ignoresMouseEvents = false
acceptsMouseMovedEvents = true
hidesOnDeactivate = false
becomesKeyOnlyIfNeeded = false
}
// MARK: - Focus Prevention
override var canBecomeKey: Bool { false }
override var canBecomeMain: Bool { false }
override var acceptsFirstResponder: Bool { false }
override func makeKey() {
}
override func mouseDown(with event: NSEvent) {
initialClickLocation = event.locationInWindow
}
override func mouseDragged(with event: NSEvent) {
let current = event.locationInWindow
let dx = current.x - initialClickLocation.x
let dy = current.y - initialClickLocation.y
let newOrigin = CGPoint(
x: frame.origin.x + dx,
y: frame.origin.y + dy
)
setFrameOrigin(newOrigin)
}
func show<Content: View>(with view: Content) {
let host = NSHostingView(rootView: view)
host.autoresizingMask = [.width, .height]
host.frame = contentLayoutRect
contentView = host
orderFrontRegardless()
}
func hide() {
orderOut(nil)
contentView = nil
}
}
This is my AppKit-side code that creates the floating window. However, when the webcam preview is clicked, my app becomes the focused app, and I have to click somewhere else to lose focus before I can use the other windows again.
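From the NSPanel documentation, a borderless panel still activates the app on click unless it is created with the .nonactivatingPanel style mask, so that is what I'm going to try next (a minimal sketch of just the changed init, assuming everything else stays the same):
super.init(
    contentRect: CGRect(origin: origin, size: Self.defaultSize),
    styleMask: [.borderless, .nonactivatingPanel], // receive mouse events without activating the app
    backing: .buffered,
    defer: false
)
Separately, setting isMovableByWindowBackground = true in the initializer might replace the manual mouseDown/mouseDragged handling, though I haven't verified how it interacts with the SwiftUI hosting view.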
I’m building a macOS video editor that uses AVComposition and AVVideoComposition.
Initially, my renderer creates a composition with some default video/audio tracks:
@Published var composition: AVComposition?
@Published var videoComposition: AVVideoComposition?
@Published var playerItem: AVPlayerItem?
Then I call a buildComposition() function that inserts all the default video segments.
Later in the editing workflow, the user may choose to add their own custom video clip. For this I have a function like:
private func handlePickedVideo(_ url: URL) {
guard url.startAccessingSecurityScopedResource() else {
print("Failed to access security-scoped resource")
return
}
let asset = AVURLAsset(url: url)
let videoTracks = asset.tracks(withMediaType: .video)
guard let firstVideoTrack = videoTracks.first else {
print("No video track found")
url.stopAccessingSecurityScopedResource()
return
}
renderer.insertUserVideoTrack(from: asset, track: firstVideoTrack)
url.stopAccessingSecurityScopedResource()
}
What I want to achieve is the same behavior professional video editors provide: after the composition has already been initialized and built, the user should be able to add a new video track and have the composition update live, meaning the preview player immediately reflects the change without everything being rebuilt from scratch manually.
How can I structure my AVComposition / AVMutableComposition and my rendering pipeline so that adding a new clip later updates the existing composition in real time (similar to Final Cut/Adobe Premiere), instead of needing to rebuild everything from zero?
You can find a runnable version of this entire setup at https://github.com/zaidbren/SimpleEditor
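To make the question concrete, this is the rough shape of what I'm attempting inside the renderer (a sketch only; insertUserVideoTrack's body and rebuildInstructions are my placeholder names, not working code):
private let mutableComposition = AVMutableComposition()
func insertUserVideoTrack(from asset: AVAsset, track: AVAssetTrack) async throws {
    // Append the picked clip at the end of the existing timeline, in place.
    let duration = try await asset.load(.duration)
    let newTrack = mutableComposition.addMutableTrack(
        withMediaType: .video,
        preferredTrackID: kCMPersistentTrackID_Invalid)
    try newTrack?.insertTimeRange(
        CMTimeRange(start: .zero, duration: duration),
        of: track,
        at: mutableComposition.duration)
    // Regenerate only the affected instructions, then hand the player a fresh item.
    rebuildInstructions() // placeholder for rebuilding videoComposition.instructions
    let item = AVPlayerItem(asset: mutableComposition)
    item.videoComposition = videoComposition
    playerItem = item
}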
Hello everyone,
I'm working on a screen recording app using ScreenCaptureKit and I've hit a strange issue. My app records the screen to an .mp4 file, and everything works perfectly as long as captureMicrophone is false.
In this case, I get a valid, playable .mp4 file.
However, as soon as I try to enable the microphone by setting streamConfig.captureMicrophone = true, the recording seems to work, but the final .mp4 file is corrupted and cannot be played by QuickTime or any other player. This happens whether capturesAudio (app audio) is on or off.
I've already added the "Privacy - Microphone Usage Description" (NSMicrophoneUsageDescription) to my Info.plist, so I don't think it's a permissions problem.
I have my logic split into a ScreenRecorder class that manages state and a CaptureEngine that handles the SCStream. Here is how I'm configuring my SCStream:
ScreenRecorder.swift
// This is my main SCStreamConfiguration
private var streamConfiguration: SCStreamConfiguration {
let streamConfig = SCStreamConfiguration()
// ... other HDR/preset config ...
// These are the problem properties
streamConfig.capturesAudio = isAudioCaptureEnabled
streamConfig.captureMicrophone = isMicCaptureEnabled // breaks it if true
streamConfig.excludesCurrentProcessAudio = false
streamConfig.showsCursor = false
if let region = selectedRegion, let display = currentDisplay {
// My region/frame logic (works fine)
let regionWidth = Int(region.frame.width)
let regionHeight = Int(region.frame.height)
streamConfig.width = regionWidth * scaleFactor
streamConfig.height = regionHeight * scaleFactor
// ... (sourceRect logic) ...
}
streamConfig.pixelFormat = kCVPixelFormatType_32BGRA
streamConfig.colorSpaceName = CGColorSpace.sRGB
streamConfig.minimumFrameInterval = CMTime(value: 1, timescale: 60)
return streamConfig
}
And here is how I'm setting up the SCRecordingOutput that writes the file:
ScreenRecorder.swift
private func initRecordingOutput(for region: ScreenPickerManager.SelectedRegion) throws {
let screenRecordingOutputURL = try RecordingWorkspace.createScreenRecordingVideoFile(
in: workspaceURL,
sessionIndex: sessionIndex
)
let recordingConfiguration = SCRecordingOutputConfiguration()
recordingConfiguration.outputURL = screenRecordingOutputURL
recordingConfiguration.outputFileType = .mp4
recordingConfiguration.videoCodecType = .hevc
let recordingOutput = SCRecordingOutput(configuration: recordingConfiguration, delegate: self)
self.recordingOutput = recordingOutput
}
Finally, my CaptureEngine adds these to the SCStream:
CaptureEngine.swift
class CaptureEngine: NSObject, @unchecked Sendable {
private(set) var stream: SCStream?
private var streamOutput: CaptureEngineStreamOutput?
// ... (dispatch queues) ...
func startCapture(configuration: SCStreamConfiguration, filter: SCContentFilter, recordingOutput: SCRecordingOutput) async throws {
let streamOutput = CaptureEngineStreamOutput()
self.streamOutput = streamOutput
do {
stream = SCStream(filter: filter, configuration: configuration, delegate: streamOutput)
// Add outputs for raw buffers (not used for file recording)
try stream?.addStreamOutput(streamOutput, type: .screen, sampleHandlerQueue: videoSampleBufferQueue)
try stream?.addStreamOutput(streamOutput, type: .audio, sampleHandlerQueue: audioSampleBufferQueue)
try stream?.addStreamOutput(streamOutput, type: .microphone, sampleHandlerQueue: micSampleBufferQueue)
// Add the file recording output
try stream?.addRecordingOutput(recordingOutput)
try await stream?.startCapture()
} catch {
logger.error("Failed to start capture: \(error.localizedDescription)")
throw error
}
}
// ... (stopCapture, etc.) ...
}
When captureMicrophone is false, I get a perfect .mp4 that plays everywhere; however, when it's true, I get a corrupted video that doesn't play at all.
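One thing I still need to rule out (my assumption, not a confirmed cause): I attach raw-buffer outputs for .audio and .microphone even when the corresponding configuration flags are off, and I always write .mp4. Something like this would tell me whether either is the problem:
// Only attach the raw-buffer outputs the configuration actually enables.
try stream?.addStreamOutput(streamOutput, type: .screen, sampleHandlerQueue: videoSampleBufferQueue)
if configuration.capturesAudio {
    try stream?.addStreamOutput(streamOutput, type: .audio, sampleHandlerQueue: audioSampleBufferQueue)
}
if configuration.captureMicrophone {
    try stream?.addStreamOutput(streamOutput, type: .microphone, sampleHandlerQueue: micSampleBufferQueue)
}
// And, in initRecordingOutput, trying .mov would show whether .mp4
// container finalization is what breaks when the mic track is present:
recordingConfiguration.outputFileType = .mov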
Hey everyone,
I'm stuck on a really frustrating AVFoundation problem. I'm building a video editor that uses a custom AVVideoCompositor to add effects, and I need the final output to be 60 FPS.
So basically, I create an AVMutableComposition to sequence my video clips. I create an AVMutableVideoComposition and set the frame rate to 60 FPS: videoComposition.frameDuration = CMTime(value: 1, timescale: 60)
I assign my CustomVideoCompositor class to the videoComposition.
I create an AVPlayerItem with the composition and video composition.
The Problem:
Playback Works: When I play the AVPlayerItem in an AVPlayer, it's perfect. It plays at a smooth 60 FPS, and my custom compositor's startRequest method is called 60 times per second.
Export Fails: When I try to export the exact same composition and video composition using AVAssetExportSession, the final .mp4 file is always 30 FPS (or 29.97).
I've logged inside my custom compositor during the export, and it's definitely being called only 30 times per second, so it's generating 30 frames. It seems like AVAssetExportSession is just dropping every other frame when it encodes the video.
My source videos are screen recordings that I captured with ScreenCaptureKit itself, with minimumFrameInterval set for 60 FPS.
Here is my export function. I'm using the AVAssetExportPresetHighestQuality preset:
func exportVideo(to outputURL: URL) async throws {
guard let composition = composition,
let videoComposition = videoComposition else {
throw VideoCompositionError.noValidVideos
}
try? FileManager.default.removeItem(at: outputURL)
guard let exportSession = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality // Is this the problem?
) else {
throw VideoCompositionError.trackCreationFailed
}
exportSession.outputFileType = .mp4
exportSession.videoComposition = videoComposition // This has the 60fps setting
try await exportSession.export(to: outputURL, as: .mp4)
}
I've created a bare-bones sample project that shows this exact bug in action. The resulting video is 60 FPS during playback, but only 30 FPS after export: https://github.com/zaidbren/SimpleEditor
My Question:
Why is AVAssetExportSession ignoring my 60 FPS frameDuration and defaulting to 30 FPS, even though AVPlayer respects it?
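In case the preset itself is the limiter, the workaround I'm experimenting with is an AVAssetReader + AVAssetWriter pipeline, where the reader's video-composition output is driven by the 60 FPS frameDuration directly (a sketch under that assumption, not a confirmed fix):
func exportWithWriter(composition: AVComposition,
                      videoComposition: AVVideoComposition,
                      to outputURL: URL) async throws {
    let reader = try AVAssetReader(asset: composition)
    let videoTracks = try await composition.loadTracks(withMediaType: .video)
    let readerOutput = AVAssetReaderVideoCompositionOutput(videoTracks: videoTracks, videoSettings: nil)
    readerOutput.videoComposition = videoComposition // the 60 fps frameDuration applies here
    reader.add(readerOutput)
    let writer = try AVAssetWriter(outputURL: outputURL, fileType: .mp4)
    let writerInput = AVAssetWriterInput(mediaType: .video, outputSettings: [
        AVVideoCodecKey: AVVideoCodecType.hevc,
        AVVideoWidthKey: Int(videoComposition.renderSize.width),
        AVVideoHeightKey: Int(videoComposition.renderSize.height)
    ])
    writerInput.expectsMediaDataInRealTime = false
    writer.add(writerInput)
    writer.startWriting()
    reader.startReading()
    writer.startSession(atSourceTime: .zero)
    // Drain every composed frame into the writer; nothing here re-times frames.
    await withCheckedContinuation { (continuation: CheckedContinuation<Void, Never>) in
        let queue = DispatchQueue(label: "export.writer")
        var finished = false
        writerInput.requestMediaDataWhenReady(on: queue) {
            while writerInput.isReadyForMoreMediaData && !finished {
                if let sample = readerOutput.copyNextSampleBuffer() {
                    writerInput.append(sample)
                } else {
                    finished = true
                    writerInput.markAsFinished()
                    continuation.resume()
                }
            }
        }
    }
    await writer.finishWriting()
    if let error = writer.error { throw error }
}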
I'm building a Swift video editor with AVFoundation and a custom compositor. Despite setting AVVideoComposition.frameDuration to 60 FPS, I'm seeing significant frame skipping during playback.
Console Output Shows Frame Skipping
Frame #0 at 0.0 ms (fps: 60.0)
Frame #2 at 33.333333333333336 ms (fps: 60.0)
Frame #6 at 100.0 ms (fps: 60.0)
Frame #10 at 166.66666666666666 ms (fps: 60.0)
Frame #32 at 533.3333333333334 ms (fps: 60.0)
Frame #62 at 1033.3333333333335 ms (fps: 60.0)
Frame #96 at 1600.0 ms (fps: 60.0)
Instead of frames every ~16.67ms (60 FPS), I'm getting irregular intervals, sometimes 33ms, 67ms, or hundreds of milliseconds apart.
Renderer.swift (Key Parts)
@MainActor
class Renderer: ObservableObject {
@Published var playerItem: AVPlayerItem?
private let assetManager: ProjectAssetManager?
private let compositorId: String
func buildComposition() async {
// ... load mouse moves/clicks data ...
let composition = AVMutableComposition()
guard let videoTrack = composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid
) else { return }
var currentTime = CMTime.zero
var layerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
// Insert video segments
for videoURL in videoURLs {
let asset = AVAsset(url: videoURL)
guard let assetVideoTrack = try? await asset.loadTracks(withMediaType: .video).first,
let duration = try? await asset.load(.duration) else { continue }
try? videoTrack.insertTimeRange(
CMTimeRange(start: .zero, duration: duration),
of: assetVideoTrack,
at: currentTime
)
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let transform = (try? await assetVideoTrack.load(.preferredTransform)) ?? .identity
layerInstruction.setTransform(transform, at: currentTime)
layerInstructions.append(layerInstruction)
currentTime = CMTimeAdd(currentTime, duration)
}
let videoComposition = AVMutableVideoComposition()
videoComposition.frameDuration = CMTime(value: 1, timescale: 60) // 60 FPS
// Set render size from first video
if let firstURL = videoURLs.first,
let firstTrack = try? await AVAsset(url: firstURL).loadTracks(withMediaType: .video).first,
let naturalSize = try? await firstTrack.load(.naturalSize),
let transform = try? await firstTrack.load(.preferredTransform) {
videoComposition.renderSize = CGSize(
width: abs(naturalSize.applying(transform).width),
height: abs(naturalSize.applying(transform).height)
)
}
let instruction = CompositorInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: currentTime)
instruction.layerInstructions = layerInstructions
instruction.compositorId = compositorId
videoComposition.instructions = [instruction]
videoComposition.customVideoCompositorClass = CustomVideoCompositor.self
let playerItem = AVPlayerItem(asset: composition)
playerItem.videoComposition = videoComposition
self.playerItem = playerItem
}
}
class CompositorInstruction: NSObject, AVVideoCompositionInstructionProtocol {
var timeRange: CMTimeRange = .zero
var enablePostProcessing: Bool = false
var containsTweening: Bool = false
var requiredSourceTrackIDs: [NSValue]?
var passthroughTrackID: CMPersistentTrackID = kCMPersistentTrackID_Invalid
var layerInstructions: [AVVideoCompositionLayerInstruction] = []
var compositorId: String = ""
}
class CustomVideoCompositor: NSObject, AVVideoCompositing {
var sourcePixelBufferAttributes: [String : Any]? = [
kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)
]
var requiredPixelBufferAttributesForRenderContext: [String : Any] = [
kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)
]
func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {}
func startRequest(_ asyncVideoCompositionRequest: AVAsynchronousVideoCompositionRequest) {
guard let sourceTrackID = asyncVideoCompositionRequest.sourceTrackIDs.first?.int32Value,
let sourcePixelBuffer = asyncVideoCompositionRequest.sourceFrame(byTrackID: sourceTrackID),
let outputBuffer = asyncVideoCompositionRequest.renderContext.newPixelBuffer() else {
asyncVideoCompositionRequest.finish(with: NSError(domain: "VideoCompositor", code: -1))
return
}
let videoComposition = asyncVideoCompositionRequest.renderContext.videoComposition
let frameDuration = videoComposition.frameDuration
let fps = Double(frameDuration.timescale) / Double(frameDuration.value)
let compositionTime = asyncVideoCompositionRequest.compositionTime
let seconds = CMTimeGetSeconds(compositionTime)
let frameInMilliseconds = seconds * 1000
let frameNumber = Int(round(seconds * fps))
print("Frame #\(frameNumber) at \(frameInMilliseconds) ms (fps: \(fps))")
asyncVideoCompositionRequest.finish(withComposedVideoFrame: outputBuffer)
}
func cancelAllPendingVideoCompositionRequests() {}
}
VideoPlayerViewModel
@MainActor
class VideoPlayerViewModel: ObservableObject {
let player = AVPlayer()
private let renderer: Renderer
func loadVideo() async {
await renderer.buildComposition()
if let playerItem = renderer.playerItem {
player.replaceCurrentItem(with: playerItem)
}
}
}
What I've Tried
- Frame skipping is consistent: the exact same timestamps appear on every playback
- The issue persists even with minimal processing (just passing buffers through)
- It occurs regardless of compositor complexity
Please note that I need every frame at exact millisecond intervals for my application. Frame loss or inconsistent frameInMilliseconds values are not acceptable.
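To check whether the skipping comes from real-time playback pressure rather than from the composition itself, my next test is to pull frames offline with AVAssetReader, which is not allowed to drop frames the way a live AVPlayer is (a small sketch, assuming the same composition and videoComposition objects as above):
let reader = try AVAssetReader(asset: composition)
let tracks = try await composition.loadTracks(withMediaType: .video)
let output = AVAssetReaderVideoCompositionOutput(videoTracks: tracks, videoSettings: nil)
output.videoComposition = videoComposition
reader.add(output)
reader.startReading()
var frameIndex = 0
while let sample = output.copyNextSampleBuffer() {
    // Offline delivery: every frame arrives exactly once, spaced by frameDuration.
    let pts = CMSampleBufferGetPresentationTimeStamp(sample)
    print("Frame #\(frameIndex) at \(CMTimeGetSeconds(pts) * 1000) ms")
    frameIndex += 1
}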
I’m building a macOS document-based app using SwiftUI’s DocumentGroup API. By default, when a document-based app launches, macOS automatically shows a file open panel or creates a new untitled document window.
However, I want to suppress this default behavior and instead show a custom welcome window when the app starts — something similar to how Xcode or Final Cut Pro shows a “Welcome” or “Start Project” screen first.
So basically, when the user opens the app normally, it should not open the document selector or create a document automatically. Instead, it should show my custom SwiftUI or AppKit window. Here is my code:
//MyApp.swift
import SwiftUI
import AppKit
@main
struct PhiaApp: App {
@NSApplicationDelegateAdaptor(AppDelegate.self) var appDelegate
var body: some Scene {
DocumentGroup(newDocument: MyDocumentModel()) { file in
EditorView(document: file.document, filePath: file.fileURL)
}
Settings { EmptyView() }
}
}
Currently I have this setup in my MyApp.swift, where I am using the AppDelegate to create a custom recording window with AppKit, and also defining the DocumentGroup to handle opening custom .myapp files. However, when I launch the app, it shows my AppKit window as well as the native macOS file selector for picking the file I want to open.
I want that when the user opens the app normally, it does not open the document selector or create a document automatically; instead, it shows my custom SwiftUI or AppKit window. However, the app should still fully support opening .myapp documents by double-clicking them in Finder, using the standard File → Open and File → New menu items, and having multiple document windows open at once.
This is my AppDelegate.swift file:
import AppKit
import SwiftUI
class AppDelegate: NSObject, NSApplicationDelegate {
var panel: Panel?
private var statusItem: NSStatusItem?
func applicationDidFinishLaunching(_ notification: Notification) {
showWindow()
}
// MARK: - Window control
func showWindow() {
if panel == nil {
let root = RecordingViewMain()
let newPanel = Panel(rootView: root)
if let screen = NSScreen.main {
let size = NSSize(width: 360, height: 240)
let origin = NSPoint(
x: screen.visibleFrame.midX - size.width / 2,
y: screen.visibleFrame.midY - size.height / 2
)
newPanel.setFrame(NSRect(origin: origin, size: size), display: true)
}
panel = newPanel
}
panel?.makeKeyAndOrderFront(nil)
}
func hideWindow() {
panel?.orderOut(nil)
}
@objc private func showPanelAction() {
showWindow()
}
@objc private func quitAction() {
NSApp.terminate(nil)
}
}
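The direction I'm currently testing is the NSApplicationDelegate untitled-file hook (a minimal sketch; I haven't confirmed it covers every launch path with DocumentGroup):
extension AppDelegate {
    // Returning false suppresses the automatic untitled document / open panel
    // at launch; File → New, File → Open, and Finder double-clicks still work.
    func applicationShouldOpenUntitledFile(_ sender: NSApplication) -> Bool {
        return false
    }
}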
Hello, I am trying to capture a screen recording (output.mp4) using ScreenCaptureKit, together with the mouse positions during the recording (mouse.json). The recording and the mouse positions (tracked from mouse-movement events only) need to be perfectly synced so that effects can be added in post-editing.
I started off by awaiting stream?.startCapture() and, right after that, starting my mouse-tracking function:
try await captureEngine.startCapture(configuration: config, filter: filter, recordingOutput: recordingOutput)
let captureStartTime = Date()
mouseTracker?.startTracking(with: captureStartTime)
But every time I tested, there was a clear inconsistency in sync between the recorded video and the recorded mouse positions.
All I want is to know when the recording "actually" starts, so that I can start the mouse capture at that same moment. I tried using the delegates for this, but I haven't been able to set them up properly.
import Foundation
import AVFAudio
import ScreenCaptureKit
import OSLog
import Combine
class CaptureEngine: NSObject, @unchecked Sendable {
private let logger = Logger()
private(set) var stream: SCStream?
private var streamOutput: CaptureEngineStreamOutput?
private var recordingOutput: SCRecordingOutput?
private let videoSampleBufferQueue = DispatchQueue(label: "com.francestudio.phia.VideoSampleBufferQueue")
private let audioSampleBufferQueue = DispatchQueue(label: "com.francestudio.phia.AudioSampleBufferQueue")
private let micSampleBufferQueue = DispatchQueue(label: "com.francestudio.phia.MicSampleBufferQueue")
func startCapture(configuration: SCStreamConfiguration, filter: SCContentFilter, recordingOutput: SCRecordingOutput) async throws {
// Create the stream output delegate.
let streamOutput = CaptureEngineStreamOutput()
self.streamOutput = streamOutput
do {
stream = SCStream(filter: filter, configuration: configuration, delegate: streamOutput)
try stream?.addStreamOutput(streamOutput, type: .screen, sampleHandlerQueue: videoSampleBufferQueue)
try stream?.addStreamOutput(streamOutput, type: .audio, sampleHandlerQueue: audioSampleBufferQueue)
try stream?.addStreamOutput(streamOutput, type: .microphone, sampleHandlerQueue: micSampleBufferQueue)
self.recordingOutput = recordingOutput
recordingOutput.delegate = self // this is the line that fails to compile (see error below)
try stream?.addRecordingOutput(recordingOutput)
try await stream?.startCapture()
} catch {
logger.error("Failed to start capture: \(error.localizedDescription)")
throw error
}
}
func stopCapture() async throws {
do {
try await stream?.stopCapture()
} catch {
logger.error("Failed to stop capture: \(error.localizedDescription)")
throw error
}
}
func update(configuration: SCStreamConfiguration, filter: SCContentFilter) async {
do {
try await stream?.updateConfiguration(configuration)
try await stream?.updateContentFilter(filter)
} catch {
logger.error("Failed to update the stream session: \(String(describing: error))")
}
}
func stopRecordingOutputForStream(_ recordingOutput: SCRecordingOutput) throws {
try self.stream?.removeRecordingOutput(recordingOutput)
}
}
// MARK: - SCRecordingOutputDelegate
extension CaptureEngine: SCRecordingOutputDelegate {
func recordingOutputDidStartRecording(_ recordingOutput: SCRecordingOutput) {
let startTime = Date()
logger.info("Recording output did start recording \(startTime)")
}
func recordingOutputDidFinishRecording(_ recordingOutput: SCRecordingOutput) {
logger.info("Recording output did finish recording")
}
func recordingOutput(_ recordingOutput: SCRecordingOutput, didFailWithError error: any Error) {
logger.error("Recording output failed with error: \(error.localizedDescription)")
}
}
private class CaptureEngineStreamOutput: NSObject, SCStreamOutput, SCStreamDelegate {
private let logger = Logger()
override init() {
super.init()
}
func stream(_ stream: SCStream, didOutputSampleBuffer sampleBuffer: CMSampleBuffer, of outputType: SCStreamOutputType) {
guard sampleBuffer.isValid else { return }
switch outputType {
case .screen:
break
case .audio:
break
case .microphone:
break
@unknown default:
logger.error("Encountered unknown stream output type:")
}
}
func stream(_ stream: SCStream, didStopWithError error: Error) {
logger.error("Stream stopped with error: \(error.localizedDescription)")
}
}
I am getting the error:
Value of type 'SCRecordingOutput' has no member 'delegate'
even though I am targeting macOS 15+ (macOS 26, actually) and macOS only.
What is the best way to achieve the desired result? Is there another, better way to do it?
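Two things I've noticed while digging, which I'm flagging as assumptions rather than answers: SCRecordingOutput appears to take its delegate through its initializer rather than a settable property (which would explain the compile error), and the screen output's sample buffers carry presentation timestamps that could anchor the mouse samples instead of Date(). Roughly:
// Pass the delegate at init time instead of assigning a property:
let recordingOutput = SCRecordingOutput(
    configuration: recordingConfiguration, // the SCRecordingOutputConfiguration built elsewhere
    delegate: self) // self: SCRecordingOutputDelegate
// And in the stream output, anchor on the first screen frame
// (firstFramePTS is a placeholder property of my own):
case .screen:
    if firstFramePTS == nil {
        firstFramePTS = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        // Mouse events stamped with CMClockGetTime(CMClockGetHostTimeClock())
        // can then be expressed relative to this timestamp.
    }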
Hello everyone, I am trying to implement ScreenCaptureKit in my project. I am targeting macOS 26 and followed Apple's official sample project on ScreenCaptureKit: https://developer.apple.com/documentation/ScreenCaptureKit/capturing-screen-content-in-macos
I used the exact official code in my app, but the results are not good. The video looks blurry and unclear, the colors are washed out, and honestly it looks like 720p.
The first video frame is the result when I integrate it into my app.
After that, I tried another app (built with Electron, also using ScreenCaptureKit), and its results were a lot better. The second video frame is from a recording made with that application; it looks nearly identical to the system display.
I tried multiple things, but with no impressive results. For my purposes, I want the final recorded video to be as good as the system's display quality. I also applied the HDR local-display and canonical-display capture options, but that didn't help either. I changed the container to .mov and the codec to .hevc, but still no improvement.
Why is the recorded video not as high quality as the display?
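For reference, the knobs I'm now looking at (my assumptions about what matters, not a confirmed answer) are capturing at the display's pixel size rather than its point size, plus the capture-resolution hint:
// SCDisplay reports points, so multiply by the backing scale factor
// (2x on Retina displays) to capture at native pixel resolution.
let scale = Int(NSScreen.main?.backingScaleFactor ?? 2)
streamConfig.width = display.width * scale
streamConfig.height = display.height * scale
streamConfig.captureResolution = .best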
I am trying to set up code signing for my macOS/Tauri app and I’m running into a problem with my Developer ID Application certificate in Keychain Access.
Steps I followed:
1. Generated a CSR on my Mac using Keychain Access → Certificate Assistant → Request a Certificate From a Certificate Authority.
2. Uploaded the CSR to the Apple Developer portal.
3. Downloaded the resulting .cer file and installed it in my login keychain.
The certificate appears under All Items, but it does not show under My Certificates, and there is no private key attached.
What I expected:
The certificate should pair with the private key created during CSR generation and show under My Certificates, allowing me to export a .p12 file.
What I’ve tried so far:
- Verified that the WWDR Intermediate Certificate is installed.
- Ensured I'm on the same Mac and the same login keychain where I created the CSR.
- Revoked and regenerated the certificate multiple times.
- Tried importing into both the login and System keychains.
Problem:
The certificate never links with the private key and therefore cannot be used for signing.
Has anyone experienced this issue or knows why the certificate would fail to pair with the private key in Keychain Access? Any workaround or fix would be greatly appreciated.
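For anyone comparing notes: running security find-identity -v -p codesigning in Terminal lists only the identities whose certificate and private key are correctly paired, which is how I confirmed mine never links up.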
I am trying to achieve an animated gradient effect whose values change over time based on the current seconds. I am using AVPlayer and AVMutableVideoComposition along with a custom instruction and a custom compositor class to generate the effect. I didn't want to load any video file; instead, I generate the video entirely from my own set of instructions, using a Metal compute shader, and make it 20 seconds long.
However, when I run the code, I get a frozen player with the gradient applied, and when I try to play the video, I get this warning in the console: Visual isTranslatable: NO; reason: observation failure: noObservations
My entire code:
import AVFoundation
import Metal
class GradientVideoCompositorTest: NSObject, AVVideoCompositing {
var sourcePixelBufferAttributes: [String: Any]? = [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
]
var requiredPixelBufferAttributesForRenderContext: [String: Any] = [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
]
private var renderContext: AVVideoCompositionRenderContext?
private var metalDevice: MTLDevice!
private var metalCommandQueue: MTLCommandQueue!
private var metalLibrary: MTLLibrary!
private var metalPipeline: MTLComputePipelineState!
override init() {
super.init()
setupMetal()
}
func setupMetal() {
guard let device = MTLCreateSystemDefaultDevice(),
let queue = device.makeCommandQueue(),
let library = device.makeDefaultLibrary(),
let function = library.makeFunction(name: "gradientShader") else {
fatalError("Metal setup failed")
}
self.metalDevice = device
self.metalCommandQueue = queue
self.metalLibrary = library
self.metalPipeline = try? device.makeComputePipelineState(function: function)
}
func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {
renderContext = newRenderContext
}
func startRequest(_ request: AVAsynchronousVideoCompositionRequest) {
guard let outputPixelBuffer = renderContext?.newPixelBuffer(),
let metalTexture = createMetalTexture(from: outputPixelBuffer) else {
request.finish(with: NSError(domain: "com.example.gradient", code: -1, userInfo: nil))
return
}
let time = Float(request.compositionTime.seconds)
renderGradient(to: metalTexture, time: time)
request.finish(withComposedVideoFrame: outputPixelBuffer)
}
private func createMetalTexture(from pixelBuffer: CVPixelBuffer) -> MTLTexture? {
var texture: MTLTexture?
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let textureDescriptor = MTLTextureDescriptor.texture2DDescriptor(
pixelFormat: .bgra8Unorm,
width: width,
height: height,
mipmapped: false
)
textureDescriptor.usage = [.shaderWrite, .shaderRead]
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
if let textureCache = createTextureCache(), let cvTexture = createCVMetalTexture(from: pixelBuffer, cache: textureCache) {
texture = CVMetalTextureGetTexture(cvTexture)
}
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
return texture
}
private func renderGradient(to texture: MTLTexture, time: Float) {
guard let commandBuffer = metalCommandQueue.makeCommandBuffer(),
let commandEncoder = commandBuffer.makeComputeCommandEncoder() else { return }
commandEncoder.setComputePipelineState(metalPipeline)
commandEncoder.setTexture(texture, index: 0)
var mutableTime = time
commandEncoder.setBytes(&mutableTime, length: MemoryLayout<Float>.size, index: 0)
let threadsPerGroup = MTLSize(width: 16, height: 16, depth: 1)
let threadGroups = MTLSize(
width: (texture.width + 15) / 16,
height: (texture.height + 15) / 16,
depth: 1
)
commandEncoder.dispatchThreadgroups(threadGroups, threadsPerThreadgroup: threadsPerGroup)
commandEncoder.endEncoding()
commandBuffer.commit()
}
private func createTextureCache() -> CVMetalTextureCache? {
var cache: CVMetalTextureCache?
CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, metalDevice, nil, &cache)
return cache
}
private func createCVMetalTexture(from pixelBuffer: CVPixelBuffer, cache: CVMetalTextureCache) -> CVMetalTexture? {
var cvTexture: CVMetalTexture?
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
CVMetalTextureCacheCreateTextureFromImage(
kCFAllocatorDefault,
cache,
pixelBuffer,
nil,
.bgra8Unorm,
width,
height,
0,
&cvTexture
)
return cvTexture
}
}
class GradientCompositionInstructionTest: NSObject, AVVideoCompositionInstructionProtocol {
var timeRange: CMTimeRange
var enablePostProcessing: Bool = true
var containsTweening: Bool = true
var requiredSourceTrackIDs: [NSValue]? = nil
var passthroughTrackID: CMPersistentTrackID = kCMPersistentTrackID_Invalid
init(timeRange: CMTimeRange) {
self.timeRange = timeRange
}
}
func createGradientVideoComposition(duration: CMTime, size: CGSize) -> AVMutableVideoComposition {
let composition = AVMutableComposition()
let instruction = GradientCompositionInstructionTest(timeRange: CMTimeRange(start: .zero, duration: duration))
let videoComposition = AVMutableVideoComposition()
videoComposition.customVideoCompositorClass = GradientVideoCompositorTest.self
videoComposition.renderSize = size
videoComposition.frameDuration = CMTime(value: 1, timescale: 30) // 30 FPS
videoComposition.instructions = [instruction]
return videoComposition
}
#include <metal_stdlib>
using namespace metal;
kernel void gradientShader(texture2d<float, access::write> output [[texture(0)]],
constant float &time [[buffer(0)]],
uint2 id [[thread_position_in_grid]]) {
float2 uv = float2(id) / float2(output.get_width(), output.get_height());
// Animated colors based on time
float3 color1 = float3(sin(time) * 0.8 + 0.1, 0.6, 1.0);
float3 color2 = float3(0.12, 0.99, cos(time) * 0.9 + 0.3);
// Linear interpolation for gradient
float3 gradientColor = mix(color1, color2, uv.y);
output.write(float4(gradientColor, 1.0), id);
}
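For completeness, this is the player-item setup I'm experimenting with, based on my assumption that an AVMutableComposition with no tracks reports a zero duration, leaving the player nothing to schedule (which would explain the frozen first frame):
func makeGradientPlayerItem(duration: CMTime, size: CGSize) -> AVPlayerItem {
    // Give the composition a real, non-zero duration with an empty track.
    let composition = AVMutableComposition()
    let track = composition.addMutableTrack(
        withMediaType: .video,
        preferredTrackID: kCMPersistentTrackID_Invalid)
    track?.insertEmptyTimeRange(CMTimeRange(start: .zero, duration: duration))
    let item = AVPlayerItem(asset: composition)
    item.videoComposition = createGradientVideoComposition(duration: duration, size: size)
    return item
}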