I'm assuming you want to read the depth data for a certain point within the texture on the CPU?
It's a bit more straightforward in Metal shader code, but here is a quick extension that might help you:
Swift:
import CoreVideo
import Metal
import simd
extension CVPixelBuffer {
    /// Raw buffer contents. The base address is only valid while the buffer
    /// is locked — call `CVPixelBufferLockBaseAddress(_:_:)` first.
    var data: UnsafeRawBufferPointer? {
        let size = CVPixelBufferGetDataSize(self)
        return .init(start: CVPixelBufferGetBaseAddress(self), count: size)
    }

    /// Buffer dimensions (width, height) in pixels.
    var pixelSize: simd_int2 {
        simd_int2(Int32(width), Int32(height))
    }

    var width: Int {
        CVPixelBufferGetWidth(self)
    }

    var height: Int {
        CVPixelBufferGetHeight(self)
    }

    /// Samples the pixel nearest to `location`, given in normalized
    /// coordinates (0...1 on both axes; out-of-range values are clamped
    /// to the buffer edge).
    ///
    /// - Parameter location: Normalized sample position.
    /// - Returns: For `kCVPixelFormatType_DepthFloat32`, the depth value in
    ///   the x component (y/z/w are 0). For `kCVPixelFormatType_32BGRA`, the
    ///   pixel as (r, g, b, a) with each channel normalized to 0...1.
    ///   `nil` for empty buffers, lock failures, or unsupported formats.
    func sample(location: simd_float2) -> simd_float4? {
        let pixelSize = self.pixelSize
        guard pixelSize.x > 0 && pixelSize.y > 0 else { return nil }
        guard CVPixelBufferLockBaseAddress(self, .readOnly) == kCVReturnSuccess else { return nil }
        // Register the unlock *before* any further early return so the lock
        // cannot leak (the original deferred after the `data` guard).
        defer { CVPixelBufferUnlockBaseAddress(self, .readOnly) }
        guard let data = data, let base = data.baseAddress else { return nil }

        // Nearest-neighbour lookup: scale into pixel space, truncate, and
        // clamp to valid indices so location == 1.0 maps to the last pixel.
        let pix = location * simd_float2(pixelSize)
        let clamped = clamp(simd_int2(pix), min: .zero, max: pixelSize &- simd_int2(1, 1))
        let bytesPerRow = CVPixelBufferGetBytesPerRow(self)
        let row = Int(clamped.y)
        let column = Int(clamped.x)
        let rowPtr = base + row * bytesPerRow

        switch CVPixelBufferGetPixelFormatType(self) {
        case kCVPixelFormatType_DepthFloat32:
            // One 32-bit float per pixel; `column` indexes floats directly.
            let typed = rowPtr.assumingMemoryBound(to: Float.self)
            return .init(typed[column], 0, 0, 0)
        case kCVPixelFormatType_32BGRA:
            // Four bytes per pixel, laid out B, G, R, A. The row is bound to
            // UInt8, so the pixel's byte offset is column * 4 — indexing by
            // `column` alone would address a byte, not a pixel.
            let typed = rowPtr.assumingMemoryBound(to: UInt8.self)
            let i = column * 4
            let scale = 1 / Float(UInt8.max)
            return .init(Float(typed[i + 2]) * scale,  // R
                         Float(typed[i + 1]) * scale,  // G
                         Float(typed[i])     * scale,  // B
                         Float(typed[i + 3]) * scale)  // A
        default:
            // Unsupported pixel format.
            return nil
        }
    }
}