The SavitzkyGolay class with comments:
import Foundation

class SavitzkyGolay {
    static func coefficients(windowLength: Int, polynomialOrder: Int, derivativeOrder: Int = 0, delta: Int = 1) -> [Double] {
        // windowLength is the number of coefficients returned by this function.
        guard windowLength > 0 else {
            fatalError("windowLength must be positive")
        }
        // polynomialOrder is the order of the polynomial fitted to the noisy data. The coefficients are computed independently of the data itself.
        guard polynomialOrder < windowLength else {
            fatalError("polynomialOrder must be less than windowLength")
        }
        // derivativeOrder is the order of the derivative to compute. If it is zero, the data is only smoothed, not differentiated.
        // If derivativeOrder exceeds polynomialOrder, the derivative of the fitted polynomial is identically zero, so all-zero coefficients are returned.
        guard derivativeOrder <= polynomialOrder else {
            return [Double](repeating: 0, count: windowLength)
        }
        let (halfWindow, remainder) = windowLength.quotientAndRemainder(dividingBy: 2)
        var pos = Double(halfWindow)
        // pos is the position within the window at which the fitted polynomial is evaluated, i.e. the centre of the window. If windowLength is even, halfWindow (and therefore pos) lands on a sample rather than on the true centre, so subtract 0.5 to place pos halfway between the two middle samples.
        if remainder == 0 {
            pos -= 0.5
        }
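        // For example, windowLength = 5 gives pos = 2 (the middle sample), while windowLength = 4 gives pos = 1.5 (halfway between the two middle samples).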
        // Form the matrix A. Each column of A contains the powers of one of the values windowLength - pos - 1, ..., -pos (taken in descending order); the rows correspond to the powers 0 through polynomialOrder.
        let V = [Double](stride(from: Double(windowLength) - pos - 1, through: -pos, by: -1))
        let P = [Double](stride(from: 0, through: Double(polynomialOrder), by: 1))
        let A = P.map { exponent in
            V.map {
                pow($0, exponent)
            }
        }
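        // For example, with windowLength = 5, polynomialOrder = 2 and pos = 2:
        // V = [2, 1, 0, -1, -2]
        // A = [[1, 1, 1, 1, 1],
        //      [2, 1, 0, -1, -2],
        //      [4, 1, 0, 1, 4]]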
        // Form the vector B. It determines which order of derivative is returned.
        var B = [Double](repeating: 0, count: polynomialOrder + 1)
        // B[derivativeOrder] = derivativeOrder! / delta^derivativeOrder, which scales the result for the order of the derivative and for the spacing delta between two samples of the noisy data.
        B[derivativeOrder] = Double(factorial(derivativeOrder)) / pow(Double(delta), Double(derivativeOrder))
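        // For example, with polynomialOrder = 2, derivativeOrder = 0 and delta = 1, B = [1, 0, 0].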
        // Return the least-squares solution of Ax = B.
        return leastSquaresSolution(A: A, B: B)
    }
}
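To make the expected output concrete, here is a small sanity check (it assumes the factorial helper referenced above exists elsewhere in the project, since it is not shown): for a 5-point window and a quadratic fit, the standard Savitzky-Golay smoothing coefficients are [-3, 12, 17, 12, -3] / 35.

let smoothing = SavitzkyGolay.coefficients(windowLength: 5, polynomialOrder: 2)
// Expected, up to floating-point error:
// [-0.0857, 0.3429, 0.4857, 0.3429, -0.0857], i.e. [-3, 12, 17, 12, -3] / 35
print(smoothing)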
leastSquaresSolution with comments:
import Accelerate

func leastSquaresSolution(A: [[Double]], B: [Double]) -> [Double] {
    // Get the sparse representation of A. A sparse matrix stores only the nonzero entries: sparseMatrix() returns an array of doubles containing the nonzero values, together with information about where each column starts and which row each value belongs to (see the sparseMatrix() function for details).
    let sparseA = A.sparseMatrix()
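    // For example, in a compressed-sparse-column layout (the format Accelerate's SparseMatrixStructure describes), the 2x3 matrix
    //   [1, 0, 2]
    //   [0, 3, 0]
    // would be stored as values [1, 3, 2], row indices [0, 1, 0] and column starts [0, 1, 2, 3].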
    // Make mutable copies so that they can be accessed through withUnsafeMutableBufferPointer.
    var sparseAValuesCopy = sparseA.values
    var bValues = B
    // The vector x returned by this function has as many values as A has columns, i.e. as many values as A's transpose has rows, so it is initialised with that count.
    var xValues = [Double](repeating: 0, count: A.transpose().count)
    sparseAValuesCopy.withUnsafeMutableBufferPointer { valuesPtr in
        // Construct the matrix A in Ax = B.
        let a = SparseMatrix_Double(
            structure: sparseA.structure,
            data: valuesPtr.baseAddress!
        )
        bValues.withUnsafeMutableBufferPointer { bPtr in
            xValues.withUnsafeMutableBufferPointer { xPtr in
                // Construct the vectors B and x in Ax = B.
                let b = DenseVector_Double(
                    count: Int32(B.count),
                    data: bPtr.baseAddress!
                )
                let x = DenseVector_Double(
                    count: Int32(A.transpose().count),
                    data: xPtr.baseAddress!
                )
                // Try to find x with SparseSolve (this is where it seems to go wrong).
                let status = SparseSolve(SparseLSMR(), a, b, x, SparsePreconditionerDiagScaling)
                if status != SparseIterativeConverged {
                    fatalError("Failed to converge. Returned with error \(status).")
                }
            }
        }
    }
    // This should be x in Ax = B.
    return xValues
}
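To isolate whether the problem is in leastSquaresSolution itself, it can help to run it on a tiny system whose answer is known in advance. The sketch below assumes the sparseMatrix() and transpose() extensions behave as described in the comments, since they are not shown here:

// Hypothetical test, not part of the original code: the system
//   [1, 0]       [1]
//   [0, 1] x  =  [2]
//   [1, 1]       [3]
// has the (exact) least-squares solution x = [1, 2].
let testA: [[Double]] = [
    [1, 0],
    [0, 1],
    [1, 1]
]
let testB: [Double] = [1, 2, 3]
print(leastSquaresSolution(A: testA, B: testB)) // Expected: approximately [1.0, 2.0]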