Merge pull request #192 from borglab/feature/fg_gradient
add a gradient descent factor graph optimizer
ProfFan authored Oct 12, 2020
2 parents 56af73c + f26cca3 commit cfcbe9a
Showing 5 changed files with 159 additions and 1 deletion.
15 changes: 15 additions & 0 deletions Sources/SwiftFusion/Inference/FactorGraph.swift
@@ -68,6 +68,21 @@ public struct FactorGraph {
return storage.values.lazy.map { $0.errors(at: x).reduce(0, +) }.reduce(0, +)
}

/// Returns the gradient of `error` at `x`.
  // TODO: If we make `VariableAssignments` `Differentiable`, then we can make `error`
// `@differentiable` and use `gradient(of: error)` instead of defining this custom gradient
// method.
public func errorGradient(at x: VariableAssignments) -> AllVectors {
var grad = x.tangentVectorZeros
for factors in storage.values {
guard let linearizableFactors = AnyVectorFactorArrayBuffer(factors) else {
continue
}
linearizableFactors.accumulateErrorGradient(at: x, into: &grad)
}
return grad
}

/// Returns the total error, at `x`, of all the linearizable factors.
public func linearizableError(at x: VariableAssignments) -> Double {
return storage.values.reduce(0) { (result, factors) in
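A minimal usage sketch of the new `errorGradient(at:)`, mirroring the arithmetic in the new `testGradient` below. It assumes `ScalarJacobianFactor` (the test factor whose error vector is `scalar * v`) is visible to the caller, which may require the test target:

import SwiftFusion

var x = VariableAssignments()
let vID = x.store(Vector2(1, 2))

var graph = FactorGraph()
// ScalarJacobianFactor: error vector is `scalar * v` (as in the tests below).
graph.store(ScalarJacobianFactor(edges: Tuple1(vID), scalar: 2))

// Gradient of ||2 * v||^2 = 4 * ||v||^2 is 8 * v.
let grad = graph.errorGradient(at: x)
print(grad[vID])  // Vector2(8.0, 16.0)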
47 changes: 46 additions & 1 deletion Sources/SwiftFusion/Inference/FactorsStorage.swift
@@ -56,6 +56,28 @@ extension ArrayStorage where Element: VectorFactor {
})
}
}

/// Increments `result` by the gradients of `self`'s errors at `x`.
func accumulateErrorGradient(
at x: VariableAssignments,
into result: inout AllVectors
) {
typealias Variables = Element.Variables
typealias LVariables = Element.LinearizableComponent.Variables
typealias GradVariables = LVariables.TangentVector
Variables.withBufferBaseAddresses(x) { varsBufs in
GradVariables.withMutableBufferBaseAddresses(&result) { gradBufs in
for factor in self {
let vars = Variables(at: factor.edges, in: varsBufs)
let (lFactor, lVars) = factor.linearizableComponent(at: vars)
let gradIndices = LVariables.linearized(lFactor.edges)
let grads = GradVariables(at: gradIndices, in: GradVariables.withoutMutation(gradBufs))
let newGrads = grads + gradient(at: lVars) { lFactor.errorVector(at: $0).squaredNorm }
newGrads.assign(into: gradIndices, in: gradBufs)
}
}
}
}
}
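The loop above asks Swift's reverse-mode autodiff for the gradient of each factor's squared error norm, then accumulates it into the per-variable tangent vectors. A self-contained sketch of that single building block, assuming a toolchain with differentiable Swift (`import _Differentiation`):

import _Differentiation

// Toy one-variable factor with error vector e(v) = s * v, so the
// squared-norm error is (s * v)^2 and its gradient is 2 * s^2 * v.
let s = 2.0
let g = gradient(at: 3.0) { (v: Double) -> Double in
  let e = s * v   // the factor's "error vector"
  return e * e    // its squared norm
}
// g == 24.0, matching 2 * s^2 * v = 2 * 4 * 3.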

extension ArrayStorage where Element == PPCATrackingFactor {
@@ -204,6 +226,13 @@ class VectorFactorArrayDispatch: FactorArrayDispatch {
final let linearized:
(_ self_: Self_, _ x: VariableAssignments) -> AnyGaussianFactorArrayBuffer

/// A function incrementing `result` by the gradients of `storage`'s factors' errors at `x`.
///
/// - Requires: `storage` is the address of an `ArrayStorage` whose `Element` has a
/// subclass-specific `VectorFactor` type.
final let accumulateErrorGradient:
(_ self_: Self_, _ x: VariableAssignments, _ result: inout AllVectors) -> ()

/// Creates an instance for elements of type `Element` using the given `Linearization`.
init<Element: VectorFactor, Linearization: LinearApproximationFactor>(
_ e: Type<Element>, linearization: Type<Linearization>
@@ -214,13 +243,15 @@
errorVectors = { self_, x in
.init(self_[unsafelyAssumingElementType: e].storage.errorVectors(at: x))
}

linearized = { self_, x in
.init(
self_[unsafelyAssumingElementType: e].storage.linearized(at: x)
as ArrayBuffer<Linearization>
)
}
accumulateErrorGradient = { self_, x, result in
self_[unsafelyAssumingElementType: e].storage.accumulateErrorGradient(at: x, into: &result)
}
super.init(e)
}

@@ -236,6 +267,9 @@ class VectorFactorArrayDispatch: FactorArrayDispatch {
self_[unsafelyAssumingElementType: e].storage.customLinearized(at: x)
)
}
accumulateErrorGradient = { self_, x, result in
self_[unsafelyAssumingElementType: e].storage.accumulateErrorGradient(at: x, into: &result)
}
super.init(e)
}

@@ -251,6 +285,9 @@ class VectorFactorArrayDispatch: FactorArrayDispatch {
self_[unsafelyAssumingElementType: e].storage.customLinearized(at: x)
)
}
accumulateErrorGradient = { self_, x, result in
self_[unsafelyAssumingElementType: e].storage.accumulateErrorGradient(at: x, into: &result)
}
super.init(e)
}
}
@@ -313,6 +350,14 @@ extension AnyArrayBuffer where Dispatch: VectorFactorArrayDispatch {
func linearized(at x: VariableAssignments) -> AnyGaussianFactorArrayBuffer {
dispatch.linearized(self.upcast, x)
}

/// Increments `result` by the gradients of `self`'s errors at `x`.
func accumulateErrorGradient(
at x: VariableAssignments,
into result: inout AllVectors
) {
dispatch.accumulateErrorGradient(self.upcast, x, &result)
}
}

// MARK: - Type-erased arrays of `GaussianFactor`s.
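All three `init` overloads above wire the same closure into the dispatch table; this is the library's existing closure-based type-erasure pattern, in which the concrete `Element` type is captured once at construction and reopened on every call. A simplified, self-contained sketch of the pattern (hypothetical names, not SwiftFusion's actual API):

// A dispatch table whose closures capture the concrete element type.
final class SumDispatch {
  let accumulate: (_ storage: Any, _ result: inout Double) -> Void

  init<Element: BinaryFloatingPoint>(_ e: Element.Type) {
    accumulate = { storage, result in
      // Safe: `storage` is always paired with the Element it was created with.
      let typed = storage as! [Element]
      result += typed.reduce(0) { $0 + Double($1) }
    }
  }
}

// The type-erased buffer forwards through the table, as AnyArrayBuffer does.
struct AnySumBuffer {
  let storage: Any
  let dispatch: SumDispatch

  init<Element: BinaryFloatingPoint>(_ elements: [Element]) {
    storage = elements
    dispatch = SumDispatch(Element.self)
  }

  func accumulate(into result: inout Double) {
    dispatch.accumulate(storage, &result)
  }
}

var total = 0.0
AnySumBuffer([1.0, 2.0, 3.0]).accumulate(into: &total)  // total == 6.0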
30 changes: 30 additions & 0 deletions Sources/SwiftFusion/Optimizers/GradientDescent.swift
@@ -0,0 +1,30 @@
// Copyright 2020 The SwiftFusion Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Optimizes the variables in a factor graph to minimize the error, using gradient descent.
public struct GradientDescent {
/// The fraction of the gradient to move per step.
public var learningRate: Double

/// Creates an instance with the given `learningRate`.
public init(learningRate: Double) {
self.learningRate = learningRate
}

  /// Moves `values` one gradient descent step: along the negative gradient of
  /// `objective`'s error at `values`, scaled by `learningRate`.
public func update(_ values: inout VariableAssignments, objective: FactorGraph) {
values.move(along: -learningRate * objective.errorGradient(at: values))
}
}
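A typical driving loop around this optimizer (a sketch only; the iteration cap and stopping tolerance here are arbitrary choices, not library defaults):

import SwiftFusion

/// Runs gradient descent on `graph` until the error stops improving.
func minimize(_ graph: FactorGraph, _ x: inout VariableAssignments) {
  let optimizer = GradientDescent(learningRate: 1e-2)
  var lastError = graph.error(at: x)
  for _ in 0..<10_000 {
    optimizer.update(&x, objective: graph)
    let error = graph.error(at: x)
    if lastError - error < 1e-8 { break }  // converged, or no longer descending
    lastError = error
  }
}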
25 changes: 25 additions & 0 deletions Tests/SwiftFusionTests/Inference/FactorGraphTests.swift
@@ -253,4 +253,29 @@ class FactorGraphTests: XCTestCase {

XCTFail("failed to find the global minimum after \(attemptCount) attempts")
}

/// Test the gradient of the error of a factor graph.
func testGradient() {
var vars = VariableAssignments()
let v1ID = vars.store(Vector2(1, 2))
let v2ID = vars.store(Vector2(3, 4))
let v3ID = vars.store(Vector3(5, 6, 7))

var graph = FactorGraph()
graph.store(ScalarJacobianFactor(edges: Tuple1(v1ID), scalar: 1))
graph.store(ScalarJacobianFactor(edges: Tuple1(v1ID), scalar: 2))
graph.store(ScalarJacobianFactor(edges: Tuple1(v2ID), scalar: 5))
graph.store(ScalarJacobianFactor(edges: Tuple1(v3ID), scalar: 10))

let grad = graph.errorGradient(at: vars)

// gradient of ||1 * v1||^2 + ||2 * v1||^2 at v1 = (1, 2)
XCTAssertEqual(grad[v1ID], Vector2(10, 20))

// gradient of ||5 * v2||^2 at v2 = (3, 4)
XCTAssertEqual(grad[v2ID], Vector2(150, 200))

// gradient of ||10 * v3||^2 at v3 = (5, 6, 7)
XCTAssertEqual(grad[v3ID], Vector3(1000, 1200, 1400))
}
}
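For reference, the expected values follow from the per-factor gradient: with error vector `s * v`, the gradient of `||s * v||^2` is `2 * s^2 * v`. So `v1` accumulates (2·1² + 2·2²)·(1, 2) = 10·(1, 2) = (10, 20); `v2` gets 2·5²·(3, 4) = (150, 200); and `v3` gets 2·10²·(5, 6, 7) = (1000, 1200, 1400).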
43 changes: 43 additions & 0 deletions Tests/SwiftFusionTests/Optimizers/GradientDescentTests.swift
@@ -0,0 +1,43 @@
// Copyright 2020 The SwiftFusion Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import SwiftFusion
import XCTest

final class GradientDescentTests: XCTestCase {
/// Test convergence for a simple Pose2SLAM graph.
func testPose2SLAM() {
var x = VariableAssignments()
let pose1ID = x.store(Pose2(Rot2(0.2), Vector2(0.5, 0.0)))
let pose2ID = x.store(Pose2(Rot2(-0.2), Vector2(2.3, 0.1)))
let pose3ID = x.store(Pose2(Rot2(.pi / 2), Vector2(4.1, 0.1)))
let pose4ID = x.store(Pose2(Rot2(.pi), Vector2(4.0, 2.0)))
let pose5ID = x.store(Pose2(Rot2(-.pi / 2), Vector2(2.1, 2.1)))

var graph = FactorGraph()
graph.store(BetweenFactor(pose2ID, pose1ID, Pose2(2.0, 0.0, .pi / 2)))
graph.store(BetweenFactor(pose3ID, pose2ID, Pose2(2.0, 0.0, .pi / 2)))
graph.store(BetweenFactor(pose4ID, pose3ID, Pose2(2.0, 0.0, .pi / 2)))
graph.store(BetweenFactor(pose5ID, pose4ID, Pose2(2.0, 0.0, .pi / 2)))
graph.store(PriorFactor(pose1ID, Pose2(0, 0, 0)))

var optimizer = GradientDescent(learningRate: 1e-2)
for _ in 0..<10000 {
optimizer.update(&x, objective: graph)
}

    // Test condition: pose 5 should coincide with pose 1, closing the loop.
XCTAssertEqual(between(x[pose1ID], x[pose5ID]).t.norm, 0.0, accuracy: 1e-2)
}
}
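For reference, each `optimizer.update(&x, objective: graph)` call above performs exactly one explicit step, equivalent to:

let grad = graph.errorGradient(at: x)
x.move(along: -1e-2 * grad)

Since the variables here are manifold-valued (`Pose2`), `move(along:)` applies each tangent-vector step through the manifold's retraction rather than by mutating raw coordinates, so the same optimizer handles both vector- and manifold-valued variables.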
