Oh hey, I think the Kalman filter works.

Goes a little crazy 'cause the measurements are bogus though.
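
The bogus part is measure/0 below, which hands back the exact same
reading and uncertainty every time. An untested sketch of a noisier
stand-in (assuming rand:normal/2 takes a mean and a variance, which I
think it does) would look something like:

    measure(TrueX, TrueVx) ->
        {#state { x = rand:normal(TrueX, 0.1),
                  vx = rand:normal(TrueVx, 0.1) },
         #uncertainty { err_x = 0.1, err_vx = 0.1 }}.

That's not in this commit, just a note for later.
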
1 files changed, 77 insertions(+), 27 deletions(-)

M apps/goatherd/src/alg_kalman.erl
M apps/goatherd/src/alg_kalman.erl +77 -27
@@ 17,45 17,95 @@ 
 
 
 -module(alg_kalman).
--export([run/4]).
+-export([run2/1]).
+
+% OK, to make life simpler for expository purposes we're just gonna use a
+% 1D position model
+
+-record(state, {x, vx}).
+-record(uncertainty, {err_x, err_vx}).
 
-%% Kalman Gain is also written Kn
-kalman_gain(EstimateUncertainty, MeasurementUncertainty) ->
-    EstimateUncertainty / (EstimateUncertainty +
-                           MeasurementUncertainty).
+%% Kalman Gain is also written Kn.
+%%
+%% These are all independent of what model we use so we just do a single
+%% dimension at a time instead of working with state/uncertainty
+%% structs.
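+%%
+%% e.g. with an estimate uncertainty of 0.1 and a measurement
+%% uncertainty of 0.1 the gain is 0.1 / (0.1 + 0.1) = 0.5, i.e. trust
+%% the prediction and the measurement equally.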
+-spec kalman_gain(float(), float()) -> float().
+kalman_gain(Estimate, Measurement) ->
+    Estimate / (Estimate + Measurement).
 
 %% Estimate uncertainty update
-covariance_update(KalmanGain, PreviousUncertainty) ->
-    (1 - KalmanGain) * PreviousUncertainty.
+%%
+%% Again we just use single dimensional values
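+%%
+%% A gain of 0.5 halves the previous error; a gain close to 1 (trusting
+%% the measurement) drives it towards zero.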
+-spec covariance_update(float(), float()) -> float().
+covariance_update(KalmanGain, PreviousErr) ->
+    (1 - KalmanGain) * PreviousErr.
 
-%% Extrapolate uncertainty from estimated uncertainty
-covariance_extrapolation(Value, ValueDeriv, Dt) ->
-    Value + ValueDeriv * (Dt * Dt).
+%% Extrapolate uncertainty from estimated uncertainty.
+%%
+%% This requires knowledge of the physics model to operate.
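+%%
+%% e.g. with Dt = 1 and both errors at 0.1 the position error grows to
+%% 0.1 + 0.1 * 1 = 0.2, while the velocity error is left alone.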
+-spec covariance_extrapolation(#uncertainty{}, float()) -> #uncertainty{}.
+covariance_extrapolation(Err, Dt) ->
+    ErrX = Err#uncertainty.err_x,
+    ErrVx = Err#uncertainty.err_vx,
+    % For each, the equation is:
+    % Value + ValueDeriv * (Dt * Dt).
+    ErrX2 = ErrX + ErrVx * (Dt * Dt),
+    % We have no acceleration measurement.
+    % But do we really not update the uncertainty of the velocity at all?
+    % Or do we do that in covariance_update?
+    ErrVx2 = ErrVx,
+    #uncertainty { err_x = ErrX2, err_vx = ErrVx2 }.
 
 %% Update state
-update(PredictedState, Gain, Measurement) ->
-    PredictedState + Gain * (Measurement - PredictedState).
+update(PredictedState, GainX, GainVX, Measurement) ->
+    X2 = PredictedState#state.x + GainX * (Measurement#state.x -
+                                          PredictedState#state.x),
+    V2 = PredictedState#state.vx + GainVX * (Measurement#state.vx -
+                                          PredictedState#state.vx),
+    #state { x = X2, vx = V2 }.
 
-predict(EstimatedState, Model, Dt) ->
-    Model(EstimatedState, Dt).
+%% Predict a new state using our model
+%% For now we hard-code the model to be 1d physics
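+%%
+%% x'  = x + vx*dt + 0.5*ax*dt^2
+%% vx' = vx + ax*dt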
+predict(EstimatedState, Dt) ->
+    X = EstimatedState#state.x,
+    Vx = EstimatedState#state.vx,
+    %Ax = EstimatedState#state.ax,
+    % Our measurement doesn't have enough data to handle change in accel
+    Ax = 0,
 
-run(N, InitialState, InitialEstimateUncertainty, Model) ->
+    X2 = X + Vx * Dt + 0.5 * Ax * (Dt * Dt),
+    Vx2 = Vx + Ax * Dt,
+    #state { x = X2, vx = Vx2 }.
 
-run2(N, State, EstimateUncertainty, Model) ->
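+%% Fake sensor: a constant reading with a constant uncertainty.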
+measure() ->
+    {#state { x = 1.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }}.
+
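+%% One predict/update cycle per iteration: extrapolate the state and its
+%% uncertainty, compute the gains, blend the measurement in, then recurse
+%% with a fresh measurement.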
+run(N, State, Err, MeasurementState, MeasurementErr) ->
+    Dt = 1,
     if
         N == 0 -> {};
         true ->
-            Measurement = fdsafdsafsfas,
-            MeasurementUncertainty = fdsafdsafsa, % needs to be part of the measurement...
-            Gain = kalman_gain(EstimateUncertainty, MeasurementUncertainty),
+            PredictedState = predict(State, Dt),
+            PredictedErr = covariance_extrapolation(Err, Dt),
+            io:format("Iter ~p, State: ~p Err: ~p~n", [N, PredictedState, PredictedErr]),
 
-            EstimatedState = update(InitialState, Gain, Measurement),
-            EstimatedUncertainty = covariance_update(Gain, EstimateUncertainty),
+            GainX = kalman_gain(PredictedErr#uncertainty.err_x, MeasurementErr#uncertainty.err_x),
+            GainVX = kalman_gain(PredictedErr#uncertainty.err_vx, MeasurementErr#uncertainty.err_vx),
+
+            EstimatedState = update(PredictedState, GainX, GainVX, MeasurementState),
+            EstimatedErrX = covariance_update(GainX, PredictedErr#uncertainty.err_x),
+            EstimatedErrVX = covariance_update(GainVX, PredictedErr#uncertainty.err_vx),
+            EstimatedErr = #uncertainty { err_x = EstimatedErrX, err_vx = EstimatedErrVX },
 
-            PredictedState = predict(EstimatedState, Model, 1),
-            PredictedUncertainty = covariance_extrapolation(EstimatedUncertainty
-            io:format("Iter ~p, Measurement: ~p Prediction: ~p~n", [N,
-                                                                    Measurement,
-                                                                    PredictedState]),
-            run(N-1, PredictedState, MeasurementUncertainty, Model)
+            {NewMeasurement, NewMeasurementErr} = measure(),
+            run(N-1, EstimatedState, EstimatedErr, NewMeasurement, NewMeasurementErr)
     end.
+
+
+run2(N) ->
+    InitialState = #state { x = 0.0, vx = 0.5 },
+    InitialErr = #uncertainty { err_x = 0.1, err_vx = 0.1 },
+    {NewMeasurement, NewMeasurementErr} = measure(),
+
+    run(N, InitialState, InitialErr, NewMeasurement, NewMeasurementErr).
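
Kicking the tires from the shell should be something like this (paths
assumed, untested):

    1> c("apps/goatherd/src/alg_kalman.erl").
    2> alg_kalman:run2(5).

Each iteration prints the predicted state and uncertainty via that
io:format, so the goes-a-little-crazy part is easy to see for yourself.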