@@ -17,7 +17,7 @@
-module(alg_kalman).
--export([run2/1]).
+-export([run/0]).
% Ok to make life simpler for expository purposes we're just gonna use a
% 1D position model
@@ -78,34 +78,60 @@ predict(EstimatedState, Dt) ->
Vx2 = Vx + Ax * Dt,
#state { x = X2, vx = Vx2 }.
-measure() ->
- {#state { x = 1.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }}.
-
-run(N, State, Err, MeasurementState, MeasurementErr) ->
- Dt = 1,
- if
- N == 0 -> {};
- true ->
- PredictedState = predict(State, Dt),
- PredictedErr = covariance_extrapolation(Err, Dt),
- io:format("Iter ~p, State: ~p Err: ~p~n", [N, PredictedState, PredictedErr]),
+measurements() ->
+ States = [{#state { x = 0.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 1.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 2.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 3.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 4.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 5.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 6.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 7.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 8.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 9.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }},
+ {#state { x = 10.0, vx = 1.0}, #uncertainty { err_x = 0.1, err_vx = 0.1 }}
+ ],
+ Err = 0.1,
+ F = fun({S, E}) ->
+ {#state{
+ x = S#state.x + rand:uniform() * Err,
+ vx = S#state.vx + rand:uniform() * Err
+ },
+ #uncertainty {
+ err_x = E#uncertainty.err_x + rand:uniform() * Err,
+ err_vx = E#uncertainty.err_vx + rand:uniform() * Err
+ }}
+ end,
+ lists:map(F, States).
- GainX = kalman_gain(PredictedErr#uncertainty.err_x, MeasurementErr#uncertainty.err_x),
- GainVX = kalman_gain(PredictedErr#uncertainty.err_vx, MeasurementErr#uncertainty.err_vx),
+%% Run one step of the filter.
+%% Takes a state and error state, plus a measurement,
+%% and returns new ones.
+step(State, Err, {MeasurementState, MeasurementErr}) ->
+ Dt = 1.0,
+ PredictedState = predict(State, Dt),
+ PredictedErr = covariance_extrapolation(Err, Dt),
- EstimatedState = update(PredictedState, GainX, GainVX, MeasurementState),
- EstimatedErrX = covariance_update(PredictedErr#uncertainty.err_x, Dt),
- EstimatedErrVX = covariance_update(PredictedErr#uncertainty.err_vx, Dt),
- EstimatedErr = #uncertainty { err_x = EstimatedErrX, err_vx = EstimatedErrVX },
+ GainX = kalman_gain(PredictedErr#uncertainty.err_x, MeasurementErr#uncertainty.err_x),
+ GainVX = kalman_gain(PredictedErr#uncertainty.err_vx, MeasurementErr#uncertainty.err_vx),
- {NewMeasurement, NewMeasurementErr} = measure(),
- run(N-1, EstimatedState, EstimatedErr, NewMeasurement, NewMeasurementErr)
- end.
+ EstimatedState = update(PredictedState, GainX, GainVX, MeasurementState),
+ EstimatedErrX = covariance_update(PredictedErr#uncertainty.err_x, Dt),
+ EstimatedErrVX = covariance_update(PredictedErr#uncertainty.err_vx, Dt),
+ EstimatedErr = #uncertainty { err_x = EstimatedErrX, err_vx = EstimatedErrVX },
+ {EstimatedState, EstimatedErr}.
-run2(N) ->
- InitialState = #state { x = 0.0, vx = 0.5 },
+run() ->
+ InitialState = #state { x = 0.0, vx = 0.0 },
InitialErr = #uncertainty { err_x = 0.1, err_vx = 0.1 },
- {NewMeasurement, NewMeasurementErr} = measure(),
+ Measurements = measurements(),
+ run2(InitialState, InitialErr, Measurements).
+
- run(N, InitialState, InitialErr, NewMeasurement, NewMeasurementErr).
+run2(State, Err, []) ->
+ {State, Err};
+run2(State, Err, [Measurement|Rest]) ->
+ io:format("Measurement: ~p State: ~p Err: ~p~n", [Measurement, State, Err]),
+ {NewState, NewErr} = step(State, Err, Measurement),
+ run2(NewState, NewErr, Rest).