Removed the subdir.
4  pyrate/pyrate/__init__.py  (Normal file)
@@ -0,0 +1,4 @@
"""The Pyrate package for autonomous surface vehicles."""

__version__ = "22.04"
__author__ = "Sailing Team Darmstadt e. V. members and affiliates"
6  pyrate/pyrate/act/__init__.py  (Normal file)
@@ -0,0 +1,6 @@
"""The act package provides tools to use the employed actuators of the robot to execute planned actions.
Usually, this includes the computation of required motor positions to minimize
the error between desired and actual states.

In the ``control`` package, classes for controlling actuators such that deviations
between desired and measured states are driven towards zero are implemented."""
9  pyrate/pyrate/act/control/__init__.py  (Normal file)
@@ -0,0 +1,9 @@
"""This package provides controllers that compute motor inputs, e.g. angles or
voltages, such that a desired state can be reached and held."""

from .anti_windup_lqr import AntiWindupLqr
from .anti_windup_pid import AntiWindupPid
from .lqr import Lqr
from .pid import Pid

__all__ = ["AntiWindupLqr", "AntiWindupPid", "Lqr", "Pid"]
119  pyrate/pyrate/act/control/anti_windup_lqr.py  (Normal file)
@@ -0,0 +1,119 @@
"""This module implements the Linear Quadratic Regulator with integral part and anti-windup."""

# Mathematics
from numpy import clip
from numpy import hstack
from numpy import ndarray
from numpy import vstack
from numpy import zeros

# LQR control
from .lqr import Lqr


class AntiWindupLqr(Lqr):

    """The anti-windup LQR controller, including an integration state for zero stationary error.

    This controller resembles the LQR with added clipping on the control signal to a user-set
    maximum value. Furthermore, the integral of the error over time is pruned (anti-windup).

    Examples:
        First, import some helper functions from numpy.

        >>> from numpy import array
        >>> from numpy import eye
        >>> from numpy import vstack

        We then set up the Lqr controller with some control constants.

        >>> controller = AntiWindupLqr(
        ...     array([[0, 1], [0, 0]]),
        ...     array([0, 1])[:, None],
        ...     array([1, 0])[None, :],
        ...     eye(3),
        ...     array([[1.0]]),
        ...     array([1.0]),
        ...     0.5,
        ... )

        We then specify an initial and desired state.

        >>> initial = vstack([1.0, 0.0])
        >>> desired = vstack([0.0])

        Finally, we retrieve a control signal from the Lqr based on the values we just set.

        >>> signal = controller.control(desired, initial)

    Args:
        A: System matrix (continuous time) ``(n, n)``
        B: Input matrix ``(n, 1)``
        C: Output matrix ``(1, n)``
        Q: State cost matrix (pos. semi-definite, symmetric) ``(n+1, n+1)``
        R: Control cost matrix (pos. definite, symmetric) ``(1, 1)``
        max_control: Limit of control signal
        dt: Time between measurements
        keep_trace: Whether to store a trace of control signals, states, etc.
    """

    # In this context, we reproduce a common LQR notation
    # pylint: disable=invalid-name, too-many-arguments

    def __init__(
        self,
        A: ndarray,
        B: ndarray,
        C: ndarray,
        Q: ndarray,
        R: ndarray,
        max_control: ndarray,
        dt: float,
        keep_trace: bool = False,
    ) -> None:  # noqa: E741
        # Controller specification for augmented state
        n = A.shape[0] + 1
        A_i = zeros((n, n))
        A_i[1:, 1:] = A
        A_i[0, 1:] = -C
        B_i = vstack([zeros((1, 1)), B])
        C_i = hstack([zeros((1, 1)), C])

        # Setup internal LQR controller and own attributes
        super().__init__(A_i, B_i, C_i, Q, R, dt, keep_trace, calculate_feed_forward=False)
        self.V *= (self.C * self.K).sum()
        self.max_control = max_control
        self.summed_error = 0.0

    def control(self, desired: ndarray, state: ndarray) -> ndarray:
        """Compute the control signal based on the LQR controller.

        Args:
            desired: The desired output
            state: The current state

        Returns:
            The control signal
        """

        # Prepend summed error to state vector
        state_i = vstack([self.summed_error, state])

        # Compute errors
        error = desired - self.C @ state_i
        self.summed_error += self.dt * error

        # Get the basic LQR control signal and clip to the specified boundary
        lqr_signal = super().control(desired, state_i)
        control_signal: ndarray = clip(lqr_signal, -abs(self.max_control), abs(self.max_control))

        # Prune integral part, i.e. apply anti-windup
        self.summed_error += (lqr_signal - control_signal) / self.K[0, 0]

        return control_signal

    def reset(self) -> None:
        """Resets the controller's memory, i.e. set the error integral to zero and empty the process trace."""

        super().reset()
        self.summed_error = 0.0
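As a quick closed-loop illustration of the clipping behaviour above (not part of this commit): the sketch below drives the double integrator from the docstring example toward a setpoint with a simple Euler discretization of the plant. It assumes the pyrate package and numpy are installed; the plant stepping is made up for illustration only::

    from numpy import array, eye, vstack

    from pyrate.act.control import AntiWindupLqr

    dt = 0.5
    A = array([[0, 1], [0, 0]])  # double integrator: position and velocity
    B = array([0, 1])[:, None]
    controller = AntiWindupLqr(
        A, B, array([1, 0])[None, :], eye(3), array([[1.0]]), array([1.0]), dt
    )

    state, desired = vstack([1.0, 0.0]), vstack([0.0])
    for _ in range(20):
        signal = controller.control(desired, state)  # clipped to |signal| <= 1.0
        state = state + dt * (A @ state + B @ signal)  # Euler step of the plant
    print(state[0, 0])  # the position should have moved toward the setpoint 0.0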
87  pyrate/pyrate/act/control/anti_windup_pid.py  (Normal file)
@@ -0,0 +1,87 @@
"""This module implements the PID (proportional integral derivative) controller with anti-windup."""

# Mathematics
from numpy import clip
from numpy import ndarray

# PID controller
from .pid import Pid


class AntiWindupPid(Pid):

    """The PID controller with anti-windup, i.e. a limited control output and integral part.

    This controller resembles the PID with added clipping on the control signal to a user-set
    maximum value. Furthermore, the integral of the error over time is pruned (anti-windup).

    Examples:
        First, import some helper functions from numpy.

        >>> from numpy import array

        We then set up the Pid controller with some control constants.

        >>> controller = AntiWindupPid(
        ...     array([0.5]),
        ...     array([0.1]),
        ...     array([0.0]),
        ...     5.0,
        ...     0.1,
        ... )

        We then specify an initial and desired state as well as the current state derivative.

        >>> initial = array([5.0])
        >>> desired = array([0.0])
        >>> derivative = array([0.0])

        Finally, we retrieve a control signal from the Pid based on the values we just set.

        >>> signal = controller.control(desired, initial, derivative)

    Args:
        P: Proportional control constant ``(n,)``
        I: Integral control constant ``(n,)``
        D: Derivative control constant ``(n,)``
        max_control: Limit of control signal
        dt: Time between measurements
        keep_trace: Whether to store a trace of control signals, states, etc.
    """

    # In this context, we reproduce a common PID notation
    # pylint: disable=too-many-arguments

    def __init__(
        self,
        P: ndarray,
        I: ndarray,  # noqa: E741
        D: ndarray,
        max_control: float,
        dt: float,
        keep_trace: bool = False,
    ) -> None:
        # Setup internal PID controller and own attributes
        super().__init__(P, I, D, dt, keep_trace)
        self.max_control = max_control

    def control(self, desired: ndarray, state: ndarray, state_derivative: ndarray) -> ndarray:
        """Compute the control signal based on proportional, integral and derivative terms.

        Args:
            desired: The desired state
            state: The current state
            state_derivative: The current state derivative

        Returns:
            The control signal
        """

        # Get the basic PID control signal and clip to the specified boundary
        pid_signal = super().control(desired, state, state_derivative)
        control_signal: ndarray = clip(pid_signal, -abs(self.max_control), abs(self.max_control))

        # Prune integral part, i.e. apply anti-windup
        self.summed_error -= (pid_signal - control_signal) / self.I

        return control_signal
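A small windup demonstration (not part of this commit): with a saturating actuator, a plain integrator would accumulate a large error integral, while the back-calculation in ``control`` above adjusts ``summed_error`` so that the controller recovers quickly once the setpoint becomes reachable. It assumes pyrate and numpy are installed; the one-dimensional integrator plant is made up for illustration::

    from numpy import array

    from pyrate.act.control import AntiWindupPid

    controller = AntiWindupPid(array([0.5]), array([0.1]), array([0.0]), 1.0, 0.1)

    state = array([10.0])  # start far from the setpoint so the output saturates
    for _ in range(50):
        signal = controller.control(array([0.0]), state, array([0.0]))
        state = state + 0.1 * signal  # integrator plant, driven by the clipped signal
    print(controller.summed_error)  # kept moderate by the back-calculation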
147  pyrate/pyrate/act/control/lqr.py  (Normal file)
@@ -0,0 +1,147 @@
"""This module implements the Linear Quadratic Regulator."""

# Mathematics
from numpy.linalg import inv
from numpy import ndarray
from numpy import ones
from scipy.linalg import solve
from scipy.linalg import solve_continuous_are

# Data modelling
from pandas import concat
from pandas import DataFrame


class Lqr:
    """The LQR controller.

    The linear-quadratic regulator (LQR) is a feedback controller that solves linear-quadratic
    problems at minimum cost. Such problems are defined by linear differential equations and
    quadratic cost functions.

    Examples:
        First, import some helper functions from numpy.

        >>> from numpy import array
        >>> from numpy import eye
        >>> from numpy import vstack

        We then set up the Lqr controller with some control constants.

        >>> controller = Lqr(
        ...     array([[0, 1], [0, 0]]),
        ...     array([0, 1])[:, None],
        ...     array([1, 0])[None, :],
        ...     eye(2),
        ...     array([[1.0]]),
        ...     0.5,
        ... )

        We then specify an initial and desired state.

        >>> initial = vstack([0.0, 0.0])
        >>> desired = vstack([0.0])

        Finally, we retrieve a control signal from the Lqr based on the values we just set.

        >>> signal = controller.control(desired, initial)

    Args:
        A: System matrix (continuous time) ``(n, n)``
        B: Input matrix ``(n, 1)``
        C: Output matrix ``(1, n)``
        Q: State cost matrix (pos. semi-definite, symmetric) ``(n, n)``
        R: Control cost matrix (pos. definite, symmetric) ``(1, 1)``
        dt: Time between measurements
        keep_trace: Whether to store a trace of control signals, states, etc.
        calculate_feed_forward: Whether to compute a feed forward part

    References:
        - https://en.wikipedia.org/wiki/Linear%E2%80%93quadratic_regulator
    """

    # In this context, we reproduce a common LQR notation
    # pylint: disable=invalid-name, too-many-arguments

    def __init__(
        self,
        A: ndarray,
        B: ndarray,
        C: ndarray,
        Q: ndarray,
        R: ndarray,
        dt: float,
        keep_trace: bool = False,
        calculate_feed_forward: bool = True,
    ) -> None:  # noqa: E741
        # Dimensionality checks
        assert len(A.shape) == 2 and A.shape[0] == A.shape[1], "Matrix A is not square!"
        assert B.shape[0] == A.shape[0], "Wrong shape for input matrix B!"
        assert C.shape[1] == A.shape[0], "Wrong shape for output matrix C!"

        # Controller specification
        self.A = A
        self.B = B
        self.C = C
        self.Q = Q
        self.R = R
        self.dt = dt

        # Compute controller gain
        # For reference, see here: https://en.wikipedia.org/wiki/Linear%E2%80%93quadratic_regulator
        self.P = solve_continuous_are(self.A, self.B, self.Q, self.R)
        self.K = solve(self.R, self.B.T @ self.P)

        # Calculate static feed forward
        if calculate_feed_forward:
            self.V = inv(-self.C @ inv(self.A - self.B @ self.K) @ self.B)
        else:
            self.V = ones([1, 1])

        # Objects for process tracing
        self.keep_trace = keep_trace
        self.process = DataFrame(
            columns=[
                "desired",
                "state",
                "error",
                "control_signal",
            ]
        )

    def control(self, desired: ndarray, state: ndarray) -> ndarray:
        """Compute the control signal based on the LQR controller.

        Args:
            desired: The desired output
            state: The current state

        Returns:
            The control signal
        """

        # Compute errors
        error = desired - self.C @ state

        # Compute feedback and feed forward values
        control_signal: ndarray = -self.K @ state + self.V @ desired

        # Append control step to process trace
        if self.keep_trace:
            new = DataFrame(
                {
                    "desired": (desired.copy(),),
                    "state": (state.copy(),),
                    "error": (error.copy(),),
                    "control_signal": (control_signal.copy(),),
                }
            )
            self.process = concat([self.process, new], ignore_index=True)

        # Return result
        return control_signal

    def reset(self) -> None:
        """Resets the controller's memory, i.e. empty the process trace."""

        self.process = self.process[0:0]
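The gain computation can be sanity-checked independently of the class (not part of this commit; only numpy and scipy are required): solve the continuous-time algebraic Riccati equation exactly as the constructor does and verify that the closed loop ``A - B K`` is stable, i.e. that all of its eigenvalues have negative real parts::

    from numpy import array
    from numpy.linalg import eigvals
    from scipy.linalg import solve, solve_continuous_are

    A = array([[0.0, 1.0], [0.0, 0.0]])  # double integrator
    B = array([[0.0], [1.0]])
    Q = array([[1.0, 0.0], [0.0, 1.0]])
    R = array([[1.0]])

    P = solve_continuous_are(A, B, Q, R)  # solves A'P + PA - P B R^-1 B'P + Q = 0
    K = solve(R, B.T @ P)  # K = R^-1 B' P

    assert all(eigvals(A - B @ K).real < 0), "closed loop must be stable"
    print(K)  # for this system: [[1.0, 1.732...]], i.e. [1, sqrt(3)], up to precision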
133  pyrate/pyrate/act/control/pid.py  (Normal file)
@@ -0,0 +1,133 @@
"""This module implements the PID (proportional integral derivative) controller."""

# Mathematics
from numpy import dot
from numpy import ndarray
from numpy import zeros_like

# Data modelling
from pandas import concat
from pandas import DataFrame


class Pid:

    """The PID controller.

    The proportional-integral-derivative (PID) controller is an industry-standard feedback control loop.
    This controller responds proportionally to the error, i.e. the deviation from the desired state,
    as well as to its time derivative and integral.

    Examples:
        First, import some helper functions from numpy.

        >>> from numpy import array

        We then set up the Pid controller with some control constants.

        >>> controller = Pid(
        ...     array([0.5]),
        ...     array([0.0]),
        ...     array([0.0]),
        ...     0.1,
        ... )

        We then specify an initial and desired state as well as the current state derivative.

        >>> initial = array([5.0])
        >>> desired = array([0.0])
        >>> derivative = array([0.0])

        Finally, we retrieve a control signal from the Pid based on the values we just set.

        >>> signal = controller.control(desired, initial, derivative)

    Args:
        P: Proportional control constant ``(n,)``
        I: Integral control constant ``(n,)``
        D: Derivative control constant ``(n,)``
        dt: Time between measurements
        keep_trace: Whether to store a trace of control signals, states, etc.

    References:
        - https://en.wikipedia.org/wiki/PID_controller
    """

    # In this context, we reproduce a common PID notation
    # pylint: disable=invalid-name, too-many-arguments

    def __init__(self, P: ndarray, I: ndarray, D: ndarray, dt: float, keep_trace: bool = False):  # noqa: E741
        # Controller specification
        self.P = P
        self.I = I  # noqa: E741
        self.D = D
        self.dt = dt

        # Error summation field
        self.summed_error = zeros_like(P).transpose()

        # Objects for process tracing
        self.keep_trace = keep_trace
        self.process = DataFrame(
            columns=[
                "desired",
                "state",
                "state_derivative",
                "error",
                "summed_error",
                "proportional",
                "integral",
                "derivative",
                "control_signal",
            ]
        )

    def control(self, desired: ndarray, state: ndarray, state_derivative: ndarray) -> ndarray:
        """Compute the control signal based on proportional, integral and derivative terms.

        Args:
            desired: The desired state
            state: The current state
            state_derivative: The current state's derivative

        Returns:
            The control signal
        """

        # Compute errors
        error = desired - state
        self.summed_error += self.dt * error

        # Compute PID values
        proportional = dot(self.P, error)
        integral = dot(self.I, self.summed_error)
        derivative = dot(self.D, state_derivative)

        # Compute control signal
        control_signal: ndarray = proportional + integral - derivative

        # Append control step to process trace
        if self.keep_trace:
            new = DataFrame(
                {
                    "desired": (desired.copy(),),
                    "state": (state.copy(),),
                    "state_derivative": (state_derivative.copy(),),
                    "error": (error.copy(),),
                    "summed_error": (self.summed_error.copy(),),
                    "proportional": (proportional.copy(),),
                    "integral": (integral.copy(),),
                    "derivative": (derivative.copy(),),
                    "control_signal": (control_signal.copy(),),
                },
            )
            self.process = concat([self.process, new], ignore_index=True)

        # Return result
        return control_signal

    def reset(self) -> None:
        """Resets the controller's memory, i.e. set the error integral to zero and empty the process trace."""

        self.summed_error = zeros_like(self.P).transpose()
        self.process = self.process[0:0]
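A short tracing sketch (not part of this commit): with ``keep_trace=True``, every call to ``control()`` appends one row to the pandas ``DataFrame`` in ``controller.process``, which is handy for plotting or debugging a run afterwards. It assumes pyrate, numpy and pandas are installed; the integrator plant is made up::

    from numpy import array

    from pyrate.act.control import Pid

    controller = Pid(array([0.5]), array([0.1]), array([0.0]), 0.1, keep_trace=True)

    state = array([5.0])
    for _ in range(3):
        signal = controller.control(array([0.0]), state, array([0.0]))
        state = state + 0.1 * signal

    print(controller.process[["error", "control_signal"]])  # one row per control step
    controller.reset()  # empties the trace again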
1  pyrate/pyrate/common/__init__.py  (Normal file)
@@ -0,0 +1 @@
"""Contains generic helper functionality like file IO, mathematics and testing helpers."""
7  pyrate/pyrate/common/charts/__init__.py  (Normal file)
@@ -0,0 +1,7 @@
"""Enables handling of nautical charts and storage of obstacles in a spatial database."""

from .db import SpatialiteDatabase
from .s57_files import ChartFileHandler
from .s57_files import S57ChartHandler

__all__ = ["SpatialiteDatabase", "ChartFileHandler", "S57ChartHandler"]
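A plausible end-to-end sketch of this package (not part of this commit): read all obstacles from a directory of S-57 charts and persist them in a Spatialite database. The directory and database file names are made up; the classes and methods are the ones exported above. It assumes pyrate and its chart dependencies (osgeo, spatialite) are installed::

    from pyrate.common.charts import S57ChartHandler, SpatialiteDatabase

    handler = S57ChartHandler()
    with SpatialiteDatabase("obstacles.sqlite") as database:
        for chart_file in handler.find_chart_files("data/charts"):
            # read_chart_file() yields geometries lazily, so they are written file by file
            database.write_geometries(handler.read_chart_file(chart_file), raise_on_failure=False)
        print(f"stored {len(database)} geometries")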
681  pyrate/pyrate/common/charts/db.py  (Normal file)
@@ -0,0 +1,681 @@
"""This module adds support for a Spatialite database (SQLite DB with extra modules).

This module requires the *libsqlite3-mod-spatialite* system dependency.
The database allows for storage and efficient retrieval via spatial indexing of elements.

References:
    - `General information <https://www.gaia-gis.it/gaia-sins/spatialite-manual-2.3.1.html>`__
    - `The website of Spatialite <https://www.gaia-gis.it/fossil/libspatialite/index/>`__
    - `The website of Spatialite and friends <https://www.gaia-gis.it/gaia-sins/>`__
    - `Cookbook, Chapter "Spatial Indexing support"
      <https://www.gaia-gis.it/gaia-sins/spatialite-cookbook-5/cookbook_topics.06.html#topic_Spatial_Indexing_support>`__
    - `Cookbook, Chapter "Creating a well designed DB"
      <https://www.gaia-gis.it/gaia-sins/spatialite-cookbook-5/cookbook_topics.03.html#topic_Creating_a_well_designed_DB>`__
    - `SQL functions reference list <https://www.gaia-gis.it/gaia-sins/spatialite-sql-5.0.0.html>`__
"""

# Python standard
from contextlib import closing
from contextlib import contextmanager
from math import degrees
import random
import string
from warnings import warn

# Database interface
import sqlite3
from sqlite3 import Connection

# Typing
from typing import cast
from typing import Generator
from typing import Iterable
from typing import Iterator
from typing import Optional

# Shapely for internal abstraction
from shapely.geometry import LineString
from shapely.geometry import Point
from shapely.geometry import Polygon
import shapely.wkb

# Planning primitives
from pyrate.plan.geometry import Direction
from pyrate.plan.geometry import LocationType
from pyrate.plan.geometry import PolarGeometry
from pyrate.plan.geometry import PolarLocation
from pyrate.plan.geometry import PolarPolygon
from pyrate.plan.geometry import PolarRoute

# Geospatial helpers
from pyrate.plan.geometry.helpers import difference_latitude
from pyrate.plan.geometry.helpers import difference_longitude
from pyrate.plan.geometry.helpers import meters2rad

# Import this to enable GDAL/libgeos exceptions if it has not already happened
from . import s57_files as tmp_import

del tmp_import


class SpatialiteDatabase:

    """Allows for IO with the *Spatialite* SQLite database containing obstacles.

    Reading of entries from the database is implemented using generators, i.e. the elements are retrieved
    one by one as they are consumed by the caller. While this allows for the processing of large amounts of
    data in constant memory, it also keeps the cursor to the database open until all elements have been
    consumed. To consume all of the rows at once, simply wrap it into the list constructor like this:
    ``all_as_list = list(database.read_all())``. Note that only the parsing to Pyrate primitives is
    done lazily, while the actual database reading happens eagerly.

    Internally, a spatial index is used for fast *retrieval* of obstacles given geometric constraints.
    For example, this makes queries for all obstacles in a given bounding box take time roughly
    proportional to the result set, and not to the total size of the database.
    Some real-world benchmarks can be obtained from the script :ref:`script-benchmark_db_and_projections`
    and are discussed in :ref:`design-decisions-local-projections`.
    See ``SpatialiteDatabase._CREATE_TABLES_SQL_STATEMENT`` for details on the structure of the
    database. The longitude is always the first/the X component of the two-dimensional geometries.

    *QGIS* can natively open the created databases for visual inspection. It is very efficient, too.

    A single polygon in the database might get split into multiple ones in a query due to clipping. A unique
    index is maintained with best effort by adding subsequent numbers to the other slices of the same
    polygon. This assumes that indices are somewhat uniformly distributed and not sequential numbers.

    Examples:
        First, let us create some polygons to be stored (PolarPoint and PolarRoute would also work):

        >>> from pyrate.plan.geometry import PolarLocation, PolarPolygon, LocationType
        >>> locations = [PolarLocation(50, 50), PolarLocation(50, 51), PolarLocation(51, 51)]
        >>> polygon_1 = PolarPolygon(locations=locations, name="A Polygon, YaY!")
        >>> polygon_2 = PolarPolygon(locations=locations, name="Some Name", identifier=42,
        ...                          location_type=LocationType.LAND)
        >>> polygons = [polygon_1, polygon_2]
        >>> polygons  #doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
        [PolarPolygon(locations=[...], name="A Polygon, YaY!"),
         PolarPolygon(locations=[...], location_type=LocationType.LAND, name="Some Name", identifier=42)]

        Then, you can simply store and then retrieve some polygons.
        Note that you have to call :meth:`SpatialiteDatabase.close` after using it or use it as a context
        manager, as shown here.

        >>> from pyrate.common.charts import SpatialiteDatabase
        >>> with SpatialiteDatabase(":memory:") as database:
        ...     print(len(database))
        ...     database.write_geometries(polygons)
        ...     # We need to wrap it into a call to `list()` to evaluate the generator returned by
        ...     # `read_geometries_around` while the database is still open
        ...     read = list(database.read_geometries_around(locations[0], radius=200_000))  # 200 km
        ...     assert len(database) == len(read)
        ...     print(len(database))
        0
        2
        >>> # The database does not guarantee an order of the result set
        >>> sort_by_name = lambda geometry: geometry.name
        >>> read = list(sorted(read, key=sort_by_name))
        >>> read  #doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
        [PolarPolygon(locations=[...], name="A Polygon, YaY!", identifier=1),
         PolarPolygon(locations=[...], location_type=LocationType.LAND, name="Some Name", identifier=42)]
        >>> read == polygons
        False
        >>> # This is due to the first polygon now being given a new unused identifier
        >>> read[0].identifier
        1
        >>> # So we reset it here for the comparison to succeed
        >>> read[0].identifier = None
        >>> # Now, they should be almost equal (except for floating-point approximations)
        >>> polygon_1.equals_almost_congruent(read[0])
        True
        >>> polygon_2.equals_almost_congruent(read[1])
        True

        A full example application can be found in the script :ref:`script-benchmark_db_and_projections`.

    Possible extensions:
        - Allow for retrieving from an arbitrary PolarPolygon/bounding box. See ``_read_geometries_clipped``.
        - The method :meth:`~read_geometries_around` could be easily extended/complemented to support
          ellipses. However, only rotation of that ellipse would make this useful, and adding that appears
          to be tricky.

    Note:
        Use with ``storage_path=":memory:"`` (see example above) to open an ephemeral database that resides
        in RAM. This works when only a single database user (like a :class:`~SpatialiteDatabase` instance)
        will access it.
        Otherwise, passing ``"file::memory:?cache=shared"`` as a file will allow the same database to be
        accessed by multiple different users within the same process. Both are useful for (unit-)testing too.
        When passing extra options like ``"file::memory:?cache=shared"``,
        you will also have to pass ``uri=True`` to :class:`~SpatialiteDatabase`
        such that the parameters do not get mistaken for being part of an actual file name:

        >>> with SpatialiteDatabase("file::memory:?cache=shared", uri=True) as database:
        ...     print(len(database))
        0

    Args:
        storage_path: the path where to look for the database file, usually ending with ``.sqlite``
        issue_create_statement: tries to create the table(s) and indices if not yet existing;
            this can safely be left enabled as existing tables make this a no-op
        kwargs: additional parameters to be passed to the database creation, see :class:`sqlite3.Connection`

    Raises:
        IOError: When the database file cannot be accessed
        RuntimeError: If the Spatialite extension (*libsqlite3-mod-spatialite*) cannot be loaded
    """

    #: The Spatial Reference System Identifier used for storing objects;
    #: this is the widely used WGS 84; see: https://spatialreference.org/ref/epsg/4326/
    _SRID: int = 4326

    def __init__(self, storage_path: str, issue_create_statement: bool = True, **kwargs) -> None:
        # This may raise an IOError:
        self._connection: Connection = sqlite3.connect(storage_path, **kwargs)

        try:
            # load the spatialite module
            self._connection.load_extension("mod_spatialite.so")
        except sqlite3.OperationalError as error:  # pragma: no cover
            raise RuntimeError(
                "Cannot load the spatialite extension. Is it installed (see installation instructions)? "
                f"Error was: {error}"
            ) from error

        if issue_create_statement:
            self.create_tables()

    def create_tables(self, table_name: str = "Obstacles") -> None:
        """This creates the table(s) and indices in the database (if they do not exist yet).

        See the module documentation of :mod:`pyrate.common.charts.db` for more information.

        Args:
            table_name: The name of the table to initialize
        """
        # Check if the table "Obstacles" is present
        check = f"SELECT COUNT(1) FROM SQLITE_MASTER WHERE name = '{table_name}'"
        with closing(self._connection.execute(check)) as cursor:
            count = cast(int, cursor.fetchone()[0])  # This needs to be cast as the result is untyped

        if count == 0:
            # It is not present, so we initialize the database here

            statement = f"""
                CREATE TABLE IF NOT EXISTS '{table_name}' (
                    id INTEGER PRIMARY KEY NOT NULL,
                    location_type TINYINT unsigned NOT NULL DEFAULT 0,
                    name VARCHAR CHARACTER DEFAULT NULL,
                    CHECK (location_type <= {LocationType.max_value()})
                );
                CREATE INDEX IF NOT EXISTS by_location_type ON '{table_name}' (location_type);

                SELECT InitSpatialMetaDataFull();
                SELECT AddGeometryColumn(
                    '{table_name}',
                    'geometry',
                    {SpatialiteDatabase._SRID},
                    'GEOMETRY',  -- just as fast as e.g. POLYGON but more flexible
                    'XY',
                    TRUE  -- set to NOT NULL
                );
                SELECT CreateSpatialIndex(
                    '{table_name}',
                    'geometry'
                );

                -- can only be done after column is added; but is not supported by SQLite
                -- ALTER TABLE '{table_name}' ADD CHECK (IsValid(geometry));
            """

            with self._connection:  # auto-commits at the end
                with self.disable_synchronization():
                    self._connection.executescript(statement).close()

    @contextmanager
    def disable_synchronization(self) -> Iterator[None]:
        """Temporarily disables the file system synchronization (used for consistency) to increase write
        performance.

        To quote the `documentation of SQLite <https://www.sqlite.org/pragma.html#pragma_synchronous>`__:

            "With synchronous OFF (0), SQLite continues without syncing as soon as it has handed data off to
            the operating system. If the application running SQLite crashes, the data will be safe, but the
            database might become corrupted if the operating system crashes or the computer loses power
            before that data has been written to the disk surface. On the other hand, commits can be orders
            of magnitude faster with synchronous OFF."

        """
        self._connection.execute("PRAGMA synchronous=OFF").close()
        yield
        self._connection.execute("PRAGMA synchronous=ON").close()

    def copy_contents_to_database(self, file_path: str, update: bool = False, **kwargs) -> None:
        """Dumps the content of this obstacle database to a new one.

        This can be useful in cases where one wants to first create an extremely fast in-memory database and
        later copy it to a file on disk.

        Args:
            file_path: the path of the other database
            update: if set to ``True``, update/replace on conflicting identifiers; else raise an error in
                that case
            kwargs: additional parameters to be passed to the database creation, see attribute ``kwargs`` of
                :class:`~SpatialiteDatabase`

        Raises:
            sqlite3.IntegrityError: If a duplicate key should have been inserted and ``update`` was set to
                ``False``
        """
        # init the other database
        with SpatialiteDatabase(file_path, **kwargs):
            pass

        database_name = _random_name()

        command = "REPLACE" if update else "INSERT"
        statements = f"""
            ATTACH '{file_path}' AS {database_name};
            {command} INTO {database_name}.Obstacles SELECT * FROM main.Obstacles;
            DETACH {database_name};
        """

        with self._connection:  # auto-commits at the end
            self._connection.executescript(statements).close()

    def simplify_contents(self, simplify_tolerance: float) -> None:
        """Simplifies all geometries within the database. Always runs :meth:`~vacuum` afterwards.

        Args:
            simplify_tolerance: the tolerance within which all new points shall lie w.r.t. the old ones,
                in meters, non-negative. Set to zero to disable.

        Further ideas:
            - Keep topology between objects, not just within them, e.g. see
              `this blog post <https://trac.osgeo.org/postgis/wiki/UsersWikiSimplifyPreserveTopology>`__.
        """
        assert simplify_tolerance >= 0, "tolerance must be non-negative"

        if simplify_tolerance > 0:
            tolerance_degrees = degrees(meters2rad(simplify_tolerance))
            statement = (
                f"UPDATE Obstacles SET geometry = ST_SimplifyPreserveTopology(geometry, {tolerance_degrees})"
            )

            with self._connection:  # auto-commits at the end
                self._connection.execute(statement).close()

        self.vacuum()

    def vacuum(self) -> None:
        """Defragments the database. This is useful after deleting or shrinking many entries."""
        with self._connection:  # auto-commits at the end
            self._connection.execute("VACUUM").close()

    def write_geometry(
        self, geometry: PolarGeometry, update: bool = False, raise_on_failure: bool = True
    ) -> None:
        """Alias for ``write_geometries([geometry], update)``. See :meth:`~write_geometries`.

        Args:
            geometry: the geometry to place into the database, identified by
                its :attr:`~pyrate.plan.geometry.Geospatial.identifier`
            update: see :meth:`~write_geometries`
            raise_on_failure: see :meth:`~write_geometries`
        """

        return self.write_geometries([geometry], update=update, raise_on_failure=raise_on_failure)

    def write_geometries(
        self, geometries: Iterable[PolarGeometry], update: bool = False, raise_on_failure: bool = True
    ) -> None:
        """Writes geometries into the database.

        All geometries are only identified by their identifier as a primary key.
        If ``update is True``, any existing geometries with the same IDs will be updated/replaced.
        If ``update is False``, an exception is raised if duplicate keys are to be inserted.

        Args:
            geometries:
                The geometries to place into the database, identified by their
                :attr:`~pyrate.plan.geometry.Geospatial.identifier`.
            update:
                If set to ``True``, update/replace on conflicting identifiers;
                else raise an error in that case.
                If set to ``True``, no guarantees about inserts can be made (see :class:`ValueError` below).
            raise_on_failure:
                If set to ``False``, suppress the :class:`ValueError` below and instead print a warning.

        Raises:
            sqlite3.IntegrityError: If a duplicate key should have been inserted and ``update`` was set to
                ``False``
            sqlite3.IntegrityError: If a value was not within the constraints; should never happen if all
                :class:`~pyrate.plan.geometry.polygon.PolarPolygon` were created properly
            ValueError:
                If the provided geometries are not valid according to *spatialite* and could not be repaired.
                However, any valid geometries will have been inserted by then.
                Also, this is only possible to be checked if ``update`` is set to ``False``.
                Else, incomplete inserts will simply be ignored.
                Only very basic cleanup is attempted.
                Suppressed if ``raise_on_failure is False``.
        """

        count_before = self.count_geometries()

        # Build query
        command = "REPLACE" if update else "INSERT"
        statement = f"""
            WITH _insert_temp(id,location_type,name,geometry)
            AS (VALUES (?,?,?,SanitizeGeometry(GeomFromWKB(?,{SpatialiteDatabase._SRID}))))

            {command} INTO Obstacles
            SELECT * FROM _insert_temp WHERE IsValid(geometry)
        """

        # Convert data
        data = [(g.identifier, g.location_type, g.name, to_wkb(g)) for g in geometries]

        # Execute statement
        with self._connection:  # auto-commits at the end
            self._connection.executemany(statement, data).close()

        # TODO(Felix.Divo):
        # We want to notify the user if the insert was incomplete, i.e. if a geometry was invalid
        # (1) `cursor.rowcount` from executemany() does not work since it returns -1
        # (2) `cursor.lastrowid` contains only the last ID, so we cannot use that
        # (3) Appending `RETURNING id` causes exceptions when used with executemany():
        #     "sqlite3.ProgrammingError: executemany() can only execute DML statements"
        # (4) So we do the stupid thing: Count before and after. Does not work with ``update=True``!
        #     This could also cause problems with concurrency.
        # (5) One could also repair by Buffer(), though that might not be what is desired,
        #     cf. https://shapely.readthedocs.io/en/stable/manual.html#object.buffer.

        # Make sure that all rows were valid
        missing = count_before + len(data) - self.count_geometries()
        if not update and missing > 0:
            message = f"{missing} of the {len(data)} geometries were invalid"
            if raise_on_failure:
                raise ValueError(message)
            warn(message)

    def read_all(
        self, only_location_type: Optional[LocationType] = None
    ) -> Generator[PolarGeometry, None, None]:
        """Read all stored geometries, optionally filtered by a type.

        Args:
            only_location_type: get only geometries of that type, if not set to ``None``

        Yields:
            The geometries as read from the database.
        """
        yield from self._read_geometries(
            geometry_column="geometry",  # no transformation, just select the column
            only_location_type=only_location_type,
        )

    def read_geometries_around(
        self,
        around: PolarLocation,
        radius: float = 10_000.0,
        only_location_type: Optional[LocationType] = None,
    ) -> Generator[PolarGeometry, None, None]:
        """Reads and clips geometries in a given radius around some location.

        The geometries are clipped to the circle given by the location ``around`` and ``radius``. This means
        that any parts stretching beyond the circle are not returned and the geometry approximately follows
        the outer circle in such cases. If the ellipse is deformed at (very) high/low latitudes, the clipping
        area is selected such that at least all geometries in the defined area are included, and possibly
        some more.

        Note:
            This method internally uses an ellipse as the clipping area even for circular clipping areas in
            order to account for some distortions of polar coordinates at high latitudes. Also, keep in mind
            that clipping/selecting for inclusion in the result set is not a perfect operation, as the
            clipping area is internally discretized to a geometry. It thus has corners, and is not smooth
            like an ellipse in the mathematical sense.

        Args:
            around: The point around which obstacles are to be extracted; assumed to be in degrees
            radius: The radius around *around* to retrieve items from, in meters; default: 10 000 m.
                The behaviour is unspecified if it is very large, like more than 1 000 000 m.
                It must be at least zero.
            only_location_type: Get only obstacles of that type, if not set to ``None``

        Yields:
            The geometries as read from the database.
        """

        # Safety assertions on the given radius
        assert radius >= 0.0, "the radius must be non-negative"
        assert (
            radius <= 1_000_000
        ), "radius > 1_000_000; see docs; this is not a fundamental restriction but it is untested"

        # The distance in polar coordinates is always equal when going either west or east
        east_west, _ = around.translate(Direction.East, radius)
        longitudal_radius = difference_longitude(around.longitude, east_west.longitude)

        # The above might be different when going north or south.
        # To disambiguate, we take the larger of the two distances as the radius.
        north, _ = around.translate(Direction.North, radius)
        south, _ = around.translate(Direction.South, radius)

        latitudal_radius = max(
            difference_latitude(around.latitude, north.latitude),
            difference_latitude(around.latitude, south.latitude),
        )

        # Place a corner of the discretized ellipse every 24 degrees,
        # i.e. turn it into a polygon with 15 corners
        every_degrees = 24

        # MakeEllipse(...) takes the two radii in lat/long direction in degrees and returns a line string
        clipping_area = (
            f"MakePolygon(MakeEllipse({around.longitude}, {around.latitude}, "
            f"{longitudal_radius}, {latitudal_radius}, {SpatialiteDatabase._SRID}, {every_degrees}))"
        )

        yield from self._read_geometries_clipped(clipping_area, only_location_type)

    def _read_geometries_clipped(
        self, clipping_area: str, only_location_type: Optional[LocationType]
    ) -> Generator[PolarGeometry, None, None]:
        """Internal helper for querying for clipped geometries.

        Args:
            clipping_area: The area to clip to
            only_location_type: The type of the read location

        Yields:
            The geometries clipped to the given area.
        """
        yield from self._read_geometries(
            geometry_column=f"Intersection(geometry, {clipping_area})",
            only_location_type=only_location_type,
        )

    def _read_geometries(
        self, geometry_column: str, only_location_type: Optional[LocationType]
    ) -> Generator[PolarGeometry, None, None]:
        """Factors out the common parts of assembling SQL statements for the ``read_*`` methods.

        Args:
            geometry_column:
                An SQL "column name" that can be used in a SELECT clause and which returns a Spatialite
                geometry. Examples are ``"geometry"`` to simply return the geometries unmodified or
                something like ``"Reverse(geometry) as ignored_column_name"`` to perform some modification.
                The name does not matter.
            only_location_type: Get only obstacles of that type, if not set to ``None``

        Yields:
            The geometries as read from the database.
        """
        if only_location_type is None:
            additional_where_constraint = ""
        else:
            additional_where_constraint = f"AND location_type = {only_location_type.value}"

        # `IsValid(wrapped_geometry)` excludes empty geometries which can sometimes occur
        yield from self._read_from_sql(
            f"""
            WITH temptable AS (
                SELECT id, location_type, name, ({geometry_column}) as wrapped_geometry
                FROM Obstacles
                WHERE wrapped_geometry IS NOT NULL AND IsValid(wrapped_geometry)
                      {additional_where_constraint}
            )
            SELECT id, location_type, name, AsBinary(wrapped_geometry) as geometry
            FROM temptable
            """
        )

    def _read_from_sql(self, sql_statement: str) -> Generator[PolarGeometry, None, None]:  # noqa: C901
        """Reads geometries for a given complete SQL query.

        Supports reading these geometry types and maps them to instances of
        :attr:`pyrate.plan.geometry.PolarGeometry`:

        - ``Point``
        - ``LineString`` and ``MultiLineString``
        - ``Polygon`` and ``MultiPolygon``

        Args:
            sql_statement: The SQL statement to query with and read geometries from

        Yields:
            The geometries as read from the database.
        """
        with closing(self._connection.execute(sql_statement)) as cursor:
            # This should be theoretically parallelizable, but was not required as of now;
            # keep in mind that `cursor.fetchall()` returns a list, not a generator
            for (identifier, location_type, name, binary_geometry) in cursor.fetchall():
                parsed_geometry = shapely.wkb.loads(binary_geometry)
                geometry_type = parsed_geometry.type

                # The database contains only the geometry types Point, LineString and Polygon.
                # However, depending on the performed operation, some entries might be cut into
                # MultiLineString or MultiPolygon, so we need to be able to decode them too.
                # MultiPoint can currently not occur.

                def to_polygon(
                    polygon: Polygon, unique_identifier: int, name=name, location_type=location_type
                ) -> PolarPolygon:
                    locations = [PolarLocation(y, x) for (x, y) in polygon.exterior.coords]
                    return PolarPolygon(locations, LocationType(location_type), name, unique_identifier)

                def to_route(
                    line_string: LineString, unique_identifier: int, name=name, location_type=location_type
                ) -> PolarRoute:
                    locations = [PolarLocation(y, x) for (x, y) in line_string.coords]
                    return PolarRoute(locations, LocationType(location_type), name, unique_identifier)

                if geometry_type == "Point":
                    point = cast(Point, parsed_geometry)
                    yield PolarLocation(
                        latitude=point.y,
                        longitude=point.x,
                        location_type=LocationType(location_type),
                        name=name,
                        identifier=identifier,
                    )

                elif geometry_type == "LineString":
                    yield to_route(cast(LineString, parsed_geometry), identifier)

                elif geometry_type == "MultiLineString":
                    for index, route in enumerate(parsed_geometry.geoms):
                        # Make identifier unique by best effort
                        yield to_route(cast(LineString, route), unique_identifier=identifier + index)

                elif geometry_type == "Polygon":
                    yield to_polygon(cast(Polygon, parsed_geometry), identifier)

                elif geometry_type == "MultiPolygon":
                    for index, polygon in enumerate(parsed_geometry.geoms):
                        # Make identifier unique by best effort
                        yield to_polygon(cast(Polygon, polygon), unique_identifier=identifier + index)

                else:  # pragma: no cover
                    # This should never happen in a well-formatted database
                    raise RuntimeError(f'illegal geometry type "{geometry_type}" returned')

    def clear(self) -> None:
        """Deletes all obstacles from the database, but does not touch the table structure or indices."""

        with self._connection:  # auto-commits at the end
            self._connection.execute("DELETE FROM Obstacles").close()

    def count_geometries(self) -> int:
        """Counts all obstacles in the database."""

        with closing(self._connection.execute("SELECT COUNT(*) FROM Obstacles")) as cursor:
            result = cursor.fetchone()

        return cast(int, result[0])  # needs to be cast as the result is untyped

    def count_vertices(self) -> int:
        """Counts all vertices of all obstacles in the database."""

        statement = "SELECT SUM(ST_NPoints(geometry)) FROM Obstacles"
        with closing(self._connection.execute(statement)) as cursor:
            result = cursor.fetchone()

        count = cast(Optional[int], result[0])  # needs to be cast as the result is untyped
        if count is None:
            # this can happen if the database is empty since `SUM` will return `NULL` in that case
            return 0

        return count

    def __len__(self) -> int:
        return self.count_geometries()

    def close(self) -> None:
        """Closes the connection to the database and releases all associated resources.

        It is not really documented in the standard library, but :meth:`sqlite3.Connection.close` and thus
        this method can apparently be called multiple times.
        """

        # The `_connection` is unset if an exception has been thrown, but
        # `close()` might still be called when the database was used as a
        # context manager
        if hasattr(self, "_connection"):
            self._connection.close()  # pragma: no cover

    def __enter__(self) -> "SpatialiteDatabase":
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        self.close()

    # Makes sure the connection is closed when this object ceases to exist
    def __del__(self) -> None:
        self.close()


def _random_name() -> str:
    """Returns a probably unique random name consisting only of Latin letters."""
    return "".join(random.choices(string.ascii_letters, k=32))


def to_wkb(geometry: PolarGeometry) -> bytes:
    """Converts the given geometries into well-known binary (WKB) bytes.

    Args:
        geometry: The polar geometry to be converted

    Returns:
        The WKB representation of the geometry

    Raises:
        NotImplementedError:
            If the geometry type cannot be converted to bytes.
            This will never occur when the type signature is obeyed.
    """

    if isinstance(geometry, PolarLocation):
        return cast(bytes, shapely.wkb.dumps(Point(geometry.longitude, geometry.latitude)))
    if isinstance(geometry, PolarRoute):
        return cast(bytes, shapely.wkb.dumps(LineString(geometry.to_numpy())))
    if isinstance(geometry, PolarPolygon):
        return cast(bytes, shapely.wkb.dumps(Polygon(geometry.to_numpy())))

    # Can never occur if the type signature was obeyed, but better be explicit here
    raise NotImplementedError(f"unknown geometry type: {type(geometry).__name__}")
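A small round-trip sketch (not part of this commit): ``to_wkb()`` encodes a ``PolarLocation`` as a shapely ``Point`` in well-known binary with the longitude as the X component, exactly as stored in the database, and shapely can decode it again. It assumes pyrate and shapely are installed; the coordinates are made up::

    import shapely.wkb

    from pyrate.common.charts.db import to_wkb
    from pyrate.plan.geometry import PolarLocation

    location = PolarLocation(latitude=49.87, longitude=8.65)
    point = shapely.wkb.loads(to_wkb(location))
    print(point.x, point.y)  # 8.65 49.87 -- longitude first, as in the database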
319
pyrate/pyrate/common/charts/s57_files.py
Normal file
319
pyrate/pyrate/common/charts/s57_files.py
Normal file
@ -0,0 +1,319 @@
|
||||
"""Allows to find and read nautical charts. Currently, this only supports IHO S-57 charts.
|
||||
|
||||
Examples:
|
||||
This shows how to recursively read all obstacles/relevant chart objects from a given directory:
|
||||
|
||||
>>> from pyrate.common.charts import ChartFileHandler, S57ChartHandler
|
||||
>>> path_to_charts = "stda/data/charts/noaa_vector/data"
|
||||
>>> # Nothing about this is is specific to the `S57ChartHandler`, so cast it to `ChartFileHandler`
|
||||
>>> handler: ChartFileHandler = S57ChartHandler()
|
||||
>>> polygons = [ #doctest: +SKIP
|
||||
... handler.read_chart_file(chart_file)
|
||||
... for chart_file in handler.find_chart_files(path_to_charts)
|
||||
... ]
|
||||
|
||||
Ideas:
|
||||
- Maybe use `Fiona <https://pypi.org/project/Fiona/>`__ as an alternative?
|
||||
|
||||
Resources:
|
||||
- Documentation on the S-57 file format and the relevant parts of GDAL:
|
||||
- https://gdal.org/python/osgeo.ogr-module.html
|
||||
- https://gdal.org/drivers/vector/s57.html
|
||||
- https://www.teledynecaris.com/s-57/frames/S57catalog.htm (the entire object catalogue!)
|
||||
- https://gdal.org/api/python_gotchas.html (!)
|
||||
- Examples and Cookbooks:
|
||||
- https://pcjericks.github.io/py-gdalogr-cookbook/vector_layers.html
|
||||
- and more general: https://pcjericks.github.io/py-gdalogr-cookbook/index.html
|
||||
- https://lists.osgeo.org/pipermail/gdal-dev/2008-April/016767.html
|
||||
- Helpers:
|
||||
- The program QGIS is very helpful because it can open S-57 files visually.
|
||||
"""
|
||||
|
||||
# Python standard
|
||||
from abc import ABC
|
||||
from abc import abstractmethod
|
||||
from hashlib import sha1
|
||||
import os
|
||||
import os.path
|
||||
from pathlib import Path
|
||||
import sys
|
||||
from warnings import catch_warnings
|
||||
from warnings import simplefilter
|
||||
from warnings import warn
|
||||
|
||||
# Typing
|
||||
from typing import Generator
|
||||
from typing import Mapping
|
||||
from typing import Optional
|
||||
from typing import Tuple
|
||||
from typing import Union
|
||||
|
||||
# Planning primitives
|
||||
from pyrate.plan.geometry import LocationType
|
||||
from pyrate.plan.geometry import PolarLocation
|
||||
from pyrate.plan.geometry import PolarPolygon
|
||||
|
||||
# Allow osgeo to be missing
|
||||
# Set to True if the osgeo is available, or False if not
|
||||
_OSGEO_PRESENT: bool
|
||||
try:
|
||||
# This emits warnings (at least on Python 3.8)
|
||||
with catch_warnings():
|
||||
simplefilter("ignore", DeprecationWarning, lineno=8)
|
||||
from osgeo import gdal
|
||||
from osgeo import ogr
|
||||
except ImportError as _error: # pragma: no cover
|
||||
_OSGEO_PRESENT = False
|
||||
warn(
|
||||
"Could not import package osgeo. Please install it as described in the README. "
|
||||
f"Error was: {_error}"
|
||||
)
|
||||
del _error
|
||||
else:
|
||||
_OSGEO_PRESENT = True
|
||||
ogr.UseExceptions()
|
||||
|
||||
|
||||
#: Currently there are only locations and polygons, see :meth:`S57ChartHandler._create_obstacle`
|
||||
PolarChartGeometry = Union[PolarLocation, PolarPolygon]
|
||||
|
||||
|
||||
class ChartFileHandler(ABC):
|
||||
"""This is a generic class for handling chart files, that defines a common interface."""
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def find_chart_files(search_path: Union[str, "os.PathLike[str]"]) -> Generator[Path, None, None]:
|
||||
"""Recursively find all files that can be handled by this handler.
|
||||
|
||||
Args:
|
||||
search_path: The path to search in recursively. Follows symlinks.
|
||||
|
||||
Yields:
|
||||
str: A path per found file
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def read_chart_file(
|
||||
self, path: Union[str, "os.PathLike[str]"]
|
||||
) -> Generator[PolarChartGeometry, None, None]:
|
||||
"""Reads a chart file and converts the relevant layers/features into ChartObstacles.
|
||||
|
||||
Args:
|
||||
path: The path to a chart file of the right format
|
||||
"""
|
||||
|
||||
|
||||
class S57ChartHandler(ChartFileHandler):
|
||||
"""Reads IHO S-57 chart files. The returned geometries are *not* checked for validity.
|
||||
|
||||
These chart objects are extracted from the source files:
|
||||
|
||||
- Landmasses (from S-57 object type ``LNAM``)
|
||||
- Depth values (from S-57 object type ``DEPARE``, via attribute ``DRVAL2``, assumed to be in meters)
|
||||
- Buoys (from S-57 object type ``BOY*``, e.g. ``BOYCAR``)
|
||||
- Possibly more in the future
|
||||
|
||||
The identifiers of the created objects are created deterministically from the chart name and the already
|
||||
contained identifiers. They are supposed to be unique across all charts. They are created by first
|
||||
assembling a string that is guaranteed to be a globally unique identifier from the chart file name and the
|
||||
``LNAM`` field. Then, the string is hashed and truncated to form a 63-bit identifier.
|
||||
|
||||
The names of the objects are created like this:
|
||||
``{chart file name}#{chart-unique alphanumeric identifier} ({human-readable type}): "{common name}"``.
|
||||
|
||||
All objects are associated with the applicable :class:`pyrate.plan.geometry.LocationType`.
|
||||
|
||||
Raises:
|
||||
ImportError: If the :mod:`osgeo` package is missing
|
||||
"""
    def __init__(self):
        if not _OSGEO_PRESENT:  # pragma: no cover
            raise ImportError('the "osgeo" package must be installed for this handler to function')

    #: This maps layer names to the corresponding parameters for S57ChartHandler._create_obstacle(...)
    #: These are not all objects but merely the ones which are trivial to map.
    _SIMPLE_MAPPINGS: Mapping[str, Tuple[LocationType, str]] = {
        "LNDARE": (LocationType.LAND, "Landmass"),
        "BOYCAR": (LocationType.OBSTRUCTION, "Buoy (BOYCAR)"),
        "BOYINB": (LocationType.OBSTRUCTION, "Buoy (BOYINB)"),
        "BOYISD": (LocationType.OBSTRUCTION, "Buoy (BOYISD)"),
        "BOYLAT": (LocationType.OBSTRUCTION, "Buoy (BOYLAT)"),
        "BOYSAW": (LocationType.OBSTRUCTION, "Buoy (BOYSAW)"),
        "BOYSPP": (LocationType.OBSTRUCTION, "Buoy (BOYSPP)"),
        # TODO(Felix): Should be included later on; See #19
        # "OBSTRN": (LocationType.OBSTRUCTION, "Obstruction"),
        # "OFSPLF": (LocationType.OBSTRUCTION, "Platform"),
        # "OSPARE": (LocationType.OBSTRUCTION, "Production Area/Wind farm"),
        # "PILPNT": (LocationType.OBSTRUCTION, "Post"),
        # "MIPARE": (LocationType.OBSTRUCTION, "Military Exercise Area"),
        # "DMPGRD": (LocationType.OBSTRUCTION, "Dumping Ground"),
        # TODO(Felix): maybe later add anchorage and water sport; See #19
    }

    @staticmethod
    def find_chart_files(search_path: Union[str, "os.PathLike[str]"]) -> Generator[Path, None, None]:
        for root, _, files in os.walk(str(search_path), followlinks=True):
            for file in files:
                if file.endswith(".000"):
                    # assume it is an IHO S-57 file
                    yield Path(root) / file
                # else: ignore the file

    def read_chart_file(
        self, path: Union[str, "os.PathLike[str]"]
    ) -> Generator[PolarChartGeometry, None, None]:
        """Reads a chart file and converts the relevant layers/features into ChartObstacles.

        Args:
            path: The path to the S-57 chart file (e.g. ``something.000``)

        Yields:
            All relevant obstacles with globally unique and deterministic names

        Raises:
            FileNotFoundError: If the database file(s) is/are missing
            IOError: If the database file(s) cannot be opened for another reason
        """
        file_path = str(path)

        if not os.path.exists(file_path):
            raise FileNotFoundError(f"cannot open dataset: {file_path}")

        # open database
        dataset = ogr.Open(file_path, gdal.GA_ReadOnly)
        if not dataset:
            raise IOError(f"cannot open dataset (invalid file): {file_path}")

        file_name = os.path.splitext(os.path.basename(file_path))[0]
        file_name_bytes = file_name.encode()

        # read contents
        for i in range(int(dataset.GetLayerCount())):
            layer = dataset.GetLayerByIndex(i)
            for geometry, feature_id in S57ChartHandler._convert_layer_to_obstacles(layer):

                # prepend the name of the file to make it unique and ease lookup of objects in the source
                # this is also required because the LNAM field is not guaranteed to be unique across files
                geometry.name = f"{file_name}#{geometry.name}"

                # hash a combination of file name and feature identifier as that together is globally unique
                hashed_id = sha1(file_name_bytes + feature_id.encode()).digest()
                # truncate to 64 bit and create an int from it
                identifier = int.from_bytes(hashed_id[-8:], sys.byteorder, signed=True)
                # cut off the most-significant bit to arrive at 63 bits
                geometry.identifier = identifier & 0x7F_FF_FF_FF_FF_FF_FF_FF

                yield geometry

    @staticmethod
    def _convert_layer_to_obstacles(
        layer: ogr.Layer,
    ) -> Generator[Tuple[PolarChartGeometry, str], None, None]:
        """Converts the relevant obstacles of a layer into :attr:`s57_files.PolarChartGeometry`.

        Args:
            layer: The layer to search in

        Yields:
            For each relevant feature in the layer: a location or polygon and its feature ID
        """
        layer_name = layer.GetName()

        # we first do the more complicated stuff and then convert using S57ChartHandler._SIMPLE_MAPPINGS

        if layer_name == "DEPARE":  # "depth area"
            for feature in layer:
                # Warning: we assume these depths are given in meters, which could be wrong in some cases
                # but worked in our tests
                depth_max = feature["DRVAL2"]

                if depth_max <= 5:
                    yield from S57ChartHandler._create_obstacle(
                        feature, "Depth <= 5m", LocationType.SHALLOW_WATER
                    )
                elif depth_max <= 10:
                    yield from S57ChartHandler._create_obstacle(
                        feature, "Depth <= 10m", LocationType.SHALLOW_WATER
                    )
                elif depth_max <= 20:
                    yield from S57ChartHandler._create_obstacle(
                        feature, "Depth <= 20m", LocationType.SHALLOW_WATER
                    )
                elif depth_max <= 50:
                    yield from S57ChartHandler._create_obstacle(
                        feature, "Depth <= 50m", LocationType.SHALLOW_WATER
                    )

        else:
            if layer_name in S57ChartHandler._SIMPLE_MAPPINGS:
                location_type, human_readable_type = S57ChartHandler._SIMPLE_MAPPINGS[layer_name]
                for feature in layer:
                    yield from S57ChartHandler._create_obstacle(feature, human_readable_type, location_type)

    @staticmethod
    def _create_obstacle(
        feature: ogr.Feature,
        human_readable_type: str,
        location_type: LocationType,
    ) -> Generator[Tuple[PolarChartGeometry, str], None, None]:
        """Creates a point or area obstacle from a given feature.

        Args:
            feature: The feature to transform
            human_readable_type: A human-readable string describing what this is, like ``"landmass"``
            location_type: The location type to be used

        Yields:
            (1) A location or polygon that represents an obstacle
            (2) A feature ID for that obstacle that is unique within the chart file (but not globally)
        """
        # This ID is guaranteed to be unique within the chart file and composed of AGEN, FIDN, and FIDS
        feature_id: str = feature["LNAM"]
        assert feature_id is not None, "the LNAM field is mandatory for all objects"

        # Remark: feature.IsFieldSetAndNotNull("OBJNAM") seems to work but logs tons of errors to stderr
        # It is not mandatory for all types of chart objects
        object_name: Optional[str]
        try:
            object_name = feature["OBJNAM"]  # might be None
        except (ValueError, KeyError):
            object_name = None

        if object_name is None:
            object_name = "---"
        else:
            # Replace broken unicode text (surrogates)
            object_name = object_name.encode("utf-8", "replace").decode("utf-8")

        # Construct the obstacle's name
        name = f'{feature_id} ({human_readable_type}): "{object_name}"'

        # Extract the geometries (as the feature may or may not contain a geometry collection)
        geometry = feature.GetGeometryRef()
        geometry_type = geometry.GetGeometryType()

        if geometry_type == ogr.wkbPoint:
            point = PolarLocation(
                latitude=geometry.GetY(), longitude=geometry.GetX(), name=name, location_type=location_type
            )
            yield point, feature_id

        elif geometry_type == ogr.wkbLineString:
            # Ignore this feature as there are currently no features being extracted that are
            # LineStrings and relevant for navigation
            # TODO(Someone): One should verify that this is okay; See #125
            warn(f"Ignoring LineString geometry in chart: {name}")

        elif geometry_type == ogr.wkbPolygon:
            # TODO(Felix): We throw away the inner rings (i.e. the holes); See #106
            outer_ring = geometry.GetGeometryRef(0)
            points = [PolarLocation(latitude=lat, longitude=lon) for lon, lat in outer_ring.GetPoints()]
            yield PolarPolygon(points, name=name, location_type=location_type), feature_id

        else:
            # Apparently, no other geometries appear in charts
            raise NotImplementedError(f"Cannot handle geometry type {ogr.GeometryTypeToName(geometry_type)}")
5
pyrate/pyrate/common/math/__init__.py
Normal file
@ -0,0 +1,5 @@
"""Provides mathematical classes that are useful throughout Pyrate's codebase."""

from .gaussian import Gaussian

__all__ = ["Gaussian"]
132
pyrate/pyrate/common/math/gaussian.py
Normal file
@ -0,0 +1,132 @@
"""This module includes an abstraction of Gaussian distributions."""

# Typing
from typing import cast

# Mathematics
from numpy import ndarray
from scipy.stats import multivariate_normal


class Gaussian:

    """A weighted multivariate Gaussian distribution.

    Examples:
        A Gaussian can simply be created from a mean vector and a covariance matrix (and an optional
        weight):

        >>> from numpy import array
        >>> from numpy import vstack
        >>> mean = vstack([0.0, 0.0])
        >>> covariance = array([[1.0, 0.0], [0.0, 1.0]])
        >>> N = Gaussian(mean, covariance, weight=1.0)
        >>> N(vstack([0.0, 0.0]))  # doctest: +ELLIPSIS
        0.159...

        Two Gaussians are equal if and only if all attributes are equal:

        >>> N == N
        True
        >>> other_covariance = array([[99.0, 0.0], [0.0, 99.0]])
        >>> other_N = Gaussian(mean, other_covariance, weight=1.0)
        >>> other_N(vstack([10.0, 10.0]))  # doctest: +ELLIPSIS
        0.000585...
        >>> N == other_N
        False
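        The filter-style shorthand properties ``x``, ``P`` and ``w`` (see below) are plain aliases for the
        mean, covariance and weight:

        >>> N.w
        1.0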
    Args:
        mean: The mean of the distribution as a column vector, of dimension ``(n, 1)``
        covariance: The covariance matrix of the distribution, of dimension ``(n, n)``
        weight: The weight of the distribution, e.g. within a mixture model

    References:
        - https://en.wikipedia.org/wiki/Multivariate_normal_distribution
    """

    def __init__(self, mean: ndarray, covariance: ndarray, weight: float = 1.0) -> None:
        # Sanity checks on given parameters
        assert len(mean.shape) == 2 and mean.shape[1] == 1, "Mean needs to be a column vector!"
        assert len(covariance.shape) == 2, "Covariance needs to be a 2D matrix!"
        assert covariance.shape[0] == covariance.shape[1], "Covariance needs to be a square matrix!"
        assert covariance.shape[0] == mean.shape[0], "Dimensions of mean and covariance don't fit!"

        # Assign values
        self.mean = mean
        self.covariance = covariance
        self.weight = weight

    # ######################################
    # Properties following a common filter notation
    # pylint: disable=invalid-name

    @property
    def x(self) -> ndarray:
        """A shorthand for the distribution's mean.

        Returns:
            The mean, of dimension ``(n, 1)``
        """

        return self.mean

    @x.setter
    def x(self, value: ndarray) -> None:
        self.mean = value

    @property
    def P(self) -> ndarray:
        """A shorthand for the distribution's covariance matrix.

        Returns:
            The covariance, of dimension ``(n, n)``
        """

        return self.covariance

    @P.setter
    def P(self, value: ndarray) -> None:
        self.covariance = value

    @property
    def w(self) -> float:
        """A shorthand for the distribution's weight.

        Returns:
            The weight of this distribution
        """

        return self.weight

    @w.setter
    def w(self, value: float):
        self.weight = value

    def __call__(self, value: ndarray) -> float:
        """Evaluate the Gaussian at the given location.

        Args:
            value: Where to evaluate the Gaussian, of dimension ``(n, 1)``

        Returns:
            The probability density at the given location
        """

        # Compute weighted probability density function
        distribution = multivariate_normal(mean=self.mean.T[0], cov=self.covariance)

        return self.weight * cast(float, distribution.pdf(value.T[0]))

    def __eq__(self, other) -> bool:
        """Checks if two multivariate normal distributions are equal.

        Args:
            other: The distribution to compare with

        Returns:
            Whether the two distributions are the same
        """

        return (
            cast(bool, (self.mean == other.mean).all())
            and cast(bool, (self.covariance == other.covariance).all())
            and self.weight == other.weight
        )
39
pyrate/pyrate/common/raster_datasets/__init__.py
Normal file
@ -0,0 +1,39 @@
"""This module contains methods to access raster data sets (as opposed to vector data sets).

The :class:`~pyrate.common.raster_datasets.geo_datasets.DataSetAccess` allows reading data arrays from
raster datasets using query windows. It also computes such windows for a given point and radius.
However, client code will often want to use some transformed properties of these datasets.
To that end, a concrete :class:`~pyrate.common.raster_datasets.transformer_base.BaseTransformer` can be used,
either implemented in the :mod:`~pyrate.common.raster_datasets.transformers_concrete` module
or in some client code.
Transformers query some data source for given nodes and radii of influence and then return transformed
property vectors for the query nodes.
By the way: instances of :class:`~pyrate.plan.graph.geo_graph.GeoNavigationGraph`
directly accept instances of :class:`~pyrate.common.raster_datasets.transformer_base.BaseTransformer`
to generate properties for nodes in the graph.

.. inheritance-diagram::
    pyrate.common.raster_datasets.transformers_concrete.ConstantTransformer
    pyrate.common.raster_datasets.transformers_concrete.BathymetricTransformer
    :parts: 2
    :top-classes: pyrate.common.raster_datasets.transformer_base.BaseTransformer

You might want to set some options for the underlying *rasterio*/*GDAL* drivers, like
`GDAL_CACHEMAX <https://trac.osgeo.org/gdal/wiki/ConfigOptions#GDAL_CACHEMAX>`_
("If its value is small (less than 100000), it is assumed to be measured in megabytes, otherwise in bytes."):

.. code-block:: python

    with rasterio.Env(GDAL_CACHEMAX=1024):
        with DataSetAccess(...) as data_set:
            pass  # do cool stuff

"""

from .geo_datasets import DataSetAccess
from .transformer_base import BaseDatasetTransformer
from .transformer_base import BaseTransformer

# Don't directly expose transformers_concrete here to keep it simple

__all__ = ["BaseTransformer", "BaseDatasetTransformer", "DataSetAccess"]
415
pyrate/pyrate/common/raster_datasets/geo_datasets.py
Normal file
@ -0,0 +1,415 @@
"""
This module provides an abstraction over geographical data sets which can be used to efficiently retrieve
properties for many nodes on a possibly irregular grid.

The implementation is currently single-threaded but should be straightforward to parallelize.
Many commonly used datasets require a couple of gigabytes of memory, so make sure you do not open too many
at once.
"""

# Standard library
import math

# Typing
from typing import Any
from typing import cast
from typing import ContextManager
from typing import Optional
from typing import Tuple
from typing import Union

# Scientific
from numpy import allclose
from numpy import clip
from numpy import hstack
from numpy import linspace
from numpy import meshgrid
from numpy import ndarray

# Raster data set library
import rasterio
import rasterio.coords
import rasterio.io
import rasterio.windows
from rasterio.windows import Window

# Geographic helpers
from pyrate.plan.geometry.geospatial import MAXIMUM_EARTH_CIRCUMFERENCE
from pyrate.plan.geometry.helpers import meters2rad

class DataSetAccess(ContextManager["DataSetAccess"]):
    """Represents a global raster geo dataset that can be efficiently queried for a set of nodes.

    See :meth:`~.get_bounding_windows_around` for why two bounding boxes/windows are supported by some
    methods.

    Notes:
        The type of the data that is read depends on the dataset that is used.

    Warning:
        This class shall only be used as a context manager (using the ``with`` syntax) in order to
        initialize and clean up any resources that are required and possibly several gigabytes large. The
        behaviour of this class is undefined after the context has been left, as the internal data array
        is deleted to free up memory. Also, the data is only guaranteed to be available once the context
        was entered.

    Warning:
        There are many subtle pitfalls with processing geographical datasets. They are only alleviated to
        some degree by software abstractions like *GDAL*, *rasterio* and also *Pyrate*, and are often
        badly documented. For instance: all raster datasets define a grid (well, the *raster*). The
        creator(s) of the dataset define whether the data at each entry refers to the center of the cell
        or to the grid line of the raster [1]. However, in a lot of software handling these datasets (like
        *GDAL* and *rasterio*), it is not always clear which interpretation is used. *rasterio* seems to
        always assume grid-centered values, and so does Pyrate. This could lead to small discrepancies
        between the data extracted from the dataset and its real structure. On large-scale queries
        spanning many cells, this will not be a problem.

    While **many** libraries like *rasterio* exist for querying raster datasets at grid-like points or even
    irregular points, none seems to allow getting all raw data points of some dataset around some point
    with some radius [2]. This was, however, required since we wanted to calculate statistics like "number
    of points below sea level" for all data points around a given center point and radius. If we
    interpolated the dataset to the center point, we would only get some local information and not cover
    the entire area within the radius. The following libraries were investigated but did not provide the
    needed functionality:

    - `GDALRasterBand tool from GDAL <https://gdal.org/api/gdalrasterband_cpp.html>`_

    - `grdtrack tool from GMT <https://docs.generic-mapping-tools.org/latest/grdtrack.html>`_
      or `its Python version <https://www.pygmt.org/latest/api/generated/pygmt.grdtrack.html>`_

    - `geonum's topodata module <https://geonum.readthedocs.io/en/latest/api.html#module-geonum.topodata>`_

    Args:
        dataset: A :mod:`rasterio` raster dataset to read from or a path to the file to open.
            It must cover the entire planet with ``(0, 0)`` degrees being in the center.
        raster_band_index: the index of the band (the "layer") of the raster dataset (*GDAL*/*rasterio*
            terminology)

    Attributes:
        dataset: the underlying :mod:`rasterio` dataset; read-only
        raster_band_index: the index of the band (the "layer") of the raster dataset (*GDAL*/*rasterio*
            terminology); read-only

    References:
        - [1] This concept is also called grid- vs cell-registration. See also
          `this Earth Science post <https://earthscience.stackexchange.com/q/4868/10692>`__.
        - [2] Some `kind answers <https://lists.osgeo.org/pipermail/gdal-dev/2020-December/053125.html>`__
          on the *gdal-dev* mailing list that did not help.
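    Example:
        A minimal sketch of the intended query pattern (the file name is purely illustrative):

        .. code-block:: python

            with DataSetAccess("topography/earth2014.tif") as data_set:
                windows = data_set.get_bounding_windows_around(
                    center_latitude=0.5, center_longitude=1.0, radius=10_000.0
                )
                lats, lons = data_set.lat_lon_meshgrid_for(*windows, radians=True)
                data = data_set.data_for(*windows)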
"""
|
||||
|
||||
#: The bounding box that any dataset wrapped by this class must match
|
||||
_DATASET_BOUNDING_BOX = rasterio.coords.BoundingBox(left=-180.0, bottom=-90.0, right=180.0, top=90.0)
|
||||
|
||||
def __init__(self, dataset: Union[str, rasterio.DatasetReader], raster_band_index: int = 1) -> None:
|
||||
self.dataset = rasterio.open(dataset) if isinstance(dataset, str) else dataset
|
||||
|
||||
self.raster_band_index = raster_band_index
|
||||
|
||||
self._data_array: ndarray
|
||||
self._dataset_window = Window.from_slices(
|
||||
rows=(0, self.dataset.height),
|
||||
cols=(0, self.dataset.width),
|
||||
)
|
||||
|
||||
assert allclose(DataSetAccess._DATASET_BOUNDING_BOX, self.dataset.bounds, atol=1e-12), (
|
||||
"the dataset needs to cover the entire planet with (0, 0) degrees being in the center but was "
|
||||
+ repr(self.dataset.bounds)
|
||||
)
|
||||
|
||||
    def get_bounding_windows_around(
        self, center_latitude: float, center_longitude: float, radius: float
    ) -> Tuple[Window, Optional[Window]]:
        """Computes bounding boxes/windows around the given center containing all points within the radius.

        This method will return one bounding box per coordinate pair for *most* locations on earth.
        However, near longitude +180°/-180° the dataset wraps around at the side, and as such, a query
        around such a center consists of one or two bounding boxes. Due to the internal design of numpy,
        such a "wrapping" query cannot be expressed in a single slice [1] [2]. Thus, this method might
        return one or two windows.
        Correspondingly, :meth:`~.lat_lon_meshgrid_for` and :meth:`~.data_for` take one or two windows each.

        Args:
            center_latitude: The latitude of the center of the box, in radians
            center_longitude: The longitude of the center of the box, in radians
            radius: The radius around the center that the box should include. Strictly positive, in meters.

        Returns:
            One or two integer bounding windows as a tuple; might be slightly overestimated (by design)

        References:
            - [1] Some `answers <https://lists.osgeo.org/pipermail/gdal-dev/2020-December/053132.html>`_
              on the *gdal-dev* mailing list that did not help

            - [2] Numpy docs on
              `Basic Slicing and Indexing
              <https://numpy.org/doc/stable/reference/arrays.indexing.html#basic-slicing-and-indexing>`_
        """
        # pylint: disable=too-many-locals

        assert radius > 0.0, "radius must be strictly positive"

        center_longitude = math.degrees(center_longitude)
        center_latitude = math.degrees(center_latitude)

        delta_lat = math.degrees(meters2rad(radius))  # uniform across the globe
        assert delta_lat > 0.0, "is the input in radians?"

        # Better slightly overestimate it by using the earth circumference at the equator
        # That is about 1% larger than the mean circumference
        earth_circumference_at_lat = math.cos(math.radians(center_latitude)) * MAXIMUM_EARTH_CIRCUMFERENCE
        delta_lon = radius / earth_circumference_at_lat * 360.0
        assert delta_lon > 0.0, "is the input in radians?"

        # Top & bottom are simple: just clip the latitude at the poles
        # Left & right are a bit trickier since that possibly requires the creation of two slices

        # These four coordinates determine the primary window
        left = center_longitude - delta_lon
        bottom = max(center_latitude - delta_lat, -90.0)
        right = center_longitude + delta_lon
        top = min(center_latitude + delta_lat, +90.0)

        # `additional_window` determines the extra query if wrapping near longitude (+/-) 180° occurs
        # `window` is geographically more west-ward than `additional_window`, if the latter exists
        # Keep in mind though, that the common border might lie on +/- 180° longitude and thus on the
        # border of the dataset/data array
        window: Window
        additional_window: Optional[Window]

        # Handle the horizontal overflow of the window
        # This also handles the case where 2*radius is larger than the width of the dataset
        if left < -180.0:  # Overflow on the left
            overshoot = clip(-(left + 180), 0.0, 360.0)
            left_wrapped = +180 - overshoot
            # It might be the case that it also overflows on the right if the overall radius was so large
            # that the window(s) would wrap around the world more than once. This can especially happen
            # at high latitudes, where horizontally wrapping around the globe can happen at arbitrarily
            # small radii/window sizes near the poles. We thus clip it to (in sum) only cover the world
            # once.
            right = clip(right, -180.0, left_wrapped)

            # If the bounds overflow on the left, make the wrapped (i.e. the non-clipped) one the primary
            # window, as it is geographically more west-ward
            window = self.dataset.window(left_wrapped, bottom, +180.0, top)
            additional_window = self.dataset.window(
                -180.0, bottom, right, top
            )  # Possibly a window with zero width

        elif right > +180.0:  # Overflow on the right
            overshoot = clip(right - 180, 0.0, 360.0)
            right_wrapped = -180 + overshoot
            # See the previous case "Overflow on the left" for an analogous explanation
            left = clip(left, right_wrapped, +180.0)

            # If the bounds overflow on the right, make the clipped (i.e. the non-wrapped) one the primary
            # window, as it is geographically more west-ward
            window = self.dataset.window(left, bottom, +180.0, top)

            # `right_wrapped == -180` similar to above cannot occur here since then we must have landed in
            # the `left < -180.0` branch instead
            assert right_wrapped > -180, "The window would extend zero meters from east to west"

            additional_window = self.dataset.window(-180.0, bottom, right_wrapped, top)

        else:  # No overflow at the bounds occurred, so we only need one `window`
            window = self.dataset.window(left, bottom, right, top)
            additional_window = None

        # Jointly round the window(s) to integers and return the result
        return self._round_windows_ceil(window, additional_window)

    def _round_windows_ceil(
        self, window_1: Window, window_2: Optional[Window]
    ) -> Tuple[Window, Optional[Window]]:
        """Rounds one or two windows to integer types and avoids creating an overlap between them.

        Always rounds to the larger windows if non-integer bounds are given. This guarantees that at least
        all points initially given as the window(s) are also included in the resulting windows.

        The actual rounding is done in :func:`rasterio.windows.window_index`.

        Args:
            window_1: The left window
            window_2: An optional right window (geographically touches the eastern/right side of
                ``window_1``). Keep in mind though, that the common border might lie on +/- 180°
                longitude and thus on the border of the dataset/data array.

        Returns:
            One or two windows, rounded to :class:`int` values.
            Due to rounding, this method may return only a single window even if two were initially
            provided.
        """
        (_, (_, w1_old_right)) = window_1.toranges()

        # Round the first window
        # The actual rounding is done in :func:`rasterio.windows.window_index`
        window_1 = Window.from_slices(*cast(Tuple[slice, slice], rasterio.windows.window_index(window_1)))
        # The rounding may move it beyond the bounds of the dataset, so clip it at the array borders
        window_1 = window_1.intersection(self._dataset_window)

        if window_2 is not None:
            # Adjust `window_2` in the correct direction for a possibly created overlap
            # Afterward, round it too

            # Unpack the bounds that we will work with
            ((w1_top, w1_bottom), (_, w1_right)) = window_1.toranges()
            (_, (left, right)) = window_2.toranges()

            # Correct for the horizontal change that was induced by enlarging the `window_1`
            # This will make `window_2` smaller if their common boundary was not already on a cell border
            left += w1_right - w1_old_right

            # Round away from the existing `window_1`, i.e. to the right/geographically east-ward
            left = int(math.ceil(left))
            right = int(math.ceil(right))

            # There is a 1-cell overlap between the windows that was created by rounding, i.e.
            # ``right == w1_left``. Therefore, we cut one index off.
            right -= 1

            # The case ``left == w1_right`` cannot occur since the second window is always guaranteed to
            # be to the right of the first. We still check that though:
            assert (
                left - w1_right
            ) % self.dataset.width == 0, "this can never happen if the second window is truly to the right"

            # Make sure that the extra window is non-empty and if not, just discard it
            if right - left <= 0:
                window_2 = None

            else:
                # We simply adopt the top and bottom bounds as they are the same in both windows
                window_2 = Window.from_slices(rows=(w1_top, w1_bottom), cols=(left, right))

        # May become obsolete if https://github.com/mapbox/rasterio/pull/2090 gets accepted
        def to_int(win: Window) -> Window:
            ((float_top, float_bottom), (float_left, float_right)) = win.toranges()
            return Window.from_slices(
                rows=(int(float_top), int(float_bottom)), cols=(int(float_left), int(float_right))
            )

        return to_int(window_1), None if window_2 is None else to_int(window_2)

    def _lat_lon_meshgrid_single(self, window: Window, radians: bool) -> Tuple[ndarray, ndarray]:
        """Creates a meshgrid with all coordinates the data set has entries for in the given window.

        Args:
            window: as returned by :meth:`~get_bounding_windows_around`
            radians: if ``True`` return in radians, else in degrees

        Returns:
            A latitude, longitude meshgrid matching the data returned by :meth:`~_data_single`
        """

        # These values are in degrees
        longitude_left, latitude_up = self.dataset.xy(window.row_off, window.col_off)
        longitude_right, latitude_down = self.dataset.xy(
            window.row_off + window.height, window.col_off + window.width
        )

        if radians:
            longitude_left = math.radians(longitude_left)
            latitude_up = math.radians(latitude_up)
            longitude_right = math.radians(longitude_right)
            latitude_down = math.radians(latitude_down)

        coords_lat = linspace(latitude_up, latitude_down, window.height)
        coords_lon = linspace(longitude_left, longitude_right, window.width)

        coords_lat, coords_lon = meshgrid(coords_lat, coords_lon, indexing="ij")
        assert coords_lat.shape == coords_lon.shape

        return coords_lat, coords_lon

    def lat_lon_meshgrid_for(
        self,
        window: Window,
        additional_window: Optional[Window],
        radians: bool,
    ) -> Tuple[ndarray, ndarray]:
        """Creates a meshgrid with all coordinates the data set has entries for in the given windows.

        Args:
            window: as returned by :meth:`~get_bounding_windows_around`
            additional_window: as returned by :meth:`~get_bounding_windows_around`
            radians: if ``True`` return in radians, else in degrees

        Returns:
            A single latitude, longitude meshgrid matching the data returned by :meth:`~data_for`
        """

        coords_lat, coords_lon = self._lat_lon_meshgrid_single(window, radians)

        # append additional window (only if required)
        if additional_window is not None:
            coords_lat_additional, coords_lon_additional = self._lat_lon_meshgrid_single(
                additional_window, radians
            )
            coords_lat = hstack((coords_lat, coords_lat_additional))
            coords_lon = hstack((coords_lon, coords_lon_additional))

        return coords_lat, coords_lon

    def _data_single(self, window: Window) -> ndarray:
        """Get all data points within the given window.

        Notes:
            The type of the data that is read depends on the dataset that is used.
            See ``self.dataset.dtypes``.

        Warnings:
            Never modify the data returned by this method directly! It is only a view into the raw data.

        Args:
            window: A window as returned by :meth:`~get_bounding_windows_around`

        Returns:
            The 2D data array matching the coordinates returned by :meth:`~_lat_lon_meshgrid_single`
        """
        assert hasattr(self, "_data_array"), "DataSetAccess must be used as a context manager and be open"

        # one could read via:
        # self.dataset.read(self.raster_band_index, window=window)
        # however, this does not map the file into memory and is thus about 10x slower than directly using
        # a numpy array

        # keep in mind that the slice will create a view into the raw data, and not a copy
        # this is intentional to make the data access fast
        data: ndarray = self._data_array[window.toslices()]
        assert data.shape == (window.height, window.width)
        return data

    def data_for(self, window: Window, additional_window: Optional[Window]) -> ndarray:
        """Get all data points within the given windows as a single array.

        Notes:
            The type of the data that is read depends on the dataset that is used.
            See ``self.dataset.dtypes``.

        Warnings:
            Never modify the data returned by this method directly! It is only a view into the raw data.

        Args:
            window: as returned by :meth:`~get_bounding_windows_around`
            additional_window: as returned by :meth:`~get_bounding_windows_around`

        Returns:
            The single 2D data array matching the coordinates returned by :meth:`~lat_lon_meshgrid_for`
        """

        result = self._data_single(window)

        # append additional window (only if required)
        if additional_window is not None:
            additional_result = self._data_single(additional_window)
            result = hstack((result, additional_result))

        return result

    def __enter__(self) -> "DataSetAccess":
        self.dataset.__enter__()
        self._data_array = self.dataset.read(self.raster_band_index)
        self._data_array.flags.writeable = False  # make this read-only to prevent accidents
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> Any:
        del self._data_array
        return self.dataset.__exit__(exc_type, exc_val, exc_tb)
147
pyrate/pyrate/common/raster_datasets/transformer_base.py
Normal file
@ -0,0 +1,147 @@
"""
This module provides the base classes for the transformers, which extract property vectors for given query
points from (usually) a geographical dataset.
"""

# Standard library
from abc import ABC
from abc import abstractmethod
from itertools import repeat

# Typing
from typing import Any
from typing import cast
from typing import ContextManager
from typing import Iterable
from typing import Sequence
from typing import Tuple
from typing import Union

# Scientific
from numpy import array
from numpy import empty
from numpy import ndarray
from numpy.typing import DTypeLike
from pandas import DataFrame

# Progress bar
from tqdm import tqdm

# Typing helpers
from .geo_datasets import DataSetAccess

class BaseTransformer(ContextManager["BaseTransformer"], ABC):
    """This class allows querying some data source for a property at each node.

    Subclasses will usually override :meth:`_get_transformed_at` in order to return the data vector for
    some specific node with given latitude and longitude. Note that the result of calling
    :meth:`~get_transformed_at_nodes` is a :class:`pandas.DataFrame`, in order to allow a single
    transformer to return multiple values for each vector if this simplifies or speeds up calculations.

    Querying all nodes can print a progress bar to the command line (see ``show_progress``).

    Warning:
        This class (and any subclasses) shall only be used as a context manager. See
        :class:`~DataSetAccess` for the reasons for this.

    Args:
        structured_dtype: For each column in the query result, a tuple consisting of a human-readable name
            and the (NumPy) data type of the property. This follows the syntax of NumPy's
            `"Structured Datatypes" <https://numpy.org/doc/stable/user/basics.rec.html#structured-datatypes>`_.

    See Also:
        BaseDatasetTransformer
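    Example:
        A minimal subclass sketch (purely illustrative, not one of the shipped transformers):

        .. code-block:: python

            class ZeroTransformer(BaseTransformer):
                # assigns the constant 0.0 to every node
                def __init__(self) -> None:
                    super().__init__([("zero", float)])

                def _get_transformed_at(self, latitude: float, longitude: float, radius: float) -> Tuple:
                    return (0.0,)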
"""
|
||||
|
||||
def __init__(self, structured_dtype: Sequence[Tuple[str, DTypeLike]]) -> None:
|
||||
super().__init__()
|
||||
self.structured_dtype = cast(DTypeLike, structured_dtype)
|
||||
|
||||
@abstractmethod
|
||||
def _get_transformed_at(self, latitude: float, longitude: float, radius: float) -> Tuple:
|
||||
"""Get the property at some specific node, given by its geographical location.
|
||||
|
||||
Args:
|
||||
latitude: The geographical location of the node, in radians
|
||||
longitude: The geographical location of the node, in radians
|
||||
radius: The radius of the area that this node shall represent, in meters
|
||||
|
||||
Returns:
|
||||
A single property vector for each single node.
|
||||
"""
|
||||
|
||||
    def get_transformed_at_nodes(
        self,
        latitudes: ndarray,
        longitudes: ndarray,
        radius: Union[float, ndarray],
        show_progress: bool = False,
    ) -> DataFrame:
        """Computes the property for each individual node. Optionally prints a progress bar.

        Args:
            latitudes: latitude values of all nodes, in radians
            longitudes: longitude values of all nodes, in radians
            radius: the radius around each node that it should represent, in meters; may be an array of
                shape ``(num_nodes, )`` or a single scalar if the radius is uniform
            show_progress: whether to print a nice and simple progress bar

        Returns:
            A :class:`pandas.DataFrame` of all values generated for each (latitude, longitude) node, with
            shape ``(number of nodes, number of properties per node)``
        """
        assert latitudes.shape == longitudes.shape

        radii = repeat(radius) if isinstance(radius, float) else cast(Iterable[float], radius)

        if len(latitudes) > 0:
            result = [
                self._get_transformed_at(latitude, longitude, rad)
                for latitude, longitude, rad in tqdm(
                    zip(latitudes, longitudes, radii),
                    unit=" nodes",
                    unit_scale=True,
                    colour="white",
                    total=len(latitudes),
                    disable=not show_progress,
                )
            ]
        else:
            result = empty((0, len(self.structured_dtype)))  # type: ignore

        assert len(result) == latitudes.shape[0]

        # this also ensures that all property vectors have the same length
        structured_array = array(result, dtype=self.structured_dtype)
        return DataFrame.from_records(structured_array)

    def __enter__(self) -> "BaseTransformer":
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> Any:
        return False

class BaseDatasetTransformer(BaseTransformer, ABC):
    """A specialized dataset transformer which extracts properties from a geographical dataset.

    Args:
        structured_dtype: see constructor argument ``structured_dtype`` in :class:`BaseTransformer`
        dataset: A dataset to read from.
            It is automatically managed when this class is used as a context manager.
    """

    def __init__(self, structured_dtype: Sequence[Tuple[str, DTypeLike]], dataset: DataSetAccess) -> None:
        super().__init__(structured_dtype)
        self.dataset = dataset

    def __enter__(self) -> "BaseDatasetTransformer":
        self.dataset.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> Any:
        super().__exit__(exc_type, exc_val, exc_tb)
        return self.dataset.__exit__(exc_type, exc_val, exc_tb)
163
pyrate/pyrate/common/raster_datasets/transformers_concrete.py
Normal file
@ -0,0 +1,163 @@
"""
This module exposes specific property transformers.

See the `data repository <https://gitlab.sailingteam.hg.tu-darmstadt.de/informatik/data>`_ for details on
the actual data sets referenced and used here.
"""

# Standard library
from enum import auto
from enum import Enum

# Typing
from typing import Any
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple

# Scientific
import numpy
from numpy import clip
from numpy import count_nonzero
from numpy import extract
from numpy import float32
from numpy.typing import DTypeLike

# Helpers and own typing
from ...plan.geometry.helpers import haversine_numpy
from .geo_datasets import DataSetAccess
from .transformer_base import BaseDatasetTransformer
from .transformer_base import BaseTransformer

class ConstantTransformer(BaseTransformer):

    """A very simple transformer class to fill a property with a constant value.

    Args:
        value: The constant value to be added to each node
        dtype: The numpy data type of the resulting property vector field
        name: The name of the property. If set to ``None``, a reasonable default name will be used.
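    Example:
        A quick sketch of typical usage (the values are arbitrary):

        >>> from numpy import array, uint8
        >>> with ConstantTransformer(42, uint8) as transformer:
        ...     frame = transformer.get_transformed_at_nodes(array([0.0, 0.1]), array([0.0, 0.2]), 1000.0)
        >>> len(frame)
        2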
"""
|
||||
|
||||
# pylint: disable=too-few-public-methods
|
||||
|
||||
# idea: could make this class explicitly generic in the type signatures
|
||||
def __init__(self, value: Any, dtype: DTypeLike, name: Optional[str] = None) -> None:
|
||||
name = f"constant value of {value}" if name is None else name
|
||||
super().__init__([(name, dtype)])
|
||||
self.value = value
|
||||
|
||||
def _get_transformed_at(self, latitude: float, longitude: float, radius: float) -> Tuple[Any]:
|
||||
return (self.value,) # return a tuple
|
||||
|
||||
|
||||
class BathymetricTransformer(BaseDatasetTransformer):

    """Extracts values from a given bathymetric dataset, i.e. depth information.

    The datatype for all modes is ``np.float32``. Raises a :class:`ValueError` if no data is found for any
    given query ``(latitude, longitude, radius)``.

    Args:
        dataset: the data set to be used
        modes: a sequence of modes of the values to be extracted; see :class:`~BathymetricTransformer.Modes`
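    Example:
        A minimal sketch of typical usage (the dataset path and variables are purely illustrative):

        .. code-block:: python

            dataset = DataSetAccess("topography/earth2014.tif")
            with BathymetricTransformer(dataset) as transformer:
                frame = transformer.get_transformed_at_nodes(latitudes, longitudes, radius=50_000.0)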
"""
|
||||
|
||||
# pylint: disable=too-few-public-methods
|
||||
|
||||
#: The depth/elevation below which a point is considered navigable, in meters;
|
||||
#: positive values are above sea level, negative ones below
|
||||
NAVIGABLE_BELOW: float = -5.0
|
||||
|
||||
class Modes(Enum):
|
||||
"""The different modes how depth values can be extracted."""
|
||||
|
||||
AVERAGE_DEPTH = auto()
|
||||
"""The average depth, weighted with more emphasis on the area near the query point, in meters.
|
||||
|
||||
This is a visualisation of the mode when applied to the `Earth 2014
|
||||
<https://gitlab.sailingteam.hg.tu-darmstadt.de/informatik/data/-/tree/master/topography/earth2014>`_
|
||||
topographic/bathymetric dataset:
|
||||
|
||||
.. image:: plot_global_bathymetry_depth.png
|
||||
:alt: AVERAGE_DEPTH mode when applied to the Earth 2014 topographic/bathymetric dataset
|
||||
"""
|
||||
|
||||
FRACTION_NAVIGABLE = auto()
|
||||
"""The fraction of data points at which a boat can navigate, as a scalar in :math:`[0, 1]`.
|
||||
|
||||
See :attr:`BathymetricTransformer.NAVIGABLE_BELOW` for what is considered navigable.
|
||||
Only the topographic/bathymetric height/depth value is used for determining navigability as an
|
||||
approximation, and not actual water coverage.
|
||||
To not count the Netherlands as navigable, the value is set to a vale a little bit below zero,
|
||||
where zero means sea level.
|
||||
|
||||
This is a visualisation of the mode when applied to the `Earth 2014
|
||||
<https://gitlab.sailingteam.hg.tu-darmstadt.de/informatik/data/-/tree/master/topography/earth2014>`_
|
||||
topographic/bathymetric dataset:
|
||||
|
||||
.. image:: plot_global_bathymetry_fraction_navigable.png
|
||||
:alt: FRACTION_NAVIGABLE mode when applied to the Earth 2014 topographic/bathymetric dataset
|
||||
"""
|
||||
|
||||
@property
|
||||
def column_name(self) -> str:
|
||||
"""Returns the name that is used for the dataframe column."""
|
||||
return f"bathymetric data ({self.name})"
|
||||
|
||||
    def __init__(self, dataset: DataSetAccess, modes: Sequence[Modes] = tuple(Modes)) -> None:
        structured_dtype = [(mode.column_name, float32) for mode in modes]
        super().__init__(structured_dtype, dataset)
        self.modes = modes

    def _get_transformed_at(self, latitude: float, longitude: float, radius: float) -> Tuple:
        # pylint: disable=too-many-locals
        assert radius > 0, "the radius must be positive"

        windows = self.dataset.get_bounding_windows_around(
            center_latitude=latitude, center_longitude=longitude, radius=radius
        )
        lats, lons = self.dataset.lat_lon_meshgrid_for(*windows, radians=True)
        assert lats.shape == lons.shape

        # the depth data is negative for below-sea-level points and positive for landmasses
        depth = self.dataset.data_for(*windows)  # as int16 !
        assert depth.shape == lats.shape

        # get the distances to all points
        distances = haversine_numpy(lats, lons, latitude, longitude)

        result: List[float] = []  # will be transformed to float32 later

        for mode in self.modes:  # respect the ordering

            if mode is BathymetricTransformer.Modes.AVERAGE_DEPTH:
                # we simply use the inverse distance as a weight for the weighted arithmetic mean
                weights = clip(1.0 - (distances / radius), 0.0, 1.0)
                try:
                    average = numpy.average(depth.astype(float32), weights=weights)
                except ZeroDivisionError as error:
                    raise ValueError(
                        f"no points in radius {radius} m around (lat, lon) = {(latitude, longitude)} rad"
                    ) from error
                result.append(average)

            elif mode is BathymetricTransformer.Modes.FRACTION_NAVIGABLE:
                # we use the distance to cut off unwanted entries
                depth_within_radius = extract(distances <= radius, depth)  # also flattens
                if len(depth_within_radius) == 0:
                    raise ValueError(
                        f"no points in radius {radius} m around (lat, lon) = {(latitude, longitude)} radians"
                    )
                number_of_navigable = count_nonzero(
                    depth_within_radius <= BathymetricTransformer.NAVIGABLE_BELOW
                )
                fraction = number_of_navigable / len(depth_within_radius)
                result.append(fraction)

            else:  # pragma: no branch
                raise ValueError(f"invalid mode {mode}")  # pragma: no cover

        return tuple(result)
39
pyrate/pyrate/common/testing/__init__.py
Normal file
@ -0,0 +1,39 @@
"""This module contains helpers for writing and running tests.

In particular, it contains flags for detecting specific scenarios (like the test running on a CI platform)
and a variety of custom strategies for *Hypothesis*.
"""

# Standard library
from os import environ

# Spatialite for environment setup
from pyrate.common.charts import SpatialiteDatabase

def env(name: str) -> bool:
    """Checks if an environment variable exists and its value in lower case is ``{yes, true, t, 1}``.

    Args:
        name: The name of the environment variable to check
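    Example:
        A quick sanity check (the variable names are purely illustrative):

        >>> environ["MY_TEST_FLAG"] = "True"
        >>> env("MY_TEST_FLAG")
        True
        >>> env("SOME_UNSET_VARIABLE")
        False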
"""
|
||||
|
||||
return environ.get(name, "").lower() in ("yes", "true", "t", "1")
|
||||
|
||||
|
||||
#: Set to ``True`` if running on a CI server (best effort)
|
||||
IS_CI: bool = env("CI") or env("CONTINUOUS_INTEGRATION")
|
||||
|
||||
#: Whether to intensify tests at the expense of more time, e.g. with more example data for hypothesis
|
||||
IS_EXTENDED_TESTING: bool = env("EXTENDED_TESTING")
|
||||
|
||||
|
||||
#: True iff the Spatialite SQLite extension is installed & can be used
|
||||
SPATIALITE_AVAILABLE: bool
|
||||
try:
|
||||
with SpatialiteDatabase(":memory:"):
|
||||
pass # make sure it is properly closed
|
||||
except RuntimeError: # pragma: no cover
|
||||
SPATIALITE_AVAILABLE = False
|
||||
else:
|
||||
SPATIALITE_AVAILABLE = True
|
18
pyrate/pyrate/common/testing/strategies/__init__.py
Normal file
@ -0,0 +1,18 @@
"""This module provides testing helpers like hypothesis strategies.

Some typecasts of custom strategies using :func:`hypothesis.strategies.composite` are actually wrong but
required to paint over some Mypy shortcomings
(see `hypothesis#2748 <https://github.com/HypothesisWorks/hypothesis/issues/2748>`_).
"""

# Typing
from typing import Callable
from typing import TypeVar

# Hypothesis typing
from hypothesis.strategies import SearchStrategy


T = TypeVar("T")

#: The type of the draw parameter to :func:`hypothesis.strategies.composite` strategies
DrawType = Callable[[SearchStrategy[T]], T]
141
pyrate/pyrate/common/testing/strategies/dynamic_system.py
Normal file
@ -0,0 +1,141 @@
"""Contains helpers based on hypothesis test data generators."""

# Typing
from typing import cast
from typing import Tuple

# Hypothesis testing
from hypothesis.extra.numpy import arrays
from hypothesis.strategies import composite
from hypothesis.strategies import floats
from hypothesis.strategies import lists
from hypothesis.strategies import SearchStrategy

# Mathematics
from numpy import eye
from numpy import float64

# Gaussian representation
from pyrate.common.math import Gaussian

# Own typing
from . import DrawType  # pylint: disable=unused-import


# In this context, we reproduce a common filter notation
# pylint: disable=invalid-name, too-many-locals, unused-argument


@composite
def linear_model(
    draw: DrawType, state_dim: int = 2, input_dim: int = 1, sensor_dim: int = 2
) -> SearchStrategy[Tuple]:
    """Generate a linear state space model.

    Args:
        draw: see :func:`hypothesis.strategies.composite`
        state_dim: Number of state variables
        input_dim: Number of input variables
        sensor_dim: Number of measurement variables
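    Example:
        A sketch of how this strategy is typically consumed in a test (the test body is illustrative):

        .. code-block:: python

            @given(model=linear_model())
            def test_kalman_filter(model):
                estimate, F, B, H, Q, R, measurements, inputs = model
                ...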
"""
|
||||
|
||||
# Strategies
|
||||
float_strategy = floats(0, 5)
|
||||
|
||||
# Transition model
|
||||
F = draw(arrays(float64, (state_dim, state_dim), elements=float_strategy))
|
||||
|
||||
# Input model
|
||||
B = draw(arrays(float64, (state_dim, input_dim), elements=float_strategy))
|
||||
|
||||
# Measurement model
|
||||
H = draw(arrays(float64, (sensor_dim, state_dim), elements=float_strategy))
|
||||
|
||||
# Symmetric, positive definite process noise
|
||||
q = draw(arrays(float64, (state_dim, 1), elements=float_strategy))
|
||||
Q = q @ q.T + eye(state_dim)
|
||||
|
||||
# Symmetric, positive definite sensor noise
|
||||
r = draw(arrays(float64, (sensor_dim, 1), elements=float_strategy))
|
||||
R = r @ r.T + eye(sensor_dim)
|
||||
|
||||
# Initial belief
|
||||
x = draw(arrays(float64, (state_dim, 1), elements=float_strategy))
|
||||
p = draw(arrays(float64, (state_dim, 1), elements=float_strategy))
|
||||
P = p @ p.T + eye(state_dim)
|
||||
|
||||
estimate = Gaussian(x, P)
|
||||
|
||||
# Measurements and inputs
|
||||
measurements = draw(
|
||||
lists(arrays(float64, (sensor_dim, 1), elements=float_strategy), min_size=2, max_size=4)
|
||||
)
|
||||
inputs = draw(
|
||||
lists(
|
||||
arrays(float64, (input_dim, 1), elements=float_strategy),
|
||||
min_size=len(measurements),
|
||||
max_size=len(measurements),
|
||||
)
|
||||
)
|
||||
|
||||
# Return model
|
||||
result = estimate, F, B, H, Q, R, measurements, inputs
|
||||
return cast(SearchStrategy[Tuple], result)
|
||||
|
||||
|
||||
@composite
def nonlinear_model(draw: DrawType, state_dim: int = 2, sensor_dim: int = 2) -> SearchStrategy[Tuple]:
    """Generate a nonlinear state space model.

    Args:
        draw: see :func:`hypothesis.strategies.composite`
        state_dim: Number of state variables
        sensor_dim: Number of measurement variables
    """

    # Strategies
    float_strategy = floats(0, 5)

    # Transition model
    F = draw(arrays(float64, (state_dim, state_dim), elements=float_strategy))

    def f(x):
        return F @ x

    # Jacobian of f with respect to the state
    def Jf(x):
        return F

    # Measurement model
    H = draw(arrays(float64, (sensor_dim, state_dim), elements=float_strategy))

    def h(x):
        return H @ x

    # Jacobian of h with respect to the state
    def Jh(state):
        return H

    # Symmetric, positive definite process noise
    q = draw(arrays(float64, (state_dim, 1), elements=float_strategy))
    Q = q @ q.T + eye(state_dim)

    # Symmetric, positive definite sensor noise
    r = draw(arrays(float64, (sensor_dim, 1), elements=float_strategy))
    R = r @ r.T + eye(sensor_dim)

    # Initial belief
    x = draw(arrays(float64, (state_dim, 1), elements=float_strategy))
    p = draw(arrays(float64, (state_dim, 1), elements=float_strategy))
    P = p @ p.T + eye(state_dim)

    estimate = Gaussian(x, P)

    # Measurements
    measurements = draw(
        lists(arrays(float64, (sensor_dim, 1), elements=float_strategy), min_size=2, max_size=4)
    )

    # Return model
    result = estimate, f, F, Jf, h, H, Jh, Q, R, measurements
    return cast(SearchStrategy[Tuple], result)
448
pyrate/pyrate/common/testing/strategies/geometry.py
Normal file
@ -0,0 +1,448 @@
"""Contains helpers like hypothesis test data generators."""

# Typing
from typing import Any
from typing import cast
from typing import Dict
from typing import Optional
from typing import Union

# Hypothesis testing
from hypothesis import assume
import hypothesis.extra.numpy as numpy_st
import hypothesis.strategies as st
from hypothesis.strategies import SearchStrategy

# Scientific stack
import numpy
from scipy.spatial import Voronoi

# Planning primitives
from shapely.geometry import box
from shapely.geometry import MultiLineString
from shapely.geometry import Polygon
from shapely.ops import polygonize
from shapely.ops import unary_union

# Geospatial objects
from pyrate.plan.geometry import CartesianGeometry
from pyrate.plan.geometry import CartesianLocation
from pyrate.plan.geometry import CartesianPolygon
from pyrate.plan.geometry import CartesianRoute
from pyrate.plan.geometry import Geospatial
from pyrate.plan.geometry import LocationType
from pyrate.plan.geometry import PolarGeometry
from pyrate.plan.geometry import PolarLocation
from pyrate.plan.geometry import PolarPolygon
from pyrate.plan.geometry import PolarRoute

# Own typing
from . import DrawType  # pylint: disable=unused-import


@st.composite
def geo_bearings(draw: DrawType) -> SearchStrategy[float]:
    """Returns a direction/bearing/azimuth/yaw for navigation in degrees in :math:`[0, 360)`.

    Args:
        draw: see :func:`hypothesis.strategies.composite`
    """
    bearing = draw(st.floats(min_value=0.0, max_value=360.0, exclude_max=True))
    return cast(SearchStrategy[float], bearing)


@st.composite
def geospatial_identifiers(draw: DrawType) -> SearchStrategy[Optional[int]]:
    """Returns identifiers for subclasses of :class:`pyrate.plan.geometry.Geospatial`.

    Args:
        draw: see :func:`hypothesis.strategies.composite`
    """
    identifier = draw(st.one_of(st.none(), st.integers(min_value=0, max_value=(2**63) - 1)))
    return cast(SearchStrategy[Optional[int]], identifier)


@st.composite
def location_types(draw: DrawType) -> SearchStrategy[LocationType]:
    """Returns location types.

    Args:
        draw: see :func:`hypothesis.strategies.composite`
    """
    location_type = draw(st.sampled_from(LocationType))
    return cast(SearchStrategy[LocationType], location_type)


@st.composite
def geospatial_attributes(draw: DrawType) -> SearchStrategy[Dict[str, Any]]:
    """Returns the common attributes for subclasses of :class:`pyrate.plan.geometry.Geospatial`.

    Args:
        draw: see :func:`hypothesis.strategies.composite`
    """
    attributes = {
        "location_type": draw(location_types()),
        "name": draw(st.text()),
        "identifier": draw(geospatial_identifiers()),
    }
    return cast(SearchStrategy[Dict[str, Any]], attributes)


@st.composite
|
||||
def polar_locations(draw: DrawType) -> SearchStrategy[PolarLocation]:
|
||||
"""Returns a polar location.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
"""
|
||||
location = PolarLocation(
|
||||
draw(st.floats(min_value=-90.0, max_value=+90.0)),
|
||||
draw(st.floats(min_value=-180.0, max_value=+180.0, exclude_max=True)),
|
||||
**draw(geospatial_attributes()),
|
||||
)
|
||||
return cast(SearchStrategy[PolarLocation], location)
|
||||
|
||||
|
||||
@st.composite
|
||||
def cartesian_locations(
|
||||
draw: DrawType,
|
||||
origin: SearchStrategy[Union[CartesianPolygon, None]] = st.one_of(st.none(), polar_locations()),
|
||||
) -> SearchStrategy[CartesianLocation]:
|
||||
"""Returns a cartesian location.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
origin: an optional strategy for specifying origins, defaults to providing both ``None`` and real
|
||||
locations
|
||||
"""
|
||||
location = CartesianLocation(
|
||||
draw(st.floats(min_value=-10_000.0, max_value=+10_000.0)),
|
||||
draw(st.floats(min_value=-10_000.0, max_value=+10_000.0)),
|
||||
origin=draw(origin),
|
||||
**draw(geospatial_attributes()),
|
||||
)
|
||||
return cast(SearchStrategy[CartesianLocation], location)
|
||||
|
||||
|
||||
@st.composite
|
||||
def polar_objects(
|
||||
draw: DrawType, stable: bool = False, non_repeating: bool = False
|
||||
) -> SearchStrategy[PolarGeometry]:
|
||||
"""Returns polar geometries.
|
||||
|
||||
The concrete type is sampled randomly from all three polar geometries.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
stable: see :func:`~polar_routes_stable`
|
||||
non_repeating: if ``True``, the strategy will not produce routes with duplicate locations.
|
||||
Ignored if ``stable`` is given.
|
||||
"""
|
||||
possible_sources = st.one_of(
|
||||
[
|
||||
polar_locations(),
|
||||
polar_routes_stable() if stable else polar_routes(non_repeating=non_repeating),
|
||||
polar_polygons(),
|
||||
]
|
||||
)
|
||||
geospatial: PolarGeometry = draw(possible_sources)
|
||||
return cast(SearchStrategy[PolarGeometry], geospatial)
|
||||
|
||||
|
||||
@st.composite
|
||||
def cartesian_objects(draw: DrawType) -> SearchStrategy[CartesianGeometry]:
|
||||
"""Returns cartesian geometries.
|
||||
|
||||
The concrete type is sampled randomly from all three cartesian geometries.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
"""
|
||||
possible_sources = st.one_of(
|
||||
[
|
||||
cartesian_locations(),
|
||||
cartesian_routes(),
|
||||
cartesian_polygons(),
|
||||
]
|
||||
)
|
||||
geospatial: CartesianGeometry = draw(possible_sources)
|
||||
return cast(SearchStrategy[CartesianGeometry], geospatial)
|
||||
|
||||
|
||||
@st.composite
|
||||
def geospatial_objects(draw: DrawType, stable: bool = False) -> SearchStrategy[Geospatial]:
|
||||
"""Returns instances of the abstract class :class:`pyrate.plan.geometry.Geospatial`.
|
||||
|
||||
The concrete type is sampled randomly from all six cartesian and polar geometries.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
stable: see :func:`~polar_routes_stable`
|
||||
"""
|
||||
geospatial: Geospatial = draw(st.one_of([polar_objects(stable=stable), cartesian_objects()]))
|
||||
return cast(SearchStrategy[Geospatial], geospatial)
|
||||
|
||||
|
||||
@st.composite
|
||||
def cartesian_polygons( # pylint: disable=too-many-arguments,too-many-locals
|
||||
draw: DrawType,
|
||||
min_vertices: int = 5,
|
||||
max_vertices: int = 15,
|
||||
scale: float = 100_000,
|
||||
center_x: float = 0.0,
|
||||
center_y: float = 0.0,
|
||||
origin: SearchStrategy[Union[CartesianPolygon, None]] = st.one_of(st.none(), polar_locations()),
|
||||
) -> SearchStrategy[CartesianPolygon]:
|
||||
"""Returns non-empty valid cartesian polygons around the origin of the coordinate system.
|
||||
|
||||
Inspired `by testing code from the spatialpandas
|
||||
<https://github.com/holoviz/spatialpandas/blob/efdabe5c736db8103a4bfedca55a414a365b754a/tests/geometry/strategies.py#L141>`_
|
||||
library.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
min_vertices: the minimum number of locations that shall form this polygon; needs to be at least ``5``
|
||||
such that the generation algorithm works reliably
|
||||
max_vertices: the minimum number of locations that shall form this polygon; needs to be larger than
|
||||
``min_vertices``; this may not be very large as this will make example generation
|
||||
extremely slow
|
||||
scale: the maximum that a single coordinate value may be away from the center (in meters)
|
||||
center_x: the east-west center (in meters)
|
||||
center_y: the north-south center (in meters)
|
||||
origin: an optional strategy for specifying origins, defaults to providing both ``None`` and real
|
||||
locations
|
||||
|
||||
Raises:
|
||||
ValueError: if polygon generation fails
|
||||
"""
|
||||
|
||||
assert min_vertices >= 5, "min_vertices needs to be at least 5"
|
||||
assert max_vertices >= min_vertices, "max_vertices needs to be at least min_vertices"
|
||||
assert scale >= 0.0, "scale must be non-negative"
|
||||
|
||||
count = draw(st.integers(min_value=min_vertices, max_value=max_vertices))
|
||||
|
||||
# very often, this only takes a single try
|
||||
# it is highly unlikely that it will take more than 50
|
||||
tries = 50
|
||||
for _ in range(tries): # pragma: no branch
|
||||
|
||||
# create points in [-0.5, +0.5]
|
||||
points = numpy.random.rand(count, 2) - 0.5
|
||||
# scale them to the desired size
|
||||
points *= scale * 2
|
||||
|
||||
voronoi = Voronoi(points)
|
||||
multi_line_string = MultiLineString(
|
||||
[voronoi.vertices[s] for s in voronoi.ridge_vertices if all(numpy.array(s) >= 0)]
|
||||
)
|
||||
|
||||
poly = unary_union(list(polygonize(multi_line_string)))
|
||||
poly = poly.intersection(box(-scale, -scale, scale, scale))
|
||||
|
||||
if ( # pragma: no branch
|
||||
isinstance(poly, Polygon) and not poly.is_empty and poly.is_simple and poly.is_valid
|
||||
):
|
||||
coordinates = numpy.array(poly.exterior.coords)
|
||||
# move them to the desired center
|
||||
coordinates[:, 0] += center_x
|
||||
coordinates[:, 1] += center_y
|
||||
|
||||
polygon = CartesianPolygon.from_numpy(
|
||||
coordinates, origin=draw(origin), **draw(geospatial_attributes())
|
||||
)
|
||||
return cast(SearchStrategy[CartesianPolygon], polygon)
|
||||
|
||||
# This should practically never occur (the probability is very, very low)
|
||||
raise ValueError("Failed to construct polygon") # pragma: no cover
|
||||
|
||||
|
||||
@st.composite
|
||||
def polar_polygons(
|
||||
draw: DrawType,
|
||||
min_vertices: int = 5,
|
||||
max_vertices: int = 15,
|
||||
scale: float = 100_000,
|
||||
center: Optional[PolarLocation] = None,
|
||||
) -> SearchStrategy[PolarPolygon]:
|
||||
"""Returns non-empty valid polar polygons.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
min_vertices: the minimum number of locations that shall form this polygon; needs to be at least ``5``
|
||||
such that the generation algorithm works reliably
|
||||
max_vertices: the minimum number of locations that shall form this polygon; needs to be larger than
|
||||
``min_vertices``; this may not be very large as this will make example generation
|
||||
extremely slow
|
||||
scale: the maximum that a single coordinate value may be away from the center (in meters)
|
||||
center: the center of the polygon or ``None`` randomly select one
|
||||
|
||||
Raises:
|
||||
ValueError: if polygon generation fails
|
||||
"""
|
||||
cartesian = draw(
|
||||
cartesian_polygons(
|
||||
min_vertices=min_vertices,
|
||||
max_vertices=max_vertices,
|
||||
scale=scale,
|
||||
origin=polar_locations() if center is None else st.just(center), # type: ignore
|
||||
)
|
||||
)
|
||||
return cast(SearchStrategy[PolarPolygon], cartesian.to_polar())
|
||||
|
||||
|
||||
@st.composite
|
||||
def cartesian_routes( # pylint: disable=too-many-arguments
|
||||
draw: DrawType,
|
||||
min_vertices: int = 2,
|
||||
max_vertices: int = 10,
|
||||
scale: float = 100_000,
|
||||
center_x: float = 0.0,
|
||||
center_y: float = 0.0,
|
||||
origin: SearchStrategy[Union[CartesianPolygon, None]] = st.one_of(st.none(), polar_locations()),
|
||||
non_repeating: bool = True,
|
||||
) -> SearchStrategy[CartesianRoute]:
|
||||
"""Returns a cartesian route.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
min_vertices: the minimum number of locations that shall form this route, must be ``2`` or greater
|
||||
max_vertices: the maximum number of locations that shall form this route
|
||||
scale: the maximum that a single coordinate value may be away from the center (in meters); strictly
|
||||
positive
|
||||
center_x: the east-west center (in meters)
|
||||
center_y: the north-south center (in meters)
|
||||
origin: an optional strategy for specifying origins, defaults to providing both ``None`` and real
|
||||
locations
|
||||
non_repeating: if ``True``, the route will not contain any duplicate locations
|
||||
"""
|
||||
|
||||
assert min_vertices >= 2, "min_vertices may not be less than 2"
|
||||
assert (
|
||||
max_vertices is None or max_vertices >= min_vertices
|
||||
), "max_vertices may not be less than min_vertices"
|
||||
assert scale > 0.0, "scale must be strictly positive"
|
||||
|
||||
# define the actual values in the coordinate arrays
|
||||
elements_x = st.floats(min_value=center_x - scale, max_value=center_x + scale)
|
||||
elements_y = st.floats(min_value=center_y - scale, max_value=center_y + scale)
|
||||
|
||||
# define the number of coordinates; we must draw directly and not pass the strategy such that x and y have
|
||||
# the same number of elements
|
||||
length = draw(st.integers(min_value=min_vertices, max_value=max_vertices))
|
||||
|
||||
# create the actual coordinates and ensure that all are different from each other
|
||||
coordinates_x = draw(numpy_st.arrays(dtype="float64", shape=length, elements=elements_x, unique=True))
|
||||
coordinates_y = draw(numpy_st.arrays(dtype="float64", shape=length, elements=elements_y, unique=True))
|
||||
coordinates = numpy.vstack((coordinates_x, coordinates_y)).T
|
||||
|
||||
# make sure that the route has non-zero length
|
||||
if numpy.abs(numpy.diff(coordinates, axis=1)).sum() < 1: # one meter in the typical interpretation
|
||||
coordinates[0, 0] += 1 # add an arbitrary value # pragma: no cover
|
||||
|
||||
assume(not non_repeating or (numpy.abs(numpy.diff(coordinates, axis=1)) > 1).all()) # Difficult to handle
|
||||
|
||||
# create the route with the other parameters of geospatial objects
|
||||
route = CartesianRoute.from_numpy(coordinates, origin=draw(origin), **draw(geospatial_attributes()))
|
||||
return cast(SearchStrategy[CartesianRoute], route)
|
||||
|
||||
|
||||
@st.composite
|
||||
def polar_routes(
|
||||
draw: DrawType,
|
||||
min_vertices: int = 2,
|
||||
max_vertices: int = 10,
|
||||
non_repeating: bool = True,
|
||||
) -> SearchStrategy[PolarRoute]:
|
||||
"""Returns a polar route.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
min_vertices: the minimum number of locations that shall form this route, must be ``2`` or greater
|
||||
max_vertices: the maximum number of locations that shall form this route or ``None`` to let
|
||||
*hypothesis* decide
|
||||
non_repeating: if ``True``, the route will not contain any duplicate locations
|
||||
"""
|
||||
|
||||
assert min_vertices >= 2, "min_vertices may not be less than 2"
|
||||
assert (
|
||||
max_vertices is None or max_vertices >= min_vertices
|
||||
), "max_vertices may not be less than min_vertices"
|
||||
|
||||
# define the actual values in the coordinate arrays
|
||||
elements_latitude = st.floats(min_value=-90, max_value=90)
|
||||
elements_longitude = st.floats(min_value=-180, max_value=180, exclude_max=True)
|
||||
|
||||
# define the number of coordinates; we must draw directly and not pass the strategy such that x and y have
|
||||
# the same number of elements
|
||||
length = draw(st.integers(min_value=min_vertices, max_value=max_vertices))
|
||||
|
||||
# create the actual coordinates
|
||||
coordinates_latitude = draw(
|
||||
numpy_st.arrays(dtype="float64", shape=length, elements=elements_latitude, unique=True)
|
||||
)
|
||||
coordinates_longitude = draw(
|
||||
numpy_st.arrays(dtype="float64", shape=length, elements=elements_longitude, unique=True)
|
||||
)
|
||||
coordinates = numpy.vstack((coordinates_latitude, coordinates_longitude)).T
|
||||
|
||||
# make sure that the route has non-zero length
|
||||
# there is no single correct value for the threshold near the poles, but 1e-4 appears to work fine
|
||||
if numpy.abs(numpy.diff(coordinates, axis=1)).sum() < 1e-4:
|
||||
coordinates[0, 0] += 1e-4 # add an arbitrary value # pragma: no cover
|
||||
|
||||
assume(
|
||||
not non_repeating or (numpy.abs(numpy.diff(coordinates, axis=1)) > 1e-4).all()
|
||||
) # Difficult to handle
|
||||
|
||||
# create the route with the other parameters of geospatial objects
|
||||
try:
|
||||
route = PolarRoute.from_numpy(coordinates, **draw(geospatial_attributes()))
|
||||
except ValueError:
|
||||
# This can still happen if the duplicate entries check above does not catch it
|
||||
assume(False) # pragma: no cover
|
||||
|
||||
# Make sure we only generate routes that can be projected
|
||||
try:
|
||||
route.to_cartesian(route.locations[0])
|
||||
except AssertionError:
|
||||
assume(False) # pragma: no cover
|
||||
|
||||
return cast(SearchStrategy[PolarRoute], route)
|
||||
|
||||
|
||||
@st.composite
|
||||
def polar_routes_stable(
|
||||
draw: DrawType,
|
||||
min_vertices: int = 5, # polar_polygons() requires at least 5
|
||||
max_vertices: int = 10,
|
||||
) -> SearchStrategy[PolarRoute]:
|
||||
"""Returns a polar route where the vertices are not too far apart.
|
||||
|
||||
It is therefore numerically more stable when projecting to a cartesian plane.
|
||||
|
||||
Args:
|
||||
draw: see :func:`hypothesis.strategies.composite`
|
||||
min_vertices: the minimum number of locations that shall form this route, must be ``5`` or greater
|
||||
max_vertices: the maximum number of locations that shall form this route or ``None`` to let
|
||||
*hypothesis* decide
|
||||
"""
|
||||
|
||||
assert min_vertices >= 5, "min_vertices may not be less than 5"
|
||||
assert (
|
||||
max_vertices is None or max_vertices >= min_vertices
|
||||
), "max_vertices may not be less than min_vertices"
|
||||
|
||||
# We create a polygon since that is known to cause fewer numerical issues when projecting
|
||||
# since we generate them in a way that guarantees similar locality of the vertices
|
||||
polygon: PolarPolygon = draw(polar_polygons(min_vertices=min_vertices, max_vertices=max_vertices))
|
||||
|
||||
return cast(
|
||||
SearchStrategy[PolarRoute],
|
||||
PolarRoute.from_numpy(
|
||||
polygon.to_numpy(),
|
||||
location_type=polygon.location_type,
|
||||
name=polygon.name,
|
||||
identifier=polygon.identifier,
|
||||
),
|
||||
)
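As a usage sketch for the strategies above (hedged, assuming only the import path visible in this commit), a property-based test typically binds one of these composites via ``@given``:

from hypothesis import given

from pyrate.common.testing.strategies.geometry import polar_locations
from pyrate.plan.geometry import PolarLocation


@given(location=polar_locations())
def test_normalization_is_idempotent(location: PolarLocation) -> None:
    # The latitude/longitude setters normalize their input, so re-assigning
    # already-normalized values must not change anything.
    latitude, longitude = location.latitude, location.longitude
    location.latitude = latitude
    location.longitude = longitude
    assert location.latitude == latitude
    assert location.longitude == longitude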
11
pyrate/pyrate/plan/__init__.py
Normal file
@ -0,0 +1,11 @@
"""The plan package provides tools to plan actions that can aid a robot in reaching its goals.
One important goal is reaching certain locations despite the physically constrained
movement abilities of a sailing vessel.
Thus, a big part of this package deals with navigation strategies.

In the ``geometry`` package, general geometrical objects and transformations are
provided in cartesian (local 2D world) and spherical (latitude/longitude) coordinates.

The ``graph`` module provides navigation tools where the world is modeled as a graph.
This includes generating a graph, assigning properties to nodes of the graph and
finding good paths on the graph."""
49
pyrate/pyrate/plan/geometry/__init__.py
Normal file
@ -0,0 +1,49 @@
"""This package provides geometric abstractions for action planning algorithms.

Warning:
    This module docstring is not included in the *Sphinx* documentation.
"""

import typing  # Will be removed from the namespace after being used below


from .geospatial import Direction
from .geospatial import Geospatial
from .geospatial import LocationType

from .location import CartesianLocation
from .location import PolarLocation

from .polygon import CartesianPolygon
from .polygon import PolarPolygon

from .route import CartesianRoute
from .route import PolarRoute

# provide useful aliases

#: Any of :class:`pyrate.plan.geometry.PolarLocation`, :class:`pyrate.plan.geometry.PolarRoute` and
#: :class:`pyrate.plan.geometry.PolarPolygon`.
PolarGeometry = typing.Union[PolarLocation, PolarRoute, PolarPolygon]

#: Any of :class:`pyrate.plan.geometry.CartesianLocation`, :class:`pyrate.plan.geometry.CartesianRoute` and
#: :class:`pyrate.plan.geometry.CartesianPolygon`.
CartesianGeometry = typing.Union[CartesianLocation, CartesianRoute, CartesianPolygon]

del typing

# don't expose .helpers here as it will rarely be used directly

__all__ = [
    "CartesianLocation",
    "CartesianPolygon",
    "CartesianRoute",
    "CartesianGeometry",
    "Direction",
    "Geospatial",
    "LocationType",
    "PolarLocation",
    "PolarPolygon",
    "PolarRoute",
    "PolarGeometry",
]
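A small sketch of how these aliases are meant to be used in signatures (the ``describe`` function is illustrative, not part of the package):

from pyrate.plan.geometry import PolarGeometry, PolarLocation


def describe(geometry: PolarGeometry) -> str:
    # Accepts any of the three polar geometry types via the alias.
    return f"{type(geometry).__name__} (location_type={geometry.location_type.name})"


print(describe(PolarLocation(latitude=49.878091, longitude=8.654052)))
# -> "PolarLocation (location_type=UNKNOWN)"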
221
pyrate/pyrate/plan/geometry/geospatial.py
Normal file
@ -0,0 +1,221 @@
"""This module contains common base classes for the geospatial objects like polygons, routes and points."""

# Standard library
from abc import ABC
from abc import abstractmethod
from enum import Enum
from enum import IntEnum
from math import pi

# Typing
from typing import Any
from typing import cast
from typing import Dict
from typing import Optional
from typing import Union

# Geospatial
from geojson import dumps
from geojson import Feature


#: The mean earth radius in meters (taken from
#: `Earth radius (Wikipedia) <https://en.wikipedia.org/wiki/Earth_radius#Mean_radius>`__).
MEAN_EARTH_RADIUS = 6371_008.8

#: The mean earth circumference in meters (derived from :attr:`~MEAN_EARTH_RADIUS`).
MEAN_EARTH_CIRCUMFERENCE = MEAN_EARTH_RADIUS * 2.0 * pi

#: The maximal earth circumference in meters (i.e. at the equator; taken from
#: `Earth's circumference (Wikipedia) <https://en.wikipedia.org/wiki/Earth%27s_circumference>`__).
MAXIMUM_EARTH_CIRCUMFERENCE = 40_075_017.0


class LocationType(IntEnum):

    """Represents what type a location is of.

    Notes:
        The values are set to fixed values such that they can be serialized.
        New members may therefore only be added below, with strictly ascending numbers.
    """

    #: An object of unknown type.
    UNKNOWN = 0

    #: An abstract thing that is used for testing purposes and may not have a correspondence in the real
    #: world. Ships should usually avoid such obstacles too, as they could represent things like
    #: `icebergs <https://de.wikipedia.org/wiki/RMS_Titanic>`__.
    TESTING = 1

    #: A generic obstruction like a buoy, oil rig or special area extracted from a nautical chart.
    OBSTRUCTION = 2

    #: A land mass like an island or continent.
    LAND = 3

    #: Water area that might be considered not sufficiently deep for navigation depending on the context.
    SHALLOW_WATER = 4

    #: Some object representing special weather conditions, like strong winds or just precipitation.
    WEATHER = 5

    #: An object representing other vessels 🚢.
    #: It might have been detected via
    #: `AIS <https://en.wikipedia.org/wiki/Automatic_identification_system>`__.
    SHIP = 6

    @classmethod
    def max_value(cls) -> int:
        """Get the maximum value of all members of this enum."""

        return max(cls)


class Direction(float, Enum):

    """A simple collection of named "compass" bearings 🧭 in degrees for self-documenting code."""

    # pylint: disable=invalid-name
    North = 0.0
    East = 90.0
    South = 180.0
    West = 270.0


class Geospatial(ABC):

    """The common abstract base class for both polar and cartesian geospatial objects.

    See :meth:`~Geospatial.to_geo_json` for hints on how this class can be used for visualizing geometries.

    Args:
        location_type: The type of this object
        name: An optional name of this object
        identifier: An optional unique identifier for this object, in :math:`[0, 2^{63})`, i.e. 64 signed bits
    """

    def __init__(self, location_type: LocationType, name: Optional[str], identifier: Optional[int]) -> None:
        self.location_type = location_type
        self.name = name
        self.identifier = identifier

        super().__init__()

    @property
    def identifier(self) -> Optional[int]:
        """The numerical identifier of this object.

        Must be `None` or in :math:`[0, 2^{63})`, i.e. 64 signed bits.
        """

        return self._identifier

    @identifier.setter
    def identifier(self, value: Optional[int]) -> None:
        assert value is None or 0 <= value < 2**63, "Identifiers must be in [0, 2**63) or None"

        self._identifier = value

    def to_geo_json(self, indent: Optional[Union[int, str]] = None, **kwargs) -> str:
        """Returns the GeoJSON representation of the geometry embedded into a feature.

        Args:
            indent: the number of levels to indent or ``None`` for compactness (see :func:`json.dumps`)
            kwargs: much like indent, any keyword argument that can be passed to :func:`json.dumps`,
                like ``allow_nan``, ``sort_keys``, and more

        Returns:
            The GeoJSON representation as a string

        Examples:
            See also: :ref:`geometry-plotting`.

            GeoJSON is a widely used format that can be interpreted by a variety of GIS programs (geo
            information systems). Among them are for example the very simple website
            `geojson.io <https://geojson.io/>`__.
            However, sometimes the geometries are too large to be handled by the web browser.
            Then there are other programs available, like the free open-source tool
            `QGIS (Desktop) <https://www.qgis.org/de/site/>`__. It's even available in the usual Ubuntu
            repositories, so just run ``[sudo] apt install qgis``. Later, you can simply copy-paste the
            GeoJSON into the tool.

            The geojson representation can be obtained like this (using a
            :class:`~pyrate.plan.geometry.location.PolarLocation` just as an example):

            >>> from pyrate.plan.geometry import PolarLocation
            >>> team_room = PolarLocation(latitude=49.878091, longitude=8.654052)
            >>> print(team_room.to_geo_json(indent=4))
            {
                "type": "Feature",
                "geometry": {
                    "type": "Point",
                    "coordinates": [
                        8.654052,
                        49.878091
                    ]
                },
                "properties": {}
            }

        See also:
            - `GeoJSON on Wikipedia <https://en.wikipedia.org/wiki/GeoJSON>`__
            - `geojson.io <https://geojson.io/>`__
            - `QGIS (Desktop) <https://www.qgis.org/de/site/>`__
        """

        # this relies on the inheriting instance to provide __geo_interface__ property/attribute
        return cast(str, dumps(Feature(geometry=self), indent=indent, **kwargs))

    @property
    @abstractmethod
    def __geo_interface__(self) -> Dict[str, Any]:
        raise NotImplementedError()

    @abstractmethod
    def __eq__(self, other: Any) -> bool:
        return (
            isinstance(other, Geospatial)
            and self.location_type == other.location_type
            and self.name == other.name
            and self.identifier == other.identifier
        )

    @property
    def _repr_extras(self) -> str:
        """Create a string representation of the three extra attributes for use in :meth:`~__repr__`.

        Examples:
            The output is suited to be directly included before the final closing bracket of a typical
            implementation of ``__repr__()``:

            >>> from pyrate.plan.geometry import PolarLocation
            >>> PolarLocation(0, 0)._repr_extras
            ''
            >>> PolarLocation(0, 0, location_type=LocationType.UNKNOWN, name=None)._repr_extras
            ''
            >>> PolarLocation(0, 0, name="")._repr_extras
            ', name=""'
            >>> PolarLocation(0, 0, location_type=LocationType.SHIP, identifier=12)._repr_extras
            ', location_type=LocationType.SHIP, identifier=12'

            The class :class:`pyrate.plan.geometry.location.PolarLocation` was only chosen as an example.

        Returns:
            The extra arguments in the syntax of keyword arguments, as is common for :meth:`~__repr__`.
        """

        result = ""

        if self.location_type != LocationType.UNKNOWN:
            result += f", location_type=LocationType.{self.location_type.name}"
        if self.name is not None:
            result += f', name="{self.name}"'
        if self.identifier is not None:
            result += f", identifier={self.identifier}"

        return result

    @abstractmethod
    def __repr__(self) -> str:
        raise NotImplementedError()
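To make the serialization note on :class:`LocationType` concrete, a brief sketch of the intended round-trip (the storage step is hypothetical; the point is that stored integers must map back to the same members, which is why values may never be renumbered):

from pyrate.plan.geometry import LocationType

# IntEnum members serialize to plain integers and can be restored from them.
stored = int(LocationType.LAND)   # -> 3, e.g. as written to a database
restored = LocationType(stored)   # -> LocationType.LAND
assert restored is LocationType.LAND

# max_value() reports the highest member, currently SHIP.
assert LocationType.max_value() == int(LocationType.SHIP)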
729
pyrate/pyrate/plan/geometry/helpers.py
Normal file
@ -0,0 +1,729 @@
"""Contains helpers for dealing with distances and normalization of spherical coordinates and compass
directions. Also allows for translating (collections of) points in polar coordinates.

Maybe we should use `geopandas <https://geopandas.org/reference.html#geopandas.GeoSeries.distance>`__.

References:
    - Introduction on `Wikipedia <https://en.wikipedia.org/wiki/Great-circle_distance>`__
    - Simple discussion on `StackOverflow <https://stackoverflow.com/q/38248046/3753684>`__
    - Charles F. F. Karney (2013): Algorithms for geodesics.
      `Paper as PDF <https://link.springer.com/content/pdf/10.1007%2Fs00190-012-0578-z.pdf>`__.
    - `Walter Bislin's Blog <https://walter.bislins.ch/bloge/index.asp?page=Distances+on+Globe+and+Flat+Earth>`__
"""

# Python standard library
from math import atan2
from math import pi
from math import tau

# Typing
from typing import cast
from typing import Tuple
from typing import TypeVar
from typing import Union
from warnings import warn

# Scientific
import numpy
from numpy import absolute
from numpy import arccos
from numpy import arcsin
from numpy import arctan2
from numpy import array
from numpy import choose
from numpy import clip
from numpy import cos
from numpy import full
from numpy import hypot
from numpy import isfinite
from numpy import isscalar
from numpy import ndarray
from numpy import sin
from numpy import sqrt
from numpy import square

# Geospatial
from pyproj import Geod

# Own constants
from .geospatial import MEAN_EARTH_CIRCUMFERENCE
from .geospatial import MEAN_EARTH_RADIUS

# Constants -------------------------------------------------------------------

#: A scalar or a numpy array
ScalarOrArray = TypeVar("ScalarOrArray", float, ndarray)

#: The pyproj WGS84 object used as the basis for all polar representations and coordinate projections
WGS84_PYPROJ_GEOD = Geod("+ellps=WGS84 +units=m")


# Normalize -------------------------------------------------------------------


def _normalize_circular_range(value: ScalarOrArray, minimum: float, maximum: float) -> ScalarOrArray:
    """Normalizes the value to reside in :math:`[minimum, maximum[` by wrapping around.

    Used by the other normalization functions in this package.

    Args:
        value: the value to be normalized
        minimum: the minimum of the desired bounds
        maximum: the maximum of the desired bounds, assumed to be truly larger than *minimum*

    Returns:
        The normalized value
    """

    # general approach: remove offset -> normalize with span -> add offset
    span = maximum - minimum

    # the second `% span` is required due to floating point issues: `-1e-15 % 360` -> `360.0`,
    # but not less than `360.0` as required
    return ((value - minimum) % span) % span + minimum


def normalize_latitude(value: ScalarOrArray) -> ScalarOrArray:
    """Normalizes a latitudinal value to the usual bounds by wrapping around.

    Note:
        This is already done automatically by
        :attr:`pyrate.plan.geometry.location.PolarLocation.latitude`.

    Examples:
        >>> normalize_latitude(20.0)
        20.0
        >>> normalize_latitude(-90.0)
        -90.0
        >>> normalize_latitude(90.0)
        90.0

        It is also possible to wrap over the pole coordinates.

        >>> normalize_latitude(91.0)
        89.0
        >>> normalize_latitude(185.0)
        -5.0

        Take care: this will also normalize rubbish values.

        >>> normalize_latitude(3229764.25)
        -24.25

    Args:
        value: the raw latitudinal value in degrees

    Returns:
        the normalized value in :math:`[-90, +90]` degrees
    """

    # touch_point_*: the latitudes would meet at this point if values outside [-90, +90] would be allowed
    # pole_*: the actual bounds of the latitude values; they describe the south and north poles
    touch_point_min, touch_point_max = -180.0, +180.0
    pole_down, pole_up = -90.0, +90.0

    # map into [-180.0, +180.0] by modulo exactly as with the longitude
    value = _normalize_circular_range(value, touch_point_min, touch_point_max)

    # map into [-90.0, +90.0] by mirroring, since `100°` would be `180° - 100° = 80°` and not
    # `100° mod 90° = 10°` (as an example)
    try:
        if value > pole_up:
            return touch_point_max - value
        if value < pole_down:
            return touch_point_min - value
        return value

    except ValueError:
        clipped_below = choose(value < pole_down, (value, touch_point_min - value))
        clipped_above = choose(value > pole_up, (clipped_below, touch_point_max - value))
        return cast(ScalarOrArray, clipped_above)


def normalize_longitude(value: ScalarOrArray) -> ScalarOrArray:
    """Normalizes a longitudinal value to the usual bounds by wrapping.

    Note:
        This is already done automatically by
        :attr:`pyrate.plan.geometry.location.PolarLocation.longitude`.

    Examples:

        >>> normalize_longitude(136.0)
        136.0
        >>> normalize_longitude(-86.0)
        -86.0
        >>> normalize_longitude(-180.0)
        -180.0

        You can also get rid of redundant values, e.g. at 180.0°,
        as well as wrap around the boundaries.

        >>> normalize_longitude(+180.0)
        -180.0
        >>> normalize_longitude(185.0)
        -175.0

        Take care: this will also normalize rubbish values.

        >>> normalize_longitude(3229764.25)
        -155.75

    Args:
        value: the raw longitudinal value in degrees

    Returns:
        the normalized value in :math:`[-180, +180[` degrees
    """

    return _normalize_circular_range(value, -180.0, +180.0)


def normalize_direction(value: ScalarOrArray) -> ScalarOrArray:
    """Normalizes a direction (azimuth/yaw) value to the usual 360° compass values.

    Examples:

        >>> normalize_direction(45.0)
        45.0
        >>> normalize_direction(250.0)
        250.0
        >>> normalize_direction(-6.0)
        354.0
        >>> normalize_direction(360.0)
        0.0
        >>> normalize_direction(450.0)
        90.0

        Take care: this will also normalize rubbish values.

        >>> normalize_direction(3229764.25)
        204.25

    Args:
        value: the raw value in degrees

    Returns:
        the normalized value in :math:`[0, 360[` degrees
    """

    return _normalize_circular_range(value, 0.0, 360.0)


# Difference ------------------------------------------------------------------


def _difference_circular_range(
    value_a: ScalarOrArray, value_b: ScalarOrArray, minimum: float, maximum: float
) -> ScalarOrArray:
    """Calculates differences on a circular number line, where minimum and maximum meet.

    The values do not need to be normalized.

    If the difference between ``value_a`` and ``value_b`` is not finite (i.e. ``NaN``, ``+Inf`` or ``-Inf``) a
    warning is printed and ``NaN`` is returned. All other values are assumed to be finite.

    Args:
        value_a: the first value
        value_b: the second value
        minimum: the minimum of the desired bounds
        maximum: the maximum of the desired bounds, assumed to be strictly larger than ``minimum``

    Returns:
        the normalized value in :math:`[0, (maximum - minimum)/2]`
    """

    raw_difference = value_a - value_b

    if not isfinite(raw_difference).all():
        warn(
            "_difference_circular_range(): "
            f"difference between {value_a} and {value_b} was not a valid number: {raw_difference}",
            UserWarning,
        )

    span = maximum - minimum
    difference: ScalarOrArray = raw_difference % span

    # take the smaller one of the two possible distances, i.e. the smaller path around the circular range
    try:
        # Try the case where we have floats, not arrays
        if difference > span / 2.0:
            return span - difference
        return difference

    except ValueError:
        return cast(ScalarOrArray, choose(difference > span / 2.0, (difference, span - difference)))


def difference_latitude(value_a: ScalarOrArray, value_b: ScalarOrArray) -> ScalarOrArray:
    """Calculates the difference between two latitudinal values.

    The values do not need to be normalized.

    If the difference between ``value_a`` and ``value_b`` is not finite (i.e. ``NaN``, ``+Inf`` or ``-Inf``) a
    warning is printed and ``NaN`` is returned.

    Examples:

        >>> difference_latitude(-45.0, +50.0)
        95.0
        >>> difference_latitude(-90.0, -90.0)
        0.0
        >>> difference_latitude(-90.0, +90.0)  # the maximum distance
        180.0
        >>> difference_latitude(-90.0, +190.0)
        80.0

        Take care: this will also calculate distances for rubbish values.

        >>> difference_latitude(95324.0, 3224.25)
        60.25

    Args:
        value_a: the first latitude in degrees
        value_b: the second latitude in degrees

    Returns:
        The difference between the two values in degrees in :math:`[0, 180]`
    """

    # normalization is required because the distance between +80° and +100° shall be 0° and not 20°
    value_a = normalize_latitude(value_a)
    value_b = normalize_latitude(value_b)

    # mathematically, there is no need to calculate in modulo `span` like in #difference_circular_range, since
    # both values are already guaranteed to be in [-90.0, +90.0] and their absolute difference already gives
    # what we need
    difference: ScalarOrArray = numpy.abs(value_a - value_b)

    if not isfinite(difference).all():
        warn(
            "difference_latitude(): "
            f"difference between {value_a} and {value_b} was not a valid number: {difference}",
            UserWarning,
        )

    return difference


def difference_longitude(value_a: ScalarOrArray, value_b: ScalarOrArray) -> ScalarOrArray:
    """Calculates the difference between two longitudinal values.

    The values do not need to be normalized.

    If the difference between ``value_a`` and ``value_b`` is not finite (i.e. ``NaN``, ``+Inf`` or ``-Inf``) a
    warning is printed and ``NaN`` is returned.

    Examples:

        >>> difference_longitude(-145.0, +150.0)
        65.0
        >>> difference_longitude(-90.0, -90.0)
        0.0
        >>> difference_longitude(-90.0, +90.0)  # the maximum distance
        180.0
        >>> difference_longitude(-180.0, +190.0)
        10.0

        Take care: this will also calculate distances for rubbish values.

        >>> difference_longitude(95324.0, 3224.25)
        60.25

    Args:
        value_a: the first longitude in degrees
        value_b: the second longitude in degrees

    Returns:
        The difference between the two values in degrees in :math:`[0, 180]`
    """

    return _difference_circular_range(value_a, value_b, -180.0, +180.0)


def difference_direction(value_a: ScalarOrArray, value_b: ScalarOrArray) -> ScalarOrArray:
    """Calculates the difference between two directional (azimuthal/yaw) values.

    The values do not need to be normalized.

    If the difference between ``value_a`` and ``value_b`` is not finite (i.e. ``NaN``, ``+Inf`` or ``-Inf``) a
    warning is printed and ``NaN`` is returned.

    Examples:

        >>> difference_direction(145.0, 165.0)
        20.0
        >>> difference_direction(42.0, 42.0)
        0.0
        >>> difference_direction(350.0, 334.5)
        15.5
        >>> difference_direction(270.0, 90.0)  # the maximum distance
        180.0
        >>> difference_direction(365.0, 1.0)
        4.0
        >>> difference_direction(370.0, -20.0)
        30.0

        Take care: this will also calculate distances for rubbish values.

        >>> difference_direction(95324.0, 3224.25)
        60.25

    Args:
        value_a: the first direction in degrees
        value_b: the second direction in degrees

    Returns:
        The difference between the two values in degrees in :math:`[0, 180]`
    """

    return _difference_circular_range(value_a, value_b, 0.0, +360.0)


# Translation -----------------------------------------------------------------


def translate_floats(
    longitude: float, latitude: float, direction: float, distance: float
) -> Tuple[Tuple[float, float], float]:
    """Simply a convenience method for calling :func:`~.translate_numpy` with a single point.

    Args:
        longitude: the original longitude in degrees
        latitude: the original latitude in degrees
        direction: the direction to translate into in degrees
        distance: the distance to translate by in meters

    Returns:
        a pair ``(longitude, latitude)`` with the new coordinates and the back azimuth
    """
    # just use the numpy variant as it would be converted to an array in pyproj internally anyhow
    coordinates_array = array([[longitude, latitude]])
    result, back = translate_numpy(coordinates_array, direction, distance)
    new_coordinates = (result[0, 0], result[0, 1])

    return new_coordinates, back[0]
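# A quick, illustrative sanity check for translate_floats() (not a doctest; the
# figures are approximate and depend on the WGS84 geodesic):
#
#     (lon, lat), back = translate_floats(8.654052, 49.878091, 0.0, 27_000.0)
#     # `lat` is now roughly 50.12 (about 0.24° further north) and `back` is
#     # close to 180.0, i.e. pointing back south towards the starting point.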
def translate_numpy(
    coordinates: ndarray,
    direction: Union[float, ndarray],
    distance: Union[float, ndarray],
) -> Tuple[ndarray, ndarray]:
    """Translates the given point(s) by a given distance and direction/azimuth.

    Everything is assumed to be in degrees.
    Furthermore, this method returns the back azimuth as documented below.

    Under the hood uses :meth:`pyproj.Geod.fwd`, which computes the *forward transformation* or
    *forward azimuth*. This walks the given distance on the great circle arc given by the direction/
    azimuth. It uses the direction to define the initial azimuth, as the real azimuth will probably change
    along the great circle path (unless going exactly north/south or east/west).
    See also `this website <https://www.movable-type.co.uk/scripts/latlong.html>`__, sections "Bearing"
    and "Midpoint".

    Note:
        See the underlying geographiclib library, <geodesic.h>, *geod_direct()* for details on the
        behaviour at the poles and other special cases. It's rather strange. Also keep in mind that this
        method suffers from numerical issues like pretty much anything involving floating point
        computations.

    Note:
        This is already provided in an object-oriented fashion by
        - :meth:`pyrate.plan.geometry.location.PolarLocation.translate`
        - :meth:`pyrate.plan.geometry.polygon.PolarPolygon.translate`
        - :meth:`pyrate.plan.geometry.route.PolarRoute.translate`

    Args:
        coordinates: the coordinates as a numpy array with dimensions ``(number of points, 2)``,
            where the first component describes the longitude and the second one the latitude
        direction: The direction/azimuth to head to in degrees in :math:`[0, 360]` (0° is north, 90° is east).
            If it is a scalar, a single value is assumed for all points.
            If it is an array, it must be of shape ``(number of points, )``.
        distance: The distance to translate by in meters; should not be very close to zero if the
            backwards azimuth shall be used due to numerical stability.
            If it is a scalar, a single value is assumed for all points.
            If it is an array, it must be of shape ``(number of points, )``.

    Returns:
        (1) The new coordinates in the same format as the input
        (2) The backwards azimuth in :math:`[0, 360)`, i.e. the direction which could be used to travel
            from the modified location back to the original one by translating with that ``direction`` and
            the same ``distance``.
    """

    # Convert from [0, 360[ to [-180, +180]
    if isscalar(direction):
        direction = cast(float, direction)  # The cast is needed until isscalar() narrows the type correctly
        if direction > 180:
            direction -= 360
        azimuth = full((coordinates.shape[0],), direction)
    else:
        # The cast is needed until isscalar() narrows the type correctly
        azimuth = cast(ndarray, direction).copy()
        azimuth[azimuth > 180] -= 360

    # Make sure that dist is an array
    dist = full((coordinates.shape[0],), distance) if isscalar(distance) else distance

    # If any input to fwd() is an array/sequence, then all must be
    coordinates[:, 0], coordinates[:, 1], back_azimuth = WGS84_PYPROJ_GEOD.fwd(
        lons=coordinates[:, 0],
        lats=coordinates[:, 1],
        az=azimuth,
        dist=dist,
        radians=False,
    )

    # back azimuth is in [-180, +180], so we need to convert to [0, 360[
    # see the underlying *geographiclib* library, <geodesic.h>, `geod_direct()`:
    # https://geographiclib.sourceforge.io/1.49/C/geodesic_8h.html#a676f59f07987ddd3dd4109fcfeccdb9d
    back_azimuth[back_azimuth < 0] += 360
    back_azimuth[back_azimuth == 360.0] = 0.0

    return coordinates, back_azimuth


# Distance --------------------------------------------------------------------


def fast_distance_geo(
    latitudes: ScalarOrArray, longitudes: ScalarOrArray, center_latitude: float, center_longitude: float
) -> ScalarOrArray:
    """Approximates the great circle distance of all points to the center.

    Warnings:
        All coordinates are assumed to be within about 250 km of the center to provide reasonable accuracy.
        Under this assumption, it was determined experimentally that the error compared to the great-circle
        distance is always below 5%.
        This was done by setting ``@hypothesis.settings(max_examples=50000)`` on the test case
        ``TestDistanceCalculation.test_fast_distance_geo`` and observing that it did not fail.

    Depending on the latitude **of the center**, the *equirectangular approximation*
    or the *polar coordinate flat-earth formula* is used. Both assume a spherical world and then flatten it
    onto a plane.

    Args:
        latitudes: the latitude values, in radians in range :math:`[-\\frac{π}{2}, +\\frac{π}{2}]`
        longitudes: the longitude values, in radians in range :math:`[-π, +π]`
        center_latitude: the latitude of the center, in radians in range
            :math:`[-\\frac{π}{2}, +\\frac{π}{2}]`
        center_longitude: the longitude of the center, in radians in range :math:`[-π, +π]`

    See Also:
        :func:`~haversine_numpy`: about three times slower but more precise

    References:
        - Based on
          `Movable Type Scripts: Calculate distance, bearing and more between Latitude/Longitude points
          <https://www.movable-type.co.uk/scripts/latlong.html>`__
          (as of Dec. 2020), Section "Equirectangular approximation".
          In that source: ``phi = latitude``, ``lambda = longitude``, ``theta = co-latitude`` and
          ``R = (mean) earth radius``.
    """
    delta_lambda = _difference_circular_range(longitudes, center_longitude, -pi, +pi)  # type: ignore

    # The border value of about 80.0° latitude was determined by eye-balling from some Tissot's indicatrixes
    if abs(center_latitude) > 1.3962634015954636:
        # move all locations to the northern hemisphere first if required
        if center_latitude < 0:
            center_latitude = -center_latitude
            latitudes = -latitudes
        del longitudes, center_longitude  # they are now wrong

        # use the "polar coordinate flat-earth formula"
        theta_1 = (pi / 2) - latitudes
        theta_2 = (pi / 2) - center_latitude
        summed = square(theta_1) + square(theta_2) - 2 * theta_1 * theta_2 * cos(delta_lambda)  # type: ignore
        summed = clip(summed, 0.0, None)  # for numerical stability as above sum may be slightly negative
        return cast(ScalarOrArray, sqrt(summed) * MEAN_EARTH_RADIUS)

    # use the "equirectangular approximation"
    d_lat = _difference_circular_range(latitudes, center_latitude, -pi / 2, +pi / 2)  # type: ignore
    d_lon = delta_lambda * cos(center_latitude)
    dist_degrees = hypot(d_lat, d_lon)  # type: ignore
    return cast(ScalarOrArray, dist_degrees * MEAN_EARTH_RADIUS)


def haversine_numpy(
    latitudes: ScalarOrArray, longitudes: ScalarOrArray, center_latitude: float, center_longitude: float
) -> ScalarOrArray:
    """Calculate the great circle distance between each point and the center in meters.

    Note:
        "The min() function protects against possible roundoff errors that could
        sabotage computation of the arcsine if the two points are very nearly
        antipodal (that is, on opposite sides of the Earth). Under these conditions,
        the Haversine Formula is ill-conditioned (see the discussion below), but
        the error, perhaps as large as 2 km [...], is in the context of a
        distance near 20,000 km [...]."
        (Source: `Movable Type Scripts: GIS FAQ Q5.1: Great circle distance between 2 points
        <https://www.movable-type.co.uk/scripts/gis-faq-5.1.html>`__)

    Args:
        latitudes: the latitude values, in radians in range :math:`[-\\frac{π}{2}, +\\frac{π}{2}]`
        longitudes: the longitude values, in radians in range :math:`[-π, +π]`
        center_latitude: the latitude of the center, in radians in range
            :math:`[-\\frac{π}{2}, +\\frac{π}{2}]`
        center_longitude: the longitude of the center, in radians in range :math:`[-π, +π]`

    See Also:
        :func:`~fast_distance_geo`: an approximation that is about three times faster

    Returns:
        The great circle distance between each point and the center in meters.

    References:
        - `Wikipedia: Haversine formula <https://en.wikipedia.org/wiki/Haversine_formula>`__
    """
    d_lat = latitudes - center_latitude
    d_lon = longitudes - center_longitude
    summed = sin(d_lat / 2) ** 2 + cos(latitudes) * cos(center_latitude) * sin(d_lon / 2) ** 2
    # the intermediate result is the great circle distance in radians
    d_rad = 2 * arcsin(numpy.minimum(sqrt(summed), 1.0))
    # the great circle distance will be in the same units as MEAN_EARTH_RADIUS
    return cast(ScalarOrArray, d_rad * MEAN_EARTH_RADIUS)


# Conversion between meters and radians ---------------------------------------


def meters2rad(meters: ScalarOrArray) -> ScalarOrArray:
    """Meters to radians (latitude or longitude) at the equator."""
    return (meters / MEAN_EARTH_CIRCUMFERENCE) * (2.0 * pi)


def rad2meters(rad: ScalarOrArray) -> ScalarOrArray:
    """Radians (latitude or longitude) at the equator to meters."""
    return (rad / (2.0 * pi)) * MEAN_EARTH_CIRCUMFERENCE


# Cartesian to Spherical ------------------------------------------------------


def cartesian_to_spherical(xyz: ndarray) -> Tuple[ndarray, ndarray]:
    """Converts cartesian coordinates on a unit sphere to spherical coordinates.

    Args:
        xyz: The cartesian coordinates, expected as an array where each row contains three coordinates for
            a point.

    Returns:
        The coordinates as latitude and longitude in radians,
        such that :math:`-\\frac{π}{2} ≤ φ ≤ +\\frac{π}{2}` is the latitude and :math:`-π ≤ θ < +π` is the
        longitude.

    Raises:
        :class:`AssertionError`: if not all points lie on the unit sphere, as then the altitude would be
            relevant, but it is not considered by this conversion

    References:
        - `Movable Type Scripts: Vector-based geodesy
          <https://www.movable-type.co.uk/scripts/latlong-vectors.html>`__
        - `The relevant Wikipedia article
          <https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates>`__.
          Note: In these formulas, mathematicians' coordinates are used, where :math:`0 ≤ φ ≤ π` is the
          latitude coming down from the pole and :math:`0 ≤ θ ≤ 2π` is the longitude,
          with the prime meridian being at :math:`π`.
          We convert these to the usual coordinate conventions of the geographic community within this method.
        - The `nvector library <https://github.com/pbrod/nvector/>`__ provides a possible alternative
          implementation (see section "Example 3: 'ECEF-vector to geodetic latitude'").
    """
    # elevation / r:
    elevation = sqrt(xyz[:, 0] ** 2 + xyz[:, 1] ** 2 + xyz[:, 2] ** 2)
    assert not numpy.any(absolute(elevation - 1.0) > 1e-9), "not all points lie on the unit sphere"

    # also normalize because the floating point representation of the cartesian coordinates might have
    # slightly messed with it; this value moves the borders of the clipping slightly inwards
    # in other words: it makes the clipped values lie *strictly* within the bounds, and never
    # with equality
    move_in = 1e-14  # empirically worked well

    # latitude / theta:
    # we know that the elevation is very close to 1, so we do not need to divide by it
    latitudes = arccos(xyz[:, 2])
    latitudes = clip(latitudes, move_in, pi - move_in)  # clip at the poles
    latitudes -= pi / 2  # convert from mathematical to geographic convention

    # longitude / phi
    longitudes = arctan2(xyz[:, 1], xyz[:, 0])
    # we also clip here although wrapping using modulo 2*pi would be more appropriate
    # however, this had introduced new numerical problems which are avoided by clipping
    # This also guarantees that each longitude is strictly less than 180°
    longitudes = clip(longitudes, -pi + move_in, +pi - move_in)

    return latitudes, longitudes


# Mean computation on angles and coordinates ----------------------------------


def mean_coordinate(latitudes: ndarray, longitudes: ndarray) -> Tuple[float, float]:
    """Computes a reasonable mean coordinate if possible.

    Args:
        latitudes: The array of latitude values to compute the mean of, in degrees. Will be flattened.
        longitudes: The array of longitude values to compute the mean of, in degrees. Will be flattened.
            Must be of the same length as ``latitudes``.

    Returns:
        The mean coordinate of the given ones, in degrees as ``(latitude, longitude)``.

    Raises:
        ValueError: If no meaningful mean (of the longitudes) can be computed. See :func:`~mean_angle`.

    See Also:
        - :func:`~mean_angle`
    """
    assert len(latitudes) == len(longitudes), "Both coordinate arrays must have the same length"

    # In case of the latitude values, the "ambiguous" case of antipodal angles/points can be solved by
    # observing that only latitude values between -90° and +90° are allowed. Therefore, +/- 0° is a reasonable
    # result in this case.
    try:
        latitude = mean_angle(numpy.radians(latitudes))
    except ValueError:
        latitude = 0.0

    # In the case of longitudes, simply let the ValueError raise as there is nothing we can do here
    longitude = mean_angle(numpy.radians(longitudes))

    return numpy.degrees(latitude), numpy.degrees(longitude)


def mean_angle(radians: ndarray, tolerance: float = 1e-6) -> float:
    """Computes a reasonable mean value if possible.

    Args:
        radians: The array of angles to compute the mean of, in radians. Will be flattened.
        tolerance: If both components of the cartesian intermediate representation are less than this value,
            a ``ValueError`` with a descriptive error message will be raised.

    Returns:
        The mean angle of the given ones

    References:
        - `Mean of circular quantities (section Mean of angles) on Wikipedia
          <https://en.wikipedia.org/wiki/Mean_of_circular_quantities#Mean_of_angles>`__

    Raises:
        ValueError: If no meaningful mean can be computed. This is the case when two antipodal angles are
            given or the sum of multiple ones is "antipodal".

    See Also:
        - :func:`~mean_coordinate`
    """

    x: float = sin(radians).sum()
    y: float = cos(radians).sum()

    if abs(x) < tolerance and abs(y) < tolerance:
        raise ValueError(
            "The mean angle of nearly antipodal angles is ambiguous. "
            "If this arises while computing mean points on polygons and routes, "
            "the geometry likely is just so large that many approximations will not work anymore. "
            "Consider splitting them up into smaller ones."
        )

    return atan2(x, y) % tau
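To make the circular mean and its documented failure mode concrete, a short sketch (using the module path from this commit; the printed values follow directly from the formula above):

import numpy

from pyrate.plan.geometry.helpers import mean_angle

# The circular mean of 30° and 90° is 60°, as one would expect.
print(numpy.degrees(mean_angle(numpy.radians(numpy.array([30.0, 90.0])))))  # ~60.0

# Two antipodal angles have no meaningful mean, so a ValueError is raised.
try:
    mean_angle(numpy.radians(numpy.array([0.0, 180.0])))
except ValueError:
    print("ambiguous, as documented above")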
491
pyrate/pyrate/plan/geometry/location.py
Normal file
@ -0,0 +1,491 @@
|
||||
"""This module implements abstractions for timestamped geospatial locations in WGS84 and local coordinates.
|
||||
|
||||
Two locations are ``==`` if and only if they are equal according to ``equals_exact()``.
|
||||
"""
|
||||
|
||||
# Standard library
|
||||
from copy import deepcopy
|
||||
from math import cos
|
||||
from math import radians
|
||||
from math import sin
|
||||
|
||||
# Typing
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
from typing import Dict
|
||||
from typing import Optional
|
||||
from typing import Tuple
|
||||
|
||||
# Mathematics
|
||||
from geopy.distance import GeodesicDistance
|
||||
from geopy.distance import GreatCircleDistance
|
||||
from numpy import array
|
||||
from numpy import ndarray
|
||||
from pyproj import Proj
|
||||
from shapely.affinity import translate
|
||||
from shapely.geometry import Point
|
||||
|
||||
# Basis
|
||||
from .geospatial import Geospatial
|
||||
from .geospatial import LocationType
|
||||
|
||||
# Helpers
|
||||
from .helpers import normalize_latitude
|
||||
from .helpers import normalize_longitude
|
||||
from .helpers import translate_floats
|
||||
|
||||
|
||||


class PolarLocation(Geospatial):
    """A geospatial location representing a spatial object on earth.

    See `here <http://www.movable-type.co.uk/scripts/latlong.html>`__ for a nice collection of formulas
    and explanations on geographic transformations and calculations. This is the *Rome* for geographic
    calculation questions on *Stack Overflow*: All roads seem to eventually lead here.

    Examples:
        First import some packages

        >>> from math import isclose
        >>> from pyrate.plan.geometry import PolarLocation, Direction

        Then create two example coordinates to work with:

        >>> team_room = PolarLocation(latitude=49.878091, longitude=8.654052)
        >>> frankfurt = PolarLocation(latitude=50.113709, longitude=8.656561)

        Translate the team room 27 km north, which is towards *Frankfurt*:

        >>> team_room, direction_going_back = team_room.translate(direction=Direction.North, distance=27_000)
        >>> assert isclose(direction_going_back, Direction.South)

        The variable ``team_room`` now represents a location in/near *Frankfurt*,
        only a couple hundred meters away from the location ``frankfurt``:

        >>> print(team_room.distance(frankfurt))  # doctest: +ELLIPSIS
        812.512...

        Coordinates can also be projected onto a local tangent plane and back.
        The ``origin`` defines the point where the plane touches the sphere.

        >>> frankfurt == frankfurt.to_cartesian(origin=frankfurt).to_polar()
        True

    Args:
        latitude: The latitude in degrees (will be normalized)
        longitude: The longitude in degrees (will be normalized)
        location_type: The type of this location
        name: An optional name of this location
        identifier: An optional unique identifier for this object, in :math:`[0, 2**63)`, i.e. 64 signed bits
    """

    def __init__(  # pylint: disable=too-many-arguments
        self,
        latitude: float,
        longitude: float,
        location_type: LocationType = LocationType.UNKNOWN,
        name: Optional[str] = None,
        identifier: Optional[int] = None,
    ) -> None:
        # Type hints
        self._latitude: float
        self._longitude: float
        self._projection: Optional[Proj]

        # Attributes setup
        self.latitude = latitude
        self.longitude = longitude
        # self._projection = None  # already set by the property accesses before

        super().__init__(location_type=location_type, name=name, identifier=identifier)

    @property
    def latitude(self) -> float:
        """The latitude of this location in degrees in :math:`[-90, +90]`.

        The value is always disambiguated/normalized.
        """

        return self._latitude

    @latitude.setter
    def latitude(self, latitude: float) -> None:
        self._projection = None
        self._latitude = normalize_latitude(latitude)

    @property
    def longitude(self) -> float:
        """The longitude of this location in degrees in :math:`[-180, +180)`.

        The value is always disambiguated/normalized.
        """

        return self._longitude

    @longitude.setter
    def longitude(self, longitude: float) -> None:
        self._projection = None
        self._longitude = normalize_longitude(longitude)

    @property
    def projection(self) -> Proj:
        """Derive a :class:`pyproj.Proj` instance for projecting points.

        This instance is cached for performance reasons, since its creation is relatively time
        consuming. The cache is appropriately invalidated when setting a new :attr:`~latitude`
        or :attr:`~longitude`.
        """

        if self._projection is None:
            self._projection = Proj(
                proj="tmerc",
                ellps="WGS84",
                units="m",
                lon_0=self.longitude,
                lat_0=self.latitude,
            )

        return self._projection
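
    # The cached projection is a local transverse Mercator plane centred on this location. A brief
    # sketch of how it is used elsewhere in this module (forward maps degrees to meters,
    # ``inverse=True`` maps back); illustrative only:
    #
    #   origin = PolarLocation(latitude=50.0, longitude=8.0)
    #   east, north = origin.projection(8.001, 50.001)           # lon/lat in degrees -> meters
    #   lon, lat = origin.projection(east, north, inverse=True)  # meters -> lon/lat in degrees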

    def to_cartesian(self, origin: "PolarLocation") -> "CartesianLocation":
        """Projects this point to a cartesian representation according to the given global reference.

        Args:
            origin: The reference by which to project onto the local tangent plane

        Returns:
            The cartesian representation of this point with the given reference point being set
        """

        # convert to cartesian
        east, north = origin.projection(self.longitude, self.latitude)

        return CartesianLocation(
            east,
            north,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
            origin=origin,
        )

    def translate(self, direction: float, distance: float) -> Tuple["PolarLocation", float]:
        """Translates this location and returns the new location and back-azimuth.

        See :func:`pyrate.plan.geometry.helpers.translate_floats` for details.
        """

        back_azimuth: float  # this annotation is required for mypy
        (longitude, latitude), back_azimuth = translate_floats(
            self.longitude, self.latitude, direction, distance
        )

        new_location = PolarLocation(
            longitude=longitude,
            latitude=latitude,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
        )

        return new_location, back_azimuth

    def distance(self, other: "PolarLocation", approximate: bool = False) -> float:
        """Calculates the horizontal geodesic distance to another location in meters; assumes degrees.

        This assumes an ellipsoidal earth and converges for any pair of points on earth.
        It is accurate to round-off and uses *geographiclib* (https://pypi.org/project/geographiclib/)
        via *geopy* (https://pypi.org/project/geopy/).

        The faster *great-circle distance* can also be used by setting ``approximate=True``.
        It assumes only a spherical earth and is guaranteed to give a result for any pair of points.
        It is wrong by up to 0.5% and based on *geopy*. It is advised to use the exact solution unless
        you know what you are doing.

        See also:
            - https://en.wikipedia.org/wiki/Geodesics_on_an_ellipsoid
            - https://en.wikipedia.org/wiki/Great-circle_distance
            - https://en.wikipedia.org/wiki/Geographical_distance

        Args:
            other: The location to measure the distance to in degrees
            approximate: Whether to use a faster approximation or not (default: ``False``)

        Returns:
            The distance to the other point in meters
        """

        # input as latitude, longitude
        this = (self.latitude, self.longitude)
        that = (other.latitude, other.longitude)

        if approximate:
            distance = GreatCircleDistance(this, that).meters
        else:
            distance = GeodesicDistance(this, that).meters

        # Geopy is not typed as of now
        return cast(float, distance)
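
    # A quick comparison of the two modes; illustrative only, exact numbers depend on the installed
    # geopy/geographiclib versions:
    #
    #   darmstadt = PolarLocation(latitude=49.872775, longitude=8.651177)
    #   hamburg = PolarLocation(latitude=53.551086, longitude=9.993682)
    #   exact = darmstadt.distance(hamburg)                    # ellipsoidal, slower
    #   rough = darmstadt.distance(hamburg, approximate=True)  # spherical, faster
    #   assert abs(exact - rough) / exact < 0.005              # within the documented 0.5%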

    @property
    def __geo_interface__(self) -> Dict[str, Any]:
        return {"type": "Point", "coordinates": (self.longitude, self.latitude)}

    def __eq__(self, other: Any) -> bool:
        return self.equals_exact(other, tolerance=0.0)

    def equals(self, other: Any) -> bool:
        """Determines whether the given ``other`` object exactly equals this one.

        This function mimics :meth:`shapely.geometry.base.BaseGeometry.equals`:
        "Refers to point-set equality (or topological equality), and is equivalent to
        ``self.within(other) and self.contains(other)``."

        Args:
            other: The object to compare to

        Returns:
            Whether this and the other object are exactly equal
        """
        # The above docstring is also copied by PolarPolygon and PolarRoute

        return self.equals_exact(other, 0.0)

    def equals_exact(self, other: Any, tolerance: float) -> bool:
        """Determines whether the given ``other`` object equals this one.

        This function mimics :meth:`shapely.geometry.base.BaseGeometry.equals_exact`:
        "Refers to coordinate equality, which requires coordinates to be equal
        and in the same order for all components of a geometry."

        Args:
            other: The object to compare to
            tolerance: The absolute deviation in meters that is tolerated on the latitude and
                longitude values

        Returns:
            Whether this and the ``other`` object are (nearly) equal
        """
        # The above docstring is also copied by PolarPolygon and PolarRoute

        return (
            isinstance(other, PolarLocation)
            and Geospatial.__eq__(self, other)
            and self.distance(other) <= tolerance
        )

    def __repr__(self) -> str:
        # we leave out self._projection due to performance reasons and because it is redundant
        return f"PolarLocation(latitude={self.latitude}, longitude={self.longitude}{self._repr_extras})"


class CartesianLocation(Geospatial, Point):
    """A point in the cartesian plane based on local coordinates with an optional global reference.

    Examples:
        You can simply create a cartesian location like this, where coordinates are in meters:

        >>> location_a = CartesianLocation(east=10, north=-20)
        >>> location_b = CartesianLocation(east=-30, north=0)
        >>> distance = location_a.distance(location_b)
        >>> distance  # doctest: +ELLIPSIS
        44.721...

        Keep in mind that locations (like all other cartesian geometries) are immutable due to the
        underlying Shapely library:

        >>> location_a.x = 5.0
        Traceback (most recent call last):
            ...
        AttributeError: can't set attribute

        The attributes ``east`` and ``north`` are provided as aliases for ``x`` and ``y``:

        >>> assert location_a.x == location_a.east
        >>> assert location_a.y == location_a.north

        You can also project them back to polar coordinates.
        To do this, one must only provide a reference point ``origin`` either when constructing the
        location or when calling :meth:`~to_polar`:

        >>> reference = PolarLocation(latitude=50, longitude=30)
        >>> location_a.origin = reference
        >>> location_b.origin = reference
        >>> location_a.to_polar().distance(location_b.to_polar())  # doctest: +ELLIPSIS
        44.721...

        As any :class:`~CartesianLocation` also inherits from :class:`shapely.geometry.Point`,
        we can also use :mod:`shapely` methods
        (see `the Shapely docs <https://shapely.readthedocs.io/en/stable/manual.html>`__).
        For example, we can inflate the point using ``buffer()``.
        Mind though, that this will return a :mod:`shapely` geometry and not a
        :mod:`pyrate.plan.geometry` object.

        >>> buffered = location_a.buffer(10)
        >>> buffered.geometryType()
        'Polygon'

        Thus, we need to convert it back to a pyrate object like so (keep in mind that we now need a
        polygon):

        >>> from pyrate.plan.geometry.polygon import CartesianPolygon
        >>> buffered_pyrate = CartesianPolygon.from_shapely(buffered)
        >>> buffered.equals(buffered_pyrate)
        True

    Args:
        east: The easting of the location in meters
        north: The northing of the location in meters
        origin: A reference that can be used to project this cartesian representation (back)
            into a polar one
        location_type: The type of this location
        name: An optional name of this location
        identifier: An optional unique identifier for this object, in :math:`[0, 2**63)`, i.e. 64 signed bits
    """

    def __init__(  # pylint: disable=too-many-arguments
        self,
        east: float,
        north: float,
        origin: Optional["PolarLocation"] = None,
        location_type: LocationType = LocationType.UNKNOWN,
        name: Optional[str] = None,
        identifier: Optional[int] = None,
    ) -> None:
        # Set attribute
        self.origin = origin

        # Typing hints (actually defined by shapely)
        self.x: float
        self.y: float

        # Initialize the super classes
        Point.__init__(self, east, north)
        Geospatial.__init__(self, location_type=location_type, name=name, identifier=identifier)

    #: Named access to the internal shapely point ``x``. Read-only.
    east: float = Point.x

    #: Named access to the internal shapely point ``y``. Read-only.
    north: float = Point.y

    def to_polar(self, origin: Optional["PolarLocation"] = None) -> PolarLocation:
        """Computes the polar representation of this point.

        Args:
            origin: The global reference to be used for back-projection, must be set if and only if
                :attr:`~pyrate.plan.geometry.CartesianLocation.origin` is ``None``

        Returns:
            The global, polar representation of this point
        """

        if origin is None:
            if self.origin is None:
                raise ValueError("need to give an explicit origin when the instance does not have one")
            origin = self.origin
        elif self.origin is not None:
            raise ValueError("provided an explicit origin while the instance already has one")

        # convert back to polar coordinates
        longitude, latitude = origin.projection(self.east, self.north, inverse=True)

        return PolarLocation(
            longitude=longitude,
            latitude=latitude,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
        )
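
    # The origin handling is strict in both directions; a short sketch of the two error cases
    # (illustrative only):
    #
    #   CartesianLocation(east=1, north=2).to_polar()
    #   # -> ValueError: need to give an explicit origin when the instance does not have one
    #
    #   anchored = CartesianLocation(east=1, north=2, origin=PolarLocation(50, 8))
    #   anchored.to_polar(origin=PolarLocation(0, 0))
    #   # -> ValueError: provided an explicit origin while the instance already has one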

    @classmethod
    def from_shapely(cls, point: Point, *args, **kwargs) -> "CartesianLocation":
        """Creates a cartesian location from a shapely point.

        Args:
            point: A shapely point
            *args: Positional arguments to be passed to :class:`~CartesianLocation`
            **kwargs: Keyword arguments to be passed to :class:`~CartesianLocation`

        Returns:
            The cartesian location created from the given geometry and other parameters
        """
        return cls(point.x, point.y, *args, **kwargs)

    def translate(self, direction: float, distance: float) -> Tuple["CartesianLocation", ndarray]:
        """Translates this location.

        Args:
            direction: The direction angle in degrees (``0`` is north, clockwise)
            distance: The distance to translate in meters

        Returns:
            The translated location and the translation vector ``(x_offset, y_offset)`` in meters
            that can be used to reconstruct the original location
        """

        x_offset = sin(radians(direction)) * distance
        y_offset = cos(radians(direction)) * distance

        return (
            CartesianLocation.from_shapely(
                translate(Point(self.east, self.north), xoff=x_offset, yoff=y_offset),
                location_type=self.location_type,
                name=self.name,
                identifier=self.identifier,
                origin=self.origin,
            ),
            array([-x_offset, -y_offset]),
        )
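
    # The returned offset vector is the negated translation, so applying it restores the original
    # point; a minimal sketch (illustrative only):
    #
    #   moved, offset = CartesianLocation(east=0, north=0).translate(direction=90, distance=10)
    #   # moved is ~(10, 0); adding the offset (-10, 0) to its coordinates recovers (0, 0)
    #   restored = CartesianLocation(moved.east + offset[0], moved.north + offset[1])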

    @property
    def __geo_interface__(self) -> Dict[str, Any]:
        return {"type": "Point", "coordinates": (self.east, self.north)}

    def __copy__(self) -> "CartesianLocation":
        return CartesianLocation(
            east=self.east,
            north=self.north,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
            origin=self.origin,
        )

    def __deepcopy__(self, memodict: Dict) -> "CartesianLocation":
        return CartesianLocation(
            east=deepcopy(self.east, memodict),
            north=deepcopy(self.north, memodict),
            location_type=deepcopy(self.location_type, memodict),
            name=deepcopy(self.name, memodict),
            identifier=deepcopy(self.identifier, memodict),
            origin=deepcopy(self.origin, memodict),
        )

    def __eq__(self, other: Any) -> bool:
        return self.equals_exact(other, tolerance=0.0)

    # Inherits the docstring
    def equals(self, other: Any) -> bool:  # pylint: disable=missing-function-docstring
        return (
            isinstance(other, CartesianLocation)
            and Point.equals(self, other)
            and Geospatial.__eq__(self, other)
            and self.origin == other.origin
        )

    # Inherits the docstring
    def equals_exact(  # pylint: disable=missing-function-docstring
        self, other: Any, tolerance: float
    ) -> bool:
        return (
            isinstance(other, CartesianLocation)
            and Point.equals_exact(self, other, tolerance)
            and Geospatial.__eq__(self, other)
            and self.origin == other.origin
        )

    def __repr__(self) -> str:
        origin = f", origin={self.origin}" if self.origin is not None else ""
        return f"CartesianLocation(east={self.east}, north={self.north}{origin}{self._repr_extras})"

    def __str__(self) -> str:
        # this is required to override shapely.geometry.Point.__str__()
        return self.__repr__()
624
pyrate/pyrate/plan/geometry/polygon.py
Normal file
@ -0,0 +1,624 @@
"""This module implements abstractions for geospatial, polygonal shapes in WGS84 and local cartesian
|
||||
coordinates using shapely.
|
||||
|
||||
Two polygons are ``==`` if and only if they are equal according to ``equals_exact()``.
|
||||
"""
|
||||
|
||||
# Python standard library
|
||||
from copy import deepcopy
|
||||
from math import cos
|
||||
from math import radians
|
||||
from math import sin
|
||||
|
||||
# Typing
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
from typing import Dict
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Tuple
|
||||
|
||||
# Data modelling
|
||||
from numpy import array
|
||||
from numpy import isfinite
|
||||
from numpy import ndarray
|
||||
from shapely.affinity import translate
|
||||
from shapely.geometry import Polygon
|
||||
|
||||
# Geospatial basis
|
||||
from .geospatial import Geospatial
|
||||
from .geospatial import LocationType
|
||||
|
||||
# Geospatial helpers
|
||||
from .helpers import mean_coordinate
|
||||
from .helpers import translate_numpy
|
||||
from .helpers import WGS84_PYPROJ_GEOD
|
||||
|
||||
# Location representation
|
||||
from .location import CartesianLocation
|
||||
from .location import PolarLocation
|
||||
|
||||
|
||||


class PolarPolygon(Geospatial):
    """A polygon based on WGS84 coordinates.

    An object with only a single point may be represented by a polygon with three times the same
    location.

    Args:
        locations: The points that make up this polygon; see :attr:`~.locations`
        location_type: The type of this polygon
        name: An optional name of this polygon
        identifier: The polygon's optional unique identifier, in :math:`[0, 2**63)`, i.e. 64 signed bits
    """

    def __init__(
        self,
        locations: List[PolarLocation],
        location_type: LocationType = LocationType.UNKNOWN,
        name: Optional[str] = None,
        identifier: Optional[int] = None,
    ) -> None:
        # Type hints
        self._locations: List[PolarLocation]

        # Attributes setup
        self.locations = locations
        self._mean_location: Optional[PolarLocation] = None

        super().__init__(location_type=location_type, name=name, identifier=identifier)

    @property
    def locations(self) -> List[PolarLocation]:
        """The points that make up this polygon.

        Getter:
            At least three points are returned.

        Setter:
            The list is closed if not already done, such that the first and last points in the list
            always match exactly. Raises an :class:`AssertionError` if fewer than three points are
            given.
        """

        return self._locations

    @locations.setter
    def locations(self, locations: List[PolarLocation]) -> None:
        assert len(locations) >= 3, "a polygon must contain at least three points"

        # close the ring as shapely would do it
        # comparison is done by exact comparison
        if (
            locations[0].latitude != locations[-1].latitude
            or locations[0].longitude != locations[-1].longitude
        ):
            locations.append(locations[0])

        self._locations = locations
        self._mean_location = None
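
    # The setter mirrors shapely's implicit ring closing; a short sketch (illustrative only):
    #
    #   triangle = PolarPolygon([
    #       PolarLocation(0.0, 0.0),
    #       PolarLocation(0.0, 1.0),
    #       PolarLocation(1.0, 0.0),
    #   ])
    #   assert len(triangle.locations) == 4  # the first point was appended again to close the ring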

    def to_cartesian(self, origin: PolarLocation) -> "CartesianPolygon":
        """Projects this polygon to a cartesian representation according to the given global reference.

        Args:
            origin: The reference point by which to project onto the local tangent plane

        Returns:
            The cartesian representation of this polygon with the given reference point being set
        """

        # convert to cartesian
        coordinates = self.to_numpy()
        coordinates[:, 0], coordinates[:, 1] = origin.projection(coordinates[:, 0], coordinates[:, 1])

        return CartesianPolygon.from_numpy(
            coordinates,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
            origin=origin,
        )

    def distance_to_vertices(self, location: PolarLocation, approximate: bool = False) -> float:
        """Computes the distance of the given location to the nearest vertex of this polygon.

        Args:
            location: The location to compute the distance from
            approximate: Whether to use a less precise, faster method or not
        """

        return min([location.distance(loc, approximate) for loc in self.locations])

    @property
    def area(self) -> float:
        """Returns the area of the polygon in :math:`meters^2`.

        Only simple polygons are supported, i.e. not self-intersecting ones.
        See :meth:`pyproj.Geod.polygon_area_perimeter` for the implementation.
        The returned value is always non-negative.
        """

        _, area = WGS84_PYPROJ_GEOD.polygon_area_perimeter(
            lons=[location.longitude for location in self.locations],
            lats=[location.latitude for location in self.locations],
            radians=False,
        )
        # pyproj is not typed as of now
        # the returned area is signed with the ordering of the points
        return abs(area)

    @property
    def is_valid(self) -> bool:
        """Whether this geometry is valid according to :mod:`shapely`. Quite expensive, not cached.

        Invalid ones might cross themselves or have zero area. Other tools, like *GEOS*, might still
        refuse them.
        """

        return cast(bool, self.to_cartesian(self.mean).is_valid)

    def simplify(self, tolerance: float, preserve_topology: bool = True) -> "PolarPolygon":
        """Creates a simplified copy analogous to :meth:`shapely.geometry.Polygon.simplify`.

        The simplification is achieved by reducing the number of vertices in a way that least deforms
        the shape.

        Args:
            tolerance: This is passed to :meth:`shapely.geometry.Polygon.simplify`:
                "All points in the simplified object will be within the tolerance distance of the
                original geometry."
            preserve_topology: This is passed to :meth:`shapely.geometry.Polygon.simplify`:
                "By default a slower algorithm is used that preserves topology."

        Returns:
            A simplified version of the polygon with the same other attributes
        """

        projection_center = self.mean
        cartesian = self.to_cartesian(projection_center)

        simplified = cartesian.simplify(tolerance, preserve_topology)
        coords = array(simplified.exterior.xy).T  # this is the fastest known method

        result_cartesian = CartesianPolygon.from_numpy(
            coords, location_type=self.location_type, name=self.name, identifier=self.identifier
        )
        return result_cartesian.to_polar(projection_center)
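
    # The simplification round-trips through the local tangent plane: project to cartesian at the
    # mean location, let shapely simplify there, and project back. A short sketch (illustrative only;
    # ``many_locations`` is a hypothetical list of PolarLocation objects):
    #
    #   detailed = PolarPolygon(many_locations)
    #   coarse = detailed.simplify(tolerance=100.0)  # vertices move by at most ~100 m
    #   assert len(coarse.locations) <= len(detailed.locations)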

    def translate(self, direction: float, distance: float) -> Tuple["PolarPolygon", ndarray]:
        """Translates this polygon and returns the new polygon and back-azimuth.

        See :func:`pyrate.plan.geometry.helpers.translate_floats` for details.
        """

        new_coordinates, back_azimuth_array = translate_numpy(self.to_numpy(), direction, distance)

        new_polygon = PolarPolygon.from_numpy(
            new_coordinates,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
        )

        return new_polygon, back_azimuth_array

    def to_numpy(self) -> ndarray:
        """Converts the coordinates defining this polygon into a :class:`numpy.ndarray`.

        Returns:
            An array with shape ``(number of locations, 2)``, where each location is represented by a
            pair of ``(longitude, latitude)``, each in degrees.

        See Also:
            :meth:`~from_numpy`
        """

        return array(
            [(location.longitude, location.latitude) for location in self.locations],
            dtype="float64",
            order="C",
        )

    @classmethod
    def from_numpy(cls, data: ndarray, *args, **kwargs) -> "PolarPolygon":
        """Creates a polar polygon from a numpy representation.

        Args:
            data: An array with shape ``(number of locations, 2)``, where each location is represented
                by a pair of ``(longitude, latitude)``, each in degrees.
            *args: Positional arguments to be passed to :class:`~PolarPolygon`
            **kwargs: Keyword arguments to be passed to :class:`~PolarPolygon`

        Returns:
            The polar polygon created from the given coordinates and other parameters

        Raises:
            AssertionError: If the shape of ``data`` is invalid or it contains non-finite values

        See Also:
            :meth:`~to_numpy`
        """

        assert len(data.shape) == 2
        assert data.shape[1] == 2
        assert isfinite(data).all(), "Invalid values in PolarPolygon.from_numpy()"

        return cls([PolarLocation(latitude=lat, longitude=lon) for (lon, lat) in data], *args, **kwargs)

    @property
    def mean(self) -> PolarLocation:
        """Computes a reasonable mean location of the polygon, if possible. The result is cached.

        Raises:
            ValueError: If no meaningful mean (of the longitudes) can be computed.
                See :func:`pyrate.plan.geometry.helpers.mean_angle`.
        """

        if self._mean_location is None:
            coordinates = self.to_numpy()
            latitude, longitude = mean_coordinate(latitudes=coordinates[:, 1], longitudes=coordinates[:, 0])
            name = f"{self.name} - mean" if self.name else "mean"
            self._mean_location = PolarLocation(latitude, longitude, name=name)

        return self._mean_location

    @property
    def __geo_interface__(self) -> Dict[str, Any]:
        return {
            "type": "Polygon",
            "coordinates": [
                # the inner array is only the exterior ring,
                # and we don't have an interior one
                [(location.longitude, location.latitude) for location in self.locations]
            ],
        }

    def __eq__(self, other: Any) -> bool:
        return self.equals_exact(other, tolerance=0.0)

    def equals(self, other: Any) -> bool:  # pylint: disable=missing-function-docstring
        return (
            isinstance(other, PolarPolygon)
            and self.to_cartesian(self.mean).equals(other.to_cartesian(self.mean))
            and Geospatial.__eq__(self, other)
        )

    equals.__doc__ = PolarLocation.equals.__doc__

    def equals_exact(self, other: Any, tolerance: float) -> bool:
        # pylint: disable=missing-function-docstring

        return (
            isinstance(other, PolarPolygon)
            and self.to_cartesian(self.mean).equals_exact(other.to_cartesian(self.mean), tolerance)
            and Geospatial.__eq__(self, other)
        )

    equals_exact.__doc__ = PolarLocation.equals_exact.__doc__

    def equals_almost_congruent(
        self, other: Any, rel_tolerance: float = 1e-6, abs_tolerance: float = 1e-6
    ) -> bool:
        """Returns whether two objects are approximately congruent and their attributes equal exactly.

        See :meth:`~almost_congruent` for details on the specific definition of congruence and the
        tolerances.

        Args:
            other: The object to compare with
            rel_tolerance: The relative tolerance (relative to the larger area)
            abs_tolerance: The absolute area of tolerance in square meters

        Returns:
            Whether this and the ``other`` polygon are approximately congruent and all attributes are
            equal. Returns ``False`` if ``other`` is not a :class:`~PolarPolygon`.
        """

        return (
            isinstance(other, PolarPolygon)
            and self.almost_congruent(other, rel_tolerance=rel_tolerance, abs_tolerance=abs_tolerance)
            and Geospatial.__eq__(self, other)
        )

    def almost_congruent(
        self, other: "PolarPolygon", rel_tolerance: float = 1e-6, abs_tolerance: float = 1e-6
    ) -> bool:
        """Returns whether two polygons are approximately congruent while allowing for small differences.

        This function is not directly part of shapely and is somewhat costly to compute. It has to:

        - Project both polygons to cartesian coordinates (to continue with shapely calculations).
        - Calculate the area of the symmetric difference between this and the other polygon.
        - Calculate the area of both individual polygons.

        The arguments follow the style of :func:`math.isclose`.

        Args:
            other: The polygon to compare with
            rel_tolerance: The relative tolerance (relative to the larger area)
            abs_tolerance: The absolute area of tolerance in square meters

        Returns:
            Whether this and the other polygon are approximately congruent. The larger one of the
            relative and absolute tolerance is used.
        """

        return self.to_cartesian(self.mean).almost_congruent(
            other.to_cartesian(self.mean), rel_tolerance=rel_tolerance, abs_tolerance=abs_tolerance
        )

    def __repr__(self) -> str:
        locations = ", ".join(str(loc) for loc in self.locations)

        return f"PolarPolygon(locations=[{locations}]{self._repr_extras})"


class CartesianPolygon(Geospatial, Polygon):
    """A cartesian polygon based on local coordinates with an optional global reference.

    Note:
        For the sake of simplicity and performance, this class does not store the given
        :class:`~pyrate.plan.geometry.location.CartesianLocation` instances directly,
        but only their coordinates.
        Thus, when reading back attributes like ``origin``, ``name``, etc. of the locations, they are
        derived from the polygon instance and not from the individual locations.

    Args:
        locations: The list of locations that this shape consists of; see :attr:`~.locations`
        location_type: The type of this polygon
        name: The name of this polygon
        identifier: The polygon's optional unique identifier, in :math:`[0, 2**63)`, i.e. 64 signed bits
        origin: A reference that can be used to project this cartesian representation (back)
            into a polar one
    """

    def __init__(  # pylint: disable=too-many-arguments
        self,
        locations: List[CartesianLocation],
        location_type: LocationType = LocationType.UNKNOWN,
        name: Optional[str] = None,
        identifier: Optional[int] = None,
        origin: Optional[PolarLocation] = None,
    ) -> None:
        self.origin = origin

        if isinstance(locations, list):
            Polygon.__init__(self, [location.coords[0] for location in locations])
        else:
            # this is required for an efficient implementation of CartesianPolygon.from_numpy;
            # we do not add this possibility to the type signature to make people use from_numpy()
            Polygon.__init__(self, locations)

        Geospatial.__init__(self, location_type=location_type, name=name, identifier=identifier)

    @property
    def locations(self) -> List[CartesianLocation]:
        """Get the locations of this polygon. See the class description for caveats."""

        return [
            CartesianLocation(
                x,
                y,
                origin=self.origin,
                location_type=self.location_type,
                name=self.name,
                identifier=self.identifier,
            )
            for (x, y) in self.exterior.coords
        ]

    def to_polar(self, origin: Optional[PolarLocation] = None) -> PolarPolygon:
        """Computes the polar representation of this shape.

        Args:
            origin: The global reference to be used for back-projection, must be set if and only if
                :attr:`~pyrate.plan.geometry.CartesianPolygon.origin` is ``None``

        Returns:
            The global, polar representation of this geometry
        """

        if origin is None:
            if self.origin is None:
                raise ValueError("need to give an explicit origin when the instance does not have one")
            origin = self.origin
        elif self.origin is not None:
            raise ValueError("provided an explicit origin while the instance already has one")

        # convert back to polar coordinates
        coordinates = self.to_numpy()
        coordinates[:, 0], coordinates[:, 1] = origin.projection(
            coordinates[:, 0], coordinates[:, 1], inverse=True
        )

        return PolarPolygon.from_numpy(
            coordinates, location_type=self.location_type, name=self.name, identifier=self.identifier
        )

    def to_numpy(self) -> ndarray:
        """Converts the coordinates defining this polygon into a :class:`numpy.ndarray`.

        Returns:
            An array with shape ``(number of locations, 2)``, where each location is represented by a
            pair of ``(east, north)``, each in meters.

        See Also:
            :meth:`~from_numpy`
        """

        return array(self.exterior.coords, dtype="float64", order="C")

    @classmethod
    def from_numpy(cls, data: ndarray, *args, **kwargs) -> "CartesianPolygon":
        """Creates a cartesian polygon from a numpy representation.

        Args:
            data: An array with shape ``(number of locations, 2)``, where each location is represented
                by a pair of ``(east, north)``, each in meters.
            *args: Positional arguments to be passed to :class:`~CartesianPolygon`
            **kwargs: Keyword arguments to be passed to :class:`~CartesianPolygon`

        Returns:
            The cartesian polygon created from the given coordinates and other parameters

        Raises:
            AssertionError: If the shape of ``data`` is invalid or it contains non-finite values

        See Also:
            :meth:`~to_numpy`
        """

        assert len(data.shape) == 2
        assert data.shape[1] == 2
        assert isfinite(data).all(), "Invalid values in CartesianPolygon.from_numpy()"

        return cls(data, *args, **kwargs)  # type: ignore

    @classmethod
    def from_shapely(cls, polygon: Polygon, *args, **kwargs) -> "CartesianPolygon":
        """Creates a cartesian polygon from a shapely polygon.

        Args:
            polygon: A shapely polygon
            *args: Positional arguments to be passed to :class:`~CartesianPolygon`
            **kwargs: Keyword arguments to be passed to :class:`~CartesianPolygon`

        Returns:
            The cartesian polygon created from the given geometry and other parameters
        """
        return cls.from_numpy(array(polygon.exterior.xy).T, *args, **kwargs)

    def translate(self, direction: float, distance: float) -> Tuple["CartesianPolygon", ndarray]:
        """Translates this polygon.

        Args:
            direction: The direction angle in degrees (``0`` is north, clockwise)
            distance: The distance to translate in meters

        Returns:
            The translated polygon and the translation vector ``(x_offset, y_offset)`` in meters
            that can be used to reconstruct the original polygon
        """

        x_offset = sin(radians(direction)) * distance
        y_offset = cos(radians(direction)) * distance

        return (
            CartesianPolygon.from_shapely(
                translate(Polygon(self.to_numpy()), xoff=x_offset, yoff=y_offset),
                location_type=self.location_type,
                name=self.name,
                identifier=self.identifier,
                origin=self.origin,
            ),
            array([-x_offset, -y_offset]),
        )

    @property
    def __geo_interface__(self) -> Dict[str, Any]:
        return {
            "type": "Polygon",
            "coordinates": [
                # the inner array is only the exterior ring,
                # and we don't have an interior one
                list(self.exterior.coords),
            ],
        }

    def __copy__(self) -> "CartesianPolygon":
        return CartesianPolygon(
            locations=self.locations,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
            origin=self.origin,
        )

    def __deepcopy__(self, memodict: Dict) -> "CartesianPolygon":
        return CartesianPolygon(
            locations=deepcopy(self.locations, memodict),
            location_type=deepcopy(self.location_type, memodict),
            name=deepcopy(self.name, memodict),
            identifier=deepcopy(self.identifier, memodict),
            origin=deepcopy(self.origin, memodict),
        )

    def __eq__(self, other: Any) -> bool:
        return self.equals_exact(other, tolerance=0.0)

    # Inherits the docstring
    def equals(self, other: Any) -> bool:  # pylint: disable=missing-function-docstring
        return (
            isinstance(other, CartesianPolygon)
            and Polygon.equals(self, other)
            and Geospatial.__eq__(self, other)
            and self.origin == other.origin
        )

    # Inherits the docstring
    def equals_exact(  # pylint: disable=missing-function-docstring
        self, other: Any, tolerance: float
    ) -> bool:
        return (
            isinstance(other, CartesianPolygon)
            and Polygon.equals_exact(self, other, tolerance)
            and Geospatial.__eq__(self, other)
            and self.origin == other.origin
        )

    def equals_almost_congruent(
        self, other: Any, rel_tolerance: float = 1e-6, abs_tolerance: float = 1e-6
    ) -> bool:
        """Returns whether two objects are approximately congruent and their attributes equal exactly.

        See :meth:`~almost_congruent` for details on the specific definition of congruence and the
        tolerances.

        Args:
            other: The object to compare with
            rel_tolerance: The relative tolerance (relative to the larger area)
            abs_tolerance: The absolute area of tolerance in square meters

        Returns:
            Whether this and the ``other`` polygon are approximately congruent and all attributes are
            equal. Returns ``False`` if ``other`` is not a :class:`~CartesianPolygon`.
        """

        return (
            isinstance(other, CartesianPolygon)
            and self.almost_congruent(other, rel_tolerance=rel_tolerance, abs_tolerance=abs_tolerance)
            and Geospatial.__eq__(self, other)
            and self.origin == other.origin
        )

    def almost_congruent(
        self, other: "CartesianPolygon", rel_tolerance: float = 1e-6, abs_tolerance: float = 1e-6
    ) -> bool:
        """Returns whether two polygons are approximately congruent while allowing for small differences.

        This function is not directly part of shapely and is somewhat costly to compute. It has to:

        - Calculate the area of the symmetric difference between this and the ``other`` polygon.
        - Calculate the area of both individual polygons.

        The arguments follow the style of :func:`math.isclose`.

        Args:
            other: The polygon to compare with
            rel_tolerance: The relative tolerance (relative to the larger area)
            abs_tolerance: The absolute area of tolerance in square meters

        Returns:
            Whether this and the ``other`` polygon are approximately congruent. The larger one of the
            relative and absolute tolerance is used.
        """

        rel_tolerance_as_abs: float = max(self.area, other.area) * rel_tolerance
        tolerance: float = max(rel_tolerance_as_abs, abs_tolerance)
        difference: float = self.symmetric_difference(other).area
        return difference <= tolerance
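
    # A worked sketch of the tolerance logic (illustrative only). For two 100 m x 100 m squares, the
    # default tolerances allow a symmetric difference of max(10_000 m² * 1e-6, 1e-6 m²) = 0.01 m²;
    # shifting one square east by 4 mm produces a difference of roughly 0.8 m², so it is rejected by
    # the defaults but accepted with rel_tolerance=1e-4 (which allows 1 m²):
    #
    #   square_a = CartesianPolygon.from_numpy(array([[0, 0], [100, 0], [100, 100], [0, 100]]))
    #   square_b, _ = square_a.translate(direction=90, distance=0.004)
    #   assert not square_a.almost_congruent(square_b)
    #   assert square_a.almost_congruent(square_b, rel_tolerance=1e-4)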

    def __repr__(self) -> str:
        origin = f", origin={self.origin}" if self.origin is not None else ""
        locations = ", ".join(f"({x}, {y})" for x, y in self.exterior.coords)

        return f"CartesianPolygon(locations=[{locations}]{origin}{self._repr_extras})"

    def __str__(self) -> str:
        # this is required to override shapely.geometry.Polygon.__str__()
        return self.__repr__()
466
pyrate/pyrate/plan/geometry/route.py
Normal file
@ -0,0 +1,466 @@
"""This module implements abstractions for geospatial routes (line strings) in WGS84 and local coordinate
|
||||
frames.
|
||||
|
||||
Two routes are ``==`` if and only if they are equal according to ``equals_exact()``.
|
||||
"""
|
||||
|
||||
# Python standard library
|
||||
from copy import deepcopy
|
||||
from math import cos
|
||||
from math import radians
|
||||
from math import sin
|
||||
|
||||
# Typing
|
||||
from typing import Any
|
||||
from typing import Dict
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Tuple
|
||||
|
||||
# Data modelling
|
||||
from numpy import array
|
||||
from numpy import isfinite
|
||||
from numpy import ndarray
|
||||
from shapely.affinity import translate
|
||||
from shapely.geometry import LineString
|
||||
|
||||
# Geospatial basis
|
||||
from .geospatial import Geospatial
|
||||
from .geospatial import LocationType
|
||||
|
||||
# Geospatial helpers
|
||||
from .helpers import mean_coordinate
|
||||
from .helpers import translate_numpy
|
||||
|
||||
# Location representation
|
||||
from .location import CartesianLocation
|
||||
from .location import PolarLocation
|
||||
|
||||
|
||||


class PolarRoute(Geospatial):
    """A route (line string) based on WGS84 coordinates.

    Note:
        This class does not yet support simplification, as it has not been required so far.

    Args:
        locations: The two or more points that make up this route; see :attr:`~.locations`
        location_type: The type of this route
        name: An optional name of this route
        identifier: The route's optional unique identifier, in :math:`[0, 2**63)`, i.e. 64 signed bits
    """

    def __init__(
        self,
        locations: List[PolarLocation],
        location_type: LocationType = LocationType.UNKNOWN,
        name: Optional[str] = None,
        identifier: Optional[int] = None,
    ) -> None:
        # Type hints
        self._locations: List[PolarLocation]

        # Attributes setup
        self.locations = locations
        self._mean_location: Optional[PolarLocation] = None

        super().__init__(location_type=location_type, name=name, identifier=identifier)

        # See Shapely issue
        if self.length(approximate=True) < 1e-9:
            raise ValueError(f"(Nearly) zero-length line strings are not allowed by Shapely; got {locations}")

    @property
    def locations(self) -> List[PolarLocation]:
        """The points that make up this route.

        Getter:
            At least two points are returned.

        Setter:
            Raises an :class:`AssertionError` if fewer than two points are given.
        """

        return self._locations

    @locations.setter
    def locations(self, locations: List[PolarLocation]) -> None:
        assert len(locations) >= 2, "a route must contain at least two points"
        self._locations = locations
        self._mean_location = None

    def distance_to_vertices(self, location: PolarLocation, approximate: bool = False) -> float:
        """Computes the distance of the given ``location`` to the nearest vertex of this route.

        Args:
            location: The location to compute the distance from
            approximate: Whether to use a less precise, faster method or not
        """

        return min([location.distance(loc, approximate) for loc in self.locations])

    def length(self, approximate: bool = False) -> float:
        """Computes the length of this route from start to end.

        Args:
            approximate: Whether to use a less precise, faster method or not
        """

        return sum([a.distance(b, approximate) for (a, b) in zip(self.locations[:-1], self.locations[1:])])
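
    # The length is simply the sum of the pairwise vertex distances; a short sketch (illustrative
    # only, one degree of longitude at the equator is roughly 111 km):
    #
    #   route = PolarRoute([PolarLocation(0.0, 0.0), PolarLocation(0.0, 1.0), PolarLocation(0.0, 2.0)])
    #   print(route.length())  # ~222_000 m, computed geodesically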

    def to_cartesian(self, origin: PolarLocation) -> "CartesianRoute":
        """Projects this route to a cartesian representation according to the given global reference.

        Args:
            origin: The reference by which to project onto the local tangent plane

        Returns:
            The cartesian representation of this route with the given reference point being set
        """

        # convert to cartesian
        coordinates = self.to_numpy()
        coordinates[:, 0], coordinates[:, 1] = origin.projection(coordinates[:, 0], coordinates[:, 1])

        return CartesianRoute.from_numpy(
            coordinates,
            origin=origin,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
        )

    def to_numpy(self) -> ndarray:
        """Converts the coordinates defining this route into a :class:`numpy.ndarray`.

        Returns:
            An array with shape ``(number of locations, 2)``, where each location is represented by a
            pair of ``(longitude, latitude)``, each in degrees.

        See Also:
            :meth:`~from_numpy`
        """

        return array(
            [(location.longitude, location.latitude) for location in self.locations],
            dtype="float64",
            order="C",
        )

    @classmethod
    def from_numpy(cls, data: ndarray, *args, **kwargs) -> "PolarRoute":
        """Creates a polar route from a numpy representation.

        Args:
            data: An array with shape ``(number of locations, 2)``, where each location is represented
                by a pair of ``(longitude, latitude)``, each in degrees.
            *args: Positional arguments to be passed to :class:`~PolarRoute`
            **kwargs: Keyword arguments to be passed to :class:`~PolarRoute`

        Returns:
            The polar route created from the given coordinates and other parameters

        Raises:
            AssertionError: If the shape of ``data`` is invalid or it contains non-finite values

        See Also:
            :meth:`~to_numpy`
        """

        assert len(data.shape) == 2
        assert data.shape[1] == 2
        assert isfinite(data).all(), "Invalid values in PolarRoute.from_numpy()"

        return cls([PolarLocation(latitude=lat, longitude=lon) for (lon, lat) in data], *args, **kwargs)

    def translate(self, direction: float, distance: float) -> Tuple["PolarRoute", ndarray]:
        """Translates this route and returns the new route and back-azimuth.

        See :func:`pyrate.plan.geometry.helpers.translate_floats` for details.
        """

        new_coordinates, back_azimuth_array = translate_numpy(self.to_numpy(), direction, distance)

        new_route = PolarRoute.from_numpy(
            new_coordinates,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
        )

        return new_route, back_azimuth_array

    @property
    def mean(self) -> PolarLocation:
        """Computes a reasonable mean location of the route, if possible. The result is cached.

        Raises:
            ValueError: If no meaningful mean (of the longitudes) can be computed.
                See :func:`pyrate.plan.geometry.helpers.mean_angle`.
        """

        if self._mean_location is None:
            coordinates = self.to_numpy()
            latitude, longitude = mean_coordinate(latitudes=coordinates[:, 1], longitudes=coordinates[:, 0])
            name = f"{self.name} - mean" if self.name else "mean"
            self._mean_location = PolarLocation(latitude, longitude, name=name)

        return self._mean_location

    @property
    def __geo_interface__(self) -> Dict[str, Any]:
        return {
            "type": "LineString",
            "coordinates": [(location.longitude, location.latitude) for location in self.locations],
        }

    def __eq__(self, other: Any) -> bool:
        return self.equals_exact(other, tolerance=0.0)

    def equals(self, other: Any) -> bool:  # pylint: disable=missing-function-docstring
        return (
            isinstance(other, PolarRoute)
            and self.to_cartesian(self.mean).equals(other.to_cartesian(self.mean))
            and Geospatial.__eq__(self, other)
        )

    equals.__doc__ = PolarLocation.equals.__doc__

    def equals_exact(self, other: Any, tolerance: float) -> bool:
        # pylint: disable=missing-function-docstring

        return (
            isinstance(other, PolarRoute)
            and self.to_cartesian(self.mean).equals_exact(other.to_cartesian(self.mean), tolerance)
            and Geospatial.__eq__(self, other)
        )

    equals_exact.__doc__ = PolarLocation.equals_exact.__doc__

    def __repr__(self) -> str:
        locations = ", ".join(str(loc) for loc in self.locations)
        return f"PolarRoute(locations=[{locations}]{self._repr_extras})"


class CartesianRoute(Geospatial, LineString):
    """A cartesian route (line string) in local coordinates, optionally with a global reference point.

    Note:
        For the sake of simplicity and performance, this class does not store the given
        :class:`~pyrate.plan.geometry.location.CartesianLocation` instances directly,
        but only their coordinates.
        Thus, when reading back attributes like ``origin``, ``name``, etc. of the locations, they are
        derived from the route instance and not from the individual locations.

    Args:
        locations: The list of two or more locations that this shape consists of; see :attr:`~locations`
        location_type: The type of this route
        name: The name of this route
        identifier: The route's optional unique identifier, in :math:`[0, 2**63)`, i.e. 64 signed bits
        origin: A reference that can be used to project this cartesian representation (back)
            into a polar one
    """

    def __init__(  # pylint: disable=too-many-arguments
        self,
        locations: List[CartesianLocation],
        location_type: LocationType = LocationType.UNKNOWN,
        name: Optional[str] = None,
        identifier: Optional[int] = None,
        origin: Optional[PolarLocation] = None,
    ) -> None:
        # Store attributes
        self.origin = origin

        if isinstance(locations, list):
            LineString.__init__(self, [location.coords[0] for location in locations])
        else:
            # this is required for an efficient implementation of CartesianRoute.from_numpy;
            # we do not add this possibility to the type signature to make people use from_numpy()
            LineString.__init__(self, locations)

        Geospatial.__init__(self, location_type=location_type, name=name, identifier=identifier)

        # See Shapely issue
        if self.length < 1e-9:
            raise ValueError(f"(Nearly) zero-length line strings are not allowed by Shapely; got {locations}")

    @property
    def locations(self) -> List[CartesianLocation]:
        """Get the locations of this route. See the class description for caveats."""

        return [
            CartesianLocation(
                x,
                y,
                origin=self.origin,
                location_type=self.location_type,
                name=self.name,
                identifier=self.identifier,
            )
            for (x, y) in self.coords
        ]

    def to_polar(self, origin: Optional[PolarLocation] = None) -> PolarRoute:
        """Computes the polar representation of this route.

        Args:
            origin: The global reference to be used for back-projection, must be set if and only if
                :attr:`~origin` is ``None``

        Returns:
            The global, polar representation of this route
        """

        if origin is None:
            if self.origin is None:
                raise ValueError("need to give an explicit origin when the instance does not have one")
            origin = self.origin
        elif self.origin is not None:
            raise ValueError("provided an explicit origin while the instance already has one")

        # convert back to polar coordinates
        coordinates = self.to_numpy()
        coordinates[:, 0], coordinates[:, 1] = origin.projection(
            coordinates[:, 0], coordinates[:, 1], inverse=True
        )

        return PolarRoute.from_numpy(
            coordinates, location_type=self.location_type, name=self.name, identifier=self.identifier
        )

    def to_numpy(self) -> ndarray:
        """Converts the coordinates defining this route into a :class:`numpy.ndarray`.

        Returns:
            An array with shape ``(number of locations, 2)``, where each location is represented by a
            pair of ``(east, north)``, each in meters.

        See Also:
            :meth:`~from_numpy`
        """

        return array(self.coords, dtype="float64", order="C")
|
||||
|
||||
@classmethod
|
||||
def from_numpy(cls, data: ndarray, *args, **kwargs) -> "CartesianRoute":
|
||||
"""Create a cartesian route from a numpy representation.
|
||||
|
||||
Args:
|
||||
data: An array with shape ``(number of locations, 2)``, where each location is represented by a
|
||||
pair of ``(longitude, latitude)``, each in degrees.
|
||||
*args: positional arguments to be passed to :class:`~CartesianRoute`
|
||||
**kwargs: keyword arguments to be passed to :class:`~CartesianRoute`
|
||||
|
||||
Returns:
|
||||
The cartesian route created from the given coordinates and other parameters
|
||||
|
||||
Raises:
|
||||
AssertionError: If the shape of ``data`` is invalid or contains non-finite values
|
||||
|
||||
See Also:
|
||||
:meth:`~to_numpy`
|
||||
"""
|
||||
|
||||
assert len(data.shape) == 2
|
||||
assert data.shape[1] == 2
|
||||
assert isfinite(data).all(), "Invalid values in PolarRoute.from_numpy()"
|
||||
|
||||
return cls(data, *args, **kwargs) # type: ignore
|
||||
|
||||
@classmethod
|
||||
def from_shapely(cls, line_string: LineString, *args, **kwargs) -> "CartesianRoute":
|
||||
"""Create a cartesian route from a shapely line string.
|
||||
|
||||
Args:
|
||||
line_string: A shapely line_string
|
||||
*args: Positional arguments to be passed to :class:`~CartesianRoute`
|
||||
**kwargs: Keyword arguments to be passed to :class:`~CartesianRoute`
|
||||
|
||||
Returns:
|
||||
The cartesian route created from the given geometry and other parameters
|
||||
"""
|
||||
return cls.from_numpy(array(line_string.xy).T, *args, **kwargs)

    def translate(self, direction: float, distance: float) -> Tuple["CartesianRoute", ndarray]:
        """Translates this route.

        Args:
            direction: The direction angle in degrees (``0`` is north, clockwise)
            distance: The distance to translate in meters

        Returns:
            The translated route and the translation vector ``(x_offset, y_offset)`` in meters
            that can be used to reconstruct the original route
        """

        x_offset = sin(radians(direction)) * distance
        y_offset = cos(radians(direction)) * distance

        return (
            CartesianRoute.from_shapely(
                translate(LineString(self.to_numpy()), xoff=x_offset, yoff=y_offset),
                location_type=self.location_type,
                name=self.name,
                identifier=self.identifier,
                origin=self.origin,
            ),
            array([-x_offset, -y_offset]),
        )
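
    # A worked example (illustrative only): translating 100 m due east. With direction=90,
    # x_offset = sin(pi/2) * 100 = 100 and y_offset = cos(pi/2) * 100 ≈ 0, so every
    # coordinate shifts by ~(100, 0); the returned vector (-100, ~0) undoes the shift.
    #
    #     shifted, back = route.translate(direction=90.0, distance=100.0)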

    @property
    def __geo_interface__(self) -> Dict[str, Any]:
        return {"type": "LineString", "coordinates": self.coords}

    def __copy__(self) -> "CartesianRoute":
        return CartesianRoute(
            locations=self.locations,
            location_type=self.location_type,
            name=self.name,
            identifier=self.identifier,
            origin=self.origin,
        )

    def __deepcopy__(self, memodict: Dict) -> "CartesianRoute":
        return CartesianRoute(
            locations=deepcopy(self.locations, memodict),
            location_type=deepcopy(self.location_type, memodict),
            name=deepcopy(self.name, memodict),
            identifier=deepcopy(self.identifier, memodict),
            origin=deepcopy(self.origin, memodict),
        )

    def __eq__(self, other: Any) -> bool:
        return self.equals_exact(other, tolerance=0.0)

    # Inherits the docstring
    def equals(self, other: Any) -> bool:  # pylint: disable=missing-function-docstring
        return (
            isinstance(other, CartesianRoute)
            and LineString.equals(self, other)
            and Geospatial.__eq__(self, other)
            and self.origin == other.origin
        )

    # Inherits the docstring
    def equals_exact(  # pylint: disable=missing-function-docstring
        self, other: Any, tolerance: float
    ) -> bool:
        return (
            isinstance(other, CartesianRoute)
            and LineString.equals_exact(self, other, tolerance)
            and Geospatial.__eq__(self, other)
            and self.origin == other.origin
        )

    def __repr__(self) -> str:
        origin = f", origin={self.origin}" if self.origin is not None else ""
        locations = ", ".join(f"({x}, {y})" for x, y in self.coords)

        return f"CartesianRoute(locations=[{locations}]{origin}{self._repr_extras})"

    def __str__(self) -> str:
        # this is required to override shapely.geometry.LineString.__str__()
        return self.__repr__()
26
pyrate/pyrate/plan/graph/__init__.py
Normal file
26
pyrate/pyrate/plan/graph/__init__.py
Normal file
@ -0,0 +1,26 @@
"""
The ``graph`` module provides navigation tools where the world is modeled as a graph.
This includes generating a graph, assigning properties to the nodes of the graph and finding good paths on it.

Two graph models are provided:
:class:`~pyrate.plan.graph.graph.NavigationGraph` is a generic implementation and in
:class:`~pyrate.plan.graph.geo_graph.GeoNavigationGraph`, nodes are referenced to geographical locations.
"""

from .graph import NavigationGraph

from .geo_graph import GeoNavigationGraph

from .generate import angular_distance_for
from .generate import create_earth_graph
from .generate import great_circle_distance_distance_for
from .generate import min_required_frequency

__all__ = [
    "GeoNavigationGraph",
    "NavigationGraph",
    "angular_distance_for",
    "create_earth_graph",
    "great_circle_distance_distance_for",
    "min_required_frequency",
]
350
pyrate/pyrate/plan/graph/generate.py
Normal file
350
pyrate/pyrate/plan/graph/generate.py
Normal file
@ -0,0 +1,350 @@
"""
Creates a grid on a globe with vertices and edges. Assumes that the earth is a sphere.

Examples:
    The usual approach is to first determine the maximum allowed distance between two nodes on the graph.
    Note that small distances (e.g. of less than 100km) might take a while to compute and can create very
    large graphs. See the script
    :ref:`earth_graph_frequency_statistics.py <script-earth_graph_frequency_statistics-example>`
    for details on the performance and size of the output.

    >>> maximum_node_distance = 100_000  # in meters

    Then, the minimum required ``frequency`` can be computed from that, which is an integer value determining
    the granularity of the graph.
    Higher frequencies result in finer graphs.

    >>> frequency = min_required_frequency(maximum_node_distance, in_meters=True)

    We could have also passed the angular distance in radians (by setting ``in_meters=False``).
    Alternatively, we can now compute the actual angular distance and great-circle distance in meters from the
    frequency that we now have.
    It is in general less than (or equal to) the ``maximum_node_distance``, as the ``frequency`` only allows
    for integer steps in the granularity.

    >>> angular_distance_for(frequency)  # doctest: +ELLIPSIS
    0.01559...
    >>> actual_node_distance = great_circle_distance_distance_for(frequency)  # again, in meters
    >>> actual_node_distance  # doctest: +ELLIPSIS
    99347.242...
    >>> actual_node_distance <= maximum_node_distance
    True

    Now, we can finally generate the :class:`~pyrate.plan.graph.geo_graph.GeoNavigationGraph`.
    If we wanted to have some progress messages printed, we would pass ``print_status=True``.

    >>> graph = create_earth_graph(frequency)
    >>> len(graph)  # the number of nodes
    50412
    >>> graph.num_edges
    151230

    Furthermore, the ``graph`` has some specific attributes set, which result from the icosahedron
    subdivision approach of the algorithm.
    These allow for certain optimizations and more convenience when applying algorithms, as they do not
    have to be passed explicitly to other functions.

    >>> graph.node_radius * 2 == actual_node_distance
    True
    >>> graph.max_neighbors == 6
    True

Visualization
-------------

The following visualization shows how the vertices of an earth graph are spread when plotted using the
`mercator projection <https://en.wikipedia.org/wiki/Mercator_projection>`_.
The vertices and their area near the equator are very evenly spaced.
However, their positions and shapes get very distorted at high latitudes (i.e. near the north and south
poles) `due to the projection <https://en.wikipedia.org/wiki/Mercator_projection#Distortion_of_sizes>`_.

.. image:: vertices_distribution_mercator.png
    :alt: visualization of the vertices of an earth graph, distorted by the mercator projection

The following plot illustrates the area of influence/responsibility around each vertex.
Again, notice how the quite evenly spaced vertices are very distorted in this projection at high latitudes.
The visualization was obtained by computing the fraction of navigable area within the nodes' vicinity.
Navigability was simply determined by the land being below sea level as a rough approximation.

.. image:: vertices_area_of_influence.png
    :alt: visualization of the area of influence/responsibility of all vertices of an earth graph,
        distorted by the mercator projection;
        obtained by computing the fraction of navigable area within the nodes' vicinity

Note:
    Some methods require the `Antiprism <https://www.antiprism.com>`_ software package (version 0.26+).
    There is a PPA for it available `here <https://launchpad.net/~antiprism/+archive/ubuntu/ppa>`_.
    Use ``add-apt-repository ppa:antiprism/ppa && apt install antiprism`` on Ubuntu.
"""

# Standard library
from math import ceil
from math import degrees
import subprocess
from warnings import warn

# Typing
from typing import Tuple

# Scientific
from numpy import compress
from numpy import empty
from numpy import empty_like
from numpy import float64
from numpy import genfromtxt
from numpy import maximum
from numpy import minimum
from numpy import ndarray
from numpy import uint32
from numpy import unique

# Geometry helpers
from pyrate.plan.geometry.helpers import cartesian_to_spherical
from pyrate.plan.geometry.helpers import meters2rad
from pyrate.plan.geometry.helpers import rad2meters

# Graph implementation
from pyrate.plan.graph import GeoNavigationGraph

def create_earth_graph(frequency: int, print_status: bool = False) -> GeoNavigationGraph:
    """Returns roughly equally spaced points on the earth, creating an *icosphere* 🌐.

    This basically works by constructing a geodesic polyhedron based on an icosahedron as a starting point,
    dividing it (with *Class I*) as much as required by the desired distance and then projecting it onto a
    sphere. The following image visualizes the process for the case ``frequency = 6``:

    .. image:: https://upload.wikimedia.org/wikipedia/commons/f/ff/Geodesic_icosahedral_polyhedron_example.png
        :alt:
            Illustration of icosahedron subdivision: (1) create a regular icosahedron, (2) perform 6-frequency
            subdivision of all faces and (3) project all vertices on a sphere;
            Licensed under *CC BY-SA 4.0*: Created by *Tomruen* and provided on
            `Wikipedia <https://commons.wikimedia.org/wiki/File:Geodesic_icosahedral_polyhedron_example.png>`_.

    The implementation is mostly single-threaded. See the script
    :ref:`earth_graph_frequency_statistics.py <script-earth_graph_frequency_statistics-example>`
    for details on the performance and size of the output.

    *Class I* determines the way that the icosahedron is sliced (see
    `Wikipedia: Geodesic notation <https://en.wikipedia.org/wiki/Geodesic_polyhedron#Geodesic_notation>`_).
    It was chosen since it is apparently used quite often to create regular grids
    (according to `Wikipedia <https://en.wikipedia.org/wiki/Geodesic_grid#Construction>`_)
    and since it appears to be very regular (see
    `this <https://www.antiprism.com/examples/200_programs/650_geodesic/imagelist.html>`_ page in the
    Antiprism documentation for a visualization).

    References:
        - https://en.wikipedia.org/wiki/Geodesic_grid#Construction
        - https://en.wikipedia.org/wiki/Geodesic_polyhedron
        - https://people.sc.fsu.edu/~jburkardt/presentations/sphere_grid_2014_fsu.pdf

    Further ideas:
        - One could also use `Goldberg polyhedra <https://en.wikipedia.org/wiki/Goldberg_polyhedron>`_,
          as those are the `duals <https://en.wikipedia.org/wiki/Dual_polygon>`_ to the geodesic spheres
          used in this implementation and should also work.
        - Alternatively, one could also use the already implemented
          `Quaternary Triangular Meshes <https://github.com/paulojraposo/QTM>`_

    Args:
        frequency: The number of subdivisions per icosahedron edge.
            Keep in mind that high frequencies could be computationally costly.
        print_status: If set to ``True``, print human-readable status messages about the progress.

    Returns:
        A graph covering the entire earth
    """
    angular_distance = angular_distance_for(frequency)
    distance_meters = great_circle_distance_distance_for(frequency)
    if print_status:
        print(f"creating an earth grid with a point distance of at most {distance_meters / 1000:.3f} km")
        print(f"dividing each edge of the base icosahedron into {frequency} parts")
        print(f"the angular distance of vertices will be ~{degrees(angular_distance):.6f}°")

    if 10_000 < distance_meters < 25_000:  # pragma: no cover
        warn("this might take a while", ResourceWarning)
    elif distance_meters <= 10_000:  # pragma: no cover
        warn("this might take *very* long", ResourceWarning)

    # this defines how to slice the edges/triangles
    polyhedron_class = "1"

    if print_status:
        print('calling Antiprism\'s "geodesic"')

    # check the geodesic/Antiprism version
    _assert_antiprism_is_installed()

    command = f"geodesic -M s -c {polyhedron_class} -f {frequency} ico"
    # check_output raises an error on a non-zero exit code
    # use ASCII encoding since the resulting file will not contain any Unicode text
    output = subprocess.check_output(command.split(), encoding="ascii")

    if print_status:
        print("parsing the resulting OFF file")
    latitudes, longitudes, edges = _parse_off_file(output)

    if print_status:
        print("finished earth grid generation")

    return GeoNavigationGraph.from_coordinates_radians(
        latitudes=latitudes,
        longitudes=longitudes,
        edges=edges,
        max_neighbors=6,
        node_radius=distance_meters / 2,
    )


#: The approximate angle between two neighboring vertices on an icosahedron, in radians, about 63.4°
_ALPHA = 1.1071487177940905030170654601785
# calculation:
# (note: latitude is in [-pi/2, +pi/2], longitude is in [-pi, +pi])
# take two vertices in spherical coordinates,
# see https://en.wikipedia.org/wiki/Regular_icosahedron#Spherical_coordinates
# (in the link, other coordinates are used!)
# we choose A=(lat_a, lon_a)=(pi/2, 0) and
# B=(lat_b, lon_b)=(arctan(1/2), 0) for simplicity
# then the angle between them is given by
# alpha = lat_a - lat_b = pi/2 - arctan(1/2)
# result: https://www.wolframalpha.com/input/?i=pi%2F2+-+arctan%281%2F2%29
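
# A quick numerical sanity check of the value above (illustrative, not part of the module):
#
#     >>> from math import pi, atan
#     >>> abs((pi / 2 - atan(1 / 2)) - _ALPHA) < 1e-15
#     True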


def min_required_frequency(desired_distance: float, in_meters: bool) -> int:
    """Compute the minimum frequency to reach the ``desired_distance`` by icosahedron subdivision.

    Here, the frequency is the number of cuts to make on an edge of a polyhedron.
    Higher frequencies result in finer graphs.

    Args:
        desired_distance: The maximum distance that two neighboring nodes may be apart.
            Must be a strictly positive number.
            If ``in_meters`` is ``True``, this is the great-circle distance in meters; otherwise, it is
            the angular distance in radians.
        in_meters: Interpret ``desired_distance`` as meters instead of as radians

    Returns:
        The minimum frequency to reach the ``desired_distance``, at least ``1``
    """
    assert desired_distance > 0, "the desired_distance must be positive"

    if in_meters:
        desired_angular_distance = meters2rad(desired_distance)
    else:
        desired_angular_distance = desired_distance

    # calculate the number of slices per edge (= the frequency) by simple division:
    frequency = _ALPHA / desired_angular_distance

    # if the distance is too big, we simply do not divide the edges at all
    frequency = max(frequency, 1.0)

    # then we need to round: we round up since we would rather have
    # more edges than too few of them
    return int(ceil(frequency))
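
# A worked example (illustrative, not part of the module): with _ALPHA ≈ 1.107 rad,
# asking for an angular node distance of 0.5 rad gives 1.107 / 0.5 ≈ 2.21, which is
# rounded up to a frequency of 3; a distance larger than _ALPHA clamps to 1.
#
#     >>> min_required_frequency(0.5, in_meters=False)
#     3
#     >>> min_required_frequency(2.0, in_meters=False)
#     1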


def great_circle_distance_distance_for(frequency: int) -> float:
    """The great-circle distance that subdivision with the frequency will result in.

    Args:
        frequency: The frequency of the subdivision, at least ``1``

    Returns:
        The great-circle distance that the frequency will result in, in meters
    """
    return rad2meters(angular_distance_for(frequency))


def angular_distance_for(frequency: int) -> float:
    """The angular distance that subdivision with the frequency will result in.

    Args:
        frequency: The frequency of the subdivision, at least ``1``

    Returns:
        The angular distance that the frequency will result in, in radians
    """
    assert frequency >= 1, "the frequency must be at least one"
    return _ALPHA / frequency
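
# A consistency sketch (illustrative only): the two helpers above are related through the
# radian/meter conversion helpers, so converting the angular distance to meters must
# reproduce the great-circle distance exactly.
#
#     >>> great_circle_distance_distance_for(10) == rad2meters(angular_distance_for(10))
#     True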


#: The minimum required Antiprism version
_ANTIPRISM_REQUIRED_VERSION = (0, 26)


def _assert_antiprism_is_installed() -> None:
    """Raises an exception if *Antiprism* (with the geodesic tool) is not installed in the required version.

    Raises:
        :class:`AssertionError`: If the *Antiprism* version is insufficient
    """
    try:
        version = subprocess.check_output(["geodesic", "--version"], encoding="utf8").split(" ", 3)[2]
    except FileNotFoundError as error:  # pragma: no cover
        raise AssertionError(
            'Could not call tool "geodesic" from Antiprism, is it installed? (See installation instructions.)'
        ) from error

    assert tuple(int(v) for v in version.split(".")) >= _ANTIPRISM_REQUIRED_VERSION, (
        f'tool "geodesic" from Antiprism version >= {_ANTIPRISM_REQUIRED_VERSION} is required, '
        f"but you have version {version}!"
    )
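
# An illustrative note on the parsing above (the exact banner format is an assumption, not
# verified here): if `geodesic --version` prints something like
# "geodesic: Antiprism 0.26 - http://www.antiprism.com", then split(" ", 3)[2] extracts
# "0.26", which tuple(int(v) for v in ...) turns into (0, 26) for the comparison.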


def _parse_off_file(source_text: str) -> Tuple[ndarray, ndarray, ndarray]:
    """Parses an Antiprism OFF file and returns the result in spherical coordinates.

    Warnings:
        Assumes that the point :math:`(0, 0, 0)` is not present and that all faces
        are triangles or "polygons" with fewer vertices.

    Warnings:
        This is only meant to parse OFF files produced by *Antiprism*.
        These are not standard OFF files, as described
        `here <https://www.antiprism.com/programs/off_format.html>`_!

    Args:
        source_text: The raw file content to be parsed

    Returns:
        All vertices' points (as returned by :meth:`~pyrate.plan.geometry.helpers.cartesian_to_spherical`)
        as well as a list of all edges, each consisting of zero-based indices
        of the endpoints from the first argument.
    """
    # split
    source = source_text.splitlines()
    assert len(source) >= 2, "OFF file must have at least two lines"

    # check header
    assert source[0] == "OFF", 'file does not start with "OFF"'

    # get size of file
    # note: num_edges is usually not set to a correct value, so we ignore the last value
    num_vertices, num_faces, _ = map(int, source[1].split())

    # get the vertices
    points = genfromtxt(source[2:], max_rows=num_vertices, dtype=float64)
    latitudes, longitudes = cartesian_to_spherical(points)

    # get faces
    faces = genfromtxt(source[2 + num_vertices :], max_rows=num_faces, usecols=(0, 1, 2, 3), dtype=uint32)
    triangles = compress(faces[:, 0] == 3, faces[:, 1:4], axis=0)
    del faces  # free this memory
    count = len(triangles)

    # now we want to transform each triangle into three edges
    edges = empty([count * 3, 2], dtype=uint32)
    edges[0:count, :] = triangles[:, (0, 1)]
    edges[count : 2 * count, :] = triangles[:, (1, 2)]
    edges[2 * count : 3 * count, :] = triangles[:, (0, 2)]

    # then we filter out duplicates or wrong values:
    # sort the IDs in each row in ascending order to make duplicates comparable, since the
    # graph is undirected and adjacent triangles share edges
    # one could also use `np.sort`
    sorted_edges = empty_like(edges)
    sorted_edges[:, 0] = minimum(edges[:, 0], edges[:, 1])
    sorted_edges[:, 1] = maximum(edges[:, 0], edges[:, 1])
    edges = unique(sorted_edges, axis=0)

    return latitudes, longitudes, edges
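
# A tiny worked example of the edge deduplication above (illustrative only): two triangles
# (0, 1, 2) and (1, 2, 3) yield the raw edge list
# (0,1), (1,2), (0,2), (1,2), (2,3), (1,3); after per-row sorting and `unique`, the shared
# edge (1, 2) is kept only once, leaving 5 distinct undirected edges.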
248
pyrate/pyrate/plan/graph/geo_graph.py
Normal file
248
pyrate/pyrate/plan/graph/geo_graph.py
Normal file
@ -0,0 +1,248 @@
"""This module provides geo-referenced navigation graphs."""

# Typing
from typing import Any
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union

# Scientific
import numpy
from numpy import degrees
from numpy import ndarray
from numpy import radians

# Scientific
from pandas import DataFrame
from pandas import Series

# Progress bars
from tqdm import tqdm

# Own typing
from pyrate.common.raster_datasets import BaseTransformer
from pyrate.plan.graph.graph import NavigationGraph


class GeoNavigationGraph(NavigationGraph):
    """An undirected navigation graph specifically for geo-referenced graphs.

    It is similar to the more generic :class:`~pyrate.plan.graph.graph.NavigationGraph` but ensures that the
    property dataframe always contains the columns ``Latitude (radians)`` and ``Longitude (radians)``.
    Not providing these when creating the graph will result in an :class:`AssertionError`.

    This class also provides useful methods for adding new properties and plotting the graph.

    Examples:
        This creates a very simple graph with two connected nodes at *Darmstadt* and *Griesheim*.

        >>> import numpy
        >>> nodes = DataFrame(data={'Latitude (radians)': numpy.radians([49.872222, 49.863889]), \
                                    'Longitude (radians)': numpy.radians([ 8.652778, 8.563889])})
        >>> edges = numpy.array([[0, 1], ])
        >>> graph = GeoNavigationGraph(nodes, edges)
        >>> graph.neighbors
        array([[1],
               [0]], dtype=int32)
        >>> graph.latitudes_degrees
        0    49.872222
        1    49.863889
        Name: Latitude (radians), dtype: float64

        Alternatively, such a graph can be created using `GeoNavigationGraph.from_coordinates_*`:

        >>> same_graph = GeoNavigationGraph.from_coordinates_degrees( \
                latitudes=[49.872222, 49.863889], longitudes=[ 8.652778, 8.563889], edges=edges)
        >>> graph == same_graph
        True

    Args:
        nodes: See :class:`~pyrate.plan.graph.graph.NavigationGraph`.
            This must contain the columns ``"Latitude (radians)"`` (with values in :math:`[-π/2, +π/2]`) and
            ``"Longitude (radians)"`` (with values in :math:`[-π, +π)`).
        edges: See :class:`~pyrate.plan.graph.graph.NavigationGraph`.
        neighbours: See :class:`~pyrate.plan.graph.graph.NavigationGraph`.
        max_neighbors: See :class:`~pyrate.plan.graph.graph.NavigationGraph`.
        node_radius: The radius around each node of the area on the globe that it should represent, in
            meters, non-negative. It can be interpreted as the radius of influence or of responsibility.
            It may be an array of shape ``(num_nodes, )`` or a single scalar if the radius is uniform
            across all nodes.
            Setting this allows it to be omitted in some methods of this class, like in
            :meth:`~append_property`.
    """

    def __init__(self, *args, node_radius: Optional[Union[float, ndarray]] = None, **kwargs):
        super().__init__(*args, **kwargs)

        assert node_radius is None or numpy.all(node_radius >= 0)
        self.node_radius = node_radius

        assert "Latitude (radians)" in self.nodes, 'column "Latitude (radians)" missing'
        assert "Longitude (radians)" in self.nodes, 'column "Longitude (radians)" missing'

    @classmethod
    def from_coordinates_radians(
        cls,
        latitudes: ndarray,
        longitudes: ndarray,
        node_properties: Optional[DataFrame] = None,
        **kwargs: Any,
    ) -> "GeoNavigationGraph":
        """Creates a new geo-referenced navigation graph from the given coordinates and node properties.

        The same as the constructor of :class:`GeoNavigationGraph`, except that the latitude, longitude and
        properties of the nodes can be given separately. For clarity, everything should be passed as
        keyword arguments.

        Args:
            latitudes: The latitudes of all nodes in radians in :math:`[-π/2, +π/2]`
            longitudes: The longitudes of all nodes in radians in :math:`[-π, +π)`
            node_properties: The properties of all nodes (will be modified if not set to ``None``)
            **kwargs: Passed to the constructor of :class:`GeoNavigationGraph`

        Returns:
            A newly created graph
        """
        if node_properties is None:
            node_properties = DataFrame()  # create an empty one

        node_properties["Latitude (radians)"] = latitudes
        node_properties["Longitude (radians)"] = longitudes

        assert "nodes" not in kwargs, (
            "do not pass nodes, instead explicitly set them via latitudes, "
            "longitudes and node_properties or directly use the constructor instead"
        )

        return cls(node_properties, **kwargs)

    @classmethod
    def from_coordinates_degrees(
        cls, latitudes: ndarray, longitudes: ndarray, **kwargs: Any
    ) -> "GeoNavigationGraph":
        """The same as :func:`~from_coordinates_radians` except that the coordinates are in degrees.

        For clarity, everything should be passed as keyword arguments.

        Args:
            latitudes: The latitudes of all nodes in degrees in :math:`[-90, +90]`
            longitudes: The longitudes of all nodes in degrees in :math:`[-180, +180)`
            **kwargs: Passed to :func:`~from_coordinates_radians`
        """
        return GeoNavigationGraph.from_coordinates_radians(radians(latitudes), radians(longitudes), **kwargs)

    @staticmethod
    def _serialized_attributes() -> List[str]:
        """The list of attributes that shall be (de)serialized (on top of the nodes and edges)."""
        return NavigationGraph._serialized_attributes() + ["node_radius"]

    @property
    def latitudes_radians(self) -> Series:
        """The latitudes of all nodes in radians in :math:`[-π/2, +π/2]`."""
        return self.nodes["Latitude (radians)"]

    @property
    def longitudes_radians(self) -> Series:
        """The longitudes of all nodes in radians in :math:`[-π, +π)`."""
        return self.nodes["Longitude (radians)"]

    @property
    def latitudes_degrees(self) -> Series:
        """The latitudes of all nodes in degrees in :math:`[-90, +90]`."""
        return degrees(self.latitudes_radians)

    @property
    def longitudes_degrees(self) -> Series:
        """The longitudes of all nodes in degrees in :math:`[-180, +180)`."""
        return degrees(self.longitudes_radians)

    @property
    def node_properties(self) -> DataFrame:
        """The properties of all nodes as a view (as opposed to a copy).

        This is the same as :attr:`~nodes`, but without the latitude and longitude values.
        """
        return self.nodes.drop(columns=["Latitude (radians)", "Longitude (radians)"])

    def clear_node_properties(self) -> None:
        """Deletes all properties but retains the coordinate values."""
        self.nodes = self.nodes[["Latitude (radians)", "Longitude (radians)"]]

    def append_property(
        self,
        transformer: BaseTransformer,
        node_radius: Optional[Union[float, ndarray]] = None,
        show_progress: bool = False,
    ) -> None:
        """Append the property given by the transformer.

        The name and data type are taken from the given ``transformer``.

        Args:
            transformer: The dimension/property that shall be queried for each node
            node_radius: The radius around each node of the area on the globe that it should represent,
                in meters, non-negative.
                It may be an array of shape ``(num_nodes, )`` or a single scalar if the radius is
                uniform across all nodes.
                It may be omitted if :attr:`~node_radius` is set.
            show_progress: Whether to print a simple progress bar

        See Also:
            :meth:`~append_properties`

        Raises:
            ValueError: If a property with that name is already present
        """
        node_radius = node_radius if node_radius is not None else self.node_radius
        assert node_radius is not None, (
            "parameter node_radius must be set either with the method or the object attribute but is "
            "missing on both"
        )

        with transformer:
            new = transformer.get_transformed_at_nodes(
                self.latitudes_radians, self.longitudes_radians, node_radius, show_progress=show_progress
            )
            self.nodes = self.nodes.join(new)

    def append_properties(
        self,
        transformers: Sequence[BaseTransformer],
        node_radius: Optional[Union[float, ndarray]] = None,
        show_progress: bool = False,
    ) -> None:
        """Append multiple properties at once. This has the benefit of printing a combined progress bar.

        Args:
            transformers: The dimensions/properties that shall be queried for each node
            node_radius: The radius around each node of the area on the globe that it should represent,
                in meters, non-negative.
                It may be an array of shape ``(num_nodes, )`` or a single scalar if the radius is
                uniform across all nodes.
                This value is uniform across all ``transformers``.
                It may be omitted if :attr:`~node_radius` is set.
            show_progress: Whether to print a simple progress bar

        See Also:
            :meth:`~append_property`

        Raises:
            ValueError: If a property with any given name is already present
        """
        node_radius = node_radius if node_radius is not None else self.node_radius
        assert node_radius is not None, (
            "parameter node_radius must be set either with the method or the object attribute but is "
            "missing on both"
        )

        for transformer in tqdm(transformers, unit=" transformers", colour="blue", disable=not show_progress):
            self.append_property(transformer, node_radius, show_progress)

    def __eq__(self, other: Any) -> bool:
        return (
            isinstance(other, GeoNavigationGraph)
            and NavigationGraph.__eq__(self, other)
            and self.node_radius == other.node_radius
        )
245
pyrate/pyrate/plan/graph/graph.py
Normal file
245
pyrate/pyrate/plan/graph/graph.py
Normal file
@ -0,0 +1,245 @@
"""This module provides generic navigation graphs."""

# Typing
from typing import Any
from typing import List
from typing import Optional
from typing import Sized
from typing import Type
from typing import TypeVar

# Mathematics
from numpy import array_equal
from numpy import asarray
from numpy import compress
from numpy import cumsum
from numpy import empty
from numpy import int32
from numpy import logical_and
from numpy import logical_not
from numpy import ndarray

# Scientific
import h5py
from pandas import DataFrame
from pandas import read_hdf


NavigationGraphSubclass = TypeVar("NavigationGraphSubclass", bound="NavigationGraph")


class NavigationGraph(Sized):
    """A generic undirected graph that can be used for navigation.

    It is represented by nodes and their properties as rows in a pandas dataframe, and edges as an array of
    indices of nodes for connections. Additionally, the :attr:`~neighbors` array provides all neighbors of
    all nodes for faster access in graph search.

    Args:
        nodes: the nodes as a dataframe where each row is a node
        edges: the edges of shape ``(number_of_edges, 2)``, where each row contains the indices of two
            neighboring nodes
        neighbours: the neighbors of all nodes with shape ``(number_of_nodes, max_neighbors_per_node)``,
            where each row contains the indices of all neighbors of the node, filled with ``-1`` at
            the end
        max_neighbors: the maximum number of neighbors of any node (optional); this can be set to allow for
            some optimizations (e.g. in the neighbor search)

    Examples:
        This creates a very simple graph where ``0`` and ``1`` as well as ``1`` and ``2`` are connected to
        form a small chain.

        >>> nodes = DataFrame(data={'property_1': [1, 2, 3], 'property_2': [10, 20, 30]})
        >>> edges = asarray([[0, 1], [1, 2]])
        >>> graph = NavigationGraph(nodes, edges)
        >>> graph.neighbors
        array([[ 1, -1],
               [ 0,  2],
               [ 1, -1]], dtype=int32)
        >>> len(graph)
        3
        >>> graph.num_edges
        2

    See Also:
        :class:`~pyrate.plan.graph.geo_graph.GeoNavigationGraph`:
            A more specific implementation that references nodes to geographic locations and contains
            useful methods for adding properties from datasets and plotting the graph
    """

    def __init__(
        self,
        nodes: DataFrame,
        edges: ndarray,
        neighbours: Optional[ndarray] = None,
        max_neighbors: Optional[int] = None,
    ) -> None:
        super().__init__()

        self.nodes = nodes

        assert (
            len(edges.shape) == 2 and edges.shape[1] == 2
        ), "the edges must be a 2D-array of shape (number_of_edges, 2)"
        self.edges = edges

        assert neighbours is None or neighbours.shape[0] == len(nodes)
        self._neighbors = neighbours

        assert max_neighbors is None or max_neighbors >= 0, "max_neighbors must be non-negative"
        self.max_neighbors = max_neighbors

    @property
    def neighbors(self) -> ndarray:
        """The list of neighbors of each node, identified by their node index.

        An array of dimension ``(number_of_nodes, max_neighbors_per_node)``, with each row containing the
        indices of the neighbors of the node at that position and the rest of the row filled with ``-1``.

        This might take a short while to be computed for the first time, but the result is cached and
        also serialized if present at the point of saving it to disk.
        See :ref:`script-benchmark_graph_neighbor_search` for performance measurements and a link to an
        issue about speeding up this search for neighbors.
        """
        if self._neighbors is not None:
            return self._neighbors

        if self.nodes.empty:  # this requires a special case
            self._neighbors = empty((0, 0), dtype=int32)

        else:
            # each row/inner list contains the neighbors of the node at the index
            # and the rest of the row is filled with -1s
            neighbors: List[List[int]] = [[] for _ in range(len(self))]

            for from_node, to_node in self.edges:
                neighbors[from_node].append(to_node)
                neighbors[to_node].append(from_node)

            # calculate the length of the longest list
            longest = len(max(neighbors, key=len))
            # make the lists equal in length by filling with -1
            neighbors = [x + [-1] * (longest - len(x)) for x in neighbors]

            self._neighbors = asarray(neighbors, dtype=int32)

        return self._neighbors

    def prune_nodes(self, keep_condition: ndarray) -> None:
        """Only retain the given nodes with their properties and appropriately update all edges and neighbors.

        For example, this should decrease the number of nodes and edges by about 30% when filtering with the
        ``keep_condition`` set to ``my_graph.nodes["elevation_to_sea_level"] < 0.0`` on a graph representing
        earth.

        Args:
            keep_condition: the nodes which to keep as a numpy array with boolean values
        """
        assert keep_condition.shape == (len(self),), "keep condition shape does not match nodes"

        # filter nodes
        self.nodes = self.nodes[keep_condition]

        # filter edges
        keep_condition_edges = logical_and(keep_condition[self.edges[:, 0]], keep_condition[self.edges[:, 1]])
        self.edges = compress(keep_condition_edges, self.edges, axis=0)
        # then correct the node indices that the remaining edges refer to by subtracting the number of
        # removed nodes before each one
        index_shift = cumsum(logical_not(keep_condition)).astype(self.edges.dtype)
        self.edges -= index_shift[self.edges]

        # reset neighbors
        self._neighbors = None
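
    # A worked example of the reindexing above (illustrative, not part of the class):
    # with keep_condition = [True, False, True, True], node 1 is dropped, so
    # cumsum(logical_not(...)) = [0, 1, 1, 1]. An edge (2, 3) survives and becomes
    # (2 - 1, 3 - 1) = (1, 2), matching the positions of nodes 2 and 3 after removal.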

    @staticmethod
    def _serialized_attributes() -> List[str]:
        """The list of attributes that shall be (de)serialized (on top of the nodes and edges)."""
        return ["max_neighbors"]

    def to_disk(self, file_path: str, overwrite_existing: bool = False) -> None:
        """Save the graph to disk. Possibly missing parent directories are automatically created.

        The data is stored in an interoperable `HDF5 <https://docs.h5py.org/en/stable/>`_ file with the
        keys ``nodes``, ``edges`` and optionally ``neighbors``.

        The ``nodes`` are compressed using the default settings of :meth:`pandas.DataFrame.to_hdf`.
        The ``edges`` (and ``neighbors`` if present) are slightly compressed using the library
        `h5py <https://docs.h5py.org/en/stable/high/dataset.html#filter-pipeline>`_ (using GZIP level 4).
        See also `the available options in h5py
        <https://docs.h5py.org/en/stable/faq.html#what-compression-processing-filters-are-supported>`_.

        Args:
            file_path: the path to the file where to store the graph; usually ends with ``.hdf5``
            overwrite_existing: whether to overwrite the file if it already exists; else, this causes an
                error to be raised

        Raises:
            IOError: when the file cannot be accessed or written to, or it already exists and
                ``overwrite_existing`` is not set

        See Also:
            :meth:`~from_disk`
        """
        compression_options = {"compression": "gzip", "compression_opts": 4}

        with h5py.File(file_path, "w" if overwrite_existing else "w-") as graph_file:
            graph_file.create_dataset("edges", data=self.edges, **compression_options)
            if self._neighbors is not None:
                graph_file.create_dataset("neighbors", data=self.neighbors, **compression_options)

            # Serialize attributes
            for attribute in self._serialized_attributes():
                graph_file.attrs[attribute] = getattr(self, attribute)

        # pandas automatically chooses an appropriate compression
        self.nodes.to_hdf(file_path, key="nodes", mode="r+", append=True)

    @classmethod
    def from_disk(cls: Type[NavigationGraphSubclass], file_path: str) -> NavigationGraphSubclass:
        """Reads a graph from disk.

        Assumes an HDF5-based format compatible with the one created by :meth:`~to_disk`.

        Args:
            file_path: the path to the file where to read the graph from; usually ends with ``.hdf5``

        Raises:
            IOError: when the file cannot be accessed or read from

        Returns:
            The newly loaded navigation graph, which will be of a subclass of :class:`NavigationGraph` if
            this method was called on that class.

        See also:
            :meth:`~to_disk`
        """
        with h5py.File(file_path, "r") as graph_file:
            edges = graph_file["edges"][:]
            neighbors = graph_file["neighbors"][:] if "neighbors" in graph_file else None

            # Deserialize attributes
            attributes = {
                attribute: graph_file.attrs[attribute] for attribute in cls._serialized_attributes()
            }

        nodes = read_hdf(file_path, key="nodes")
        assert isinstance(nodes, DataFrame)

        return cls(nodes, edges, neighbors, **attributes)
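
    # A round-trip usage sketch (illustrative; "example.hdf5" is a hypothetical path):
    #
    #     graph.to_disk("example.hdf5", overwrite_existing=True)
    #     restored = NavigationGraph.from_disk("example.hdf5")
    #     assert restored == graph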

    @property
    def num_edges(self) -> int:
        """Returns the number of edges. The number of nodes can be obtained via ``len(graph)``."""
        return self.edges.shape[0]

    def __len__(self) -> int:
        return len(self.nodes)

    def __eq__(self, other: Any) -> bool:
        return (
            isinstance(other, NavigationGraph)
            and self.nodes.equals(other.nodes)
            and array_equal(self.edges, other.edges)
            # no need to check array_equal(self.neighbors, other.neighbors) as it is a derived property
            and self.max_neighbors == other.max_neighbors
        )
42
pyrate/pyrate/plan/nearplanner/__init__.py
Normal file
42
pyrate/pyrate/plan/nearplanner/__init__.py
Normal file
@ -0,0 +1,42 @@
"""Provides the means to describe possible routes (via TimingFrames) in a given environment (CostModel)
and find a possible route (via Optimizer) in a given context.

:class:`~near_planner.planning_backend.timing_frame.TimingFrame` encapsulates a
:class:`~pyrate.plan.geometry.route.CartesianRoute` instance. The class further provides an option to
simulate the route with the help of a :class:`~near_planner.planning_backend.polar_model.PolarModel`
instance, simulating timing and speed information. This is done via the method
:meth:`~near_planner.planning_backend.timing_frame.TimingFrame.update_times`. Using a
:class:`~near_planner.planning_backend.cost_model.CostModel`, a
:class:`~near_planner.planning_backend.timing_frame.TimingFrame` can be evaluated to an
:class:`~near_planner.planning_backend.timing_frame.EvaluatedTimingFrame`.

The subclass :class:`~near_planner.planning_backend.timing_frame.EvaluatedTimingFrame` additionally
stores information on possible collisions and distances to
:class:`~near_planner.planning_backend.obstacle.Obstacle`\\ s. These are conveniently provided via
properties. If the information is deemed incorrect, the frame can be revoked via
:meth:`~near_planner.planning_backend.timing_frame.EvaluatedTimingFrame.revoke` and turned back into a
:class:`~near_planner.planning_backend.timing_frame.TimingFrame`.

The :class:`~near_planner.planning_backend.cost_model.CostModel` class conveniently stores obstacles and
environment information. A :class:`~near_planner.planning_backend.timing_frame.TimingFrame`
can be turned into an :class:`~near_planner.planning_backend.timing_frame.EvaluatedTimingFrame` via the
:meth:`~near_planner.planning_backend.cost_model.CostModel.evaluate` method. This method also calculates
the cost of taking the TimingFrame by various metrics and stores it in
:attr:`~near_planner.planning_backend.timing_frame.EvaluatedTimingFrame.actual_cost`. The calculated cost
is also returned.

The :class:`~near_planner.planning_backend.optimizer.Optimizer` class determines routes, hopefully optimal
ones, towards a goal given by a :class:`pyrate.plan.geometry.location.CartesianLocation`. This
is done by initializing the :class:`~near_planner.planning_backend.optimizer.Optimizer` with information
about the current environment. After that, a goal and
:class:`~near_planner.planning_backend.holders.OptimizationParameters` can be provided to the method
:meth:`~near_planner.planning_backend.optimizer.Optimizer.optimizer`, which generates a hopefully optimal
route candidate. The :class:`~near_planner.planning_backend.optimizer.Optimizer` is furthermore able to
react to a changing :class:`~near_planner.planning_backend.obstacle.Obstacle` population by changing its
state via several methods.

The package further provides various (data)classes and methods to help in the interaction with the core
classes. This includes several cost functions to provide an
:class:`~near_planner.planning_backend.obstacle.Obstacle` with, functions to merge numpy arrays and
dataclasses to wrap various parameters in.
"""
180
pyrate/pyrate/plan/nearplanner/cost_functions.py
Normal file
180
pyrate/pyrate/plan/nearplanner/cost_functions.py
Normal file
@ -0,0 +1,180 @@
"""This module contains a collection of cost functions."""

# Support for abstract classes
from abc import ABC
from abc import abstractmethod

# Static Typing
from typing import Optional
from typing import Tuple

# Scientific Computing
import numpy as np
from scipy import linalg

# Backend
from .evaluated_timing_frame import EvaluatedTimingFrame


class CostFunction(ABC):
    """Class to encapsulate different types of cost functions for different types of obstacles.

    A cost function describes the cost of passing by an obstacle at a given distance in a unit-less
    measurement. Cost functions must be differentiable with respect to their argument ``dist``.
    """

    #: A human-readable name of the cost function
    name: str

    @abstractmethod
    def cost(self, dist: float) -> np.floating:
        """Calculates the cost of an obstacle based on a distance.

        Args:
            dist: distance to evaluate the cost from

        Returns:
            The evaluated cost
        """

    @abstractmethod
    def cost_grad(self, dist: float) -> Tuple[np.floating, np.floating]:
        """Calculates the cost AND its derivative w.r.t. the distance.

        Args:
            dist: distance to base the calculation upon

        Returns:
            A tuple of ``(cost, gradient)``
        """


class CostFunctionLinear(CostFunction):
    """Class that represents a linear cost function.

    This function is bounded from above by the parameter :attr:`maximum_cost`.
    Reproduces the values of the function :math:`f(x) = maximum_cost - fact * x`.

    Args:
        fact: factor for cost decay
        maximum_cost: maximal cost
    """

    def __init__(self, fact: float = 1.0, maximum_cost: float = 100) -> None:
        self.fact: np.floating = np.float64(fact)
        self.maximum_cost = maximum_cost
        self.name = "linear"

    def cost(self, dist: float) -> np.floating:
        return np.float64(-self.fact * np.float64(dist) + self.maximum_cost)

    def cost_grad(self, dist: float) -> Tuple[np.floating, np.floating]:
        return np.float64(-self.fact * dist + self.maximum_cost), np.float64(-self.fact)


class CostFunctionInverse(CostFunction):
    """Class that represents an inversely proportional cost function.

    This function produces values in the interval :math:`[0, ∞)`.
    Reproduces the values of the function :math:`f(x) = 1/x`.

    Args:
        fact: undecayed cost for a unit of distance
    """

    def __init__(self, fact: float = 1.0) -> None:
        self.fact: np.floating = np.float64(fact)
        self.name = "inverse"  # was "linear", presumably a copy-paste mistake

    def cost(self, dist: float) -> np.floating:
        return np.float64(self.fact / np.float64(dist))

    def cost_grad(self, dist: float) -> Tuple[np.floating, np.floating]:
        # the derivative of fact / dist is -fact / dist**2 (the original returned the magnitude)
        return np.float64(self.fact / np.float64(dist)), np.float64(-self.fact / np.float64(dist**2))


class CostFunctionExp(CostFunction):
    """A simple exponential cost function for use with gradient and cost calculations in TimingFrame.

    Args:
        safety_dist: safety distance the ship should hold
        clip: clip to determine accuracy
        linear_scale: linear factor applied to the clipped exponential
        scale: scale of the exponent
    """

    def __init__(
        self, safety_dist: float = 3.0, clip: float = 100000, linear_scale: float = 100, scale: float = 1
    ) -> None:
        self.name = "Exponential cost"

        self.clip: np.floating = np.float64(clip)
        self.scale: np.floating = np.float64(scale)
        self.linear_scale: np.floating = np.float64(linear_scale)
        self.safety_dist: np.floating = np.float64(safety_dist)

    def cost(self, dist: float) -> np.floating:
        if dist == 0:
            return self.clip

        with np.errstate(over="ignore"):
            temp = np.clip(np.exp(self.scale * self.safety_dist / dist), None, self.clip)

        return np.float64(temp * self.linear_scale)

    def cost_grad(self, dist: float) -> Tuple[np.floating, np.floating]:
        with np.errstate(over="ignore"):
            cost = np.exp(self.scale * self.safety_dist / dist) if dist > 0 else self.clip

        grad = -cost * self.scale * self.safety_dist / dist**2 if dist > 0 else 0
        cost = np.clip(cost, None, self.clip)
        grad = np.clip(grad, -self.clip, self.clip)

        assert not np.isnan(grad).any()
        assert not np.isnan(cost).any()

        return np.float64(cost), np.float64(grad)
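
# A comparison sketch (illustrative, not part of the module): evaluating the three
# concrete cost functions at the same distance shows their different shapes.
#
#     linear = CostFunctionLinear(fact=1.0, maximum_cost=100)
#     inverse = CostFunctionInverse(fact=10.0)
#     exponential = CostFunctionExp(safety_dist=3.0)
#
#     for function in (linear, inverse, exponential):
#         cost, gradient = function.cost_grad(5.0)  # cost and slope 5 units away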


def default_cache_metric(frame: EvaluatedTimingFrame, lock: Optional[EvaluatedTimingFrame]) -> float:
    """This is a simple demo cost function for the route cache.

    It determines what the 'best' TimingFrame is with respect to an environment (already
    captured in ``frame.actual_cost``) and, in this case, the route we are currently pursuing.

    Args:
        frame: frame that is to be judged
        lock: frame the route cache is locked onto (last recommended frame)

    Returns:
        score for the TimingFrame
    """

    angle = 0
    if lock:
        list_of_locations = frame.route.locations
        heading = np.array(
            [
                list_of_locations[1].east - list_of_locations[0].east,
                list_of_locations[1].north - list_of_locations[0].north,
            ]
        )

        list_of_locations2 = lock.route.locations
        heading2 = np.array(
            [
                list_of_locations2[1].east - list_of_locations2[0].east,
                list_of_locations2[1].north - list_of_locations2[0].north,
            ]
        )
        # normalize both headings so that the dot product below is a valid cosine
        # (the original normalized only heading2)
        heading = heading / linalg.norm(heading)
        heading2 = heading2 / linalg.norm(heading2)

        angle = np.degrees(np.arccos(np.dot(heading, heading2)))

    assert frame.actual_cost != 0.0

    return float(frame.actual_cost + np.clip(angle * 10, a_min=0, a_max=frame.actual_cost * 0.2))
576
pyrate/pyrate/plan/nearplanner/cost_model.py
Normal file
576
pyrate/pyrate/plan/nearplanner/cost_model.py
Normal file
@ -0,0 +1,576 @@
"""Cost model code for use in the near-planner."""

# Dataclass Support
from dataclasses import fields

# Static Typing
from typing import cast
from typing import Dict
from typing import Generator
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union

# Scientific Computing
import numpy as np
from numpy.typing import NDArray

from scipy import linalg

from .evaluated_timing_frame import CollisionData
from .evaluated_timing_frame import EvaluatedTimingFrame

from .timing_frame import TimingFrame

from .obstacle import Obstacle
from .polar_model import PolarModel
from . import utils

# Custom type for later use
TimingFrameGradients = Tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]]


class CostModel:
    """Class for evaluating and optimizing the cost of timed nautical routes.

    Each route is modeled as a :class:`~.timing_frame.TimingFrame`.

    Args:
        obstacles:
            Dictionary containing the :class:`~.obstacle.Obstacle`\\ s to regard in future route
            evaluations, indexed by unique keys.
        polar_model: polar of the boat to simulate
    """

    def __init__(self, obstacles: Dict[str, Obstacle], polar_model: PolarModel) -> None:
        self._obstacles = obstacles
        self._model = polar_model

        self._points_per_obstacle: List[int] = []

        self._obstacles_edges: NDArray[np.floating] = np.array([], dtype=np.float64)
        self._obstacles_points: NDArray[np.floating] = np.array([], dtype=np.float64)
        self._obstacles_speeds: NDArray[np.floating] = np.array([], dtype=np.float64)
        self._obstacles_inner_normals: NDArray[np.floating] = np.array([], dtype=np.float64)

        self._obstacles_squared_edges_length: Optional[NDArray[np.floating]] = np.array([], dtype=np.float64)

        self._update()

    @property
    def obstacles(self) -> Dict[str, Obstacle]:
        """Dictionary of IDs with :class:`~.obstacle.Obstacle`\\ s represented by this :class:`CostModel`."""
        return self._obstacles

    def _update(self) -> None:
        """Updates and recalculates geometric relationships between :class:`~.obstacle.Obstacle`\\ s.

        Warning:
            To be called by the internal methods at any change to :attr:`obstacles`.
        """

        if not self.obstacles:
            return

        self.distance_matrix = self._check_obstacles()

        self._obstacles_points = np.concatenate(
            [obstacle.unique_points for obstacle in self._obstacles.values()], axis=0
        )
        self._obstacles_inner_normals = np.concatenate(
            [o.inner_normals for o in self._obstacles.values()], axis=0
        )
        self._obstacles_edges = np.concatenate([o.edges for o in self._obstacles.values()])
        self._points_per_obstacle = [obstacle.unique_points.shape[0] for obstacle in self._obstacles.values()]

        self._obstacles_speeds = np.array(
            [
                v
                for i, o in enumerate(self._obstacles.values())
                for v in [o.speed] * self._points_per_obstacle[i]
            ]
        )

        # handle the case of no obstacle points
        self._obstacles_speeds = (
            np.array([[]]) if len(self._obstacles_speeds.shape) == 1 else self._obstacles_speeds
        )
        self._obstacles_points = (
            np.array([[]]) if len(self._obstacles_points.shape) == 1 else self._obstacles_points
        )

        self._obstacles_squared_edges_length = np.concatenate(
            [o.squared_edges_length for o in self._obstacles.values()]
        )

    def _check_obstacles(self) -> NDArray[np.floating]:
        obstacle_list = list(self._obstacles.values())
        distance_matrix = np.zeros((len(obstacle_list), len(obstacle_list)))
        for obs1 in range(len(obstacle_list)):
            for obs2 in range(len(obstacle_list)):
                if obs1 != obs2:
                    distance = np.float64(obstacle_list[obs1].shape.distance(obstacle_list[obs2].shape))
                    distance_matrix[obs1][obs2] = distance
                    if distance == 0:
                        # TODO: join obs1 and obs2
                        pass

        return distance_matrix
def evaluate(
|
||||
self, frame_to_evaluate: TimingFrame, initial_cost: float = 0
|
||||
) -> Tuple[EvaluatedTimingFrame, np.floating]:
|
||||
"""Evaluates a timed route given in form of a :class:`~.timing_frame.TimingFrame`.
|
||||
|
||||
Upgrades his name to a :class:`~.evaluated_timing_frame.EvaluatedTimingFrame` and also enters a valid
|
||||
value for :attr:`~.evaluated_timing_frame.EvaluatedTimingFrame.actual_cost`.
|
||||
|
||||
Args:
|
||||
frame_to_evaluate:
|
||||
The :class:`~.timing_frame.TimingFrame` to evaluate with regards to models properties
|
||||
initial_cost: The base cost of a route just an optional way to vertically scale route cost
|
||||
|
||||
Returns:
|
||||
Tuple of the :class:`~.evaluated_timing_frame.EvaluatedTimingFrame` and its actual cost for
|
||||
convenience
|
||||
"""
|
||||
|
||||
if isinstance(frame_to_evaluate, EvaluatedTimingFrame):
|
||||
return frame_to_evaluate, frame_to_evaluate.actual_cost
|
||||
|
||||
# Evaluate the frame
|
||||
frame_to_evaluate.update_times(self._model)
|
||||
data = self._collision_eval(frame_to_evaluate)
|
||||
|
||||
# Wrap in the new object
|
||||
evaluated_frame = EvaluatedTimingFrame(
|
||||
frame_to_evaluate.route, data, start_time=frame_to_evaluate.start_time
|
||||
)
|
||||
evaluated_frame.update_times(self._model)
|
||||
|
||||
# Update the actual_cost
|
||||
cost = np.float64(initial_cost)
|
||||
for key, obstacle in self._obstacles.items():
|
||||
min_dist = np.amin(evaluated_frame.data_on_collisions[key].min_dist)
|
||||
cost += obstacle.cost(min_dist)
|
||||
cost += evaluated_frame.times[-1]
|
||||
evaluated_frame.actual_cost = cost
|
||||
|
||||
return evaluated_frame, cost
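
    # A minimal usage sketch (hypothetical names): given a populated ``CostModel``
    # named ``model`` and a simulated ``TimingFrame`` ``frame``:
    #
    #     evaluated, cost = model.evaluate(frame, initial_cost=0.0)
    #     assert evaluated.actual_cost == cost
    #
    # Passing an already evaluated frame is a no-op that returns the cached cost.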

    def add_obstacle(self, obstacles_to_add: Dict[str, Obstacle]) -> None:
        """Adds obstacles to the model.

        Args:
            obstacles_to_add:
                A :class:`dict` of :class:`~.obstacle.Obstacle`\\ s to add to the model, indexed by IDs
        """
        self._obstacles.update(obstacles_to_add)
        self._update()

    def contains_obstacle(self, obstacle_id_to_check: str) -> bool:
        """Checks whether the model describes a given obstacle id.

        This is the case if and only if the id is contained as a key in the held dictionary.

        Args:
            obstacle_id_to_check: string key of the obstacle to check

        Returns:
            whether the obstacle is described in this instance
        """
        return obstacle_id_to_check in self._obstacles

    def delete_obstacle(self, id_s_to_delete: Union[str, List[str]]) -> List[Obstacle]:
        """Takes either a single key or a list of keys and deletes the corresponding obstacles.

        Args:
            id_s_to_delete: list of keys or single key of the obstacle(s) to delete from the model

        Returns:
            the deleted obstacles
        """
        obstacles_deleted = []
        # note: strings are iterable too, so they must be excluded from the list branch
        if hasattr(id_s_to_delete, "__iter__") and not isinstance(id_s_to_delete, str):
            id_s_to_delete = cast(List[str], id_s_to_delete)
            for iter_id in id_s_to_delete:
                deleted_obstacle = self._obstacles.pop(iter_id, None)

                assert deleted_obstacle is not None, "tried to delete a non-existent identifier"
                obstacles_deleted.append(deleted_obstacle)
        else:
            id_s_to_delete = cast(str, id_s_to_delete)

            obstacles_deleted.append(self._obstacles[id_s_to_delete])
            del self._obstacles[id_s_to_delete]
        self._update()
        return obstacles_deleted
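
    # Round-trip sketch (hypothetical obstacle ``buoy``):
    #
    #     model.add_obstacle({"buoy": buoy})
    #     assert model.contains_obstacle("buoy")
    #     removed = model.delete_obstacle("buoy")
    #     assert removed == [buoy] and not model.contains_obstacle("buoy")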

    def rebase_obstacles(self, obstacles_to_rebase: Dict[str, Obstacle]) -> None:
        """Completely rebases this model on the given obstacles.

        Args:
            obstacles_to_rebase: Dictionary of :class:`~.obstacle.Obstacle`\\ s with keys to rebase on
        """
        self._obstacles = obstacles_to_rebase
        self._update()

    # ---- Route Evaluation

    def _collision_eval(self, frame: TimingFrame) -> Dict[str, CollisionData]:
        """Private collision evaluation logic.

        Args:
            frame: The :class:`~.timing_frame.TimingFrame` to be evaluated

        Returns:
            collision data for each obstacle, packed as :class:`~.holders.CollisionData` with the id as key
        """
        data = CollisionData()

        if not isinstance(frame, EvaluatedTimingFrame):

            if len(self._obstacles) == 0:
                return {}

            seg_points = np.concatenate(
                (utils.shapely_point_to_ndarray(frame.position)[None, :], frame._segment_points), axis=0
            )

            points_on_route, times, delta_times, speeds = cast(
                Tuple[
                    NDArray[np.floating], NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]
                ],
                (
                    np.array(list(seg_points), dtype=np.float64)[:-1],
                    np.array(list(frame.times[:]), dtype=np.float64)[:-1],
                    np.array(list(np.diff(frame.times)), dtype=np.float64)[:],
                    np.array(list(frame.speeds[:]))[:],
                ),
            )

            edges, data.delta_speed = self.obstacle_time_projection(times, speeds)

            data.delta_points = points_on_route[:, :, None] - edges

            # find the time and place of a collision
            # normal distances
            data.normal_distances = np.einsum(
                "ix, txi-> ti", self._obstacles_inner_normals, data.delta_points
            )
            # normal speeds/vectors
            data.normal_vectors = np.einsum("ix, txi-> ti", self._obstacles_inner_normals, data.delta_speed)

            with np.errstate(divide="ignore", invalid="ignore"):
                data.time_collision = np.nan_to_num(-1 * data.normal_distances / data.normal_vectors)

            with np.errstate(over="ignore"):
                x_coll: NDArray[np.floating] = (
                    np.einsum(
                        "txi, ix-> ti",
                        data.delta_points + data.time_collision[:, None, :] * data.delta_speed,
                        self._obstacles_edges,
                    )
                    / self._obstacles_squared_edges_length
                )

            data.collision_status = (
                (x_coll >= 0)
                * (x_coll <= 1)
                * (data.time_collision >= 0)
                * (data.time_collision <= delta_times[:, None])
            )

            for obs_iter in self._obstacles.values():
                if obs_iter.shape.contains(frame.route.locations[0]) and obs_iter.shape.contains(
                    frame.route.locations[-1]
                ):
                    data.collision_status[0][0] = True

            # determine the nearest point between time points
            dv2 = np.einsum("ijk, ijk-> ik", data.delta_speed, data.delta_speed)
            with np.errstate(divide="ignore", invalid="ignore"):
                data.time_min = np.nan_to_num(
                    -1 * np.einsum("ijk, ijk-> ik", data.delta_points, data.delta_speed) / dv2
                )

            data.time_min[dv2[:, 0] == 0, :] = 0.0
            data.time_min[(data.time_min < 0) + (data.time_min > delta_times[:, None])] = 0

            # case 3: nearest point between obstacle points
            with np.errstate(divide="ignore", invalid="ignore"):
                x_min = (
                    np.einsum("ijk, kj->ik", data.delta_points, self._obstacles_edges)
                    / self._obstacles_squared_edges_length
                )
            x_min[:, self._obstacles_squared_edges_length == 0] = 0.0
            x_min[(x_min >= 1) + (x_min < 0)] = 0

            # difference vectors
            assert self._obstacles is not None
            data.distance_vectors = np.stack(
                (
                    data.delta_points + data.delta_speed * data.time_min[:, None, :],
                    data.delta_points - self._obstacles_edges.T[None, :, :] * x_min[:, None, :],
                ),
                axis=-2,
            )

            data.min_dist = np.amin(linalg.norm(data.distance_vectors, axis=1), axis=-2)
            data.min_dist[data.collision_status] = 0

            data.x_min = x_min
            data.speeds = speeds[:, :, None]

            return self._pack_collision_data(data)

        return frame.data_on_collisions
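
    # Note on the einsum notation above: "ix, txi-> ti" contracts the spatial axis x
    # of the inner normals (shape ``(i, 2)``) with the relative positions (shape
    # ``(t, 2, i)``), yielding one signed normal distance per time step t and obstacle
    # point i. An equivalent but more verbose formulation would be:
    #
    #     normal_distances = np.sum(normals.T[None, :, :] * delta_points, axis=1)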

    def _pack_collision_data(self, data: CollisionData) -> Dict[str, CollisionData]:
        """Packs the combined collision data created by :meth:`_collision_eval` into a dictionary.

        Splitting the data per :class:`~.obstacle.Obstacle` allows for obstacle-specific querying.

        Args:
            data: Combined collision data created by :meth:`_collision_eval`

        Returns:
            Dictionary with obstacle IDs as keys and the :class:`~.holders.CollisionData` as values
        """

        cumulative_points = np.cumsum([0] + self._points_per_obstacle)
        indices = [
            (cumulative_points[i], cumulative_points[i + 1]) for i in range(len(self._points_per_obstacle))
        ]

        return_val: Dict[str, CollisionData] = {}

        for obstacle_iter, ind in zip(self._obstacles.keys(), indices):
            collision_iter: CollisionData = CollisionData()
            for attr in fields(collision_iter):
                data_value_of_attr: np.ndarray = getattr(data, attr.name)
                if data_value_of_attr.shape[-1] > 1:
                    setattr(collision_iter, attr.name, data_value_of_attr.T[ind[0] : ind[1]].T)  # noqa: E203
                else:
                    setattr(collision_iter, attr.name, data_value_of_attr.T[0].T)
            return_val[obstacle_iter] = collision_iter

        return return_val
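
    # Slicing sketch: with, e.g., ``_points_per_obstacle == [4, 3]``, the cumulative sum
    # yields ``[0, 4, 7]`` and thus the index pairs ``[(0, 4), (4, 7)]``, so the first
    # obstacle receives columns 0..3 of each stacked array and the second one columns 4..6.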

    def obstacle_time_projection(
        self, times: NDArray[np.floating], speeds: NDArray[np.floating]
    ) -> Tuple[NDArray[np.floating], NDArray[np.floating]]:
        """Projects the stored obstacles to their predicted locations at the given times.

        Also calculates the vectorized speed of the route segments relative to each obstacle. For the
        following dimensional information, let x denote the cumulated sum of all unique points of all
        obstacles currently stored in the cost model.

        Args:
            times: Time constraints of the route with shape ``(number of time points, )``
            speeds: speeds of the route with shape ``(number of time points, 2)``

        Return: A tuple of ``(points, speeds)`` containing a tensor of projected obstacle coordinates at
            each time point of the route; ``points`` and ``speeds`` each have dimension
            ``(number of time points, 2, x)``.
        """

        if len(self._obstacles_points.T.shape) == 1:
            assert len(self._obstacles_speeds.T.shape) == 1, (self._obstacles_points, self._obstacles_speeds)
            # in the case of no obstacles, numpy would implicitly cast dimensions, resulting in an error

            return np.array([[[]]], dtype=np.float64), np.array([[[]]], dtype=np.float64)

        assert len(self._obstacles_speeds.T.shape) == 2, self._obstacles_speeds.shape
        assert len(self._obstacles_points.T.shape) == 2, self._obstacles_points.shape

        projected_points = (
            self._obstacles_points.T[None, :, :] + self._obstacles_speeds.T[None, :, :] * times[:, None, None]
        )
        relative_speeds = speeds[:, :, None] - self._obstacles_speeds.T[None, :, :]
        return projected_points, relative_speeds
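
    # Shape sketch (hypothetical numbers): with 5 time points and two obstacles of
    # 4 unique points each (x = 8), the broadcasting works out as
    #
    #     points: (1, 2, 8) + (1, 2, 8) * (5, 1, 1) -> (5, 2, 8)
    #     speeds: (5, 2, 1) - (1, 2, 8)             -> (5, 2, 8)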

    # ---- Gradient Calculation

    def gradients(  # pylint: disable=too-many-locals
        self, frame_s: Union[TimingFrame, List[TimingFrame]]
    ) -> Union[Tuple[List[float], List[NDArray[np.floating]]], Tuple[float, NDArray[np.floating]]]:
        """Calculates the gradients of the full route cost with respect to the route.

        Arguments:
            frame_s: frame(s) to calculate the gradients for

        Returns:
            Tuple of the cost of taking the :class:`~.timing_frame.TimingFrame`\\ (s) and the cost
            derivative of the :class:`~.timing_frame.TimingFrame`\\ (s)
        """
        if not hasattr(frame_s, "__iter__"):
            frames = [cast(TimingFrame, frame_s)]
        else:
            frames = cast(List[TimingFrame], frame_s)

        evaluated_frames: List[EvaluatedTimingFrame] = []
        for frame in frames:
            frame.update_times(self._model)
            evaluated, _ = self.evaluate(frame)
            evaluated_frames.append(evaluated)

        if not self._obstacles or len(self._obstacles.values()) == 0:
            cost_return, grad_return = [r.cost for r in frames], [r.cost_grad() for r in frames]

            if len(evaluated_frames) == 1:
                assert len(grad_return) == 1
                return cost_return[0], grad_return[0]
            return cost_return, grad_return

        costs_array, obs_gradients = self._calc_gradients(evaluated_frames)
        costs = [c + r.cost for c, r in zip(costs_array, evaluated_frames)]
        gradients = []
        for frame, (grad_subs, grad_times, grad_speeds) in zip(evaluated_frames, obs_gradients):
            assert not np.isnan(grad_times).any(), grad_times
            gradients.append(
                frame.cost_grad(
                    other_cost_dtimes=grad_times, dcost_dspeed=grad_speeds, dcost_dpoints_ext=grad_subs
                )
            )

        if len(evaluated_frames) == 1:
            assert len(gradients) == 1
            return costs[0], gradients[0]
        return costs, gradients
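
    # Usage sketch (hypothetical ``model``, ``frame`` and ``other_frame``): a single
    # frame yields scalars, a list yields lists:
    #
    #     cost, grad = model.gradients(frame)
    #     costs, grads = model.gradients([frame, other_frame])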

    def _calc_gradients(
        self, frames: List[EvaluatedTimingFrame]
    ) -> Tuple[NDArray[np.floating], Iterable[TimingFrameGradients]]:
        """Calculates the cost derivatives with respect to several aspects of a list of frames.

        Args:
            frames: frames to calculate the gradients for

        Returns: Tuple of the cumulative cost and an iterable that yields a tuple of gradients. The first
            entry in the gradient tuple describes the route gradient w.r.t. the locations of the stored
            obstacles. The second entry describes the gradient w.r.t. the simulated times and the third
            gradient is calculated w.r.t. the obstacle speeds under an assumption of constant speed. Each
            gradient has the shape ``(number of route segments, 2)``. The length of the iterable is the
            same as that of the given list of frames.
        """

        # pylint: disable-msg=too-many-locals
        # This is acceptable since the computation is already split into several hierarchical methods
        # and the number of local variables cannot be reduced further without hurting readability.

        # evaluate the attributes at the lowest distance, needed for the gradient calculation
        array_shape = (len(frames), len(self._obstacles))
        time_seg, obst_seg, min_dist, grad_distances, t_min, cost = [np.zeros(array_shape) for _ in range(6)]
        speeds, dist_vect = [np.zeros((*array_shape, 2)) for _ in range(2)]
        for i, frame in enumerate(frames):

            for j, (obstacle_key, obstacle) in enumerate(self._obstacles.items()):
                data = frame.data_on_collisions[obstacle_key]

                # pylint: disable-msg=unbalanced-tuple-unpacking
                # this is safe due to how data is created
                frame_index, obstacle_index = np.unravel_index(
                    np.argmin(data.min_dist, axis=None), data.min_dist.shape
                )
                min_dist[i, j] = data.min_dist[frame_index, obstacle_index]

                assert not (np.isinf(min_dist).any()), data

                t_min[i, j] = data.time_min[frame_index, obstacle_index]
                cost[i, j], grad_distances[i, j] = obstacle.distance_gradient(min_dist[i, j])

                # if obstacle.soft and min_dist[i, j] == 0:
                #     # TO (BJK): add soft obstacles (this is a leftover from old code)
                #     cost[i, j] += obstacle.duration_cost.cost(self._duration_eval(route, obstacle))
                #     grad_distances[i, j] = 0

                if min_dist[i, j] == 0:
                    min_dist[i, j] = 1e-3
                time_seg[i, j] = frame_index
                obst_seg[i, j] = obstacle_index

                speeds[i, j, :] = data.speeds[frame_index, :]

                assert data.distance_vectors.shape[2] == 2, "case not handled"

                distance_index = data.distance_vectors[frame_index, :, :, obstacle_index]
                dist_vect[i, j, :] = min(distance_index[:, 0], distance_index[:, 1], key=linalg.norm)

        # chain rule for the derivative
        assert not (np.isnan(grad_distances).any()), grad_distances
        assert not (np.isnan(min_dist).any()), min_dist
        assert not (np.isnan(dist_vect).any()), dist_vect

        dcost_d_distvec = np.nan_to_num((grad_distances / min_dist))[:, :, None] * dist_vect

        assert not (np.isnan(dcost_d_distvec).any()), min_dist

        dcost_d_deltapoints = dcost_d_distvec
        dcost_d_speed = dcost_d_distvec * t_min[:, :, None]
        dcost_d_times = np.sum(dcost_d_deltapoints * (-speeds), axis=-1)

        assert not (np.isnan(dcost_d_times).any()), (speeds, grad_distances, min_dist, dist_vect)

        return (
            cast(NDArray[np.floating], cost.sum(axis=1)),
            self._split_grads((dcost_d_deltapoints, dcost_d_times, dcost_d_speed), time_seg, frames),
        )
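
    # Chain-rule sketch: with d = ||v|| for the minimal distance vector v, the derivative
    # of the cost c(d) w.r.t. v is c'(d) * v / d, which is exactly
    # ``grad_distances / min_dist * dist_vect`` above. The time derivative then picks up a
    # factor of the (negated) segment speed, dc/dt = (dc/dv) . (-s).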

    def _split_grads(
        self,
        gradients: Tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]],
        time_seg: NDArray[np.floating],
        frames: List[EvaluatedTimingFrame],
    ) -> Generator[TimingFrameGradients, None, None]:
        """Splits the combined gradients into separate gradients for each route.

        Args:
            gradients: gradients ``(grad points, grad times, grad velocity)``, each of the shape
                ``(number of obstacles, number of route segments, 2)``
            time_seg: indices of the time segment with minimal distance
            frames: frames belonging to the gradients

        Yields: Gradients split per route as a tuple. The first entry in the gradient tuple describes the
            route gradient w.r.t. the locations of the stored obstacles. The second entry describes the
            gradient w.r.t. the simulated times and the third gradient is calculated w.r.t. the obstacle
            speeds under an assumption of constant speed. Each gradient has the shape
            ``(number of route segments, 2)``.
        """
        for i, frame in enumerate(frames):
            frame_length = frame.times.shape[0]

            grad_subs, grad_times, grad_speeds = (
                np.zeros((frame_length - 1, 2)),
                np.zeros(frame_length),
                np.zeros((frame_length, 2)),
            )
            for j, _ in enumerate(self._obstacles):
                # (leftovers from old code; may save time if soft obstacles are to be added)
                # if obst.soft and frame_index.data_on_collision[on].any():
                #     g_subs, g_times, g_speeds = self.duration_grad(frame_index, on, obst)
                #     grad_times += g_times
                #     grad_speeds += g_speeds
                #     grad_subs += g_subs
                # else:

                time_segment = int(time_seg[i, j])
                if time_segment:
                    # the first route point is fixed, so segment 0 has no preceding subgoal
                    grad_subs[time_segment - 1] += gradients[0][i, j, :]
                grad_times[time_segment] += gradients[1][i, j]
                grad_speeds[time_segment, :] += gradients[2][i, j, :]

            yield (
                cast(NDArray[np.floating], grad_subs),
                cast(NDArray[np.floating], grad_times),
                cast(NDArray[np.floating], grad_speeds),
            )
163
pyrate/pyrate/plan/nearplanner/evaluated_timing_frame.py
Normal file
@ -0,0 +1,163 @@
"""Contains evaluated timing frames."""
|
||||
|
||||
# Static Typing
|
||||
from typing import cast
|
||||
from typing import Dict
|
||||
from typing import Optional
|
||||
from typing import Set
|
||||
from typing import Tuple
|
||||
from typing import Union
|
||||
|
||||
import numpy.typing as npt
|
||||
|
||||
# Scientific Computing
|
||||
import numpy as np
|
||||
|
||||
# Geometry
|
||||
from pyrate.plan.geometry.location import CartesianLocation
|
||||
from pyrate.plan.geometry.route import CartesianRoute
|
||||
|
||||
from .holders import CollisionData
|
||||
from .timing_frame import TimingFrame
|
||||
|
||||
|
||||
class EvaluatedTimingFrame(TimingFrame):
|
||||
"""Class used as a wrapper for evaluated :class:`~.timing_frame.TimingFrame`.
|
||||
|
||||
It is guaranteed that instances of this class posses a :attr:`data_on_collision` Evaluation.
|
||||
For :class:`~.timing_frame.TimingFrame` instances upgraded by a CostModel simulated times are also
|
||||
guaranteed.
|
||||
|
||||
Args:
|
||||
route: route simulated by the :class:`EvaluatedTimingFrame`
|
||||
data_on_collisions: data on possible collision to encapsulate
|
||||
start_time: start time to start route at
|
||||
|
||||
Attributes:
|
||||
actual_cost: accumulated cost of time taken, distance to obstacles and their speed
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, route: CartesianRoute, data_on_collisions: Dict[str, CollisionData], start_time: float = 0
|
||||
) -> None:
|
||||
self._data_on_collisions = data_on_collisions
|
||||
self._revoked = False
|
||||
self.actual_cost = np.float64(0.0)
|
||||
super().__init__(route, start_time)
|
||||
|
||||
def __str__(self) -> str:
|
||||
return f"Evaluated{super().__str__()}"
|
||||
|
||||
def revoke(self, new_position: CartesianLocation) -> TimingFrame:
|
||||
"""Revokes the `~.timing_frame.TimingFrame`.
|
||||
|
||||
This results in this instance becoming invalid
|
||||
and returns a new non evaluated :class:`~.timing_frame.TimingFrame` with adapted first point.
|
||||
|
||||
Args:
|
||||
new_position: new first position of the revoked :class:`~.timing_frame.TimingFrame`
|
||||
|
||||
Returns:
|
||||
stripped :class:`~.timing_frame.TimingFrame`
|
||||
"""
|
||||
self.position = new_position
|
||||
new_frame: TimingFrame = TimingFrame(self.route, start_time=self.start_time)
|
||||
self._revoked = True
|
||||
return new_frame
|
||||
|
||||
@property
|
||||
def valid(self) -> bool:
|
||||
"""True iff :class:`~.timing_frame.TimingFrame` was already evaluated and no collision was found"""
|
||||
if self._data_on_collisions is not None and not self._revoked:
|
||||
return not any(o.collision_status.any() for k, o in self._data_on_collisions.items())
|
||||
return False
|
||||
|
||||
@property
|
||||
def imminent_collision(self) -> Tuple[str, float]:
|
||||
"""Collision time for imminent collisions
|
||||
|
||||
Raises:
|
||||
AssertionError: If attribute is called before TimingFrame has been evaluated
|
||||
|
||||
Return:
|
||||
tuple of the obstacle id and time of the next collision
|
||||
"""
|
||||
assert self.data_on_collisions is not None
|
||||
temp = {}
|
||||
|
||||
for key, obstacle_data in self.data_on_collisions.items():
|
||||
if not obstacle_data.collision_status.any():
|
||||
temp[key] = [np.inf]
|
||||
else:
|
||||
temp[key] = (obstacle_data.time_collision + self._times[1:, None])[
|
||||
obstacle_data.collision_status
|
||||
].min()
|
||||
|
||||
return cast(Tuple[str, float], temp.get(cast(str, min(temp.items(), key=lambda x: x[1]))))
|
||||
|
||||
@property
|
||||
def collision_times(self) -> Dict[str, float]:
|
||||
"""Times for collisions with each obstacle
|
||||
|
||||
Raises:
|
||||
AssertionError: If attribute is called before TimingFrame hasn't yet been evaluated
|
||||
|
||||
Return:
|
||||
dictionary that maps an obstacle id to the time of collision if route is taken
|
||||
"""
|
||||
assert self.data_on_collisions is not None
|
||||
temp = {}
|
||||
for key, obstacle_data in self.data_on_collisions.items():
|
||||
if not obstacle_data.collision_status.any():
|
||||
temp[key] = np.inf
|
||||
else:
|
||||
temp[key] = (obstacle_data.time_collision + self._times[:-1, None])[
|
||||
obstacle_data.collision_status
|
||||
].min()
|
||||
return cast(Dict[str, float], temp)
|
||||
|
||||
@property
|
||||
def collision_segment(self) -> Dict[str, Optional[npt.NDArray[np.floating]]]:
|
||||
"""Segments where collision happens as a dictionary with obstacle ids as key.
|
||||
|
||||
Raises:
|
||||
AssertionError: If attribute is called before TimingFrame hasn't yet been evaluated
|
||||
|
||||
Return:
|
||||
Dictionary mapping obstacle IDs to index of route segment where collision is predicted
|
||||
"""
|
||||
assert self.data_on_collisions is not None
|
||||
temp: Dict[str, Optional[npt.NDArray[np.floating]]] = {}
|
||||
for key, obstacle_data in self.data_on_collisions.items():
|
||||
if not obstacle_data.collision_status.any():
|
||||
temp[key] = None
|
||||
else:
|
||||
temp[key] = np.argmax(obstacle_data.collision_status.sum(axis=1)) # type: ignore
|
||||
return temp
|
||||
|
||||
@property
|
||||
def minimal_dist(
|
||||
self,
|
||||
) -> Union[Set[np.floating], Dict[str, Tuple[np.floating, Optional[npt.NDArray[np.floating]]]]]:
|
||||
"""Dictionary of minimal distances per obstacle ID.
|
||||
|
||||
Return:
|
||||
If not yet evaluated ``{0}``, else a mapping from obstacle ID to ``(distance_x, distance_y)``.
|
||||
"""
|
||||
if self.data_on_collisions is None:
|
||||
return {np.float64(0.0)}
|
||||
temp: Dict[str, Tuple[np.floating, Optional[npt.NDArray[np.floating]]]] = {}
|
||||
for key, obstacle_data in self.data_on_collisions.items():
|
||||
if obstacle_data.collision_status.any():
|
||||
temp[key] = np.float64(0.0), None
|
||||
else:
|
||||
temp[key] = (
|
||||
np.float64(obstacle_data.min_dist.min()),
|
||||
np.argmin(obstacle_data.min_dist.min(axis=1)).astype(np.float64),
|
||||
) # type: ignore
|
||||
return temp
|
||||
|
||||
@property
|
||||
def data_on_collisions(self) -> Dict[str, CollisionData]:
|
||||
"""Information about collisions with each obstacle, index by obstacle keys."""
|
||||
return self._data_on_collisions
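
# Property usage sketch (hypothetical ``evaluated`` frame from ``CostModel.evaluate``):
#
#     if not evaluated.valid:
#         key, time = evaluated.imminent_collision  # id and time of the next collision
#         segment = evaluated.collision_segment[key]
#     else:
#         distances = evaluated.minimal_dist  # per-obstacle (distance, segment index)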
17
pyrate/pyrate/plan/nearplanner/exceptions.py
Normal file
@ -0,0 +1,17 @@
"""This module defines exceptions for use in the near-planning backend."""
|
||||
|
||||
|
||||
class PlanningError(Exception):
|
||||
"""Base Class for all exceptions throw during route planning"""
|
||||
|
||||
|
||||
class NoRouteFoundException(PlanningError):
|
||||
"""Exception thrown if route discovery failed
|
||||
|
||||
Args:
|
||||
message: message to display
|
||||
"""
|
||||
|
||||
def __init__(self, message: str):
|
||||
self.message = message
|
||||
super().__init__(message)
92
pyrate/pyrate/plan/nearplanner/holders.py
Normal file
@ -0,0 +1,92 @@
"""collection of dataclasses used in the planning backend and node-service of the near-planner node"""
|
||||
|
||||
# Dataclass Support
|
||||
from dataclasses import dataclass
|
||||
|
||||
from typing import Optional
|
||||
|
||||
# Scientific Computing
|
||||
import numpy as np
|
||||
|
||||
# Static Typing
|
||||
import numpy.typing as npt
|
||||
|
||||
|
||||
@dataclass
|
||||
class CollisionData:
|
||||
"""Holds information on possible future collisions or collision avoidance with a single obstacle.
|
||||
|
||||
The stored information is not intended to be directly read from this dataclass, but to be stored
|
||||
and post-processed internally in an :class:`~planning_backend.timing_frame.EvaluatedTimingFrame`
|
||||
and corresponding information be read through its properties.
|
||||
"""
|
||||
|
||||
collision_status: npt.NDArray[np.bool_] = np.array([], dtype=np.bool_)
|
||||
"""array, whose entries describe if a segment i of a frame has a collision with an obstacle """
|
||||
min_dist: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, whose entries describe the minimal distances at each time steps to the obstacle"""
|
||||
normal_distances: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, whose entries describe the normal distances of obstacle towards the boat on each segment"""
|
||||
normal_vectors: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, whose entries describe the normal vectors of each obstacle at each time step"""
|
||||
time_collision: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, whose entries describe the time of a future collision with an object"""
|
||||
time_min: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, whose entries describe the time of the first collision"""
|
||||
x_min: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, whose entries describe the place of the first collision"""
|
||||
delta_speed: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, describing the relative speed of route segments towards the obstacle"""
|
||||
delta_points: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, describing relative position of route towards obstacle on each time step"""
|
||||
speeds: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, describing relative speeds towards obstacle"""
|
||||
distance_vectors: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
|
||||
"""array, describing the vectorized distances towards the given obstacle"""
|
||||
|
||||
|
||||
@dataclass
|
||||
class EstimationParameters:
|
||||
"""Holds estimation parameters for finding first initial routes.
|
||||
|
||||
This class is mainly used by instances of :class:`~.optimizer.Optimizer` while searching an initial guess
|
||||
for a valid :class:`~.timing_frame.TimingFrame` that later can be optimized.
|
||||
"""
|
||||
|
||||
first_try_construction: bool = True
|
||||
"""if first to try deterministic construction by obstacle hulls"""
|
||||
|
||||
max_len_relative: float = 0.1
|
||||
"""Maximum fraction of the length of a new edge to explore, relative to the distance to the goal"""
|
||||
max_count: int = 300
|
||||
"""maximal number of potential nodes are explored before resulting in a failure"""
|
||||
p_goal: float = 0.2
|
||||
"""probability the goal is chosen as the next node"""
|
||||
|
||||
|
||||
@dataclass
|
||||
class OptimizationParameters:
|
||||
"""Holds parameters for running :class:`~.optimizer.Optimizer`."""
|
||||
|
||||
estimation_parameters: EstimationParameters
|
||||
"""parameters used for initial exploration"""
|
||||
n_samples: int = 25
|
||||
"""maximum number of initial samples that will be selected, optimized and the results compared"""
|
||||
inital_stepsize: float = 0.1
|
||||
"""fraction of the direct distance to goal which will be used as the initial step size for optimization"""
|
||||
n_iter_grad: int = 500
|
||||
"""maximum number of steps used by the gradient descent algorithm"""
|
||||
n_break: int = 10
|
||||
"""maximum number of initial samples which will be selected to be optimized and compared"""
|
||||
adaptive_step_size: bool = True
|
||||
"""if to use adaptive step size during gradient descent"""
|
||||
prune: bool = True
|
||||
"""if to prune the resulting routes"""
|
||||
verbose: bool = False
|
||||
"""if to print additional information"""
|
||||
overwrite_grad_lim: Optional[float] = None
|
||||
"""optional parameter to overwrite to cut of the gradient at each optimization step"""
|
||||
adaptive_data_rate_penalty_on_loss_sign_change: float = 2.5
|
||||
"""penalty applied to lossful sign changes during adaptive step size optimization"""
|
||||
adaptive_data_rate_general_regulization_factor: float = 1.5
|
||||
"""regularization positive and negative used to adapt step size based on whether cost in/decreases"""
137
pyrate/pyrate/plan/nearplanner/obstacle.py
Normal file
@ -0,0 +1,137 @@
"""
|
||||
code for the Obstacle
|
||||
"""
|
||||
|
||||
# Static Typing
|
||||
from typing import cast
|
||||
from typing import Tuple
|
||||
|
||||
from numpy.typing import NDArray
|
||||
|
||||
# Scientific Computing
|
||||
import numpy as np
|
||||
|
||||
from scipy import linalg
|
||||
|
||||
# Geometry
|
||||
from pyrate.plan.geometry.polygon import CartesianPolygon
|
||||
|
||||
from .cost_functions import CostFunction
|
||||
from .cost_functions import CostFunctionLinear
|
||||
|
||||
class Obstacle:
|
||||
"""An obstacle with a polygon shape and vector speed.
|
||||
|
||||
Wrapper class around a :class:`~pyrate.plan.geometry.polygon.CartesianPolygon`. Assumes constant speed.
|
||||
|
||||
Args:
|
||||
polygon: Polygon shape obstacle shall be based on
|
||||
speed: 2D speed vector describing the obstacle's movement in ``(east, north)`` direction.
|
||||
cost_function: The cost function that shall apply to this :class:`~.obstacle.Obstacle`
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
polygon: CartesianPolygon,
|
||||
speed: NDArray[np.floating],
|
||||
cost_function: CostFunction = CostFunctionLinear(),
|
||||
) -> None:
|
||||
self.shape = polygon
|
||||
self.speed = speed if speed is not None else np.zeros(2)
|
||||
self.cost_function = cost_function
|
||||
|
||||
def __call__(self, time: float) -> "Obstacle":
|
||||
"""Transposes Obstacle to a given time.
|
||||
|
||||
Args:
|
||||
time: time to transpose to
|
||||
|
||||
Returns:
|
||||
A copy of this object transposed to given time
|
||||
"""
|
||||
projection = self.project_at_time(time)
|
||||
cartesian_poly = CartesianPolygon.from_numpy(projection)
|
||||
return Obstacle(cartesian_poly, self.speed, cost_function=self.cost_function)
|
||||
|
||||
def distance_gradient(self, min_dist: float) -> Tuple[np.floating, np.floating]:
|
||||
"""Calculates the distance gradient.
|
||||
|
||||
Args:
|
||||
min_dist: minimal distance to calculate distance gradient from
|
||||
|
||||
Returns:
|
||||
derivative of cost with regards to minimal distance
|
||||
"""
|
||||
return self.cost_function.cost_grad(min_dist)
|
||||
|
||||
def cost(self, min_distance: float) -> np.floating:
|
||||
"""Calculates the cost of the obstacle in respect to the minimal distance.
|
||||
|
||||
Args:
|
||||
min_distance: minimal distance to base the cost of the obstacle on
|
||||
|
||||
Returns:
|
||||
cost of taking a route in the minimal distance of this obstacle
|
||||
"""
|
||||
return self.cost_function.cost(min_distance)
|
||||
|
||||
def project_at_time(self, time: float) -> NDArray[np.floating]:
|
||||
"""Projects the obstacle shape at a given time.
|
||||
|
||||
Args:
|
||||
time: time to project the obstacle shape to
|
||||
|
||||
Returns:
|
||||
numpy.ndarray with coordinates of the projected shape
|
||||
"""
|
||||
return cast(NDArray[np.floating], self.shape.to_numpy() + self.speed * time)
|
||||
|
||||
@property
|
||||
def unique_points(self) -> NDArray[np.floating]:
|
||||
"""only unique points of polygon shape
|
||||
|
||||
Returns:
|
||||
numpy-array of unique points that make up the polygon
|
||||
"""
|
||||
return cast(NDArray[np.floating], self.shape.to_numpy()[:-1, :])
|
||||
|
||||
@property
|
||||
def edges(self) -> NDArray[np.floating]:
|
||||
"""method for getting edges in a numpy array
|
||||
|
||||
Returns:
|
||||
numpy-array of polygon edges
|
||||
"""
|
||||
temp = self.shape.to_numpy()
|
||||
return cast(NDArray[np.floating], (np.concatenate((temp[1:, :], temp[:1, :])) - temp)[:-1, :])
|
||||
|
||||
@property
|
||||
def squared_edges_length(self) -> NDArray[np.floating]:
|
||||
"""method for getting the squared length of polygon edges in a numpy array
|
||||
|
||||
Returns:
|
||||
numpy-array of squared lengths of polygon edges
|
||||
"""
|
||||
return cast(NDArray[np.floating], linalg.norm(self.edges, axis=1) ** 2)
|
||||
|
||||
@property
|
||||
def edges_length(self) -> NDArray[np.floating]:
|
||||
"""method for getting edges length in a numpy array
|
||||
|
||||
Returns:
|
||||
numpy-array of lengths of polygon edges
|
||||
"""
|
||||
return cast(NDArray[np.floating], linalg.norm(self.edges, axis=1))
|
||||
|
||||
@property
|
||||
def inner_normals(self) -> NDArray[np.floating]:
|
||||
"""method for getting normals in numpy-array
|
||||
|
||||
Returns:
|
||||
numpy-array of edge normals
|
||||
"""
|
||||
diffs = self.edges
|
||||
normals = np.concatenate((diffs[:, 1:2], -diffs[:, 0:1]), axis=1)
|
||||
with np.errstate(divide="ignore"):
|
||||
_ = cast(NDArray[np.floating], normals / linalg.norm(normals, axis=1, keepdims=True))
|
||||
return _
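
# Geometry sketch (hypothetical unit square; orientation as produced by
# ``CartesianPolygon.to_numpy``): each edge vector (dx, dy) yields the normal
# (dy, -dx), normalized to unit length:
#
#     square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
#     obstacle = Obstacle(CartesianPolygon.from_numpy(square), speed=np.zeros(2))
#     obstacle.edges.shape          # (4, 2)
#     obstacle.inner_normals.shape  # (4, 2), rows are unit vectors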
737
pyrate/pyrate/plan/nearplanner/optimizer.py
Normal file
@ -0,0 +1,737 @@
"""
|
||||
optimizer code for use in the near-planner ros-node
|
||||
"""
|
||||
|
||||
# Dataclass Support
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Tuple
|
||||
from typing import Union
|
||||
|
||||
# Static Typing
|
||||
from typing import cast
|
||||
|
||||
# Numerical Computing
|
||||
import numpy as np
|
||||
import shapely
|
||||
import shapely.ops
|
||||
from numpy import typing as npt
|
||||
|
||||
# Geometry
|
||||
from pyrate.plan.geometry.route import CartesianRoute
|
||||
from scipy import linalg
|
||||
from scipy.special import softmax
|
||||
from shapely.geometry import Point
|
||||
|
||||
from . import utils
|
||||
from .cost_model import CostModel
|
||||
from .evaluated_timing_frame import EvaluatedTimingFrame
|
||||
from .exceptions import NoRouteFoundException
|
||||
from .holders import EstimationParameters
|
||||
from .holders import OptimizationParameters
|
||||
from .obstacle import Obstacle
|
||||
from .polar_model import PolarModel
|
||||
from .timing_frame import TimingFrame
|
||||
|
||||
|
||||
class Optimizer:
|
||||
"""Debug class for interactive interpreter.
|
||||
|
||||
Object for initializing and optimizing (near) routes / planning based on obstacles and polar diagrams.
|
||||
Provides optimization regarding sailing time and obstacle danger, which is evaluated
|
||||
via cost functions.
|
||||
|
||||
Args:
|
||||
wind_information: optional current tuple of ``(angle, speed)``, in (rad, strength in m/s)
|
||||
obstacles: optional dict mapping obstacle keys to obstacle objects
|
||||
heading: optional current heading angle of the boat, in radians, right hand from true north
|
||||
position: optional current position of the sailboat, in ``(east, north)``/``(x, y)`` coordinates
|
||||
"""
|
||||
|
||||
def __init__( # pylint: disable=R0913
|
||||
self,
|
||||
position: Optional[Point] = None,
|
||||
obstacles: Optional[Dict[str, Obstacle]] = None,
|
||||
heading: Optional[npt.NDArray[np.floating]] = None,
|
||||
wind_information: Optional[Tuple[float, float]] = None,
|
||||
) -> None:
|
||||
# environment variables
|
||||
wind_angle, wind_speed = wind_information if wind_information is not None else (0, 0)
|
||||
|
||||
self._wind_angle: np.floating = np.float64(wind_angle)
|
||||
self._wind_speed: np.floating = np.float64(wind_speed)
|
||||
|
||||
self._grad_lim: np.floating = np.float32(5.0)
|
||||
|
||||
# boat model and goal
|
||||
self.boat_polar: PolarModel = PolarModel(wind_speed, wind_angle)
|
||||
|
||||
# obstacles
|
||||
self._obstacles: Dict[str, Obstacle] = obstacles or {}
|
||||
|
||||
self.cost_model: CostModel = CostModel(self._obstacles, self.boat_polar)
|
||||
|
||||
# boat position
|
||||
self._heading: Optional[npt.NDArray[np.floating]] = heading
|
||||
|
||||
self.heading_dir: Optional[npt.NDArray[np.floating]] = np.array([1, 0])
|
||||
if heading is not None:
|
||||
self.heading_dir = np.array([np.cos(heading), np.sin(heading)])
|
||||
|
||||
self._position: Point = position if position is not None else Point(0, 0)
|
||||
|
||||
self._goal: Optional[Point] = None
|
||||
|
||||
def __str__(self):
|
||||
return f"Optimizer(wind=({self._wind_angle},{self._wind_speed}), position={self._position}, goal={self._goal}, obs={self._obstacles.items()})"
|
||||
|
||||
# ---- UPDATE INTERFACES ---- #
|
||||
@property
|
||||
def position(self) -> Point:
|
||||
"""Position of the boat held by the optimizer"""
|
||||
return self._position
|
||||
|
||||
@position.setter
|
||||
def position(self, value: Point) -> None:
|
||||
self._position = value
|
||||
|
||||
@property
|
||||
def goal(self) -> Optional[Point]:
|
||||
"""Current sub goal we have to create a route to"""
|
||||
return self._goal
|
||||
|
||||
@goal.setter
|
||||
def goal(self, value: Point) -> None:
|
||||
self._goal = value
|
||||
|
||||
@property
|
||||
def heading(self) -> Optional[npt.NDArray[np.floating]]:
|
||||
"""Current heading of the optimizer"""
|
||||
return self._heading
|
||||
|
||||
@heading.setter
|
||||
def heading(self, value: Point) -> None:
|
||||
self._heading = value
|
||||
self.heading_dir = np.array([np.cos(value), np.sin(value)])
|
||||
|
||||
def on_added_obstacles(self, obstacles: Dict[str, Obstacle]) -> None:
|
||||
"""Event to call if obstacles added to simulation.
|
||||
|
||||
This event changes the underlying :class:`CostModel` to adapt to deleted obstacles.
|
||||
|
||||
Args:
|
||||
obstacles: obstacles, optionally keyed by ids, to add
|
||||
"""
|
||||
self.cost_model.add_obstacle(obstacles)
|
||||
|
||||
def on_reset_obstacles(self, obstacles: Dict[str, Obstacle]) -> None:
|
||||
"""Convenience Event to call if a drastic change in obstacle population has happened to simulation.
|
||||
|
||||
The same effects of this method can be achieved by separately calling :meth:`on_added_obstacles` and
|
||||
:meth:`on_deleted_obstacles`. As the behaviours of these methods overlap this method provides a more
|
||||
efficient, joint and easier way to deal with a reset or large movement in the stored obstacle
|
||||
population.
|
||||
|
||||
Args:
|
||||
obstacles: dictionary of obstacles to reset the stored obstacle population upon, keyed by
|
||||
obstacle ids
|
||||
"""
|
||||
self.cost_model.rebase_obstacles(obstacles)
|
||||
|
||||
def on_deleted_obstacles(self, obstacle_id_s: Union[str, List[str]]) -> Optional[List[Obstacle]]:
|
||||
"""Event to call to delete obstacles from simulation.
|
||||
|
||||
This event changes the underlying :class:`CostModel` to adapt to deleted obstacles.
|
||||
|
||||
Args:
|
||||
obstacle_id_s: obstacle, optionally keyed by ids, by ids to delete
|
||||
"""
|
||||
deleted_obs = self.cost_model.delete_obstacle(obstacle_id_s)
|
||||
return deleted_obs
|
||||
|
||||
@property
|
||||
def wind_angle(self) -> float:
|
||||
"""Wind angle to simulate"""
|
||||
return float(self._wind_angle)
|
||||
|
||||
@wind_angle.setter
|
||||
def wind_angle(self, value: float) -> None:
|
||||
self._wind_angle = np.float64(value)
|
||||
self.boat_polar.wind_direction = np.float64(value)
|
||||
|
||||
@property
|
||||
def wind_speed(self) -> float:
|
||||
"""Wind speed to simulate"""
|
||||
return float(self._wind_speed)
|
||||
|
||||
@wind_speed.setter
|
||||
def wind_speed(self, value: float) -> None:
|
||||
self._wind_speed = np.float64(value)
|
||||
self.boat_polar.wind_speed = np.float64(value)
|
||||
|
||||
# ---- OPTIMIZE LOGIC ---- #
|
||||
|
||||
def optimize( # noqa: C901
|
||||
self,
|
||||
goal: Point,
|
||||
optimization_parameters: OptimizationParameters,
|
||||
heading: Optional[npt.NDArray[np.floating]] = None,
|
||||
custom: bool = True,
|
||||
) -> Tuple[Optional[EvaluatedTimingFrame], Optional[List[EvaluatedTimingFrame]]]:
|
||||
"""Main optimize method of :class:`Optimizer`.
|
||||
|
||||
It uses different methods to determine an optimized
|
||||
:class:`~.evaluated_timing_frame.EvaluatedTimingFrame`
|
||||
from a given heading, goal and optimization parameters.
|
||||
|
||||
Args:
|
||||
goal: :class:`shapely.geometry.Point` specifying the route goal in local cartesian coordinates
|
||||
optimization_parameters: :class`~dataclasses.dataclass` holding parameters for optimization
|
||||
heading: :class:`np.ndarray` 2d vector describing the current heading of the boat
|
||||
custom: to use custom method (:meth:`optimize_gradient`)
|
||||
|
||||
Returns:
|
||||
optimized :class:`~.evaluated_timing_frame.EvaluatedTimingFrame` and an optional list of
|
||||
intermediate results produced during the optimization and selection algorithm
|
||||
|
||||
Raises:
|
||||
NoRouteFoundException: if no route was found during route discovery
|
||||
"""
|
||||
# pylint: disable-msg=too-many-locals
|
||||
|
||||
if goal is not None:
|
||||
self._goal = goal
|
||||
if heading is not None:
|
||||
self._heading = heading
|
||||
|
||||
assert self._goal is not None
|
||||
assert self.wind_angle is not None
|
||||
assert self.wind_speed is not None
|
||||
assert self.position is not None
|
||||
|
||||
# prepare initial guess
|
||||
init_frames: List[EvaluatedTimingFrame] = []
|
||||
final_frames: List[EvaluatedTimingFrame] = []
|
||||
costs = []
|
||||
construction_successful = False
|
||||
|
||||
if optimization_parameters.overwrite_grad_lim:
|
||||
self.grad_lim = optimization_parameters.overwrite_grad_lim
|
||||
# todo (BEN)
|
||||
|
||||
if optimization_parameters.estimation_parameters.first_try_construction:
|
||||
constructed_routes = self._construct_init(
|
||||
goal=self._goal, parameters=optimization_parameters.estimation_parameters
|
||||
)
|
||||
|
||||
# remove cycles
|
||||
constructed_routes = [f.remove_single_cycles() for f in constructed_routes]
|
||||
|
||||
# simulate
|
||||
_ = [f.update_times(boat_model=self.boat_polar) for f in constructed_routes]
|
||||
# evaluate
|
||||
init_frames = [self.cost_model.evaluate(f)[0] for f in constructed_routes]
|
||||
|
||||
# throw away invalid
|
||||
init_frames = list(filter(lambda frame: frame.valid, init_frames))
|
||||
|
||||
construction_successful = len(init_frames) != 0
|
||||
|
||||
if optimization_parameters.verbose:
|
||||
print(f"[C] Construction finished with {len(init_frames)} remaining")
|
||||
|
||||
if not construction_successful:
|
||||
for _ in range(optimization_parameters.n_samples):
|
||||
frame = self._sample_tree_init(
|
||||
self._goal, parameters=optimization_parameters.estimation_parameters
|
||||
)
|
||||
if frame is None:
|
||||
continue
|
||||
|
||||
frame = frame.remove_single_cycles()
|
||||
frame.update_times(boat_model=self.boat_polar)
|
||||
evaluated, _ = self.cost_model.evaluate(frame)
|
||||
|
||||
init_frames.append(evaluated)
|
||||
if len(init_frames) >= optimization_parameters.n_break:
|
||||
break
|
||||
|
||||
assert all(f.valid for f in init_frames)
|
||||
|
||||
if len(init_frames) == 0:
|
||||
raise NoRouteFoundException("route discovery failed")
|
||||
|
||||
# optimize
|
||||
log_grad = []
|
||||
|
||||
for frame in init_frames:
|
||||
if custom:
|
||||
optimized_frame, list_of_steps = self.optimize_gradient(
|
||||
frame,
|
||||
step_size_parameter=optimization_parameters.inital_stepsize
|
||||
* linalg.norm(self._goal - self.position),
|
||||
optimization_parameters=optimization_parameters, # TODO (BEN) here was something missing
|
||||
)
|
||||
log_grad.append(list_of_steps)
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
optimized_frame = optimized_frame.remove_single_cycles() # todo execute sometimes in iteration?
|
||||
optimized_frame.update_times(boat_model=self.boat_polar)
|
||||
optimized_frame_evaluated, _ = self.cost_model.evaluate(optimized_frame)
|
||||
costs.append(optimized_frame_evaluated.actual_cost)
|
||||
final_frames.append(optimized_frame_evaluated)
|
||||
|
||||
if optimization_parameters.prune:
|
||||
for frame in final_frames:
|
||||
frame.prune(eps=0.5 / 180 * np.pi)
|
||||
frame.update_times(boat_model=self.boat_polar)
|
||||
frame, _ = self.cost_model.evaluate(frame)
|
||||
|
||||
# handle optimization and initialization failure
|
||||
if np.amin(costs) > 1e10:
|
||||
return None, None
|
||||
|
||||
# return TimingFrame with minimal cost and the attempts needed to reach it
|
||||
return final_frames[np.argmin(costs)], log_grad[np.argmin(costs)]
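
    # End-to-end sketch (hypothetical values and obstacle ``buoy``):
    #
    #     optimizer = Optimizer(position=Point(0, 0), obstacles={"buoy": buoy},
    #                           wind_information=(0.5, 4.0))
    #     params = OptimizationParameters(estimation_parameters=EstimationParameters())
    #     best, attempts = optimizer.optimize(Point(100, 50), params)
    #     if best is not None:
    #         print(best.actual_cost)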

    def optimize_gradient(  # noqa: C901
        self,
        init_frame: EvaluatedTimingFrame,
        optimization_parameters: OptimizationParameters,
        step_size_parameter: float = 10.0,
        only_attempt: bool = False,
    ) -> Tuple[EvaluatedTimingFrame, List[EvaluatedTimingFrame]]:
        """Optimization routine for gradient-based optimization.

        Args:
            init_frame: initial frame to optimize
            optimization_parameters: parameters for the optimization, bundled in a dataclass
            step_size_parameter: initial step size for the gradient descent
            only_attempt: whether to skip asserting that the computed route is valid

        Returns:
            the optimized :class:`~.timing_frame.TimingFrame` and a list of intermediate results
        """
        # pylint: disable-msg=too-many-locals
        # pylint: disable-msg=too-many-statements
        step_size: npt.NDArray[np.floating] = step_size_parameter * np.ones(
            (init_frame.points.shape[0] - 2, 2)
        )

        last_grad: Optional[npt.NDArray[np.floating]] = None
        last_subgoals: Optional[npt.NDArray[np.floating]] = None

        cost_list = []
        result_list = [init_frame]
        frame = init_frame
        frame.update_times(self.boat_polar)

        assert init_frame.valid or only_attempt

        last_cost: Optional[np.floating] = None
        for _ in range(optimization_parameters.n_iter_grad):
            cost, grad = cast(Tuple[np.floating, npt.NDArray[np.floating]], self.cost_model.gradients(frame))
            if optimization_parameters.verbose:
                print(
                    f" [C] cost {cost} at stepsize {step_size} with gradient <{grad}> \n subgoals {last_subgoals}"
                )
            if self._heading is not None:
                # project the gradient of the first subgoal onto the current heading
                grad[0, :] = self._heading * np.clip(
                    np.sum(self._heading * grad[0, :]),
                    -10,
                    min(10.0, linalg.norm((frame.points[1, :] - self.position) / step_size[0, :])),
                )
            frame.update_times(self.boat_polar)

            frame_evaluated, _ = self.cost_model.evaluate(frame)

            # step size adaptation:
            if optimization_parameters.adaptive_step_size and last_cost is not None:
                if cost - last_cost > 1e-6 or not frame_evaluated.valid:
                    # the last step made things worse: backtrack and shrink the step size
                    result_list.append(frame)
                    cost_list.append(cost)
                    step_size /= optimization_parameters.adaptive_data_rate_general_regulization_factor
                    assert last_grad is not None
                    step_size[
                        grad * last_grad < 0
                    ] /= optimization_parameters.adaptive_data_rate_penalty_on_loss_sign_change
                    assert last_subgoals is not None
                    new_subs = cast(
                        npt.NDArray[np.floating],
                        last_subgoals
                        - np.concatenate(
                            (
                                np.zeros((1, 2)),
                                step_size * np.clip(last_grad, -self._grad_lim, self._grad_lim),
                                np.zeros((1, 2)),
                            ),
                            axis=0,
                        ),
                    )
                    frame = TimingFrame(CartesianRoute.from_numpy(new_subs))  # type: ignore
                    frame.update_times(self.boat_polar)
                    continue

                assert last_grad is not None

                step_size[
                    grad * last_grad < 0
                ] /= optimization_parameters.adaptive_data_rate_general_regulization_factor
                step_size[
                    grad * last_grad > 0
                ] *= optimization_parameters.adaptive_data_rate_general_regulization_factor

            if (step_size < 1e-6).all():
                break
            last_cost = cost
            cost_list.append(cost)
            last_subgoals = frame.points
            result_list.append(frame)
            last_grad = grad
            new_subs = frame.points - np.concatenate(
                (
                    np.zeros((1, 2)),
                    step_size * np.clip(grad, -self._grad_lim, self._grad_lim),
                    np.zeros((1, 2)),
                ),
                axis=0,
            )

            assert not (np.isnan(new_subs).any()) or only_attempt, grad

            frame = TimingFrame(CartesianRoute.from_numpy(new_subs))  # type: ignore
            frame.update_times(self.boat_polar)

        frame_evaluated, actual_cost = self.cost_model.evaluate(frame)
        cost = np.float64(actual_cost)

        if (last_cost is not None and cost - last_cost > 0) or not frame_evaluated.valid:  # got worse
            # fall back to the last committed set of subgoals
            assert last_subgoals is not None
            frame = TimingFrame(CartesianRoute.from_numpy(last_subgoals))  # type: ignore
            frame.update_times(self.boat_polar)
            frame_evaluated, cost = self.cost_model.evaluate(frame)
            if not only_attempt:
                assert frame_evaluated.valid
            cost = np.float64(cost)

        frame_evaluated.actual_cost = cost
        return frame_evaluated, result_list
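
    # Step-size adaptation sketch (Rprop-like sign heuristic): with the factor
    # r = adaptive_data_rate_general_regulization_factor, each step-size component grows
    # by r while its gradient keeps the same sign and shrinks by r when the sign flips,
    # so oscillating coordinates are damped while consistent descent directions speed up.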

    # --- SOLUTION SPACE EXPLORATION --- #

    @dataclass
    class _RRTNode:
        """Class describing a solution node for the initial guess."""

        position: Point
        time: float
        distance: float
        angle: Optional[float]
        points: List
        node_identifier: int

    def _sample_tree_init(
        self, goal: Point, parameters: EstimationParameters = EstimationParameters()
    ) -> Optional[TimingFrame]:
        """Initial guessing via RRT-style tree sampling.

        Args:
            goal: shapely Point to guess towards
            parameters: bundle of additional parameters

        Returns:
            an initial TimingFrame guess, or None if no TimingFrame was found
        """

        distance_to_goal = goal.distance(self.position)

        nodes = [
            self._RRTNode(
                position=self.position,
                time=0,
                distance=distance_to_goal,
                angle=None,
                points=[],
                node_identifier=0,
            )
        ]
        segments = []

        for _ in range(parameters.max_count):

            new_point = self._sample_new_node(goal, p_goal=parameters.p_goal)
            new_point, node = self._select_expansion_node(
                new_point, nodes, max_len=distance_to_goal * parameters.max_len_relative
            )

            new_frame = TimingFrame(
                CartesianRoute.from_numpy(
                    np.concatenate(
                        [utils.shapely_point_to_ndarray(self.position)[None, :]]
                        + [utils.shapely_point_to_ndarray(p)[None, :] for p in node.points]
                        + [utils.shapely_point_to_ndarray(new_point)[None, :]]
                    )
                )
            )

            new_frame.update_times(self.boat_polar)

            new_frame_evaluated, _ = self.cost_model.evaluate(new_frame)

            if new_frame_evaluated.valid:
                segments.append(new_frame_evaluated)
                new_distance = goal.distance(new_point)
                if new_distance < 0.1:
                    return TimingFrame(
                        CartesianRoute.from_numpy(
                            np.concatenate(
                                [utils.shapely_point_to_ndarray(self.position)[None, :]]
                                + [utils.shapely_point_to_ndarray(p)[None, :] for p in node.points]
                                + [utils.shapely_point_to_ndarray(new_point)[None, :]]
                            )
                        )
                    )

                new_node = self._RRTNode(
                    position=new_point,
                    time=new_frame_evaluated.times[-1],
                    distance=new_distance,
                    angle=None,
                    points=node.points + [new_point],
                    node_identifier=0,
                )
                nodes.append(new_node)

        # the sampling budget (max_count) was exceeded without reaching the goal
        return None

    def _sample_new_node(self, goal: Point, p_dir_goal=0.5, p_goal=0.2, std=None) -> Point:
        """Samples a new :class:`_RRTNode`.

        Args:
            goal: shapely point to develop towards
            p_dir_goal: probability to choose the goal direction as the next direction
            p_goal: probability to choose the goal as the next node
            std: standard deviation of the Gaussian sampling around the chosen mean
        """
        if np.random.rand() < p_goal:  # sample the goal position
            return goal
        if np.random.rand() < p_dir_goal:
            mean = goal
        else:
            mean = self.position
        if std is None:
            dist = goal.distance(self.position)
            std = dist / 1.5
        return Point(mean.x + std * np.random.randn(), mean.y + std * np.random.randn())

    def _select_expansion_node(
        self, new_point: Point, nodes: List[_RRTNode], max_len: float
    ) -> Tuple[Point, _RRTNode]:
        """Selects a node to expand towards. Takes the :attr:`wind_angle` into account.

        Args:
            new_point: shapely point describing the relative movement towards the goal taken in the next step
            nodes: list of :class:`_RRTNode`\\ s which have already been selected in the past
            max_len: the maximum absolute length to walk towards the goal

        Returns:
            A new point to expand to and the corresponding node. This point now contains the absolute
            positional information of the next point.
        """

        def distance_metric(nodes_to_compare: List[Optimizer._RRTNode], point: Point) -> List[float]:
            """A vectorized version of point.distance().

            One could have solved this with np.vectorize(), but this is more declarative.

            Args:
                nodes_to_compare: nodes to apply the metric to
                point: point to measure towards

            Returns:
                distances by the metric
            """
            return [point.distance(n.position) for n in nodes_to_compare]

        # calculate the distance from each existing node to the candidate point and derive
        # the probabilities of progressing from them

        distances = distance_metric(nodes, new_point)

        probabilities = np.nan_to_num(softmax(-10 / np.amin(distances) * np.array(distances)))

        exp_node: Optimizer._RRTNode = np.random.choice(nodes, p=probabilities)  # type: ignore

        delta_new_exp = utils.shapely_point_to_ndarray(new_point) - utils.shapely_point_to_ndarray(
            exp_node.position
        )
        distance_to_new, angle_toward_new = (
            linalg.norm(delta_new_exp),
            np.arctan2(delta_new_exp[1], delta_new_exp[0]),
        )

        wind_angle = self._wind_angle  # copy it for thread safety
        if wind_angle is not None:
            d_angle = (angle_toward_new - wind_angle + np.pi) % (2 * np.pi) - np.pi

            # avoid sailing too close to the wind: clamp the course away from the wind axis
            if d_angle > np.pi * 5 / 6.0:
                angle_toward_new = wind_angle + np.pi * 3 / 4.0
            elif d_angle < -np.pi * 5 / 6.0:
                angle_toward_new = wind_angle - np.pi * 3 / 4.0

        distance_to_new = min(distance_to_new, max_len)

        projected_array = np.array(
            [distance_to_new * np.cos(angle_toward_new), distance_to_new * np.sin(angle_toward_new)]
        )

        new_point_clipped = Point(
            projected_array[0] + exp_node.position.x, projected_array[1] + exp_node.position.y
        )
        return new_point_clipped, exp_node
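
    # Selection sketch: with distances d_i from the candidate point to the existing nodes,
    # the expansion node is drawn with probability softmax(-10 * d_i / min(d)), i.e. close
    # nodes are chosen almost greedily while distant ones retain a small chance.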
    # --- ROUTE CONSTRUCTION --- #

    def _get_intersecting_obstacle_ids(
        self, route: shapely.geometry.LineString
    ) -> List[Tuple[int, Obstacle]]:
        """Collects the indices and objects of all obstacles whose shape intersects the given route."""
        to_return = []
        for i, ob in enumerate(self.cost_model.obstacles.values()):
            if ob.shape.intersects(route):
                to_return.append((i, ob))
        return to_return
    def _tangent_route_on_obstacle(
        self, route_seg: shapely.geometry.LineString, obstacle_index: int, scale: float = 1
    ) -> List[CartesianRoute]:
        """Constructs routes that follow the (slightly scaled) boundary of an intersected obstacle."""

        assert 0 < scale < 2

        obstacle = list(self.cost_model.obstacles.values())[obstacle_index]
        distances = self.cost_model.distance_matrix[obstacle_index]
        center = obstacle.shape.centroid

        min_distance = np.min(distances[np.nonzero(distances)], initial=np.inf)
        if min_distance == np.inf:
            min_distance = 0
        max_distance_from_center = max(ver.distance(center) for ver in obstacle.shape.locations)

        # enlarge the boundary so the route keeps a margin around the obstacle
        scale = scale * (min_distance / max_distance_from_center + 1)
        intersection = cast(shapely.geometry.LineString, obstacle.shape.intersection(route_seg))
        boundary_paths: List[CartesianRoute] = []
        for poly in shapely.ops.split(obstacle.shape, intersection):
            poly = cast(shapely.geometry.Polygon, poly)
            boundary = poly.exterior.difference(intersection)
            if isinstance(boundary, shapely.geometry.multilinestring.MultiLineString):
                boundary = shapely.ops.linemerge(boundary)
            boundary = cast(shapely.geometry.linestring.LineString, boundary)
            # noinspection PyUnresolvedReferences
            boundary = shapely.affinity.scale(boundary, xfact=scale, yfact=scale, zfact=scale, origin=center)
            line_array = np.array(list(boundary.coords))

            # both directions around the obstacle are viable candidates
            boundary_paths.append(CartesianRoute.from_numpy(line_array))
            boundary_paths.append(CartesianRoute.from_numpy(line_array[::-1]))

        return boundary_paths
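The geometric core of this detour construction can be tried out with shapely alone. A minimal sketch, assuming a rectangular obstacle, a straight route through it, and shapely 1.x iteration over split results; the method above additionally scales the boundary and wraps the result in :class:`CartesianRoute`\\s.

import shapely.ops
from shapely.geometry import LineString, Polygon

obstacle = Polygon([(1, -1), (1, 1), (3, 1), (3, -1)])
route = LineString([(0, 0), (4, 0)])
intersection = obstacle.intersection(route)

for half in shapely.ops.split(obstacle, intersection):
    detour = half.exterior.difference(intersection)  # boundary of one half, minus the crossing
    if detour.geom_type == "MultiLineString":
        detour = shapely.ops.linemerge(detour)
    print(list(detour.coords))  # waypoints around one side of the obstacle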
    def _construct_init(
        self, goal: Point, parameters: EstimationParameters = EstimationParameters()
    ) -> Optional[List[TimingFrame]]:
        """Constructs initial route candidates from the current position to a goal.

        Args:
            goal: shapely point to reach
            parameters: estimation parameters for the construction

        Returns:
            A list of initial :class:`TimingFrame` candidates
        """

        subgoals = np.concatenate(
            (
                utils.shapely_point_to_ndarray(self.position)[None, :],
                utils.shapely_point_to_ndarray(goal)[None, :],
            ),
            axis=0,
        )

        initial_frame: TimingFrame = TimingFrame(CartesianRoute.from_numpy(subgoals))
        initial_frame.update_times(self.boat_polar)
        initial_frame_evaluated: EvaluatedTimingFrame = self.cost_model.evaluate(initial_frame)[0]

        collision_times = initial_frame_evaluated.collision_times

        times = list(collision_times.values())
        collisions_in_order = np.array(list(np.argsort(np.array(times))))
        collisions_in_order = collisions_in_order[: np.sum(np.isfinite(np.array(times)))]

        frame_parts = {
            i: self._tangent_route_on_obstacle(initial_frame.route, i, scale=1)
            for i, intersecting_obstacle in self._get_intersecting_obstacle_ids(initial_frame.route)
        }

        if len(frame_parts) == 0:
            # no obstacle in the way: return an almost straight route whose interior points
            # are slightly perturbed to the left and right
            route_vec = utils.shapely_point_to_ndarray(goal) - utils.shapely_point_to_ndarray(self.position)
            route_vect_r = route_vec + np.array([-0.01, 0])
            route_vect_l = route_vec + np.array([0.01, 0])
            simple_route = np.concatenate(
                (
                    utils.shapely_point_to_ndarray(self.position)[None, :],
                    utils.shapely_point_to_ndarray(self.position)[None, :] + 0.2 * route_vect_l[None, :],
                    utils.shapely_point_to_ndarray(self.position)[None, :] + 0.4 * route_vect_r[None, :],
                    utils.shapely_point_to_ndarray(self.position)[None, :] + 0.6 * route_vect_l[None, :],
                    utils.shapely_point_to_ndarray(self.position)[None, :] + 0.8 * route_vect_r[None, :],
                    utils.shapely_point_to_ndarray(goal)[None, :],
                ),
                axis=0,
            )

            simple_frame = TimingFrame(CartesianRoute.from_numpy(simple_route))

            return [simple_frame]

        # frame_parts is non-empty: combine the boundary detours of all obstacles hit,
        # in the order of their collision times
        frames_on_bench = []
        for obs_already_finished, obs_ind in enumerate(collisions_in_order):
            if obs_already_finished == 0:
                for first_part in frame_parts[obs_ind]:
                    frames_on_bench.append(TimingFrame(first_part))
            else:
                temp = []
                for obstacle_hull in frame_parts[obs_ind]:
                    for old_frame in frames_on_bench:
                        temp.append(old_frame.append(obstacle_hull))
                frames_on_bench = temp

        constructed_route_arrays = [
            np.concatenate(
                (
                    utils.shapely_point_to_ndarray(self.position)[None, :],
                    f.route.to_numpy(),
                    utils.shapely_point_to_ndarray(goal)[None, :],
                ),
                axis=0,
            )
            for f in frames_on_bench
        ]
        constructed_routes: List[TimingFrame] = [
            TimingFrame(CartesianRoute.from_numpy(array)) for array in constructed_route_arrays
        ]
        return constructed_routes
172
pyrate/pyrate/plan/nearplanner/polar_model.py
Normal file
172
pyrate/pyrate/plan/nearplanner/polar_model.py
Normal file
@ -0,0 +1,172 @@
"""Naive polar model code used in the optimizer and in the cost model."""

# Dataclass Support
from abc import ABC
from abc import abstractmethod

# Static Typing
from typing import cast

from numpy.typing import NDArray

# Scientific Computing
import numpy as np

from scipy.interpolate import UnivariateSpline


class BoatSpeedPolar(ABC):
    """An exemplary boat speed polar.

    It provides the wind speed relation based on the boat's speed polar diagram
    (i.e. the boat speed as a function of wind direction and strength).
    An example can be seen `here <https://en.wikipedia.org/wiki/Polar_diagram_(sailing)>`_.

    For the future: improve the simulation capabilities or include an interface to a simulation node.
    """

    def __init__(self) -> None:
        # sampled support points of the polar diagram, interpolated by a spline below
        self.values = {"alpha": [np.pi * i / 5.0 for i in range(6)], "v": [0.9, 1.6, 1.5, 1.0, 0, 0]}

        alphas = np.array(self.values["alpha"][:-1] + [0.85 * np.pi] + [self.values["alpha"][-1]])
        self.alphas = np.concatenate((np.flipud(-alphas[1:-1]), alphas, 2 * np.pi + np.flipud(-alphas[1:-1])))

        speeds = np.array(self.values["v"][:-1] + [-0.04] + [self.values["v"][-1]])
        self.speeds = np.concatenate((np.flipud(speeds[1:-1]), speeds, np.flipud(speeds[1:-1])))
        self.speed_polar = UnivariateSpline(self.alphas, self.speeds, s=0)

    @abstractmethod
    def speed(self, angles: NDArray[np.floating]) -> NDArray[np.floating]:
        """Returns the simulated speed of the boat at given angles.

        Args:
            angles: angles to simulate the speed at

        Returns:
            simulated speed
        """

    @abstractmethod
    def speed_grad(self, angles: NDArray[np.floating]) -> NDArray[np.floating]:
        """Returns the gradient at a given array of angles.

        Args:
            angles: angles to simulate the speed at

        Returns:
            gradient of the spline model to use in the general cost gradient calculation
        """

    def _speed(
        self, angle: NDArray[np.floating], wind_speed: np.floating, wind_direction: np.floating
    ) -> NDArray[np.floating]:
        """Returns the boat speed at a given heading, wind speed and wind angle.

        Spline interpolation regarding the angle and a linear model for the wind speed.

        Args:
            angle: wind angle to evaluate the spline model on
            wind_speed: wind speed to evaluate the spline model on
            wind_direction: wind direction to evaluate the spline model on

        Returns:
            predicted boat speed under the given circumstances
        """
        max_speed = 15
        speed_clipped = np.clip(wind_speed, np.float64(0), np.float64(max_speed))
        return np.multiply(
            self._spline_eval(angle - wind_direction), 1 - (max_speed - speed_clipped) / max_speed
        )

    def _speed_grad(
        self, angle: NDArray[np.floating], wind_speed: np.floating, wind_direction: np.floating
    ) -> NDArray[np.floating]:
        """Returns the derivative of the boat speed with respect to the angle.

        Evaluated at the given heading, wind speed and wind angle; spline interpolation
        regarding the angle and a linear model for the wind speed.

        Args:
            angle: angle to determine the speed derivative at
            wind_speed: wind speed to determine the speed derivative at
            wind_direction: wind direction to determine the speed derivative at
        """
        max_speed = 15
        speed_clipped = np.clip(wind_speed, np.float64(0), np.float64(max_speed))
        return np.multiply(
            self._spline_der(angle - np.float32(wind_direction)), 1 - (max_speed - speed_clipped) / max_speed
        )

    def _spline_eval(self, angle: NDArray[np.floating]) -> NDArray[np.floating]:
        """Evaluates the spline at a given angle.

        Args:
            angle: angle to evaluate the spline at

        Returns:
            spline evaluation at the given angle
        """
        ang = np.add(angle, np.pi) % (2 * np.pi) - np.pi

        return cast(NDArray[np.floating], self.speed_polar(np.abs(ang)))

    def _spline_der(self, angle: NDArray[np.floating]) -> NDArray[np.floating]:
        """Evaluates the spline derivative at a given angle.

        Args:
            angle: angle to evaluate the spline derivative at

        Returns:
            spline derivative at the given angle
        """
        ang = (angle + np.pi) % (2 * np.pi) - np.pi

        return cast(NDArray[np.floating], self.speed_polar(np.abs(ang), nu=1) * np.sign(ang))


class PolarModel(BoatSpeedPolar):
    """A very simple implementation of a polar model.

    Args:
        wind_speed: The speed of the wind, in m/s
        wind_direction: The direction from which the wind is blowing, in radians
        manoeuvre_time: approximate time each turning manoeuvre takes
    """

    def __init__(
        self, wind_speed: float = 0, wind_direction: float = 0, manoeuvre_time: float = 10.0
    ) -> None:
        super().__init__()

        self.wind_speed = np.float64(wind_speed)
        self.wind_direction = np.float64(wind_direction)
        self.manoeuvre_time = np.float64(manoeuvre_time)

    def speed(self, angles: NDArray[np.floating]) -> NDArray[np.floating]:
        """Returns the simulated speed of the boat at given angles.

        Args:
            angles: angles to simulate the speed at

        Returns:
            simulated speed
        """
        return self._speed(angles, self.wind_speed, self.wind_direction)

    def speed_grad(self, angles: NDArray[np.floating]) -> NDArray[np.floating]:
        """Returns the gradient at a given array of angles.

        Args:
            angles: angles to simulate the speed at

        Returns:
            gradient of the spline model to use in the general cost gradient calculation
        """
        return self._speed_grad(angles, self.wind_speed, self.wind_direction)
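A quick sanity check of the model as a sketch, assuming the package layout of this commit: headings at the support points reproduce the tabulated speeds, and the linear wind model scales them down with weaker wind.

import numpy as np
from pyrate.plan.nearplanner.polar_model import PolarModel

full_wind = PolarModel(wind_speed=15.0, wind_direction=0.0)  # full strength: spline values unscaled
print(full_wind.speed(np.array([np.pi / 5, 2 * np.pi / 5, np.pi])))  # -> roughly [1.6, 1.5, 0.0]

half_wind = PolarModel(wind_speed=7.5, wind_direction=0.0)  # the linear wind model halves all speeds
print(half_wind.speed(np.array([2 * np.pi / 5])))  # -> roughly [0.75]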
582
pyrate/pyrate/plan/nearplanner/timing_frame.py
Normal file
582
pyrate/pyrate/plan/nearplanner/timing_frame.py
Normal file
@ -0,0 +1,582 @@
"""Contains timing frames."""

import itertools

# Static Typing
from copy import copy
from typing import cast
from typing import Optional
from typing import Tuple
from typing import Union

import numpy.typing as npt

# Scientific Computing
import numpy as np

from scipy import linalg

import shapely.ops
from shapely.coords import CoordinateSequence
from shapely.geometry import LineString, Point

# Geometry
from pyrate.plan.geometry.location import CartesianLocation
from pyrate.plan.geometry.route import CartesianRoute

from . import utils

from .polar_model import PolarModel


class TimingFrame:
    """A wrapper class around CartesianRoutes under corresponding time and speed constraints.

    Args:
        route: The :class:`~pyrate.plan.geometry.route.CartesianRoute` to be wrapped
        start_time: Optional clock time in seconds the frame should start at
    """

    def __init__(self, route: CartesianRoute, start_time: float = 0):
        self.route = route

        self.identifier: int = 0
        self.simulated = False

        self._respect_manoeuvre = True

        self._start_time = start_time

        self._delta_times: npt.NDArray[np.floating] = np.array([0])
        self._scalar_speeds: npt.NDArray[np.floating] = np.array([0])
        self._times: npt.NDArray[np.floating] = np.arange(0, route.to_numpy().shape[0] - 1, dtype=np.float64)
        self._times_without_manoeuvre: npt.NDArray[np.floating] = self.times

        self._model: Optional[PolarModel] = None
        self._angles: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
        self._distances: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
        self._end_times: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
        self._directions: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
        self._start_times: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
        self._delta_positions: npt.NDArray[np.floating] = np.array([], dtype=np.float64)

        self._segment_points: npt.NDArray[np.floating] = utils.merge_numpy_array(
            self.route.to_numpy(), self.route.to_numpy()[:, :]
        )

        self._speeds: npt.NDArray[np.floating] = np.array([], dtype=np.float64)
        if not self._speeds.shape[0] == self.route.to_numpy().shape[0]:
            temp: npt.NDArray[np.floating] = route.to_numpy()
            self._speeds = cast(
                npt.NDArray[np.floating], np.append((temp[1:, :] - temp[:-1, :]), [[0, 0]], axis=0)
            )

    def __str__(self) -> str:
        return f"TimingFrame({self.route.to_numpy()})"
    def __call__(self, time_to_fetch: float) -> npt.NDArray[np.floating]:
        """Gives the position of the sailboat following this route at a given time.

        Args:
            time_to_fetch: point in time for the query

        Returns:
            Numpy array with cartesian coordinates (dim 2)
        """
        time = cast(np.floating, np.float32(time_to_fetch))

        if not self.simulated:
            # without simulated timings, interpolate linearly between the route points
            assert time >= self._times[0]
            assert time <= self._times[-1]

            index: int = int(np.argmax(self._times >= time) - 1)
            partition = (time - self._times[index]) / (self._times[index + 1] - self._times[index])
            if index == 0:
                before = utils.shapely_point_to_ndarray(self.position)
            else:
                before = cast(npt.NDArray[np.floating], self.route.to_numpy()[index - 1])

            return cast(
                npt.NDArray[np.floating],
                before + partition * (cast(npt.NDArray[np.floating], self.route.to_numpy()[index]) - before),
            )

        assert self.simulated

        assert self._angles is not None
        assert self._delta_positions is not None
        assert self._distances is not None
        assert self._directions is not None
        assert self._end_times is not None
        assert self._start_times is not None

        assert time >= self._start_times[0]
        assert time <= self._end_times[-1]

        index = int(np.argmax(self._start_times >= time) - 1)
        if not (self._start_times >= time).any():
            index = self._start_times.shape[0] - 1
        with np.errstate(divide="ignore", invalid="ignore"):
            partition = (time - self._start_times[index]) / (
                self._end_times[index] - self._start_times[index]
            )
        partition = 1.0 if partition > 1.0 else partition
        if index == 0:
            before = utils.shapely_point_to_ndarray(self.position)
        else:
            before = cast(npt.NDArray[np.floating], self.route.to_numpy()[index - 1])
        return cast(
            npt.NDArray[np.floating],
            before + partition * (cast(npt.NDArray[np.floating], self.route.to_numpy()[index]) - before),
        )
    @property
    def points(self) -> npt.NDArray[np.floating]:
        """Points of the route in a numpy array."""
        return cast(npt.NDArray[np.floating], self.route.to_numpy())

    @property
    def position(self) -> CartesianLocation:
        """First point of the route."""
        return CartesianLocation(east=self.points[0][0], north=self.points[0][1])

    @position.setter
    def position(self, value: CartesianLocation) -> None:
        tmp = self.points

        tmp[0][0] = value.east
        tmp[0][1] = value.north

        self.route = CartesianRoute.from_numpy(tmp)

    @property
    def start_time(self) -> float:
        """Start time of the :class:`TimingFrame`."""
        return self._start_time

    @property
    def speeds(self) -> npt.NDArray[np.floating]:
        """Speeds of the route segments in a numpy array."""
        return self._speeds

    @property
    def times(self) -> npt.NDArray[np.floating]:
        """Returns the timing of the route segments.

        After :meth:`update_times` has been called, this includes manoeuvre times.
        """
        return self._times

    @property
    def segment_points(self) -> npt.NDArray[np.floating]:
        """The points of each segment as a numpy array representation."""
        return self._segment_points

    @property
    def valid(self) -> bool:
        """``True`` iff the frame was already evaluated and no collision has been found."""
        return False  # Overridden in subclass

    @property
    def cost(self) -> float:
        """Cost of the route. This only describes the time needed to travel the route.

        Warning:
            Only accurate after :meth:`update_times` has been called
        """
        return float(self._times[-1])
    def update_times(self, boat_model: Optional[PolarModel] = None) -> float:
        """Updates the timing constraints of a :class:`TimingFrame` to adapt to a polar model.

        This fills in manoeuvre times, angles and speed information.

        Args:
            boat_model: boat model to adapt to

        Returns:
            time needed to reach the goal under the given constraints
        """
        if boat_model is not None:
            self._model = boat_model
        else:
            assert self._model

        self._delta_positions = cast(
            npt.NDArray[np.floating],
            self.route.to_numpy()[1:] - self.route.to_numpy()[:-1, :],
        )

        self._distances = cast(npt.NDArray[np.floating], linalg.norm(self._delta_positions, axis=1))

        with np.errstate(divide="ignore", invalid="ignore"):
            self._directions = cast(
                npt.NDArray[np.floating], self._delta_positions / self._distances[:, None]
            )
        self._directions[self._distances == 0, :] = 0

        self._angles = cast(
            npt.NDArray[np.floating], np.arctan2(self._delta_positions[:, 1], self._delta_positions[:, 0])
        )

        # apply the transformation to a right-hand y axis
        self._angles = np.vectorize(utils.transform_angles_leftx_to_righty, signature="()->()")(self._angles)

        self._scalar_speeds = cast(
            npt.NDArray[np.floating], np.clip(self._model.speed(self._angles), 1e-3, None).astype(np.float64)
        )

        self._delta_times = cast(
            npt.NDArray[np.floating], np.clip(self._distances / self._scalar_speeds, 0, None)
        )

        self._speeds = cast(npt.NDArray[np.floating], self._scalar_speeds[:, None] * self._directions)

        self._speeds[self._directions == 0] = 0

        self._times = np.concatenate((np.zeros(1), np.cumsum(self._delta_times)))
        self._times += self._start_time

        self._times_without_manoeuvre = self._times

        self._add_manoeuvre_time(self._angles)

        self.simulated = True

        return float(self.times[-1])
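As a sketch of how the pieces fit together, assuming the package layout of this commit: a frame is timed against a polar model and can then be queried for interpolated positions.

import numpy as np
from pyrate.plan.geometry.route import CartesianRoute
from pyrate.plan.nearplanner.polar_model import PolarModel
from pyrate.plan.nearplanner.timing_frame import TimingFrame

route = CartesianRoute.from_numpy(np.array([[0.0, 0.0], [100.0, 0.0], [100.0, 100.0]]))
frame = TimingFrame(route)

total_time = frame.update_times(PolarModel(wind_speed=10.0, wind_direction=0.0))
print(total_time)  # seconds to the goal, including the manoeuvre penalty at the turn
print(frame(total_time / 2))  # interpolated position halfway through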
    def prune(self, eps: float = 0.01) -> None:
        """Deletes subgoals with negligible direction changes to reduce the number of route points.

        Args:
            eps: angle difference threshold for deciding on the edge deletion (in radians)
        """
        delta_pos = self.points - np.concatenate(
            (utils.shapely_point_to_ndarray(self.position)[None, :], self.points[:-1, :]), axis=0
        )
        angles = np.arctan2(delta_pos[:, 1], delta_pos[:, 0])
        # calculate angle differences
        delta_ang = np.abs(angles[1:] - angles[:-1])
        # create a mask from the angle differences between the segments and the given threshold
        mask = np.append(delta_ang > eps, True)
        mask[0] = True
        mask[-1] = True

        self.route = CartesianRoute.from_numpy(self.route.to_numpy()[mask])
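A minimal sketch of the masking rule in isolation: collinear interior points fall below the angle threshold and are dropped, while endpoints are always kept.

import numpy as np

points = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [2.0, 1.0]])
delta = np.diff(points, axis=0)
segment_angles = np.arctan2(delta[:, 1], delta[:, 0])  # heading of each segment
mask = np.concatenate(([True], np.abs(np.diff(segment_angles)) > 0.01, [True]))
print(points[mask])  # -> [[0, 0], [2, 0], [2, 1]]: the collinear point at (1, 0) is gone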
    def cost_grad(
        self,
        other_cost_dtimes: Optional[npt.NDArray[np.floating]] = None,
        dcost_dpoints_ext: Optional[npt.NDArray[np.floating]] = None,
        dcost_dspeed: Optional[npt.NDArray[np.floating]] = None,
        time_cost: float = 2,
    ) -> npt.NDArray[np.floating]:
        """Calculates the cost gradient (derivative) of a :class:`TimingFrame` with respect to the subgoal timings.

        If other time gradients are given, returns the gradient with respect to all variables.

        Args:
            other_cost_dtimes: calculated gradient w.r.t. timings, of shape ``(number of segments, 2)``
            dcost_dpoints_ext: calculated gradient w.r.t. locations of obstacles, of shape ``(number of segments, 2)``
            dcost_dspeed: calculated gradient w.r.t. speeds of obstacles, of shape ``(number of segments, 2)``
            time_cost: coefficient for the cost per timing interval

        Returns: cost gradient of the :class:`TimingFrame` cumulated w.r.t. the subgoal timings, segment speeds,
            segment times and, optionally, additionally given obstacle gradients. This gradient is of the shape
            ``(number of segments, 2)``
        """

        grad = np.zeros(self._times.shape[0] - 1)

        grad[-1] = np.float64(time_cost)

        # handle the case that no partial gradients are given
        if other_cost_dtimes is None or dcost_dpoints_ext is None or dcost_dspeed is None:
            return self._gradients(grad)

        if self._respect_manoeuvre:
            dcost_dpoints = dcost_dpoints_ext[:-1:2] + dcost_dpoints_ext[1::2]
        else:
            dcost_dpoints = dcost_dpoints_ext

        # assure mypy and us that there are no overflows
        assert not np.isnan(grad + other_cost_dtimes[:-1]).any(), (grad, other_cost_dtimes[:-1])

        return self._gradients(grad + other_cost_dtimes[:-1], dcost_dspeed) + cast(
            npt.NDArray[np.floating], dcost_dpoints
        )

    def _gradients(
        self,
        dcost_dtimes: npt.NDArray[np.floating],
        dcost_speed: npt.NDArray[np.floating] = np.array([], dtype=np.float64),
    ) -> npt.NDArray[np.floating]:
        """Calculates the gradients of the time events with respect to the route edge coordinates.

        Also calculates the gradient of the vector speeds for each sequence.

        Args:
            dcost_dtimes: previously calculated gradient with respect to timings
            dcost_speed: previously calculated gradient with respect to the speeds of obstacles
                and the speeds on the route segments

        Returns: cost gradient of the :class:`TimingFrame` for time and speed, of the shape
            ``(number of segments, 2)``
        """

        if dcost_speed.size == 0:
            dcost_speed = np.zeros((dcost_dtimes.shape[0], 2))

        cost_grad_delta_times = np.flipud(np.cumsum(np.flipud(dcost_dtimes)))

        if not self._respect_manoeuvre:
            dcost_dtimes_basic = cost_grad_delta_times
        else:
            dcost_dtimes_basic = cost_grad_delta_times[::2]
            dcost_speed = dcost_speed[::2, :]

        assert not np.isnan(dcost_dtimes_basic).any(), dcost_dtimes

        dcost_ddist = dcost_dtimes_basic / self._scalar_speeds
        dcost_dscal_speed = -dcost_dtimes_basic * self._distances / self._scalar_speeds ** 2
        dcost_dscal_speed += np.einsum("ij, ij-> i", dcost_speed, self._directions)

        dcost_dscal_speed = np.nan_to_num(dcost_dscal_speed)

        dcost_ddirections = dcost_speed * self._scalar_speeds[:, None]

        assert self._model is not None
        dcost_dang = dcost_dscal_speed * self._model.speed_grad(self._angles)
        if self._respect_manoeuvre:
            dcost_dang += self._manoeuvre_time(self._angles, cost_grad_delta_times[1::2])[1]

        dcost_ddist = np.nan_to_num(dcost_ddist)

        assert not np.isnan(dcost_ddist).any(), self._scalar_speeds
        assert not np.isnan(dcost_dang).any(), (dcost_dscal_speed, dcost_dang)
        assert not np.isnan(dcost_ddirections).any(), dcost_ddirections

        return self._transpose_gradients_to_polar(dcost_ddist, dcost_dang, dcost_ddirections)
    def _add_manoeuvre_time(self, angles: npt.NDArray[np.floating]) -> None:
        """Calculates and appends the manoeuvre times to the timing vector of the :class:`TimingFrame`.

        Args:
            angles: angles of the :class:`TimingFrame`, of shape ``(number of segments - 1, )``
        """

        delta_times_manoeuvre = self._manoeuvre_time(angles)
        times_manoeuvre = np.cumsum(delta_times_manoeuvre)

        self._start_times = self._times_without_manoeuvre[:-1] + np.concatenate(
            (np.zeros(1), times_manoeuvre)
        )
        self._end_times = self._times_without_manoeuvre[1:] + np.concatenate((np.zeros(1), times_manoeuvre))

        self._times = utils.merge_numpy_array(self._start_times, self._end_times)
        self._speeds = utils.merge_numpy_array(self._speeds, np.zeros(self._speeds.shape)[:-1, :])
        self._segment_points = utils.merge_numpy_array(self.route.to_numpy()[1:], self.route.to_numpy()[1:-1])

    def _manoeuvre_time(
        self,
        angles,
        dcost_dtimes: npt.NDArray[np.floating] = np.array([], dtype=np.float64),
        time_loss: Optional[np.floating] = None,
        c_angle: float = 10.0,
    ) -> Union[npt.NDArray[np.floating], Tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]]:
        """Calculates the time needed for the manoeuvre at each subgoal.

        If the cost gradient with respect to time is given, also returns the partial cost derivative
        with regard to the manoeuvre simulation.

        Args:
            angles: array of turning angles at each subgoal, of shape ``(number of route segments, )``
            dcost_dtimes: previously calculated cost derivative w.r.t. time ``(number of route segments, )``
            time_loss: time loss coefficient to be applied at each turn
            c_angle: a coefficient for scaling the angle

        Returns:
            Calculated additional time needed for each turn as an array of shape ``(number of route
            segments, )``. If ``dcost_dtimes`` is given, also returns an additional gradient of shape
            ``(number of route segments, )`` w.r.t. the angles. The time penalty is modelled via an
            exponential in the squared turning angle.
        """

        if time_loss is None:
            assert self._model is not None
            time_loss = self._model.manoeuvre_time
        d_angle = angles[1:] - angles[:-1]

        d_angle = -np.pi + ((d_angle + np.pi) % (2 * np.pi))

        scale = c_angle / 180.0 * np.pi

        times = time_loss * (1 - np.exp(-(d_angle ** 2) / scale ** 2))

        if dcost_dtimes.shape[0] == 0:
            return cast(npt.NDArray[np.floating], times)

        d_dang = time_loss * np.exp(-(d_angle ** 2) / scale ** 2) / scale ** 2 * dcost_dtimes * 2 * d_angle
        return times, np.append(-d_dang, 0) + np.concatenate((np.zeros(1), d_dang))
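With the defaults above, the penalty for a turn of angle d is time_loss * (1 - exp(-d^2 / scale^2)) with scale = 10 degrees, so it saturates quickly; a numeric sketch:

import numpy as np

time_loss, scale = 10.0, np.deg2rad(10.0)
for deg in (1, 5, 10, 45, 90):
    d_angle = np.deg2rad(deg)
    print(deg, time_loss * (1 - np.exp(-(d_angle ** 2) / scale ** 2)))
# -> ~0.1 s at 1 degree, ~2.2 s at 5, ~6.3 s at 10, and ~10 s (the full time_loss) from 45 on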
    def _transpose_gradients_to_polar(
        self,
        dcost_ddist: npt.NDArray[np.floating],
        dcost_dang: npt.NDArray[np.floating],
        dcost_ddirections: npt.NDArray[np.floating],
    ) -> npt.NDArray[np.floating]:
        """Calculates the cumulated gradient for the cartesian-to-polar transform according to the chain rule.

        Args:
            dcost_ddist: cost gradient w.r.t. distance vectors, of shape ``(number of route segments, 2)``
            dcost_dang: cost gradient w.r.t. segment angles ``(number of route segments, )``
            dcost_ddirections: cost gradient w.r.t. direction vectors ``(number of route segments, 2)``

        Returns:
            transposed and cumulated gradient of shape ``(number of route segments, 2)``
        """

        with np.errstate(divide="ignore", invalid="ignore"):
            addition = -(
                np.einsum("ij, ij-> i", dcost_ddirections, self._delta_positions) / self._distances ** 2
            )
        addition = np.nan_to_num(addition)

        assert not np.isnan(addition).any(), addition

        dcost_ddist += addition

        # assert non-NaN values
        assert not np.isnan(dcost_ddirections).any(), dcost_ddirections
        assert not np.isnan(dcost_ddist).any(), (dcost_ddist, self._distances)
        assert not np.isnan(dcost_dang).any(), dcost_dang

        grad_delta: npt.NDArray[np.floating] = (
            dcost_ddirections / self._distances[:, None]
            + (dcost_ddist / self._distances)[:, None] * self._delta_positions
            + np.concatenate((-self._delta_positions[:, 1:2], self._delta_positions[:, 0:1]), axis=1)
            * np.clip((dcost_dang / self._distances ** 2), None, 1e6)[:, None]
        )

        # zero out degenerate segments because the first manoeuvre shouldn't be moved
        grad_delta[self._distances == 0, :] = 0.0

        assert not np.isnan(grad_delta).any(), grad_delta

        grad = -grad_delta[1:, :] + grad_delta[:-1, :]

        assert not np.isnan(grad).any(), (grad, grad_delta)

        return cast(npt.NDArray[np.floating], grad)
    @staticmethod
    def detect_crossing(linestring: LineString) -> Optional[CartesianLocation]:
        """Detects the first self-crossing of the given linestring, if any."""
        segments = list(map(LineString, zip(linestring.coords[:-1], linestring.coords[1:])))

        for seg1, seg2 in itertools.combinations(segments, 2):
            if seg1.coords[0] == seg2.coords[0]:
                return CartesianLocation.from_shapely(Point(seg1.coords[0]))
            if seg1.crosses(seg2):
                return seg1.intersection(seg2)
        # todo (BEN) refactor

        return None

    def remove_single_cycles(self) -> "TimingFrame":
        """Removes single cycles (self-crossings) from the wrapped route."""

        simple_route = []
        route_to_work = self.route
        crossing = TimingFrame.detect_crossing(route_to_work)

        # while there are crossings
        while crossing:
            line_strings_start, remaining_route = list(shapely.ops.split(route_to_work, crossing))
            # keep the part before the self-intersection
            simple_route.append(line_strings_start)

            # if the last point / line is the line / point being crossed, there is no element after it
            if len(remaining_route.coords) <= 2:
                final_route_elements = []
                for part in simple_route:
                    final_route_elements += list(part.coords)
                final_route_elements += remaining_route.coords[-1:]
                linestring_without_cycle = LineString(final_route_elements)

                return TimingFrame(CartesianRoute.from_shapely(linestring_without_cycle))
            # todo (BEN) Refactor

            remaining_route = LineString(remaining_route.coords[1:][::-1])  # start from the next coordinates
            buffed_crossing = crossing.buffer(0.001)
            line_string_end = shapely.ops.split(remaining_route, buffed_crossing)[0]
            line_string_end = LineString(line_string_end.coords[::-1])

            if len(line_string_end.coords) <= 2:
                final_route_elements = []
                for part in simple_route:
                    final_route_elements += list(part.coords)
                final_route_elements += line_string_end.coords[-1:]
                linestring_without_cycle = LineString(final_route_elements)
                return TimingFrame(CartesianRoute.from_shapely(linestring_without_cycle))

            line_string_end = LineString(line_string_end.coords[1:])

            # repeat
            crossing = TimingFrame.detect_crossing(line_string_end)
            route_to_work = line_string_end

        # if there are no more cycles, keep the rest
        simple_route.append(route_to_work)

        final_route_elements = []
        for part in simple_route:
            final_route_elements += list(part.coords)
        linestring_without_cycle = LineString(final_route_elements)
        return TimingFrame(CartesianRoute.from_shapely(linestring_without_cycle))

    def append(self, route: CartesianRoute) -> "TimingFrame":
        """Returns a new :class:`TimingFrame` with the given route appended to this one's."""
        merged = np.concatenate((self.route.to_numpy(), route.to_numpy()), axis=0)
        return TimingFrame(CartesianRoute.from_numpy(merged), start_time=self.start_time)

    def prepend(self, route: CartesianRoute) -> "TimingFrame":
        """Returns a new :class:`TimingFrame` with the given route prepended to this one's."""
        merged = np.concatenate((route.to_numpy(), self.route.to_numpy()), axis=0)
        return TimingFrame(CartesianRoute.from_numpy(merged), start_time=self.start_time)

    def inject(self, ind: int, route: CartesianRoute) -> "TimingFrame":
        """Returns a new :class:`TimingFrame` with this route injected into the given one at index ``ind``."""
        merged = np.concatenate((route.to_numpy()[: ind - 1, :], self.route.to_numpy()), axis=0)
        merged = np.concatenate((merged, route.to_numpy()[ind - 1 :, :]), axis=0)
        return TimingFrame(CartesianRoute.from_numpy(merged), start_time=self.start_time)
76
pyrate/pyrate/plan/nearplanner/utils.py
Normal file
76
pyrate/pyrate/plan/nearplanner/utils.py
Normal file
@ -0,0 +1,76 @@
"""Utilities module for the planning backend."""

# Scientific Computing
import numpy as np
import numpy.typing as npt

# Geometry
from shapely.geometry import Point


def angle_between(vector_a: np.ndarray, vector_b: np.ndarray) -> float:
    """Determines the right-hand angle between two numpy arrays.

    Args:
        vector_a: the first vector, of shape ``(spacial_dimension, )``
        vector_b: the second vector, of shape ``(spacial_dimension, )``

    Returns:
        the right-hand angle encompassed between ``vector_a`` and ``vector_b``, in degrees
    """

    ang1 = np.arctan2(*vector_a[::-1])
    ang2 = np.arctan2(*vector_b[::-1])
    return float(np.rad2deg((ang1 - ang2) % (2 * np.pi)))


def transform_angles_leftx_to_righty(angle: np.float64) -> np.float64:
    """Transforms a left-hand angle measured from the x axis to a right-hand angle measured from the y axis (north).

    Args:
        angle: left-hand angle measured from the x axis, in [-pi, pi]

    Returns:
        right-hand angle referenced from the y axis, in [-pi, pi]
    """
    mapped_angle = 0.5 * np.pi - angle
    if mapped_angle > np.pi:  # wrap back into the documented [-pi, pi] range
        mapped_angle = -(2 * np.pi - mapped_angle)
    return mapped_angle
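As a sketch, assuming the package layout of this commit, the convention change maps math angles (counterclockwise from east) to bearings (clockwise from north):

import numpy as np
from pyrate.plan.nearplanner.utils import transform_angles_leftx_to_righty

for angle in (0.0, np.pi / 2, np.pi, -np.pi / 2):
    print(transform_angles_leftx_to_righty(np.float64(angle)))
# east -> pi/2, north -> 0.0, west -> -pi/2, south -> pi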
def merge_numpy_array(
    array_a: npt.NDArray[np.floating], array_b: npt.NDArray[np.floating]
) -> npt.NDArray[np.floating]:
    """Merges two numpy arrays into one in alternating order along the first axis.

    Args:
        array_a: numpy array a to merge
        array_b: numpy array b to merge

    Returns:
        merged numpy array
    """
    if len(array_b) == 0:
        return array_a
    shape = array_a.shape[0] + array_b.shape[0]
    res = np.zeros([shape] + list(array_a.shape[1:]), dtype=np.float64)
    res[::2] = array_a
    res[1::2] = array_b

    return res
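For instance, interleaving segment start and end times, as :meth:`TimingFrame._add_manoeuvre_time` does above:

import numpy as np
from pyrate.plan.nearplanner.utils import merge_numpy_array

starts = np.array([0.0, 10.0, 25.0])
ends = np.array([8.0, 22.0])
print(merge_numpy_array(starts, ends))  # -> [ 0.  8. 10. 22. 25.]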
def shapely_point_to_ndarray(point: Point) -> npt.NDArray[np.floating]:
    """Projects a :mod:`shapely`/:mod:`pyrate.plan.geometry` point onto a numpy array.

    Args:
        point: point to be projected

    Returns:
        The numpy-array representation of the point in ``(x, y)`` format
    """

    return np.array([point.x, point.y], dtype=np.float64)
0
pyrate/pyrate/py.typed
Normal file
0
pyrate/pyrate/py.typed
Normal file
1
pyrate/pyrate/sense/__init__.py
Normal file
1
pyrate/pyrate/sense/__init__.py
Normal file
@ -0,0 +1 @@
"""This package provides methods for state estimation, visual perception, mapping and similar."""
9
pyrate/pyrate/sense/filters/__init__.py
Normal file
9
pyrate/pyrate/sense/filters/__init__.py
Normal file
@ -0,0 +1,9 @@
"""This package provides filters for state estimations based on noisy measurements."""

from .extended import ExtendedKalman
from .extended_gmphd import ExtendedGaussianMixturePHD
from .gmphd import GaussianMixturePHD
from .kalman import Kalman
from .unscented import UnscentedKalman

__all__ = ["Kalman", "ExtendedKalman", "UnscentedKalman", "GaussianMixturePHD", "ExtendedGaussianMixturePHD"]
165
pyrate/pyrate/sense/filters/extended.py
Normal file
165
pyrate/pyrate/sense/filters/extended.py
Normal file
@ -0,0 +1,165 @@
"""This module implements the extended Kalman filter for non-linear state estimation."""

# Standard library
from copy import deepcopy

# Typing
from typing import Callable
from typing import Union

# Mathematics
from numpy.linalg import inv
from numpy import ndarray

# Data modelling
from pandas import concat
from pandas import DataFrame

# Pyrate
from pyrate.common.math import Gaussian


class ExtendedKalman:

    """The extended Kalman filter for non-linear state estimation.

    This filter behaves similarly to the standard Kalman filter, but utilizes non-linear
    models and their Jacobian matrices to estimate state variables whose process and/or relation to
    the measured properties cannot be accurately described by a linear model.

    Examples:
        Start by importing the necessary numpy functions.

        >>> from numpy import array
        >>> from numpy import cos
        >>> from numpy import eye
        >>> from numpy import sin
        >>> from numpy import vstack

        Set up the model. In this case, we track a sine wave.
        Thereby we choose the transition model and its Jacobian, as well as the linear
        measurement model, like so.

        >>> f = lambda x: sin(x)
        >>> F = lambda x: array([cos(x)])
        >>> H = lambda x: array([[1.0]])
        >>> h = lambda x: x

        Furthermore, we assume the following noise on the process and measurements.

        >>> Q = eye(1)
        >>> R = eye(1)

        Our initial belief is at 0.

        >>> mean = vstack([0.0])
        >>> covariance = array([[1.0]])
        >>> estimate = Gaussian(mean, covariance)

        Then, we initialize the filter.

        >>> kalman = ExtendedKalman(F, f, estimate, H, h, Q, R)

        We first predict with the provided model and then correct the prediction with a
        measurement of the true position.

        >>> kalman.predict()
        >>> kalman.correct(array([5.]))

    Args:
        F: Linearized state transition model, i.e. the Jacobian matrix of f (n, n)
        f: Non-linear state transition model that describes the state's evolution
           from one timestep to the next
        estimate: Initial belief, i.e. the Gaussian that describes your initial guess
           on the state and your uncertainty
        H: Linearized measurement model, i.e. the Jacobian matrix of h (m, n)
        h: Non-linear measurement model that maps a state variable into the measured space
        Q: Process noise matrix, i.e. the covariance of the state transition (n, n)
        R: Measurement noise matrix, i.e. the covariance of the sensor readings (m, m)
        keep_trace: Flag for tracking the filter process

    References:
        - https://en.wikipedia.org/wiki/Extended_Kalman_filter
    """

    # In this context, we reproduce a common filter notation
    # pylint: disable=invalid-name
    # pylint: disable=too-many-instance-attributes, too-many-arguments

    def __init__(
        self,
        F: Union[ndarray, Callable[..., ndarray]],
        f: Callable[..., ndarray],
        estimate: Gaussian,
        H: Union[ndarray, Callable[..., ndarray]],
        h: Callable[..., ndarray],
        Q: ndarray,
        R: ndarray,
        keep_trace: bool = False,
    ):
        # Initial belief
        self.estimate = deepcopy(estimate)
        self.prediction = deepcopy(estimate)

        # Model specification
        self.f = f
        self.F = F
        self.h = h
        self.H = H
        self.Q = Q
        self.R = R

        # Residual and its covariance matrix
        self.y: ndarray
        self.S: ndarray

        # Kalman gain
        self.K: ndarray

        # Objects for process tracing
        self.keep_trace = keep_trace
        self.predictions = DataFrame(columns=["x", "P", "F"])
        self.estimates = DataFrame(columns=["x", "P", "z"])
    def predict(self, **kwargs) -> None:
        """Predict a future state based on the non-linear forward model with optional system input."""

        # Linearize and predict the state transition
        self.prediction.x = self.f(x=self.estimate.x, **kwargs)
        F = self.F(self.prediction.x, **kwargs) if callable(self.F) else self.F
        self.prediction.P = F @ self.estimate.P @ F.T + self.Q

        # Append prediction data to trace
        if self.keep_trace:
            new = DataFrame(
                {"x": (self.prediction.x.copy(),), "P": (self.prediction.P.copy(),), "F": (F.copy(),)}
            )
            self.predictions = concat([self.predictions, new], ignore_index=True)

    def correct(self, z: ndarray, **kwargs) -> None:
        """Correct a state prediction based on a measurement."""

        # Check for a differing measurement model
        H, h = kwargs.pop("H", self.H), kwargs.pop("h", self.h)

        # Approximate about the predicted state
        H_x: ndarray = H(self.prediction.x, **kwargs) if callable(H) else H

        # Compute the residual and its covariance
        self.y = z - h(self.prediction.x, **kwargs)
        self.S = H_x @ self.prediction.P @ H_x.T + self.R

        # Compute the new Kalman gain
        self.K = self.prediction.P @ H_x.T @ inv(self.S)

        # Estimate the new state
        self.estimate.x = self.prediction.x + self.K @ self.y
        self.estimate.P = self.prediction.P - self.K @ self.S @ self.K.T

        # Append estimation data to trace
        if self.keep_trace:
            new = DataFrame(
                {"x": (self.estimate.x.copy(),), "P": (self.estimate.P.copy(),), "z": (z.copy(),)}
            )
            self.estimates = concat([self.estimates, new], ignore_index=True)
210
pyrate/pyrate/sense/filters/extended_gmphd.py
Normal file
210
pyrate/pyrate/sense/filters/extended_gmphd.py
Normal file
@ -0,0 +1,210 @@
"""This module implements the extended Gaussian Mixture PHD filter for non-linear
multi-target tracking."""

# Standard library
from copy import deepcopy

# Typing
from typing import Callable
from typing import List
from typing import Union

# Mathematics
from numpy.linalg import inv
from numpy import ndarray

# Gaussians for state representation
from pyrate.common.math import Gaussian

# Base class
from .gmphd import GaussianMixturePHD


class ExtendedGaussianMixturePHD(GaussianMixturePHD):

    """The extended Gaussian mixture PHD filter for non-linear multi-target tracking.

    The extended Gaussian mixture PHD filter is a multi-target tracker for non-linear state space models.
    It can be regarded as an extension of the extended Kalman filter formulas to so-called random
    finite sets (RFS). The PHD filter follows the same prediction-correction scheme for state
    estimation as the single-target extended Kalman filters. As an additional part of the interface,
    the internal model for the filter's belief needs to be pruned regularly so as to limit
    the computational complexity. The extraction of a state estimate is similarly more
    sophisticated in the PHD filter and requires the use of a dedicated procedure.

    Examples:
        Start by importing the necessary numpy functions.

        >>> from numpy import array
        >>> from numpy import cos
        >>> from numpy import eye
        >>> from numpy import sin
        >>> from numpy import vstack

        Set up the model. In this case, we track sine waves.
        Thereby we choose the transition model and its Jacobian, as well as the linear
        measurement model, like so.

        >>> f = lambda x: sin(x)
        >>> F = lambda x: array([cos(x)])
        >>> H = lambda x: array([[1.0]])
        >>> h = lambda x: x

        Furthermore, we assume the following noise on the process and measurements.

        >>> Q = eye(1)
        >>> R = eye(1)

        Our belief of how targets are generated is for them to start with
        a position at zero.

        >>> mean = vstack([0.0])
        >>> covariance = array([[1.0]])
        >>> birth_belief = [Gaussian(mean, covariance)]

        We need to tell the filter how certain we are to detect targets and whether they survive.
        Also, the amount of clutter in the observed environment is quantized.

        >>> survival_rate = 0.99
        >>> detection_rate = 0.99
        >>> intensity = 0.01

        Then, we initialize the filter. This model has no input, so we ignore B.

        >>> phd = ExtendedGaussianMixturePHD(
        ...     birth_belief,
        ...     survival_rate,
        ...     detection_rate,
        ...     intensity,
        ...     F,
        ...     f,
        ...     H,
        ...     h,
        ...     Q,
        ...     R
        ... )

        We first predict with the provided model and then correct the prediction with a
        measurement, in this case of a single target's position.

        >>> phd.predict()
        >>> phd.correct([array([5.])])

    Args:
        birth_belief: GMM of target births
        survival_rate: Probability of a target to survive a timestep
        detection_rate: Probability of a target to be detected at a timestep
        intensity: Clutter intensity
        F: Linearized state transition model, i.e. the Jacobian matrix of f (n, n)
        f: Non-linear state transition model that describes the state's evolution
           from one timestep to the next
        H: Linearized measurement model, i.e. the Jacobian matrix of h (m, n)
        h: Non-linear measurement model that maps a state variable into the measured space
        Q: Process noise matrix (n, n)
        R: Measurement noise matrix (m, m)
    """

    # In this context, we reproduce a common filter notation
    # pylint: disable=invalid-name
    # pylint: disable=too-many-arguments

    def __init__(
        self,
        birth_belief: List[Gaussian],
        survival_rate: float,
        detection_rate: float,
        intensity: float,
        F: Union[ndarray, Callable[..., ndarray]],
        f: Callable[..., ndarray],
        H: Union[ndarray, Callable[..., ndarray]],
        h: Callable[..., ndarray],
        Q: ndarray,
        R: ndarray,
    ):
        # Extended filter specification
        self.f = f
        self.h = h

        # Initializes the internal linear model
        super().__init__(birth_belief, survival_rate, detection_rate, intensity, F, H, Q, R)

    def predict(self, **kwargs) -> None:
        """Predict the future state."""

        # Spontaneous birth
        born = deepcopy(self.birth_belief)

        # Spawning off of existing targets
        # Not implemented at this point in time
        spawned: List[Gaussian] = []

        # Prediction for existing targets
        for component in self.gmm:
            component.x = self.f(x=component.x, **kwargs)
            F = self.F(component.x, **kwargs) if callable(self.F) else self.F
            component.P = F @ component.P @ F.T + self.Q
            component.w *= self.survival_rate

        # Concatenate with newborn and spawned target components
        self.gmm += born + spawned

    def correct(self, measurements: ndarray, **kwargs) -> None:
        """Correct the former prediction based on a sensor reading.

        Args:
            measurements: Measurements at this timestep
            **kwargs: Optional measurement models H and/or h as well as their parameters
        """

        # pylint: disable=too-many-locals

        # Check for a differing measurement model
        H, h = kwargs.pop("H", self.H), kwargs.pop("h", self.h)

        # ######################################
        # Construction of update components

        mu: List[ndarray] = []  # Means mapped to measurement space
        S: List[ndarray] = []  # Residual covariance
        K: List[ndarray] = []  # Gains
        P: List[ndarray] = []  # Covariance

        for i, component in enumerate(self.gmm):
            # Approximate about the predicted state
            H_x: ndarray = H(component.x, **kwargs) if callable(H) else H

            mu.append(h(component.x, **kwargs))
            S.append(self.R + H_x @ component.P @ H_x.T)
            K.append(component.P @ H_x.T @ inv(S[i]))
            P.append(component.P - K[i] @ S[i] @ K[i].T)

        # ######################################
        # Update

        # Undetected assumption
        updated = deepcopy(self.gmm)
        for component in updated:
            component.w *= 1 - self.detection_rate

        # Measured assumption
        for z in measurements:
            # Fill a batch with corrected components
            batch = [
                Gaussian(
                    self.gmm[i].x + K[i] @ (z - mu[i]),
                    P[i],
                    self.detection_rate * self.gmm[i].w * Gaussian(mu[i], S[i])(z),
                )
                for i in range(len(self.gmm))
            ]

            # Normalize weights
            sum_of_weights = sum(c.w for c in batch)
            for component in batch:
                component.w /= self.intensity + sum_of_weights

            # Append the batch to the updated GMM
            updated += batch

        # Set updated as the new Gaussian mixture model
        self.gmm = updated
287
pyrate/pyrate/sense/filters/gmphd.py
Normal file
287
pyrate/pyrate/sense/filters/gmphd.py
Normal file
@ -0,0 +1,287 @@
"""This module implements the Gaussian Mixture PHD filter for linear
multi-target tracking."""

# Standard library
from copy import deepcopy

# Typing
from typing import Callable
from typing import List
from typing import Union

# Mathematics
from numpy import array
from numpy.linalg import inv
from numpy import ndarray

# Filter basis
from pyrate.common.math import Gaussian


class GaussianMixturePHD:

    """The Gaussian mixture PHD filter for linear multi-target tracking.

    The Gaussian mixture PHD filter is a multi-target tracker for linear state space models.
    It can be regarded as an extension of the Kalman filter formulas to so-called random
    finite sets (RFS). The PHD filter follows the same prediction-correction scheme for state
    estimation as the single-target Kalman filters. As an additional part of the interface,
    the internal model for the filter's belief needs to be pruned regularly so as to limit
    the computational complexity. The extraction of a state estimate is similarly more
    sophisticated in the PHD filter and requires the use of a dedicated procedure.

    Examples:
        Start by importing the necessary numpy functions.

        >>> from numpy import array
        >>> from numpy import eye
        >>> from numpy import vstack

        Set up the model.
        In this case, we track 1D positions with constant velocities.
        Thereby we choose the transition model like so.

        >>> F = array([[1.0, 1.0], [0.0, 0.0]])

        The measurements will be positions and no velocities.

        >>> H = array([[1.0, 0.0]])

        Furthermore, we assume the following noise on the process and measurements.

        >>> Q = eye(2)
        >>> R = eye(1)

        Our belief of how targets are generated is for them to start with
        a position and velocity of 0.

        >>> mean = vstack([0.0, 0.0])
        >>> covariance = array([[1.0, 0.0], [0.0, 1.0]])
        >>> birth_belief = [Gaussian(mean, covariance)]

        We need to tell the filter how certain we are to detect targets and whether they survive.
        Also, the amount of clutter in the observed environment is quantized.

        >>> survival_rate = 0.99
        >>> detection_rate = 0.99
        >>> intensity = 0.01

        Then, we initialize the filter. This model has no input, so we ignore B.

        >>> phd = GaussianMixturePHD(
        ...     birth_belief,
        ...     survival_rate,
        ...     detection_rate,
        ...     intensity,
        ...     F,
        ...     H,
        ...     Q,
        ...     R
        ... )

        We first predict with the provided model and then correct the prediction with a
        measurement, in this case of a single target's position.

        >>> phd.predict()
        >>> phd.correct([array([5.])])

    Args:
        birth_belief: GMM of target births
        survival_rate: Probability of a target to survive a timestep
        detection_rate: Probability of a target to be detected at a timestep
        intensity: Clutter intensity
        F: Linear state transition model (n, n)
        H: Linear measurement model (m, n)
        Q: Process noise matrix (n, n)
        R: Measurement noise matrix (m, m)
    """

    # In this context, we reproduce a common filter notation
    # pylint: disable=invalid-name
    # pylint: disable=too-many-arguments

    def __init__(
        self,
        birth_belief: List[Gaussian],
        survival_rate: float,
        detection_rate: float,
        intensity: float,
        F: Union[ndarray, Callable[..., ndarray]],
        H: Union[ndarray, Callable[..., ndarray]],
        Q: ndarray,
        R: ndarray,
    ):
        # Filter specification
        self.F = F
        self.H = H
        self.Q = Q
        self.R = R

        # Gaussian mixture model for spontaneous birth of new targets
        self.birth_belief = birth_belief

        # Rates of survival, detection and clutter intensity
        self.survival_rate = survival_rate
        self.detection_rate = detection_rate
        self.intensity = intensity

        # Gaussian mixture model
        self.gmm: List[Gaussian] = []

    def extract(self, threshold: float = 0.5) -> List[ndarray]:
        """Extract a state representation based on spikes in the current GMM.

        Args:
            threshold: Weight that a component needs to have to be considered a target state
        """

        # Memory for all estimated states
        states: List[ndarray] = []

        # Every component with sufficient weight is considered to be a target
        for component in self.gmm:
            if component.w > threshold:
                # A component with weight over 1 represents multiple targets
                states += [component.x for _ in range(int(round(component.w)))]

        # Return all extracted states
        return states
|
||||
|
||||
def prune(self, threshold: float, merge_distance: float, max_components: int) -> None:
|
||||
"""Reduces the number of gaussian mixture components.
|
||||
|
||||
Args:
|
||||
threshold: Truncation threshold s.t. components with weight < threshold are removed
|
||||
merge_distance: Merging threshold s.t. components 'close enough' will be merged
|
||||
max_components: Maximum number of gaussians after pruning
|
||||
"""
|
||||
|
||||
# Select a subset of components to be pruned
|
||||
selected = [component for component in self.gmm if component.w > threshold]
|
||||
|
||||
# Create new list for pruned mixture model
|
||||
pruned: List[Gaussian] = []
|
||||
|
||||
# While candidates for pruning exist ...
|
||||
while selected:
|
||||
# Find mean of component with maximum weight
|
||||
index = max(range(len(selected)), key=lambda index: selected[index].w)
|
||||
|
||||
mean = selected[index].x
|
||||
|
||||
# Select components to be merged and remove merged from selected
|
||||
mergeable = [
|
||||
c for c in selected if ((c.x - mean).T @ inv(c.P) @ (c.x - mean)).item() <= merge_distance
|
||||
]
|
||||
selected = [c for c in selected if c not in mergeable]
|
||||
|
||||
# Compute new mixture component
|
||||
merged_weight = sum([component.w for component in mergeable])
|
||||
merged_mean = array(sum([component.w * component.x for component in mergeable]) / merged_weight)
|
||||
merged_covariance = array(
|
||||
sum(
|
||||
[
|
||||
component.w * (component.P + (mean - component.x) @ (mean - component.x).T)
|
||||
for component in mergeable
|
||||
]
|
||||
)
|
||||
/ merged_weight
|
||||
)
|
||||
|
||||
# Store the component
|
||||
pruned.append(Gaussian(merged_mean, merged_covariance, merged_weight))
|
||||
|
||||
# Remove components with minimum weight if maximum number is exceeded
|
||||
while len(pruned) > max_components:
|
||||
# Find index of component with minimum weight
|
||||
index = min(range(len(pruned)), key=lambda index: pruned[index].w)
|
||||
|
||||
# Remove the component
|
||||
del pruned[index]
|
||||
|
||||
# Update GMM with pruned model
|
||||
self.gmm = deepcopy(pruned)
|
||||
|
||||
def predict(self, **kwargs) -> None:
|
||||
"""Predict the future state."""
|
||||
|
||||
# Compute F if additional parameters are needed
|
||||
if callable(self.F):
|
||||
F = self.F(**kwargs)
|
||||
else:
|
||||
F = self.F
|
||||
|
||||
# Spontaneous birth
|
||||
born = deepcopy(self.birth_belief)
|
||||
|
||||
# Spawning off of existing targets
|
||||
# Not implemented at this point in time
|
||||
spawned: List[Gaussian] = []
|
||||
|
||||
# Prediction for existing targets
|
||||
for component in self.gmm:
|
||||
component.x = F @ component.x
|
||||
component.P = F @ component.P @ F.T + self.Q
|
||||
component.w *= self.survival_rate
|
||||
|
||||
# Concatenate with newborn and spawned target components
|
||||
self.gmm += born + spawned
|
||||
|
||||
def correct(self, measurements: ndarray, **kwargs) -> None:
|
||||
"""Correct the former prediction based on a sensor reading.
|
||||
|
||||
Args:
|
||||
measurements: Measurements at this timestep
|
||||
"""
|
||||
|
||||
# Check for differing measurement model
|
||||
H = kwargs.pop("H", self.H)
|
||||
|
||||
# Compute H if additional parameters are needed
|
||||
if callable(H):
|
||||
H = H(**kwargs)
|
||||
|
||||
# ######################################
|
||||
# Construction of update components
|
||||
|
||||
mu: List[ndarray] = [] # Means mapped to measurement space
|
||||
S: List[ndarray] = [] # Residual covariance
|
||||
K: List[ndarray] = [] # Gains
|
||||
P: List[ndarray] = [] # Covariance
|
||||
|
||||
for i, component in zip(range(len(self.gmm)), self.gmm):
|
||||
mu.append(H @ component.x)
|
||||
S.append(self.R + H @ component.P @ H.T)
|
||||
K.append(component.P @ H.T @ inv(S[i]))
|
||||
P.append(component.P - K[i] @ S[i] @ K[i].T)
|
||||
|
||||
# ######################################
|
||||
# Update
|
||||
|
||||
# Undetected assumption
|
||||
updated = deepcopy(self.gmm)
|
||||
for component in updated:
|
||||
component.w *= 1 - self.detection_rate
|
||||
|
||||
# Measured assumption
|
||||
for z in measurements:
|
||||
# Fill batch with corrected components
|
||||
batch = [
|
||||
Gaussian(
|
||||
self.gmm[i].x + K[i] @ (z - mu[i]),
|
||||
P[i],
|
||||
self.detection_rate * self.gmm[i].w * Gaussian(mu[i], S[i])(z),
|
||||
)
|
||||
for i in range(len(self.gmm))
|
||||
]
|
||||
|
||||
# Normalize weights
|
||||
sum_of_weights = sum([c.w for c in batch])
|
||||
for component in batch:
|
||||
component.w /= self.intensity + sum_of_weights
|
||||
|
||||
# Append batch to updated GMM
|
||||
updated += batch
|
||||
|
||||
# Set updated as new gaussian mixture model
|
||||
self.gmm = updated
|
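
The pieces above combine into a short tracking loop. The following sketch assumes that ``GaussianMixturePHD`` is exported alongside the other filters via ``pyrate.sense.filters``; the rates and measurements are illustrative.

# A minimal multi-target tracking cycle: predict, correct, prune, extract.
from numpy import array, eye, vstack

from pyrate.common.math import Gaussian
from pyrate.sense.filters import GaussianMixturePHD  # assumed export path

# Constant-velocity model with position-only measurements, as in the docstring example
F = array([[1.0, 1.0], [0.0, 1.0]])
H = array([[1.0, 0.0]])
Q, R = eye(2), eye(1)

# New targets are believed to appear at rest around the origin
birth_belief = [Gaussian(vstack([0.0, 0.0]), eye(2))]
phd = GaussianMixturePHD(birth_belief, 0.99, 0.99, 0.01, F, H, Q, R)

for _ in range(5):
    phd.predict()
    # Two targets are observed in each timestep
    phd.correct([array([5.0]), array([-5.0])])
    # Keep the mixture tractable: drop weak components and merge close ones
    phd.prune(threshold=1e-5, merge_distance=1.0, max_components=50)

# Components with sufficient weight are reported as state estimates
states = phd.extract()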
178
pyrate/pyrate/sense/filters/kalman.py
Normal file
178
pyrate/pyrate/sense/filters/kalman.py
Normal file
@ -0,0 +1,178 @@
"""This module implements the Kalman filter for state estimation based on
linear state transition and measurement models."""

# Standard library
from copy import deepcopy

# Typing
from typing import Callable
from typing import Optional
from typing import Union

# Mathematics
from numpy.linalg import inv
from numpy import ndarray

# Data modelling
from pandas import concat
from pandas import DataFrame

# Pyrate
from pyrate.common.math import Gaussian


class Kalman:

    """The Kalman filter for linear state estimation.

    The Kalman filter is a single target tracker for linear state space models, i.e. models that
    describe the transition of a state variable and its relationship to sensor readings
    as matrix-vector multiplications.
    Additionally, the Kalman filter is based on the assumption that the state process and
    measurements are sampled from a Gaussian distribution.

    Examples:
        First, import some helper functions from numpy.

        >>> from numpy import array
        >>> from numpy import eye
        >>> from numpy import vstack

        Then, set up the system's model.
        In this case, we track a 1D position that we assume to have a constant velocity.
        Thereby, we choose the transition model and measurement function like so.

        >>> F = array([[1.0, 1.0], [0.0, 1.0]])
        >>> H = array([[1.0, 0.0]])

        Furthermore, we assume the following covariance matrices to model
        the noise in our model and measurements.

        >>> Q = eye(2)
        >>> R = eye(1)

        Our initial belief is a position and velocity of 0.

        >>> mean = vstack([0.0, 0.0])
        >>> covariance = array([[1.0, 0.0], [0.0, 1.0]])
        >>> estimate = Gaussian(mean, covariance)

        Then, we initialize the filter.
        Since this model has no input, we can ignore the control function B.

        >>> kalman = Kalman(F, estimate, H, Q, R)

        Now, we can predict based on the provided model and correct predictions with
        measurements of the true position.

        >>> kalman.predict()
        >>> kalman.correct(array([5.]))

        Predictions and corrections do not need to alternate every time.
        As an example, you can predict the state multiple times should your measurements be
        unavailable for an extended period of time.

    Args:
        F: State transition model, i.e. the change of x in a single timestep (n, n)
        estimate: Initial belief, i.e. the gaussian distribution that describes your initial guess
            on the target's state
        H: Measurement model, i.e. a mapping from a state to measurement space (m, n)
        Q: Process noise matrix, i.e. the covariance of the state transition (n, n)
        R: Measurement noise matrix, i.e. the covariance of the sensor readings (m, m)
        B: Input dynamics model, i.e. the influence of a set system input on the state transition (1, k)
        keep_trace: Flag for tracking filter process

    References:
        - https://en.wikipedia.org/wiki/Kalman_filter
    """

    # In this context, we reproduce a common filter notation
    # pylint: disable=invalid-name
    # pylint: disable=too-many-instance-attributes, too-many-arguments

    def __init__(
        self,
        F: Union[ndarray, Callable[..., ndarray]],
        estimate: Gaussian,
        H: Union[ndarray, Callable[..., ndarray]],
        Q: ndarray,
        R: ndarray,
        B: Optional[ndarray] = None,
        keep_trace: bool = False,
    ):
        # Initial belief
        self.estimate = deepcopy(estimate)
        self.prediction = deepcopy(estimate)

        # Model specification
        self.F = F
        self.B = B
        self.H = H
        self.Q = Q
        self.R = R

        # Residual and its covariance matrix
        self.y: ndarray
        self.S: ndarray

        # Kalman gain
        self.K: ndarray

        # Objects for process tracing
        self.keep_trace = keep_trace
        self.predictions = DataFrame(columns=["x", "P", "F"])
        self.estimates = DataFrame(columns=["x", "P", "z"])

    def predict(self, **kwargs) -> None:
        """Predict a future state based on a linear forward model with optional system input."""

        # Compute F if additional parameters are needed
        F = self.F(**kwargs) if callable(self.F) else self.F

        # Predict next state
        self.prediction.x = F @ self.estimate.x
        self.prediction.P = F @ self.estimate.P @ F.T + self.Q

        # Consider system input
        u = kwargs.pop("u", None)
        if u is not None:
            self.prediction.x += self.B @ u

        # Append prediction data to trace
        if self.keep_trace:
            new = DataFrame(
                {"x": (self.prediction.x.copy(),), "P": (self.prediction.P.copy(),), "F": (F.copy(),)}
            )
            self.predictions = concat([self.predictions, new], ignore_index=True)

    def correct(self, z: ndarray, **kwargs) -> None:
        """Correct a state prediction based on a measurement.

        Args:
            z: The measurement taken at this timestep
        """

        # Check for differing measurement model
        H = kwargs.pop("H", self.H)

        # Compute H if additional parameters are needed
        if callable(H):
            H = H(**kwargs)

        # Compute the residual and its covariance
        self.y = z - H @ self.prediction.x
        self.S = H @ self.prediction.P @ H.T + self.R

        # Compute the new Kalman gain
        self.K = self.prediction.P @ H.T @ inv(self.S)

        # Estimate new state
        self.estimate.x = self.prediction.x + self.K @ self.y
        self.estimate.P = self.prediction.P - self.K @ self.S @ self.K.T

        # Append estimation data to trace
        if self.keep_trace:
            new = DataFrame(
                {"x": (self.estimate.x.copy(),), "P": (self.estimate.P.copy(),), "z": (z.copy(),)}
            )
            self.estimates = concat([self.estimates, new], ignore_index=True)
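
Two details of this class are easy to miss: the control input ``u`` is passed to ``predict`` through ``**kwargs`` and applied as ``B @ u``, and ``keep_trace=True`` records every step in the pandas DataFrames. A small sketch, with illustrative values and the column-vector convention of the docstring:

from numpy import array, eye, vstack

from pyrate.common.math import Gaussian
from pyrate.sense.filters import Kalman

F = array([[1.0, 1.0], [0.0, 1.0]])  # constant-velocity transition
H = array([[1.0, 0.0]])              # position-only measurement
B = vstack([0.0, 1.0])               # input acts on the velocity component

kalman = Kalman(F, Gaussian(vstack([0.0, 0.0]), eye(2)), H, eye(2), eye(1), B=B, keep_trace=True)

# u must be a column vector so that B @ u matches the shape of the state
kalman.predict(u=vstack([0.1]))
kalman.correct(array([5.0]))

# One row per step: predictions hold x, P and F; estimates hold x, P and z
last_prediction = kalman.predictions.iloc[-1]
last_estimate = kalman.estimates.iloc[-1]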
238
pyrate/pyrate/sense/filters/unscented.py
Normal file
238
pyrate/pyrate/sense/filters/unscented.py
Normal file
@ -0,0 +1,238 @@
"""This module provides an implementation of the Unscented Kalman filter
for non-linear state estimation."""

# Standard library
from copy import deepcopy

# Typing
from typing import Callable

# Mathematics
from numpy import array
from numpy import hstack
from numpy.linalg import inv
from numpy import ndarray
from numpy import outer
from numpy import tensordot
from numpy import vectorize
from numpy import vstack
from scipy.linalg import cholesky

# Data modelling
from pandas import concat
from pandas import DataFrame

# Pyrate
from pyrate.common.math import Gaussian


class UnscentedKalman:

    """The unscented Kalman filter for non-linear state estimation.

    This filter behaves similarly to the standard Kalman filter, but utilizes the so-called
    unscented transform to approximate gaussian distributions by sampling from the given
    non-linear models to estimate state variables whose process and/or relation to
    the measured properties cannot be accurately described by a linear model.

    Examples:
        To use the UKF, we make use of numpy's functionality.

        >>> from numpy import array
        >>> from numpy import eye
        >>> from numpy import sin
        >>> from numpy import vstack

        Set up the model. In this case, we track a sine wave.
        Thereby, we choose the non-linear transition and measurement models like so.

        >>> f = lambda x: sin(x)
        >>> h = lambda x: x

        Furthermore, we assume the following noise on the process and measurements.

        >>> Q = eye(1)
        >>> R = eye(1)

        Our initial belief is at 0.

        >>> mean = vstack([0.0])
        >>> covariance = array([[1.0]])
        >>> estimate = Gaussian(mean, covariance)

        Then, we initialize the filter. This model has no input, so we ignore B.

        >>> kalman = UnscentedKalman(f, estimate, h, Q, R)

        We first predict with the provided model and then correct the prediction with a
        measurement of the true position.

        >>> kalman.predict()
        >>> kalman.correct(array([5.]))

    Args:
        f: Non-linear state transition model that describes the state's evolution
            from one timestep to the next
        estimate: Initial belief, i.e. the gaussian that describes your initial guess
            on the state and your uncertainty
        h: Non-linear measurement model that maps a state variable into the measured space
        Q: Process noise matrix, i.e. the covariance of the state transition (n, n)
        R: Measurement noise matrix, i.e. the covariance of the sensor readings (m, m)
        alpha: Spread of sample points, pick between 0. and 1.
        beta: Sigma point parameter, 2 is optimal for gaussian problems
        kappa: Sigma point parameter, a common choice for kappa is to subtract 3
            from your state's dimension
        keep_trace: Flag for tracking filter process

    References:
        - https://en.wikipedia.org/wiki/Unscented_Kalman_filter
    """

    # In this context, we reproduce a common filter notation
    # pylint: disable=invalid-name
    # pylint: disable=too-many-instance-attributes, too-many-arguments
    # Required lambda is falsely accused
    # pylint: disable=unnecessary-lambda

    def __init__(
        self,
        f: Callable[..., ndarray],
        estimate: Gaussian,
        h: Callable[..., ndarray],
        Q: ndarray,
        R: ndarray,
        alpha: float = 1.0,
        beta: float = 2.0,
        kappa: float = 1.0,
        keep_trace: bool = False,
    ):
        # Initial belief
        self.estimate = deepcopy(estimate)
        self.prediction = deepcopy(estimate)

        # Model specification
        self.f = f
        self.h = h
        self.Q = Q
        self.R = R
        self.alpha = alpha
        self.beta = beta
        self.kappa = kappa

        # Residual and its covariance matrix
        self.y: ndarray
        self.S: ndarray

        # Predicted sigma points and measurements
        self.Y: ndarray
        self.Z: ndarray

        # Kalman gain
        self.K: ndarray

        # Merwe initial points and weights
        self.X: ndarray
        self.mean_weights: ndarray
        self.cov_weights: ndarray
        self.setup_weights()

        # Objects for process tracing
        self.keep_trace = keep_trace
        self.predictions = DataFrame(columns=["x", "P", "X", "Y"])
        self.estimates = DataFrame(columns=["x", "P", "z"])

    def setup_weights(self) -> None:
        """Computes mean and covariance weights for the unscented transform."""

        # Aliases for calculation
        n = self.estimate.x.size
        l = self.alpha**2 * n + self.kappa  # noqa: E741

        # Weights for mean and covariance
        self.mean_weights = array([l / (n + l)] + [1 / (2 * (n + l))] * (2 * n))
        self.cov_weights = array(
            [l / (n + l) + 1 - self.alpha**2 + self.beta] + [1 / (2 * (n + l))] * (2 * n)
        )

    def compute_sigma_points(self) -> None:
        """Calculates van der Merwe's sigma points."""

        # Compute the distances for each point
        distance_factor = self.estimate.x.size * (1 + self.alpha**2) + self.kappa
        distances = cholesky(distance_factor * self.estimate.P)

        # Sigma points
        self.X = hstack([self.estimate.x, self.estimate.x + distances, self.estimate.x - distances])

    def predict(self, **kwargs) -> None:
        """Predict a future state based on the non-linear forward model.

        Args:
            **kwargs: Arguments that are passed to the forward model
        """

        # Compute and propagate Merwe points
        self.compute_sigma_points()
        self.Y = vectorize(lambda x: self.f(x, **kwargs), signature="(m)->(n)")(self.X.T).T

        # Predict next state as mean of distribution
        self.prediction.x = vstack(self.mean_weights @ self.Y.T)
        self.prediction.P = (
            tensordot(
                self.cov_weights,
                [outer(y - self.prediction.x.T, y - self.prediction.x.T) for y in self.Y.T],
                axes=1,
            )
            + self.Q
        )

        # Append prediction data to trace
        if self.keep_trace:
            new = DataFrame(
                {
                    "x": (self.prediction.x.copy(),),
                    "P": (self.prediction.P.copy(),),
                    "X": (deepcopy(self.X),),
                    "Y": (deepcopy(self.Y),),
                }
            )
            self.predictions = concat([self.predictions, new], ignore_index=True)

    def correct(self, z: ndarray, **kwargs) -> None:
        """Correct a state prediction based on a measurement.

        Args:
            z: The measurement taken at this timestep
        """

        # Check for differing measurement model
        h = kwargs.pop("h", self.h)

        # Compute measurement distribution
        self.Z = vectorize(lambda y: h(y, **kwargs), signature="(m)->(n)")(self.Y.T).T
        mean_Z = vstack(self.mean_weights @ self.Z.T)

        # Compute the residual and its covariance
        self.y = z - mean_Z
        self.S = (
            tensordot(self.cov_weights, [outer(z - mean_Z.T, z - mean_Z.T) for z in self.Z.T], axes=1)
            + self.R
        )

        # Compute the new Kalman gain
        self.K = tensordot(
            self.cov_weights,
            [outer(y - self.prediction.x.T, z - mean_Z.T) for y, z in zip(self.Y.T, self.Z.T)],
            axes=1,
        ) @ inv(self.S)

        # Estimate new state
        self.estimate.x = self.prediction.x + self.K @ self.y
        self.estimate.P = self.prediction.P - self.K @ self.S @ self.K.T

        # Append estimation data to trace
        if self.keep_trace:
            new = DataFrame(
                {"x": (self.estimate.x.copy(),), "P": (self.estimate.P.copy(),), "z": (z.copy(),)}
            )
            self.estimates = concat([self.estimates, new], ignore_index=True)
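
The Merwe weights from ``setup_weights`` can be sanity-checked directly: for an n-dimensional state there are 2n + 1 sigma points, and the mean weights sum to one. A quick sketch with the default parameters:

from numpy import array, eye, sin, vstack

from pyrate.common.math import Gaussian
from pyrate.sense.filters import UnscentedKalman

ukf = UnscentedKalman(lambda x: sin(x), Gaussian(vstack([0.0]), array([[1.0]])), lambda x: x, eye(1), eye(1))

# 2n + 1 weights for an n-dimensional state, summing to one
assert ukf.mean_weights.size == 2 * ukf.estimate.x.size + 1
assert abs(ukf.mean_weights.sum() - 1.0) < 1e-12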
7
pyrate/pyrate/sense/smoothers/__init__.py
Normal file
7
pyrate/pyrate/sense/smoothers/__init__.py
Normal file
@ -0,0 +1,7 @@
"""This package provides smoothers for state estimations based on noisy measurements."""

from .extended import ExtendedRts
from .rts import Rts
from .unscented import UnscentedRts

__all__ = ["Rts", "ExtendedRts", "UnscentedRts"]
134
pyrate/pyrate/sense/smoothers/extended.py
Normal file
134
pyrate/pyrate/sense/smoothers/extended.py
Normal file
@ -0,0 +1,134 @@
"""This module implements the Extended Rauch-Tung-Striebel (RTS) smoother for state estimation based on
linearized state transition and measurement models."""

# Typing
from typing import Callable

# Mathematics
from numpy.linalg import inv
from numpy import ndarray

# Data modelling
from pandas import DataFrame

# Pyrate
from pyrate.common.math import Gaussian
from pyrate.sense.filters import ExtendedKalman


class ExtendedRts(ExtendedKalman):

    """The Extended RTS smoother for non-linear state estimation.

    The Extended RTS smoother is a single target state estimator for non-linear models
    that uses the models' Jacobi matrices to estimate state variables whose process and/or
    relation to the measured properties cannot be accurately described by a linear model.

    Examples:
        Start by importing the necessary numpy functions.

        >>> from numpy import array
        >>> from numpy import cos
        >>> from numpy import eye
        >>> from numpy import sin
        >>> from numpy import vstack

        Set up the model. In this case, we track a sine wave.
        Thereby, we choose the transition model and its Jacobian, as well as the linear
        measurement model, like so.

        >>> f = lambda x: sin(x)
        >>> F = lambda x: array([cos(x)])
        >>> H = lambda x: array([[1.0]])
        >>> h = lambda x: x

        Furthermore, we assume the following noise on the process and measurements.

        >>> Q = eye(1)
        >>> R = eye(1)

        Our initial belief is at 0.

        >>> mean = vstack([0.0])
        >>> covariance = array([[1.0]])
        >>> estimate = Gaussian(mean, covariance)

        Then, we initialize the smoother. This model has no input, so we ignore B.

        >>> rts = ExtendedRts(F, f, estimate, H, h, Q, R)

        We first predict with the provided model and then correct the prediction with
        measurements of the true position.

        >>> for i in range(10):
        ...     rts.predict()
        ...     rts.correct(array([5.]))

        So far, this is equivalent to using the standard Extended Kalman filter.
        We can now get a better estimate of the state trajectory by using the RTS smoothing algorithm.
        Hereby, old estimates get updated recursively by their successors.

        >>> smooth_estimates = rts.smooth()

    Args:
        F: Linearized state transition model, i.e. the Jacobi matrix of f (n, n)
        f: Non-linear state transition model that describes the state's evolution
            from one timestep to the next
        estimate: Initial belief, i.e. the gaussian that describes your initial guess
            on the state and your uncertainty
        H: Linearized measurement model, i.e. the Jacobi matrix of h (m, n)
        h: Non-linear measurement model that maps a state variable into the measured space
        Q: Process noise matrix, i.e. the covariance of the state transition (n, n)
        R: Measurement noise matrix, i.e. the covariance of the sensor readings (m, m)
    """

    # In this context, we reproduce a common filter notation
    # pylint: disable=invalid-name
    # pylint: disable=too-many-arguments

    def __init__(
        self,
        F: Callable[..., ndarray],
        f: Callable[..., ndarray],
        estimate: Gaussian,
        H: Callable[..., ndarray],
        h: Callable[..., ndarray],
        Q: ndarray,
        R: ndarray,
    ):
        super().__init__(F, f, estimate, H, h, Q, R, keep_trace=True)

    def smooth(self) -> DataFrame:
        """Apply RTS smoothing.

        Returns:
            The smoothed data with columns `"x"` and `"P"`
        """

        # Dataframe of smoothed estimates
        # The latest estimate cannot be improved
        smoothed = DataFrame(columns=["x", "P"])
        smoothed.loc[self.estimates.index[-1]] = {
            "x": self.estimates.iloc[-1].x,
            "P": self.estimates.iloc[-1].P,
        }

        # Recursively go back in time
        for i in self.estimates.index[-2::-1]:
            # Access next predictions and estimates for smoothing
            predicted_x = self.predictions.iloc[i + 1].x
            predicted_P = self.predictions.iloc[i + 1].P
            prediction_F = self.predictions.iloc[i + 1].F
            estimated_x = self.estimates.iloc[i].x
            estimated_P = self.estimates.iloc[i].P

            # Compute smoothing gain (note the transposed transition model)
            G = estimated_P @ prediction_F.T @ inv(predicted_P)

            # Append to smoothed DataFrame
            smoothed.loc[i] = {
                "x": estimated_x + G @ (smoothed.loc[i + 1].x - predicted_x),
                "P": estimated_P + G @ (smoothed.loc[i + 1].P - predicted_P) @ G.T,
            }

        return smoothed
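
To consume the smoother's output, note that ``smooth`` fills the DataFrame backwards in time, so its rows should be sorted by index before reading the trajectory. A sketch on the sine model from the docstring:

from numpy import array, cos, eye, sin, vstack

from pyrate.common.math import Gaussian
from pyrate.sense.smoothers import ExtendedRts

rts = ExtendedRts(
    lambda x: array([cos(x)]),  # F, the Jacobian of f
    lambda x: sin(x),           # f
    Gaussian(vstack([0.0]), array([[1.0]])),
    lambda x: array([[1.0]]),   # H, the Jacobian of h
    lambda x: x,                # h
    eye(1),
    eye(1),
)

for _ in range(10):
    rts.predict()
    rts.correct(array([5.0]))

# Rows are written backwards in time; sort before reading the trajectory
smoothed = rts.smooth().sort_index()
positions = [state.item() for state in smoothed["x"]]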
136
pyrate/pyrate/sense/smoothers/rts.py
Normal file
136
pyrate/pyrate/sense/smoothers/rts.py
Normal file
@ -0,0 +1,136 @@
"""This module implements the Rauch-Tung-Striebel (RTS) smoother for state estimation based on
linear state transition and measurement models."""

# Typing
from typing import Callable
from typing import Optional
from typing import Union

# Mathematics
from numpy.linalg import inv
from numpy import ndarray

# Data modelling
from pandas import DataFrame

# Pyrate
from pyrate.common.math import Gaussian
from pyrate.sense.filters import Kalman


class Rts(Kalman):

    """The RTS smoother for linear state estimation.

    The RTS smoother is a single target state estimator for linear state space models, i.e. models that
    describe the transition of a state variable and its relationship to sensor readings
    as matrix-vector multiplications.
    Additionally, the RTS smoother is based on the assumption that the state process and
    measurements are sampled from a Gaussian distribution.

    Examples:
        First, import some helper functions from numpy.

        >>> from numpy import array
        >>> from numpy import eye
        >>> from numpy import vstack

        Then, set up the system's model.
        In this case, we track a 1D position that we assume to have a constant velocity.
        Thereby, we choose the transition model and measurement function like so.

        >>> F = array([[1.0, 1.0], [0.0, 1.0]])
        >>> H = array([[1.0, 0.0]])

        Furthermore, we assume the following covariance matrices to model
        the noise in our model and measurements.

        >>> Q = eye(2)
        >>> R = eye(1)

        Our initial belief is a position and velocity of 0.

        >>> mean = vstack([0.0, 0.0])
        >>> covariance = array([[1.0, 0.0], [0.0, 1.0]])
        >>> estimate = Gaussian(mean, covariance)

        Then, we initialize the smoother.
        Since this model has no input, we can ignore the control function B.

        >>> rts = Rts(F, estimate, H, Q, R)

        Now, we can predict based on the provided model and correct predictions with
        measurements of the true position.

        >>> for i in range(10):
        ...     rts.predict()
        ...     rts.correct(array([5.]))

        So far, this is equivalent to using the standard Kalman filter.
        We can now get a better estimate of the state trajectory by using the RTS smoothing algorithm.
        Hereby, old estimates get updated recursively by their successors.

        >>> smooth_estimates = rts.smooth()

    Args:
        F: State transition model, i.e. the change of x in a single timestep (n, n)
        estimate: Initial belief, i.e. the gaussian distribution that describes your initial guess
            on the target's state
        H: Measurement model, i.e. a mapping from a state to measurement space (m, n)
        Q: Process noise matrix, i.e. the covariance of the state transition (n, n)
        R: Measurement noise matrix, i.e. the covariance of the sensor readings (m, m)
        B: Input dynamics model, i.e. the influence of a set system input on the state transition (1, k)

    References:
        - https://en.wikipedia.org/wiki/Kalman_filter#Rauch%E2%80%93Tung%E2%80%93Striebel
    """

    # In this context, we reproduce a common filter notation
    # pylint: disable=invalid-name
    # pylint: disable=too-many-arguments

    def __init__(
        self,
        F: Union[ndarray, Callable[..., ndarray]],
        estimate: Gaussian,
        H: Union[ndarray, Callable[..., ndarray]],
        Q: ndarray,
        R: ndarray,
        B: Optional[ndarray] = None,
    ):
        super().__init__(F, estimate, H, Q, R, B, keep_trace=True)

    def smooth(self) -> DataFrame:
        """Apply RTS smoothing.

        Returns:
            The smoothed data with columns `"x"` and `"P"`
        """

        # Dataframe of smoothed estimates
        # The latest estimate cannot be improved
        smoothed = DataFrame(columns=["x", "P"])
        smoothed.loc[self.estimates.index[-1]] = {
            "x": self.estimates.iloc[-1].x,
            "P": self.estimates.iloc[-1].P,
        }

        # Recursively go back in time
        for i in self.estimates.index[-2::-1]:
            # Access next predictions and estimates for smoothing
            predicted_x = self.predictions.iloc[i + 1].x
            predicted_P = self.predictions.iloc[i + 1].P
            prediction_F = self.predictions.iloc[i + 1].F
            estimated_x = self.estimates.iloc[i].x
            estimated_P = self.estimates.iloc[i].P

            # Compute smoothing gain (note the transposed transition model)
            G = estimated_P @ prediction_F.T @ inv(predicted_P)

            # Append to smoothed DataFrame
            smoothed.loc[i] = {
                "x": estimated_x + G @ (smoothed.loc[i + 1].x - predicted_x),
                "P": estimated_P + G @ (smoothed.loc[i + 1].P - predicted_P) @ G.T,
            }

        return smoothed
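
The recursion is anchored at the final filter estimate, which the smoother cannot improve; everything before it is revised backwards in time. This invariant can be checked directly, as in this sketch with the docstring's model:

from numpy import allclose, array, eye, vstack

from pyrate.common.math import Gaussian
from pyrate.sense.smoothers import Rts

rts = Rts(
    array([[1.0, 1.0], [0.0, 1.0]]),
    Gaussian(vstack([0.0, 0.0]), eye(2)),
    array([[1.0, 0.0]]),
    eye(2),
    eye(1),
)

for _ in range(10):
    rts.predict()
    rts.correct(array([5.0]))

# The last smoothed state coincides with the last filter estimate
smoothed = rts.smooth().sort_index()
assert allclose(smoothed.iloc[-1].x, rts.estimate.x)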
145
pyrate/pyrate/sense/smoothers/unscented.py
Normal file
145
pyrate/pyrate/sense/smoothers/unscented.py
Normal file
@ -0,0 +1,145 @@
"""This module implements the Unscented Rauch-Tung-Striebel (RTS) smoother for state estimation based on
non-linear state transition and measurement models."""

# Typing
from typing import Callable

# Mathematics
from numpy.linalg import inv
from numpy import ndarray
from numpy import outer
from numpy import tensordot

# Data modelling
from pandas import DataFrame

# Pyrate
from pyrate.common.math import Gaussian
from pyrate.sense.filters import UnscentedKalman


class UnscentedRts(UnscentedKalman):

    """The Unscented RTS smoother for non-linear state estimation.

    This smoother behaves similarly to the standard Rts smoother, but utilizes the so-called
    unscented transform to approximate gaussian distributions by sampling from the given
    non-linear models to estimate state variables whose process and/or relation to
    the measured properties cannot be accurately described by a linear model.

    Examples:
        To use the smoother, we make use of numpy's functionality.

        >>> from numpy import array
        >>> from numpy import eye
        >>> from numpy import sin
        >>> from numpy import vstack

        Set up the model. In this case, we track a sine wave.
        Thereby, we choose the non-linear transition and measurement models like so.

        >>> f = lambda x: sin(x)
        >>> h = lambda x: x

        Furthermore, we assume the following noise on the process and measurements.

        >>> Q = eye(1)
        >>> R = eye(1)

        Our initial belief is at 0.

        >>> mean = vstack([0.0])
        >>> covariance = array([[1.0]])
        >>> estimate = Gaussian(mean, covariance)

        Then, we initialize the smoother. This model has no input, so we ignore B.

        >>> rts = UnscentedRts(f, estimate, h, Q, R)

        We first predict with the provided model and then correct the prediction with
        measurements of the true position.

        >>> for i in range(10):
        ...     rts.predict()
        ...     rts.correct(array([5.]))

        So far, this is equivalent to using the standard unscented Kalman filter.
        We can now get a better estimate of the state trajectory by using the RTS smoothing algorithm.
        Hereby, old estimates get updated recursively by their successors.

        >>> smooth_estimates = rts.smooth()

    Args:
        f: Non-linear state transition model that describes the state's evolution
            from one timestep to the next
        estimate: Initial belief, i.e. the gaussian that describes your initial guess
            on the state and your uncertainty
        h: Non-linear measurement model that maps a state variable into the measured space
        Q: Process noise matrix, i.e. the covariance of the state transition (n, n)
        R: Measurement noise matrix, i.e. the covariance of the sensor readings (m, m)
        alpha: Spread of sample points, pick between 0. and 1.
        beta: Sigma point parameter, 2 is optimal for gaussian problems
        kappa: Sigma point parameter, a common choice for kappa is to subtract 3
            from your state's dimension
    """

    # In this context, we reproduce a common filter notation
    # pylint: disable=invalid-name
    # pylint: disable=too-many-arguments

    def __init__(
        self,
        f: Callable[..., ndarray],
        estimate: Gaussian,
        h: Callable[..., ndarray],
        Q: ndarray,
        R: ndarray,
        alpha: float = 1.0,
        beta: float = 2.0,
        kappa: float = 1.0,
    ):
        super().__init__(f, estimate, h, Q, R, alpha, beta, kappa, keep_trace=True)

    def smooth(self) -> DataFrame:
        """Apply RTS smoothing.

        Returns:
            The smoothed data with columns `"x"` and `"P"`
        """

        # Dataframe of smoothed estimates
        # The latest estimate cannot be improved
        smoothed = DataFrame(columns=["x", "P"])
        smoothed.loc[self.estimates.index[-1]] = {
            "x": self.estimates.iloc[-1].x,
            "P": self.estimates.iloc[-1].P,
        }

        # Recursively go back in time
        for i in self.estimates.index[-2::-1]:
            # Access next predictions and estimates for smoothing
            predicted_x = self.predictions.iloc[i + 1].x
            predicted_P = self.predictions.iloc[i + 1].P
            X = self.predictions.iloc[i + 1].X
            Y = self.predictions.iloc[i + 1].Y
            estimated_x = self.estimates.iloc[i].x
            estimated_P = self.estimates.iloc[i].P

            # Compute smoothing gain
            G = tensordot(
                self.cov_weights,
                [outer(x - estimated_x.T, y - predicted_x.T) for x, y in zip(X.T, Y.T)],
                axes=1,
            ) @ inv(predicted_P)

            # Append to smoothed DataFrame
            smoothed.loc[i] = {
                "x": estimated_x + G @ (smoothed.loc[i + 1].x - predicted_x),
                "P": estimated_P + G @ (smoothed.loc[i + 1].P - predicted_P) @ G.T,
            }

        return smoothed
1
pyrate/pyrate/sense/vision/__init__.py
Normal file
1
pyrate/pyrate/sense/vision/__init__.py
Normal file
@ -0,0 +1 @@
"""This package provides methods for visual perception."""
180
pyrate/pyrate/sense/vision/image_line.py
Normal file
180
pyrate/pyrate/sense/vision/image_line.py
Normal file
@ -0,0 +1,180 @@
"""
This module implements the ``ImageLine`` used by ``ObstacleLocator`` to construct the horizon in an image.
"""

from __future__ import annotations

# Standard library
from math import atan2
from math import cos
from math import sin
from math import sqrt
from math import tan

# Typing
from typing import cast
from typing import Tuple

# Numpy
import numpy
from numpy import arange
from numpy import arctan
from numpy import diff
from numpy import linspace
from numpy import ndarray
from numpy import pi


class ImageLine:

    """Represents a line in the image.

    The line can be constructed using either points, a height and an angle, or polar coordinates
    via the respective ``from_X`` methods.

    Args:
        image_shape: Shape of the image ``(width, height)``
        x_y_coordinates: x- and y-coordinates of both endpoints as ``((x1, x2), (y1, y2))``
        height: Height of the line in the image space
        angle: Angle of the line in the image
    """

    def __init__(
        self,
        image_shape: Tuple[int, int],
        x_y_coordinates: Tuple[Tuple[int, int], Tuple[int, int]],
        height: int,
        angle: float,
    ):
        self.image_width, self.image_height = image_shape
        self.x_y_coordinates = x_y_coordinates
        self.height = height
        self.angle = angle

        self.x_points = arange(x_y_coordinates[0][0], x_y_coordinates[0][1] + 1).astype(int)
        self.y_points = numpy.round(linspace(*x_y_coordinates[1], num=len(self.x_points))).astype(int)

    @classmethod
    def from_points(
        cls, image_shape: Tuple[int, int], points: Tuple[Tuple[int, int], Tuple[int, int]]
    ) -> ImageLine:
        """Construct an ``ImageLine`` from the two endpoints of the line (in the image space).

        Args:
            image_shape: Shape of the image ``(width, height)``
            points: End points of the line ``((x1, y1), (x2, y2))``

        Returns:
            New ``ImageLine`` from two points
        """

        x_coordinates = cast(Tuple[int, int], tuple(p[0] for p in points))
        y_coordinates = cast(Tuple[int, int], tuple(p[1] for p in points))

        height = numpy.sum(y_coordinates) / 2
        angle = atan2(diff(y_coordinates), diff(x_coordinates))

        return cls(
            image_shape=image_shape,
            x_y_coordinates=(x_coordinates, y_coordinates),
            height=height,
            angle=angle,
        )

    @classmethod
    def from_height_angle(
        cls, image_shape: Tuple[int, int], relative_height: float, angle: float
    ) -> ImageLine:
        """Construct an ``ImageLine`` from its relative height and angle.

        Note: This method of creation might create end_points and indices values that are
        out of bounds of the image if the angle of the represented line is too large.

        Args:
            image_shape: Shape of the image ``(width, height)``
            relative_height: Relative height of the line (based on ``floor(image_shape[1] / 2)``)
            angle: Angle of the line

        Returns:
            New ``ImageLine`` from height and angle
        """

        x_coordinates = (0, image_shape[0])
        diff_height = int(tan(angle) * image_shape[0] / 2)
        height = int(relative_height + image_shape[1] / 2)
        y_coordinates = (height - diff_height, height + diff_height)

        return cls(
            image_shape=image_shape,
            x_y_coordinates=(x_coordinates, y_coordinates),
            height=height,
            angle=angle,
        )

    @classmethod
    def from_polar(cls, image_shape: Tuple[int, int], radius: float, alpha: float) -> ImageLine:
        # pylint: disable=too-many-locals
        """Construct an ``ImageLine`` from its polar representation.

        The line is represented by its distance from the coordinate origin (radius) and the
        line rotation angle in radians.

        Note: This method of creation might create end_points and indices values that are
        out of bounds of the image if the angle of the represented line is too large.

        Args:
            image_shape: Shape of the image ``(width, height)``
            radius: Distance from the coordinate origin
            alpha: Line rotation angle in radians

        Returns:
            New ``ImageLine`` from the polar representation
        """

        cos_alpha, sin_alpha = cos(alpha), sin(alpha)

        # Line end pixels
        x_coordinates = (0, image_shape[0])
        y_coordinates = (int(radius / sin_alpha), int((radius - cos_alpha * image_shape[0]) / sin_alpha))

        # Pixels covered by the line
        x_points: ndarray = arange(0, image_shape[0])
        y_points: ndarray = numpy.round((radius - cos_alpha * x_points) / sin_alpha).astype(int)

        # Angle of the horizon
        angle = alpha - pi / 2

        # Height over the image center
        d_2, alpha_0 = sqrt(image_shape[1] ** 2 + image_shape[0] ** 2) / 2, arctan(
            image_shape[1] / image_shape[0]
        )
        height_rel = radius - d_2 * cos(alpha - alpha_0)
        height = int(height_rel + image_shape[1] // 2)

        line = cls(
            image_shape=image_shape,
            x_y_coordinates=(x_coordinates, y_coordinates),
            height=height,
            angle=angle,
        )
        line.x_points = x_points
        line.y_points = y_points
        return line

    @property
    def indices(self) -> Tuple[ndarray, ndarray]:
        """Returns the pixel coordinates of the line on the image.

        Returns:
            Two ``ndarray``s containing every x and y coordinate of pixels the line covers
        """

        return self.x_points, self.y_points

    @property
    def end_points(self) -> Tuple[Tuple[int, int], Tuple[int, int]]:
        """Returns the two end points defining the line.

        Returns:
            x, y coordinates of both end points as ``((x1, y1), (x2, y2))``
        """

        return (self.x_y_coordinates[0][0], self.x_y_coordinates[1][0]), (
            self.x_y_coordinates[0][1],
            self.x_y_coordinates[1][1],
        )
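
The three constructors cover the typical entry points. A sketch with illustrative values (note that ``from_polar`` divides by ``sin(alpha)``, so ``alpha`` must not be 0 or pi):

from pyrate.sense.vision.image_line import ImageLine

shape = (640, 480)  # (width, height)

# From two endpoints ((x1, y1), (x2, y2))
from_points = ImageLine.from_points(shape, ((0, 240), (640, 260)))

# From a height relative to the vertical image center and an angle
from_height = ImageLine.from_height_angle(shape, relative_height=10.0, angle=0.05)

# From the polar representation used by the Hough transform
from_polar = ImageLine.from_polar(shape, radius=250.0, alpha=1.55)

# Pixel coordinates covered by the line, and its two endpoints
x_points, y_points = from_polar.indices
(x1, y1), (x2, y2) = from_polar.end_points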
54
pyrate/pyrate/sense/vision/image_rectangle.py
Normal file
54
pyrate/pyrate/sense/vision/image_rectangle.py
Normal file
@ -0,0 +1,54 @@
"""
This module implements the representation of an obstacle or object in an image
(modelled as a bounding box), used by ``ObstacleLocator``.
"""

# Typing
from typing import Tuple


class ImageRectangle:

    """Represents an object in the image plane as a rectangular bounding box.

    Args:
        rectangle: x and y coordinates, width and height of the rectangle
        offset: Position offset (e.g. due to a subimage being used for detection)
        angle: Rotation offset (e.g. due to a rotated detection)
    """

    def __init__(
        self, rectangle: Tuple[int, int, int, int], offset: Tuple[int, int] = (0, 0), angle: float = 0
    ) -> None:
        self.x_rel, self.y_rel, self.width, self.height = rectangle
        self.offset = offset
        self._x_coord, self._y_coord = self.x_rel + offset[0], self.y_rel + offset[1]
        self.angle = angle

    def rectangle_to_corner(self, offset: bool = True) -> Tuple[Tuple[int, int], Tuple[int, int]]:
        """Returns bounding box corners for OpenCV drawing functions.

        Args:
            offset: If True, return coordinates offset by the offset specified when
                creating the ``ImageRectangle``. If False, ignore the offset.

        Returns:
            Coordinates of the upper left and lower right points of the rectangle ``(x1, y1), (x2, y2)``
        """

        if offset:
            x_res, y_res = self._x_coord, self._y_coord
        else:
            x_res, y_res = self.x_rel, self.y_rel
        return (x_res, y_res), (x_res + self.width, y_res + self.height)

    @property
    def bottom_center(self) -> Tuple[int, int]:
        """Returns the bottom center coordinates of the bounding box.

        Returns:
            x, y coordinates of the bottom center pixel
        """

        return self._x_coord + self.width // 2, self._y_coord + self.height
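
The offset handling is the main subtlety: coordinates are stored relative to the subimage and shifted on demand. A small sketch with illustrative values:

from pyrate.sense.vision.image_rectangle import ImageRectangle

# A 40x20 box detected at (100, 50) inside a subimage that starts 200 pixels down
box = ImageRectangle((100, 50, 40, 20), offset=(0, 200))

assert box.rectangle_to_corner() == ((100, 250), (140, 270))            # absolute coordinates
assert box.rectangle_to_corner(offset=False) == ((100, 50), (140, 70))  # subimage coordinates
assert box.bottom_center == (120, 270)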
143
pyrate/pyrate/sense/vision/obstacle_locator.py
Normal file
143
pyrate/pyrate/sense/vision/obstacle_locator.py
Normal file
@ -0,0 +1,143 @@
"""
This module implements a mechanism to construct the horizon line in an image,
as well as (in the future) find obstacles in the image and return their locations.
"""

# Typing
from typing import List
from typing import Tuple

# Numpy
import numpy
from numpy import arctan
from numpy import ndarray
from numpy import pi
from numpy import radians

# Scientific
from cv2 import blur
from cv2 import Canny
from cv2 import HoughLines
from cv2 import INTER_AREA
from cv2 import medianBlur
from cv2 import resize

# Auxiliary modules
from .image_line import ImageLine


class ObstacleLocator:  # pylint: disable=too-few-public-methods

    """Canny-based horizon extraction and (in the future) obstacle detection class.

    Args:
        image_width: Width of images in pixels
        image_height: Height of images in pixels
    """

    def __init__(self, image_width: int, image_height: int) -> None:
        # Precompute common image attributes
        self.image_width, self.image_height = image_width, image_height
        self._center = image_width // 2, image_height // 2
        self._min_angle = arctan(self.image_width / self.image_height)

    def _preprocess(self, image: ndarray, box_blur_size: int = 90, median_blur_size: int = 3) -> ndarray:
        """Preprocesses an image, reduces its resolution and smooths it.

        Args:
            image: Image to preprocess
            box_blur_size: The size of the box blur filter is calculated as
                ``image_height // box_blur_size``. Only modify the default value with prior testing.
            median_blur_size: Size of the median filter aperture. Must be
                odd and greater than 1; only modify the default value with prior testing.

        Returns:
            Preprocessed image
        """

        image = resize(image, (self.image_width, self.image_height), interpolation=INTER_AREA)
        blur_size = self.image_height // box_blur_size
        image = blur(image, (blur_size, blur_size))
        image = medianBlur(image, median_blur_size)
        return image

    def _detect_lines(  # pylint: disable=too-many-arguments
        self,
        image: ndarray,
        n_max: int,
        min_line_length: float,
        canny_threshold: int,
        angle_resolution: float = 0.2,
    ) -> Tuple[List[ImageLine], List[int]]:
        """Detect lines in an image using the Canny edge detector and the Hough transform.

        Args:
            image: Image in which lines should be detected
            n_max: Maximum number of lines that should be returned
            min_line_length: Line length threshold, relative to the image width
            canny_threshold: Threshold for the hysteresis procedure
            angle_resolution: Angle resolution of the accumulator, in degrees

        Returns:
            Detected lines and the number of edge pixels supporting each line
        """

        # Extract lines in the image
        edges = Canny(image, canny_threshold, canny_threshold * 2)
        lines = HoughLines(
            edges,
            1,
            radians(angle_resolution),
            int(self.image_width * min_line_length),
            min_theta=self._min_angle,
            max_theta=pi - self._min_angle,
        )
        if lines is None:
            return [], []

        lines = [
            ImageLine.from_polar((self.image_width, self.image_height), *line[0]) for line in lines[:n_max]
        ]
        line_lengths = self._evaluate_lines(edges, lines)

        return lines, line_lengths

    @staticmethod
    def _evaluate_lines(
        edges: ndarray,
        lines: List[ImageLine],
    ) -> List[int]:
        """Evaluate how strongly each line is supported by the detected edges.

        Args:
            edges: Edges in the image
            lines: ``ImageLine``s that shall be evaluated

        Returns:
            The number of edge pixels that lie on each line
        """

        # Number of edge points on each line
        votes = [numpy.sum(edges[line.y_points, line.x_points] > 0).astype(int) for line in lines]
        return votes

    def detect_horizon(
        self, img: ndarray, n_max: int = 3, min_visibility: float = 0.2, canny_threshold: int = 35
    ) -> Tuple[List[ImageLine], List[int]]:
        """Detects possible horizon line(s).

        Args:
            img: Image to analyze
            n_max: Maximum number of lines to extract
            min_visibility: Minimum length of a horizon line to be considered, relative to the image width
            canny_threshold: Canny threshold for edge detection

        Returns:
            Horizon line candidates (in the image space shrunk to ``(image_width, image_height)``
            as set in :meth:`__init__`!) and their respective lengths in edge pixels
        """

        img = self._preprocess(img)
        return self._detect_lines(img, n_max, min_visibility, canny_threshold)
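
End to end, the horizon detection only needs a frame and the target resolution. A sketch (the file name is illustrative, and grayscale input keeps the Canny step happy):

from cv2 import imread, IMREAD_GRAYSCALE

from pyrate.sense.vision.obstacle_locator import ObstacleLocator

locator = ObstacleLocator(image_width=640, image_height=480)

# Frames of any size are accepted; _preprocess resizes them to (640, 480)
frame = imread("frame.png", IMREAD_GRAYSCALE)
lines, lengths = locator.detect_horizon(frame)

if lines:
    horizon = lines[0]  # candidates arrive in Hough accumulator order
    print(horizon.height, horizon.angle, horizon.end_points)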