|
Alina Nitzke, Benjamin Niedermann, Luciana Fenoglio-Marc, Jürgen Kusche, and Jan-Henrik Haunert. Reconstructing the time-variable sea surface from tide gauge records using optimal data-dependent triangulations. Computers & Geosciences, 157:104920, 2021.
|
| Reconstructions of sea level prior to the satellite altimeter era are usually derived from tide gauge records; however most algorithms for this assume that modes of sea level variability are stationary which is not true over several decades. Here we suggest a method that is based on optimized data-dependent triangulations of the network of gauge stations. Data-dependent triangulations are triangulations of point sets that rely not only on 2D point positions but also on additional data (here: sea surface anomalies). In particular, min-error criteria have been suggested to construct triangulations that approximate a given surface. In this article, we show how data-dependent triangulations with min-error criteria can be used to reconstruct 2D maps of the sea surface anomaly over a longer time period, assuming that anomalies are continuously monitored at a sparse set of stations and, in addition, observations of a control surface is provided over a shorter time period. At the heart of our method is the idea to learn a min-error triangulation based on the control data that is available, and to use the learned triangulation subsequently to compute piece-wise linear surface models for epochs in which only observations from monitoring stations are given. Moreover, we combine our approach of min-error triangulation with k-order Delaunay triangulation to stabilize the triangles geometrically. We show that this approach is in particular advantageous for the reconstruction of the sea surface by combining tide gauge measurements (which are sparse in space but cover a long period back in time) with data of modern satellite altimetry (which have a high spatial resolution but cover only the last decades). We show how to learn a min-error triangulation and a min-error k-order Delaunay triangulation using an exact algorithm based on integer linear programming. We confront our reconstructions against the Delaunay triangulation which had been proposed earlier for sea-surface modeling and find superior quality. With real data for the North Sea we show that the min-error triangulation outperforms the Delaunay method substantially for reconstructions back in time up to 18 years, and the k-order Delaunay min-error triangulation even up to 21 years for k=2. With a running time of less than one second our approach would be applicable to areas with far greater extent than the North Sea.
@article{Nitzke2021104920,
abstract = {Reconstructions of sea level prior to the satellite altimeter era are usually derived from tide gauge records; however most algorithms for this assume that modes of sea level variability are stationary which is not true over several decades. Here we suggest a method that is based on optimized data-dependent triangulations of the network of gauge stations. Data-dependent triangulations are triangulations of point sets that rely not only on 2D point positions but also on additional data (here: sea surface anomalies). In particular, min-error criteria have been suggested to construct triangulations that approximate a given surface. In this article, we show how data-dependent triangulations with min-error criteria can be used to reconstruct 2D maps of the sea surface anomaly over a longer time period, assuming that anomalies are continuously monitored at a sparse set of stations and, in addition, observations of a control surface is provided over a shorter time period. At the heart of our method is the idea to learn a min-error triangulation based on the control data that is available, and to use the learned triangulation subsequently to compute piece-wise linear surface models for epochs in which only observations from monitoring stations are given. Moreover, we combine our approach of min-error triangulation with k-order Delaunay triangulation to stabilize the triangles geometrically. We show that this approach is in particular advantageous for the reconstruction of the sea surface by combining tide gauge measurements (which are sparse in space but cover a long period back in time) with data of modern satellite altimetry (which have a high spatial resolution but cover only the last decades). We show how to learn a min-error triangulation and a min-error k-order Delaunay triangulation using an exact algorithm based on integer linear programming. We confront our reconstructions against the Delaunay triangulation which had been proposed earlier for sea-surface modeling and find superior quality. With real data for the North Sea we show that the min-error triangulation outperforms the Delaunay method substantially for reconstructions back in time up to 18 years, and the k-order Delaunay min-error triangulation even up to 21 years for k=2. With a running time of less than one second our approach would be applicable to areas with far greater extent than the North Sea.},
author = {Alina Nitzke and Benjamin Niedermann and Luciana Fenoglio-Marc and J\"{u}rgen Kusche and Jan-Henrik Haunert},
doi = {10.1016/j.cageo.2021.104920},
issn = {0098-3004},
journal = {Computers \& Geosciences},
keywords = {Historical evolution of sea surfaces, Time-variable sea-surface reconstruction, Data-dependent triangulations, Higher-order Delaunay triangulations, Min-error triangulations, Mathematical programming},
pages = {104920},
title = {Reconstructing the time-variable sea surface from tide gauge records using optimal data-dependent triangulations},
url = {https://www.sciencedirect.com/science/article/pii/S0098300421002107},
volume = {157},
year = {2021}
}
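To illustrate the reconstruction step described in the abstract above, the following minimal Python sketch evaluates the piecewise-linear surface defined by a fixed (e.g., previously learned) triangulation at arbitrary query points via barycentric interpolation. It covers only the surface evaluation, not the integer-linear-programming step that selects the min-error triangulation; all variable names (stations, anomalies, triangles) are illustrative and not taken from the paper.

import numpy as np

def interpolate_on_triangulation(stations, anomalies, triangles, query_points):
    """Evaluate the piecewise-linear surface defined by a fixed triangulation.

    stations:     (n, 2) array of 2D station positions (triangulation vertices)
    anomalies:    (n,)   array of sea-surface anomalies observed at the stations
    triangles:    (m, 3) array of vertex indices (the learned triangulation)
    query_points: (q, 2) array of positions at which the surface is reconstructed

    Returns a (q,) array; NaN where a query point lies outside all triangles.
    """
    values = np.full(len(query_points), np.nan)
    for qi, p in enumerate(query_points):
        for ia, ib, ic in triangles:
            a, b, c = stations[ia], stations[ib], stations[ic]
            # Solve for the barycentric coordinates of p in triangle (a, b, c).
            T = np.column_stack((b - a, c - a))
            try:
                l2, l3 = np.linalg.solve(T, p - a)
            except np.linalg.LinAlgError:
                continue  # degenerate triangle
            l1 = 1.0 - l2 - l3
            if min(l1, l2, l3) >= -1e-9:  # p lies inside (or on the boundary of) the triangle
                values[qi] = l1 * anomalies[ia] + l2 * anomalies[ib] + l3 * anomalies[ic]
                break
    return values

Given station coordinates, the anomalies observed at one epoch and the learned triangle list, the function returns the reconstructed anomaly at each query point; points outside the triangulated region remain NaN.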
|
|
Youness Dehbi, Johannes Leonhardt, Johannes Oehrlein, and Jan-Henrik Haunert. Optimal scan planning with enforced network connectivity for the acquisition of three-dimensional indoor models. ISPRS Journal of Photogrammetry and Remote Sensing, 180:103-116, 2021.
|
| The positioning of laser scanners for indoor surveying is still a time and cost expensive process. This article proposes an optimization approach for computing an admissible sensor placement with the minimal number of sensor view point positions. The approach facilitates both wall and floor surveying based on a floorplan of the study object. Optimal solutions are calculated by solving an Integer Linear Program
that respects manufacturer specifications incorporating constraints such as full coverage. To enable a subsequent co-registration of the scans, a flow-based constraint formulation ensuring the connectivity of the selected positions in an appropriately defined geometric intersection graph is introduced. The method has been evaluated on real-world objects and compared to heuristic methods that have frequently been used for related problems. Our solutions outperform heuristic approaches regarding both running time and the number of TLS stations. In a case study with a larger floorplan of an institute building and with different parameter settings,
our method resulted in a solution with at least two stations less compared to a solution generated by an expert.
@article{dehbi2021optimalscan,
abstract = {The positioning of laser scanners for indoor surveying is still a time and cost expensive process. This article proposes an optimization approach for computing an admissible sensor placement with the minimal number of sensor view point positions. The approach facilitates both wall and floor surveying based on a floorplan of the study object. Optimal solutions are calculated by solving an Integer Linear Program
that respects manufacturer specifications incorporating constraints such as full coverage. To enable a subsequent co-registration of the scans, a flow-based constraint formulation ensuring the connectivity of the selected positions in an appropriately defined geometric intersection graph is introduced. The method has been evaluated on real-world objects and compared to heuristic methods that have frequently been used for related problems. Our solutions outperform heuristic approaches regarding both running time and the number of TLS stations. In a case study with a larger floorplan of an institute building and with different parameter settings,
our method resulted in a solution with at least two stations less compared to a solution generated by an expert.},
author = {Dehbi, Youness and Leonhardt, Johannes and Oehrlein, Johannes and Haunert, Jan-Henrik},
doi = {10.1016/j.isprsjprs.2021.07.013},
issn = {0924-2716},
journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
pages = {103-116},
title = {Optimal scan planning with enforced network connectivity for the acquisition of three-dimensional indoor models},
url = {https://www.sciencedirect.com/science/article/pii/S0924271621001982},
volume = {180},
year = {2021}
}
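As a rough illustration of the optimization core mentioned in the abstract, the sketch below formulates the full-coverage requirement as a small binary program with the PuLP library (assumed to be installed): minimize the number of selected scanner positions such that every wall or floor patch is covered. The flow-based connectivity constraints and the manufacturer-specific restrictions described in the paper are deliberately omitted, and the covers matrix is a made-up toy instance.

import pulp

# Hypothetical input: covers[i][j] is True if candidate position i sees target patch j.
covers = [
    [True,  True,  False, False],
    [False, True,  True,  False],
    [False, False, True,  True ],
    [True,  False, False, True ],
]
n_pos, n_targets = len(covers), len(covers[0])

prob = pulp.LpProblem("scan_planning_cover", pulp.LpMinimize)
x = [pulp.LpVariable(f"x_{i}", cat=pulp.LpBinary) for i in range(n_pos)]

# Objective: minimize the number of selected scanner positions.
prob += pulp.lpSum(x)

# Full-coverage constraints: every patch must be seen by at least one selected position.
for j in range(n_targets):
    prob += pulp.lpSum(x[i] for i in range(n_pos) if covers[i][j]) >= 1

prob.solve(pulp.PULP_CBC_CMD(msg=False))
selected = [i for i in range(n_pos) if x[i].value() > 0.5]
print("selected scanner positions:", selected)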
|
|
Annika Bonerath, Jan-Henrik Haunert, Joseph S. B. Mitchell, and Benjamin Niedermann. Shortcut hulls: vertex-restricted outer simplifications of polygons. In Meng He, and Don Sheehy, editors. Proceedings of the 33rd Canadian Conference on Computational Geometry, pages 12-23. 2021.
|
| Let P be a crossing-free polygon and C a set of shortcuts,
where each shortcut is a directed straight-line segment
connecting two vertices of P. A shortcut hull of
P is another crossing-free polygon that encloses P and
whose oriented boundary is composed of elements from
C. Shortcut hulls find their application in geo-related
problems such as the simplification of contour lines. We
aim at a shortcut hull that linearly balances the enclosed
area and perimeter. If no holes in the shortcut hull
are allowed, the problem admits a straight-forward solution
via shortest paths. For the more challenging case
that the shortcut hull may contain holes, we present a
polynomial-time algorithm that is based on computing
a constrained, weighted triangulation of the input polygon’s
exterior. We use this problem as a starting point
for investigating further variants, e.g., restricting the
number of edges or bends. We demonstrate that shortcut
hulls can be used for drawing the rough extent of
point sets as well as for the schematization of polygons.
@inproceedings{Bonerath2021,
abstract = {Let P be a crossing-free polygon and C a set of shortcuts,
where each shortcut is a directed straight-line segment
connecting two vertices of P. A shortcut hull of
P is another crossing-free polygon that encloses P and
whose oriented boundary is composed of elements from
C. Shortcut hulls find their application in geo-related
problems such as the simplification of contour lines. We
aim at a shortcut hull that linearly balances the enclosed
area and perimeter. If no holes in the shortcut hull
are allowed, the problem admits a straight-forward solution
via shortest paths. For the more challenging case
that the shortcut hull may contain holes, we present a
polynomial-time algorithm that is based on computing
a constrained, weighted triangulation of the input polygon’s
exterior. We use this problem as a starting point
for investigating further variants, e.g., restricting the
number of edges or bends. We demonstrate that shortcut
hulls can be used for drawing the rough extent of
point sets as well as for the schematization of polygons.},
author = {Annika Bonerath and Jan-Henrik Haunert and Joseph S. B. Mitchell and Benjamin Niedermann},
booktitle = {Proceedings of the 33rd Canadian Conference on Computational Geometry},
editor = {Meng He and Don Sheehy},
pages = {12-23},
title = {Shortcut Hulls: Vertex-restricted Outer Simplifications of Polygons},
url = {https://projects.cs.dal.ca/cccg2021/wordpress/wp-content/uploads/2021/08/CCCG2021.pdf},
year = {2021}
}
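The abstract notes that the hole-free case admits a shortest-path solution. The following Python sketch spells out one simple reading of that idea as a dynamic program over forward shortcuts: each candidate shortcut is charged lam times the area it encloses between the boundary chain and the segment (via the shoelace formula) plus (1 - lam) times its length. For simplicity the sketch assumes that vertex 0 of P lies on the hull and that shortcuts are given as index pairs (i, j) with i < j; the paper's algorithm does not need these assumptions.

import math

def shortcut_hull_no_holes(P, shortcuts, lam):
    """Hole-free shortcut hull by dynamic programming over forward shortcuts.

    P:         polygon vertices in counter-clockwise order (list of (x, y) tuples)
    shortcuts: index pairs (i, j) with i < j <= len(P); j == len(P) wraps back to vertex 0
    lam:       balance factor in [0, 1] between enclosed area (lam) and perimeter (1 - lam)
    """
    n = len(P)

    def pocket_area(i, j):
        # Area between the boundary chain P[i..j] and the straight shortcut (shoelace formula).
        chain = [P[k % n] for k in range(i, j + 1)]
        s = sum(x1 * y2 - x2 * y1 for (x1, y1), (x2, y2) in zip(chain, chain[1:] + chain[:1]))
        return abs(s) / 2.0

    def cost(i, j):
        (x1, y1), (x2, y2) = P[i], P[j % n]
        return lam * pocket_area(i, j) + (1 - lam) * math.hypot(x2 - x1, y2 - y1)

    # Every original boundary edge is also usable (it encloses no extra area).
    edges = set(shortcuts) | {(i, i + 1) for i in range(n)}

    best = [math.inf] * (n + 1)
    pred = [None] * (n + 1)
    best[0] = 0.0
    for j in range(1, n + 1):
        for (i, jj) in edges:
            if jj == j and best[i] + cost(i, j) < best[j]:
                best[j] = best[i] + cost(i, j)
                pred[j] = i

    # Recover the shortcuts that form the optimal hull boundary.
    boundary, j = [], n
    while j != 0:
        boundary.append((pred[j], j % n))
        j = pred[j]
    return best[n], boundary[::-1]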
|
|
Jan-Henrik Haunert. Interaktive Karten für große Datensätze - eine algorithmische und kartographische Herausforderung. In Geoinformationssysteme 2021 - Beiträge zur 8. Münchner GI-Runde, pages 63-64. 2021.
|
| @inproceedings{Haunert2021,
author = {Jan-Henrik Haunert},
booktitle = {Geoinformationssysteme 2021 -- Beitr\"{a}ge zur 8. M\"{u}nchner GI-Runde},
url = {https://www.geoinfo.uni-bonn.de/publikationen/pdf/Haunert_Abstract_GI-Runde_2021.pdf},
pages = {63--64},
title = {{I}nteraktive {K}arten für große {D}atens\"{a}tze -- eine algorithmische und kartographische {H}erausforderung},
year = {2021}
}
|
|
Anna Arutyunova, Anne Driemel, Jan-Henrik Haunert, Herman Haverkort, Petra Mutzel, and Heiko Röglin. Minimum-error triangulation is NP-hard. In Proceedings of the 37th European Workshop on Computational Geometry (EuroCG'21). 2021.
|
| Given two sets of points in the plane, $P$ and $R$, with a real-valued function defined on $P ∪ R$, we define for any triangulation defined on $P$ the vertical error at the points of $R$ and wish to find a triangulation that minimizes this error. We show that this problem is NP-hard to approximate within any approximation factor.
@inproceedings{Arutyunova2021,
abstract = {Given two sets of points in the plane, $P$ and $R$, with a real-valued function defined on $P \cup R$, we define for any triangulation defined on $P$ the vertical error at the points of $R$ and wish to find a triangulation that minimizes this error. We show that this problem is NP-hard to approximate within any approximation factor.},
author = {Anna Arutyunova and Anne Driemel and Jan-Henrik Haunert and Herman Haverkort and Petra Mutzel and Heiko R\"{o}glin},
booktitle = {Proceedings of the 37th European Workshop on Computational Geometry (EuroCG'21)},
title = {Minimum-Error Triangulation is {NP}-hard},
year = {2021}
}
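For readers unfamiliar with the problem, one plausible way to write down the objective whose approximation is shown to be NP-hard is the following (the exact error measure used in the paper may differ, e.g., a squared instead of an absolute error):

% T is a triangulation of P, f is the given real-valued function on P \cup R,
% and f_T is the piecewise-linear interpolation of f over the triangles of T.
\[
  \operatorname{err}(T) \;=\; \sum_{r \in R} \bigl| f(r) - f_T(r) \bigr|,
  \qquad
  f_T(r) \;=\; \lambda_1 f(p_1) + \lambda_2 f(p_2) + \lambda_3 f(p_3),
\]
where $p_1, p_2, p_3 \in P$ are the vertices of the triangle of $T$ that contains $r$ and
$\lambda_1, \lambda_2, \lambda_3 \ge 0$ with $\lambda_1 + \lambda_2 + \lambda_3 = 1$ are the
barycentric coordinates of $r$ in that triangle. The hardness result states that
$\min_T \operatorname{err}(T)$ cannot be approximated within any factor in polynomial time
unless P $=$ NP.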
|
|
Jakob Geiger, Sabine Cornelsen, Jan-Henrik Haunert, Philipp Kindermann, Tamara Mchedlidze, Martin Nöllenburg, Yoshio Okamoto, and Alexander Wolff. ClusterSets: Optimizing planar clusters in categorical point data. Computer Graphics Forum, 0(0):0, 2021. EuroVis 2021 special issue, accepted for publication
|
| In geographic data analysis, one is often given point data of
different categories (such as facilities of a university categorized
by department). Drawing upon recent research on
set visualization, we want to visualize category membership by
connecting points of the same category with visual links.
Existing approaches that follow this path usually insist on
connecting all members of a category, which may lead to many
crossings and visual clutter. We propose an approach that
avoids crossings between connections of different categories
completely. Instead of connecting \emphall data points of the same
category, we subdivide categories into smaller, local clusters where
needed. We do a case study comparing the legibility of drawings
produced by our approach and those by existing approaches.
In our problem formulation, we are additionally given a graph $G$ on
the data points whose edges express some sort of proximity. Our aim
is to find a subgraph $G'$ of $G$ with the following properties:
(i)~edges connect only data points of the same category, (ii)~no two
edges cross, and (iii)~the number of connected components (clusters)
is minimized. We then visualize the clusters in~$G'$.
For arbitrary graphs, the resulting
optimization problem, Cluster Minimization, is NP-hard (even to
approximate). Therefore, we introduce two heuristics. We do an
extensive benchmark test on real-world data. Comparisons with exact
solutions indicate that our heuristics do astonishing well for
certain relative-neighborhood graphs.
@article{geiger2021,
abstract = {In geographic data analysis, one is often given point data of
different categories (such as facilities of a university categorized
by department). Drawing upon recent research on
set visualization, we want to visualize category membership by
connecting points of the same category with visual links.
Existing approaches that follow this path usually insist on
connecting all members of a category, which may lead to many
crossings and visual clutter. We propose an approach that
avoids crossings between connections of different categories
completely. Instead of connecting \emph{all} data points of the same
category, we subdivide categories into smaller, local clusters where
needed. We do a case study comparing the legibility of drawings
produced by our approach and those by existing approaches.
In our problem formulation, we are additionally given a graph $G$ on
the data points whose edges express some sort of proximity. Our aim
is to find a subgraph $G'$ of $G$ with the following properties:
(i)~edges connect only data points of the same category, (ii)~no two
edges cross, and (iii)~the number of connected components (clusters)
is minimized. We then visualize the clusters in~$G'$.
For arbitrary graphs, the resulting
optimization problem, Cluster Minimization, is NP-hard (even to
approximate). Therefore, we introduce two heuristics. We do an
extensive benchmark test on real-world data. Comparisons with exact
solutions indicate that our heuristics do astonishing well for
certain relative-neighborhood graphs.},
author = {Jakob Geiger and Sabine Cornelsen and Jan-Henrik Haunert and Philipp Kindermann and Tamara Mchedlidze and Martin N\"{o}llenburg and Yoshio Okamoto and Alexander Wolff},
journal = {Computer Graphics Forum},
note = {EuroVis 2021 special issue, accepted for publication},
number = {0},
pages = {0},
title = {{ClusterSets}: {O}ptimizing Planar Clusters in Categorical Point Data},
volume = {0},
year = {2021}
}
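A naive greedy baseline (not one of the heuristics from the paper) conveys the flavor of the Cluster Minimization objective: process same-category proximity edges by increasing length and keep an edge only if it merges two clusters and does not cross any previously kept edge. Points are assumed to be given as coordinate tuples; the crossing test treats edges that share an endpoint as non-crossing.

import math

def orient(a, b, c):
    return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])

def segments_cross(p1, p2, q1, q2):
    """True if the open segments p1p2 and q1q2 properly cross (shared endpoints allowed)."""
    if len({p1, p2, q1, q2}) < 4:
        return False
    d1, d2 = orient(p1, p2, q1), orient(p1, p2, q2)
    d3, d4 = orient(q1, q2, p1), orient(q1, q2, p2)
    return (d1 * d2 < 0) and (d3 * d4 < 0)

def greedy_cluster_sets(points, categories, edges):
    """Greedy baseline for Cluster Minimization.

    points:     list of (x, y) tuples; categories: category label per point;
    edges:      candidate proximity edges of G as index pairs (i, j).
    Returns a crossing-free, same-category edge set G'.
    """
    parent = list(range(len(points)))
    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i

    candidates = [(i, j) for i, j in edges if categories[i] == categories[j]]
    candidates.sort(key=lambda e: math.dist(points[e[0]], points[e[1]]))

    chosen = []
    for i, j in candidates:
        if find(i) == find(j):
            continue  # would not reduce the number of clusters
        if any(segments_cross(points[i], points[j], points[a], points[b]) for a, b in chosen):
            continue  # would cross a previously selected edge
        chosen.append((i, j))
        parent[find(i)] = find(j)
    return chosen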
|
|
Sven Gedicke, Johannes Oehrlein, and Jan-Henrik Haunert. Aggregating land-use polygons considering line features as separating map elements. Cartography and Geographic Information Science, 48(2):124-139, 2021.
|
| Map generalization is the process of deriving small-scale target maps from a large-scale source map or database while preserving valuable information. In this paper we focus on topographic data, in particular areas of different land-use classes and line features representing the road network. When reducing the map scale, some areas need to be merged to larger composite regions. This process is known as area aggregation. Given a planar partition of areas, one usually aims to build geometrically compact regions of sufficient size while keeping class changes small. Since line features (e.g. roads) are perceived as separating elements in a map, we suggest integrating them into the process of area aggregation. Our aim is that boundaries of regions coincide with line features in such a way that strokes (i.e. chains of line features with small angles of deflection) are not broken into short sections. Complementing the criteria of compact regions and preserving land-use information, we consider this aim as a third criterion. Regarding all three criteria, we formalize an optimization problem and solve it with a heuristic approach using simulated annealing. Our evaluation is based on experiments with different parameter settings. In particular, we compare results of a baseline method that considers two criteria, namely compactness and class changes, with results of our new method that additionally considers our stroke-based criterion. Our results show that this third criterion can be substantially improved while keeping the quality with respect to the original two criteria on a similar level.
@article{GedickeCaGIS2021,
abstract = {Map generalization is the process of deriving small-scale target maps from a large-scale source map or database while preserving valuable information. In this paper we focus on topographic data, in particular areas of different land-use classes and line features representing the road network. When reducing the map scale, some areas need to be merged to larger composite regions. This process is known as area aggregation. Given a planar partition of areas, one usually aims to build geometrically compact regions of sufficient size while keeping class changes small. Since line features (e.g. roads) are perceived as separating elements in a map, we suggest integrating them into the process of area aggregation. Our aim is that boundaries of regions coincide with line features in such a way that strokes (i.e. chains of line features with small angles of deflection) are not broken into short sections. Complementing the criteria of compact regions and preserving land-use information, we consider this aim as a third criterion. Regarding all three criteria, we formalize an optimization problem and solve it with a heuristic approach using simulated annealing. Our evaluation is based on experiments with different parameter settings. In particular, we compare results of a baseline method that considers two criteria, namely compactness and class changes, with results of our new method that additionally considers our stroke-based criterion. Our results show that this third criterion can be substantially improved while keeping the quality with respect to the original two criteria on a similar level.},
author = {Sven Gedicke and Johannes Oehrlein and Jan-Henrik Haunert},
doi = {10.1080/15230406.2020.1851613},
journal = {Cartography and Geographic Information Science},
number = {2},
pages = {124--139},
title = {Aggregating land-use polygons considering line features as separating map elements},
volume = {48},
year = {2021}
}
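The heuristic mentioned in the abstract is a simulated-annealing search. The skeleton below shows only the generic loop; the cost and neighbor functions are placeholders for the problem-specific parts (class changes, compactness and the stroke-based criterion, and a move that reassigns one polygon to an adjacent region), and the toy example at the end merely demonstrates that the loop runs.

import math
import random

def simulated_annealing(initial_state, cost, random_neighbor,
                        t_start=1.0, t_end=1e-3, cooling=0.995):
    """Generic simulated-annealing loop of the kind used for area aggregation.

    cost(state) -> float combines the optimization criteria; random_neighbor(state)
    -> state applies one local move.  Both callables are problem-specific placeholders.
    """
    state, best = initial_state, initial_state
    t = t_start
    while t > t_end:
        candidate = random_neighbor(state)
        delta = cost(candidate) - cost(state)
        if delta <= 0 or random.random() < math.exp(-delta / t):
            state = candidate
        if cost(state) < cost(best):
            best = state
        t *= cooling
    return best

# Toy usage: reduce the number of distinct labels in a sequence while penalizing
# label changes between neighbors (a stand-in for the real aggregation cost).
labels = [random.randint(0, 3) for _ in range(20)]
toy_cost = lambda s: len(set(s)) + sum(a != b for a, b in zip(s, s[1:]))
def toy_neighbor(s):
    s = list(s)
    s[random.randrange(len(s))] = random.choice(s)
    return s
print(simulated_annealing(labels, toy_cost, toy_neighbor))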
|
|
L. Lucks, L. Klingbeil, L. Plümer, and Y. Dehbi. Improving trajectory estimation using 3d city models and kinematic point clouds. Transactions in GIS, 25(1):238-260, 2021.
|
| Accurate and robust positioning of vehicles in urban environments is of high importance for autonomous driving or mobile mapping. In mobile mapping systems, a simultaneous mapping of the environment using laser scanning and an accurate positioning using GNSS is targeted. This requirement is often not guaranteed in shadowed cities where GNSS signals are usually disturbed, weak or even unavailable. We propose a novel approach which incorporates prior knowledge, i.e. 3D city
model of the environment, and improves the trajectory. The recorded point cloud is matched with the semantic city model using a point-to-plane ICP. A pre-classification step enables an informed sampling of appropriate matching points. Random Forest is used as classifier to discriminate between facade and remaining points. Local inconsistencies are tackled by a segment-wise partitioning of the point cloud where an interpolation guarantees a seamless transition between the segments. The general
@article{dehbi2020improving,
abstract = {Accurate and robust positioning of vehicles in urban environments is of high importance for autonomous driving or mobile mapping. In mobile mapping systems, a simultaneous mapping of the environment using laser scanning and an accurate positioning using GNSS is targeted. This requirement is often not guaranteed in shadowed cities where GNSS signals are usually disturbed, weak or even unavailable. We propose a novel approach which incorporates prior knowledge, i.e. 3D city
model of the environment, and improves the trajectory. The recorded point cloud is matched with the semantic city model using a point-to-plane ICP. A pre-classification step enables an informed sampling of appropriate matching points. Random Forest is used as classifier to discriminate between facade and remaining points. Local inconsistencies are tackled by a segment-wise partitioning of the point cloud where an interpolation guarantees a seamless transition between the segments. The general},
author = {Lucks, L. and Klingbeil, L. and Pl\"{u}mer, L. and Dehbi, Y.},
doi = {10.1111/tgis.12719},
journal = {Transactions in GIS},
number = {1},
pages = {238-260},
title = {Improving Trajectory Estimation Using 3D City Models and Kinematic Point Clouds},
volume = {25},
year = {2021}
}
|
|
Sujoy Bhore, Jan-Henrik Haunert, Fabian Klute, Guangping Li, and Martin Nöllenburg. Balanced independent and dominating sets on colored interval graphs. In Proc. 47th International Conference on Current Trends in Theory and Practice of Computer Science (SOFSEM 2021), pages 89-103. Springer International Publishing, 2021.
|
| We study two new versions of independent and dominating set problems on vertex-colored interval graphs, namely f-Balanced Independent Set (f-BIS) and f-Balanced Dominating Set (f-BDS). Let G=(V,E) be an interval graph with a color assignment function γ: V → {1,…,k} that maps all vertices in G onto k colors. A subset of vertices S ⊆ V is called f-balanced if S contains f vertices from each color class. In the f-BIS and f-BDS problems, the objective is to compute an independent set or a dominating set that is f-balanced. We show that both problems are NP-complete even on proper interval graphs. For the BIS problem on interval graphs, we design two FPT algorithms, one parameterized by (f, k) and the other by the vertex cover number of G. Moreover, for an optimization variant of BIS on interval graphs, we present a polynomial time approximation scheme (PTAS) and an O(n log n) time 2-approximation algorithm.
@inproceedings{Bhore2021,
abstract = {We study two new versions of independent and dominating set problems on vertex-colored interval graphs, namely f-Balanced Independent Set (f-BIS) and f-Balanced Dominating Set (f-BDS). Let $G=(V,E)$ be an interval graph with a color assignment function $\gamma: V \rightarrow \{1,\ldots,k\}$ that maps all vertices in G onto k colors. A subset of vertices $S \subseteq V$ is called f-balanced if S contains f vertices from each color class. In the f-BIS and f-BDS problems, the objective is to compute an independent set or a dominating set that is f-balanced. We show that both problems are NP-complete even on proper interval graphs. For the BIS problem on interval graphs, we design two FPT algorithms, one parameterized by (f, k) and the other by the vertex cover number of G. Moreover, for an optimization variant of BIS on interval graphs, we present a polynomial time approximation scheme (PTAS) and an $O(n \log n)$ time 2-approximation algorithm.},
address = {Cham},
author = {Bhore, Sujoy and Haunert, Jan-Henrik and Klute, Fabian and Li, Guangping and N{\"o}llenburg, Martin},
booktitle = {Proc. 47th International Conference on Current Trends in Theory and Practice of Computer Science (SOFSEM 2021)},
doi = {10.1007/978-3-030-67731-2_7},
isbn = {978-3-030-67731-2},
pages = {89--103},
publisher = {Springer International Publishing},
title = {Balanced Independent and Dominating Sets on Colored Interval Graphs},
year = {2021}
}
|
|
Sven Gedicke, Annika Bonerath, Benjamin Niedermann, and Jan-Henrik Haunert. Zoomless Maps: External Labeling Methods for the Interactive Exploration of Dense Point Sets at a Fixed Map Scale. IEEE Transactions on Visualization and Computer Graphics, 27(2):1247-1256, 2021. https://youtu.be/IuMhk8jp54c
|
| Visualizing spatial data on small-screen devices such as
smartphones and smartwatches poses new challenges in computational
cartography. The current interfaces for map exploration require
their users to zoom in and out frequently. Indeed, zooming and
panning are tools suitable for choosing the map extent corresponding
to an area of interest. They are not as suitable, however, for
resolving the graphical clutter caused by a high feature
density since zooming in to a large map scale leads to a loss of
context. Therefore, in this paper, we present new external labeling methods that allow a user to navigate through dense sets of
points of interest while keeping the current map extent fixed. We provide a unified model, in which labels are
placed at the boundary of the map and visually associated with the
corresponding features via connecting lines, which are called
leaders. Since the screen space is limited, labeling all features
at the same time is impractical. Therefore, at any time, we label a
subset of the features. We offer interaction techniques to change
the current selection of features systematically and, thus, give the
user access to all features. We distinguish three methods, which
allow the user either to slide the labels along the bottom side of the map or
to browse the labels based on pages or stacks. We present a generic
algorithmic framework that provides us with the possibility of
expressing the different variants of interaction techniques as
optimization problems in a unified way. We propose both exact
algorithms and fast and simple heuristics that solve the
optimization problems taking into account different criteria such as
the ranking of the labels, the total leader length as well as the
distance between leaders. In experiments on real-world data we
evaluate these algorithms and discuss the three variants with
respect to their strengths and weaknesses proving the flexibility of
the presented algorithmic framework.
@article{gbnh-zm-21,
abstract = {Visualizing spatial data on small-screen devices such as
smartphones and smartwatches poses new challenges in computational
cartography. The current interfaces for map exploration require
their users to zoom in and out frequently. Indeed, zooming and
panning are tools suitable for choosing the map extent corresponding
to an area of interest. They are not as suitable, however, for
resolving the graphical clutter caused by a high feature
density since zooming in to a large map scale leads to a loss of
context. Therefore, in this paper, we present new external labeling methods that allow a user to navigate through dense sets of
points of interest while keeping the current map extent fixed. We provide a unified model, in which labels are
placed at the boundary of the map and visually associated with the
corresponding features via connecting lines, which are called
leaders. Since the screen space is limited, labeling all features
at the same time is impractical. Therefore, at any time, we label a
subset of the features. We offer interaction techniques to change
the current selection of features systematically and, thus, give the
user access to all features. We distinguish three methods, which
allow the user either to slide the labels along the bottom side of the map or
to browse the labels based on pages or stacks. We present a generic
algorithmic framework that provides us with the possibility of
expressing the different variants of interaction techniques as
optimization problems in a unified way. We propose both exact
algorithms and fast and simple heuristics that solve the
optimization problems taking into account different criteria such as
the ranking of the labels, the total leader length as well as the
distance between leaders. In experiments on real-world data we
evaluate these algorithms and discuss the three variants with
respect to their strengths and weaknesses proving the flexibility of
the presented algorithmic framework.},
author = {Gedicke, Sven and Bonerath, Annika and Niedermann, Benjamin and Haunert, Jan-Henrik},
doi = {10.1109/TVCG.2020.3030399},
journal = {IEEE Transactions on Visualization and Computer Graphics},
note = {https://youtu.be/IuMhk8jp54c},
number = {2},
pages = {1247--1256},
title = {{Z}oomless {M}aps: {E}xternal {L}abeling {M}ethods for the {I}nteractive {E}xploration of {D}ense {P}oint {S}ets at a {F}ixed {M}ap {S}cale},
volume = {27},
year = {2021}
}
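One small ingredient of such boundary labelings can be illustrated with a standard assignment problem: given the currently selected features and fixed label slots on the map boundary, minimize the total leader length with the Hungarian method (scipy's linear_sum_assignment). This is only a sketch of a single sub-step; it ignores leader crossings, label ranking and the interaction techniques that the paper's framework optimizes, and the coordinates below are made up.

import numpy as np
from scipy.optimize import linear_sum_assignment

def assign_labels_to_slots(features, slots):
    """Assign each selected feature to one boundary label slot, minimizing total leader length.

    features: (n, 2) array of feature positions inside the map
    slots:    (m, 2) array of label anchor positions on the map boundary, m >= n
    Returns a list of (feature_index, slot_index) pairs.
    """
    features, slots = np.asarray(features, float), np.asarray(slots, float)
    # cost[i, j] = straight-line leader length from feature i to slot j
    cost = np.linalg.norm(features[:, None, :] - slots[None, :, :], axis=2)
    rows, cols = linear_sum_assignment(cost)
    return list(zip(rows.tolist(), cols.tolist()))

# Example: three features labeled at four slots along the bottom edge of a unit-square map.
feats = [(0.2, 0.7), (0.5, 0.4), (0.8, 0.6)]
slots = [(0.125, 0.0), (0.375, 0.0), (0.625, 0.0), (0.875, 0.0)]
print(assign_labels_to_slots(feats, slots))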
|
|
Y. Dehbi, A. Henn, G. Gröger, V. Stroh, and L. Plümer. Robust and fast reconstruction of complex roofs with active sampling from 3d point clouds. Transactions in GIS, 25(1):112-133, 2021.
|
| This article proposes a novel method for the 3D-reconstruction of LoD2 buildings from LIDAR data. We propose an active sampling strategy which applies a cascade of filters focusing on promising samples in an early stage, thus avoiding the pitfalls of RANSAC based approaches. Filters are based on prior knowledge represented by (non-parametric) density distributions. In our approach samples are pairs of surflets, i.e. 3D points together with normal vectors derived from a plane approximation of
their neighborhood. Surflet pairs provide parameters for model candidates such as azimuth, inclination and ridge height, as well as parameters estimating internal precision and consistency. This provides a ranking of roof model candidates and leads to a small number of promising hypotheses. Building footprints are derived in a preprocessing
step using machine learning methods, in particular Support Vector Machines (SVM).
@article{dehbi2020robust,
abstract = {This article proposes a novel method for the 3D-reconstruction of LoD2 buildings from LIDAR data. We propose an active sampling strategy which applies a cascade of filters focusing on promising samples in an early stage, thus avoiding the pitfalls of RANSAC based approaches. Filters are based on prior knowledge represented by (non-parametric) density distributions. In our approach samples are pairs of surflets, i.e. 3D points together with normal vectors derived from a plane approximation of
their neighborhood. Surflet pairs provide parameters for model candidates such as azimuth, inclination and ridge height, as well as parameters estimating internal precision and consistency. This provides a ranking of roof model candidates and leads to a small number of promising hypotheses. Building footprints are derived in a preprocessing
step using machine learning methods, in particular Support Vector Machines (SVM).},
author = {Dehbi, Y. and Henn, A. and Gr\"{o}ger, G. and Stroh, V. and Pl\"{u}mer, L.},
doi = {10.1111/tgis.12659},
journal = {Transactions in GIS},
number = {1},
pages = {112-133},
title = {Robust and Fast Reconstruction of Complex Roofs with Active Sampling from 3D Point Clouds},
volume = {25},
year = {2021}
}
|
|
Y. Dehbi, S. Koppers, and L. Plümer. Looking for a needle in a haystack: probability density based classification and reconstruction of dormers from 3d point clouds. Transactions in GIS, 25(1):44-70, 2021.
|
| Accurate reconstruction of roofs with dormers is challenging. Without careful separation of the dormer points from the points on the roof surface, the
estimation of the roof areas is distorted. The characteristic distortion of the density distribution in comparison to the expected normal distribution is the
starting point of our method. We propose a hierarchical method which improves roof reconstruction from LiDAR point clouds in a model-based manner
separating dormer points from roof points using classification methods. The key idea is to exploit probability density functions (PDFs) to reveal roof
properties and design skilful features for a supervised learning method using support vector machines (SVMs). The approach is tested based on real data
as well as simulated point clouds.
@article{dehbi2020probability,
abstract = {Accurate reconstruction of roofs with dormers is challenging. Without careful separation of the dormer points from the points on the roof surface, the
estimation of the roof areas is distorted. The characteristic distortion of the density distribution in comparison to the expected normal distribution is the
starting point of our method. We propose a hierarchical method which improves roof reconstruction from LiDAR point clouds in a model-based manner
separating dormer points from roof points using classification methods. The key idea is to exploit probability density functions (PDFs) to reveal roof
properties and design skilful features for a supervised learning method using support vector machines (SVMs). The approach is tested based on real data
as well as simulated point clouds.},
author = {Dehbi, Y. and Koppers, S. and Pl\"umer, L.},
doi = {10.1111/tgis.12658},
journal = {Transactions in GIS},
number = {1},
pages = {44-70},
title = {Looking for a needle in a haystack: Probability density based classification and reconstruction of dormers from 3D point clouds},
volume = {25},
year = {2021}
}
|
|
J.-H. Haunert, D. Schmidt, and M. Schmidt. Anonymization via clustering of locations in road networks (short paper). In Proc. 11th International Conference on Geographic Information Science (GIScience '21). 2021.
|
| Data related to households or addresses needs be published in an aggregated form to obfuscate sensitive information about individuals.
Usually, the data is aggregated to the level of existing administrative zones, but these often do not correspond to formal models of privacy or a desired level of anonymity.
Therefore, automatic privacy-preserving spatial clustering methods are needed.
To address this need, we present algorithms to partition a given set of locations into $k$-anonymous clusters, meaning that each cluster contains at least $k$ locations.
We assume that the locations are given as a set $T ⊆ V$ of terminals in a weighted graph $G = (V, E)$ representing a road network.
Our approach is to compute a forest in $G$, i.e., a set of trees, each of which corresponds to a cluster.
We ensure the $k$-anonymity of the clusters by constraining the trees to span at least $k$ terminals each (plus an arbitrary number of non-terminal nodes called Steiner nodes).
By minimizing the total edge weight of the forest, we ensure that the clusters reflect the proximity among the locations.
Although the problem is NP-hard, we were able to solve instances of several hundreds of terminals using integer linear programming.
Moreover, we present an efficient approximation algorithm and show that it can be used to process large and fine-grained data sets.
@inproceedings{haunert2021anonymization,
abstract = {Data related to households or addresses needs be published in an aggregated form to obfuscate sensitive information about individuals.
Usually, the data is aggregated to the level of existing administrative zones, but these often do not correspond to formal models of privacy or a desired level of anonymity.
Therefore, automatic privacy-preserving spatial clustering methods are needed.
To address this need, we present algorithms to partition a given set of locations into $k$-anonymous clusters, meaning that each cluster contains at least $k$ locations.
We assume that the locations are given as a set $T \subseteq V$ of terminals in a weighted graph $G = (V, E)$ representing a road network.
Our approach is to compute a forest in $G$, i.e., a set of trees, each of which corresponds to a cluster.
We ensure the $k$-anonymity of the clusters by constraining the trees to span at least $k$ terminals each (plus an arbitrary number of non-terminal nodes called Steiner nodes).
By minimizing the total edge weight of the forest, we ensure that the clusters reflect the proximity among the locations.
Although the problem is NP-hard, we were able to solve instances of several hundreds of terminals using integer linear programming.
Moreover, we present an efficient approximation algorithm and show that it can be used to process large and fine-grained data sets.},
author = {Haunert, J.-H. and Schmidt, D. and Schmidt, M.},
booktitle = {Proc. 11th International Conference on Geographic Information Science (GIScience '21)},
doi = {10.25436/E2CC7P},
title = {Anonymization via Clustering of Locations in Road Networks (Short Paper)},
year = {2021}
}
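The following sketch is a deliberately naive baseline, not the ILP or the approximation algorithm from the paper: it repeatedly grows a cluster around an arbitrary unassigned terminal by adding its k-1 nearest unassigned terminals with respect to shortest-path distance in the road network (using networkx). It only illustrates what a k-anonymous clustering of network locations looks like.

import networkx as nx

def greedy_k_anonymous_clusters(G, terminals, k):
    """Naive baseline: group terminals into clusters of size >= k by network distance.

    G:         weighted networkx graph (edge attribute 'weight' = road length)
    terminals: locations to be anonymized (a subset of G's nodes)
    k:         minimum cluster size
    """
    unassigned = set(terminals)
    clusters = []
    while len(unassigned) >= k:
        seed = unassigned.pop()
        # network distances from the seed to all remaining unassigned terminals
        dist = nx.single_source_dijkstra_path_length(G, seed, weight="weight")
        nearest = sorted(unassigned, key=lambda v: dist.get(v, float("inf")))[: k - 1]
        cluster = {seed, *nearest}
        unassigned -= cluster
        clusters.append(cluster)
    if clusters and unassigned:  # fewer than k leftovers join the last cluster
        for v in unassigned:
            clusters[-1].add(v)
    return clusters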
|
|
Peter Rottmann, Anne Driemel, Herman Haverkort, Heiko Röglin, and Jan-Henrik Haunert. Bicriteria aggregation of polygons via graph cuts. In Krzysztof Janowicz, and Judith A. Verstegen, editors, volume 208 of Leibniz International Proceedings in Informatics (LIPIcs). 11th International Conference on Geographic Information Science (GIScience 2021) - Part II, pages 6:1-6:16. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2021.
|
| We present a new method for the task of detecting groups of polygons in a given geographic data set and computing a representative polygon for each group. This task is relevant in map generalization where the aim is to derive a less detailed map from a given map. Following a classical approach, we define the output polygons by merging the input polygons with a set of triangles that we select from a constrained Delaunay triangulation of the input polygons' exterior. The innovation of our method is to compute the selection of triangles by solving a bicriteria optimization problem. While on the one hand we aim at minimizing the total area of the outputs polygons, we aim on the other hand at minimizing their total perimeter. We combine these two objectives in a weighted sum and study two computational problems that naturally arise. In the first problem, the parameter that balances the two objectives is fixed and the aim is to compute a single optimal solution. In the second problem, the aim is to compute a set containing an optimal solution for every possible value of the parameter. We present efficient algorithms for these problems based on computing a minimum cut in an appropriately defined graph. Moreover, we show how the result set of the second problem can be approximated with few solutions. In an experimental evaluation, we finally show that the method is able to derive settlement areas from building footprints that are similar to reference solutions.
@inproceedings{rottmann2021bicriteria,
abstract = {We present a new method for the task of detecting groups of polygons in a given geographic data set and computing a representative polygon for each group. This task is relevant in map generalization where the aim is to derive a less detailed map from a given map. Following a classical approach, we define the output polygons by merging the input polygons with a set of triangles that we select from a constrained Delaunay triangulation of the input polygons' exterior. The innovation of our method is to compute the selection of triangles by solving a bicriteria optimization problem. While on the one hand we aim at minimizing the total area of the outputs polygons, we aim on the other hand at minimizing their total perimeter. We combine these two objectives in a weighted sum and study two computational problems that naturally arise. In the first problem, the parameter that balances the two objectives is fixed and the aim is to compute a single optimal solution. In the second problem, the aim is to compute a set containing an optimal solution for every possible value of the parameter. We present efficient algorithms for these problems based on computing a minimum cut in an appropriately defined graph. Moreover, we show how the result set of the second problem can be approximated with few solutions. In an experimental evaluation, we finally show that the method is able to derive settlement areas from building footprints that are similar to reference solutions.},
address = {Dagstuhl, Germany},
annote = {Keywords: map generalization, aggregation, graph cuts, bicriteria optimization},
author = {Peter Rottmann and Anne Driemel and Herman Haverkort and Heiko R{\"o}glin and Jan-Henrik Haunert},
booktitle = {11th International Conference on Geographic Information Science (GIScience 2021) - Part II},
doi = {10.4230/LIPIcs.GIScience.2021.II.6},
editor = {Janowicz, Krzysztof and Verstegen, Judith A.},
isbn = {978-3-95977-208-2},
issn = {1868-8969},
pages = {6:1--6:16},
publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
series = {Leibniz International Proceedings in Informatics (LIPIcs)},
title = {Bicriteria Aggregation of Polygons via Graph Cuts},
url = {https://drops.dagstuhl.de/opus/volltexte/2021/14765},
urn = {urn:nbn:de:0030-drops-147658},
volume = {208},
year = {2021}
}
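The min-cut construction can be sketched as follows under the usual energy-minimization encoding; this is one possible reading of the construction, with all geometric quantities (triangle areas, shared edge lengths) assumed to be precomputed from the constrained Delaunay triangulation of the polygons' exterior. Triangles that end up on the source side of the cut are the ones merged into the output regions; the constant area of the input polygons is omitted from the objective.

import networkx as nx

def select_triangles_min_cut(tri_area, tri_adjacency, polygon_edges, hull_edges, lam):
    """Select exterior triangles to merge with the input polygons via a minimum s-t cut.

    tri_area:      dict triangle_id -> area
    tri_adjacency: list of (t1, t2, shared_edge_length) for neighboring exterior triangles
    polygon_edges: list of (t, edge_length) where triangle t touches an input polygon
    hull_edges:    list of (t, edge_length) where triangle t touches the outer face
    lam:           weight of the area term; (1 - lam) weighs the perimeter term
    Returns the set of selected triangle ids.
    """
    G = nx.DiGraph()

    def add(u, v, cap):
        # accumulate symmetric capacities (each cut edge is paid once)
        for a, b in ((u, v), (v, u)):
            if G.has_edge(a, b):
                G[a][b]["capacity"] += cap
            else:
                G.add_edge(a, b, capacity=cap)

    for t, area in tri_area.items():
        add(t, "sink", lam * area)            # selecting t costs lam * area(t)
    for t1, t2, length in tri_adjacency:
        add(t1, t2, (1 - lam) * length)       # boundary appears if exactly one of t1, t2 is selected
    for t, length in polygon_edges:
        add("source", t, (1 - lam) * length)  # polygon edge stays on the boundary if t is unselected
    for t, length in hull_edges:
        add(t, "sink", (1 - lam) * length)    # outer edge becomes boundary if t is selected

    cut_value, (source_side, _) = nx.minimum_cut(G, "source", "sink")
    return {t for t in tri_area if t in source_side}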
|
|
Peter Rottmann, Thorbjörn Posewsky, Andres Milioto, Cyrill Stachniss, and Jens Behley. Improving monocular depth estimation by semantic pre-training. In Proceedings of the 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 5916-5923. 2021.
|
| Knowing the distance to nearby objects is crucial for autonomous cars to navigate safely in everyday traffic. In this paper, we investigate monocular depth estimation, which advanced substantially within the last years and is providing increasingly more accurate results while only requiring a single camera image as input. In line with recent work, we use an encoder-decoder structure with so-called packing layers to estimate depth values in a self-supervised fashion. We propose integrating a joint pre-training of semantic segmentation plus depth estimation on a dataset providing semantic labels. By using a separate semantic decoder that is only needed for pre-training, we can keep the network comparatively small. Our extensive experimental evaluation shows that the addition of such pre-training improves the depth estimation performance substantially. Finally, we show that we achieve competitive performance on the KITTI dataset despite using a much smaller and more efficient network.
@inproceedings{rottmann2021iros,
abstract = {Knowing the distance to nearby objects is crucial for autonomous cars to navigate safely in everyday traffic. In this paper, we investigate monocular depth estimation, which advanced substantially within the last years and is providing increasingly more accurate results while only requiring a single camera image as input. In line with recent work, we use an encoder-decoder structure with so-called packing layers to estimate depth values in a self-supervised fashion. We propose integrating a joint pre-training of semantic segmentation plus depth estimation on a dataset providing semantic labels. By using a separate semantic decoder that is only needed for pre-training, we can keep the network comparatively small. Our extensive experimental evaluation shows that the addition of such pre-training improves the depth estimation performance substantially. Finally, we show that we achieve competitive performance on the KITTI dataset despite using a much smaller and more efficient network.},
author = {Peter Rottmann and Thorbjörn Posewsky and Andres Milioto and Cyrill Stachniss and Jens Behley},
booktitle = {Proceedings of the 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
doi = {10.1109/IROS51168.2021.9636546},
pages = {5916-5923},
title = {{Improving Monocular Depth Estimation by Semantic Pre-training}},
year = {2021}
}
|
|
Timon Behr, Thomas C. van Dijk, Axel Forsch, Jan-Henrik Haunert, and Sabine Storandt. Map matching for semi-restricted trajectories. In Krzysztof Janowicz, and Judith A. Verstegen, editors, volume 208 of Leibniz International Proceedings in Informatics (LIPIcs). Proc. 11th International Conference on Geographic Information Science (GIScience '21) - Part II, pages 12:1-12:16. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2021.
|
| We consider the problem of matching trajectories to a road map, giving particular consideration to trajectories that do not exclusively follow the underlying network. Such trajectories arise, for example, when a person walks through the inner part of a city, crossing market squares or parking lots. We call such trajectories semi-restricted. Sensible map matching of semi-restricted trajectories requires the ability to differentiate between restricted and unrestricted movement. We develop in this paper an approach that efficiently and reliably computes concise representations of such trajectories that maintain their semantic characteristics. Our approach utilizes OpenStreetMap data to not only extract the network but also areas that allow for free movement (as e.g. parks) as well as obstacles (as e.g. buildings). We discuss in detail how to incorporate this information in the map matching process, and demonstrate the applicability of our method in an experimental evaluation on real pedestrian and bicycle trajectories.
@inproceedings{behr2021matching,
abstract = {We consider the problem of matching trajectories to a road map, giving particular consideration to trajectories that do not exclusively follow the underlying network. Such trajectories arise, for example, when a person walks through the inner part of a city, crossing market squares or parking lots. We call such trajectories semi-restricted. Sensible map matching of semi-restricted trajectories requires the ability to differentiate between restricted and unrestricted movement. We develop in this paper an approach that efficiently and reliably computes concise representations of such trajectories that maintain their semantic characteristics. Our approach utilizes OpenStreetMap data to not only extract the network but also areas that allow for free movement (as e.g. parks) as well as obstacles (as e.g. buildings). We discuss in detail how to incorporate this information in the map matching process, and demonstrate the applicability of our method in an experimental evaluation on real pedestrian and bicycle trajectories.},
address = {Dagstuhl, Germany},
annote = {Keywords: map matching, OpenStreetMap, GPS, trajectory, road network},
author = {Behr, Timon and van Dijk, Thomas C. and Forsch, Axel and Haunert, Jan-Henrik and Storandt, Sabine},
booktitle = {Proc. 11th International Conference on Geographic Information Science (GIScience '21) - Part II},
doi = {10.4230/LIPIcs.GIScience.2021.II.12},
editor = {Janowicz, Krzysztof and Verstegen, Judith A.},
isbn = {978-3-95977-208-2},
issn = {1868-8969},
pages = {12:1--12:16},
publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
series = {Leibniz International Proceedings in Informatics (LIPIcs)},
title = {{Map Matching for Semi-Restricted Trajectories}},
url = {https://drops.dagstuhl.de/opus/volltexte/2021/14771},
urn = {urn:nbn:de:0030-drops-147717},
volume = {208},
year = {2021}
}
|
|
Axel Forsch, Youness Dehbi, Benjamin Niedermann, Johannes Oehrlein, Peter Rottmann, and Jan-Henrik Haunert. Multimodal travel-time maps with formally correct and schematic isochrones. Transactions in GIS, 25(6):3233-3256, 2021.
|
| The automatic generation of travel-time maps is a prerequisite for many fields of application such as tourist assistance and spatial decision support systems. The task is to determine outlines of zones that are reachable from a user's location in a given amount of time. In this work, we focus on travel-time maps with a formally guaranteed separation property in the sense that a zone exactly contains the part of the road network that is reachable within a pre-defined time from a given starting point and start time. In contrast to other automated methods, our approach generates schematized travel-time maps that reduce the visual complexity by representing each zone by an octilinear polygon. We aim at octilinear polygons with a small number of bends to further optimize the legibility of the map. The reachable parts of the road network are determined by the integration of timetable information for different modes of public transportation and pedestrian walkways based on a multi-modal time-expanded network. Moreover, the generated travel-time maps visualize multiple travel times using a map overlay of different time zones. In experiments on real-world data we compare our schematic visualizations to travel-time maps created with other visualization techniques.
@article{forsch2021isochrones,
abstract = {The automatic generation of travel-time maps is a prerequisite for many fields of application such as tourist assistance and spatial decision support systems. The task is to determine outlines of zones that are reachable from a user's location in a given amount of time. In this work, we focus on travel-time maps with a formally guaranteed separation property in the sense that a zone exactly contains the part of the road network that is reachable within a pre-defined time from a given starting point and start time. In contrast to other automated methods, our approach generates schematized travel-time maps that reduce the visual complexity by representing each zone by an octilinear polygon. We aim at octilinear polygons with a small number of bends to further optimize the legibility of the map. The reachable parts of the road network are determined by the integration of timetable information for different modes of public transportation and pedestrian walkways based on a multi-modal time-expanded network. Moreover, the generated travel-time maps visualize multiple travel times using a map overlay of different time zones. In experiments on real-world data we compare our schematic visualizations to travel-time maps created with other visualization techniques.},
author = {Forsch, Axel and Dehbi, Youness and Niedermann, Benjamin and Oehrlein, Johannes and Rottmann, Peter and Haunert, Jan-Henrik},
doi = {10.1111/tgis.12821},
journal = {Transactions in GIS},
number = {6},
pages = {3233--3256},
title = {Multimodal Travel-Time Maps with Formally Correct and Schematic Isochrones},
volume = {25},
year = {2021}
}
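At the core of the separation property is an earliest-arrival search with a time budget. The sketch below is a strongly simplified, unimodal stand-in for the paper's multimodal time-expanded network: a plain Dijkstra search that also reports how far into each outgoing edge one can still travel, which is the information an exact isochrone boundary has to respect. The small example network is invented.

import heapq

def reachable_part(graph, source, budget):
    """Earliest-arrival search with a time budget.

    graph:  dict node -> list of (neighbor, travel_time)
    source: start node; budget: available travel time
    Returns arrival times of reachable nodes and, per outgoing edge of a reachable node,
    the fraction of that edge that can still be traversed within the budget.
    """
    dist = {source: 0.0}
    queue = [(0.0, source)]
    while queue:
        d, u = heapq.heappop(queue)
        if d > dist.get(u, float("inf")) or d > budget:
            continue
        for v, t in graph.get(u, []):
            nd = d + t
            if nd <= budget and nd < dist.get(v, float("inf")):
                dist[v] = nd
                heapq.heappush(queue, (nd, v))
    fractions = {}
    for u, d in dist.items():
        for v, t in graph.get(u, []):
            frac = min(1.0, (budget - d) / t) if t > 0 else 1.0
            fractions[(u, v)] = max(fractions.get((u, v), 0.0), frac)
    return dist, fractions

# Example: nodes a-d, 10-minute budget.
net = {"a": [("b", 4), ("c", 7)], "b": [("d", 8)], "c": [("d", 2)]}
print(reachable_part(net, "a", 10))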
|
|
S. Gedicke, A. Jabrayilov, B. Niedermann, P. Mutzel, and J.-H. Haunert. Point feature label placement for multi-page maps on small-screen devices. Computers & Graphics, 100:66-80, 2021.
|
| Map applications on mobile devices such as smartphones and smartwatches have become ubiquitous. When visualizing spatial data on such small-screen devices, one major challenge is annotating the data with labels (e.g., small icons). The restricted space requires new visualization techniques as established strategies, such as maximizing the number of placed labels, easily lead to the omission of information. We propose an approach that distributes all labels contained in a temporarily fixed map section on multiple pages. Applying interaction techniques for navigating through the pages, a user can access all information both without any overlapping labels and without the need for zooming. We propose a method with two phases; a pre-processing phase and a query phase. We use an optimization approach to pre-compute labelings on the level of a whole city and provide the on-demand querying of individual labelings at a more local level. Our approach provides a consistent label-page assignment, meaning that labels do not appear and disappear when the user pans the map. Further, our model provides quick access to important information and a spatially balanced distribution of labels on pages. In experiments on real-world data we analyze different parameter settings and show that our model yields high-quality labelings.
@article{gjnmh-zm-21,
abstract = {Map applications on mobile devices such as smartphones and smartwatches have become ubiquitous. When visualizing spatial data on such small-screen devices, one major challenge is annotating the data with labels (e.g., small icons). The restricted space requires new visualization techniques as established strategies, such as maximizing the number of placed labels, easily lead to the omission of information. We propose an approach that distributes all labels contained in a temporarily fixed map section on multiple pages. Applying interaction techniques for navigating through the pages, a user can access all information both without any overlapping labels and without the need for zooming. We propose a method with two phases; a pre-processing phase and a query phase. We use an optimization approach to pre-compute labelings on the level of a whole city and provide the on-demand querying of individual labelings at a more local level. Our approach provides a consistent label-page assignment, meaning that labels do not appear and disappear when the user pans the map. Further, our model provides quick access to important information and a spatially balanced distribution of labels on pages. In experiments on real-world data we analyze different parameter settings and show that our model yields high-quality labelings.},
author = {Gedicke, S. and Jabrayilov, A. and Niedermann, B. and Mutzel, P. and Haunert, J.-H.},
doi = {10.1016/j.cag.2021.07.019},
issn = {0097-8493},
journal = {Computers \& Graphics},
pages = {66-80},
title = {Point feature label placement for multi-page maps on small-screen devices},
url = {https://www.sciencedirect.com/science/article/pii/S0097849321001564},
volume = {100},
year = {2021}
}
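A minimal greedy stand-in (not the paper's pre-computed optimization model) shows the basic idea of distributing labels over pages for a fixed map extent: labels are processed by rank and placed on the first page where their icon box does not overlap a previously placed one.

def overlaps(a, b):
    """Axis-aligned rectangle overlap test; rectangles are (x_min, y_min, x_max, y_max)."""
    return not (a[2] <= b[0] or b[2] <= a[0] or a[3] <= b[1] or b[3] <= a[1])

def distribute_labels_on_pages(labels):
    """Greedy multi-page labeling baseline.

    labels: list of (rank, rect) with smaller rank = more important and rect an
            axis-aligned box around the label's fixed anchor position.
    Returns a list of pages, each a list of indices into `labels`.
    """
    pages = []
    order = sorted(range(len(labels)), key=lambda i: labels[i][0])
    for i in order:
        rect = labels[i][1]
        for page in pages:
            if all(not overlaps(rect, labels[j][1]) for j in page):
                page.append(i)
                break
        else:
            pages.append([i])  # open a new page for this label
    return pages

# Example: four equally sized labels, two of which collide.
lbls = [(1, (0, 0, 2, 1)), (2, (1, 0, 3, 1)), (3, (4, 0, 6, 1)), (4, (0, 3, 2, 4))]
print(distribute_labels_on_pages(lbls))  # -> [[0, 2, 3], [1]]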
|