|
Alexander Naumann, Annika Bonerath, and Jan-Henrik Haunert. Scalable many-to-many building footprint matching. Information Fusion, 2025. Accepted for publication; available online.
abstract
doi
bibtex
|
| The amount of available geospatial data, particularly different data sets describing the same area, grows continuously. Finding entity correspondences between multiple such datasets is a vital prerequisite for data integration, fusion, and quality assessment. In this work, we focus on polygon datasets, more specifically, building footprints. We compute many-to-many matchings between two input datasets to find correspondences between polygons. Existing research explored heuristic solutions, exact optimization approaches, and machine learning strategies. So far, none of these methods scale well to large datasets while providing a precise problem formulation to measure the quality of a computed matching. We present a fast and versatile algorithm based on a constrained optimization model. Given a partitioning of the datasets into connected subsets, it can solve instances of over 5 million polygons per dataset in under 7 min. Our approach is based on the Jaccard index (intersection over union, IoU) as its central quality measure. However, it is flexible and easily adaptable to different metrics proposed in other works.

@article{naumann2025matching,
abstract = {The amount of available geospatial data, particularly different data sets describing the same area, grows continuously. Finding entity correspondences between multiple such datasets is a vital prerequisite for data integration, fusion, and quality assessment. In this work, we focus on polygon datasets, more specifically, building footprints. We compute many-to-many matchings between two input datasets to find correspondences between polygons. Existing research explored heuristic solutions, exact optimization approaches, and machine learning strategies. So far, none of these methods scale well to large datasets while providing a precise problem formulation to measure the quality of a computed matching. We present a fast and versatile algorithm based on a constrained optimization model. Given a partitioning of the datasets into connected subsets, it can solve instances of over 5 million polygons per dataset in under 7 min. Our approach is based on the Jaccard index (intersection over union, IoU) as its central quality measure. However, it is flexible and easily adaptable to different metrics proposed in other works.},
author = {Naumann, Alexander and Bonerath, Annika and Haunert, Jan-Henrik},
doi = {10.1016/j.inffus.2025.103360},
journal = {Information Fusion},
note = {Accepted for publication. Already available online.},
publisher = {Elsevier},
title = {Scalable many-to-many building footprint matching},
url = {https://www.sciencedirect.com/science/article/pii/S1566253525004336},
year = {2025}
}
|
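The Jaccard index (IoU) that this work uses as its central quality measure is easy to reproduce for a single pair of footprints. A minimal sketch with the shapely library, on made-up toy coordinates (not the authors' code or data); the paper's contribution is the constrained optimization model built on top of this measure, not the measure itself:

from shapely.geometry import Polygon

# Two toy building footprints that overlap partially.
cadastre = Polygon([(0, 0), (10, 0), (10, 8), (0, 8)])
osm = Polygon([(1, 1), (11, 1), (11, 9), (1, 9)])

# Jaccard index = area of intersection over area of union (IoU).
iou = cadastre.intersection(osm).area / cadastre.union(osm).area
print(f"IoU = {iou:.3f}")  # 1.0 for identical footprints, 0.0 for disjoint ones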
|
Julius Knechtel, Youness Dehbi, Lasse Klingbeil, and Jan-Henrik Haunert. Simultaneous planning of standpoints and routing for laser scanning of buildings with network redundancy. ISPRS Journal of Photogrammetry and Remote Sensing, 224:59-74, 2025.
abstract
doi
bibtex
|
| Stop-and-go laser scanning is becoming increasingly prevalent in a variety of applications, e.g., the survey of the built environment. For this, a surveyor needs to select a set of standpoints as well as the route between them. This choice, however, has a high impact on both the economic efficiency of the respective survey as well as the completeness, accuracy, and subsequent registrability of the resulting point cloud. Assuming a set of building footprints as input, this article proposes a one-step optimization method to find the minimal number of selected standpoints based on scanner-related constraints. At the same time, we incorporate the length of the shortest route connecting the standpoints in the objective function. A local search method to speed up the time for solving the corresponding Mixed-Integer Linear Program (MILP) is additionally presented. The results for different scenarios show constantly shorter routes in comparison to existing approaches while still maintaining the minimal number of standpoints. Moreover, in our formulation we aim to minimize the effects of inaccuracies in the software-based registration. Inspired by the ideas of network survivability, we to this end propose a novel definition of connectivity tailored for laser scanning networks. On this basis, we enforce redundancy for the registration network of the survey. To prove the applicability of our formulation, we applied it to a large real-world scenario. This paves the way for the future use of fully automatic autonomous systems to provide a complete and high-quality model of the underlying building scenery.

@article{knechtel2025scanPlanningNetwRedundancy,
abstract = {Stop-and-go laser scanning is becoming increasingly prevalent in a variety of applications, e.g., the survey of the built environment. For this, a surveyor needs to select a set of standpoints as well as the route between them. This choice, however, has a high impact on both the economic efficiency of the respective survey as well as the completeness, accuracy, and subsequent registrability of the resulting point cloud. Assuming a set of building footprints as input, this article proposes a one-step optimization method to find the minimal number of selected standpoints based on scanner-related constraints. At the same time, we incorporate the length of the shortest route connecting the standpoints in the objective function. A local search method to speed up the time for solving the corresponding Mixed-Integer Linear Program (MILP) is additionally presented. The results for different scenarios show constantly shorter routes in comparison to existing approaches while still maintaining the minimal number of standpoints. Moreover, in our formulation we aim to minimize the effects of inaccuracies in the software-based registration. Inspired by the ideas of network survivability, we to this end propose a novel definition of connectivity tailored for laser scanning networks. On this basis, we enforce redundancy for the registration network of the survey. To prove the applicability of our formulation, we applied it to a large real-world scenario. This paves the way for the future use of fully automatic autonomous systems to provide a complete and high-quality model of the underlying building scenery.},
author = {Knechtel, Julius and Dehbi, Youness and Klingbeil, Lasse and Haunert, Jan-Henrik},
doi = {10.1016/j.isprsjprs.2025.03.017},
issn = {0924-2716},
journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
pages = {59-74},
title = {Simultaneous Planning of Standpoints and Routing for Laser Scanning of Buildings with Network Redundancy},
url = {https://www.sciencedirect.com/science/article/pii/S0924271625001157},
volume = {224},
year = {2025}
}
|
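The standpoint-selection core of such a formulation is a set-cover-style MILP. A hedged sketch with PuLP on invented visibility data: it only minimizes the number of standpoints that together see every wall target, whereas the paper's model additionally folds the route length into the objective and enforces the redundancy constraints:

from pulp import LpProblem, LpMinimize, LpVariable, lpSum, LpBinary

# covers[s] = wall targets visible from candidate standpoint s (toy data).
covers = {"s1": {"w1", "w2"}, "s2": {"w2", "w3"}, "s3": {"w1", "w3", "w4"}}
targets = set().union(*covers.values())

model = LpProblem("standpoint_selection", LpMinimize)
x = {s: LpVariable(f"x_{s}", cat=LpBinary) for s in covers}

model += lpSum(x.values())  # objective: number of selected standpoints
for t in targets:           # each wall target must be seen at least once
    model += lpSum(x[s] for s in covers if t in covers[s]) >= 1

model.solve()
print([s for s in covers if x[s].value() == 1])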
|
Annika Bonerath, Anne Driemel, Jan-Henrik Haunert, Herman Haverkort, Elmar Langetepe, and Benjamin Niedermann. Algorithms for consistent dynamic labeling of maps with a time-slider interface. IEEE Transactions on Visualization & Computer Graphics, (01):1-15, 2025.
abstract
doi
bibtex
|
| User interfaces for inspecting spatio-temporal events often allow their users to filter the events by specifying a time window with a time slider. We consider the case that filtered events are visualized on a map using textual or iconic labels. However, to ensure a clear visualization, not all filtered events are annotated with a label. We present algorithms for setting up a data structure that encodes for every possible time window the set of displayed labels. Our algorithms ensure that the displayed labels never overlap and guarantee the stability of the labeling during certain basic interactions with the time slider. Assuming that the labels have different priorities (weights), we aim to maximize the weight of the displayed labels integrated over all possible time windows. As basic interactions, we consider moving the entire time window, symmetrically scaling it, and dragging one of its endpoints. We consider two stability requirements: (1) during a basic interaction, a label should appear and disappear at most once; (2) if a label is displayed for a time window $Q$, then it is also displayed for all the time windows contained in $Q$ and that contain its timestamp. We prove that finding an optimal solution is NP-hard and propose efficient constant-factor approximation algorithms for unit-square and unit-disk labels, as well as a fast greedy heuristic for arbitrarily shaped labels. In experiments on real-world data, we compare the non-exact algorithms with an exact approach through integer linear programming.

@article{bonerath2025twl,
abstract = {User interfaces for inspecting spatio-temporal events often allow their users to filter the events by specifying a time window with a time slider. We consider the case that filtered events are visualized on a map using textual or iconic labels. However, to ensure a clear visualization, not all filtered events are annotated with a label. We present algorithms for setting up a data structure that encodes for every possible time window the set of displayed labels. Our algorithms ensure that the displayed labels never overlap and guarantee the stability of the labeling during certain basic interactions with the time slider. Assuming that the labels have different priorities (weights), we aim to maximize the weight of the displayed labels integrated over all possible time windows. As basic interactions, we consider moving the entire time window, symmetrically scaling it, and dragging one of its endpoints. We consider two stability requirements: (1) during a basic interaction, a label should appear and disappear at most once; (2) if a label is displayed for a time window $Q$, then it is also displayed for all the time windows contained in $Q$ and that contain its timestamp. We prove that finding an optimal solution is NP-hard and propose efficient constant-factor approximation algorithms for unit-square and unit-disk labels, as well as a fast greedy heuristic for arbitrarily shaped labels. In experiments on real-world data, we compare the non-exact algorithms with an exact approach through integer linear programming.},
address = {Los Alamitos, CA, USA},
author = {Bonerath, Annika and Driemel, Anne and Haunert, Jan-Henrik and Haverkort, Herman and Langetepe, Elmar and Niedermann, Benjamin},
doi = {10.1109/TVCG.2025.3527582},
issn = {1941-0506},
journal = {IEEE Transactions on Visualization \& Computer Graphics},
keywords = {Labeling;Data visualization;Annotations;Tornadoes;Heuristic algorithms;Data structures;Spatial databases;Reproducibility of results;Earthquakes;Approximation algorithms},
number = {01},
pages = {1-15},
publisher = {IEEE Computer Society},
title = {{Algorithms for Consistent Dynamic Labeling of Maps With a Time-Slider Interface}},
url = {https://doi.ieeecomputersociety.org/10.1109/TVCG.2025.3527582},
year = {2025}
}
|
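The greedy heuristic mentioned for arbitrarily shaped labels can be illustrated in a simplified static setting: process labels by decreasing weight and keep each one whose shape is disjoint from all labels kept so far. A sketch with shapely rectangles as stand-ins for label shapes (toy data; the paper's algorithm additionally reasons over all time windows and the two stability requirements):

from shapely.geometry import box

# (weight, axis-aligned label rectangle) as toy input.
labels = [(5.0, box(0, 0, 2, 1)), (3.0, box(1, 0, 3, 1)), (2.0, box(4, 0, 6, 1))]

accepted = []
for weight, shape in sorted(labels, key=lambda l: -l[0]):   # heaviest first
    if all(not shape.intersects(s) for _, s in accepted):   # overlaps are forbidden
        accepted.append((weight, shape))

print(f"kept {len(accepted)} labels with total weight {sum(w for w, _ in accepted)}")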
|
Alexander Naumann, Annika Bonerath, and Jan-Henrik Haunert. Many-to-many polygon matching à la Jaccard. In Proc. European Symposium on Algorithms (ESA'24), pages 90:1-90:15. 2024.
abstract
doi
bibtex
|
| Integration of spatial data is a major field of research. An important task of data integration is finding correspondences between entities. Here, we focus on combining building footprint data from cadastre and from volunteered geographic information, in particular OpenStreetMap. Previous research on this topic has led to exact 1:1 matching approaches and heuristic m:n matching approaches, most of which are lacking a mathematical problem definition. We introduce a model for many-to-many polygon matching based on the well-established Jaccard index. This is a natural extension to the existing 1:1 matching approaches. We show that the problem is NP-complete and a naive approach via integer programming fails easily. By analyzing the structure of the problem in detail, we can reduce the number of variables significantly. This approach yields an optimal m:n matching even for large real-world instances with appropriate running time. In particular, for the set of all building footprints of the city of Bonn (119,300 / 97,284 polygons) it yielded an optimal solution in approximately 1 hour.

@inproceedings{NaumannEtAl2024,
abstract = {Integration of spatial data is a major field of research. An important task of data integration is finding correspondences between entities. Here, we focus on combining building footprint data from cadastre and from volunteered geographic information, in particular OpenStreetMap. Previous research on this topic has led to exact 1:1 matching approaches and heuristic m:n matching approaches, most of which are lacking a mathematical problem definition. We introduce a model for many-to-many polygon matching based on the well-established Jaccard index. This is a natural extension to the existing 1:1 matching approaches. We show that the problem is NP-complete and a naive approach via integer programming fails easily. By analyzing the structure of the problem in detail, we can reduce the number of variables significantly. This approach yields an optimal m:n matching even for large real-world instances with appropriate running time. In particular, for the set of all building footprints of the city of Bonn (119,300 / 97,284 polygons) it yielded an optimal solution in approximately 1 hour.},
author = {Naumann, Alexander and Bonerath, Annika and Haunert, Jan-Henrik},
booktitle = {Proc. European Symposium on Algorithms (ESA'24)},
doi = {10.4230/LIPIcs.ESA.2024.90},
pages = {90:1--90:15},
title = {Many-to-many polygon matching \`{a} la {Jaccard}},
year = {2024}
}
|
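The natural scoring of an m:n match extends the pairwise Jaccard index to the unions of the matched sets. A minimal sketch with shapely on toy coordinates; it only scores one given candidate match, not the paper's optimization over all matches:

from shapely.geometry import Polygon
from shapely.ops import unary_union

# One cadastre building that OpenStreetMap records as two parts (toy data).
cadastre_side = [Polygon([(0, 0), (10, 0), (10, 4), (0, 4)])]
osm_side = [Polygon([(0, 0), (5, 0), (5, 4), (0, 4)]),
            Polygon([(5, 0), (10, 0), (10, 4), (5, 4)])]

a = unary_union(cadastre_side)
b = unary_union(osm_side)
jaccard = a.intersection(b).area / a.union(b).area
print(f"Jaccard of the 1:2 match = {jaccard:.3f}")  # 1.0 here: the parts tile the building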
|
Anna Arutyunova, Anne Driemel, Jan-Henrik Haunert, Herman J. Haverkort, Jürgen Kusche, Elmar Langetepe, Philip Mayer, Petra Mutzel, and Heiko Röglin. Minimum-error triangulations for sea surface reconstruction. In Proc. 38th International Symposium on Computational Geometry (SoCG 2022), volume 224 of LIPIcs, pages 7:1-7:18. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022.
doi
bibtex
|
| @inproceedings{arutyunovaDHHKL22,
author = {Anna Arutyunova and
Anne Driemel and
Jan{-}Henrik Haunert and
Herman J. Haverkort and
J{\"{u}}rgen Kusche and
Elmar Langetepe and
Philip Mayer and
Petra Mutzel and
Heiko R{\"{o}}glin},
booktitle = {Proc. 38th International Symposium on Computational Geometry (SoCG 2022)},
doi = {10.4230/LIPIcs.SoCG.2022.7},
pages = {7:1--7:18},
publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"{u}}r Informatik},
series = {LIPIcs},
title = {Minimum-Error Triangulations for Sea Surface Reconstruction},
url = {https://doi.org/10.4230/LIPIcs.SoCG.2022.7},
volume = {224},
year = {2022}
}
|
|
Sven Gedicke, Annika Bonerath, Benjamin Niedermann, and Jan-Henrik Haunert. Zoomless Maps: External Labeling Methods for the Interactive Exploration of Dense Point Sets at a Fixed Map Scale. IEEE Transactions on Visualization and Computer Graphics, 27(2):1247-1256, 2021. https://youtu.be/IuMhk8jp54c
abstract
doi
bibtex
|
| Visualizing spatial data on small-screen devices such as smartphones and smartwatches poses new challenges in computational cartography. The current interfaces for map exploration require their users to zoom in and out frequently. Indeed, zooming and panning are tools suitable for choosing the map extent corresponding to an area of interest. They are not as suitable, however, for resolving the graphical clutter caused by a high feature density since zooming in to a large map scale leads to a loss of context. Therefore, in this paper, we present new external labeling methods that allow a user to navigate through dense sets of points of interest while keeping the current map extent fixed. We provide a unified model, in which labels are placed at the boundary of the map and visually associated with the corresponding features via connecting lines, which are called leaders. Since the screen space is limited, labeling all features at the same time is impractical. Therefore, at any time, we label a subset of the features. We offer interaction techniques to change the current selection of features systematically and, thus, give the user access to all features. We distinguish three methods, which allow the user either to slide the labels along the bottom side of the map or to browse the labels based on pages or stacks. We present a generic algorithmic framework that provides us with the possibility of expressing the different variants of interaction techniques as optimization problems in a unified way. We propose both exact algorithms and fast and simple heuristics that solve the optimization problems taking into account different criteria such as the ranking of the labels, the total leader length as well as the distance between leaders. In experiments on real-world data we evaluate these algorithms and discuss the three variants with respect to their strengths and weaknesses proving the flexibility of the presented algorithmic framework.

@article{gbnh-zm-21,
abstract = {Visualizing spatial data on small-screen devices such as smartphones and smartwatches poses new challenges in computational cartography. The current interfaces for map exploration require their users to zoom in and out frequently. Indeed, zooming and panning are tools suitable for choosing the map extent corresponding to an area of interest. They are not as suitable, however, for resolving the graphical clutter caused by a high feature density since zooming in to a large map scale leads to a loss of context. Therefore, in this paper, we present new external labeling methods that allow a user to navigate through dense sets of points of interest while keeping the current map extent fixed. We provide a unified model, in which labels are placed at the boundary of the map and visually associated with the corresponding features via connecting lines, which are called leaders. Since the screen space is limited, labeling all features at the same time is impractical. Therefore, at any time, we label a subset of the features. We offer interaction techniques to change the current selection of features systematically and, thus, give the user access to all features. We distinguish three methods, which allow the user either to slide the labels along the bottom side of the map or to browse the labels based on pages or stacks. We present a generic algorithmic framework that provides us with the possibility of expressing the different variants of interaction techniques as optimization problems in a unified way. We propose both exact algorithms and fast and simple heuristics that solve the optimization problems taking into account different criteria such as the ranking of the labels, the total leader length as well as the distance between leaders. In experiments on real-world data we evaluate these algorithms and discuss the three variants with respect to their strengths and weaknesses proving the flexibility of the presented algorithmic framework.},
author = {Gedicke, Sven and Bonerath, Annika and Niedermann, Benjamin and Haunert, Jan-Henrik},
doi = {10.1109/TVCG.2020.3030399},
journal = {IEEE Transactions on Visualization and Computer Graphics},
note = {https://youtu.be/IuMhk8jp54c},
number = {2},
pages = {1247--1256},
title = {{Z}oomless {M}aps: {E}xternal {L}abeling {M}ethods for the {I}nteractive {E}xploration of {D}ense {P}oint {S}ets at a {F}ixed {M}ap {S}cale},
volume = {27},
year = {2021}
}
|
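One building block of such a model, assigning a chosen subset of features to boundary label slots so that the total leader length is minimized, is a linear assignment problem. A hedged sketch with scipy on toy coordinates (straight-line leaders only; the paper's framework also covers leader crossings, label ranking, and the sliding, paging, and stacking interactions):

import numpy as np
from scipy.optimize import linear_sum_assignment

features = np.array([(3.0, 4.0), (5.0, 2.0), (7.0, 6.0)])  # points inside the map
slots = np.array([(2.0, 0.0), (5.0, 0.0), (8.0, 0.0)])     # label anchors on the bottom edge

# cost[i, j] = leader length if feature i is labeled at slot j.
cost = np.linalg.norm(features[:, None, :] - slots[None, :, :], axis=2)
rows, cols = linear_sum_assignment(cost)  # Hungarian method, minimum total length

for i, j in zip(rows, cols):
    print(f"feature {i} -> slot {j}, leader length {cost[i, j]:.2f}")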
|
A. Bonerath, B. Niedermann, J. Diederich, Y. Orgeig, J. Oehrlein, and J.-H. Haunert. A time-windowed data structure for spatial density maps. In Proc. 28th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL '20), pages 15-24. 2020.
abstract
doi
bibtex
|
| The visualization of spatio-temporal data helps researchers understand global processes such as animal migration. In particular, interactively restricting the data to different time windows reveals new insights into the short-term and long-term changes of the research data. Inspired by this use case, we consider the visualization of point data annotated with time stamps. We pick up classical, grid-based density maps as the underlying visualization technique and enhance them with an efficient data structure for arbitrarily specified time-window queries. The running time of the queries is logarithmic in the total number of points and linear in the number of actually colored cells. In experiments on real-world data we show that the data structure answers time-window queries within milliseconds, which supports the interactive exploration of large point sets. Further, the data structure can be used to visualize additional decision problems, e.g., it can answer whether the sum or maximum of additional weights given with the points exceed a certain threshold. We have defined the data structure general enough to also support multiple thresholds expressed by different colors.

@inproceedings{bhn-twdssdm-20,
abstract = {The visualization of spatio-temporal data helps researchers understand global processes such as animal migration. In particular, interactively restricting the data to different time windows reveals new insights into the short-term and long-term changes of the research data. Inspired by this use case, we consider the visualization of point data annotated with time stamps. We pick up classical, grid-based density maps as the underlying visualization technique and enhance them with an efficient data structure for arbitrarily specified time-window queries. The running time of the queries is logarithmic in the total number of points and linear in the number of actually colored cells. In experiments on real-world data we show that the data structure answers time-window queries within milliseconds, which supports the interactive exploration of large point sets. Further, the data structure can be used to visualize additional decision problems, e.g., it can answer whether the sum or maximum of additional weights given with the points exceed a certain threshold. We have defined the data structure general enough to also support multiple thresholds expressed by different colors.},
author = {Bonerath, A. and Niedermann, B. and Diederich, J. and Orgeig, Y. and Oehrlein, J. and Haunert, J.-H.},
booktitle = {Proc. 28th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL '20)},
doi = {10.1145/3397536.3422242},
pages = {15--24},
title = {{A Time-Windowed Data Structure for Spatial Density Maps}},
year = {2020}
}
|
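The flavor of the query can be imitated with a far simpler structure: keep a sorted timestamp list per grid cell and count the points of a queried time window by binary search, which is logarithmic per cell. A stand-in sketch on toy data (this is not the paper's data structure, which precomputes the coloring for all possible windows):

from bisect import bisect_left, bisect_right, insort
from collections import defaultdict

cell_times = defaultdict(list)  # (cell_x, cell_y) -> sorted timestamps

def insert(x, y, t, cell=10.0):
    insort(cell_times[(int(x // cell), int(y // cell))], t)

def count_in_window(cell_key, t_start, t_end):
    """Points in the cell whose timestamp lies in [t_start, t_end]."""
    times = cell_times[cell_key]
    return bisect_right(times, t_end) - bisect_left(times, t_start)

for x, y, t in [(3, 4, 1.0), (5, 2, 2.5), (7, 6, 4.0)]:
    insert(x, y, t)
print(count_in_window((0, 0), 0.0, 3.0))  # -> 2: the toy points at t=1.0 and t=2.5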
|
A. Bonerath, B. Niedermann, and J.-H. Haunert. Retrieving alpha-shapes and schematic polygonal approximations for sets of points within queried temporal ranges. In Proc. 27th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL '19), pages 249-258. 2019. trailer: https://youtu.be/mlnUDhbMSfQ
abstract
doi
bibtex
|
| The interactive exploration of data requires data structures that can be repeatedly queried to obtain simple visualizations of parts of the data. We consider the scenario that the data is a set of points each associated with a time stamp and that the result of each query is visualized by an α-shape, which generalizes the concept of convex hulls. Instead of computing each shape independently, we suggest and analyze a simple data structure that aggregates the α-shapes of all possible queries. Once the data structure is built, it particularly allows us to query single α-shapes without retrieving the actual (possibly large) point set and thus to rapidly produce small previews of the queried data. We discuss the data structure for the original α-shapes as well as for a schematized version of α-shapes, which further simplifies the visualization. We evaluate the data structure on real-world data. The experiments indicate linear memory consumption with respect to the number of points, which makes the data structure applicable in practice, although the size is quadratic for a theoretic worst case example.

@inproceedings{bhn-rasspa-19,
abstract = {The interactive exploration of data requires data structures that can be repeatedly queried to obtain simple visualizations of parts of the data. We consider the scenario that the data is a set of points each associated with a time stamp and that the result of each query is visualized by an α-shape, which generalizes the concept of convex hulls. Instead of computing each shape independently, we suggest and analyze a simple data structure that aggregates the α-shapes of all possible queries. Once the data structure is built, it particularly allows us to query single α-shapes without retrieving the actual (possibly large) point set and thus to rapidly produce small previews of the queried data. We discuss the data structure for the original α-shapes as well as for a schematized version of α-shapes, which further simplifies the visualization. We evaluate the data structure on real-world data. The experiments indicate linear memory consumption with respect to the number of points, which makes the data structure applicable in practice, although the size is quadratic for a theoretic worst case example.},
author = {Bonerath, A. and Niedermann, B. and Haunert, J.-H.},
booktitle = {Proc. 27th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL '19)},
doi = {10.1145/3347146.3359087},
note = {trailer: https://youtu.be/mlnUDhbMSfQ},
pages = {249--258},
title = {{Retrieving alpha-Shapes and Schematic Polygonal Approximations for Sets of Points within Queried Temporal Ranges}},
year = {2019}
}
|
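A single α-shape query can be answered by filtering a Delaunay triangulation: one common convention keeps every triangle whose circumradius is at most 1/α. A hedged sketch with scipy that computes one shape for one point set; the paper's contribution is the data structure aggregating the shapes of all time-window queries:

import numpy as np
from scipy.spatial import Delaunay

def alpha_shape_triangles(points, alpha):
    """Delaunay triangles with circumradius at most 1/alpha."""
    tri = Delaunay(points)
    keep = []
    for simplex in tri.simplices:
        a, b, c = points[simplex]
        la, lb, lc = np.linalg.norm(b - c), np.linalg.norm(a - c), np.linalg.norm(a - b)
        area = 0.5 * abs((b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0]))
        # circumradius R = (product of the side lengths) / (4 * area)
        if area > 0 and (la * lb * lc) / (4.0 * area) <= 1.0 / alpha:
            keep.append(simplex)
    return keep

points = np.random.default_rng(0).random((50, 2))
print(len(alpha_shape_triangles(points, alpha=10.0)))  # triangles forming the alpha-shape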
|
B. Niedermann and J.-H. Haunert. An algorithmic framework for labeling network maps. Algorithmica, 80(5):1493-1533, 2018.
abstract
doi
bibtex
|
| Drawing network maps automatically comprises two challenging steps, namely laying out the map and placing non-overlapping labels. In this paper we tackle the problem of labeling an already existing network map considering the application of metro maps. We present a flexible and versatile labeling model. Despite its simplicity, we prove that it is NP-complete to label a single line of the network. For a restricted variant of that model, we introduce an efficient algorithm that optimally labels a single line. Based on that algorithm, we present a general and sophisticated workflow for multiple metro lines, which is experimentally evaluated on real-world metro maps.

@article{HaunertN15a,
abstract = {Drawing network maps automatically comprises two challenging steps, namely laying out the map and placing non-overlapping labels. In this paper we tackle the problem of labeling an already existing network map considering the application of metro maps. We present a flexible and versatile labeling model. Despite its simplicity, we prove that it is NP-complete to label a single line of the network. For a restricted variant of that model, we introduce an efficient algorithm that optimally labels a single line. Based on that algorithm, we present a general and sophisticated workflow for multiple metro lines, which is experimentally evaluated on real-world metro maps.},
author = {Niedermann, B. and Haunert, J.{-}H.},
doi = {10.1007/s00453-017-0350-0},
journal = {Algorithmica},
number = {5},
pages = {1493--1533},
title = {An Algorithmic Framework for Labeling Network Maps},
volume = {80},
year = {2018}
}
|
|
T. C. van Dijk and J.-H. Haunert. Interactive focus maps using least-squares optimization. International Journal of Geographical Information Science, 28(10), 2014.
abstract
doi
bibtex
|
| We present a new algorithm that enlarges a focus region in a given network map without removing non-focus (i.e., context) network parts from the map or changing the map's size. In cartography, this problem is usually tackled with fish-eye projections, which, however, introduce severe distortion. Our new algorithm minimizes distortion and, with respect to this objective, produces results of similar quality compared to an existing algorithm. In contrast to the existing algorithm, the new algorithm achieves real-time performance that allows its application in interactive systems. We target applications where a user sets a focus by brushing parts of the network or the focus region is defined as the neighborhood of a moving user. A crucial feature of the algorithm is its capability of avoiding unwanted edge crossings. Basically, we first solve a least-squares optimization problem without constraints for avoiding edge crossings. The solution we find is then used to identify a small set of constraints needed for a crossing-free solution and, beyond this, allows us to start an animation enlarging the focus region before the final, crossing-free solution is found. Moreover, memorizing the non-crossing constraints from an initial run of the algorithm allows us to achieve a better runtime on any further run - assuming that the focus region does not move too much between two consecutive runs. As we show with experiments on real-world data, this enables response times well below 1 second.

@article{VanDijkHaunert,
abstract = {We present a new algorithm that enlarges a focus region in a given network map without removing non-focus (i.e., context) network parts from the map or changing the map's size. In cartography, this problem is usually tackled with fish-eye projections, which, however, introduce severe distortion. Our new algorithm minimizes distortion and, with respect to this objective, produces results of similar quality compared to an existing algorithm. In contrast to the existing algorithm, the new algorithm achieves real-time performance that allows its application in interactive systems. We target applications where a user sets a focus by brushing parts of the network or the focus region is defined as the neighborhood of a moving user. A crucial feature of the algorithm is its capability of avoiding unwanted edge crossings. Basically, we first solve a least-squares optimization problem without constraints for avoiding edge crossings. The solution we find is then used to identify a small set of constraints needed for a crossing-free solution and, beyond this, allows us to start an animation enlarging the focus region before the final, crossing-free solution is found. Moreover, memorizing the non-crossing constraints from an initial run of the algorithm allows us to achieve a better runtime on any further run - assuming that the focus region does not move too much between two consecutive runs. As we show with experiments on real-world data, this enables response times well below 1 second.},
author = {T. C. van Dijk and J.-H. Haunert},
doi = {10.1080/13658816.2014.887718},
journal = {International Journal of Geographical Information Science},
number = {10},
title = {Interactive Focus Maps Using Least-Squares Optimization},
volume = {28},
year = {2014}
}
|
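The unconstrained core of the method can be reproduced as a plain linear least-squares problem: one equation per edge asks the new edge vector to equal the old one scaled by the desired focus factor, and one pinned node removes the translational freedom. A toy sketch with numpy (no crossing constraints, which are exactly what the paper adds on top):

import numpy as np

# Original node positions and edges of a tiny path graph; edge 0 is in the focus.
q = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [3.0, 1.0]])
edges = [(0, 1), (1, 2), (2, 3)]
scale = [2.0, 1.0, 1.0]              # enlarge the focus edge, keep the others

n, m = len(q), len(edges)
A = np.zeros((m + 1, n))
for e, (u, v) in enumerate(edges):   # p_v - p_u = s_e * (q_v - q_u)
    A[e, u], A[e, v] = -1.0, 1.0
A[m, 0] = 1.0                        # pin node 0

p = np.empty_like(q)
for dim in range(2):                 # x and y decouple into two solves
    b = np.append([s * (q[v, dim] - q[u, dim]) for (u, v), s in zip(edges, scale)],
                  q[0, dim])
    p[:, dim] = np.linalg.lstsq(A, b, rcond=None)[0]
print(p)                             # node 1 moves from x=1 to x=2, the rest shift along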
|
M. Fink, J.-H. Haunert, A. Schulz, J. Spoerhase, and A. Wolff. Algorithms for labeling focus regions. IEEE Transactions on Visualization and Computer Graphics, 18(12):2583-2592, 2012.
abstract
doi
bibtex
|
| In this paper, we investigate the problem of labeling point sites in focus regions of maps or diagrams. This problem occurs, for example, when the user of a mapping service wants to see the names of restaurants or other POIs in a crowded downtown area but keep the overview over a larger area. Our approach is to place the labels at the boundary of the focus region and connect each site with its label by a linear connection, which is called a leader. In this way, we move labels from the focus region to the less valuable context region surrounding it. In order to make the leader layout well readable, we present algorithms that rule out crossings between leaders and optimize other characteristics such as total leader length and distance between labels. This yields a new variant of the boundary labeling problem, which has been studied in the literature. Other than in traditional boundary labeling, where leaders are usually schematized polylines, we focus on leaders that are either straight-line segments or Bézier curves. Further, we present algorithms that, given the sites, find a position of the focus region that optimizes the above characteristics. We also consider a variant of the problem where we have more sites than space for labels. In this situation, we assume that the sites are prioritized by the user. Alternatively, we take a new facility-location perspective which yields a clustering of the sites. We label one representative of each cluster. If the user wishes, we apply our approach to the sites within a cluster, giving details on demand.

@article{FinkEtAl2012,
abstract = {In this paper, we investigate the problem of labeling point sites in focus regions of maps or diagrams. This problem occurs, for example, when the user of a mapping service wants to see the names of restaurants or other POIs in a crowded downtown area but keep the overview over a larger area. Our approach is to place the labels at the boundary of the focus region and connect each site with its label by a linear connection, which is called a leader. In this way, we move labels from the focus region to the less valuable context region surrounding it. In order to make the leader layout well readable, we present algorithms that rule out crossings between leaders and optimize other characteristics such as total leader length and distance between labels. \par This yields a new variant of the boundary labeling problem, which has been studied in the literature. Other than in traditional boundary labeling, where leaders are usually schematized polylines, we focus on leaders that are either straight-line segments or B\'ezier curves. Further, we present algorithms that, given the sites, find a position of the focus region that optimizes the above characteristics. \par We also consider a variant of the problem where we have more sites than space for labels. In this situation, we assume that the sites are prioritized by the user. Alternatively, we take a new facility-location perspective which yields a clustering of the sites. We label one representative of each cluster. If the user wishes, we apply our approach to the sites within a cluster, giving details on demand.},
author = {Fink, M. and Haunert, J.-H. and Schulz, A. and Spoerhase, J. and Wolff, A.},
doi = {10.1109/TVCG.2012.193},
journal = {IEEE Transactions on Visualization and Computer Graphics},
number = {12},
pages = {2583--2592},
title = {Algorithms for Labeling Focus Regions},
volume = {18},
year = {2012}
}
|
|
J.-H. Haunert and L. Sering. Drawing road networks with focus regions. IEEE Transactions on Visualization and Computer Graphics, 17(12):2555-2562, 2011.
abstract
doi
bibtex
|
| Mobile users of maps typically need detailed information about their surroundings plus some context information about remote places. In order to avoid that the map partly gets too dense, cartographers have designed mapping functions that enlarge a user-defined focus region; such functions are sometimes called fish-eye projections. The extra map space occupied by the enlarged focus region is compensated by distorting other parts of the map. We argue that, in a map showing a network of roads relevant to the user, distortion should preferably take place in those areas where the network is sparse. Therefore, we do not apply a predefined mapping function. Instead, we consider the road network as a graph whose edges are the road segments. We compute a new spatial mapping with a graph-based optimization approach, minimizing the square sum of distortions at edges. Our optimization method is based on a convex quadratic program (CQP); CQPs can be solved in polynomial time. Important requirements on the output map are expressed as linear inequalities. In particular, we show how to forbid edge crossings. We have implemented our method in a prototype tool. For instances of different sizes, our method generated output maps that were far less distorted than those generated with a predefined fish-eye projection. Future work is needed to automate the selection of roads relevant to the user. Furthermore, we aim at fast heuristics for application in real-time systems.

@article{haunertSering2011,
abstract = {Mobile users of maps typically need detailed information about their surroundings plus some context information about remote places. In order to avoid that the map partly gets too dense, cartographers have designed mapping functions that enlarge a user-defined focus region; such functions are sometimes called fish-eye projections. The extra map space occupied by the enlarged focus region is compensated by distorting other parts of the map. We argue that, in a map showing a network of roads relevant to the user, distortion should preferably take place in those areas where the network is sparse. Therefore, we do not apply a predefined mapping function. Instead, we consider the road network as a graph whose edges are the road segments. We compute a new spatial mapping with a graph-based optimization approach, minimizing the square sum of distortions at edges. Our optimization method is based on a convex quadratic program (CQP); CQPs can be solved in polynomial time. Important requirements on the output map are expressed as linear inequalities. In particular, we show how to forbid edge crossings. We have implemented our method in a prototype tool. For instances of different sizes, our method generated output maps that were far less distorted than those generated with a predefined fish-eye projection. Future work is needed to automate the selection of roads relevant to the user. Furthermore, we aim at fast heuristics for application in real-time systems.},
author = {Haunert, J.-H. and Sering, L.},
doi = {10.1109/TVCG.2011.191},
journal = {IEEE Transactions on Visualization and Computer Graphics},
number = {12},
pages = {2555--2562},
title = {Drawing Road Networks with Focus Regions},
volume = {17},
year = {2011}
}
|
|
J.-H. Haunert and A. Wolff. Area aggregation in map generalisation by mixed-integer programming. International Journal of Geographical Information Science, 24(12):1871-1897, 2010.
abstract
doi
bibtex
|
| Topographic databases normally contain areas of different land cover classes, commonly defining a planar partition, that is, gaps and overlaps are not allowed. When reducing the scale of such a database, some areas become too small for representation and need to be aggregated. This unintentionally but unavoidably results in changes of classes. In this article we present an optimisation method for the aggregation problem. This method aims to minimise changes of classes and to create compact shapes, subject to hard constraints ensuring aggregates of sufficient size for the target scale. To quantify class changes we apply a semantic distance measure. We give a graph theoretical problem formulation and prove that the problem is NP-hard, meaning that we cannot hope to find an efficient algorithm. Instead, we present a solution by mixed-integer programming that can be used to optimally solve small instances with existing optimisation software. In order to process large datasets, we introduce specialised heuristics that allow certain variables to be eliminated in advance and a problem instance to be decomposed into independent sub-instances. We tested our method for a dataset of the official German topographic database ATKIS with input scale 1:50,000 and output scale 1:250,000. For small instances, we compare results of this approach with optimal solutions that were obtained without heuristics. We compare results for large instances with those of an existing iterative algorithm and an alternative optimisation approach by simulated annealing. These tests allow us to conclude that, with the defined heuristics, our optimisation method yields high-quality results for large datasets in modest time.

@article{haunertwolff2010b,
abstract = {Topographic databases normally contain areas of different land cover classes, commonly defining a planar partition, that is, gaps and overlaps are not allowed. When reducing the scale of such a database, some areas become too small for representation and need to be aggregated. This unintentionally but unavoidably results in changes of classes. In this article we present an optimisation method for the aggregation problem. This method aims to minimise changes of classes and to create compact shapes, subject to hard constraints ensuring aggregates of sufficient size for the target scale. To quantify class changes we apply a semantic distance measure. We give a graph theoretical problem formulation and prove that the problem is NP-hard, meaning that we cannot hope to find an efficient algorithm. Instead, we present a solution by mixed-integer programming that can be used to optimally solve small instances with existing optimisation software. In order to process large datasets, we introduce specialised heuristics that allow certain variables to be eliminated in advance and a problem instance to be decomposed into independent sub-instances. We tested our method for a dataset of the official German topographic database ATKIS with input scale 1:50,000 and output scale 1:250,000. For small instances, we compare results of this approach with optimal solutions that were obtained without heuristics. We compare results for large instances with those of an existing iterative algorithm and an alternative optimisation approach by simulated annealing. These tests allow us to conclude that, with the defined heuristics, our optimisation method yields high-quality results for large datasets in modest time.},
author = {Haunert, J.-H. and Wolff, A.},
doi = {10.1080/13658810903401008},
journal = {International Journal of Geographical Information Science},
number = {12},
pages = {1871--1897},
title = {Area aggregation in map generalisation by mixed-integer programming},
url = {http://www.informaworld.com/smpp/content~db=all~content=a930246337~tab=content},
volume = {24},
year = {2010}
}
|
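A heavily simplified version of the aggregation model fits in a small mixed-integer program: pick one class per area so that the area-weighted semantic distance of class changes is minimized, while every class that is used receives enough total area for the target scale. A hedged PuLP sketch with invented numbers; it deliberately drops the contiguity and compactness requirements that make the real problem hard:

from pulp import LpProblem, LpMinimize, LpVariable, lpSum, LpBinary

sizes = {"a1": 4.0, "a2": 1.0, "a3": 2.0}               # area sizes (toy)
orig = {"a1": "forest", "a2": "water", "a3": "forest"}  # current classes
classes = ["forest", "water"]
dist = {("forest", "water"): 3.0, ("water", "forest"): 2.0}  # semantic distances
theta = 3.0                                             # minimum size of a used class

m = LpProblem("area_aggregation", LpMinimize)
x = {(a, c): LpVariable(f"x_{a}_{c}", cat=LpBinary) for a in sizes for c in classes}
y = {c: LpVariable(f"y_{c}", cat=LpBinary) for c in classes}

m += lpSum(sizes[a] * dist.get((orig[a], c), 0.0) * x[a, c] for a in sizes for c in classes)
for a in sizes:
    m += lpSum(x[a, c] for c in classes) == 1           # exactly one class per area
for c in classes:
    for a in sizes:
        m += x[a, c] <= y[c]                            # class counts as used
    m += lpSum(sizes[a] * x[a, c] for a in sizes) >= theta * y[c]  # size threshold

m.solve()
print({a: next(c for c in classes if x[a, c].value() == 1) for a in sizes})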
|
J.-H. Haunert and M. Sester. Area collapse and road centerlines based on straight skeletons. GeoInformatica, 12(2):169-191, 2008.
abstract
doi
bibtex
|
| Skeletonization of polygons is a technique, which is often applied to problems of cartography and geographic information science. Especially it is needed for generalization tasks such as the collapse of small or narrow areas, which are negligible for a certain scale. Different skeleton operators can be used for such tasks. One of them is the straight skeleton, which was rediscovered by computer scientists several years ago after decades of neglect. Its full range of practicability and its benefits for cartographic applications have not been revealed yet. Based on the straight skeleton an area collapse that preserves topological constraints as well as a partial area collapse can be performed. An automatic method for the derivation of road centerlines from a cadastral dataset, which uses special characteristics of the straight skeleton, is shown.

@article{haunert2008,
abstract = {Skeletonization of polygons is a technique, which is often applied to problems of cartography and geographic information science. Especially it is needed for generalization tasks such as the collapse of small or narrow areas, which are negligible for a certain scale. Different skeleton operators can be used for such tasks. One of them is the straight skeleton, which was rediscovered by computer scientists several years ago after decades of neglect. Its full range of practicability and its benefits for cartographic applications have not been revealed yet. Based on the straight skeleton an area collapse that preserves topological constraints as well as a partial area collapse can be performed. An automatic method for the derivation of road centerlines from a cadastral dataset, which uses special characteristics of the straight skeleton, is shown.},
author = {Haunert, J.-H. and Sester, M.},
doi = {10.1007/s10707-007-0028-x},
journal = {GeoInformatica},
number = {2},
pages = {169--191},
title = {Area collapse and road centerlines based on straight skeletons},
url = {http://www.springerlink.com/content/20103w9530887286/},
volume = {12},
year = {2008}
}
|
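A convenient straight-skeleton implementation is not part of the standard Python geospatial stack, but the centerline idea can be approximated with the medial axis: take the Voronoi diagram of densely sampled boundary points and keep the Voronoi vertices inside the polygon. A rough stand-in sketch with scipy and shapely (an approximation of the medial axis, not the straight skeleton the paper uses):

import numpy as np
from scipy.spatial import Voronoi
from shapely.geometry import Point, Polygon

# An elongated toy "road" polygon whose centerline we want.
road = Polygon([(0, 0), (20, 0), (20, 2), (0, 2)])

# Sample the boundary densely and build the Voronoi diagram of the samples.
ds = np.linspace(0, 1, 200, endpoint=False)
samples = np.array([(p.x, p.y) for p in
                    (road.exterior.interpolate(d, normalized=True) for d in ds)])
vor = Voronoi(samples)

# Voronoi vertices lying inside the polygon approximate the medial axis.
centerline_pts = [v for v in vor.vertices if road.contains(Point(v))]
print(len(centerline_pts), "approximate centerline vertices")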