M. Bekos, B. Niedermann, and M. Nöllenburg. External labeling techniques: a taxonomy and survey. Computer Graphics Forum, 38(3):833-860, 2019.
External labeling is frequently used for annotating features in graphical displays and visualizations, such as technical illustrations, anatomical drawings, or maps, with textual information. Such a labeling connects features within an illustration by thin leader lines with their labels, which are placed in the empty space surrounding the image. Over the last twenty years, a large body of literature in diverse areas of computer science has been published that investigates many different aspects, models, and algorithms for automatically placing external labels for a given set of features. This state-of-the-art report introduces a first unified taxonomy for categorizing the different results in the literature and then presents a comprehensive survey of the state of the art, a sketch of the most relevant algorithmic techniques for external labeling algorithms, as well as a list of open research challenges in this multidisciplinary research field.

@article{doi:10.1111/cgf.13729,
author = {Bekos, M. and Niedermann, B. and Nöllenburg, M.},
doi = {10.1111/cgf.13729},
eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1111/cgf.13729},
journal = {Computer Graphics Forum},
number = {3},
pages = {833--860},
title = {External Labeling Techniques: A Taxonomy and Survey},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.13729},
volume = {38},
year = {2019}
}
B. Niedermann and J.-H. Haunert. Focus+context map labeling with optimized clutter reduction. International Journal of Cartography, 5(2-3):158-177, 2019. Special issue of 29th International Cartographic Conference (ICC'19).
Zooming is a basic operation that many digital maps support for interactive exploration. Especially for maps on small-screen devices it is a helpful operation to uncover the user's region of interest possibly hidden by labels, e.g., points of interest represented by icons. However, by scaling the map larger the user loses the context. As a consequence, the user might need to repeatedly zoom in and out to explore the map step by step.

We present an approach that reduces the necessity of zooming by providing the user with the possibility of displacing the labels of a circular focus region. To that end, we utilize techniques from focus+context maps, implementing the displacement of the labels by fish-eye projections. The visual association between labels and their point features is established by connecting lines aggregated to bundles. Our approach particularly guarantees that labels move smoothly when the user continuously shifts the focus region, which reduces distracting flickering effects while exploring the map by panning the map view. Further, when the user stops moving the focus region, mathematical programming is applied to optimize the positions of the displaced labels. In an evaluation on real-world data and synthetically generated data we show that our approach substantially increases the legibility of both the focus region and the displaced labels.

@article{niedermann2019,
author = {Niedermann, B. and Haunert, J.{-}H.},
doi = {10.1080/23729333.2019.1613072},
journal = {International Journal of Cartography},
note = {Special issue of 29th International Cartographic Conference (ICC'19)},
number = {2--3},
pages = {158--177},
publisher = {Taylor \& Francis},
title = {Focus+Context Map Labeling with Optimized Clutter Reduction},
volume = {5},
year = {2019}
}
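A minimal sketch of the fish-eye displacement at the heart of this approach, in Python: it uses the classic Sarkar-Brown radial mapping as a stand-in, so the exact projection formula, the magnification factor d, and the focus radius are illustrative assumptions, not the authors' implementation.

    import numpy as np

    def fisheye_displace(points, center, radius, d=3.0):
        """Push points radially away from a circular focus region.

        Sarkar-Brown style mapping: a point at normalized distance
        x = r / radius from the focus center is moved to
        x' = (d + 1) * x / (d * x + 1), which magnifies the focus
        region while keeping its boundary (x = 1) fixed.
        Points outside the focus region are left untouched.
        """
        points = np.asarray(points, dtype=float)
        offsets = points - center
        r = np.linalg.norm(offsets, axis=1)
        inside = (r > 0) & (r < radius)
        x = r[inside] / radius
        x_new = (d + 1) * x / (d * x + 1)
        scale = x_new / x
        displaced = points.copy()
        displaced[inside] = center + offsets[inside] * scale[:, None]
        return displaced

    # Toy usage: label anchors near the focus center are pushed outward,
    # freeing space in the user's region of interest.
    labels = np.array([[1.0, 0.0], [3.0, 4.0], [20.0, 0.0]])
    print(fisheye_displace(labels, center=np.array([0.0, 0.0]), radius=10.0))
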
B. Niedermann, I. Rutter, and M. Wolf. Efficient algorithms for ortho-radial graph drawing. In volume 129 of Leibniz International Proceedings in Informatics. Proc. 35th International Symposium on Computational Geometry (SoCG '19). Schloss Dagstuhl--Leibniz-Zentrum für Informatik, 2019.
Orthogonal drawings, i.e., embeddings of graphs into grids, are a classic topic in Graph Drawing. Often the goal is to find a drawing that minimizes the number of bends on the edges. A key ingredient for bend minimization algorithms is the existence of an orthogonal representation that allows one to describe such drawings purely combinatorially by only listing the angles between the edges around each vertex and the directions of bends on the edges, while neglecting any kind of geometric information such as vertex coordinates or edge lengths.

Barth et al. have established the existence of an analogous ortho-radial representation for ortho-radial drawings, which are embeddings into an ortho-radial grid, whose gridlines are concentric circles around the origin and straight-line spokes emanating from the origin but excluding the origin itself. While any orthogonal representation admits an orthogonal drawing, it is the circularity of the ortho-radial grid that makes the problem of characterizing valid ortho-radial representations all the more complex and interesting. Barth et al. prove such a characterization. However, the proof is existential and does not provide an efficient algorithm for testing whether a given ortho-radial representation is valid, let alone actually obtaining a drawing from an ortho-radial representation.

In this paper we give quadratic-time algorithms for both of these tasks. They are based on a suitably constrained left-first DFS in planar graphs and several new insights on ortho-radial representations. Our validity check requires quadratic time, and a naive application of it would yield a quartic algorithm for constructing a drawing from a valid ortho-radial representation.

@inproceedings{nrw-eaorgd-19,
author = {Niedermann, B. and Rutter, I. and Wolf, M.},
booktitle = {Proc. 35th International Symposium on Computational Geometry (SoCG '19)},
doi = {10.4230/LIPIcs.SoCG.2019.53},
publisher = {Schloss Dagstuhl--Leibniz-Zentrum f{\"u}r Informatik},
series = {Leibniz International Proceedings in Informatics},
title = {{Efficient Algorithms for Ortho-Radial Graph Drawing}},
volume = {129},
year = {2019}
}
L. Barth, A. Gemsa, B. Niedermann, and M. Nöllenburg. On the readability of leaders in boundary labeling. Information Visualization, 18(1):110-132, 2019.
External labeling deals with annotating features in images with labels that are placed outside of the image and are connected by curves (so-called leaders) to the corresponding features. While external labeling has been extensively investigated from a perspective of automatization, the research on its readability has been neglected. In this article, we present the first formal user study on the readability of leader types in boundary labeling, a special variant of external labeling that considers rectangular image contours. We consider the four most studied leader types (straight, L-shaped, diagonal, and S-shaped) with respect to their performance, that is, whether and how fast a viewer can assign a feature to its label and vice versa. We give a detailed analysis of the results regarding the readability of the four models and discuss their aesthetic qualities based on the users' preference judgments and interviews. As a consequence of our experiment, we can generally recommend L-shaped leaders as the best compromise between measured task performance and subjective preference ratings, while straight and diagonal leaders received mixed ratings in the two measures. S-shaped leaders are generally not recommended from a practical point of view.

@article{doi:10.1177/1473871618799500,
author = {Barth, L. and Gemsa, A. and Niedermann, B. and Nöllenburg, M.},
doi = {10.1177/1473871618799500},
journal = {Information Visualization},
number = {1},
pages = {110--132},
title = {On the readability of leaders in boundary labeling},
volume = {18},
year = {2019}
}
A. Bonerath, J.-H. Haunert, and B. Niedermann. Computing alpha-shapes for temporal range queries on point sets. In Proceedings of the 35th European Workshop on Computational Geometry (EuroCG'19). 2019. Preprint.
The interactive exploration of data requires data structures that can be repeatedly queried to obtain simple visualizations of parts of the data. In this paper we consider the scenario that the data is a set of points each associated with a time stamp and that the result of each query is visualized by an α-shape, which generalizes the concept of convex hulls. Instead of computing each shape independently, we suggest and analyze a simple data structure that aggregates the α-shapes of all possible queries. Once the data structure is built, it particularly allows us to query single α-shapes without retrieving the actual (possibly large) point set and thus to rapidly produce small previews of the queried data.

@inproceedings{bhn-castrqps-19,
author = {Bonerath, A. and Haunert, J.-H. and Niedermann, B.},
booktitle = {Proceedings of the 35th European Workshop on Computational Geometry (EuroCG'19)},
note = {Preprint.},
title = {{Computing alpha-Shapes for Temporal Range Queries on Point Sets}},
year = {2019}
}
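For intuition, here is the naive per-query baseline that the aggregated data structure is designed to avoid, sketched in Python: filter the points by the queried time range and compute the α-shape from scratch via a Delaunay triangulation. The α-convention used here (keep a triangle if its circumradius is below 1/α) is one of several in the literature and is an assumption of this sketch, not the authors' implementation.

    import numpy as np
    from scipy.spatial import Delaunay

    def alpha_shape_triangles(points, alpha):
        """Return the Delaunay triangles belonging to the alpha-shape,
        using the convention that a triangle is kept if its
        circumradius is smaller than 1/alpha."""
        tri = Delaunay(points)
        kept = []
        for simplex in tri.simplices:
            a, b, c = points[simplex]
            la, lb, lc = (np.linalg.norm(b - c),
                          np.linalg.norm(a - c),
                          np.linalg.norm(a - b))
            s = 0.5 * (la + lb + lc)                      # semi-perimeter
            area2 = max(s * (s - la) * (s - lb) * (s - lc), 0.0)
            if area2 == 0.0:
                continue                                  # degenerate triangle
            circumradius = la * lb * lc / (4.0 * np.sqrt(area2))
            if circumradius < 1.0 / alpha:
                kept.append(simplex)
        return np.array(kept)

    def temporal_alpha_shape(points, stamps, t1, t2, alpha):
        """Naive baseline: restrict to points with time stamp in
        [t1, t2] and compute their alpha-shape from scratch."""
        mask = (stamps >= t1) & (stamps <= t2)
        return alpha_shape_triangles(points[mask], alpha), points[mask]
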
B. Niedermann and J.-H. Haunert. Anchored metro maps: combining schematic maps with geographic maps for multi-modal navigation. In Schematic Mapping Workshop 2019. 2019. Poster abstract.
For conducting navigation tasks in public transportation systems, schematic maps (e.g., metro maps) are the first choice, as they reduce the amount of information to a minimum. On the other hand, for navigation tasks in street networks, classical geographic maps are preferable, as they depict the area as accurately as possible. In this work, we create synergies between both types of maps by laying the metro map over the street map. We call those maps anchored metro maps, as we visually attach the metro stations of the metro map to their counterparts on the street map. Currently, we are developing algorithms optimizing the matching between both maps. In future research we plan to use this approach to show that the combination of schematic and geographical maps leads to an improvement for certain navigation tasks.

@inproceedings{nh-amm-19,
author = {Niedermann, B. and Haunert, J.-H.},
booktitle = {Schematic Mapping Workshop 2019},
note = {Poster abstract.},
title = {Anchored Metro Maps: Combining Schematic Maps with Geographic Maps for Multi-modal Navigation},
year = {2019}
}
H.-Y. Wu, B. Niedermann, S. Takahashi, and M. Nöllenburg. A survey on computing schematic network maps: the challenge to interactivity. In Schematic Mapping Workshop 2019. 2019. Preprint.
Schematic maps are in daily use to show the connectivity of subway systems and to help travellers plan their journeys effectively. This study surveys up-to-date algorithmic approaches in order to give an overview of the state of the art in schematic network mapping. The study investigates the hypothesis that the choice of algorithmic approach is often guided by the requirements of the mapping application. For example, an algorithm that computes globally optimal solutions for schematic maps is capable of producing results for printing, while it is not suitable for computing instant layouts due to its long running time. Our analysis and discussion, therefore, focus on the computational complexity of the problem formulation and the running times of the schematic map algorithms, including algorithmic network layout techniques and station labeling techniques. The correlation between problem complexity and running time is then visually depicted using scatter plot diagrams. Moreover, since metro maps are common metaphors for data visualization, we also investigate online tools and application domains using metro map representations for analytics purposes, and finally summarize the potential future opportunities for schematic maps.

@inproceedings{wntn-scsnm-19,
author = {Wu, H.-Y. and Niedermann, B. and Takahashi, S. and Nöllenburg, M.},
booktitle = {Schematic Mapping Workshop 2019},
note = {Preprint.},
title = {A Survey on Computing Schematic Network Maps: The Challenge to Interactivity},
year = {2019}
}
Y. Dehbi, A. Henn, G. Gröger, V. Stroh, and L. Plümer. Active sampling and model based prediction for fast and robust detection and reconstruction of complex roofs in 3D point clouds. In volume IV-4/W8 of ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences. Proc. 14th 3D Geoinfo Conference, pages 43-50. 2019.
3D city models in Level-of-Detail 2 (LoD2) are nowadays inevitable for many applications such as solar radiation calculation and energy demand estimation. City-wide models are required, which can solely be acquired by fully automatic approaches. In this paper we propose a novel method for the 3D reconstruction of LoD2 buildings with structured roofs and dormers from LIDAR data. We apply a hybrid strategy which combines the strengths of top-down and bottom-up methods. The main contribution is the introduction of an active sampling strategy which applies a cascade of filters focusing on promising samples in an early stage and avoiding the pitfalls of RANSAC-based approaches. Such filters are based on prior knowledge represented by (non-parametric) density distributions. Samples are pairs of surflets, i.e. 3D points together with normal vectors derived from a plane approximation of their neighborhood. Surflet pairs immediately imply important roof parameters such as azimuth, inclination and ridge height, as well as parameters for internal precision and consistency, giving a good base for assessment and ranking. Ranking of samples leads to a small number of promising hypotheses. Model selection is based on predictions, for example of ridge positions, which can easily be falsified based on the given observations. Our approach does not require building footprints as a prerequisite. They are derived in a preprocessing step using machine learning methods, in particular Support Vector Machines (SVM).

@inproceedings{dehbi2019active,
author = {Dehbi, Y. and Henn, A. and Gr\"oger, G. and Stroh, V. and Pl\"umer, L.},
booktitle = {Proc. 14th 3D Geoinfo Conference},
doi = {10.5194/isprs-annals-IV-4-W8-43-2019},
pages = {43--50},
series = {ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
title = {Active Sampling and Model Based Prediction for Fast and Robust Detection and Reconstruction of Complex Roofs in 3D Point Clouds},
volume = {IV-4/W8},
year = {2019}
}
F. Biljecki and Y. Dehbi. Raise the roof: towards generating LoD2 models without aerial surveys using machine learning. In volume IV-4/W8 of ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences. Proc. 14th 3D Geoinfo Conference, pages 27-34. 2019.
LoD2 models include roof shapes and thus provide added value over their LoD1 counterparts for some applications such as estimating the solar potential of rooftops. However, because of laborious acquisition workflows they are more difficult to obtain than LoD1 models and are thus less prevalent in practice. This paper explores whether the type of the roof of a building can be inferred from semantic LoD1 data, potentially leading to their free upgrade to LoD2, in the broader context of a workflow for their generation without aerial campaigns. Inferring rooftop information has also other uses: data quality and verification of existing data, supporting roof reconstruction, and enriching LoD0/LoD1 data with the attribute of the roof type. We tested a random forest classifier that analyses attributes of buildings to predict the type of the roof. Experiments carried out on the 3D city model of Hamburg using 12 attributes achieve an accuracy of 85% in identifying the roof type from sparse data using a multiclass classification. The performance of binary classification hits the roof: 92% accuracy in predicting whether a roof is flat or not. It turns out that the two most useful variables are footprint area and building height (i.e. LoD1 models without any semantics, or LoD0 with such information), and using only them also yields relatively accurate results.

@inproceedings{dehbi2019towards,
author = {Biljecki, F. and Dehbi, Y.},
booktitle = {Proc. 14th 3D Geoinfo Conference},
doi = {10.5194/isprs-annals-IV-4-W8-27-2019},
pages = {27--34},
series = {ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
title = {Raise the roof: Towards generating LoD2 models without aerial surveys using machine learning},
volume = {IV-4/W8},
year = {2019}
}
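Since footprint area and building height alone are reported to yield relatively accurate results, that baseline is easy to sketch with scikit-learn. The feature set and the synthetic training data below are purely illustrative stand-ins; the paper works with 12 attributes of the semantic city model of Hamburg.

    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import train_test_split

    # Illustrative stand-in data: [footprint_area_m2, building_height_m]
    # with roof-type labels such as "flat", "gabled", "hipped".
    rng = np.random.default_rng(0)
    X = rng.uniform([50, 3], [2000, 40], size=(500, 2))
    y = np.where(X[:, 1] > 20, "flat",                 # tall buildings: often flat
                 np.where(X[:, 0] < 300, "gabled", "hipped"))

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    clf.fit(X_train, y_train)
    print("accuracy:", clf.score(X_test, y_test))
    print("feature importances:", clf.feature_importances_)
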
Y. Dehbi, S. Koppers, and L. Plümer. Probability density based classification and reconstruction of roof structures from 3D point clouds. In volume XLII-4/W16 of ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences. Proc. 6th International Conference on Geomatics and Geospatial Technology, pages 177-184. 2019.
3D building models including roofs are a key prerequisite in many fields of application such as the estimation of the solar suitability of rooftops. The accurate reconstruction of roofs with dormers is sometimes challenging. Without careful separation of the dormer points from the points on the roof surface, the estimation of the roof areas is distorted in a most characteristic way, which then lets the dormer points appear as white noise. The characteristic distortion of the density distribution of the defects by dormers in comparison to the expected normal distribution is the starting point of our method. We propose a hierarchical method which improves roof reconstruction from LiDAR point clouds in a model-based manner, separating dormer points from roof points using classification methods. The key idea is to exploit probability density functions (PDFs) to reveal roof properties and design skilful features for a supervised learning method using support vector machines (SVMs). Properties of the PDFs of measures such as residuals of model-based estimated roof models are used among others. A clustering step leads to a semantic segmentation of the point cloud enabling subsequent reconstruction. The approach is tested on real data as well as simulated point clouds. The latter allow for experiments with various roof and dormer types and different parameters using an implemented simulation toolbox which generates virtual buildings and synthetic point clouds.

@inproceedings{dehbi2019probability,
author = {Dehbi, Y. and Koppers, S. and Pl\"umer, L.},
booktitle = {Proc. 6th International Conference on Geomatics and Geospatial Technology},
doi = {10.5194/isprs-archives-XLII-4-W16-177-2019},
pages = {177--184},
series = {ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
title = {Probability density based classification and reconstruction of roof structures from 3D point clouds},
volume = {XLII-4/W16},
year = {2019}
}
Y. Dehbi, L. Lucks, J. Behmann, L. Klingbeil, and L. Plümer. Improving GPS trajectories using 3D city models and kinematic point clouds. In volume IV-4/W9 of ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences. Proc. 4th International Conference on Smart Data and Smart Cities, pages 35-42. 2019.
Accurate and robust positioning of vehicles in urban environments is of high importance for many applications (e.g. autonomous driving or mobile mapping). In the case of mobile mapping systems, a simultaneous mapping of the environment using laser scanning and an accurate positioning using GNSS is targeted. This requirement is often not guaranteed in shadowed cities, where GNSS signals are usually disturbed, weak or even unavailable. Both the generated point clouds and the derived trajectory are consequently imprecise. We propose a novel approach which incorporates prior knowledge, i.e. a 3D building model of the environment, and improves both the point cloud and the trajectory. The key idea is to benefit from the complementarity of GNSS and 3D building models. The point cloud is matched to the city model using a point-to-plane ICP. An informed sampling of appropriate matching points is enabled by a pre-classification step. Support vector machines (SVMs) are used to discriminate between facade and remaining points. Local inconsistencies are tackled by a segment-wise partitioning of the point cloud, where an interpolation guarantees a seamless transition between the segments. The full processing chain is implemented, from the detection of facades in the point clouds, over the matching between them and the building models, to the update of the trajectory estimate. The general applicability of the implemented method is demonstrated on an inner-city data set recorded with a mobile mapping system.

@inproceedings{dehbi2019Improving,
author = {Dehbi, Y. and Lucks, L. and Behmann, J. and Klingbeil, L. and Pl\"umer, L.},
booktitle = {Proc. 4th International Conference on Smart Data and Smart Cities},
doi = {10.5194/isprs-annals-IV-4-W9-35-2019},
pages = {35--42},
series = {ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
title = {Improving GPS Trajectories using 3D City Models and Kinematic Point Clouds},
volume = {IV-4/W9},
year = {2019}
}
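The matching step, a point-to-plane ICP between the kinematic point cloud and the planar facades of the city model, reduces to a small linear least-squares problem per iteration. The sketch below is the generic textbook linearization, assuming facade correspondences (target points with normals) are already given, e.g., from the SVM-based pre-classification; it is not the authors' code.

    import numpy as np

    def point_to_plane_icp_step(src, dst, normals):
        """One linearized point-to-plane ICP iteration.

        Minimizes sum_i ((R @ p_i + t - q_i) . n_i)^2 over a small
        rotation R (angles a = (ax, ay, az)) and translation t, using
        the standard linearization R @ p ~ p + a x p.
        Returns a 4x4 homogeneous transform.
        """
        # One row per correspondence: [cross(p, n), n]; unknowns x = [a, t].
        A = np.hstack([np.cross(src, normals), normals])      # (N, 6)
        b = -np.einsum('ij,ij->i', src - dst, normals)        # (N,)
        x, *_ = np.linalg.lstsq(A, b, rcond=None)
        ax, ay, az = x[:3]
        R = np.array([[1.0, -az,  ay],
                      [ az, 1.0, -ax],
                      [-ay,  ax, 1.0]])                       # small-angle rotation
        T = np.eye(4)
        T[:3, :3] = R
        T[:3, 3] = x[3:]
        return T
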
A. Förster, J. Behley, J. Behmann, and R. Roscher. Hyperspectral plant disease forecasting using generative adversarial networks. In International Geoscience and Remote Sensing Symposium. 2019.
With a limited amount of arable land, the increasing demand for food induced by population growth can only be met with more effective crop production and more resistant plants. Since crop plants are exposed to many different stress factors, it is relevant to investigate those factors as well as their behavior and reactions. Among the most severe stress factors are diseases, resulting in a high loss of cultivated plants. Our main objective is the forecasting of the spread of disease symptoms on barley plants using a Cycle-Consistent Generative Adversarial Network. Our contributions are: (1) we provide a daily forecast for one week to advance research for better planning of plant protection measures, and (2) in contrast to most approaches, which use only RGB images, we learn a model with hyperspectral images, providing an information-rich result. In our experiments, we analyze healthy barley leaves and leaves which were inoculated with powdery mildew. Images of the leaves were acquired daily with a hyperspectral microscope, from day 3 to day 14 after inoculation. We provide two methods for evaluating the predicted time series with respect to the reference time series.

@inproceedings{foerster2019a,
author = {Förster, A. and Behley, J. and Behmann, J. and Roscher, R.},
booktitle = {International Geoscience and Remote Sensing Symposium},
title = {Hyperspectral plant disease forecasting using generative adversarial networks},
year = {2019}
}
A. Bonerath, B. Niedermann, and J.-H. Haunert. Retrieving alpha-shapes and schematic polygonal approximations for sets of points within queried temporal ranges. In Proc. 27th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL '19), pages 249-258. 2019. trailer: https://youtu.be/mlnUDhbMSfQ
The interactive exploration of data requires data structures that can be repeatedly queried to obtain simple visualizations of parts of the data. We consider the scenario that the data is a set of points each associated with a time stamp and that the result of each query is visualized by an α-shape, which generalizes the concept of convex hulls. Instead of computing each shape independently, we suggest and analyze a simple data structure that aggregates the α-shapes of all possible queries. Once the data structure is built, it particularly allows us to query single α-shapes without retrieving the actual (possibly large) point set and thus to rapidly produce small previews of the queried data. We discuss the data structure for the original α-shapes as well as for a schematized version of α-shapes, which further simplifies the visualization. We evaluate the data structure on real-world data. The experiments indicate linear memory consumption with respect to the number of points, which makes the data structure applicable in practice, although the size is quadratic for a theoretical worst-case example.

@inproceedings{bhn-rasspa-19,
author = {Bonerath, A. and Niedermann, B. and Haunert, J.-H.},
booktitle = {Proc. 27th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL '19)},
doi = {10.1145/3347146.3359087},
note = {trailer: https://youtu.be/mlnUDhbMSfQ},
pages = {249--258},
title = {{Retrieving alpha-Shapes and Schematic Polygonal Approximations for Sets of Points within Queried Temporal Ranges}},
year = {2019}
}
S. Gedicke, B. Niedermann, and J.-H. Haunert. Multi-page Labeling of Small-screen Maps with a Graph-coloring Approach. In LBS 2019: 15th International Conference on Location Based Services, November 11-13, 2019, Vienna, AT. 2019.
Annotating small-screen maps with additional content such as labels for points of interest is a highly challenging problem that requires new algorithmic solutions. A common labeling approach is to select a maximum-size subset of all labels such that no two labels constitute a graphical conflict and to display only the selected labels in the map. A disadvantage of this approach is that a user often has to zoom in and out repeatedly to access all points of interest in a certain region. Since this can be very cumbersome, we suggest an alternative approach that allows the scale of the map to be kept fixed. Our approach is to distribute all labels on multiple pages through which the user can navigate, for example, by swiping the pages from right to left. We in particular optimize the assignment of the labels to pages such that no page contains two conflicting labels, more important labels appear on the first pages, and sparsely labeled pages are avoided.

Algorithmically, we reduce this problem to a weighted and constrained graph coloring problem based on a graph representing conflicts between labels, such that an optimal coloring of the graph corresponds to a multi-page labeling. We propose a simple greedy heuristic that is fast enough to be deployed in web applications. We evaluate the quality of the obtained labelings by comparing them with optimal solutions, which we obtain by means of integer linear programming formulations. In our evaluation on real-world data we particularly show that the proposed heuristic achieves near-optimal solutions with respect to the chosen objective function and that it substantially improves the legibility of the labels in comparison to the simple strategy of assigning the labels to pages solely based on the labels' weights.

@inproceedings{gedicke2019a,
author = {Gedicke, S. and Niedermann, B. and Haunert, J.-H.},
booktitle = {LBS 2019: 15th International Conference on Location Based Services, November 11--13, 2019, Vienna, AT},
title = {{M}ulti-page {L}abeling of {S}mall-screen {M}aps with a {G}raph-coloring {A}pproach},
year = {2019}
}
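A minimal version of such a greedy page-assignment heuristic can be sketched as follows. The conflict graph is assumed to be given as a set of conflicting label pairs, and the sketch omits the paper's additional objective terms, such as the penalty for sparsely filled pages.

    def assign_pages(weights, conflicts):
        """Greedy multi-page labeling: process labels by decreasing
        weight and put each one on the first page that holds no
        conflicting label.

        weights   -- dict label -> importance
        conflicts -- set of frozensets {a, b} of conflicting labels
        Returns a list of pages, each a list of labels.
        """
        pages = []
        for label in sorted(weights, key=weights.get, reverse=True):
            for page in pages:
                if not any(frozenset((label, other)) in conflicts
                           for other in page):
                    page.append(label)
                    break
            else:                      # no existing page fits: open a new one
                pages.append([label])
        return pages

    # Toy example: A conflicts with B and C; B conflicts with C.
    weights = {"A": 5, "B": 3, "C": 2, "D": 1}
    conflicts = {frozenset(p) for p in [("A", "B"), ("A", "C"), ("B", "C")]}
    print(assign_pages(weights, conflicts))   # [['A', 'D'], ['B'], ['C']]
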
J. Oehrlein, B. Niedermann, and J.-H. Haunert. Analyzing the supply and detecting spatial patterns of urban green spaces via optimization. Journal of Photogrammetry, Remote Sensing and Geoinformation Science (PFG), 87(4):137-158, 2019.
Green spaces in urban areas offer great possibilities of recreation, provided that they are easily accessible. Therefore, an ideal city should offer large green spaces close to where its residents live. Although there are several measures for the assessment of urban green spaces, the existing measures usually focus either on the total size of all green spaces or on their accessibility. Hence, in this paper, we present a new methodology for assessing green-space provision and accessibility in an integrated way. The core of our methodology is an algorithm based on linear programming that computes an optimal assignment between residential areas and green spaces. In a basic setting, it assigns green spaces of a prescribed size exclusively to each resident, such that an objective function that, in particular, considers the average distance between residents and assigned green spaces is optimized. We contribute a detailed presentation on how to engineer an assignment-based method, such that it yields plausible results (e.g., by considering distances in the road network) and becomes efficient enough for the analysis of large metropolitan areas (e.g., we were able to process an instance of Berlin with about 130,000 polygons representing green spaces, 18,000 polygons representing residential areas, and 6 million road segments). Furthermore, we show that the optimal assignments resulting from our method enable a subsequent analysis that reveals both interesting global properties of a city as well as spatial patterns. For example, our method allows us to identify neighborhoods with a shortage of green spaces, which will help spatial planners in their decision-making.

@article{oehrlein2019-pfg,
author = {Oehrlein, J. and Niedermann, B. and Haunert, J.-H.},
doi = {10.1007/s41064-019-00081-0},
journal = {Journal of Photogrammetry, Remote Sensing and Geoinformation Science (PFG)},
number = {4},
pages = {137--158},
title = {Analyzing the Supply and Detecting Spatial Patterns of Urban Green Spaces via Optimization},
volume = {87},
year = {2019}
}
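At its core, the assignment is a transportation problem: each residential area demands a prescribed amount of green area, each green space supplies at most its size, and the distance-weighted assignment is minimized. A toy instance of that linear program, solvable with scipy, is sketched below; the distances, demands, and capacities are made-up numbers, and the paper's actual objective and engineering are considerably richer.

    import numpy as np
    from scipy.optimize import linprog

    # Toy instance: 2 residential areas, 3 green spaces.
    cost = np.array([[1.0, 4.0, 6.0],      # network distances (km)
                     [5.0, 2.0, 3.0]])
    demand = np.array([8.0, 6.0])          # required green area per residential area
    supply = np.array([5.0, 5.0, 10.0])    # capacity of each green space

    n, m = cost.shape
    # Variables x[i, j] >= 0, flattened row-major; minimize sum cost * x.
    # Each residential area must receive exactly its demand ...
    A_eq = np.zeros((n, n * m))
    for i in range(n):
        A_eq[i, i * m:(i + 1) * m] = 1.0
    # ... and no green space may be assigned beyond its capacity.
    A_ub = np.zeros((m, n * m))
    for j in range(m):
        A_ub[j, j::m] = 1.0

    res = linprog(cost.ravel(), A_ub=A_ub, b_ub=supply,
                  A_eq=A_eq, b_eq=demand, bounds=(0, None))
    print(res.x.reshape(n, m))   # optimal assignment of green area
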
E. Lehndorff, A. Rodionov, C. Stremtan, W. Brand, L. Plümer, P. Rottmann, B. Spiering, and W. Amelung. Element patterns and carbon turnover in soil microaggregates. In volume 21 of Geophysical Research Abstracts. 2019.
The mean residence time of organic matter in soil ranges from days to millennia. Although chemical and physical processes are known to control the storage of organic matter in soil, the role of spatial arrangements in soil remains largely unknown due to the lack of in-situ techniques operating on the micro-scale. We asked whether deterministic patterns in element arrangement occur in large and small soil microaggregates (250-53 and 53-20 µm), and how these contribute to the storage of organic matter. To do so, we studied surface soils with increasing clay contents (sandy to loamy Luvisols, Germany) and subjected 60 individual aggregates to elemental mapping by electron probe micro analysis (EPMA), which recorded C, N, P, Al, Fe, Ca, K, Cl, and Si contents at micrometer-scale resolution. We further developed the first laser-ablation isotope ratio mass spectrometry technique in soil science detecting δ13C composition on the micro-scale (LA-IRMS) and employed this to trace micro-gradients in undisturbed soil and soil microaggregate samples. We found a pronounced heterogeneity in aggregate structure and composition, which was not reproducible for different microaggregates from the same soil fraction, and which was largely independent of the clay content in soil. However, neighborhood analyses revealed close spatial correlations between organic matter debris (C:N approx. 100:10) and microbial organic matter (C:N approx. 10:1), indicating a spatial relationship between source and consumer. There was no systematic relationship between soil minerals and organic matter, suggesting that well-established macroscale correlations of pedogenic oxides and clay with soil organic matter storage do not apply to soil microaggregates. From first applications of LA-IRMS to soil we also found a high δ13C variability of up to 9‰ along spatial gradients of less than 300 µm, suggesting the appearance of very small hotspots of isotope enrichment and organic matter turnover by metabolic processes.

@inproceedings{lehndorff2019element,
author = {Lehndorff, E. and Rodionov, A. and Stremtan, C. and Brand, W. and Pl{\"u}mer, L. and Rottmann, P. and Spiering, B. and Amelung, W.},
booktitle = {Geophysical Research Abstracts},
title = {Element patterns and carbon turnover in soil microaggregates},
volume = {21},
year = {2019}
}