|
J. Sultan, G. Ben-Haim, and J.-H. Haunert. Extracting spatial patterns in bicycle routes from crowdsourced data. Transactions in GIS, 21(6):1321-1340, 2017.
abstract
doi
bibtex
|
| Much is done nowadays to provide cyclists with safe and sustainable road infrastructure. Its development requires the investigation of road usage and interactions between traffic commuters. This article is focused on exploiting crowdsourced user-generated data, namely GPS trajectories collected by cyclists and road network infrastructure generated by citizens, to extract and analyze spatial patterns and road-type use of cyclists in urban environments. Since user-generated data shows data-deficiencies, we introduce tailored spatial data-handling processes for which several algorithms are developed and implemented. These include data filtering and segmentation, map-matching and spatial arrangement of GPS trajectories with the road network. A spatial analysis and a characterization of road-type use are then carried out to investigate and identify specific spatial patterns of cycle routes. The proposed analysis was applied to the cities of Amsterdam (The Netherlands) and Osnabrück (Germany), proving its feasibility and reliability in mining road-type use and extracting pattern information and preferences. This information can help users who wish to explore friendlier and more interesting cycle patterns, based on collective usage, as well as city planners and transportation experts wishing to pinpoint areas most in need of further development and planning. @article{SultanEtAl17,
abstract = {Much is done nowadays to provide cyclists with safe and sustainable road infrastructure. Its development requires the investigation of road usage and interactions between traffic commuters. This article is focused on exploiting crowdsourced user-generated data, namely GPS trajectories collected by cyclists and road network infrastructure generated by citizens, to extract and analyze spatial patterns and road-type use of cyclists in urban environments. Since user-generated data shows data-deficiencies, we introduce tailored spatial data-handling processes for which several algorithms are developed and implemented. These include data filtering and segmentation, map-matching and spatial arrangement of GPS trajectories with the road network. A spatial analysis and a characterization of road-type use are then carried out to investigate and identify specific spatial patterns of cycle routes. The proposed analysis was applied to the cities of Amsterdam (The Netherlands) and Osnabrück (Germany), proving its feasibility and reliability in mining road-type use and extracting pattern information and preferences. This information can help users who wish to explore friendlier and more interesting cycle patterns, based on collective usage, as well as city planners and transportation experts wishing to pinpoint areas most in need of further development and planning.},
author = {Sultan, J. and Ben-Haim, G. and Haunert, J.-H.},
doi = {10.1111/tgis.12280},
journal = {Transactions in GIS},
number = {6},
pages = {1321--1340},
title = {Extracting spatial patterns in bicycle routes from crowdsourced data},
url = {http://onlinelibrary.wiley.com/doi/10.1111/tgis.12280/abstract},
volume = {21},
year = {2017}
}
|
|
J.-H. Haunert, and A. Wolff. Beyond maximum independent set: an extended integer linear program for point feature labeling. ISPRS International Journal of Geo-Information, 6(11), 2017.
abstract
doi
bibtex
|
| Map labeling is a classical problem of cartography that has frequently been approached by combinatorial optimization. Given a set of features in a map and for each feature a set of label candidates, a common problem is to select an independent set of labels (that is, a labeling without label-label intersections) that contains as many labels as possible and at most one label for each feature. To obtain solutions of high cartographic quality, the labels can be weighted and one can maximize the total weight (rather than the number) of the selected labels. We argue, however, that when maximizing the weight of the labeling, the influences of labels on other labels are insufficiently addressed. Furthermore, in a maximum-weight labeling, the labels tend to be densely packed and thus the map background can be occluded too much. We propose extensions of an existing model to overcome these limitations. Since even without our extensions the problem is NP-hard, we cannot hope for an efficient exact algorithm for the problem. Therefore, we present a formalization of our model as an integer linear program (ILP). This allows us to compute optimal solutions in reasonable time, which we demonstrate both for randomly generated point sets and an existing data set of cities. Moreover, a relaxation of our ILP allows for a simple and efficient heuristic, which yielded near-optimal solutions for our instances. @article{HaunertW17,
abstract = {Map labeling is a classical problem of cartography that has frequently been approached by combinatorial optimization. Given a set of features in a map and for each feature a set of label candidates, a common problem is to select an independent set of labels (that is, a labeling without label-label intersections) that contains as many labels as possible and at most one label for each feature. To obtain solutions of high cartographic quality, the labels can be weighted and one can maximize the total weight (rather than the number) of the selected labels. We argue, however, that when maximizing the weight of the labeling, the influences of labels on other labels are insufficiently addressed. Furthermore, in a maximum-weight labeling, the labels tend to be densely packed and thus the map background can be occluded too much. We propose extensions of an existing model to overcome these limitations. Since even without our extensions the problem is NP-hard, we cannot hope for an efficient exact algorithm for the problem. Therefore, we present a formalization of our model as an integer linear program (ILP). This allows us to compute optimal solutions in reasonable time, which we demonstrate both for randomly generated point sets and an existing data set of cities. Moreover, a relaxation of our ILP allows for a simple and efficient heuristic, which yielded near-optimal solutions for our instances.},
article-number = {342},
author = {Haunert, J.{-}H. and Wolff, A.},
doi = {10.3390/ijgi6110342},
journal = {ISPRS International Journal of Geo-Information},
number = {11},
title = {Beyond Maximum Independent Set: An Extended Integer Linear Program for Point Feature Labeling},
volume = {6},
year = {2017}
}
|
|
J. Oehrlein, and J.-H. Haunert. A cutting-plane method for contiguity-constrained spatial aggregation. Journal of Spatial Information Science, 15(1):89-120, 2017.
abstract
doi
bibtex
|
| Aggregating areas into larger regions is a common problem in spatial planning, geographic information science, and cartography. The aim can be to group administrative areal units into electoral districts or sales territories, in which case the problem is known as districting. In other cases, area aggregation is seen as a generalization or visualization task, which aims to reveal spatial patterns in geographic data. Despite these different motivations, the heart of the problem is the same: given a planar partition, one wants to aggregate several elements of this partition to regions. These often must have or exceed a particular size, be homogeneous with respect to some attribute, contiguous, and geometrically compact. Even simple problem variants are known to be NP-hard, meaning that there is no reasonable hope for an efficient exact algorithm. Nevertheless, the problem has been attacked with heuristic and exact methods. In this article we present a new exact method for area aggregation and compare it with a state-of-the-art method for the same problem. Our method results in a substantial decrease of the running time and, in particular, allowed us to solve certain instances that the existing method could not solve within five days. Both our new method and the existing method use integer linear programming, which allows existing problem solvers to be applied. Other than the existing method, however, our method employs a cutting-plane method, which is an advanced constraint-handling approach. We discuss this approach in detail and present its application to the aggregation of areas in choropleth maps. @article{DBLP:journals/josis/OehrleinH17,
abstract = {Aggregating areas into larger regions is a common problem in spatial planning, geographic information science, and cartography. The aim can be to group administrative areal units into electoral districts or sales territories, in which case the problem is known as districting. In other cases, area aggregation is seen as a generalization or visualization task, which aims to reveal spatial patterns in geographic data. Despite these different motivations, the heart of the problem is the same: given a planar partition, one wants to aggregate several elements of this partition to regions. These often must have or exceed a particular size, be homogeneous with respect to some attribute, contiguous, and geometrically compact. Even simple problem variants are known to be NP-hard, meaning that there is no reasonable hope for an efficient exact algorithm. Nevertheless, the problem has been attacked with heuristic and exact methods. In this article we present a new exact method for area aggregation and compare it with a state-of-the-art method for the same problem. Our method results in a substantial decrease of the running time and, in particular, allowed us to solve certain instances that the existing method could not solve within five days. Both our new method and the existing method use integer linear programming, which allows existing problem solvers to be applied. Other than the existing method, however, our method employs a cutting-plane method, which is an advanced constraint-handling approach. We discuss this approach in detail and present its application to the aggregation of areas in choropleth maps.},
author = {J. Oehrlein and J.{-}H. Haunert},
doi = {10.5311/JOSIS.2017.15.379},
journal = {Journal of Spatial Information Science},
number = {1},
pages = {89--120},
title = {A cutting-plane method for contiguity-constrained spatial aggregation},
volume = {15},
year = {2017}
}
|
|
J. Oehrlein, B. Niedermann, and J.-H. Haunert. Inferring the parametric weight of a bicriteria routing model from trajectories. In Proc. 25th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL GIS '17), pages 59:1-59:4. 2017.
abstract
doi
bibtex
|
| Finding a shortest path between two nodes in a graph is a well-studied problem whose applicability in practice crucially relies on the choice of the applied cost function. Especially, for the key application of vehicle routing the cost function may consist of more than one optimization criterion (e.g., distance, travel time, etc.). Finding a good balance between these criteria is a challenging and essential task. We present an approach that learns that balance from existing GPS-tracks. The core of our approach is to find a balance factor alpha for a given set of GPS-tracks such that the tracks can be decomposed into a minimum number of optimal paths with respect to alpha.
In an experimental evaluation on real-world GPS-tracks of bicyclists we show that our approach yields an appropriate balance factor in a reasonable amount of time. @inproceedings{Oehrlein:2017:IPW:3139958.3140033,
abstract = {Finding a shortest path between two nodes in a graph is a well-studied problem whose applicability in practice crucially relies on the choice of the applied cost function. Especially, for the key application of vehicle routing the cost function may consist of more than one optimization criterion (e.g., distance, travel time, etc.). Finding a good balance between these criteria is a challenging and essential task. We present an approach that learns that balance from existing GPS-tracks. The core of our approach is to find a balance factor alpha for a given set of GPS-tracks such that the tracks can be decomposed into a minimum number of optimal paths with respect to alpha.
In an experimental evaluation on real-world GPS-tracks of bicyclists we show that our approach yields an appropriate balance factor in a reasonable amount of time.},
author = {Oehrlein, J. and Niedermann, B. and Haunert, J.-H.},
booktitle = {Proc. 25th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL GIS '17)},
doi = {10.1145/3139958.3140033},
pages = {59:1--59:4},
title = {Inferring the Parametric Weight of a Bicriteria Routing Model from Trajectories},
year = {2017}
}
|
|
Y. Dehbi, J.-H. Haunert, and L. Plümer. Stochastic and geometric reasoning for indoor building models with electric installations - bridging the gap between gis and bim. In volume IV-4/W5 of ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences. Proc. 12th 3D Geoinfo Conference, pages 33-39. 2017.
abstract
doi
bibtex
|
| 3D city and building models according to CityGML encode the geometry, represent the structure and model semantically relevant building parts such as doors, windows and balconies. Building information models support the building design, construction and the facility management. In contrast to CityGML, they include also objects which cannot be observed from the outside. The three dimensional indoor models characterize a missing link between both worlds. Their derivation, however, is expensive. The semantic automatic interpretation of 3D point clouds of indoor environments is a methodically demanding task. The data acquisition is costly and difficult. The laser scanners and image-based methods require the access to every room. Based on an approach which does not require an additional geometry acquisition of building indoors, we propose an attempt for filling the gaps between 3D building models and building information models. Based on sparse observations such as the building footprint and room areas, 3D indoor models are generated using combinatorial and stochastic reasoning. The derived models are expanded by a-priori not observable structures such as electric installation. Gaussian mixtures, linear and bi-linear constraints are used to represent the background knowledge and structural regularities. The derivation of hypothesised models is performed by stochastic reasoning using graphical models, Gauss-Markov models and MAP-estimators. @inproceedings{isprs-annals-IV-4-W5-33-2017,
abstract = {3D city and building models according to CityGML encode the geometry, represent the structure and model semantically relevant building parts such as doors, windows and balconies. Building information models support the building design, construction and the facility management. In contrast to CityGML, they include also objects which cannot be observed from the outside. The three dimensional indoor models characterize a missing link between both worlds. Their derivation, however, is expensive. The semantic automatic interpretation of 3D point clouds of indoor environments is a methodically demanding task. The data acquisition is costly and difficult. The laser scanners and image-based methods require the access to every room. Based on an approach which does not require an additional geometry acquisition of building indoors, we propose an attempt for filling the gaps between 3D building models and building information models. Based on sparse observations such as the building footprint and room areas, 3D indoor models are generated using combinatorial and stochastic reasoning. The derived models are expanded by a-priori not observable structures such as electric installation. Gaussian mixtures, linear and bi-linear constraints are used to represent the background knowledge and structural regularities. The derivation of hypothesised models is performed by stochastic reasoning using graphical models, Gauss-Markov models and MAP-estimators.},
author = {Dehbi, Y. and Haunert, J.-H. and Pl\"umer, L.},
booktitle = {Proc. 12th 3D Geoinfo Conference},
doi = {10.5194/isprs-annals-IV-4-W5-33-2017},
pages = {33--39},
series = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
title = {Stochastic and geometric reasoning for indoor building models with electric installations -- bridging the gap between {GIS} and {BIM}},
url = {https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/IV-4-W5/33/2017/},
volume = {IV-4/W5},
year = {2017}
}
|
|
J. Oehrlein, T. C. van Dijk, and J.-H. Haunert. Gleichwertige Ziele in dynamischen Navigationskarten. In volume 26. DGPF Tagungsband, pages 138-146. 2017.
abstract
bibtex
|
| Die Generierung übersichtlicher Karten erfordert Verfahren der automatischen Generalisierung. Zur Darstellung auf Navigationskarten werden beispielsweise Objekte eines Verkehrsnetzes anhand ihrer Bedeutung für das Netz ausgewählt. Beschränkt man sich auf die Navigation von einem festen Startpunkt aus, verlieren viele Objekte für die Karte an Bedeutung. Durch Verzicht auf deren Darstellung wird die Karte übersichtlicher. Diesen Umstand nutzen van Dijk et al. (2016) für einen Algorithmus zur standortbasierten Generalisierung von Straßennetzen. Dieser trifft - abhängig von einem fest gewählten Standort - durch die Zusammenfassung von als gleichwertig erkannten Zielen eine Auswahl. Hebt man die Fixierung des Standorts auf, ergeben sich neue Möglichkeiten für mobile Geräte. Dieser Beitrag beschäftigt sich mit den Problemen, die mit der Dynamik Einzug in diesen Algorithmus halten, und bietet erste Lösungsansätze. @inproceedings{OvDH17,
abstract = {Die Generierung übersichtlicher Karten erfordert Verfahren der automatischen Generalisierung. Zur Darstellung auf Navigationskarten werden beispielsweise Objekte eines Verkehrsnetzes anhand ihrer Bedeutung für das Netz ausgewählt. Beschränkt man sich auf die Navigation von einem festen Startpunkt aus, verlieren viele Objekte für die Karte an Bedeutung. Durch Verzicht auf deren Darstellung wird die Karte übersichtlicher. Diesen Umstand nutzen van Dijk et al. (2016) für einen Algorithmus zur standortbasierten Generalisierung von Straßennetzen. Dieser trifft - abhängig von einem fest gewählten Standort - durch die Zusammenfassung von als gleichwertig erkannten Zielen eine Auswahl. Hebt man die Fixierung des Standorts auf, ergeben sich neue Möglichkeiten für mobile Geräte. Dieser Beitrag beschäftigt sich mit den Problemen, die mit der Dynamik Einzug in diesen Algorithmus halten, und bietet erste Lösungsansätze.},
author = {Oehrlein, J. and van Dijk, T. C. and Haunert, J.-H.},
booktitle = {DGPF Tagungsband},
pages = {138--146},
title = {Gleichwertige {Ziele} in dynamischen {Navigationskarten}},
volume = {26},
year = {2017}
}
|
|
D. Peng, A. Wolff, and J.-H. Haunert. Using the A* algorithm to find optimal sequences for area aggregation. In Michael P. Peterson, editors. Advances in Cartography and GIScience - Selections from the International Cartographic Conference 2017, pages 389-404. Springer International Publishing, 2017.
abstract
doi
bibtex
|
| Given two land-cover maps of different scales, we wish to find a sequence of small incremental changes that gradually transforms one map into the other. We assume that the two input maps consist of polygons that constitute a planar subdivision and belong to different land-cover classes. Every polygon in the small-scale map is the union of a set of polygons in the large-scale map. In each step of the sequence that we compute, the smallest area in the current map is merged with one of its neighbors. We do not select that neighbor according to a prescribed rule but define the whole sequence of pairwise merges at once, based on global optimization. An important requirement for such a method is a formalization of the problem in terms of optimization objectives and constraints, which we present together with a solution that is based on the so-called A* algorithm. This algorithm allows us to limit the exploration of the search space such that we can compute solutions of high quality in reasonable time. We tested the method with a dataset of the official German topographic database ATKIS and discuss our results. @inproceedings{10.1007/978-3-319-57336-6_27,
abstract = {Given two land-cover maps of different scales, we wish to find a sequence of small incremental changes that gradually transforms one map into the other. We assume that the two input maps consist of polygons that constitute a planar subdivision and belong to different land-cover classes. Every polygon in the small-scale map is the union of a set of polygons in the large-scale map. In each step of the sequence that we compute, the smallest area in the current map is merged with one of its neighbors. We do not select that neighbor according to a prescribed rule but define the whole sequence of pairwise merges at once, based on global optimization. An important requirement for such a method is a formalization of the problem in terms of optimization objectives and constraints, which we present together with a solution that is based on the so-called {A*} algorithm. This algorithm allows us to limit the exploration of the search space such that we can compute solutions of high quality in reasonable time. We tested the method with a dataset of the official German topographic database ATKIS and discuss our results.},
address = {Cham},
author = {Peng, D. and Wolff, A. and Haunert, J.-H.},
booktitle = {Advances in Cartography and GIScience -- Selections from the International Cartographic Conference 2017},
doi = {10.1007/978-3-319-57336-6_27},
editor = {Peterson, Michael P.},
isbn = {978-3-319-57336-6},
pages = {389--404},
publisher = {Springer International Publishing},
title = {Using the {A*} Algorithm to Find Optimal Sequences for Area Aggregation},
year = {2017}
}
|
|
Y. Dehbi, S. Loch-Dehbi, and L. Plümer. Parameter estimation and model selection for indoor models based on sparse observations. In volume IV-2/W4 of ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences. Proc. ISPRS Geospatial Week 2017, pages 303-310. 2017.
abstract
doi
bibtex
|
| This paper presents a novel method for the parameter estimation and model selection for the reconstruction of indoor environments based on sparse observations. While most approaches for the reconstruction of indoor models rely on dense observations, we predict scenes of the interior with high accuracy in the absence of indoor measurements. We use a model-based top-down approach and incorporate strong but profound prior knowledge. The latter includes probability density functions for model parameters and sparse observations such as room areas and the building footprint. The floorplan model is characterized by linear and bi-linear relations with discrete and continuous parameters. We focus on the stochastic estimation of model parameters based on a topological model derived by combinatorial reasoning in a first step. A Gauss-Markov model is applied for estimation and simulation of the model parameters. Symmetries are represented and exploited during the estimation process. Background knowledge as well as observations are incorporated in a maximum likelihood estimation and model selection is performed with AIC/BIC. The likelihood is also used for the detection and correction of potential errors in the topological model. Estimation results are presented and discussed. @inproceedings{isprs-annals-IV-2-W4-303-2017,
abstract = {This paper presents a novel method for the parameter estimation and model selection for the reconstruction of indoor environments based on sparse observations. While most approaches for the reconstruction of indoor models rely on dense observations, we predict scenes of the interior with high accuracy in the absence of indoor measurements. We use a model-based top-down approach and incorporate strong but profound prior knowledge. The latter includes probability density functions for model parameters and sparse observations such as room areas and the building footprint. The floorplan model is characterized by linear and bi-linear relations with discrete and continuous parameters. We focus on the stochastic estimation of model parameters based on a topological model derived by combinatorial reasoning in a first step. A Gauss-Markov model is applied for estimation and simulation of the model parameters. Symmetries are represented and exploited during the estimation process. Background knowledge as well as observations are incorporated in a maximum likelihood estimation and model selection is performed with AIC/BIC. The likelihood is also used for the detection and correction of potential errors in the topological model. Estimation results are presented and discussed.},
author = {Dehbi, Y. and Loch-Dehbi, S. and Pl\"umer, L.},
booktitle = {Proc. ISPRS Geospatial Week 2017},
doi = {10.5194/isprs-annals-IV-2-W4-303-2017},
pages = {303--310},
series = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
title = {Parameter estimation and model selection for indoor models based on sparse observations},
url = {https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/IV-2-W4/303/2017/},
volume = {IV-2/W4},
year = {2017}
}
|
|
Y. Dehbi, F. Hadiji, Gerhard Gröger, Kristian Kersting, and L. Plümer. Statistical relational learning of grammar rules for 3d building reconstruction. Transactions in GIS, 21(1):134-150, 2017.
abstract
doi
bibtex
|
| The automatic interpretation of 3D point clouds for building reconstruction is a challenging task. The interpretation process requires highly structured models representing semantics. Formal grammars can describe structures as well as the parameters of buildings and their parts. We propose a novel approach for the automatic learning of weighted attributed context-free grammar rules for 3D building reconstruction, supporting the laborious manual design of rules. We separate structure from parameter learning. Specific Support Vector Machines (SVMs) are used to generate a weighted context-free grammar and predict structured outputs such as parse trees. The grammar is extended by parameters and constraints, which are learned based on a statistical relational learning method using Markov Logic Networks (MLNs). MLNs enforce the topological and geometric constraints. MLNs address uncertainty explicitly and provide probabilistic inference. They are able to deal with partial observations caused by occlusions. Uncertain projective geometry is used to deal with the uncertainty of the observations. Learning is based on a large building database covering different building styles and façade structures. In particular, a treebank that has been derived from the database is employed for structure learning. @article{DehbiEtAl2017,
abstract = {The automatic interpretation of 3D point clouds for building reconstruction is a challenging task. The interpretation process requires highly structured models representing semantics. Formal grammars can describe structures as well as the parameters of buildings and their parts. We propose a novel approach for the automatic learning of weighted attributed context-free grammar rules for 3D building reconstruction, supporting the laborious manual design of rules. We separate structure from parameter learning. Specific Support Vector Machines (SVMs) are used to generate a weighted context-free grammar and predict structured outputs such as parse trees. The grammar is extended by parameters and constraints, which are learned based on a statistical relational learning method using Markov Logic Networks (MLNs). MLNs enforce the topological and geometric constraints. MLNs address uncertainty explicitly and provide probabilistic inference. They are able to deal with partial observations caused by occlusions. Uncertain projective geometry is used to deal with the uncertainty of the observations. Learning is based on a large building database covering different building styles and façade structures. In particular, a treebank that has been derived from the database is employed for structure learning.},
author = {Dehbi, Y. and Hadiji, F. and Gr{\"{o}}ger, Gerhard and Kersting, Kristian and Pl{\"{u}}mer, L.},
doi = {10.1111/tgis.12200},
journal = {Transactions in GIS},
number = {1},
pages = {134--150},
title = {Statistical Relational Learning of Grammar Rules for {3D} Building Reconstruction},
volume = {21},
year = {2017}
}
|
|
S. Loch-Dehbi, Y. Dehbi, and L. Plümer. Estimation of 3d indoor models with constraint propagation and stochastic reasoning in the absence of indoor measurements. ISPRS International Journal of Geo-Information, 6(3), 2017.
abstract
doi
bibtex
|
| This paper presents a novel method for the prediction of building floor plans based on sparse observations in the absence of measurements. We derive the most likely hypothesis using a maximum a posteriori probability approach. Background knowledge consisting of probability density functions of room shape and location parameters is learned from training data. Relations between rooms and room substructures are represented by linear and bilinear constraints. We perform reasoning on different levels providing a problem solution that is optimal with regard to the given information. In a first step, the problem is modeled as a constraint satisfaction problem. Constraint Logic Programming derives a solution which is topologically correct but suboptimal with regard to the geometric parameters. The search space is reduced using architectural constraints and browsed by intelligent search strategies which use domain knowledge. In a second step, graphical models are used for updating the initial hypothesis and refining its continuous parameters. We make use of Gaussian mixtures for model parameters in order to represent background knowledge and to get access to established methods for efficient and exact stochastic reasoning. We demonstrate our approach on different illustrative examples. Initially, we assume that floor plans are rectangular and that rooms are rectangles and discuss more general shapes afterwards. In a similar spirit, we predict door locations providing further important components of 3D indoor models. @article{Loch-DehbiEtAl2017,
abstract = {This paper presents a novel method for the prediction of building floor plans based on sparse observations in the absence of measurements. We derive the most likely hypothesis using a maximum a posteriori probability approach. Background knowledge consisting of probability density functions of room shape and location parameters is learned from training data. Relations between rooms and room substructures are represented by linear and bilinear constraints. We perform reasoning on different levels providing a problem solution that is optimal with regard to the given information. In a first step, the problem is modeled as a constraint satisfaction problem. Constraint Logic Programming derives a solution which is topologically correct but suboptimal with regard to the geometric parameters. The search space is reduced using architectural constraints and browsed by intelligent search strategies which use domain knowledge. In a second step, graphical models are used for updating the initial hypothesis and refining its continuous parameters. We make use of Gaussian mixtures for model parameters in order to represent background knowledge and to get access to established methods for efficient and exact stochastic reasoning. We demonstrate our approach on different illustrative examples. Initially, we assume that floor plans are rectangular and that rooms are rectangles and discuss more general shapes afterwards. In a similar spirit, we predict door locations providing further important components of 3D indoor models.},
article-number = {90},
author = {Loch-Dehbi, S. and Dehbi, Y. and Pl{\"{u}}mer, L.},
doi = {10.3390/ijgi6030090},
issn = {2220-9964},
journal = {ISPRS International Journal of Geo-Information},
number = {3},
title = {Estimation of {3D} Indoor Models with Constraint Propagation and Stochastic Reasoning in the Absence of Indoor Measurements},
url = {http://www.mdpi.com/2220-9964/6/3/90},
volume = {6},
year = {2017}
}
|
|
B. Niedermann, M. Nöllenburg, and I. Rutter. Radial contour labeling with straight leaders. In Proceedings of IEEE Pacific Visualization Symposium (PacificVis'17), pages 295-304. IEEE Computer Society, 2017.
bibtex
|
| @inproceedings{nnr-rclsl-17,
author = {Niedermann, B. and N{\"o}llenburg, M. and Rutter, I.},
booktitle = {Proceedings of {IEEE} Pacific Visualization Symposium ({PacificVis}'17)},
pages = {295--304},
publisher = {IEEE Computer Society},
title = {Radial Contour Labeling with Straight Leaders},
year = {2017}
}
|
|
B. Niedermann, M. Nöllenburg, and I. Rutter. Radial contour labeling with straight leaders. In Proceedings of the 33rd European Workshop on Computational Geometry (EuroCG'17). 2017. Preprint.
bibtex
|
| @inproceedings{nnr-rclsl-eurocg-17,
author = {Niedermann, B. and N{\"o}llenburg, M. and Rutter, I.},
booktitle = {Proceedings of the 33rd European Workshop on Computational Geometry ({EuroCG}'17)},
note = {Preprint},
title = {Radial Contour Labeling with Straight Leaders},
url = {http://csconferences.mah.se/eurocg2017/proceedings.pdf},
year = {2017}
}
|
|
L. Barth, B. Niedermann, I. Rutter, and M. Wolf. Towards a topology-shape-metrics framework for ortho-radial drawings. In Leibniz International Proceedings in Informatics. Proc. 33rd Annual ACM Symposium on Computational Geometry (SoCG '17), pages 14:1-14:16. 2017.
bibtex
|
| @inproceedings{bnrw-ttsmf-17,
author = {Barth, L. and Niedermann, B. and Rutter, I. and Wolf, M.},
booktitle = {Proc. 33rd Annual {ACM} Symposium on Computational Geometry ({SoCG} '17)},
doi = {10.4230/LIPIcs.SoCG.2017.14},
pages = {14:1--14:16},
series = {Leibniz International Proceedings in Informatics},
title = {Towards a Topology-Shape-Metrics Framework for Ortho-Radial Drawings},
year = {2017}
}
|
|
L. Barth, B. Niedermann, I. Rutter, and M. Wolf. Towards a topology-shape-metrics framework for ortho-radial drawings. In Proceedings of the 33rd European Workshop on Computational Geometry (EuroCG'17). 2017. Preprint.
bibtex
|
| @inproceedings{bnrw-ttsmf-preprint-17,
author = {Barth, L. and Niedermann, B. and Rutter, I. and Wolf, M.},
booktitle = {Proceedings of the 33rd European Workshop on Computational Geometry ({EuroCG}'17)},
file = {bnrw-ttsmf-preprint-17.pdf:http\://i11www.ira.uka.de/extra/publications/bnrw-ttsmf-preprint-17.pdf:PDF},
note = {Preprint},
title = {Towards a Topology-Shape-Metrics Framework for Ortho-Radial Drawings},
url = {http://csconferences.mah.se/eurocg2017/proceedings.pdf},
year = {2017}
}
|
|
B. Niedermann. Automatic label placement in maps and figures: models, algorithms and experiments. KIT-Bibliothek. Karlsruher Institut für Technologie (KIT), 2017. Dissertation.
doi
bibtex
|
| @phdthesis{Niedermann2017_1000068424,
address = {Karlsruhe},
author = {Niedermann, B.},
doi = {10.5445/IR/1000068424},
file = {1000068424:https\://publikationen.bibliothek.kit.edu/1000068424:PDF},
keywords = {Label Placement, Labeling, Maps, Figures},
language = {english},
pagetotal = {285},
school = {Karlsruher Institut f{\"u}r Technologie ({KIT})},
series = {KIT-Bibliothek},
title = {Automatic Label Placement in Maps and Figures: Models, Algorithms and Experiments},
type = {Dissertation},
year = {2017}
}
|