@conference {DBLP:conf/isvc/GussefeldHK16, title = {Creating Feasible Reflectance Data for Synthetic Optical Flow Datasets}, booktitle = {Advances in Visual Computing - 12th International Symposium, {ISVC} 2016, Las Vegas, NV, USA, December 12-14, 2016, Proceedings, Part {I}}, year = {2016}, abstract = {Optical flow ground truth generated by computer graphics has many advantages. For example, we can systematically vary scene parameters to understand algorithm sensitivities. But is synthetic ground truth realistic enough? Appropriate material models have been established as one of the major challenges for the creation of synthetic datasets: previous research has shown that highly sophisticated reflectance field acquisition methods yield results, which various optical flow methods cannot distinguish from real scenes. However, such methods are costly both in acquisition and rendering time and thus infeasible for large datasets. In this paper we find the simplest reflectance models (RM) for different groups of materials which still provide sufficient accuracy for optical flow performance analysis. It turns out that a spatially varying Phong RM is sufficient for simple materials. Normal estimation combined with Anisotropic RM can handle even very complex materials.}, doi = {10.1007/978-3-319-50835-1_8}, author = {G{\"u}ssefeld, Burkhard and Katrin Honauer and Daniel Kondermann} } @conference {Honauer-etal-2017-ICCV, title = {A Dataset and Evaluation Methodology for Depth Estimation on 4D Light Fields}, booktitle = {Computer Vision - ACCV 2016 : 13th Asian Conference on Computer Vision, Taipei, Taiwan, November 20-24, 2016, Revised Selected Papers, Part III}, year = {2016}, publisher = {Springer}, organization = {Springer}, address = {Cham}, abstract = {In computer vision communities such as stereo, optical flow, or visual tracking, commonly accepted and widely used benchmarks have enabled objective comparison and boosted scientific progress. 
In the emergent light field community, a comparable benchmark and evaluation methodology is still missing. The performance of newly proposed methods is often demonstrated qualitatively on a handful of images, making quantitative comparison and targeted progress very difficult. To overcome these difficulties, we propose a novel light field benchmark. We provide 24 carefully designed synthetic, densely sampled 4D light fields with highly accurate disparity ground truth. We thoroughly evaluate four state-of-the-art light field algorithms and one multi-view stereo algorithm using existing and novel error measures. This consolidated state-of-the art may serve as a baseline to stimulate and guide further scientific progress. We publish the benchmark website http://www.lightfield-analysis.net , an evaluation toolkit, and our rendering setup to encourage submissions of both algorithms and further datasets.}, isbn = {978-3-319-54186-0}, doi = {10.1007/978-3-319-54187-7_2}, author = {Katrin Honauer and Ole Johannsen and Daniel Kondermann and Bastian Goldl{\"u}cke}, editor = {Lai, Shang-Hong} } @conference {Kondermann_2016_CVPR_Workshops, title = {The HCI Benchmark Suite: Stereo and Flow Ground Truth With Uncertainties for Urban Autonomous Driving}, booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops}, year = {2016}, month = {June}, abstract = { Recent advances in autonomous driving require more and more highly realistic reference data, even for difficult situations such as low light and bad weather. We present a new stereo and optical flow dataset to complement existing benchmarks. It was specifically designed to be representative for urban autonomous driving, including realistic, systematically varied radiometric and geometric challenges which were previously unavailable. The accuracy of the ground truth is evaluated based on Monte Carlo simulations yielding full, per-pixel distributions. 
Interquartile ranges are used as uncertainty measure to create binary masks for arbitrary accuracy thresholds and show that we achieved uncertainties better than those reported for comparable outdoor benchmarks. Binary masks for all dynamically moving regions are supplied with estimated stereo and flow values. An initial public benchmark dataset of 55 manually selected sequences between 19 and 100 frames long is made available in a dedicated website featuring interactive tools for database search, visualization, comparison and benchmarking. }, author = {Daniel Kondermann and Nair, Rahul and Katrin Honauer and Karsten Krispin and Jonas Andrulis and Alexander Brock and G{\"u}ssefeld, Burkhard and Mohsen Rahimimoghaddam and Sabine Hofmann and Brenner, Claus and Bernd J{\"a}hne} } @conference {Honauer-etal-2015-ICCV, title = {The HCI Stereo Metrics: Geometry-Aware Performance Analysis of Stereo Algorithms}, booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, year = {2015}, month = {December}, author = {Katrin Honauer and Lena Maier-Hein and Daniel Kondermann} } @inbook {Kondermann-etal-2015-ACCV, title = {Stereo Ground Truth with Error Bars}, booktitle = {Computer Vision {\textendash} ACCV 2014: 12th Asian Conference on Computer Vision, Singapore, Singapore, November 1-5, 2014, Revised Selected Papers, Part V}, year = {2015}, pages = {595{\textendash}610}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Cham}, abstract = {Creating stereo ground truth based on real images is a measurement task. 
Measurements are never perfectly accurate: the depth at each pixel follows an error distribution. A common way to estimate the quality of measurements is error bars. In this paper we describe a methodology to add error bars to images of previously scanned static scenes. The main challenge for stereo ground truth error estimates based on such data is the nonlinear matching of 2D images to 3D points. Our method uses 2D feature quality, 3D point and calibration accuracy as well as covariance matrices of bundle adjustments. We sample the reference data error which is the 3D depth distribution of each point projected into 3D image space. The disparity distribution at each pixel location is then estimated by projecting samples of the reference data error on the 2D image plane. An analytical Gaussian error propagation is used to validate the results. As proof of concept, we created ground truth of an image sequence with 100 frames. Results show that disparity accuracies well below one pixel can be achieved, albeit with much larger errors at depth discontinuities mainly caused by uncertain estimates of the camera location.}, isbn = {978-3-319-16814-2}, doi = {10.1007/978-3-319-16814-2_39}, url = {http://dx.doi.org/10.1007/978-3-319-16814-2_39}, author = {Daniel Kondermann and Nair, Rahul and Stephan Meister and Wolfgang Mischler and G{\"u}ssefeld, Burkhard and Katrin Honauer and Sabine Hofmann and Brenner, Claus and Bernd J{\"a}hne} } @conference {guessefeld2014, title = {Are reflectance field renderings appropriate for optical flow evaluation?}, booktitle = {International Conference on Image Processing, ICIP 2014}, year = {2014}, author = {G{\"u}ssefeld, Burkhard and Daniel Kondermann and Schwartz, Christopher and Klein, Reinhard} } @conference {maierhein2014, title = {Can masses of non-experts train highly accurate image classifiers? 
A crowdsourcing approach to instrument segmentation in laparoscopic images}, booktitle = {MICCAI}, year = {2014}, author = {Lena Maier-Hein and Sven Mersmann and Daniel Kondermann and Bodenstedt, S. and Sanchez, A. and C. Stock and Kenngott, H. and Eisenmann, M. and Speidel, S.} } @conference {maierhein2014a, title = {Crowdsourcing for reference correspondence generation in endoscopic images}, booktitle = {MICCAI}, year = {2014}, author = {Lena Maier-Hein and Sven Mersmann and Daniel Kondermann and C. Stock and Kenngott, H. and Sanchez, A. and Wagner, M. and Preukschas, A. and Wekerle, A. -L. and Helfert, S. and Bodenstedt, S. and Speidel, S.} } @conference {kondermann2014, title = {Stereo ground truth with error bars}, booktitle = {Asian Conference on Computer Vision, ACCV 2014}, year = {2014}, author = {Daniel Kondermann and Nair, Rahul and Stephan Meister and Wolfgang Mischler and G{\"u}ssefeld, Burkhard and Sabine Hofmann and Brenner, Claus and Bernd J{\"a}hne} } @conference {gottfried2014, title = {Time of flight motion compensation revisited}, booktitle = {International Conference on Image Processing, ICIP 2014}, year = {2014}, author = {Gottfried, Jens-Malte and Nair, Rahul and Stephan Meister and Christoph S. Garbe and Daniel Kondermann} } @conference {haeusler_13_ensemble, title = {Ensemble Learning for Confidence Measures in Stereo Vision}, booktitle = {CVPR 2013, in press}, year = {2013}, note = {1}, pages = {305-312}, doi = {10.1109/CVPR.2013.46}, author = {Haeusler, R. and Nair, R. and Daniel Kondermann} } @article {donath_13_how, title = {How Good is Crowdsourcing for Optical Flow Ground Truth Generation?}, journal = {submitted to CVPR}, year = {2013}, note = {1}, author = {Donath, A. and Daniel Kondermann} } @conference {becker_13_movie, title = {Movie Dimensionalization Via Sparse User Annotations}, booktitle = {submitted to 3DTV-Con}, year = {2013}, note = {1}, author = {Becker, M. and Baron, M. and Daniel Kondermann and Bussler, M. 
and Helzle, V.} } @conference {kondermann_13_movie, title = {Movie Dimensionalization Via Sparse User Annotations}, booktitle = {submitted to ICCV}, year = {2013}, note = {1}, author = {Daniel Kondermann and Becker, M.} } @conference {meister_13_simulation, title = {Simulation of Time-of-Flight Sensors using Global Illumination}, booktitle = {Vision, Modeling and Visualization (VMV), 2013 International Workshop on. Proceedings}, year = {2013}, pages = {33-40}, doi = {10.2312/PE.VMV.VMV13.033-040}, author = {Stephan Meister and Nair, R. and Daniel Kondermann} } @conference {berger_13_state, title = {A State of the Art Report on Kinect Sensor Setups in Computer Vision}, booktitle = {Time-of-Flight and Depth Imaging. Sensors, Algorithms, and Applications}, volume = {8200}, year = {2013}, pages = {257-272}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-642-44964-2_12}, author = {Berger, K. and Stephan Meister and Nair, R. and Daniel Kondermann}, editor = {Grzegorzek, M. and Theobalt, C. and Andreas Kolb and Reinhard Koch} } @conference {nair_13_survey, title = {A Survey on Time-of-Flight Stereo Fusion}, booktitle = {Time-of-Flight Imaging: Algorithms, Sensors and Applications}, volume = {8022}, year = {2013}, pages = {105-127}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-642-44964-2_6}, author = {Nair, R. and Ruhl, K. and Frank Lenzen and Stephan Meister and Sch{\"a}fer, H. and Christoph S. Garbe and Eisemann, M. and Daniel Kondermann}, editor = {Grzegorzek, M. and Theobalt, C. and Andreas Kolb and Reinhard Koch} } @incollection {Nair2013survey, title = {A Survey on Time-of-Flight Stereo Fusion}, volume = {8200}, year = {2013}, pages = {105-127}, publisher = {Springer}, author = {Nair, Rahul and Ruhl, Kai and Frank Lenzen and Stephan Meister and Sch{\"a}fer, Henrik and Christoph S. 
Garbe and Eisemann, Martin and Magnor, Marcus and Daniel Kondermann}, editor = {Grzegorzek, Marcin and Theobalt, Christian and Andreas Kolb and Reinhard Koch} } @article {marquez_13_when, title = {When is a confidence measure good enough?}, journal = {submitted to CVPR 2013}, year = {2013}, note = {1}, author = {Márquez-Valle, P. and Gil, D. and Hernàndez-Sabaté, A. and Daniel Kondermann} } @incollection {nair2012, title = {Ground truth for evaluating time of flight imaging}, volume = {8200}, year = {2012}, pages = {52--74}, publisher = {Springer}, doi = {10.1007/978-3-642-44964-2_4}, author = {Nair, Rahul and Stephan Meister and Lambers, Martin and Balda, Michael and Hofmann, Hannes and Andreas Kolb and Daniel Kondermann and Bernd J{\"a}hne} } @conference {nair_12_high, title = {High Accuracy TOF and Stereo Sensor Fusion at Interactive Rates}, booktitle = {submitted to ECCV 2012}, volume = {7584}, year = {2012}, note = {1}, pages = {1-11}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-642-33868-7_1}, author = {Nair, R. and Frank Lenzen and Sch{\"a}fer, H. and Stephan Meister and Christoph S. Garbe and Daniel Kondermann} } @conference {Nair2012highaccuracy, title = {High accuracy TOF and stereo sensor fusion at interactive rates}, booktitle = {Computer Vision--ECCV 2012. Workshops and Demonstrations}, year = {2012}, pages = {1--11}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, author = {Nair, Rahul and Frank Lenzen and Stephan Meister and Sch{\"a}fer, Henrik and Christoph S. 
Garbe and Daniel Kondermann} } @article {meister_12_outdoor, title = {Outdoor Stereo Camera System for the Generation of Real-World Benchmark Data Sets}, journal = {Optical Engineering}, volume = {51}, year = {2012}, pages = {021107-1}, doi = {10.1117/1.OE.51.2.021107}, author = {Stephan Meister and Bernd J{\"a}hne and Daniel Kondermann} } @article {meister2012, title = {Outdoor stereo camera system for the generation of real-world benchmark data sets}, journal = {Opt. Eng.}, volume = {51}, year = {2012}, pages = {021107}, doi = {10.1117/1.OE.51.2.021107}, author = {Stephan Meister and Bernd J{\"a}hne and Daniel Kondermann} } @conference {kondermann_12_on, title = {On Performance Analysis of Optical Flow Algorithms}, booktitle = {Outdoor and Large-Scale Real-World Scene Analysis, Dagstuhl-Workshop 2011}, volume = {LNCS}, number = {7474}, year = {2012}, pages = {329--355}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-642-34091-8_15}, author = {Daniel Kondermann and Steffen Abraham and W. F{\"o}rstner and Gehrig, S. and Imiya, A. and Bernd J{\"a}hne and Klose, F. and Magnor, M. and Helmut Mayer and Mester, R. and Pajdla, T. and Reulke, R. and Zimmer, H.} } @conference {meister_12_when, title = {When Can We Use KinectFusion for Ground Truth Acquisition?}, booktitle = {Workshop on Color-Depth Camera Fusion in Robotics, IEEE International Conference on Intelligent Robots and Systems}, year = {2012}, note = {1}, author = {Stephan Meister and Izadi, S. and Kohli, P. and H{\"a}mmerle, M. and Carsten Rother and Daniel Kondermann} } @booklet {kondermann2011, title = {Alles im Fluss --- Optischer Fluss f{\"u}r industrielle Anwendungen}, year = {2011}, url = {http://www.gitverlag.com/de/print/4/18/issues/2009/3381.html}, author = {Daniel Kondermann} } @article {nair_11_high, title = {High Precision TOF-guided Depth from Stereo for Room Scanning}, journal = {CVMP, Proceedings.}, year = {2011}, note = {1}, author = {Nair, R. 
and Daniel Kondermann} } @article {berthe2010, title = {Three-dimensional, three-component wall-PIV}, journal = {Exp. Fluids}, volume = {48}, year = {2010}, pages = {online}, doi = {10.1007/s00348-009-0777-4}, author = {A. Berthe and Daniel Kondermann and Christensen, C. and Goubergrits, L. and Christoph S. Garbe and Affeld, K. and U. Kertzscher} } @phdthesis {kondermann2009a, title = {Modular Optical Flow Estimation with Applications to Fluid Dynamics}, year = {2009}, publisher = {IWR, Fakult{\"a}t f{\"u}r Mathematik und Informatik, Univ.\ Heidelberg}, url = {http://www.ub.uni-heidelberg.de/archiv/10184}, author = {Daniel Kondermann} } @incollection {garbe2009, title = {Spatiotemporal image analysis for flow measurements}, volume = {106}, year = {2009}, pages = {289--305}, publisher = {Springer}, abstract = {In this chapter, a framework will be presented for measuring and modeling transport processes using novel visualization techniques and extended optical flow techniques for digital image sequence analysis. In this way, parameters besides the 2-D xy velocity components can be extracted concurrently from the acquired 2-D image sequences, such as wall shear rates and momentum transport close to boundaries, diffusion coefficients, and depth z in addition to the z velocity components. Depending on the application, particularly the temporal regularization can be enhanced, leading to stabilization of results and reduction of spatial regularization. This is frequently of high importance for flows close to boundaries. Results from applications will be presented from the fields of environmental and life sciences as well as from engineering.}, doi = {10.1007/978-3-642-01106-1_29}, author = {Christoph S. Garbe and Daniel Kondermann and Markus Jehle and Bernd J{\"a}hne}, editor = {W. Nitsche and C. 
Dobriloff} } @incollection {berthe2009, title = {The wall PIV measurement technique for near wall flow fields in biofluid mechanics}, volume = {106}, year = {2009}, pages = {11--20}, publisher = {Springer}, abstract = {This chapter describes the development of a new time resolved 3D PIV technique for near wall flow field measurements. This measurement technique, called wall-PIV, is based on Beer-Lambert{\textquoteright}s law. It substitutes the classical PIV laser sheet by a diffuse, monochromatic full-field illumination that is limited to the near wall region by an absorbing molecular dye in the fluid. Aimed range of applications is the investigation of flow fields next to one- or two dimensionally curved, possibly flexing surfaces. The three dimensional three component flow estimation uses a new optical flow algorithm, based on particle trajectories. Results of the measurement technique{\textquoteright}s application on a displacement pediatric blood pump are presented.}, doi = {10.1007/978-3-642-01106-1_2}, author = {A. Berthe and Daniel Kondermann and Bernd J{\"a}hne and U. Kertzscher}, editor = {W. Nitsche and C. Dobriloff} } @article {scholz2008, title = {Double-pulse planar-LIF investigations using fluorescence motion analysis for mixture formation investigation}, journal = {Exp. Fluids}, volume = {45}, number = {4}, year = {2008}, pages = {583--593}, abstract = {A concept for dynamic mixture formation investigations of fuel/air mixtures is presented which can equally be applied to several other laser induced fluorescence (LIF) applications. Double-pulse LIF imaging was used to gain insight into dynamic mixture formation processes. The setup consists of a modified standard PIV setup. The "fuel/air ratio measurement by laser induced fluorescence (FARLIF)" approach is used for a quantification of the LIF images in order to obtain pairs of 2D fuel/air ratio maps. Two different evaluation concepts for LIF double pulse images are discussed. 
The first is based on the calculation of the temporal derivative field of the fuel/air ratio distribution. The result gives insight into the dynamic mixing process, showing where and how the mixture is changing locally. The second concept uses optical flow methods in order to estimate the motion of fluorescence (i.e., mixture) structures to gain insight into the dynamics, showing the distortion and the motion of the inhomogeneous mixture field. For this "fluorescence motion analysis" (FMA) two different evaluation approaches the "variational gradient based approach" and the "variational cross correlation based approach" are presented. For the validation of both, synthetic LIF image pairs with predefined motion fields were generated. Both methods were applied and the results compared with the known original motion field. This validation shows that FMA yields reliable results even for image pairs with low signal/noise ratio. Here, the "variational gradient based approach" turned out to be the better choice so far. Finally, the experimental combination of double-pulse FARLIF imaging with FMA and simultaneous PIV measurement is demonstrated. The comparison of the FMA motion field and the flow velocity field captured by PIV shows that both results basically reflect complementary information of the flow field. It is shown that the motion field of the fluorescence structures does not (necessarily) need to represent the actual flow velocity and that the flow velocity field alone can not illustrate the structure motion in any case. Therefore, the simultaneous measurement of both gives the deepest insight into the dynamic mixture formation process. The examined concepts and evaluation approaches of this paper can easily be adapted to various other planar LIF methods (with the LIF signal representing, e.g., species concentration, temperature, density etc.) broadening the insight for a wide range of different dynamic processes.}, doi = {10.1007/s00348-008-0537-x}, author = {J. 
Scholz and T. Wiersbinski and Paul Ruhnau and Daniel Kondermann and Christoph S. Garbe and R. Hain and Volker Beushausen} } @conference {andres2008, title = {On errors-in-variables regression with arbitrary covariance and its application to optical flow estimation}, booktitle = {IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2008}, year = {2008}, pages = {1--6}, publisher = {IEEE}, organization = {IEEE}, abstract = {Linear inverse problems in computer vision, including motion estimation, shape fitting and image reconstruction, give rise to parameter estimation problems with highly correlated errors in variables. Established total least squares methods estimate the most likely corrections Acirc and bcirc to a given data matrix [A, b] perturbed by additive Gaussian noise, such that there exists a solution y with [A + Acirc, b +bcirc]y = 0. In practice, regression imposes a more restrictive constraint namely the existence of a solution x with [A + Acirc]x = [b + bcirc]. In addition, more complicated correlations arise canonically from the use of linear filters. We, therefore, propose a maximum likelihood estimator for regression in the general case of arbitrary positive definite covariance matrices. We show that Acirc, bcirc and x can be found simultaneously by the unconstrained minimization of a multivariate polynomial which can, in principle, be carried out by means of a Grobner basis. Results for plane fitting and optical flow computation indicate the superiority of the proposed method.}, doi = {10.1109/CVPR.2008.4587571}, author = {Bj{\"o}rn Andres and Claudia Kondermann and Daniel Kondermann and Fred A. Hamprecht and Christoph S. Garbe} } @article {andres_08_errors-in-variables, title = {On errors-in-variables regression with arbitrary covariance and its application to optical flow estimation}, journal = {Computer Vision and Pattern Recognition, 2008. CVPR 2008. 
IEEE Conference on}, year = {2008}, note = {1}, pages = {1-6}, doi = {10.1109/CVPR.2008.4587571}, author = {Bj{\"o}rn Andres and Claudia Kondermann and Daniel Kondermann and Ullrich K{\"o}the and Fred A. Hamprecht and Christoph S. Garbe} } @conference {kondermann2008b, title = {Motion Estimation Based on a Temporal Model of Fluid Flows}, booktitle = {13th International Symposium on Flow Visualization}, year = {2008}, pages = {1-10}, author = {Daniel Kondermann and Claudia Kondermann and A. Berthe and U. Kertzscher and Christoph S. Garbe} } @conference {kondermann2008, title = {Postprocessing of optical flows via surface measures and motion inpainting}, booktitle = {Pattern Recognition}, volume = {5096}, year = {2008}, pages = {355--364}, abstract = {Dense optical flow fields are required for many applications. They can be obtained by means of various global methods which employ regularization techniques for propagating estimates to regions with insufficient information. However, incorrect flow estimates are propagated as well. We, therefore, propose surface measures for the detection of locations where the full flow can be estimated reliably, that is in the absence of occlusions, intensity changes, severe noise, transparent structures, aperture problems and homogeneous regions. In this way we obtain sparse, but reliable motion fields with lower angular errors. By subsequent application of a basic motion inpainting technique to such sparsified flow fields we obtain dense fields with smaller angular errors than obtained by the original combined local global (CLG) method and the structure tensor method in all test sequences. Experiments show that this postprocessing method makes error improvements of up to 38\% feasible.}, doi = {10.1007/978-3-540-69321-5_36}, author = {Claudia Kondermann and Daniel Kondermann and Christoph S. 
Garbe} } @conference {kondermann2007, title = {An adaptive confidence measure for optical flows based on linear subspace projections}, booktitle = {Proceedings of the 29th DAGM Symposium on Pattern Recognition}, volume = {4713}, year = {2007}, pages = {132--141}, publisher = {Springer}, organization = {Springer}, abstract = {Confidence measures are important for the validation of optical flow fields by estimating the correctness of each displacement vector. There are several frequently used confidence measures, which have been found of at best intermediate quality. Hence, we propose a new confidence measure based on linear subspace projections. The results are compared to the best previously proposed confidence measures with respect to an optimal confidence. Using the proposed measure we are able to improve previous results by up to 31\%.}, doi = {10.1007/978-3-540-74936-3_14}, author = {Claudia Kondermann and Daniel Kondermann and Bernd J{\"a}hne and Christoph S. Garbe}, editor = {Fred A. Hamprecht and Christoph Schn{\"o}rr and Bernd J{\"a}hne} } @conference {scholz2007, title = {Double-pulse planar-LIF investigations using fluorescence motion analysis for mixture formation investigation}, booktitle = {7th International Symposium on Particle Image Velocimetry Rome, 11. - 14. Sept.}, year = {2007}, author = {J. Scholz and T. Wiersbinski and Paul Ruhnau and Daniel Kondermann and Christoph S. Garbe and R. Hain and Volker Beushausen} }