Biography
Michael Tanner joined the Oxford Robotics Institute in 2014 in pursuit of his DPhil. He researched methods to fuse data from a variety of sensors (LIDAR, stereo or monocular cameras, etc.) to create large-scale, 3D dense reconstructions for autonomous vehicle applications.
In 2009, he completed his M.S. in Computer Engineering at the Air Force Institute of Technology in Dayton, Ohio in the United States where he leveraged the emerging general-purpose computing capabilities of graphics processors to accelerate multiple-target tracking systems. Following his M.S., he developed next-generation, low-power/high-performance computer clusters for airborne applications.
In his free time, Michael explores the English countryside with his wife and three children.
Publications
2018
- M. Tanner, S. Saftescu, A. Bewley, and P. Newman, “Meshed Up: Learnt Error Correction in 3D Reconstructions,” in 2018 IEEE International Conference on Robotics and Automation (ICRA), 2018, pp. 3201–3206.
% Entry reformatted one-field-per-line; braces added around "3D", "IEEE", and
% "ICRA" so sentence-casing styles cannot downcase them.
@inproceedings{tanner2018meshed,
  author       = {Tanner, Michael and Saftescu, Stefan and Bewley, Alex and Newman, Paul},
  title        = {Meshed Up: Learnt Error Correction in {3D} Reconstructions},
  booktitle    = {2018 {IEEE} International Conference on Robotics and Automation ({ICRA})},
  pages        = {3201--3206},
  year         = {2018},
  organization = {IEEE},
  url          = {https://arxiv.org/abs/1801.09128},
  pdf          = {https://arxiv.org/pdf/1801.09128.pdf},
}
2017
- G. Pascoe, W. Maddern, M. Tanner, P. Pinies, and P. Newman, “NID-SLAM: Robust Monocular SLAM using Normalised Information Distance,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Honolulu, HI, 2017.
% Fix: month was the quoted literal {July}; use the predefined macro jul
% (unquoted) so styles/languages can render it correctly. Acronyms in
% booktitle are brace-protected for robustness.
@inproceedings{PascoeCVPR2017,
  author    = {Pascoe, Geoffrey and Maddern, Will and Tanner, Michael and Pinies, Pedro and Newman, Paul},
  title     = {{NID-SLAM}: Robust Monocular {SLAM} using Normalised Information Distance},
  booktitle = {Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})},
  year      = {2017},
  address   = {Honolulu, HI},
  month     = jul,
  pdf       = {http://www.robots.ox.ac.uk/~mobile/Papers/2017CVPR_pascoe.pdf},
}
2016
- M. Tanner, P. Piniés, L. M. Paz, and P. Newman, “Keep Geometry in Context: Using Contextual Priors for Very-Large-Scale 3D Dense Reconstructions,” in Robotics: Science and Systems, Workshop on Geometry and Beyond: Representations, Physics, and Scene Understanding for Robotics, 2016.
% Fix: "month = June" referenced an UNDEFINED macro (only jan..dec exist),
% which BibTeX expands to empty text with a warning -> month = jun.
% Also: whole-title double-bracing replaced with per-word protection ({3D})
% so bibliography styles can still apply their own casing.
@inproceedings{TannerRSS2016,
  author    = {Tanner, Michael and Pini{\'e}s, Pedro and Paz, Lina Maria and Newman, Paul},
  title     = {Keep Geometry in Context: Using Contextual Priors for Very-Large-Scale {3D} Dense Reconstructions},
  booktitle = {Robotics: Science and Systems, Workshop on Geometry and Beyond: Representations, Physics, and Scene Understanding for Robotics},
  year      = {2016},
  month     = jun,
  pdf       = {http://www.robots.ox.ac.uk/~mobile/Papers/RSS2016_Tanner.v3.camera_ready.pdf},
}
- M. Tanner, P. Piniés, L. M. Paz, and P. Newman, “DENSER Cities: A System for Dense Efficient Reconstructions of Cities,” ArXiv e-prints, 2016.
% Fix: whole-title double-bracing replaced with per-word protection of the
% acronym {DENSER}; other fields kept (eprint/archiveprefix/primaryclass
% identify the arXiv record).
@article{TannerArXiv2016,
  author        = {Tanner, Michael and Pini{\'e}s, Pedro and Paz, Lina Maria and Newman, Paul},
  title         = {{DENSER} Cities: A System for Dense Efficient Reconstructions of Cities},
  journal       = {ArXiv e-prints},
  year          = {2016},
  month         = apr,
  archiveprefix = {arXiv},
  eprint        = {1604.03734},
  primaryclass  = {cs.CV},
  adsurl        = {http://adsabs.harvard.edu/abs/2016arXiv160403734T},
  adsnote       = {Provided by the SAO/NASA Astrophysics Data System},
  keywords      = {Computer Science - Computer Vision and Pattern Recognition, Computer Science - Robotics},
  pdf           = {http://arxiv.org/pdf/1604.03734v1.pdf},
}
- M. Tanner, P. Piniés, L. M. Paz, and P. Newman, “What Lies Behind: Recovering Hidden Shape in Dense Mapping,” in Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), Stockholm, Sweden, 2016.
% Fix: single-letter brace protection ({W}hat {L}ies ...) breaks kerning and
% hyphenation; brace whole words/acronyms instead. month = {May} replaced
% with the predefined macro may.
@inproceedings{TannerICRA2016,
  author    = {Tanner, Michael and Pini{\'e}s, Pedro and Paz, Lina Maria and Newman, Paul},
  title     = {What Lies Behind: Recovering Hidden Shape in Dense Mapping},
  booktitle = {Proceedings of the {IEEE} International Conference on Robotics and Automation ({ICRA})},
  year      = {2016},
  address   = {Stockholm, Sweden},
  month     = may,
  pdf       = {http://www.robots.ox.ac.uk/~mobile/Papers/ICRA2016BORG2_Tanner.v6.camera_ready.pdf},
}
2015
- M. Tanner, P. Piniés, L. M. Paz, and P. Newman, “BOR2G: Building Optimal Regularised Reconstructions with GPUs (in cubes),” in International Conference on Field and Service Robotics (FSR), Toronto, ON, Canada, 2015.
% Fix: single-letter brace protection replaced with whole-word protection of
% the acronyms {BOR2G} and {GPUs}; month = {June} replaced with macro jun.
@inproceedings{TannerFSR2015,
  author    = {Tanner, Michael and Pini{\'e}s, Pedro and Paz, Lina Maria and Newman, Paul},
  title     = {{BOR2G}: Building Optimal Regularised Reconstructions with {GPUs} (in cubes)},
  booktitle = {International Conference on Field and Service Robotics ({FSR})},
  year      = {2015},
  address   = {Toronto, ON, Canada},
  month     = jun,
  pdf       = {http://www.robots.ox.ac.uk/~mobile/Papers/FSR2015BORG_Tanner.v2.final_publication.pdf},
}
2010
- M. A. Tanner and W. A. Keen, “Exploiting current-generation graphics hardware for synthetic-scene generation,” in Proc. SPIE 7663, Technologies for Synthetic Environments: Hardware-in-the-Loop Testing XV, 2010, pp. 76630P-1–76630P-9.
% Fix: MISSING COMMA after "month = {apr}" before doi (parse error that can
% corrupt the rest of the file). Also: month macro unquoted; page range uses
% a double hyphen; empty number field dropped; field names lowercased
% (URL/Pdf -> url/pdf); stray leading space removed from the url value.
@inproceedings{tanner2010exploiting,
  author    = {Tanner, Michael A. and Keen, Wayne A.},
  title     = {Exploiting current-generation graphics hardware for synthetic-scene generation},
  booktitle = {Proc. SPIE 7663, Technologies for Synthetic Environments: Hardware-in-the-Loop Testing XV},
  volume    = {7663},
  pages     = {76630P-1--76630P-9},
  year      = {2010},
  month     = apr,
  doi       = {10.1117/12.851798},
  url       = {http://dx.doi.org/10.1117/12.851798},
  pdf       = {http://www.robots.ox.ac.uk/~mtanner/pdf/tanner2010exploiting.pdf},
  abstract  = {Increasing seeker frame rate and pixel count, as well as the demand for higher levels of scene fidelity, have driven scene generation software for hardware-in-the-loop (HWIL) and software-in-the-loop (SWIL) testing to higher levels of parallelization. Because modern PC graphics cards provide multiple computational cores (240 shader cores for a current NVIDIA Corporation GeForce and Quadro cards), implementation of phenomenology codes on graphics processing units (GPUs) offers significant potential for simultaneous enhancement of simulation frame rate and fidelity. To take advantage of this potential requires algorithm implementation that is structured to minimize data transfers between the central processing unit (CPU) and the GPU. In this paper, preliminary methodologies developed at the Kinetic Hardware In-The-Loop Simulator (KHILS) will be presented. Included in this paper will be various language tradeoffs between conventional shader programming, Compute Unified Device Architecture (CUDA) and Open Computing Language (OpenCL), including performance trades and possible pathways for future tool development.},
}
- W. Keen, M. Tanner, C. Coker, and D. Crow, “GPU based synthetic scene generation for maritime environments,” in Proc. SPIE 7663, Technologies for Synthetic Environments: Hardware-in-the-Loop Testing XV, 2010, pp. 76630O-1–76630O-9.
% Fix: MISSING COMMA after "month = {apr}" before doi (parse error). Also:
% month macro unquoted; page range uses a double hyphen; empty number field
% dropped; field names lowercased; stray leading space removed from url.
@inproceedings{keen2010gpu,
  author    = {Keen, Wayne and Tanner, Michael and Coker, Charles and Crow, Dennis},
  title     = {{GPU} based synthetic scene generation for maritime environments},
  booktitle = {Proc. SPIE 7663, Technologies for Synthetic Environments: Hardware-in-the-Loop Testing XV},
  volume    = {7663},
  pages     = {76630O-1--76630O-9},
  year      = {2010},
  month     = apr,
  doi       = {10.1117/12.851782},
  url       = {http://dx.doi.org/10.1117/12.851782},
  pdf       = {http://www.robots.ox.ac.uk/~mtanner/pdf/keen2010gpu.pdf},
  abstract  = {Hardware and software in the loop modeling of maritime environments involves a wide variety of complex physical and optical phenomenology and effects. The scale of significant effects to be modeled range from the order of centimeters for capillary type waves and turbulent wake effects up to many meters for rolling waves. In addition, wakes for boats and ships operating at a wide variety of speeds and conditions provide additional levels of scene complexity. Generating synthetic scenes for such a detailed, multi-scaled and dynamic environment in a physically realistic yet computationally tractable fashion represents a significant challenge for scene generation tools. In this paper, next generation scene generation codes utilizing personal computer (PC) graphics processors with programmable shaders as well as CUDA (Compute Unified Device Architecture) and OpenCL (Open Computing Language) implementations will be presented.},
}
- D. Crow, C. Coker, W. Keen, B. Smith, and M. Tanner, “High Fidelity Boost-Phase Scene Generation Supporting Digital Simulations and Hardware-In-The-Loop Testing Efforts,” in MSS Missile Defense Sensors, Environments, and Algorithms, 2010.
% Same entry, reformatted one-field-per-line with aligned '=' for
% consistency with the rest of the bibliography; no data changed.
@inproceedings{crow2010high,
  author    = {Crow, Dennis and Coker, Charles and Keen, Wayne and Smith, Brian and Tanner, Michael},
  title     = {High Fidelity Boost-Phase Scene Generation Supporting Digital Simulations and Hardware-In-The-Loop Testing Efforts},
  booktitle = {MSS Missile Defense Sensors, Environments, and Algorithms},
  year      = {2010},
}
2009
- M. A. Tanner, “Image Processing for Multiple-Target Tracking on a Graphics Processing Unit,” Master's Thesis, 2009.
% Same entry, reformatted one-field-per-line; field names normalised to
% lowercase (field names are case-insensitive, so output is unchanged).
@mastersthesis{tanner2009image,
  author = {Tanner, Michael A.},
  title  = {Image Processing for Multiple-Target Tracking on a Graphics Processing Unit},
  school = {Air Force Institute of Technology},
  year   = {2009},
  pdf    = {http://www.robots.ox.ac.uk/~mtanner/pdf/tanner2009image.pdf},
}