Compare commits


31 Commits

Author SHA1 Message Date
Jan Kowalczyk 7b5accb6c5 fixed plots 2025-10-21 19:04:19 +02:00
Jan Kowalczyk 8f983b890f formatting 2025-10-19 17:39:42 +02:00
Jan Kowalczyk 6cd2c7fbef abstract lidar capitalization 2025-10-19 17:34:38 +02:00
Jan Kowalczyk 62c424cd54 grammarly done 2025-10-19 17:29:31 +02:00
Jan Kowalczyk bd9171f68e grammarly data chapter 2025-10-19 16:46:29 +02:00
Jan Kowalczyk efdc33035b grammarly part 1 done 2025-10-19 16:27:22 +02:00
Jan Kowalczyk f2c8fe241d cleanup 2025-10-18 18:27:13 +02:00
Jan Kowalczyk ece887860b z-score rework 2025-10-18 18:01:41 +02:00
Jan Kowalczyk c3830db913 metrics section draft 2025-10-18 17:23:18 +02:00
Jan Kowalczyk 3d21171a40 raw metrics section 2025-10-18 17:02:22 +02:00
Jan Kowalczyk 5aca00ad67 better grammarly prep 2025-10-18 12:47:16 +02:00
Jan Kowalczyk 374420727b cleanup for raw txt (grammar check) 2025-10-18 12:19:26 +02:00
Jan Kowalczyk 8697c07c0f reworked baselines 2025-10-18 11:28:12 +02:00
Jan Kowalczyk 5287f2c557 grammarly deepsad chap 2025-10-12 17:26:07 +02:00
Jan Kowalczyk b7faf6e1b6 grammarly wip (bg chap done) 2025-10-12 16:56:49 +02:00
Jan Kowalczyk 0354ad37e1 grammarly intro 2025-10-12 16:03:27 +02:00
Jan Kowalczyk 32ab4e6a11 fixed all reasonable warnings 2025-10-12 15:45:13 +02:00
Jan Kowalczyk 055d403dfb aspell start 2025-10-11 18:09:18 +02:00
Jan Kowalczyk 28b6eba094 broken pdf 2025-10-11 16:37:34 +02:00
Jan Kowalczyk 436a25df11 broken pdf 2025-10-11 16:37:19 +02:00
Jan Kowalczyk 5d0610a875 feedback wip 2025-10-11 16:37:10 +02:00
Jan Kowalczyk 545b65d3d5 feedback WIP 2025-10-11 15:58:44 +02:00
Jan Kowalczyk 8db244901e feedback wip 2025-10-11 15:21:53 +02:00
Jan Kowalczyk 72afe9ebdc nicer looking abstract 2025-10-11 13:38:39 +02:00
Jan Kowalczyk 81c1e5b7af added abstract 2025-09-29 19:00:58 +02:00
Jan Kowalczyk 6040f5f144 draft 2025-09-29 18:54:35 +02:00
Jan Kowalczyk d5f5a09d6f wip 2025-09-29 18:20:30 +02:00
Jan Kowalczyk a6f5ecaba2 wip 2025-09-29 11:02:07 +02:00
Jan Kowalczyk 1f3e607e8d wip 2025-09-29 10:40:26 +02:00
Jan Kowalczyk 3bf457f2cf wip 2025-09-29 10:17:36 +02:00
Jan Kowalczyk 3eb7e662b0 wip 2025-09-28 20:07:05 +02:00
37 changed files with 3502 additions and 822 deletions

File diff suppressed because it is too large.

Binary file not shown.

File diff suppressed because it is too large.

View File

@@ -24,15 +24,12 @@
not used other than the declared sources/resources, and that I have
explicitly indicated all material which has been quoted either
literally or by content from the sources used.
\ifthenelse{\equal{\ThesisTitle}{master's thesis} \or
\equal{\ThesisTitle}{diploma thesis} \or
\equal{\ThesisTitle}{doctoral thesis}}
{The text document uploaded to TUGRAZonline is identical to the present \ThesisTitle.}{\reminder{TODO: fix \textbackslash ThesisTitle}}
The text document uploaded to TUGRAZonline is identical to the present \ThesisTitle.
\par\vspace*{4cm}
\centerline{
\begin{tabular}{m{1.5cm}cm{1.5cm}m{3cm}m{1.5cm}cm{1.5cm}}
\cline{1-3} \cline{5-7}
& date & & & & (signature) &\\
\end{tabular}}
\begin{tabular}{m{1.5cm}cm{1.5cm}m{3cm}m{1.5cm}cm{1.5cm}}
\cline{1-3} \cline{5-7}
& date & & & & (signature) & \\
\end{tabular}}

View File

@@ -55,7 +55,7 @@
\makeatother
% header and footer texts
\clearscrheadfoot % clear everything
\clearpairofpagestyles % clear everything
\KOMAoptions{headlines=1} % header needs two lines here
% [plain]{actual (scrheadings)}
\ihead[]{}%
@@ -141,46 +141,46 @@
\ifthenelse{\equal{\DocumentLanguage}{en}}{\renewcaptionname{USenglish}{\figurename}{Figure}}{}%
\ifthenelse{\equal{\DocumentLanguage}{de}}{\renewcaptionname{ngerman}{\figurename}{Abbildung}}{}%
\captionsetup{%
format=hang,% hanging captions
labelformat=simple,% just name and number
labelsep=colon,% colon and space
justification=justified,%
singlelinecheck=true,% center single line captions
font={footnotesize,it},% font style of label and text
margin=0.025\textwidth,% margin left/right of the caption (to textwidth)
indention=0pt,% no further indention (just hanging)
hangindent=0pt,% no further indention (just hanging)}
aboveskip=8pt,% same spacing above and...
belowskip=8pt}% ...below the float (this way tables shouldn't be a problem, either)
format=hang,% hanging captions
labelformat=simple,% just name and number
labelsep=colon,% colon and space
justification=justified,%
singlelinecheck=true,% center single line captions
font={footnotesize,it},% font style of label and text
margin=0.025\textwidth,% margin left/right of the caption (to textwidth)
indention=0pt,% no further indention (just hanging)
hangindent=0pt,% no further indention (just hanging)}
aboveskip=8pt,% same spacing above and...
belowskip=8pt}% ...below the float (this way tables shouldn't be a problem, either)
% code listings
\lstloadlanguages{VHDL,Matlab,[ANSI]C,Java,[LaTeX]TeX}
\lstset{%
% general
breaklines=true,% automatically break long lines
breakatwhitespace=true,% break only at white spaces
breakindent=1cm,% additional indentation for broken lines
% positioning
linewidth=\linewidth,% set width of whole thing to \linewidth
xleftmargin=0.1\linewidth,%
% frame and caption
frame=tlrb,% frame the entire thing
framexleftmargin=1cm,% to include linenumbering into frame
captionpos=b,% caption at bottom
% format parameters
basicstyle=\ttfamily\tiny,% small true type font
keywordstyle=\color{black},%
identifierstyle=\color{black},%
commentstyle=\color[rgb]{0.45,0.45,0.45},% gray
stringstyle=\color{black},%
showstringspaces=false,%
showtabs=false,%
tabsize=2,%
% linenumbers
numberstyle=\tiny,%
numbers=left,%
numbersep=3mm,%
firstnumber=1,%
stepnumber=1,% number every line (0: off)
numberblanklines=true%
% general
breaklines=true,% automatically break long lines
breakatwhitespace=true,% break only at white spaces
breakindent=1cm,% additional indentation for broken lines
% positioning
linewidth=\linewidth,% set width of whole thing to \linewidth
xleftmargin=0.1\linewidth,%
% frame and caption
frame=tlrb,% frame the entire thing
framexleftmargin=1cm,% to include linenumbering into frame
captionpos=b,% caption at bottom
% format parameters
basicstyle=\ttfamily\tiny,% small true type font
keywordstyle=\color{black},%
identifierstyle=\color{black},%
commentstyle=\color[rgb]{0.45,0.45,0.45},% gray
stringstyle=\color{black},%
showstringspaces=false,%
showtabs=false,%
tabsize=2,%
% linenumbers
numberstyle=\tiny,%
numbers=left,%
numbersep=3mm,%
firstnumber=1,%
stepnumber=1,% number every line (0: off)
numberblanklines=true%
}

View File

@@ -47,33 +47,33 @@
\usepackage{fixltx2e}% LaTeX 2e bugfixes
\usepackage{ifthen}% for optional parts
\ifthenelse{\equal{\PaperSize}{a4paper}}{
\usepackage[paper=\PaperSize,twoside=\Twosided,%
textheight=246mm,%
textwidth=160mm,%
heightrounded=true,% round textheight to multiple of lines (avoids overfull vboxes)
ignoreall=true,% do not include header, footer, and margins in calculations
marginparsep=5pt,% marginpar only used for signs (centered), thus only small sep. needed
marginparwidth=10mm,% prevent margin notes to be out of page
hmarginratio=2:1,% set margin ratio (inner:outer for twoside) - (2:3 is default)
]{geometry}}{}%
\usepackage[paper=\PaperSize,twoside=\Twosided,%
textheight=246mm,%
textwidth=160mm,%
heightrounded=true,% round textheight to multiple of lines (avoids overfull vboxes)
ignoreall=true,% do not include header, footer, and margins in calculations
marginparsep=5pt,% marginpar only used for signs (centered), thus only small sep. needed
marginparwidth=10mm,% prevent margin notes to be out of page
hmarginratio=2:1,% set margin ratio (inner:outer for twoside) - (2:3 is default)
]{geometry}}{}%
\ifthenelse{\equal{\PaperSize}{letterpaper}}{
\usepackage[paper=\PaperSize,twoside=\Twosided,%
textheight=9in,%
textwidth=6.5in,%
heightrounded=true,% round textheight to multiple of lines (avoids overfull vboxes)
ignoreheadfoot=false,% do not include header and footer in calculations
marginparsep=5pt,% marginpar only used for signs (centered), thus only small sep. needed
marginparwidth=10mm,% prevent margin notes to be out of page
hmarginratio=3:2,% set margin ratio (inner:outer for twoside) - (2:3 is default)
]{geometry}}{}%
\usepackage[paper=\PaperSize,twoside=\Twosided,%
textheight=9in,%
textwidth=6.5in,%
heightrounded=true,% round textheight to multiple of lines (avoids overfull vboxes)
ignoreheadfoot=false,% do not include header and footer in calculations
marginparsep=5pt,% marginpar only used for signs (centered), thus only small sep. needed
marginparwidth=10mm,% prevent margin notes to be out of page
hmarginratio=3:2,% set margin ratio (inner:outer for twoside) - (2:3 is default)
]{geometry}}{}%
\ifthenelse{\equal{\DocumentLanguage}{en}}{\usepackage[T1]{fontenc}\usepackage[utf8]{inputenc}\usepackage[USenglish]{babel}}{}%
\ifthenelse{\equal{\DocumentLanguage}{de}}{\usepackage[T1]{fontenc}\usepackage[utf8]{inputenc}\usepackage[ngerman]{babel}}{}%
\usepackage[%
headtopline,plainheadtopline,% activate all lines (header and footer)
headsepline,plainheadsepline,%
footsepline,plainfootsepline,%
footbotline,plainfootbotline,%
automark% auto update \..mark
headtopline,plainheadtopline,% activate all lines (header and footer)
headsepline,plainheadsepline,%
footsepline,plainfootsepline,%
footbotline,plainfootbotline,%
automark% auto update \..mark
]{scrlayer-scrpage}% (KOMA)
\usepackage{imakeidx}
\usepackage[]{caption}% customize captions
@@ -91,7 +91,7 @@ automark% auto update \..mark
\usepackage[normalem]{ulem}% cross-out, strike-out, underlines (normalem: keep \emph italic)
%\usepackage[safe]{textcomp}% loading in safe mode to avoid problems (see LaTeX companion)
%\usepackage[geometry,misc]{ifsym}% technical symbols
\usepackage{remreset}%\@removefromreset commands (e.g., for continuous footnote numbering)
%\usepackage{remreset}%\@removefromreset commands (e.g., for continuous footnote numbering)
\usepackage{paralist}% extended list environments
% \usepackage[Sonny]{fncychap}
\usepackage[avantgarde]{quotchap}
@@ -140,35 +140,35 @@ automark% auto update \..mark
\usepackage{mdwlist} %list extensions
\ifthenelse{\equal{\DocumentLanguage}{de}}
{
\usepackage[german]{fancyref} %better cross-references
\usepackage[locale=DE]{siunitx} %numbers and SI units => enable binary units...
\usepackage[autostyle=true, %quotation marks and translation of bibliography references
german=quotes]{csquotes} %quotation marks and translation of bibliography references
\usepackage[german]{fancyref} %better cross-references
\usepackage[locale=DE]{siunitx} %numbers and SI units => enable binary units...
\usepackage[autostyle=true, %quotation marks and translation of bibliography references
german=quotes]{csquotes} %quotation marks and translation of bibliography references
}
{
\usepackage[english]{fancyref} %better cross-references
\usepackage[locale=US]{siunitx} %numbers and SI units => enable binary units...
\usepackage[autostyle=true] %quotation marks and translation of bibliography references
{csquotes}
\usepackage[english]{fancyref} %better cross-references
\usepackage[locale=US]{siunitx} %numbers and SI units => enable binary units...
\usepackage[autostyle=true] %quotation marks and translation of bibliography references
{csquotes}
}
\sisetup{detect-weight=true, detect-family=true} %format like surrounding environment
%extending fancyref for listings in both languages:
\newcommand*{\fancyreflstlabelprefix}{lst}
\fancyrefaddcaptions{english}{%
\providecommand*{\freflstname}{listing}%
\providecommand*{\Freflstname}{Listing}%
\providecommand*{\freflstname}{listing}%
\providecommand*{\Freflstname}{Listing}%
}
\fancyrefaddcaptions{german}{%
\providecommand*{\freflstname}{Listing}%
\providecommand*{\Freflstname}{Listing}%
\providecommand*{\freflstname}{Listing}%
\providecommand*{\Freflstname}{Listing}%
}
\frefformat{plain}{\fancyreflstlabelprefix}{\freflstname\fancyrefdefaultspacing#1}
\Frefformat{plain}{\fancyreflstlabelprefix}{\Freflstname\fancyrefdefaultspacing#1}
\frefformat{vario}{\fancyreflstlabelprefix}{%
\freflstname\fancyrefdefaultspacing#1#3%
\freflstname\fancyrefdefaultspacing#1#3%
}
\Frefformat{vario}{\fancyreflstlabelprefix}{%
\Freflstname\fancyrefdefaultspacing#1#3%
\Freflstname\fancyrefdefaultspacing#1#3%
}
\sisetup{separate-uncertainty} %enable uncertainty for siunitx
@@ -176,30 +176,30 @@ automark% auto update \..mark
\DeclareSIUnit\permille{\text{\textperthousand}} %add \permille to siunitx
\usepackage{xfrac} %nicer fractions for SI units
\sisetup{per-mode=fraction, %enable fraction bars for SI units
fraction-function=\sfrac} %use xfrac as the fraction function
fraction-function=\sfrac} %use xfrac as the fraction function
\usepackage[scaled=0.78]{inconsolata}%typewriter font for source code
\usepackage[backend=biber, %select the backend for the bibliography extension
bibencoding=utf8, %the .bib file is UTF-8 encoded...
maxbibnames=99, %always list all authors in the bibliography...
style=ieee
bibencoding=utf8, %the .bib file is UTF-8 encoded...
maxbibnames=99, %always list all authors in the bibliography...
style=ieee
]{biblatex}
\bibliography{bib/bibliography} %load the .bib file and use it as the bibliography database
\ifthenelse{\equal{\FramedLinks}{true}}
{
\usepackage[%
breaklinks=true,% allow line break in links
colorlinks=false,% if false: framed link
linkcolor=black,anchorcolor=black,citecolor=black,filecolor=black,%
menucolor=black,urlcolor=black,bookmarksnumbered=true]{hyperref}% hyperlinks for references
\usepackage[%
breaklinks=true,% allow line break in links
colorlinks=false,% if false: framed link
linkcolor=black,anchorcolor=black,citecolor=black,filecolor=black,%
menucolor=black,urlcolor=black,bookmarksnumbered=true]{hyperref}% hyperlinks for references
}
{
\usepackage[%
breaklinks=true,% allow line break in links
colorlinks=true,% if false: framed link
linkcolor=black,anchorcolor=black,citecolor=black,filecolor=black,%
menucolor=black,urlcolor=black,bookmarksnumbered=true]{hyperref}% hyperlinks for references
\usepackage[%
breaklinks=true,% allow line break in links
colorlinks=true,% if false: framed link
linkcolor=black,anchorcolor=black,citecolor=black,filecolor=black,%
menucolor=black,urlcolor=black,bookmarksnumbered=true]{hyperref}% hyperlinks for references
}
\setcounter{biburlnumpenalty}{100}%allow URLs in the bibliography to break across lines
@@ -213,8 +213,8 @@ style=ieee
\ifthenelse{\equal{\DocumentLanguage}{de}}
{
\deftranslation[to=ngerman] %teach the babel package the German chapter name
{Acronyms}{Abkürzungsverzeichnis} %for the list of acronyms
\deftranslation[to=ngerman] %teach the babel package the German chapter name
{Acronyms}{Abkürzungsverzeichnis} %for the list of acronyms
}{}
% misc

View File

@@ -41,7 +41,7 @@
numpages = {58},
keywords = {outlier detection, Anomaly detection},
},
@dataset{alexander_kyuroson_2023_7913307,
@dataset{alexander_kyuroson_2023_7913307,
author = {Alexander Kyuroson and Niklas Dahlquist and Nikolaos Stathoulopoulos
and Vignesh Kottayam Viswanathan and Anton Koval and George
Nikolakopoulos},
@@ -85,37 +85,6 @@
pages = {716--721},
}
,
@inproceedings{deepsvdd,
title = {Deep One-Class Classification},
author = {Ruff, Lukas and Vandermeulen, Robert and Goernitz, Nico and Deecke,
Lucas and Siddiqui, Shoaib Ahmed and Binder, Alexander and M{\"u}ller
, Emmanuel and Kloft, Marius},
booktitle = {Proceedings of the 35th International Conference on Machine
Learning},
pages = {4393--4402},
year = {2018},
editor = {Dy, Jennifer and Krause, Andreas},
volume = {80},
series = {Proceedings of Machine Learning Research},
month = {10--15 Jul},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v80/ruff18a/ruff18a.pdf},
url = {https://proceedings.mlr.press/v80/ruff18a.html},
abstract = {Despite the great advances made by deep learning in many machine
learning problems, there is a relative dearth of deep learning
approaches for anomaly detection. Those approaches which do exist
involve networks trained to perform a task other than anomaly
detection, namely generative models or compression, which are in
turn adapted for use in anomaly detection; they are not trained on
an anomaly detection based objective. In this paper we introduce a
new anomaly detection method—Deep Support Vector Data Description—,
which is trained on an anomaly detection based objective. The
adaptation to the deep regime necessitates that our neural network
and training procedure satisfy certain properties, which we
demonstrate theoretically. We show the effectiveness of our method
on MNIST and CIFAR-10 image benchmark datasets as well as on the
detection of adversarial examples of GTSRB stop signs.},
},
@inproceedings{deep_svdd,
title = {Deep One-Class Classification},
author = {Ruff, Lukas and Vandermeulen, Robert and Goernitz, Nico and Deecke,
@@ -235,7 +204,7 @@
performance;Current measurement},
doi = {10.1109/IROS51168.2021.9636694},
},
@article{deep_learning_overview,
@article{deep_learning_overview,
title = {Deep learning in neural networks: An overview},
journal = {Neural Networks},
volume = {61},
@@ -289,7 +258,7 @@
autoencoder algorithm are summarized, and prospected for its future
development directions are addressed.},
},
@article{semi_overview,
@article{semi_overview,
author = {Yang, Xiangli and Song, Zixing and King, Irwin and Xu, Zenglin},
journal = {IEEE Transactions on Knowledge and Data Engineering},
title = {A Survey on Deep Semi-Supervised Learning},
@@ -302,7 +271,7 @@
learning;semi-supervised learning;deep learning},
doi = {10.1109/TKDE.2022.3220219},
},
@book{ai_fundamentals_book,
@book{ai_fundamentals_book,
title = {Fundamentals of Artificial Intelligence},
url = {http://dx.doi.org/10.1007/978-81-322-3972-7},
DOI = {10.1007/978-81-322-3972-7},
@@ -312,7 +281,7 @@
language = {en},
},
@article{machine_learning_overview,
@article{machine_learning_overview,
title = {Machine Learning from Theory to Algorithms: An Overview},
volume = {1142},
ISSN = {1742-6596},
@@ -550,7 +519,7 @@
year = {1998},
pages = {2278--2324},
},
@article{ef_concept_source,
@article{ef_concept_source,
title = {Multi-Year ENSO Forecasts Using Parallel Convolutional Neural
Networks With Heterogeneous Architecture},
volume = {8},
@@ -600,9 +569,189 @@
publisher = {MIT Press},
note = {\url{http://www.deeplearningbook.org}},
year = {2016},
},
@misc{mobilenet,
doi = {10.48550/ARXIV.1704.04861},
url = {https://arxiv.org/abs/1704.04861},
author = {Howard, Andrew G. and Zhu, Menglong and Chen, Bo and Kalenichenko,
Dmitry and Wang, Weijun and Weyand, Tobias and Andreetto, Marco and
Adam, Hartwig},
keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and
information sciences, FOS: Computer and information sciences},
title = {MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
Applications},
publisher = {arXiv},
year = {2017},
copyright = {arXiv.org perpetual, non-exclusive license},
},
@inproceedings{shufflenet,
title = {ShuffleNet: An Extremely Efficient Convolutional Neural Network for
Mobile Devices},
url = {http://dx.doi.org/10.1109/CVPR.2018.00716},
DOI = {10.1109/cvpr.2018.00716},
booktitle = {2018 IEEE/CVF Conference on Computer Vision and Pattern
Recognition},
publisher = {IEEE},
author = {Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian},
year = {2018},
month = jun,
},
@article{bg_svm,
title = {Support-vector networks},
author = {Cortes, Corinna and Vapnik, Vladimir},
journal = {Machine learning},
volume = {20},
number = {3},
pages = {273--297},
year = {1995},
publisher = {Springer},
},
@article{bg_kmeans,
author = {Lloyd, S.},
journal = {IEEE Transactions on Information Theory},
title = {Least squares quantization in PCM},
year = {1982},
volume = {28},
number = {2},
pages = {129-137},
keywords = {Noise;Quantization (signal);Voltage;Receivers;Pulse
modulation;Sufficient conditions;Stochastic processes;Probabilistic
logic;Urban areas;Q measurement},
doi = {10.1109/TIT.1982.1056489},
},
@inproceedings{bg_dbscan,
added-at = {2023-12-13T07:32:13.000+0100},
author = {Ester, Martin and Kriegel, Hans-Peter and Sander, Jörg and Xu,
Xiaowei},
biburl = {
https://www.bibsonomy.org/bibtex/279a9f3560daefa3775bd35543b4482e1/admin
},
booktitle = {KDD},
crossref = {conf/kdd/1996},
editor = {Simoudis, Evangelos and Han, Jiawei and Fayyad, Usama M.},
ee = {http://www.aaai.org/Library/KDD/1996/kdd96-037.php},
interhash = {ba33e4d6b4e5b26bd9f543f26b7d250a},
intrahash = {79a9f3560daefa3775bd35543b4482e1},
isbn = {1-57735-004-9},
keywords = {},
pages = {226-231},
publisher = {AAAI Press},
timestamp = {2023-12-13T07:32:13.000+0100},
title = {A Density-Based Algorithm for Discovering Clusters in Large Spatial
Databases with Noise.},
url = {http://dblp.uni-trier.de/db/conf/kdd/kdd96.html#EsterKSX96},
year = 1996,
},
@article{bg_pca,
author = {Karl Pearson F.R.S.},
title = {LIII. On lines and planes of closest fit to systems of points in
space},
journal = {The London, Edinburgh, and Dublin Philosophical Magazine and
Journal of Science},
volume = {2},
number = {11},
pages = {559-572},
year = {1901},
publisher = {Taylor & Francis},
doi = {10.1080/14786440109462720},
},
@article{bg_infomax,
author = {Linsker, R.},
journal = {Computer},
title = {Self-organization in a perceptual network},
year = {1988},
volume = {21},
number = {3},
pages = {105-117},
keywords = {Intelligent networks;Biological information
theory;Circuits;Biology computing;Animal
structures;Neuroscience;Genetics;System testing;Neural
networks;Constraint theory},
doi = {10.1109/2.36},
},
@article{bg_slam,
title = {On the Representation and Estimation of Spatial Uncertainty},
volume = {5},
ISSN = {1741-3176},
url = {http://dx.doi.org/10.1177/027836498600500404},
DOI = {10.1177/027836498600500404},
number = {4},
journal = {The International Journal of Robotics Research},
publisher = {SAGE Publications},
author = {Smith, Randall C. and Cheeseman, Peter},
year = {1986},
month = dec,
pages = {56--68},
},
@article{roc_vs_prc2,
title = {Context discovery for anomaly detection},
volume = {19},
ISSN = {2364-4168},
url = {http://dx.doi.org/10.1007/s41060-024-00586-x},
DOI = {10.1007/s41060-024-00586-x},
number = {1},
journal = {International Journal of Data Science and Analytics},
publisher = {Springer Science and Business Media LLC},
author = {Calikus, Ece and Nowaczyk, Slawomir and Dikmen, Onur},
year = {2024},
month = jun,
pages = {99--113},
},
@article{roc_vs_prc,
title = {On the evaluation of unsupervised outlier detection: measures,
datasets, and an empirical study},
volume = {30},
ISSN = {1573-756X},
url = {http://dx.doi.org/10.1007/s10618-015-0444-8},
DOI = {10.1007/s10618-015-0444-8},
number = {4},
journal = {Data Mining and Knowledge Discovery},
publisher = {Springer Science and Business Media LLC},
author = {Campos, Guilherme O. and Zimek, Arthur and Sander, J\"{o}rg and
Campello, Ricardo J. G. B. and Micenková, Barbora and Schubert, Erich
and Assent, Ira and Houle, Michael E.},
year = {2016},
month = jan,
pages = {891--927},
},
@inproceedings{roc,
title = {Basic principles of ROC analysis},
author = {Metz, Charles E},
booktitle = {Seminars in nuclear medicine},
volume = {8},
number = {4},
pages = {283--298},
year = {1978},
organization = {Elsevier},
},
@article{prc,
title = {A critical investigation of recall and precision as measures of
retrieval system performance},
volume = {7},
ISSN = {1558-2868},
url = {http://dx.doi.org/10.1145/65943.65945},
DOI = {10.1145/65943.65945},
number = {3},
journal = {ACM Transactions on Information Systems},
publisher = {Association for Computing Machinery (ACM)},
author = {Raghavan, Vijay and Bollmann, Peter and Jung, Gwang S.},
year = {1989},
month = jul,
pages = {205--229},
},
@article{zscore,
title = {Advanced engineering mathematics},
author = {Kreyszig, Erwin and Stroud, K and Stephenson, G},
journal = {Integration},
volume = {9},
number = {4},
pages = {1014},
year = {2008},
publisher = {John Wiley \& Sons},
}

Binary file not shown. (93 KiB before → 85 KiB after)

Binary file not shown. (95 KiB before → 88 KiB after)

Binary file not shown. (1.4 MiB before → 1.4 MiB after)

Binary file not shown. (220 KiB before → 211 KiB after)

Binary file not shown. (31 KiB before → 26 KiB after)

Binary file not shown. (45 KiB before → 37 KiB after)

Binary file not shown. (42 KiB before → 36 KiB after)

Binary file not shown. (732 KiB before → 718 KiB after)

Binary file not shown. (688 KiB before → 691 KiB after)

Binary file not shown. (380 KiB before → 365 KiB after)

View File

@@ -0,0 +1,11 @@
-- drop-images.lua
-- Replaces all images (figures, graphics) with a short placeholder.
function Image(el) return pandoc.Str("[image omitted]") end
-- For LaTeX figures that are still raw
function RawBlock(el)
if el.format == "tex" and el.text:match("\\begin%s*{%s*figure%s*}") then
return pandoc.Plain({pandoc.Str("[figure omitted]")})
end
end
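A quick sanity check for this filter (a sketch: pandoc is assumed to be on PATH and the filter file to sit in the working directory; the piped-in snippet is made up):

echo '\begin{figure}\includegraphics{plot.png}\end{figure}' \
  | pandoc -f latex -t plain --lua-filter=drop-images.lua
# expected output: [image omitted]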

View File

@@ -0,0 +1,11 @@
-- drop-tables.lua
-- Removes LaTeX tabular and tabularx environments (and their contents).
function RawBlock(el)
if el.format == "tex" then
-- Check for tabular or tabularx environment
if el.text:match("\\begin%s*{%s*tabularx?%s*}") then
return pandoc.Plain({pandoc.Str("[table omitted]")})
end
end
end
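Note that pandoc's LaTeX reader usually parses tabular environments into native Table nodes rather than raw TeX blocks, so this RawBlock branch only fires when an environment survives as raw TeX; that is presumably why tex2plaintext.sh further below strips tabulars with perl before calling pandoc. A hedged invocation sketch (flat.tex is a stand-in name):

pandoc -f latex -t plain --lua-filter=drop-tables.lua flat.tex
# any tabular that reaches the filter as raw TeX comes out as: [table omitted]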

View File

@@ -0,0 +1,43 @@
-- keep-citations.lua
-- Replace citations with a placeholder and eat any preceding space.
local PH = "[citation]"
-- Pandoc-native citations (if the reader produced Cite nodes)
function Cite(el) return pandoc.Str(PH) end
-- Raw LaTeX \cite-like macros (when not parsed as Cite)
function RawInline(el)
-- note: "%a-" is a lazy run of letters; a stray "*" after it would be matched literally
if el.format and el.format:match("tex") and el.text:match("\\%a-cite%*?") then
return pandoc.Str(PH)
end
end
-- Remove a single leading Space before our placeholder
local function squash_spaces(inlines)
local out = {}
local i = 1
while i <= #inlines do
local cur = inlines[i]
local nxt = inlines[i + 1]
if cur and cur.t == "Space" and nxt and nxt.t == "Str" and nxt.text == PH then
table.insert(out, nxt)
i = i + 2
else
table.insert(out, cur)
i = i + 1
end
end
return out
end
function Para(el)
el.content = squash_spaces(el.content)
return el
end
function Plain(el)
el.content = squash_spaces(el.content)
return el
end
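A small round-trip that exercises both halves of the filter, the Cite replacement and the space squashing (a sketch; pandoc is assumed to be on PATH, and the citation key is borrowed from the bib file above):

echo 'As shown in \cite{deep_svdd}, scores rise.' \
  | pandoc -f latex -t plain --lua-filter=keep-citations.lua
# expected output: As shown in[citation], scores rise.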

View File

@@ -0,0 +1,48 @@
-- math-omit.lua
-- Replace any math with a placeholder and ensure a space before it when appropriate.
local PH = "[math omitted]"
function Math(el)
-- Emit the placeholder as a Str; spacing is fixed in Para/Plain below.
return pandoc.Str(PH)
end
local function ensure_space_before_ph(inlines)
local out = {}
for i = 1, #inlines do
local cur = inlines[i]
if cur.t == "Str" and cur.text == PH then
local prev = out[#out]
local need_space = true
-- No space if it's the first token in the block
if not prev then
need_space = false
elseif prev.t == "Space" then
need_space = false
elseif prev.t == "Str" then
-- If previous char is an opening bracket/paren/slash/hyphen or whitespace, skip
local last = prev.text:sub(-1)
if last:match("[%(%[%{%/%-]") or last:match("%s") then
need_space = false
end
end
if need_space then table.insert(out, pandoc.Space()) end
table.insert(out, cur)
else
table.insert(out, cur)
end
end
return out
end
function Para(el)
el.content = ensure_space_before_ph(el.content)
return el
end
function Plain(el)
el.content = ensure_space_before_ph(el.content)
return el
end
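The math filter can be exercised the same way (again a sketch, with pandoc assumed on PATH):

echo 'The score $s=(x-\mu)/\sigma$ is thresholded.' \
  | pandoc -f latex -t plain --lua-filter=math-omit.lua
# expected output: The score [math omitted] is thresholded.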

View File

@@ -15,6 +15,8 @@
let
pkgs = import nixpkgs { inherit system; };
aspellWithDicts = pkgs.aspellWithDicts (d: [ d.en ]);
latex-packages = with pkgs; [
texlive.combined.scheme-full
which
@@ -26,16 +28,42 @@
zathura
wmctrl
python312
pandoc
pandoc-lua-filters
];
filtersPath = "${pkgs.pandoc-lua-filters}/share/pandoc/filters";
in
{
devShell = pkgs.mkShell {
buildInputs = [
latex-packages
dev-packages
aspellWithDicts
];
};
shellHook = ''
set -eu
# local folder in your repo to reference in commands
link_target="pandoc-filters"
# refresh symlink each time you enter the shell
ln -sfn ${filtersPath} "$link_target"
echo "Linked $link_target -> ${filtersPath}"
# (optional) write a defaults file that uses the relative symlink
if [ ! -f pandoc.defaults.yaml ]; then
cat > pandoc.defaults.yaml <<'YAML'
from: latex
to: plain
wrap: none
lua-filter:
- pandoc-filters/latex-hyphen.lua
- pandoc-filters/pandoc-quotes.lua
YAML
echo "Wrote pandoc.defaults.yaml"
fi
'';
}
);
}
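After entering the shell with nix develop, the hook's effects can be checked roughly like this (a sketch; Main.tex stands in for the actual document):

ls -l pandoc-filters
# -> symlink into the pandoc-lua-filters store path
pandoc --defaults pandoc.defaults.yaml Main.tex -o Main.txt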

thesis/tex2plaintext.sh (new executable file, 61 lines)
View File

@@ -0,0 +1,61 @@
#!/usr/bin/env bash
set -euo pipefail
# Usage:
# ./tex2plaintext.sh [INPUT_TEX] [OUT_BASENAME]
#
# Defaults:
# INPUT_TEX = Main.tex (your original file name)
# OUT_BASENAME = thesis (produces thesis.txt, thesis_part1.txt, thesis_part2.txt)
INPUT_TEX="${1:-Main.tex}"
OUT_BASE="${2:-thesis}"
FLAT_TEX="flat.tex"
NO_TABLES_TEX="flat_notables.tex"
PLAIN_TXT="${OUT_BASE}.txt"
PART1_TXT="${OUT_BASE}_part1.txt"
PART2_TXT="${OUT_BASE}_part2.txt"
MARKER="Data and Preprocessing"
echo "[1/5] Flattening with latexpand -> ${FLAT_TEX}"
latexpand "${INPUT_TEX}" > "${FLAT_TEX}"
echo "[2/5] Removing tabular/tabularx environments -> ${NO_TABLES_TEX}"
# Replace entire tabular / tabularx environments with a placeholder
perl -0777 -pe 's/\\begin\{(tabularx?)\}.*?\\end\{\1\}/[table omitted]/gs' \
"${FLAT_TEX}" > "${NO_TABLES_TEX}"
echo "[3/5] Converting to plain text with pandoc -> ${PLAIN_TXT}"
pandoc -f latex -t plain --wrap=none \
--lua-filter=filters/keep-citations.lua \
--lua-filter=filters/math-omit.lua \
"${NO_TABLES_TEX}" -o "${PLAIN_TXT}"
echo "[4/5] Replacing [] placeholders with [figure]"
sed -i 's/\[\]/[figure]/g' "${PLAIN_TXT}"
echo "[5/5] Splitting ${PLAIN_TXT} before the marker line: \"${MARKER}\""
# Ensure the marker exists exactly on its own line
if ! grep -xq "${MARKER}" "${PLAIN_TXT}"; then
echo "ERROR: Marker line not found exactly as \"${MARKER}\" in ${PLAIN_TXT}."
echo " (It must be the only content on that line.)"
exit 1
fi
# Clean previous outputs if present
rm -f -- "${PART1_TXT}" "${PART2_TXT}"
# Split so the marker line becomes the FIRST line of part 2
awk -v marker="${MARKER}" -v out1="${PART1_TXT}" -v out2="${PART2_TXT}" '
BEGIN { current = out1 }
$0 == marker { current = out2; print $0 > current; next }
{ print $0 > current }
' "${PLAIN_TXT}"
echo "Done."
echo " - ${PLAIN_TXT}"
echo " - ${PART1_TXT}"
echo " - ${PART2_TXT}"

View File

@@ -1,3 +1,9 @@
\addcontentsline{toc}{chapter}{Abstract (English)}
\begin{center}\Large\bfseries Abstract (English)\end{center}\vspace*{1cm}\noindent
Write some fancy abstract here!
\addcontentsline{toc}{chapter}{Abstract}
\begin{center}\Large\bfseries Abstract\end{center}\vspace*{1cm}\noindent
Autonomous robots are increasingly used in search and rescue (SAR) missions. In these missions, LiDAR sensors are often the most important source of environmental data. However, LiDAR data can degrade under hazardous conditions, especially when airborne particles such as smoke or dust are present. This degradation can lead to errors in mapping and navigation and may endanger both the robot and humans. Therefore, robots need a way to estimate the reliability of their LiDAR data so that they can make better-informed decisions.
\bigskip
This thesis investigates whether anomaly detection methods can be used to quantify LiDAR data degradation caused by airborne particles such as smoke and dust. We apply a semi-supervised deep learning approach called DeepSAD, which produces an anomaly score for each LiDAR scan, serving as a measure of data reliability.
\bigskip
We evaluate this method against baseline methods on a subterranean dataset that includes LiDAR scans degraded by artificial smoke. Our results show that DeepSAD consistently outperforms the baselines and can clearly distinguish degraded from normal scans. At the same time, we find that the limited availability of labeled data and the lack of robust ground truth remain major challenges. Despite these limitations, our work demonstrates that anomaly detection methods are a promising tool for LiDAR degradation quantification in SAR scenarios.

View File

@@ -1,3 +1,3 @@
\addcontentsline{toc}{chapter}{Acknowledgements}
\begin{center}\Large\bfseries Acknowledgements\end{center}\vspace*{1cm}\noindent
Here you can tell us, how thankful you are for this amazing template ;)
\addcontentsline{toc}{chapter}{Artificial Intelligence Usage Disclaimer}
\begin{center}\Large\bfseries Artificial Intelligence Usage Disclaimer\end{center}\vspace*{1cm}\noindent
During the creation of this thesis, an LLM-based Artificial Intelligence tool was used for stylistic and grammatical revision of the author's own work.

View File

@@ -1,6 +1,6 @@
{ pkgs, ... }:
let
native_dependencies = with pkgs.python312Packages; [
native_dependencies = with pkgs.python311Packages; [
torch-bin
torchvision-bin
aggdraw # for visualtorch
@@ -16,7 +16,7 @@ in
packages = native_dependencies ++ tools;
languages.python = {
enable = true;
package = pkgs.python312;
package = pkgs.python311;
uv = {
enable = true;
sync.enable = true;

View File

@@ -12,7 +12,7 @@ import numpy as np
import polars as pl
# CHANGE THIS IMPORT IF YOUR LOADER MODULE IS NAMED DIFFERENTLY
from plot_scripts.load_results import load_pretraining_results_dataframe
from load_results import load_pretraining_results_dataframe
# ----------------------------
# Config
@@ -78,8 +78,8 @@ def build_arch_curves_from_df(
"overall": (dims, means, stds),
} }
"""
if "split" not in df.columns:
raise ValueError("Expected 'split' column in AE dataframe.")
# if "split" not in df.columns:
# raise ValueError("Expected 'split' column in AE dataframe.")
if "scores" not in df.columns:
raise ValueError("Expected 'scores' column in AE dataframe.")
if "network" not in df.columns or "latent_dim" not in df.columns:
@@ -88,7 +88,7 @@ def build_arch_curves_from_df(
raise ValueError(f"Expected '{label_field}' column in AE dataframe.")
# Keep only test split
df = df.filter(pl.col("split") == "test")
# df = df.filter(pl.col("split") == "test")
groups: dict[tuple[str, int], dict[str, list[float]]] = {}
@@ -201,7 +201,7 @@ def plot_multi_loss_curve(arch_results, title, output_path, colors=None):
plt.xlabel("Latent Dimensionality")
plt.ylabel("Test Loss")
plt.title(title)
# plt.title(title)
plt.legend()
plt.grid(True, alpha=0.3)
plt.xticks(all_dims)

View File

@@ -171,28 +171,28 @@ def plot_combined_timeline(
range(num_bins), near_sensor_binned, color=color, linestyle="--", alpha=0.6
)
# Add vertical lines for manually labeled frames if available
if all_paths[i].with_suffix(".npy").name in manually_labeled_anomaly_frames:
begin_frame, end_frame = manually_labeled_anomaly_frames[
all_paths[i].with_suffix(".npy").name
]
# Convert frame numbers to normalized timeline positions
begin_pos = (begin_frame / exp_len) * (num_bins - 1)
end_pos = (end_frame / exp_len) * (num_bins - 1)
# # Add vertical lines for manually labeled frames if available
# if all_paths[i].with_suffix(".npy").name in manually_labeled_anomaly_frames:
# begin_frame, end_frame = manually_labeled_anomaly_frames[
# all_paths[i].with_suffix(".npy").name
# ]
# # Convert frame numbers to normalized timeline positions
# begin_pos = (begin_frame / exp_len) * (num_bins - 1)
# end_pos = (end_frame / exp_len) * (num_bins - 1)
# Add vertical lines with matching color and loose dotting
ax1.axvline(
x=begin_pos,
color=color,
linestyle=":",
alpha=0.6,
)
ax1.axvline(
x=end_pos,
color=color,
linestyle=":",
alpha=0.6,
)
# # Add vertical lines with matching color and loose dotting
# ax1.axvline(
# x=begin_pos,
# color=color,
# linestyle=":",
# alpha=0.6,
# )
# ax1.axvline(
# x=end_pos,
# color=color,
# linestyle=":",
# alpha=0.6,
# )
# Customize axes
ax1.set_xlabel("Normalized Timeline")
@@ -202,7 +202,7 @@ def plot_combined_timeline(
ax1.set_ylabel("Missing Points (%)")
ax2.set_ylabel("Points with <0.5m Range (%)")
plt.title(title)
# plt.title(title)
# Create legends without fixed positions
# First get all lines and labels for experiments
@@ -221,7 +221,8 @@ def plot_combined_timeline(
)
# Create single legend in top right corner with consistent margins
fig.legend(all_handles, all_labels, loc="upper right", borderaxespad=4.8)
# fig.legend(all_handles, all_labels, loc="upper right", borderaxespad=2.8)
fig.legend(all_handles, all_labels, bbox_to_anchor=(0.95, 0.99))
plt.grid(True, alpha=0.3)

View File

@@ -122,8 +122,8 @@ def plot_data_points_pie(normal_experiment_frames, anomaly_experiment_frames):
# prepare data for pie chart
labels = [
"Normal Lidar Frames\nNon-Degraded Pointclouds",
"Anomalous Lidar Frames\nDegraded Pointclouds",
"Normal Lidar Frames\nNon-Degraded Point Clouds",
"Anomalous Lidar Frames\nDegraded Point Clouds",
]
sizes = [total_normal_frames, total_anomaly_frames]
explode = (0.1, 0) # explode the normal slice
@@ -150,9 +150,9 @@ def plot_data_points_pie(normal_experiment_frames, anomaly_experiment_frames):
va="center",
color="black",
)
plt.title(
"Distribution of Normal and Anomalous\nPointclouds in all Experiments (Lidar Frames)"
)
# plt.title(
# "Distribution of Normal and Anomalous\nPointclouds in all Experiments (Lidar Frames)"
# )
plt.tight_layout()
# save the plot

View File

@@ -5,7 +5,6 @@ from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from pointcloudset import Dataset
# define data path containing the bag files
all_data_path = Path("/home/fedex/mt/data/subter")
@@ -82,7 +81,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
plt.figure(figsize=(10, 5))
plt.hist(missing_points_normal, bins=100, alpha=0.5, label="Normal Experiments")
plt.hist(missing_points_anomaly, bins=100, alpha=0.5, label="Anomaly Experiments")
plt.title(title)
# plt.title(title)
plt.xlabel("Number of Missing Points")
plt.ylabel("Number of Pointclouds")
plt.legend()
@@ -109,7 +108,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
label="Anomaly Experiments",
orientation="horizontal",
)
plt.title(title)
# plt.title(title)
plt.xlabel("Number of Pointclouds")
plt.ylabel("Number of Missing Points")
plt.legend()
@@ -142,7 +141,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
label="Anomaly Experiments",
density=True,
)
plt.title(title)
# plt.title(title)
plt.xlabel("Number of Missing Points")
plt.ylabel("Density")
plt.legend()
@@ -169,7 +168,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
label="Anomaly Experiments (With Artifical Smoke)",
density=True,
)
plt.title(title)
# plt.title(title)
plt.xlabel("Percentage of Missing Lidar Measurements")
plt.ylabel("Density")
# display the x axis as percentages
@@ -210,7 +209,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
alpha=0.5,
label="Anomaly Experiments",
)
plt.title(title)
# plt.title(title)
plt.xlabel("Number of Missing Points")
plt.ylabel("Normalized Density")
plt.legend()

View File

@@ -5,7 +5,6 @@ from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from pointcloudset import Dataset
# define data path containing the bag files
all_data_path = Path("/home/fedex/mt/data/subter")
@@ -164,7 +163,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
plt.gca().set_yticklabels(
["{:.0f}%".format(y * 100) for y in plt.gca().get_yticks()]
)
plt.title("Particles Closer than 0.5m to the Sensor")
# plt.title("Particles Closer than 0.5m to the Sensor")
plt.ylabel("Percentage of measurements closer than 0.5m")
plt.tight_layout()
plt.savefig(output_datetime_path / f"particles_near_sensor_boxplot_{rt}.png")
@@ -186,7 +185,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
plt.gca().set_yticklabels(
["{:.0f}%".format(y * 100) for y in plt.gca().get_yticks()]
)
plt.title("Particles Closer than 0.5m to the Sensor")
# plt.title("Particles Closer than 0.5m to the Sensor")
plt.ylabel("Percentage of measurements closer than 0.5m")
plt.ylim(0, 0.05)
plt.tight_layout()

View File

@@ -112,18 +112,27 @@ cmap = get_colormap_with_special_missing_color(
args.colormap, args.missing_data_color, args.reverse_colormap
)
# --- Create a figure with 2 vertical subplots ---
# --- Create a figure with 2 vertical subplots and move titles to the left ---
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(10, 5))
for ax, frame, title in zip(
# leave extra left margin for the left-side labels
fig.subplots_adjust(left=0.14, hspace=0.05)
for ax, frame, label in zip(
(ax1, ax2),
(frame1, frame2),
(
"Projection of Lidar Frame without Degradation",
"Projection of Lidar Frame with Degradation (Artifical Smoke)",
),
("(a)", "(b)"),
):
im = ax.imshow(frame, cmap=cmap, aspect="auto", vmin=global_vmin, vmax=global_vmax)
ax.set_title(title)
# place the "title" to the left, vertically centered relative to the axes
ax.text(
-0.02, # negative x places text left of the axes (in axes coordinates)
0.5,
label,
transform=ax.transAxes,
va="center",
ha="right",
fontsize=12,
)
ax.axis("off")
# Adjust layout to fit margins for a paper

View File

@@ -260,11 +260,11 @@ def baseline_transform(clean: np.ndarray, other: np.ndarray, mode: str):
def pick_method_series(gdf: pl.DataFrame, label: str) -> Optional[np.ndarray]:
if label == "DeepSAD (LeNet)":
if label == "DeepSAD LeNet":
sel = gdf.filter(
(pl.col("network") == "subter_LeNet") & (pl.col("model") == "deepsad")
)
elif label == "DeepSAD (efficient)":
elif label == "DeepSAD Efficient":
sel = gdf.filter(
(pl.col("network") == "subter_efficient") & (pl.col("model") == "deepsad")
)
@@ -311,8 +311,8 @@ def compare_two_experiments_progress(
include_stats: bool = True,
):
methods = [
"DeepSAD (LeNet)",
"DeepSAD (efficient)",
"DeepSAD LeNet",
"DeepSAD Efficient",
"OCSVM",
"Isolation Forest",
]
@@ -392,8 +392,8 @@ def compare_two_experiments_progress(
axes = axes.ravel()
method_to_axidx = {
"DeepSAD (LeNet)": 0,
"DeepSAD (efficient)": 1,
"DeepSAD LeNet": 0,
"DeepSAD Efficient": 1,
"OCSVM": 2,
"Isolation Forest": 3,
}
@@ -404,6 +404,8 @@ def compare_two_experiments_progress(
if not stats_available:
print("[WARN] One or both stats missing. Subplots will include methods only.")
letters = ["a", "b", "c", "d"]
for label, axidx in method_to_axidx.items():
ax = axes[axidx]
yc = curves_clean.get(label)
@@ -412,7 +414,7 @@ def compare_two_experiments_progress(
ax.text(
0.5, 0.5, "No data", ha="center", va="center", transform=ax.transAxes
)
ax.set_title(label)
ax.set_title(f"({letters[axidx]}) {label}")
ax.grid(True, alpha=0.3)
continue
@@ -435,6 +437,7 @@ def compare_two_experiments_progress(
)
ax.set_ylabel(y_label)
ax.set_title(label)
ax.set_title(f"({letters[axidx]}) {label}")
ax.grid(True, alpha=0.3)
# Right axis #1 (closest to plot): Missing points (%)
@@ -550,11 +553,11 @@ def compare_two_experiments_progress(
for ax in axes:
ax.set_xlabel("Progress through experiment (%)")
fig.suptitle(
f"AD Method vs Stats Inference — progress-normalized\n"
f"Transform: z-score normalized to non-degraded experiment | EMA(α={EMA_ALPHA_METHODS})",
fontsize=14,
)
# fig.suptitle(
# f"AD Method vs Stats Inference — progress-normalized\n"
# f"Transform: z-score normalized to non-degraded experiment | EMA(α={EMA_ALPHA_METHODS})",
# fontsize=14,
# )
fig.tight_layout(rect=[0, 0, 1, 0.99])
out_name = (

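For reference, the transform named in the commented-out suptitle (z-score normalized to the non-degraded experiment, then EMA smoothing) presumably amounts to the standard pair of formulas, with alpha = EMA_ALPHA_METHODS:

z_t = \frac{x_t - \mu_{\mathrm{clean}}}{\sigma_{\mathrm{clean}}}, \qquad \tilde{z}_t = \alpha z_t + (1 - \alpha)\,\tilde{z}_{t-1}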
View File

@@ -161,7 +161,7 @@ def _ensure_dim_axes(fig_title: str):
fig, axes = plt.subplots(
nrows=4, ncols=2, figsize=(12, 16), constrained_layout=True
)
fig.suptitle(fig_title, fontsize=14)
# fig.suptitle(fig_title, fontsize=14)
axes = axes.ravel()
return fig, axes
@@ -213,11 +213,13 @@ def plot_grid_from_df(
legend_labels = []
have_legend = False
letters = ["a", "b", "c", "d", "e", "f", "g", "h"]
for i, dim in enumerate(LATENT_DIMS):
if i >= 7:
break # last slot reserved for legend
ax = axes[i]
ax.set_title(f"Latent Dim. = {dim}")
ax.set_title(f"({letters[i]}) Latent Dim. = {dim}")
ax.grid(True, alpha=0.3)
if kind == "roc":

View File

@@ -260,9 +260,9 @@ def make_figures_for_dim(
fig_roc, axes = plt.subplots(
nrows=2, ncols=1, figsize=(7, 10), constrained_layout=True
)
fig_roc.suptitle(
f"ROC — {EVALS_LABELS[eval_type]} — Latent Dim.={latent_dim}", fontsize=14
)
# fig_roc.suptitle(
# f"ROC — {EVALS_LABELS[eval_type]} — Latent Dim.={latent_dim}", fontsize=14
# )
_plot_panel(
axes[0],
@@ -272,7 +272,7 @@ def make_figures_for_dim(
latent_dim=latent_dim,
kind="roc",
)
axes[0].set_title("DeepSAD (LeNet) + Baselines")
axes[0].set_title("(a) DeepSAD (LeNet) + Baselines")
_plot_panel(
axes[1],
@@ -282,7 +282,7 @@ def make_figures_for_dim(
latent_dim=latent_dim,
kind="roc",
)
axes[1].set_title("DeepSAD (Efficient) + Baselines")
axes[1].set_title("(b) DeepSAD (Efficient) + Baselines")
out_roc = out_dir / f"roc_{latent_dim}_{eval_type}.png"
fig_roc.savefig(out_roc, dpi=150, bbox_inches="tight")
@@ -292,9 +292,9 @@ def make_figures_for_dim(
fig_prc, axes = plt.subplots(
nrows=2, ncols=1, figsize=(7, 10), constrained_layout=True
)
fig_prc.suptitle(
f"PRC — {EVALS_LABELS[eval_type]} — Latent Dim.={latent_dim}", fontsize=14
)
# fig_prc.suptitle(
# f"PRC — {EVALS_LABELS[eval_type]} — Latent Dim.={latent_dim}", fontsize=14
# )
_plot_panel(
axes[0],
@@ -304,7 +304,7 @@ def make_figures_for_dim(
latent_dim=latent_dim,
kind="prc",
)
axes[0].set_title("DeepSAD (LeNet) + Baselines")
axes[0].set_title("(a)")
_plot_panel(
axes[1],
@@ -314,7 +314,7 @@ def make_figures_for_dim(
latent_dim=latent_dim,
kind="prc",
)
axes[1].set_title("DeepSAD (Efficient) + Baselines")
axes[1].set_title("(b)")
out_prc = out_dir / f"prc_{latent_dim}_{eval_type}.png"
fig_prc.savefig(out_prc, dpi=150, bbox_inches="tight")

View File

@@ -6,6 +6,8 @@ readme = "README.md"
requires-python = ">=3.11.9"
dependencies = [
"pandas>=2.3.2",
"pointcloudset>=0.11.0",
"polars>=1.33.0",
"pyarrow>=21.0.0",
"tabulate>=0.9.0",
]

tools/uv.lock (generated, 1901 lines)

File diff suppressed because it is too large.