Compare commits

...

52 Commits

Author SHA1 Message Date
Jan Kowalczyk  7b5accb6c5  fixed plots  2025-10-21 19:04:19 +02:00
Jan Kowalczyk  8f983b890f  formatting  2025-10-19 17:39:42 +02:00
Jan Kowalczyk  6cd2c7fbef  abstract lidar capitalization  2025-10-19 17:34:38 +02:00
Jan Kowalczyk  62c424cd54  grammarly done  2025-10-19 17:29:31 +02:00
Jan Kowalczyk  bd9171f68e  grammarly data chapter  2025-10-19 16:46:29 +02:00
Jan Kowalczyk  efdc33035b  grammarly part 1 done  2025-10-19 16:27:22 +02:00
Jan Kowalczyk  f2c8fe241d  cleanup  2025-10-18 18:27:13 +02:00
Jan Kowalczyk  ece887860b  z-score rework  2025-10-18 18:01:41 +02:00
Jan Kowalczyk  c3830db913  metrics section draft  2025-10-18 17:23:18 +02:00
Jan Kowalczyk  3d21171a40  raw metrics section  2025-10-18 17:02:22 +02:00
Jan Kowalczyk  5aca00ad67  better grammarly prep  2025-10-18 12:47:16 +02:00
Jan Kowalczyk  374420727b  cleanup for raw txt (grammar check)  2025-10-18 12:19:26 +02:00
Jan Kowalczyk  8697c07c0f  reworked baselines  2025-10-18 11:28:12 +02:00
Jan Kowalczyk  5287f2c557  grammarly deepsad chap  2025-10-12 17:26:07 +02:00
Jan Kowalczyk  b7faf6e1b6  grammarly wip (bg chap done)  2025-10-12 16:56:49 +02:00
Jan Kowalczyk  0354ad37e1  grammarly intro  2025-10-12 16:03:27 +02:00
Jan Kowalczyk  32ab4e6a11  fixed all reasonable warnings  2025-10-12 15:45:13 +02:00
Jan Kowalczyk  055d403dfb  aspell start  2025-10-11 18:09:18 +02:00
Jan Kowalczyk  28b6eba094  broken pdf  2025-10-11 16:37:34 +02:00
Jan Kowalczyk  436a25df11  broken pdf  2025-10-11 16:37:19 +02:00
Jan Kowalczyk  5d0610a875  feedback wip  2025-10-11 16:37:10 +02:00
Jan Kowalczyk  545b65d3d5  feedback WIP  2025-10-11 15:58:44 +02:00
Jan Kowalczyk  8db244901e  feedback wip  2025-10-11 15:21:53 +02:00
Jan Kowalczyk  72afe9ebdc  nicer looking abstract  2025-10-11 13:38:39 +02:00
Jan Kowalczyk  81c1e5b7af  added abstract  2025-09-29 19:00:58 +02:00
Jan Kowalczyk  6040f5f144  draft  2025-09-29 18:54:35 +02:00
Jan Kowalczyk  d5f5a09d6f  wip  2025-09-29 18:20:30 +02:00
Jan Kowalczyk  a6f5ecaba2  wip  2025-09-29 11:02:07 +02:00
Jan Kowalczyk  1f3e607e8d  wip  2025-09-29 10:40:26 +02:00
Jan Kowalczyk  3bf457f2cf  wip  2025-09-29 10:17:36 +02:00
Jan Kowalczyk  3eb7e662b0  wip  2025-09-28 20:07:05 +02:00
Jan Kowalczyk  2411f8b1a7  shorter reinforcement bg  2025-09-28 19:17:47 +02:00
Jan Kowalczyk  fe45de00ca  hyperparam section & setup rework  2025-09-28 18:58:03 +02:00
Jan Kowalczyk  1e71600102  reworked deepsad procedure diagram  2025-09-28 16:12:24 +02:00
Jan Kowalczyk  d93f1a52a9  reworked lidar figure caption  2025-09-28 14:47:43 +02:00
Jan Kowalczyk  e34a374adc  wip overall small changes to figures  2025-09-28 14:35:10 +02:00
Jan Kowalczyk  f36477ed9b  updated captions and removed all comments  2025-09-28 13:20:39 +02:00
Jan Kowalczyk  52dabf0f89  wip, replaced bg figures  2025-09-28 12:50:58 +02:00
Jan Kowalczyk  e00d1a33e3  reworked results chpt  2025-09-27 19:01:59 +02:00
Jan Kowalczyk  c270783225  wip  2025-09-27 16:34:52 +02:00
Jan Kowalczyk  cfb77dccab  wip  2025-09-25 15:29:52 +02:00
Jan Kowalczyk  4c8df5cae0  wip results  2025-09-22 15:39:46 +02:00
Jan Kowalczyk  f93bbaeec1  wip conclusion  2025-09-22 14:13:03 +02:00
Jan Kowalczyk  9ec73c5992  results inference discussion  2025-09-22 09:41:58 +02:00
Jan Kowalczyk  8e7c210872  wip  2025-09-22 08:15:54 +02:00
Jan Kowalczyk  a20a4a0832  results ae section  2025-09-18 11:58:28 +02:00
Jan Kowalczyk  8f36bd2e07  new complete auc table  2025-09-17 11:43:38 +02:00
Jan Kowalczyk  936d2ecb6e  correct auc table scrip  2025-09-17 11:43:26 +02:00
Jan Kowalczyk  95867bde7a  table plot  2025-09-17 11:07:07 +02:00
Jan Kowalczyk  cc5a8d25d3  inference plots, results structure wip  2025-09-15 14:25:15 +02:00
Jan Kowalczyk  e20c2235ed  wip  2025-09-15 11:21:40 +02:00
Jan Kowalczyk  e7624d2786  wip inference  2025-09-15 11:21:30 +02:00
76 changed files with 7284 additions and 1390 deletions

View File

@@ -261,6 +261,80 @@ class IsoForest(object):
logger.info("Test Time: {:.3f}s".format(self.results["test_time"]))
logger.info("Finished testing.")
def inference(
self,
dataset: BaseADDataset,
device: str = "cpu",
n_jobs_dataloader: int = 0,
batch_size: int = 32,
):
"""Perform inference on the dataset using the trained Isolation Forest model."""
logger = logging.getLogger()
# Get inference data loader
_, _, inference_loader = dataset.loaders(
batch_size=batch_size, num_workers=n_jobs_dataloader
)
# Get data from loader
X = ()
idxs = []
file_ids = []
frame_ids = []
logger.info("Starting inference...")
start_time = time.time()
for data in inference_loader:
inputs, idx, (file_id, frame_id) = data
inputs = inputs.to(device)
if self.hybrid:
inputs = self.ae_net.encoder(inputs)
X_batch = inputs.view(inputs.size(0), -1)
X += (X_batch.cpu().data.numpy(),)
# Store indices and metadata
idxs.extend(idx.cpu().data.numpy().tolist())
file_ids.extend(file_id.cpu().data.numpy().tolist())
frame_ids.extend(frame_id.cpu().data.numpy().tolist())
X = np.concatenate(X)
# Get anomaly scores
scores = (-1.0) * self.model.decision_function(X)
scores = scores.flatten()
# Store inference results
self.inference_time = time.time() - start_time
self.inference_indices = np.array(idxs)
self.inference_file_ids = np.array(file_ids)
self.inference_frame_ids = np.array(frame_ids)
# Create index mapping similar to DeepSAD trainer
self.inference_index_mapping = {
"indices": self.inference_indices,
"file_ids": self.inference_file_ids,
"frame_ids": self.inference_frame_ids,
}
# Log inference statistics
logger.info(f"Number of inference samples: {len(self.inference_indices)}")
logger.info(
f"Number of unique files: {len(np.unique(self.inference_file_ids))}"
)
logger.info("Inference Time: {:.3f}s".format(self.inference_time))
logger.info(
"Score statistics: "
f"min={scores.min():.3f}, "
f"max={scores.max():.3f}, "
f"mean={scores.mean():.3f}, "
f"std={scores.std():.3f}"
)
logger.info("Finished inference.")
return scores
def load_ae(self, dataset_name, model_path):
"""Load pretrained autoencoder from model_path for feature extraction in a hybrid Isolation Forest model."""

View File

@@ -453,6 +453,80 @@ class OCSVM(object):
logger.info("Test Time: {:.3f}s".format(self.results["test_time"]))
logger.info("Finished testing.")
def inference(
self,
dataset: BaseADDataset,
device: str = "cpu",
n_jobs_dataloader: int = 0,
batch_size: int = 32,
):
"""Perform inference on the dataset using the trained OC-SVM model."""
logger = logging.getLogger()
# Get inference data loader
_, _, inference_loader = dataset.loaders(
batch_size=batch_size, num_workers=n_jobs_dataloader
)
# Get data from loader
X = ()
idxs = []
file_ids = []
frame_ids = []
logger.info("Starting inference...")
start_time = time.time()
for data in inference_loader:
inputs, idx, (file_id, frame_id) = data
inputs = inputs.to(device)
if self.hybrid:
inputs = self.ae_net.encoder(inputs)
X_batch = inputs.view(inputs.size(0), -1)
X += (X_batch.cpu().data.numpy(),)
# Store indices and metadata
idxs.extend(idx.cpu().data.numpy().tolist())
file_ids.extend(file_id.cpu().data.numpy().tolist())
frame_ids.extend(frame_id.cpu().data.numpy().tolist())
X = np.concatenate(X)
# Get anomaly scores
scores = (-1.0) * self.model.decision_function(X)
scores = scores.flatten()
# Store inference results
self.inference_time = time.time() - start_time
self.inference_indices = np.array(idxs)
self.inference_file_ids = np.array(file_ids)
self.inference_frame_ids = np.array(frame_ids)
# Create index mapping similar to DeepSAD trainer
self.inference_index_mapping = {
"indices": self.inference_indices,
"file_ids": self.inference_file_ids,
"frame_ids": self.inference_frame_ids,
}
# Log inference statistics
logger.info(f"Number of inference samples: {len(self.inference_indices)}")
logger.info(
f"Number of unique files: {len(np.unique(self.inference_file_ids))}"
)
logger.info("Inference Time: {:.3f}s".format(self.inference_time))
logger.info(
"Score statistics: "
f"min={scores.min():.3f}, "
f"max={scores.max():.3f}, "
f"mean={scores.mean():.3f}, "
f"std={scores.std():.3f}"
)
logger.info("Finished inference.")
return scores
def load_ae(self, model_path, net_name, device="cpu"):
"""Load pretrained autoencoder from model_path for feature extraction in a hybrid OC-SVM model."""

View File

@@ -338,6 +338,8 @@ class SubTerInference(VisionDataset):
self.frame_ids = np.arange(self.data.shape[0], dtype=np.int32)
self.file_names = {0: experiment_file.name}
self.transform = transform if transform else transforms.ToTensor()
def __len__(self):
return len(self.data)

View File

@@ -638,57 +638,185 @@ def main(
cfg.save_config(export_json=xp_path + "/config.json")
elif action == "infer":
# Inference uses a deterministic, non-shuffled loader to preserve temporal order
dataset = load_dataset(
dataset_name,
cfg.settings["dataset_name"],
data_path,
normal_class,
known_outlier_class,
n_known_outlier_classes,
ratio_known_normal,
ratio_known_outlier,
ratio_pollution,
cfg.settings["normal_class"],
cfg.settings["known_outlier_class"],
cfg.settings["n_known_outlier_classes"],
cfg.settings["ratio_known_normal"],
cfg.settings["ratio_known_outlier"],
cfg.settings["ratio_pollution"],
random_state=np.random.RandomState(cfg.settings["seed"]),
k_fold_num=False,
inference=True,
)
# Log random sample of known anomaly classes if more than 1 class
if n_known_outlier_classes > 1:
logger.info("Known anomaly classes: %s" % (dataset.known_outlier_classes,))
# Initialize DeepSAD model and set neural network phi
deepSAD = DeepSAD(latent_space_dim, cfg.settings["eta"])
deepSAD.set_network(net_name)
# If specified, load Deep SAD model (center c, network weights, and possibly autoencoder weights)
if not load_model:
# --- Expect a model DIRECTORY (aligned with 'retest') ---
if (
(not load_model)
or (not Path(load_model).exists())
or (not Path(load_model).is_dir())
):
logger.error(
"For inference mode a model has to be loaded! Pass the --load_model option with the model path!"
"For inference mode a model directory has to be loaded! "
"Pass the --load_model option with the model directory path!"
)
return
load_model = Path(load_model)
# Resolve expected model artifacts (single-model / no k-fold suffixes)
deepsad_model_path = load_model / "model_deepsad.tar"
ae_model_path = load_model / "model_ae.tar"
ocsvm_model_path = load_model / "model_ocsvm.pkl"
isoforest_model_path = load_model / "model_isoforest.pkl"
# Sanity check model files exist
model_paths = [
deepsad_model_path,
ae_model_path,
ocsvm_model_path,
isoforest_model_path,
]
missing = [p.name for p in model_paths if not p.exists() or not p.is_file()]
if missing:
logger.error(
"The following model files do not exist in the provided model directory: "
+ ", ".join(missing)
)
return
deepSAD.load_model(model_path=load_model, load_ae=True, map_location=device)
logger.info("Loading model from %s." % load_model)
# Prepare output paths
inf_dir = Path(xp_path) / "inference"
inf_dir.mkdir(parents=True, exist_ok=True)
base_stem = Path(Path(dataset.root).stem) # keep your previous naming
# DeepSAD outputs (keep legacy filenames for backward compatibility)
deepsad_scores_path = inf_dir / Path(
base_stem.stem + "_deepsad_scores"
).with_suffix(".npy")
deepsad_outputs_path = inf_dir / Path(base_stem.stem + "_outputs").with_suffix(
".npy"
)
# Baselines
ocsvm_scores_path = inf_dir / Path(
base_stem.stem + "_ocsvm_scores"
).with_suffix(".npy")
isoforest_scores_path = inf_dir / Path(
base_stem.stem + "_isoforest_scores"
).with_suffix(".npy")
inference_results, all_outputs = deepSAD.inference(
dataset, device=device, n_jobs_dataloader=n_jobs_dataloader
)
inference_results_path = (
Path(xp_path)
/ "inference"
/ Path(Path(dataset.root).stem).with_suffix(".npy")
)
inference_outputs_path = (
Path(xp_path)
/ "inference"
/ Path(Path(dataset.root).stem + "_outputs").with_suffix(".npy")
# Common loader settings
_n_jobs = (
n_jobs_dataloader
if "n_jobs_dataloader" in locals()
else cfg.settings.get("n_jobs_dataloader", 0)
)
inference_results_path.parent.mkdir(parents=True, exist_ok=True)
np.save(inference_results_path, inference_results, fix_imports=False)
np.save(inference_outputs_path, all_outputs, fix_imports=False)
# ----------------- DeepSAD -----------------
deepSAD = DeepSAD(cfg.settings["latent_space_dim"], cfg.settings["eta"])
deepSAD.set_network(cfg.settings["net_name"])
deepSAD.load_model(
model_path=deepsad_model_path, load_ae=True, map_location=device
)
logger.info("Loaded DeepSAD model from %s.", deepsad_model_path)
deepsad_scores, deepsad_all_outputs = deepSAD.inference(
dataset, device=device, n_jobs_dataloader=_n_jobs
)
np.save(deepsad_scores_path, deepsad_scores)
# np.save(deepsad_outputs_path, deepsad_all_outputs)
logger.info(
f"Inference: median={np.median(inference_results)} mean={np.mean(inference_results)} min={inference_results.min()} max={inference_results.max()}"
"DeepSAD inference: median=%.6f mean=%.6f min=%.6f max=%.6f",
float(np.median(deepsad_scores)),
float(np.mean(deepsad_scores)),
float(np.min(deepsad_scores)),
float(np.max(deepsad_scores)),
)
# ----------------- OCSVM (hybrid) -----------------
ocsvm_scores = None
ocsvm = OCSVM(
kernel=cfg.settings["ocsvm_kernel"],
nu=cfg.settings["ocsvm_nu"],
hybrid=True,
latent_space_dim=cfg.settings["latent_space_dim"],
)
# load AE to build the feature extractor for hybrid OCSVM
ocsvm.load_ae(
net_name=cfg.settings["net_name"],
model_path=ae_model_path,
device=device,
)
ocsvm.load_model(import_path=ocsvm_model_path)
ocsvm_scores = ocsvm.inference(
dataset, device=device, n_jobs_dataloader=_n_jobs, batch_size=32
)
if ocsvm_scores is not None:
np.save(ocsvm_scores_path, ocsvm_scores)
logger.info(
"OCSVM inference: median=%.6f mean=%.6f min=%.6f max=%.6f",
float(np.median(ocsvm_scores)),
float(np.mean(ocsvm_scores)),
float(np.min(ocsvm_scores)),
float(np.max(ocsvm_scores)),
)
else:
logger.warning("OCSVM scores could not be determined; no array saved.")
# ----------------- Isolation Forest -----------------
isoforest_scores = None
Isoforest = IsoForest(
hybrid=False,
n_estimators=cfg.settings["isoforest_n_estimators"],
max_samples=cfg.settings["isoforest_max_samples"],
contamination=cfg.settings["isoforest_contamination"],
n_jobs=cfg.settings["isoforest_n_jobs_model"],
seed=cfg.settings["seed"],
)
Isoforest.load_model(import_path=isoforest_model_path, device=device)
isoforest_scores = Isoforest.inference(
dataset, device=device, n_jobs_dataloader=_n_jobs
)
if isoforest_scores is not None:
np.save(isoforest_scores_path, isoforest_scores)
logger.info(
"IsolationForest inference: median=%.6f mean=%.6f min=%.6f max=%.6f",
float(np.median(isoforest_scores)),
float(np.mean(isoforest_scores)),
float(np.min(isoforest_scores)),
float(np.max(isoforest_scores)),
)
else:
logger.warning(
"Isolation Forest scores could not be determined; no array saved."
)
# Final summary (DeepSAD always runs; baselines are best-effort)
logger.info(
"Inference complete. Saved arrays to %s:\n"
" DeepSAD scores: %s\n"
" DeepSAD outputs: %s\n"
" OCSVM scores: %s\n"
" IsoForest scores: %s",
inf_dir,
deepsad_scores_path.name,
deepsad_outputs_path.name,
ocsvm_scores_path.name if ocsvm_scores is not None else "(not saved)",
isoforest_scores_path.name
if isoforest_scores is not None
else "(not saved)",
)
elif action == "ae_elbow_test":
# Load data once
dataset = load_dataset(
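As a rough sketch of how the arrays written by the infer branch above could be consumed afterwards (annotation, not repository code; the directory and experiment-file stem are placeholders):

# Hypothetical post-processing sketch: load the arrays saved by the "infer"
# action and compare the three methods on the same run.
from pathlib import Path
import numpy as np

inf_dir = Path("log/experiment/inference")   # assumed: xp_path / "inference"
stem = "run_with_smoke"                      # assumed experiment-file stem
deepsad = np.load(inf_dir / f"{stem}_deepsad_scores.npy")
ocsvm = np.load(inf_dir / f"{stem}_ocsvm_scores.npy")
isoforest = np.load(inf_dir / f"{stem}_isoforest_scores.npy")
for name, s in (("DeepSAD", deepsad), ("OCSVM", ocsvm), ("IsoForest", isoforest)):
    print(f"{name}: median={np.median(s):.3f} mean={s.mean():.3f} "
          f"min={s.min():.3f} max={s.max():.3f}")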

View File

@@ -177,6 +177,8 @@ class DeepSADTrainer(BaseTrainer):
batch_size=self.batch_size, num_workers=self.n_jobs_dataloader
)
latent_dim = net.rep_dim
# Set device for network
net = net.to(self.device)
@@ -184,7 +186,9 @@ class DeepSADTrainer(BaseTrainer):
logger.info("Starting inference...")
n_batches = 0
start_time = time.time()
all_outputs = np.zeros((len(inference_loader.dataset), 1024), dtype=np.float32)
all_outputs = np.zeros(
(len(inference_loader.dataset), latent_dim), dtype=np.float32
)
scores = []
net.eval()

File diff suppressed because it is too large.

Binary file not shown.

File diff suppressed because it is too large.

View File

@@ -24,15 +24,12 @@
not used other than the declared sources/resources, and that I have
explicitly indicated all material which has been quoted either
literally or by content from the sources used.
\ifthenelse{\equal{\ThesisTitle}{master's thesis} \or
\equal{\ThesisTitle}{diploma thesis} \or
\equal{\ThesisTitle}{doctoral thesis}}
{The text document uploaded to TUGRAZonline is identical to the present \ThesisTitle.}{\reminder{TODO: fix \textbackslash ThesisTitle}}
The text document uploaded to TUGRAZonline is identical to the present \ThesisTitle.
\par\vspace*{4cm}
\centerline{
\begin{tabular}{m{1.5cm}cm{1.5cm}m{3cm}m{1.5cm}cm{1.5cm}}
\cline{1-3} \cline{5-7}
& date & & & & (signature) &\\
\end{tabular}}
\begin{tabular}{m{1.5cm}cm{1.5cm}m{3cm}m{1.5cm}cm{1.5cm}}
\cline{1-3} \cline{5-7}
& date & & & & (signature) & \\
\end{tabular}}

View File

@@ -55,7 +55,7 @@
\makeatother
% header and footer texts
\clearscrheadfoot % clear everything
\clearpairofpagestyles % clear everything
\KOMAoptions{headlines=1} % header needs two lines here
% [plain]{actual (scrheadings)}
\ihead[]{}%
@@ -141,46 +141,46 @@
\ifthenelse{\equal{\DocumentLanguage}{en}}{\renewcaptionname{USenglish}{\figurename}{Figure}}{}%
\ifthenelse{\equal{\DocumentLanguage}{de}}{\renewcaptionname{ngerman}{\figurename}{Abbildung}}{}%
\captionsetup{%
format=hang,% hanging captions
labelformat=simple,% just name and number
labelsep=colon,% colon and space
justification=justified,%
singlelinecheck=true,% center single line captions
font={footnotesize,it},% font style of label and text
margin=0.025\textwidth,% margin left/right of the caption (to textwidth)
indention=0pt,% no further indention (just hanging)
hangindent=0pt,% no further indention (just hanging)}
aboveskip=8pt,% same spacing above and...
belowskip=8pt}% ...below the float (this way tables shouln't be a problem, either)
format=hang,% hanging captions
labelformat=simple,% just name and number
labelsep=colon,% colon and space
justification=justified,%
singlelinecheck=true,% center single line captions
font={footnotesize,it},% font style of label and text
margin=0.025\textwidth,% margin left/right of the caption (to textwidth)
indention=0pt,% no further indention (just hanging)
hangindent=0pt,% no further indention (just hanging)}
aboveskip=8pt,% same spacing above and...
belowskip=8pt}% ...below the float (this way tables shouln't be a problem, either)
% code listings
\lstloadlanguages{VHDL,Matlab,[ANSI]C,Java,[LaTeX]TeX}
\lstset{%
% general
breaklines=true,% automatically break long lines
breakatwhitespace=true,% break only at white spaces
breakindent=1cm,% additional indentation for broken lines
% positioning
linewidth=\linewidth,% set width of whole thing to \linewidth
xleftmargin=0.1\linewidth,%
% frame and caption
frame=tlrb,% frame the entire thing
framexleftmargin=1cm,% to include linenumbering into frame
captionpos=b,% caption at bottom
% format parameters
basicstyle=\ttfamily\tiny,% small true type font
keywordstyle=\color{black},%
identifierstyle=\color{black},%
commentstyle=\color[rgb]{0.45,0.45,0.45},% gray
stringstyle=\color{black},%
showstringspaces=false,%
showtabs=false,%
tabsize=2,%
% linenumbers
numberstyle=\tiny,%
numbers=left,%
numbersep=3mm,%
firstnumber=1,%
stepnumber=1,% number every line (0: off)
numberblanklines=true%
% general
breaklines=true,% automatically break long lines
breakatwhitespace=true,% break only at white spaces
breakindent=1cm,% additional indentation for broken lines
% positioning
linewidth=\linewidth,% set width of whole thing to \linewidth
xleftmargin=0.1\linewidth,%
% frame and caption
frame=tlrb,% frame the entire thing
framexleftmargin=1cm,% to include linenumbering into frame
captionpos=b,% caption at bottom
% format parameters
basicstyle=\ttfamily\tiny,% small true type font
keywordstyle=\color{black},%
identifierstyle=\color{black},%
commentstyle=\color[rgb]{0.45,0.45,0.45},% gray
stringstyle=\color{black},%
showstringspaces=false,%
showtabs=false,%
tabsize=2,%
% linenumbers
numberstyle=\tiny,%
numbers=left,%
numbersep=3mm,%
firstnumber=1,%
stepnumber=1,% number every line (0: off)
numberblanklines=true%
}

View File

@@ -147,22 +147,22 @@
% standard
\newcommand{\fig}[3]{\begin{figure}\centering\includegraphics[width=\textwidth]{#2}\caption{#3}\label{fig:#1}\end{figure}}%
% with controllable parameters
\newcommand{\figc}[4]{\begin{figure}\centering\includegraphics[#1]{#2}\caption{#3}\label{fig:#4}\end{figure}}%
\newcommand{\figc}[4]{\begin{figure}\centering\includegraphics[#4]{#2}\caption{#3}\label{fig:#1}\end{figure}}%
% two subfigures
\newcommand{\twofig}[6]{\begin{figure}\centering%
\subfigure[#2]{\includegraphics[width=0.495\textwidth]{#1}}%
\subfigure[#4]{\includegraphics[width=0.495\textwidth]{#3}}%
\caption{#5}\label{fig:#6}\end{figure}}%
\subfigure[#2]{\includegraphics[width=0.495\textwidth]{#1}}%
\subfigure[#4]{\includegraphics[width=0.495\textwidth]{#3}}%
\caption{#5}\label{fig:#6}\end{figure}}%
% two subfigures with labels for each subplot
\newcommand{\twofigs}[8]{\begin{figure}\centering%
\subfigure[#2]{\includegraphics[width=0.495\textwidth]{#1}\label{fig:#8#3}}%
\subfigure[#5]{\includegraphics[width=0.495\textwidth]{#4}\label{fig:#8#6}}%
\caption{#7}\label{fig:#8}\end{figure}}%
\subfigure[#2]{\includegraphics[width=0.495\textwidth]{#1}\label{fig:#8#3}}%
\subfigure[#5]{\includegraphics[width=0.495\textwidth]{#4}\label{fig:#8#6}}%
\caption{#7}\label{fig:#8}\end{figure}}%
% two subfigures and controllable parameters
\newcommand{\twofigc}[8]{\begin{figure}\centering%
\subfigure[#3]{\includegraphics[#1]{#2}}%
\subfigure[#6]{\includegraphics[#4]{#5}}%
\caption{#7}\label{fig:#8}\end{figure}}%
\subfigure[#3]{\includegraphics[#1]{#2}}%
\subfigure[#6]{\includegraphics[#4]{#5}}%
\caption{#7}\label{fig:#8}\end{figure}}%
% framed figures
% standard
@@ -171,19 +171,19 @@
\newcommand{\figcf}[4]{\begin{figure}\centering\fbox{\includegraphics[#1]{#2}}\caption{#3}\label{fig:#4}\end{figure}}%
% two subfigures
\newcommand{\twofigf}[6]{\begin{figure}\centering%
\fbox{\subfigure[#2]{\includegraphics[width=0.495\textwidth]{#1}}}%
\fbox{\subfigure[#4]{\includegraphics[width=0.495\textwidth]{#3}}}%
\caption{#5}\label{fig:#6}\end{figure}}%
\fbox{\subfigure[#2]{\includegraphics[width=0.495\textwidth]{#1}}}%
\fbox{\subfigure[#4]{\includegraphics[width=0.495\textwidth]{#3}}}%
\caption{#5}\label{fig:#6}\end{figure}}%
% two subfigures with labels for each subplot
\newcommand{\twofigsf}[8]{\begin{figure}\centering%
\fbox{\subfigure[#2]{\includegraphics[width=0.495\textwidth]{#1}\label{fig:#8#3}}}%
\fbox{\subfigure[#5]{\includegraphics[width=0.495\textwidth]{#4}\label{fig:#8#6}}}%
\caption{#7}\label{fig:#8}\end{figure}}%
\fbox{\subfigure[#2]{\includegraphics[width=0.495\textwidth]{#1}\label{fig:#8#3}}}%
\fbox{\subfigure[#5]{\includegraphics[width=0.495\textwidth]{#4}\label{fig:#8#6}}}%
\caption{#7}\label{fig:#8}\end{figure}}%
% two subfigures and controllable parameters
\newcommand{\twofigcf}[8]{\begin{figure}\centering%
\fbox{\subfigure[#3]{\includegraphics[#1]{#2}}}%
\fbox{\subfigure[#6]{\includegraphics[#4]{#5}}}%
\caption{#7}\label{fig:#8}\end{figure}}%
\fbox{\subfigure[#3]{\includegraphics[#1]{#2}}}%
\fbox{\subfigure[#6]{\includegraphics[#4]{#5}}}%
\caption{#7}\label{fig:#8}\end{figure}}%
% listings
\newcommand{\filelisting}[5][]{\lstinputlisting[style=#2,caption={#4},label={lst:#5},#1]{#3}}

View File

@@ -47,33 +47,33 @@
\usepackage{fixltx2e}% LaTeX 2e bugfixes
\usepackage{ifthen}% for optional parts
\ifthenelse{\equal{\PaperSize}{a4paper}}{
\usepackage[paper=\PaperSize,twoside=\Twosided,%
textheight=246mm,%
textwidth=160mm,%
heightrounded=true,% round textheight to multiple of lines (avoids overfull vboxes)
ignoreall=true,% do not include header, footer, and margins in calculations
marginparsep=5pt,% marginpar only used for signs (centered), thus only small sep. needed
marginparwidth=10mm,% prevent margin notes to be out of page
hmarginratio=2:1,% set margin ration (inner:outer for twoside) - (2:3 is default)
]{geometry}}{}%
\usepackage[paper=\PaperSize,twoside=\Twosided,%
textheight=246mm,%
textwidth=160mm,%
heightrounded=true,% round textheight to multiple of lines (avoids overfull vboxes)
ignoreall=true,% do not include header, footer, and margins in calculations
marginparsep=5pt,% marginpar only used for signs (centered), thus only small sep. needed
marginparwidth=10mm,% prevent margin notes to be out of page
hmarginratio=2:1,% set margin ration (inner:outer for twoside) - (2:3 is default)
]{geometry}}{}%
\ifthenelse{\equal{\PaperSize}{letterpaper}}{
\usepackage[paper=\PaperSize,twoside=\Twosided,%
textheight=9in,%
textwidth=6.5in,%
heightrounded=true,% round textheight to multiple of lines (avoids overfull vboxes)
ignoreheadfoot=false,% do not include header and footer in calculations
marginparsep=5pt,% marginpar only used for signs (centered), thus only small sep. needed
marginparwidth=10mm,% prevent margin notes to be out of page
hmarginratio=3:2,% set margin ration (inner:outer for twoside) - (2:3 is default)
]{geometry}}{}%
\usepackage[paper=\PaperSize,twoside=\Twosided,%
textheight=9in,%
textwidth=6.5in,%
heightrounded=true,% round textheight to multiple of lines (avoids overfull vboxes)
ignoreheadfoot=false,% do not include header and footer in calculations
marginparsep=5pt,% marginpar only used for signs (centered), thus only small sep. needed
marginparwidth=10mm,% prevent margin notes to be out of page
hmarginratio=3:2,% set margin ration (inner:outer for twoside) - (2:3 is default)
]{geometry}}{}%
\ifthenelse{\equal{\DocumentLanguage}{en}}{\usepackage[T1]{fontenc}\usepackage[utf8]{inputenc}\usepackage[USenglish]{babel}}{}%
\ifthenelse{\equal{\DocumentLanguage}{de}}{\usepackage[T1]{fontenc}\usepackage[utf8]{inputenc}\usepackage[ngerman]{babel}}{}%
\usepackage[%
headtopline,plainheadtopline,% activate all lines (header and footer)
headsepline,plainheadsepline,%
footsepline,plainfootsepline,%
footbotline,plainfootbotline,%
automark% auto update \..mark
headtopline,plainheadtopline,% activate all lines (header and footer)
headsepline,plainheadsepline,%
footsepline,plainfootsepline,%
footbotline,plainfootbotline,%
automark% auto update \..mark
]{scrlayer-scrpage}% (KOMA)
\usepackage{imakeidx}
\usepackage[]{caption}% customize captions
@@ -91,7 +91,7 @@ automark% auto update \..mark
\usepackage[normalem]{ulem}% cross-out, strike-out, underlines (normalem: keep \emph italic)
%\usepackage[safe]{textcomp}% loading in safe mode to avoid problems (see LaTeX companion)
%\usepackage[geometry,misc]{ifsym}% technical symbols
\usepackage{remreset}%\@removefromreset commands (e.g., for continuous footnote numbering)
%\usepackage{remreset}%\@removefromreset commands (e.g., for continuous footnote numbering)
\usepackage{paralist}% extended list environments
% \usepackage[Sonny]{fncychap}
\usepackage[avantgarde]{quotchap}
@@ -140,35 +140,35 @@ automark% auto update \..mark
\usepackage{mdwlist} %list extensions
\ifthenelse{\equal{\DocumentLanguage}{de}}
{
\usepackage[german]{fancyref} %Bessere Querverweise
\usepackage[locale=DE]{siunitx} %Zahlen und SI Einheiten => Binary units aktivieren...
\usepackage[autostyle=true, %Anführungszeichen und Übersetzung der Literaturverweise
german=quotes]{csquotes} %Anführungszeichen und Übersetzung der Literaturverweise
\usepackage[german]{fancyref} %Bessere Querverweise
\usepackage[locale=DE]{siunitx} %Zahlen und SI Einheiten => Binary units aktivieren...
\usepackage[autostyle=true, %Anführungszeichen und Übersetzung der Literaturverweise
german=quotes]{csquotes} %Anführungszeichen und Übersetzung der Literaturverweise
}
{
\usepackage[english]{fancyref} %Bessere Querverweise
\usepackage[locale=US]{siunitx} %Zahlen und SI Einheiten => Binary units aktivieren...
\usepackage[autostyle=true] %Anführungszeichen und Übersetzung der Literaturverweise
{csquotes}
\usepackage[english]{fancyref} %Bessere Querverweise
\usepackage[locale=US]{siunitx} %Zahlen und SI Einheiten => Binary units aktivieren...
\usepackage[autostyle=true] %Anführungszeichen und Übersetzung der Literaturverweise
{csquotes}
}
\sisetup{detect-weight=true, detect-family=true} %format like surrounding environment
%extending fancyref for listings in both languages:
\newcommand*{\fancyreflstlabelprefix}{lst}
\fancyrefaddcaptions{english}{%
\providecommand*{\freflstname}{listing}%
\providecommand*{\Freflstname}{Listing}%
\providecommand*{\freflstname}{listing}%
\providecommand*{\Freflstname}{Listing}%
}
\fancyrefaddcaptions{german}{%
\providecommand*{\freflstname}{Listing}%
\providecommand*{\Freflstname}{Listing}%
\providecommand*{\freflstname}{Listing}%
\providecommand*{\Freflstname}{Listing}%
}
\frefformat{plain}{\fancyreflstlabelprefix}{\freflstname\fancyrefdefaultspacing#1}
\Frefformat{plain}{\fancyreflstlabelprefix}{\Freflstname\fancyrefdefaultspacing#1}
\frefformat{vario}{\fancyreflstlabelprefix}{%
\freflstname\fancyrefdefaultspacing#1#3%
\freflstname\fancyrefdefaultspacing#1#3%
}
\Frefformat{vario}{\fancyreflstlabelprefix}{%
\Freflstname\fancyrefdefaultspacing#1#3%
\Freflstname\fancyrefdefaultspacing#1#3%
}
\sisetup{separate-uncertainty} %enable uncertainity for siunitx
@@ -176,30 +176,30 @@ automark% auto update \..mark
\DeclareSIUnit\permille{\text{\textperthousand}} %add \permille to siunitx
\usepackage{xfrac} %Schönere brüche für SI Einheiten
\sisetup{per-mode=fraction, %Bruchstriche bei SI Einheiten aktivieren
fraction-function=\sfrac} %xfrac als Bruchstrichfunktion verwenden
fraction-function=\sfrac} %xfrac als Bruchstrichfunktion verwenden
\usepackage[scaled=0.78]{inconsolata}%Schreibmaschinenschrift für Quellcode
\usepackage[backend=biber, %Literaturverweiserweiterung Backend auswählen
bibencoding=utf8, %.bib-File ist utf8-codiert...
maxbibnames=99, %Immer alle Authoren in der Bibliographie darstellen...
style=ieee
bibencoding=utf8, %.bib-File ist utf8-codiert...
maxbibnames=99, %Immer alle Authoren in der Bibliographie darstellen...
style=ieee
]{biblatex}
\bibliography{bib/bibliography} %literatur.bib wird geladen und als Literaturverweis Datei verwendet
\ifthenelse{\equal{\FramedLinks}{true}}
{
\usepackage[%
breaklinks=true,% allow line break in links
colorlinks=false,% if false: framed link
linkcolor=black,anchorcolor=black,citecolor=black,filecolor=black,%
menucolor=black,urlcolor=black,bookmarksnumbered=true]{hyperref}% hyperlinks for references
\usepackage[%
breaklinks=true,% allow line break in links
colorlinks=false,% if false: framed link
linkcolor=black,anchorcolor=black,citecolor=black,filecolor=black,%
menucolor=black,urlcolor=black,bookmarksnumbered=true]{hyperref}% hyperlinks for references
}
{
\usepackage[%
breaklinks=true,% allow line break in links
colorlinks=true,% if false: framed link
linkcolor=black,anchorcolor=black,citecolor=black,filecolor=black,%
menucolor=black,urlcolor=black,bookmarksnumbered=true]{hyperref}% hyperlinks for references
\usepackage[%
breaklinks=true,% allow line break in links
colorlinks=true,% if false: framed link
linkcolor=black,anchorcolor=black,citecolor=black,filecolor=black,%
menucolor=black,urlcolor=black,bookmarksnumbered=true]{hyperref}% hyperlinks for references
}
\setcounter{biburlnumpenalty}{100}%Urls in Bibliographie Zeilenbrechbar machen
@@ -213,8 +213,8 @@ style=ieee
\ifthenelse{\equal{\DocumentLanguage}{de}}
{
\deftranslation[to=ngerman] %Dem Paket babel den deutschen Abkürzungsverzeichnis-Kapitelnamen
{Acronyms}{Abkürzungsverzeichnis} %beibringen
\deftranslation[to=ngerman] %Dem Paket babel den deutschen Abkürzungsverzeichnis-Kapitelnamen
{Acronyms}{Abkürzungsverzeichnis} %beibringen
}{}
% misc

View File

@@ -41,7 +41,7 @@
numpages = {58},
keywords = {outlier detection, Anomaly detection},
},
@dataset{alexander_kyuroson_2023_7913307,
dataset{alexander_kyuroson_2023_7913307,
author = {Alexander Kyuroson and Niklas Dahlquist and Nikolaos Stathoulopoulos
and Vignesh Kottayam Viswanathan and Anton Koval and George
Nikolakopoulos},
@@ -85,37 +85,6 @@
pages = {716--721},
}
,
@inproceedings{deepsvdd,
title = {Deep One-Class Classification},
author = {Ruff, Lukas and Vandermeulen, Robert and Goernitz, Nico and Deecke,
Lucas and Siddiqui, Shoaib Ahmed and Binder, Alexander and M{\"u}ller
, Emmanuel and Kloft, Marius},
booktitle = {Proceedings of the 35th International Conference on Machine
Learning},
pages = {4393--4402},
year = {2018},
editor = {Dy, Jennifer and Krause, Andreas},
volume = {80},
series = {Proceedings of Machine Learning Research},
month = {10--15 Jul},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v80/ruff18a/ruff18a.pdf},
url = {https://proceedings.mlr.press/v80/ruff18a.html},
abstract = {Despite the great advances made by deep learning in many machine
learning problems, there is a relative dearth of deep learning
approaches for anomaly detection. Those approaches which do exist
involve networks trained to perform a task other than anomaly
detection, namely generative models or compression, which are in
turn adapted for use in anomaly detection; they are not trained on
an anomaly detection based objective. In this paper we introduce a
new anomaly detection method—Deep Support Vector Data Description—,
which is trained on an anomaly detection based objective. The
adaptation to the deep regime necessitates that our neural network
and training procedure satisfy certain properties, which we
demonstrate theoretically. We show the effectiveness of our method
on MNIST and CIFAR-10 image benchmark datasets as well as on the
detection of adversarial examples of GTSRB stop signs.},
},
@inproceedings{deep_svdd,
title = {Deep One-Class Classification},
author = {Ruff, Lukas and Vandermeulen, Robert and Goernitz, Nico and Deecke,
@@ -235,7 +204,7 @@
performance;Current measurement},
doi = {10.1109/IROS51168.2021.9636694},
},
@article{deep_learning_overview,
article{deep_learning_overview,
title = {Deep learning in neural networks: An overview},
journal = {Neural Networks},
volume = {61},
@@ -289,7 +258,7 @@
autoencoder algorithm are summarized, and prospected for its future
development directions are addressed.},
},
@article{semi_overview,
article{semi_overview,
author = {Yang, Xiangli and Song, Zixing and King, Irwin and Xu, Zenglin},
journal = {IEEE Transactions on Knowledge and Data Engineering},
title = {A Survey on Deep Semi-Supervised Learning},
@@ -302,7 +271,7 @@
learning;semi-supervised learning;deep learning},
doi = {10.1109/TKDE.2022.3220219},
},
@book{ai_fundamentals_book,
book{ai_fundamentals_book,
title = {Fundamentals of Artificial Intelligence},
url = {http://dx.doi.org/10.1007/978-81-322-3972-7},
DOI = {10.1007/978-81-322-3972-7},
@@ -312,7 +281,7 @@
language = {en},
},
@article{machine_learning_overview,
article{machine_learning_overview,
title = {Machine Learning from Theory to Algorithms: An Overview},
volume = {1142},
ISSN = {1742-6596},
@@ -550,7 +519,7 @@
year = {1998},
pages = {2278--2324},
},
@article{ef_concept_source,
article{ef_concept_source,
title = {Multi-Year ENSO Forecasts Using Parallel Convolutional Neural
Networks With Heterogeneous Architecture},
volume = {8},
@@ -563,8 +532,226 @@
and Tian, Hao and Song, Dehai and Wei, Zhiqiang},
year = {2021},
month = aug,
},
@article{ml_supervised_unsupervised_figure_source,
title = {Virtual reality in biology: could we become virtual naturalists?},
volume = {14},
ISSN = {1936-6434},
url = {http://dx.doi.org/10.1186/s12052-021-00147-x},
DOI = {10.1186/s12052-021-00147-x},
number = {1},
journal = {Evolution: Education and Outreach},
publisher = {Springer Science and Business Media LLC},
author = {Morimoto, Juliano and Ponton, Fleur},
year = {2021},
month = may,
},
@article{ml_autoencoder_figure_source,
title = "From Autoencoder to Beta-VAE",
author = "Weng, Lilian",
journal = "lilianweng.github.io",
year = "2018",
url = "https://lilianweng.github.io/posts/2018-08-12-vae/",
},
@conference{bg_lidar_figure_source,
title = "1D MEMS Micro-Scanning LiDAR",
author = "Norbert Druml and Ievgeniia Maksymova and Thomas Thurner and Lierop,
{D. van} and Hennecke, {Marcus E.} and Andreas Foroutan",
year = "2018",
month = sep,
day = "16",
language = "English",
},
@book{deep_learning_book,
title = {Deep Learning},
author = {Ian Goodfellow and Yoshua Bengio and Aaron Courville},
publisher = {MIT Press},
note = {\url{http://www.deeplearningbook.org}},
year = {2016},
},
@misc{mobilenet,
doi = {10.48550/ARXIV.1704.04861},
url = {https://arxiv.org/abs/1704.04861},
author = {Howard, Andrew G. and Zhu, Menglong and Chen, Bo and Kalenichenko,
Dmitry and Wang, Weijun and Weyand, Tobias and Andreetto, Marco and
Adam, Hartwig},
keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and
information sciences, FOS: Computer and information sciences},
title = {MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
Applications},
publisher = {arXiv},
year = {2017},
copyright = {arXiv.org perpetual, non-exclusive license},
},
@inproceedings{shufflenet,
title = {ShuffleNet: An Extremely Efficient Convolutional Neural Network for
Mobile Devices},
url = {http://dx.doi.org/10.1109/CVPR.2018.00716},
DOI = {10.1109/cvpr.2018.00716},
booktitle = {2018 IEEE/CVF Conference on Computer Vision and Pattern
Recognition},
publisher = {IEEE},
author = {Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian},
year = {2018},
month = jun,
},
@article{bg_svm,
title = {Support-vector networks},
author = {Cortes, Corinna and Vapnik, Vladimir},
journal = {Machine learning},
volume = {20},
number = {3},
pages = {273--297},
year = {1995},
publisher = {Springer},
},
@article{bg_kmeans,
author = {Lloyd, S.},
journal = {IEEE Transactions on Information Theory},
title = {Least squares quantization in PCM},
year = {1982},
volume = {28},
number = {2},
pages = {129-137},
keywords = {Noise;Quantization (signal);Voltage;Receivers;Pulse
modulation;Sufficient conditions;Stochastic processes;Probabilistic
logic;Urban areas;Q measurement},
doi = {10.1109/TIT.1982.1056489},
},
@inproceedings{bg_dbscan,
added-at = {2023-12-13T07:32:13.000+0100},
author = {Ester, Martin and Kriegel, Hans-Peter and Sander, Jörg and Xu,
Xiaowei},
biburl = {
https://www.bibsonomy.org/bibtex/279a9f3560daefa3775bd35543b4482e1/admin
},
booktitle = {KDD},
crossref = {conf/kdd/1996},
editor = {Simoudis, Evangelos and Han, Jiawei and Fayyad, Usama M.},
ee = {http://www.aaai.org/Library/KDD/1996/kdd96-037.php},
interhash = {ba33e4d6b4e5b26bd9f543f26b7d250a},
intrahash = {79a9f3560daefa3775bd35543b4482e1},
isbn = {1-57735-004-9},
keywords = {},
pages = {226-231},
publisher = {AAAI Press},
timestamp = {2023-12-13T07:32:13.000+0100},
title = {A Density-Based Algorithm for Discovering Clusters in Large Spatial
Databases with Noise.},
url = {http://dblp.uni-trier.de/db/conf/kdd/kdd96.html#EsterKSX96},
year = 1996,
},
@article{bg_pca,
author = { Karl Pearson F.R.S. },
title = {LIII. On lines and planes of closest fit to systems of points in
space},
journal = {The London, Edinburgh, and Dublin Philosophical Magazine and
Journal of Science},
volume = {2},
number = {11},
pages = {559-572},
year = {1901},
publisher = {Taylor & Francis},
doi = {10.1080/14786440109462720},
},
@article{bg_infomax,
author = {Linsker, R.},
journal = {Computer},
title = {Self-organization in a perceptual network},
year = {1988},
volume = {21},
number = {3},
pages = {105-117},
keywords = {Intelligent networks;Biological information
theory;Circuits;Biology computing;Animal
structures;Neuroscience;Genetics;System testing;Neural
networks;Constraint theory},
doi = {10.1109/2.36},
},
@article{bg_slam,
title = {On the Representation and Estimation of Spatial Uncertainty},
volume = {5},
ISSN = {1741-3176},
url = {http://dx.doi.org/10.1177/027836498600500404},
DOI = {10.1177/027836498600500404},
number = {4},
journal = {The International Journal of Robotics Research},
publisher = {SAGE Publications},
author = {Smith, Randall C. and Cheeseman, Peter},
year = {1986},
month = dec,
pages = {56--68},
},
@article{roc_vs_prc2,
title = {Context discovery for anomaly detection},
volume = {19},
ISSN = {2364-4168},
url = {http://dx.doi.org/10.1007/s41060-024-00586-x},
DOI = {10.1007/s41060-024-00586-x},
number = {1},
journal = {International Journal of Data Science and Analytics},
publisher = {Springer Science and Business Media LLC},
author = {Calikus, Ece and Nowaczyk, Slawomir and Dikmen, Onur},
year = {2024},
month = jun,
pages = {99--113},
},
@article{roc_vs_prc,
title = {On the evaluation of unsupervised outlier detection: measures,
datasets, and an empirical study},
volume = {30},
ISSN = {1573-756X},
url = {http://dx.doi.org/10.1007/s10618-015-0444-8},
DOI = {10.1007/s10618-015-0444-8},
number = {4},
journal = {Data Mining and Knowledge Discovery},
publisher = {Springer Science and Business Media LLC},
author = {Campos, Guilherme O. and Zimek, Arthur and Sander, J\"{o}rg and
Campello, Ricardo J. G. B. and Micenková, Barbora and Schubert, Erich
and Assent, Ira and Houle, Michael E.},
year = {2016},
month = jan,
pages = {891--927},
},
@inproceedings{roc,
title = {Basic principles of ROC analysis},
author = {Metz, Charles E},
booktitle = {Seminars in nuclear medicine},
volume = {8},
number = {4},
pages = {283--298},
year = {1978},
organization = {Elsevier},
},
@article{prc,
title = {A critical investigation of recall and precision as measures of
retrieval system performance},
volume = {7},
ISSN = {1558-2868},
url = {http://dx.doi.org/10.1145/65943.65945},
DOI = {10.1145/65943.65945},
number = {3},
journal = {ACM Transactions on Information Systems},
publisher = {Association for Computing Machinery (ACM)},
author = {Raghavan, Vijay and Bollmann, Peter and Jung, Gwang S.},
year = {1989},
month = jul,
pages = {205--229},
},
@article{zscore,
title = {Advanced engineering mathematics},
author = {Kreyszig, Erwin and Stroud, K and Stephenson, G},
journal = {Integration},
volume = {9},
number = {4},
pages = {1014},
year = {2008},
publisher = {John Wiley \& Sons, Inc. 9 th edition, 2006 Page 2 of 6 Teaching
methods~…},
}

Binary file not shown.

Binary file not shown.

View File

@@ -1,5 +1,6 @@
\documentclass[tikz,border=10pt]{standalone}
\usepackage{tikz}
\usepackage{amsfonts}
\usetikzlibrary{positioning, shapes.geometric, fit, arrows, arrows.meta, backgrounds}
% Define box styles
@@ -7,9 +8,9 @@
databox/.style={rectangle, align=center, draw=black, fill=blue!50, thick, rounded corners},%, inner sep=4},
procbox/.style={rectangle, align=center, draw=black, fill=orange!30, thick, rounded corners},
hyperbox/.style={rectangle, align=center, draw=black, fill=green!30, thick, rounded corners},
stepsbox/.style={rectangle, align=left, draw=black,fill=white, rounded corners, minimum width=6cm, minimum height=1.5cm, font=\small},
outputbox/.style={rectangle, align=center, draw=red!80, fill=red!20, rounded corners, minimum width=6cm, minimum height=1.5cm, font=\small},
hlabelbox/.style={rectangle, align=center, draw=black,fill=white, rounded corners, minimum width=6cm, minimum height=1.5cm, font=\small},
stepsbox/.style={rectangle, align=left, draw=black,fill=white, rounded corners, minimum width=5.2cm, minimum height=1.5cm, font=\small},
outputbox/.style={rectangle, align=center, draw=red!80, fill=red!20, rounded corners, minimum width=5.2cm, minimum height=1.5cm, font=\small},
hlabelbox/.style={rectangle, align=center, draw=black,fill=white, rounded corners, minimum width=5.2cm, minimum height=1.5cm, font=\small},
vlabelbox/.style={rectangle, align=center, draw=black,fill=white, rounded corners, minimum width=3cm, minimum height=1.8cm, font=\small},
arrow/.style={-{Latex[length=3mm]}},
arrowlabel/.style={fill=white,inner sep=2pt,midway}
@@ -25,11 +26,11 @@
\begin{tikzpicture}[node distance=1cm and 2cm]
\node (data) {Data};
\node[right=7 of data] (process) {Procedure};
\node[right=7 of process] (hyper) {Hyperparameters};
\node[right=4.9 of data] (process) {Procedure};
\node[right=4.1 of process] (hyper) {Hyperparameters};
\begin{pgfonlayer}{foreground}
\node[hlabelbox, below=of data] (unlabeled) {\boxtitle{Unlabeled Data} More normal than \\ anomalous samples required};
\node[hlabelbox, below=1.29 of data] (unlabeled) {\boxtitle{Unlabeled Data} Significantly more normal than \\ anomalous samples required};
\node[hlabelbox, below=.1 of unlabeled] (labeled) {\boxtitle{Labeled Data} No requirement regarding ratio \\ +1 = normal, -1 = anomalous};
\end{pgfonlayer}
\begin{pgfonlayer}{background}
@@ -39,16 +40,16 @@
%\draw[arrow] (latent.east) -- node{} (autoenc.west);
\begin{pgfonlayer}{foreground}
\node[stepsbox, below=of process] (pretrainproc) {Train Autoencoder for $E_A$ Epochs \\ with $L_A$ Learning Rate \\ No Labels Used};
\node[outputbox, below=.1 of pretrainproc] (pretrainout) {\boxtitle{Outputs} Encoder Network \\ $\mathbf{w}$: Network Weights};
\node[stepsbox, below=of process] (pretrainproc) {Train Autoencoder $\mathcal{\phi}_{AE}$ \\ optimize Autoencoding Objective \\ for $E_A$ Epochs \\ with $L_A$ Learning Rate \\ No Labels Used / Required};
\node[outputbox, below=.1 of pretrainproc] (pretrainout) {\boxtitle{Outputs} $\mathcal{\phi}$: Encoder / DeepSAD Network \\ $\mathcal{W}_E$: Encoder Network Weights};
\end{pgfonlayer}
\begin{pgfonlayer}{background}
\node[procbox, fit=(pretrainproc) (pretrainout), label={[label distance = 1, name=pretrainlab]above:{\textbf{Pre-Training of Autoencoder}}}] (pretrain) {};
\end{pgfonlayer}
\begin{pgfonlayer}{foreground}
\node[hlabelbox, below=of hyper] (autoencarch) {\boxtitle{Autoencoder Architecture} Choose based on data type \\ Latent Space Size (based on complexity)};
\node[hlabelbox, below=.1 of autoencarch] (pretrainhyper) {\boxtitle{Hyperparameters} $E_A$: Number of Epochs \\ $L_A$: Learning Rate};
\node[hlabelbox, below=1.26 of hyper] (autoencarch) {\boxtitle{Autoencoder Architecture} $\mathcal{\phi}_{AE}$: Autoencoder Network \\ $\mathbb{R}^d$: Latent Space Size };
\node[hlabelbox, below=.1 of autoencarch] (pretrainhyper) {\boxtitle{Hyperparameters} $E_A$: Number of Epochs \\ $L_A$: Learning Rate AE};
\end{pgfonlayer}
\begin{pgfonlayer}{background}
\node[hyperbox, fit=(autoencarch) (pretrainhyper), label={[label distance = 1, name=autoenclabel]above:{\textbf{Pre-Training Hyperparameters}}}] (pretrainhyp) {};
@@ -61,7 +62,7 @@
% \draw[arrow] (node cs:name=autoenc,angle=196) |- (node cs:name=pretrain,angle=5);
\begin{pgfonlayer}{foreground}
\node[stepsbox, below=1.4 of pretrain] (calccproc) {1. Init Encoder with $\mathbf{w}$ \\ 2. Forward Pass on all data \\ 3. $\mathbf{c}$ = Mean Latent Representation};
\node[stepsbox, below=1.4 of pretrain] (calccproc) {Init Network $\mathcal{\phi}$ with $\mathcal{W}_E$ \\ Forward Pass on all data \\ Hypersphere center $\mathbf{c}$ is mean \\ of all Latent Representation};
\node[outputbox, below=.1 of calccproc] (calccout) {\boxtitle{Outputs} $\mathbf{c}$: Hypersphere Center};
\end{pgfonlayer}
\begin{pgfonlayer}{background}
@@ -76,21 +77,21 @@
%\draw[arrow] (node cs:name=traindata,angle=-45) |- node[arrowlabel]{all training data, labels removed} (node cs:name=calcc,angle=200);
\begin{pgfonlayer}{foreground}
\node[stepsbox, below=1.4 of calcc] (maintrainproc) {Train Network for $E_M$ Epochs \\ with $L_M$ Learning Rate \\ Considers Labels with $\eta$ strength};
\node[outputbox, below=.1 of maintrainproc] (maintrainout) {\boxtitle{Outputs} Encoder Network \\ $\mathbf{w}$: Network Weights \\ $\mathbf{c}$: Hypersphere Center};
\node[stepsbox, below=1.4 of calcc] (maintrainproc) {Init Network $\mathcal{\phi}$ with $\mathcal{W}_E$ \\ Train Network $\mathcal{\phi}$ \\ optimize DeepSAD Objective\\ for $E_M$ Epochs \\ with $L_M$ Learning Rate \\ Considers Labels with $\eta$ strength};
\node[outputbox, below=.1 of maintrainproc] (maintrainout) {\boxtitle{Outputs} $\mathcal{\phi}$: DeepSAD Network \\ $\mathcal{W}$: DeepSAD Network Weights \\ $\mathbf{c}$: Hypersphere Center};
\end{pgfonlayer}
\begin{pgfonlayer}{background}
\node[procbox, fit=(maintrainproc) (maintrainout), label={[label distance = 1, name=maintrainlab]above:{\textbf{Main Training}}}] (maintrain) {};
\end{pgfonlayer}
\begin{pgfonlayer}{foreground}
\node[hlabelbox, below=11.25 of hyper] (maintrainhyper) {$E_M$: Number of Epochs \\ $L_M$: Learning Rate \\ $\eta$: Strength Labeled/Unlabeled};
\node[hlabelbox, below=12.48 of hyper] (maintrainhyper) {$E_M$: Number of Epochs \\ $L_M$: Learning Rate \\ $\eta$: Weight Labeled/Unlabeled};
\end{pgfonlayer}
\begin{pgfonlayer}{background}
\node[hyperbox, fit=(maintrainhyper), label={[label distance = 1, name=autoenclabel]above:{\textbf{Main-Training Hyperparameters}}}] (maintrainhyp) {};
\end{pgfonlayer}
\draw[arrow] (node cs:name=pretrain,angle=-20) -- +(1, 0) |- (node cs:name=maintrain,angle=20);
\draw[arrow] (node cs:name=pretrain,angle=-50) |- +(1.5, -0.55) -- +(1.5,-5.4) -| (node cs:name=maintrain,angle=50);
%\draw[arrow] (pretrainoutput.south) -- (node cs:name=maintrain,angle=22);
@@ -101,7 +102,7 @@
\begin{pgfonlayer}{foreground}
\node[stepsbox, below=1.4 of maintrain] (inferenceproc) {Forward Pass through Network = $\mathbf{p}$ \\ Calculate Geometric Distance $\mathbf{p} \rightarrow \mathbf{c}$ \\ Anomaly Score = Geometric Distance};
\node[stepsbox, below=1.4 of maintrain] (inferenceproc) {Init Network $\mathcal{\phi}$ with $\mathcal{W}$ \\Forward Pass on sample = $\mathbf{p}$ \\ Calculate Distance $\mathbf{p} \rightarrow \mathbf{c}$ \\ Distance = Anomaly Score};
\node[outputbox, below=.1 of inferenceproc] (inferenceout) {\boxtitle{Outputs} Anomaly Score (Analog Value) \\ Higher for Anomalies};
\end{pgfonlayer}
\begin{pgfonlayer}{background}
@@ -109,7 +110,7 @@
\end{pgfonlayer}
\begin{pgfonlayer}{foreground}
\node[hlabelbox, below=11.8 of traindata] (newdatasample) {\boxtitle{New Data Sample} Same data type as training data};
\node[hlabelbox, below=13.32 of traindata] (newdatasample) {\boxtitle{New Data Sample} Same data type as training data};
\end{pgfonlayer}
\begin{pgfonlayer}{background}
\node[databox, fit=(newdatasample), label={[label distance = 1] above:{\textbf{Unseen Data}}}] (newdata) {};
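For reference, the scoring rule in the diagram's inference block corresponds to the usual Deep SAD formulation: a sample x is mapped through the trained network to a latent point p = φ(x; W) and scored by its distance to the hypersphere center c, with larger distances meaning more anomalous. A sketch in the diagram's notation (the squared-distance convention follows the Deep SAD reference formulation and is an assumption here, not an excerpt from this repository):

% Anomaly score at inference time (larger = more anomalous):
s(\mathbf{x}) = \left\lVert \phi(\mathbf{x};\mathcal{W}) - \mathbf{c} \right\rVert^{2}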

Multiple binary image files added or updated (previews and size details not shown).

View File

@@ -0,0 +1,11 @@
-- drop-images.lua
-- Replaces all images (figures, graphics) with a short placeholder.
function Image(el) return pandoc.Str("[image omitted]") end
-- For LaTeX figures that are still raw
function RawBlock(el)
if el.format == "tex" and el.text:match("\\begin%s*{%s*figure%s*}") then
return pandoc.Plain({pandoc.Str("[figure omitted]")})
end
end

View File

@@ -0,0 +1,11 @@
-- drop-tables.lua
-- Removes LaTeX tabular and tabularx environments (and their contents).
function RawBlock(el)
if el.format == "tex" then
-- Check for tabular or tabularx environment
if el.text:match("\\begin%s*{%s*tabularx?%s*}") then
return pandoc.Plain({pandoc.Str("[table omitted]")})
end
end
end

View File

@@ -0,0 +1,43 @@
-- keep-citations.lua
-- Replace citations with a placeholder and eat any preceding space.
local PH = "[citation]"
-- Pandoc-native citations (if the reader produced Cite nodes)
function Cite(el) return pandoc.Str(PH) end
-- Raw LaTeX \cite-like macros (when not parsed as Cite)
function RawInline(el)
    if el.format and el.format:match("tex") and el.text:match("\\%a-cite%*?") then
return pandoc.Str(PH)
end
end
-- Remove a single leading Space before our placeholder
local function squash_spaces(inlines)
local out = {}
local i = 1
while i <= #inlines do
local cur = inlines[i]
local nxt = inlines[i + 1]
if cur and cur.t == "Space" and nxt and nxt.t == "Str" and nxt.text ==
PH then
table.insert(out, nxt)
i = i + 2
else
table.insert(out, cur)
i = i + 1
end
end
return out
end
function Para(el)
el.content = squash_spaces(el.content)
return el
end
function Plain(el)
el.content = squash_spaces(el.content)
return el
end

View File

@@ -0,0 +1,48 @@
-- math-omit.lua
-- Replace any math with a placeholder and ensure a space before it when appropriate.
local PH = "[math omitted]"
function Math(el)
-- Emit the placeholder as a Str; spacing is fixed in Para/Plain below.
return pandoc.Str(PH)
end
local function ensure_space_before_ph(inlines)
local out = {}
for i = 1, #inlines do
local cur = inlines[i]
if cur.t == "Str" and cur.text == PH then
local prev = out[#out]
local need_space = true
-- No space if it's the first token in the block
if not prev then
need_space = false
elseif prev.t == "Space" then
need_space = false
elseif prev.t == "Str" then
-- If previous char is an opening bracket/paren/slash/hyphen or whitespace, skip
local last = prev.text:sub(-1)
if last:match("[%(%[%{%/%-]") or last:match("%s") then
need_space = false
end
end
if need_space then table.insert(out, pandoc.Space()) end
table.insert(out, cur)
else
table.insert(out, cur)
end
end
return out
end
function Para(el)
el.content = ensure_space_before_ph(el.content)
return el
end
function Plain(el)
el.content = ensure_space_before_ph(el.content)
return el
end

View File

@@ -15,6 +15,8 @@
let
pkgs = import nixpkgs { inherit system; };
aspellWithDicts = pkgs.aspellWithDicts (d: [ d.en ]);
latex-packages = with pkgs; [
texlive.combined.scheme-full
which
@@ -26,16 +28,42 @@
zathura
wmctrl
python312
pandoc
pandoc-lua-filters
];
filtersPath = "${pkgs.pandoc-lua-filters}/share/pandoc/filters";
in
{
devShell = pkgs.mkShell {
buildInputs = [
latex-packages
dev-packages
aspellWithDicts
];
};
shellHook = ''
set -eu
# local folder in your repo to reference in commands
link_target="pandoc-filters"
# refresh symlink each time you enter the shell
ln -sfn ${filtersPath} "$link_target"
echo "Linked $link_target -> ${filtersPath}"
# (optional) write a defaults file that uses the relative symlink
if [ ! -f pandoc.defaults.yaml ]; then
cat > pandoc.defaults.yaml <<'YAML'
from: latex
to: plain
wrap: none
lua-filter:
- pandoc-filters/latex-hyphen.lua
- pandoc-filters/pandoc-quotes.lua
YAML
echo "Wrote pandoc.defaults.yaml"
fi
'';
}
);
}

thesis/tex2plaintext.sh (new executable file, 61 lines)
View File

@@ -0,0 +1,61 @@
#!/usr/bin/env bash
set -euo pipefail
# Usage:
# ./tex2plaintext.sh [INPUT_TEX] [OUT_BASENAME]
#
# Defaults:
# INPUT_TEX = Main.tex (your original file name)
# OUT_BASENAME = thesis (produces thesis.txt, thesis_part1.txt, thesis_part2.txt)
INPUT_TEX="${1:-Main.tex}"
OUT_BASE="${2:-thesis}"
FLAT_TEX="flat.tex"
NO_TABLES_TEX="flat_notables.tex"
PLAIN_TXT="${OUT_BASE}.txt"
PART1_TXT="${OUT_BASE}_part1.txt"
PART2_TXT="${OUT_BASE}_part2.txt"
MARKER="Data and Preprocessing"
echo "[1/5] Flattening with latexpand -> ${FLAT_TEX}"
latexpand "${INPUT_TEX}" > "${FLAT_TEX}"
echo "[2/5] Removing tabular/tabularx environments -> ${NO_TABLES_TEX}"
# Replace entire tabular / tabularx environments with a placeholder
perl -0777 -pe 's/\\begin\{(tabularx?)\}.*?\\end\{\1\}/[table omitted]/gs' \
"${FLAT_TEX}" > "${NO_TABLES_TEX}"
echo "[3/5] Converting to plain text with pandoc -> ${PLAIN_TXT}"
pandoc -f latex -t plain --wrap=none \
--lua-filter=filters/keep-citations.lua \
--lua-filter=filters/math-omit.lua \
"${NO_TABLES_TEX}" -o "${PLAIN_TXT}"
echo "[4/5] Replacing [] placeholders with [figure]"
sed -i 's/\[\]/[figure]/g' "${PLAIN_TXT}"
echo "[5/5] Splitting ${PLAIN_TXT} before the marker line: \"${MARKER}\""
# Ensure the marker exists exactly on its own line
if ! grep -xq "${MARKER}" "${PLAIN_TXT}"; then
echo "ERROR: Marker line not found exactly as \"${MARKER}\" in ${PLAIN_TXT}."
echo " (It must be the only content on that line.)"
exit 1
fi
# Clean previous outputs if present
rm -f -- "${PART1_TXT}" "${PART2_TXT}"
# Split so the marker line becomes the FIRST line of part 2
awk -v marker="${MARKER}" -v out1="${PART1_TXT}" -v out2="${PART2_TXT}" '
BEGIN { current = out1 }
$0 == marker { current = out2; print $0 > current; next }
{ print $0 > current }
' "${PLAIN_TXT}"
echo "Done."
echo " - ${PLAIN_TXT}"
echo " - ${PART1_TXT}"
echo " - ${PART2_TXT}"

View File

@@ -1,3 +1,9 @@
\addcontentsline{toc}{chapter}{Abstract (English)}
\begin{center}\Large\bfseries Abstract (English)\end{center}\vspace*{1cm}\noindent
Write some fancy abstract here!
\addcontentsline{toc}{chapter}{Abstract}
\begin{center}\Large\bfseries Abstract\end{center}\vspace*{1cm}\noindent
Autonomous robots are increasingly used in search and rescue (SAR) missions. In these missions, LiDAR sensors are often the most important source of environmental data. However, LiDAR data can degrade under hazardous conditions, especially when airborne particles such as smoke or dust are present. This degradation can lead to errors in mapping and navigation and may endanger both the robot and humans. Therefore, robots need a way to estimate the reliability of their LiDAR data, so that they can make better-informed decisions.
\bigskip
This thesis investigates whether anomaly detection methods can be used to quantify LiDAR data degradation caused by airborne particles such as smoke and dust. We apply a semi-supervised deep learning approach called DeepSAD, which produces an anomaly score for each LiDAR scan, serving as a measure of data reliability.
\bigskip
We evaluate this method against baseline methods on a subterranean dataset that includes LiDAR scans degraded by artificial smoke. Our results show that DeepSAD consistently outperforms the baselines and can clearly distinguish degraded from normal scans. At the same time, we find that the limited availability of labeled data and the lack of robust ground truth remain major challenges. Despite these limitations, our work demonstrates that anomaly detection methods are a promising tool for LiDAR degradation quantification in SAR scenarios.

View File

@@ -1,3 +1,3 @@
\addcontentsline{toc}{chapter}{Acknowledgements}
\begin{center}\Large\bfseries Acknowledgements\end{center}\vspace*{1cm}\noindent
Here you can tell us, how thankful you are for this amazing template ;)
\addcontentsline{toc}{chapter}{Artificial Intelligence Usage Disclaimer}
\begin{center}\Large\bfseries Artificial Intelligence Usage Disclaimer\end{center}\vspace*{1cm}\noindent
During the creation of this thesis, an LLM-based Artificial Intelligence tool was used for stylistic and grammatical revision of the author's own work.

Binary file not shown.

View File

@@ -30,7 +30,8 @@ arch = [
height=H8 * 1.6,
depth=D1,
width=W1,
caption=f"Latent Space",
caption="Latent Space",
captionshift=0,
),
# to_connection("fc1", "latent"),
# --------------------------- DECODER ---------------------------
@@ -39,19 +40,20 @@ arch = [
"fc3",
n_filer="{{8×128×8}}",
zlabeloffset=0.5,
offset="(2,0,0)",
offset="(2,-.5,0)",
to="(latent-east)",
height=H1,
depth=D512,
width=W1,
caption=f"FC",
captionshift=20,
),
to_Conv(
"unsqueeze",
s_filer="{{128×8}}",
zlabeloffset=0.4,
n_filer=32,
offset="(2,0,0)",
offset="(1.4,0,0)",
to="(fc3-east)",
height=H8,
depth=D128,
@@ -62,7 +64,7 @@ arch = [
# Reshape to 4×8×512
to_UnPool(
"up1",
offset="(2,0,0)",
offset="(1.2,0,0)",
n_filer=32,
to="(unsqueeze-east)",
height=H16,
@@ -101,7 +103,8 @@ arch = [
height=H16,
depth=D1024,
width=W32,
caption="",
caption="Deconv2",
captionshift=20,
),
to_Conv(
"dwdeconv3",
@@ -112,7 +115,7 @@ arch = [
height=H16,
depth=D1024,
width=W1,
caption="Deconv2",
caption="",
),
to_Conv(
"dwdeconv4",
@@ -134,7 +137,8 @@ arch = [
height=H32,
depth=D2048,
width=W16,
caption="",
caption="Deconv3",
captionshift=10,
),
to_Conv(
"dwdeconv5",
@@ -145,7 +149,7 @@ arch = [
height=H32,
depth=D2048,
width=W1,
caption="Deconv3",
caption="",
),
to_Conv(
"dwdeconv6",
@@ -164,7 +168,7 @@ arch = [
s_filer="{{2048×32}}",
zlabeloffset=0.15,
n_filer=1,
offset="(2,0,0)",
offset="(1.5,0,0)",
to="(dwdeconv6-east)",
height=H32,
depth=D2048,
@@ -178,12 +182,13 @@ arch = [
s_filer="{{2048×32}}",
zlabeloffset=0.15,
n_filer=1,
offset="(2,0,0)",
offset="(1.5,0,0)",
to="(outconv-east)",
height=H32,
depth=D2048,
width=W1,
caption="Output",
captionshift=5,
),
# to_connection("deconv2", "out"),
to_end(),

View File

@@ -28,6 +28,7 @@
{Box={
name=latent,
caption=Latent Space,
captionshift=0,
xlabel={{, }},
zlabeloffset=0.3,
zlabel=latent dim,
@@ -39,10 +40,11 @@
};
\pic[shift={(2,0,0)}] at (latent-east)
\pic[shift={(2,-.5,0)}] at (latent-east)
{Box={
name=fc3,
caption=FC,
captionshift=20,
xlabel={{" ","dummy"}},
zlabeloffset=0.5,
zlabel={{8×128×8}},
@@ -55,10 +57,11 @@
};
\pic[shift={(2,0,0)}] at (fc3-east)
\pic[shift={(1.4,0,0)}] at (fc3-east)
{Box={
name=unsqueeze,
caption=Unsqueeze,
captionshift=0,
xlabel={{32, }},
zlabeloffset=0.4,
zlabel={{128×8}},
@@ -70,10 +73,11 @@
};
\pic[shift={ (2,0,0) }] at (unsqueeze-east)
\pic[shift={ (1.2,0,0) }] at (unsqueeze-east)
{Box={
name=up1,
caption=,
captionshift=0,
fill=\UnpoolColor,
opacity=0.5,
xlabel={{32, }},
@@ -88,6 +92,7 @@
{Box={
name=dwdeconv1,
caption=Deconv1,
captionshift=0,
xlabel={{1, }},
zlabeloffset=0.3,
zlabel=,
@@ -103,6 +108,7 @@
{Box={
name=dwdeconv2,
caption=,
captionshift=0,
xlabel={{32, }},
zlabeloffset=0.4,
zlabel={{256×16}},
@@ -117,7 +123,8 @@
\pic[shift={ (2,0,0) }] at (dwdeconv2-east)
{Box={
name=up2,
caption=,
caption=Deconv2,
captionshift=20,
fill=\UnpoolColor,
opacity=0.5,
xlabel={{32, }},
@@ -131,7 +138,8 @@
\pic[shift={(0,0,0)}] at (up2-east)
{Box={
name=dwdeconv3,
caption=Deconv2,
caption=,
captionshift=0,
xlabel={{1, }},
zlabeloffset=0.3,
zlabel=,
@@ -147,6 +155,7 @@
{Box={
name=dwdeconv4,
caption=,
captionshift=0,
xlabel={{16, }},
zlabeloffset=0.17,
zlabel={{1024×16}},
@@ -161,7 +170,8 @@
\pic[shift={ (2,0,0) }] at (dwdeconv4-east)
{Box={
name=up3,
caption=,
caption=Deconv3,
captionshift=10,
fill=\UnpoolColor,
opacity=0.5,
xlabel={{16, }},
@@ -175,7 +185,8 @@
\pic[shift={(0,0,0)}] at (up3-east)
{Box={
name=dwdeconv5,
caption=Deconv3,
caption=,
captionshift=0,
xlabel={{1, }},
zlabeloffset=0.3,
zlabel=,
@@ -191,6 +202,7 @@
{Box={
name=dwdeconv6,
caption=,
captionshift=0,
xlabel={{8, }},
zlabeloffset=0.15,
zlabel={{2048×32}},
@@ -202,10 +214,11 @@
};
\pic[shift={(2,0,0)}] at (dwdeconv6-east)
\pic[shift={(1.5,0,0)}] at (dwdeconv6-east)
{Box={
name=outconv,
caption=Deconv4,
captionshift=0,
xlabel={{1, }},
zlabeloffset=0.15,
zlabel={{2048×32}},
@@ -217,10 +230,11 @@
};
\pic[shift={(2,0,0)}] at (outconv-east)
\pic[shift={(1.5,0,0)}] at (outconv-east)
{Box={
name=out,
caption=Output,
captionshift=5,
xlabel={{1, }},
zlabeloffset=0.15,
zlabel={{2048×32}},

Binary file not shown.

View File

@@ -125,7 +125,7 @@ arch = [
n_filer=8,
zlabeloffset=0.45,
s_filer="{{128×8}}",
offset="(2,0,0)",
offset="(1,0,0)",
to="(pool3-east)",
height=H8,
depth=D128,
@@ -137,12 +137,13 @@ arch = [
"fc1",
n_filer="{{8×128×8}}",
zlabeloffset=0.5,
offset="(2,0,0)",
offset="(2,-.5,0)",
to="(squeeze-east)",
height=H1,
depth=D512,
width=W1,
caption=f"FC",
caption="FC",
captionshift=0,
),
# to_connection("pool2", "fc1"),
# --------------------------- LATENT ---------------------------
@@ -150,7 +151,7 @@ arch = [
"latent",
n_filer="",
s_filer="latent dim",
offset="(2,0,0)",
offset="(1.3,0.5,0)",
to="(fc1-east)",
height=H8 * 1.6,
depth=D1,

View File

@@ -28,6 +28,7 @@
{Box={
name=input,
caption=Input,
captionshift=0,
xlabel={{1, }},
zlabeloffset=0.2,
zlabel={{2048×32}},
@@ -43,6 +44,7 @@
{Box={
name=dwconv1,
caption=,
captionshift=0,
xlabel={{1, }},
zlabeloffset=0.3,
zlabel=,
@@ -58,6 +60,7 @@
{Box={
name=dwconv2,
caption=Conv1,
captionshift=0,
xlabel={{16, }},
zlabeloffset=0.15,
zlabel={{2048×32}},
@@ -76,6 +79,7 @@
zlabeloffset=0.3,
zlabel={{512×32}},
caption=,
captionshift=0,
fill=\PoolColor,
opacity=0.5,
height=26,
@@ -89,6 +93,7 @@
{Box={
name=dwconv3,
caption=,
captionshift=0,
xlabel={{1, }},
zlabeloffset=0.3,
zlabel=,
@@ -104,6 +109,7 @@
{Box={
name=dwconv4,
caption=Conv2,
captionshift=0,
xlabel={{32, }},
zlabeloffset=0.3,
zlabel={{512×32}},
@@ -122,6 +128,7 @@
zlabeloffset=0.45,
zlabel={{256×16}},
caption=,
captionshift=0,
fill=\PoolColor,
opacity=0.5,
height=18,
@@ -138,6 +145,7 @@
zlabeloffset=0.45,
zlabel={{128×8}},
caption=,
captionshift=0,
fill=\PoolColor,
opacity=0.5,
height=12,
@@ -147,10 +155,11 @@
};
\pic[shift={(2,0,0)}] at (pool3-east)
\pic[shift={(1,0,0)}] at (pool3-east)
{Box={
name=squeeze,
caption=Squeeze,
captionshift=0,
xlabel={{8, }},
zlabeloffset=0.45,
zlabel={{128×8}},
@@ -162,10 +171,11 @@
};
\pic[shift={(2,0,0)}] at (squeeze-east)
\pic[shift={(2,-.5,0)}] at (squeeze-east)
{Box={
name=fc1,
caption=FC,
captionshift=0,
xlabel={{" ","dummy"}},
zlabeloffset=0.5,
zlabel={{8×128×8}},
@@ -178,10 +188,11 @@
};
\pic[shift={(2,0,0)}] at (fc1-east)
\pic[shift={(1.3,0.5,0)}] at (fc1-east)
{Box={
name=latent,
caption=Latent Space,
captionshift=0,
xlabel={{, }},
zlabeloffset=0.3,
zlabel=latent dim,

Binary file not shown.

View File

@@ -39,19 +39,20 @@ arch = [
"fc3",
n_filer="{{4×512×8}}",
zlabeloffset=0.35,
offset="(2,0,0)",
offset="(2,-.5,0)",
to="(latent-east)",
height=1.3,
depth=D512,
width=W1,
caption=f"FC",
captionshift=20,
),
# to_connection("latent", "fc3"),
# Reshape to 4×8×512
to_UnPool(
"up1",
n_filer=4,
offset="(2,0,0)",
offset="(2.5,0,0)",
to="(fc3-east)",
height=H16,
depth=D1024,
@@ -82,7 +83,8 @@ arch = [
height=H32,
depth=D2048,
width=W8,
caption="",
caption="Deconv2",
captionshift=10,
),
# to_connection("deconv1", "up2"),
# DeConv2 (5×5, same): 8->1, 32×2048
@@ -96,7 +98,7 @@ arch = [
height=H32,
depth=D2048,
width=W1,
caption="Deconv2",
caption="",
),
# to_connection("up2", "deconv2"),
# Output
@@ -111,6 +113,7 @@ arch = [
depth=D2048,
width=1.0,
caption="Output",
captionshift=5,
),
# to_connection("deconv2", "out"),
to_end(),

View File

@@ -28,6 +28,7 @@
{Box={
name=latent,
caption=Latent Space,
captionshift=0,
xlabel={{, }},
zlabeloffset=0.3,
zlabel=latent dim,
@@ -39,10 +40,11 @@
};
\pic[shift={(2,0,0)}] at (latent-east)
\pic[shift={(2,-.5,0)}] at (latent-east)
{Box={
name=fc3,
caption=FC,
captionshift=20,
xlabel={{" ","dummy"}},
zlabeloffset=0.35,
zlabel={{4×512×8}},
@@ -55,10 +57,11 @@
};
\pic[shift={ (2,0,0) }] at (fc3-east)
\pic[shift={ (2.5,0,0) }] at (fc3-east)
{Box={
name=up1,
caption=,
captionshift=0,
fill=\UnpoolColor,
opacity=0.5,
xlabel={{4, }},
@@ -73,6 +76,7 @@
{Box={
name=deconv1,
caption=Deconv1,
captionshift=0,
xlabel={{8, }},
zlabeloffset=0.2,
zlabel={{1024×16}},
@@ -87,7 +91,8 @@
\pic[shift={ (2,0,0) }] at (deconv1-east)
{Box={
name=up2,
caption=,
caption=Deconv2,
captionshift=10,
fill=\UnpoolColor,
opacity=0.5,
xlabel={{8, }},
@@ -101,7 +106,8 @@
\pic[shift={(0,0,0)}] at (up2-east)
{Box={
name=deconv2,
caption=Deconv2,
caption=,
captionshift=0,
xlabel={{1, }},
zlabeloffset=0.15,
zlabel={{2048×32}},
@@ -117,6 +123,7 @@
{Box={
name=out,
caption=Output,
captionshift=5,
xlabel={{1, }},
zlabeloffset=0.15,
zlabel={{2048×32}},

Binary file not shown.

View File

@@ -91,13 +91,14 @@ arch = [
to_fc(
"fc1",
n_filer="{{4×512×8}}",
offset="(2,0,0)",
offset="(2,-.5,0)",
zlabeloffset=0.5,
to="(pool2-east)",
height=1.3,
depth=D512,
width=W1,
caption=f"FC",
captionshift=20,
),
# to_connection("pool2", "fc1"),
# --------------------------- LATENT ---------------------------

View File

@@ -28,6 +28,7 @@
{Box={
name=input,
caption=Input,
captionshift=0,
xlabel={{1, }},
zlabeloffset=0.15,
zlabel={{2048×32}},
@@ -43,6 +44,7 @@
{Box={
name=conv1,
caption=Conv1,
captionshift=0,
xlabel={{8, }},
zlabeloffset=0.15,
zlabel={{2048×32}},
@@ -61,6 +63,7 @@
zlabeloffset=0.3,
zlabel={{1024×16}},
caption=,
captionshift=0,
fill=\PoolColor,
opacity=0.5,
height=18,
@@ -74,6 +77,7 @@
{Box={
name=conv2,
caption=Conv2,
captionshift=0,
xlabel={{4, }},
zlabeloffset=0.4,
zlabel={{1024×16\hspace{2.5em}512×8}},
@@ -92,6 +96,7 @@
zlabeloffset=0.3,
zlabel={{}},
caption=,
captionshift=0,
fill=\PoolColor,
opacity=0.5,
height=12,
@@ -101,10 +106,11 @@
};
\pic[shift={(2,0,0)}] at (pool2-east)
\pic[shift={(2,-.5,0)}] at (pool2-east)
{Box={
name=fc1,
caption=FC,
captionshift=20,
xlabel={{" ","dummy"}},
zlabeloffset=0.5,
zlabel={{4×512×8}},
@@ -121,6 +127,7 @@
{Box={
name=latent,
caption=Latent Space,
captionshift=0,
xlabel={{, }},
zlabeloffset=0.3,
zlabel=latent dim,

View File

@@ -57,8 +57,12 @@
\path (b1) edge ["\ylabel",midway] (a1); %height label
\tikzstyle{captionlabel}=[text width=15*\LastEastx/\scale,text centered]
\path (\LastEastx/2,-\y/2,+\z/2) + (0,-25pt) coordinate (cap)
% \tikzstyle{captionlabel}=[text width=15*\LastEastx/\scale,text centered,xshift=\captionshift pt]
% \path (\LastEastx/2,-\y/2,+\z/2) + (0,-25pt) coordinate (cap)
% edge ["\textcolor{black}{ \bf \caption}"',captionlabel](cap) ; %Block caption/pic object label
% Place caption: shift the coordinate by captionshift (NEW)
\path (\LastEastx/2,-\y/2,+\z/2) + (\captionshift pt,-25pt) coordinate (cap)
edge ["\textcolor{black}{ \bf \caption}"',captionlabel](cap) ; %Block caption/pic object label
%Define nodes to be used outside on the pic object
@@ -103,6 +107,7 @@ ylabel/.store in=\ylabel,
zlabel/.store in=\zlabel,
zlabeloffset/.store in=\zlabeloffset,
caption/.store in=\caption,
captionshift/.store in=\captionshift,
name/.store in=\name,
fill/.store in=\fill,
opacity/.store in=\opacity,
@@ -117,5 +122,6 @@ ylabel=,
zlabel=,
zlabeloffset=0.3,
caption=,
captionshift=0,
name=,
}

View File

@@ -75,6 +75,7 @@ def to_Conv(
height=40,
depth=40,
caption=" ",
captionshift=0,
):
return (
r"""
@@ -90,6 +91,9 @@ def to_Conv(
caption="""
+ caption
+ r""",
captionshift="""
+ str(captionshift)
+ """,
xlabel={{"""
+ str(n_filer)
+ """, }},
@@ -182,6 +186,7 @@ def to_Pool(
depth=32,
opacity=0.5,
caption=" ",
captionshift=0,
):
return (
r"""
@@ -206,6 +211,9 @@ def to_Pool(
caption="""
+ caption
+ r""",
captionshift="""
+ str(captionshift)
+ """,
fill=\PoolColor,
opacity="""
+ str(opacity)
@@ -236,6 +244,7 @@ def to_UnPool(
depth=32,
opacity=0.5,
caption=" ",
captionshift=0,
):
return (
r"""
@@ -251,6 +260,9 @@ def to_UnPool(
caption="""
+ caption
+ r""",
captionshift="""
+ str(captionshift)
+ r""",
fill=\UnpoolColor,
opacity="""
+ str(opacity)
@@ -335,6 +347,7 @@ def to_ConvSoftMax(
height=40,
depth=40,
caption=" ",
captionshift=0,
):
return (
r"""
@@ -350,6 +363,9 @@ def to_ConvSoftMax(
caption="""
+ caption
+ """,
captionshift="""
+ str(captionshift)
+ """,
zlabel="""
+ str(s_filer)
+ """,
@@ -380,6 +396,7 @@ def to_SoftMax(
depth=25,
opacity=0.8,
caption=" ",
captionshift=0,
z_label_offset=0,
):
return (
@@ -396,6 +413,9 @@ def to_SoftMax(
caption="""
+ caption
+ """,
captionshift="""
+ str(captionshift)
+ """,
xlabel={{" ","dummy"}},
zlabel="""
+ str(s_filer)
@@ -455,6 +475,7 @@ def to_fc(
height=2,
depth=10,
caption=" ",
captionshift=0,
# titlepos=0,
):
return (
@@ -471,6 +492,9 @@ def to_fc(
caption="""
+ caption
+ """,
captionshift="""
+ str(captionshift)
+ """,
xlabel={{" ","dummy"}},
zlabeloffset="""
+ str(zlabeloffset)

View File

@@ -1,8 +1,7 @@
from pathlib import Path
import polars as pl
from plot_scripts.load_results import (
from load_results import (
load_pretraining_results_dataframe,
load_results_dataframe,
)

View File

@@ -1,6 +1,6 @@
{ pkgs, ... }:
let
native_dependencies = with pkgs.python312Packages; [
native_dependencies = with pkgs.python311Packages; [
torch-bin
torchvision-bin
aggdraw # for visualtorch
@@ -16,7 +16,7 @@ in
packages = native_dependencies ++ tools;
languages.python = {
enable = true;
package = pkgs.python312;
package = pkgs.python311;
uv = {
enable = true;
sync.enable = true;

View File

@@ -12,7 +12,7 @@ import numpy as np
import polars as pl
# CHANGE THIS IMPORT IF YOUR LOADER MODULE IS NAMED DIFFERENTLY
from plot_scripts.load_results import load_pretraining_results_dataframe
from load_results import load_pretraining_results_dataframe
# ----------------------------
# Config
@@ -78,8 +78,8 @@ def build_arch_curves_from_df(
"overall": (dims, means, stds),
} }
"""
if "split" not in df.columns:
raise ValueError("Expected 'split' column in AE dataframe.")
# if "split" not in df.columns:
# raise ValueError("Expected 'split' column in AE dataframe.")
if "scores" not in df.columns:
raise ValueError("Expected 'scores' column in AE dataframe.")
if "network" not in df.columns or "latent_dim" not in df.columns:
@@ -88,7 +88,7 @@ def build_arch_curves_from_df(
raise ValueError(f"Expected '{label_field}' column in AE dataframe.")
# Keep only test split
df = df.filter(pl.col("split") == "test")
# df = df.filter(pl.col("split") == "test")
groups: dict[tuple[str, int], dict[str, list[float]]] = {}
@@ -201,7 +201,7 @@ def plot_multi_loss_curve(arch_results, title, output_path, colors=None):
plt.xlabel("Latent Dimensionality")
plt.ylabel("Test Loss")
plt.title(title)
# plt.title(title)
plt.legend()
plt.grid(True, alpha=0.3)
plt.xticks(all_dims)

View File

@@ -171,28 +171,28 @@ def plot_combined_timeline(
range(num_bins), near_sensor_binned, color=color, linestyle="--", alpha=0.6
)
# Add vertical lines for manually labeled frames if available
if all_paths[i].with_suffix(".npy").name in manually_labeled_anomaly_frames:
begin_frame, end_frame = manually_labeled_anomaly_frames[
all_paths[i].with_suffix(".npy").name
]
# Convert frame numbers to normalized timeline positions
begin_pos = (begin_frame / exp_len) * (num_bins - 1)
end_pos = (end_frame / exp_len) * (num_bins - 1)
# # Add vertical lines for manually labeled frames if available
# if all_paths[i].with_suffix(".npy").name in manually_labeled_anomaly_frames:
# begin_frame, end_frame = manually_labeled_anomaly_frames[
# all_paths[i].with_suffix(".npy").name
# ]
# # Convert frame numbers to normalized timeline positions
# begin_pos = (begin_frame / exp_len) * (num_bins - 1)
# end_pos = (end_frame / exp_len) * (num_bins - 1)
# Add vertical lines with matching color and loose dotting
ax1.axvline(
x=begin_pos,
color=color,
linestyle=":",
alpha=0.6,
)
ax1.axvline(
x=end_pos,
color=color,
linestyle=":",
alpha=0.6,
)
# # Add vertical lines with matching color and loose dotting
# ax1.axvline(
# x=begin_pos,
# color=color,
# linestyle=":",
# alpha=0.6,
# )
# ax1.axvline(
# x=end_pos,
# color=color,
# linestyle=":",
# alpha=0.6,
# )
# Customize axes
ax1.set_xlabel("Normalized Timeline")
@@ -202,7 +202,7 @@ def plot_combined_timeline(
ax1.set_ylabel("Missing Points (%)")
ax2.set_ylabel("Points with <0.5m Range (%)")
plt.title(title)
# plt.title(title)
# Create legends without fixed positions
# First get all lines and labels for experiments
@@ -221,7 +221,8 @@ def plot_combined_timeline(
)
# Create single legend in top right corner with consistent margins
fig.legend(all_handles, all_labels, loc="upper right", borderaxespad=4.8)
# fig.legend(all_handles, all_labels, loc="upper right", borderaxespad=2.8)
fig.legend(all_handles, all_labels, bbox_to_anchor=(0.95, 0.99))
plt.grid(True, alpha=0.3)

View File

@@ -122,8 +122,8 @@ def plot_data_points_pie(normal_experiment_frames, anomaly_experiment_frames):
# prepare data for pie chart
labels = [
"Normal Lidar Frames\nNon-Degraded Pointclouds",
"Anomalous Lidar Frames\nDegraded Pointclouds",
"Normal Lidar Frames\nNon-Degraded Point Clouds",
"Anomalous Lidar Frames\nDegraded Point Clouds",
]
sizes = [total_normal_frames, total_anomaly_frames]
explode = (0.1, 0) # explode the normal slice
@@ -150,9 +150,9 @@ def plot_data_points_pie(normal_experiment_frames, anomaly_experiment_frames):
va="center",
color="black",
)
plt.title(
"Distribution of Normal and Anomalous\nPointclouds in all Experiments (Lidar Frames)"
)
# plt.title(
# "Distribution of Normal and Anomalous\nPointclouds in all Experiments (Lidar Frames)"
# )
plt.tight_layout()
# save the plot

View File

@@ -5,7 +5,6 @@ from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from pointcloudset import Dataset
# define data path containing the bag files
all_data_path = Path("/home/fedex/mt/data/subter")
@@ -82,7 +81,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
plt.figure(figsize=(10, 5))
plt.hist(missing_points_normal, bins=100, alpha=0.5, label="Normal Experiments")
plt.hist(missing_points_anomaly, bins=100, alpha=0.5, label="Anomaly Experiments")
plt.title(title)
# plt.title(title)
plt.xlabel("Number of Missing Points")
plt.ylabel("Number of Pointclouds")
plt.legend()
@@ -109,7 +108,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
label="Anomaly Experiments",
orientation="horizontal",
)
plt.title(title)
# plt.title(title)
plt.xlabel("Number of Pointclouds")
plt.ylabel("Number of Missing Points")
plt.legend()
@@ -142,7 +141,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
label="Anomaly Experiments",
density=True,
)
plt.title(title)
# plt.title(title)
plt.xlabel("Number of Missing Points")
plt.ylabel("Density")
plt.legend()
@@ -169,7 +168,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
label="Anomaly Experiments (With Artifical Smoke)",
density=True,
)
plt.title(title)
# plt.title(title)
plt.xlabel("Percentage of Missing Lidar Measurements")
plt.ylabel("Density")
# display the x axis as percentages
@@ -210,7 +209,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
alpha=0.5,
label="Anomaly Experiments",
)
plt.title(title)
# plt.title(title)
plt.xlabel("Number of Missing Points")
plt.ylabel("Normalized Density")
plt.legend()

View File

@@ -5,7 +5,6 @@ from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from pointcloudset import Dataset
# define data path containing the bag files
all_data_path = Path("/home/fedex/mt/data/subter")
@@ -164,7 +163,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
plt.gca().set_yticklabels(
["{:.0f}%".format(y * 100) for y in plt.gca().get_yticks()]
)
plt.title("Particles Closer than 0.5m to the Sensor")
# plt.title("Particles Closer than 0.5m to the Sensor")
plt.ylabel("Percentage of measurements closer than 0.5m")
plt.tight_layout()
plt.savefig(output_datetime_path / f"particles_near_sensor_boxplot_{rt}.png")
@@ -186,7 +185,7 @@ def plot_data_points(normal_experiment_paths, anomaly_experiment_paths, title):
plt.gca().set_yticklabels(
["{:.0f}%".format(y * 100) for y in plt.gca().get_yticks()]
)
plt.title("Particles Closer than 0.5m to the Sensor")
# plt.title("Particles Closer than 0.5m to the Sensor")
plt.ylabel("Percentage of measurements closer than 0.5m")
plt.ylim(0, 0.05)
plt.tight_layout()

View File

@@ -112,18 +112,27 @@ cmap = get_colormap_with_special_missing_color(
args.colormap, args.missing_data_color, args.reverse_colormap
)
# --- Create a figure with 2 vertical subplots ---
# --- Create a figure with 2 vertical subplots and move titles to the left ---
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(10, 5))
for ax, frame, title in zip(
# leave extra left margin for the left-side labels
fig.subplots_adjust(left=0.14, hspace=0.05)
for ax, frame, label in zip(
(ax1, ax2),
(frame1, frame2),
(
"Projection of Lidar Frame without Degradation",
"Projection of Lidar Frame with Degradation (Artifical Smoke)",
),
("(a)", "(b)"),
):
im = ax.imshow(frame, cmap=cmap, aspect="auto", vmin=global_vmin, vmax=global_vmax)
ax.set_title(title)
# place the "title" to the left, vertically centered relative to the axes
ax.text(
-0.02, # negative x places text left of the axes (in axes coordinates)
0.5,
label,
transform=ax.transAxes,
va="center",
ha="right",
fontsize=12,
)
ax.axis("off")
# Adjust layout to fit margins for a paper

View File

@@ -26,7 +26,8 @@ SCHEMA_STATIC = {
"eval": pl.Utf8, # "exp_based" | "manual_based"
"fold": pl.Int32,
# metrics
"auc": pl.Float64,
"roc_auc": pl.Float64, # <-- renamed from 'auc'
"prc_auc": pl.Float64, # <-- new
"ap": pl.Float64,
# per-sample scores: list of (idx, label, score)
"scores": pl.List(
@@ -75,7 +76,6 @@ PRETRAIN_SCHEMA = {
"semi_anomalous": pl.Int32,
"model": pl.Utf8, # always "ae"
"fold": pl.Int32,
"split": pl.Utf8, # "train" | "test"
# timings and optimization
"train_time": pl.Float64,
"test_time": pl.Float64,
@@ -96,10 +96,62 @@ PRETRAIN_SCHEMA = {
"config_json": pl.Utf8, # full config.json as string (for reference)
}
SCHEMA_INFERENCE = {
# identifiers / dims
"experiment": pl.Utf8, # e.g. "2_static_no_artifacts_illuminated_2023-01-23-001"
"network": pl.Utf8, # e.g. "LeNet", "efficient"
"latent_dim": pl.Int32,
"semi_normals": pl.Int32,
"semi_anomalous": pl.Int32,
"model": pl.Utf8, # "deepsad" | "isoforest" | "ocsvm"
# metrics
"scores": pl.List(pl.Float64),
# timings / housekeeping
"folder": pl.Utf8,
"config_json": pl.Utf8, # full config.json as string (for reference)
}
# ------------------------------------------------------------
# Helpers: curve/scores normalizers (tuples/ndarrays -> dict/list)
# ------------------------------------------------------------
def compute_prc_auc_from_curve(prc_curve: dict | None) -> float | None:
"""
Compute AUC of the Precision-Recall curve via trapezoidal rule.
Expects prc_curve = {"precision": [...], "recall": [...], "thr": [...] (optional)}.
Robust to NaNs, unsorted recall, and missing endpoints; returns np.nan if empty.
"""
if not prc_curve:
return np.nan
precision = np.asarray(prc_curve.get("precision", []), dtype=float)
recall = np.asarray(prc_curve.get("recall", []), dtype=float)
if precision.size == 0 or recall.size == 0:
return np.nan
mask = ~(np.isnan(precision) | np.isnan(recall))
precision, recall = precision[mask], recall[mask]
if recall.size == 0:
return np.nan
# Sort by recall, clip to [0,1]
order = np.argsort(recall)
recall = np.clip(recall[order], 0.0, 1.0)
precision = np.clip(precision[order], 0.0, 1.0)
# Ensure curve spans [0,1] in recall (hold precision constant at ends)
if recall[0] > 0.0:
recall = np.insert(recall, 0, 0.0)
precision = np.insert(precision, 0, precision[0])
if recall[-1] < 1.0:
recall = np.append(recall, 1.0)
precision = np.append(precision, precision[-1])
# Trapezoidal AUC
return float(np.trapezoid(precision, recall))
def _tolist(x):
if x is None:
return None
@@ -233,11 +285,11 @@ def normalize_bool_list(a) -> Optional[List[bool]]:
# ------------------------------------------------------------
# Low-level: read one experiment folder
# ------------------------------------------------------------
def read_config(exp_dir: Path) -> dict:
def read_config(exp_dir: Path, k_fold_required: bool = True) -> dict:
cfg = exp_dir / "config.json"
with cfg.open("r") as f:
c = json.load(f)
if not c.get("k_fold"):
if k_fold_required and not c.get("k_fold"):
raise ValueError(f"{exp_dir.name}: not trained as k-fold")
return c
@@ -343,23 +395,28 @@ def rows_from_ocsvm_default(data: dict, evals: List[str]) -> Dict[str, dict]:
# Build the Polars DataFrame
# ------------------------------------------------------------
def load_results_dataframe(root: Path, allow_cache: bool = True) -> pl.DataFrame:
"""
Walks experiment subdirs under `root`. For each (model, fold) it adds rows:
Columns (SCHEMA_STATIC):
network, latent_dim, semi_normals, semi_anomalous,
model, eval, fold,
auc, ap, scores{sample_idx,orig_label,score},
roc_curve{fpr,tpr,thr}, prc_curve{precision,recall,thr},
sample_indices, sample_labels, valid_mask,
train_time, test_time,
folder, k_fold_num
"""
if allow_cache:
cache = root / "results_cache.parquet"
if cache.exists():
try:
df = pl.read_parquet(cache)
print(f"[info] loaded cached results frame from {cache}")
# Backward-compat: old caches may have 'auc' but no 'roc_auc'/'prc_auc'
if "roc_auc" not in df.columns and "auc" in df.columns:
df = df.rename({"auc": "roc_auc"})
if "prc_auc" not in df.columns and "prc_curve" in df.columns:
df = df.with_columns(
pl.struct(
pl.col("prc_curve").struct.field("precision"),
pl.col("prc_curve").struct.field("recall"),
)
.map_elements(
lambda s: compute_prc_auc_from_curve(
{"precision": s[0], "recall": s[1]}
)
)
.alias("prc_auc")
)
return df
except Exception as e:
print(f"[warn] failed to load cache {cache}: {e}")
@@ -394,15 +451,17 @@ def load_results_dataframe(root: Path, allow_cache: bool = True) -> pl.DataFrame
continue
if model == "deepsad":
per_eval = rows_from_deepsad(data, EVALS) # eval -> dict
per_eval = rows_from_deepsad(data, EVALS)
elif model == "isoforest":
per_eval = rows_from_isoforest(data, EVALS) # eval -> dict
per_eval = rows_from_isoforest(data, EVALS)
elif model == "ocsvm":
per_eval = rows_from_ocsvm_default(data, EVALS) # eval -> dict
per_eval = rows_from_ocsvm_default(data, EVALS)
else:
per_eval = {}
for ev, vals in per_eval.items():
# compute prc_auc now (fast), rename auc->roc_auc
prc_auc_val = compute_prc_auc_from_curve(vals.get("prc"))
rows.append(
{
"network": network,
@@ -412,7 +471,8 @@ def load_results_dataframe(root: Path, allow_cache: bool = True) -> pl.DataFrame
"model": model,
"eval": ev,
"fold": fold,
"auc": vals["auc"],
"roc_auc": vals["auc"], # renamed
"prc_auc": prc_auc_val, # new
"ap": vals["ap"],
"scores": vals["scores"],
"roc_curve": vals["roc"],
@@ -428,20 +488,19 @@ def load_results_dataframe(root: Path, allow_cache: bool = True) -> pl.DataFrame
}
)
# If empty, return a typed empty frame
if not rows:
# Return a typed empty frame (new schema)
return pl.DataFrame(schema=SCHEMA_STATIC)
df = pl.DataFrame(rows, schema=SCHEMA_STATIC)
# Cast to efficient dtypes (categoricals etc.) no extra sanitation
# Cast to efficient dtypes (categoricals etc.)
df = df.with_columns(
pl.col("network", "model", "eval").cast(pl.Categorical),
pl.col(
"latent_dim", "semi_normals", "semi_anomalous", "fold", "k_fold_num"
).cast(pl.Int32),
pl.col("auc", "ap", "train_time", "test_time").cast(pl.Float64),
# NOTE: no cast on 'scores' here; it's already List(Struct) per schema.
pl.col("roc_auc", "prc_auc", "ap", "train_time", "test_time").cast(pl.Float64),
)
if allow_cache:
@@ -562,7 +621,7 @@ def load_pretraining_results_dataframe(
# Cast/optimize a bit (categoricals, ints, floats)
df = df.with_columns(
pl.col("network", "model", "split").cast(pl.Categorical),
pl.col("network", "model").cast(pl.Categorical),
pl.col(
"latent_dim", "semi_normals", "semi_anomalous", "fold", "k_fold_num"
).cast(pl.Int32),
@@ -589,7 +648,129 @@ def load_pretraining_results_dataframe(
return df
def load_inference_results_dataframe(
root: Path,
allow_cache: bool = True,
models: List[str] = MODELS,
) -> pl.DataFrame:
"""Load inference results from experiment folders.
Args:
root: Path to root directory containing experiment folders
allow_cache: Whether to use/create cache file
models: List of models to look for scores
Returns:
pl.DataFrame: DataFrame containing inference results
"""
if allow_cache:
cache = root / "inference_results_cache.parquet"
if cache.exists():
try:
df = pl.read_parquet(cache)
print(f"[info] loaded cached inference frame from {cache}")
return df
except Exception as e:
print(f"[warn] failed to load inference cache {cache}: {e}")
rows: List[dict] = []
exp_dirs = [p for p in root.iterdir() if p.is_dir()]
for exp_dir in sorted(exp_dirs):
try:
# Load and validate config
cfg = read_config(exp_dir, k_fold_required=False)
cfg_json = json.dumps(cfg, sort_keys=True)
# Extract config values
network = cfg.get("net_name")
latent_dim = int(cfg.get("latent_space_dim"))
semi_normals = int(cfg.get("num_known_normal"))
semi_anomalous = int(cfg.get("num_known_outlier"))
# Process each model's scores
inference_dir = exp_dir / "inference"
if not inference_dir.exists():
print(f"[warn] no inference directory for {exp_dir.name}")
continue
# Find all unique experiments in this folder's inference files
score_files = list(inference_dir.glob("*_scores.npy"))
if not score_files:
print(f"[warn] no score files in {inference_dir}")
continue
# Extract unique experiment names from score files
# Format: {experiment}_{model}_scores.npy
experiments = set()
for score_file in score_files:
exp_name = score_file.stem.rsplit("_", 2)[0]
experiments.add(exp_name)
# Load scores for each experiment and model
for experiment in sorted(experiments):
for model in models:
score_file = inference_dir / f"{experiment}_{model}_scores.npy"
if not score_file.exists():
print(f"[warn] missing score file for {experiment}, {model}")
continue
try:
scores = np.load(score_file)
rows.append(
{
"experiment": experiment,
"network": network,
"latent_dim": latent_dim,
"semi_normals": semi_normals,
"semi_anomalous": semi_anomalous,
"model": model,
"scores": scores.tolist(),
"folder": str(exp_dir),
"config_json": cfg_json,
}
)
except Exception as e:
print(
f"[warn] failed to load scores for {experiment}, {model}: {e}"
)
continue
except Exception as e:
print(f"[warn] skipping {exp_dir.name}: {e}")
continue
# If empty, return a typed empty frame
if not rows:
return pl.DataFrame(schema=SCHEMA_INFERENCE)
df = pl.DataFrame(rows, schema=SCHEMA_INFERENCE)
# Optimize datatypes
df = df.with_columns(
[
pl.col("experiment", "network", "model").cast(pl.Categorical),
pl.col("latent_dim", "semi_normals", "semi_anomalous").cast(pl.Int32),
]
)
# Cache if enabled
if allow_cache:
try:
df.write_parquet(cache)
print(f"[info] cached inference frame to {cache}")
except Exception as e:
print(f"[warn] failed to write cache {cache}: {e}")
return df
def main():
inference_root = Path("/home/fedex/mt/results/inference/copy")
df_inference = load_inference_results_dataframe(inference_root, allow_cache=True)
exit(0)
root = Path("/home/fedex/mt/results/copy")
df1 = load_results_dataframe(root, allow_cache=True)
exit(0)
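As a quick sanity check of the new PRC-AUC helper, one can feed it a toy curve (a sketch; it assumes this loader module is importable as load_results, as the plotting scripts above do):

from load_results import compute_prc_auc_from_curve

toy_curve = {
    "precision": [1.0, 0.8, 0.6, 0.5],
    "recall": [0.0, 0.4, 0.7, 1.0],
}
# Trapezoidal area under this toy precision-recall curve: 0.36 + 0.21 + 0.165 = 0.735
print(compute_prc_auc_from_curve(toy_curve))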

View File

@@ -0,0 +1,306 @@
# ae_losses_table_from_df.py
from __future__ import annotations
import shutil
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import polars as pl
# CHANGE THIS IMPORT IF YOUR LOADER MODULE IS NAMED DIFFERENTLY
from load_results import load_pretraining_results_dataframe
# ----------------------------
# Config
# ----------------------------
ROOT = Path("/home/fedex/mt/results/copy") # experiments root you pass to the loader
OUTPUT_DIR = Path("/home/fedex/mt/plots/results_ae_table")
# Which label field to use from the DF; "labels_exp_based" or "labels_manual_based"
LABEL_FIELD = "labels_exp_based"
# Which architectures to include (labels must match canonicalize_network)
WANTED_NETS = {"LeNet", "Efficient"}
# Formatting
DECIMALS = 4 # how many decimals to display for losses
BOLD_BEST = False # set True to bold per-group best (lower is better)
LOWER_IS_BETTER = True # for losses we want the minimum
# ----------------------------
# Helpers (ported/minified from your plotting script)
# ----------------------------
def canonicalize_network(name: str) -> str:
low = (name or "").lower()
if "lenet" in low:
return "LeNet"
if "efficient" in low:
return "Efficient"
return name or "unknown"
def calculate_batch_mean_loss(scores: np.ndarray, batch_size: int) -> float:
n = len(scores)
if n == 0:
return np.nan
if batch_size <= 0:
batch_size = n
n_batches = (n + batch_size - 1) // batch_size
acc = 0.0
for i in range(0, n, batch_size):
acc += float(np.mean(scores[i : i + batch_size]))
return acc / n_batches
def extract_batch_size(cfg_json: str) -> int:
import json
try:
cfg = json.loads(cfg_json) if cfg_json else {}
except Exception:
cfg = {}
return int(cfg.get("ae_batch_size") or cfg.get("batch_size") or 256)
@dataclass(frozen=True)
class Cell:
mean: float | None
std: float | None
def _fmt(mean: float | None) -> str:
return "--" if (mean is None or not (mean == mean)) else f"{mean:.{DECIMALS}f}"
def _bold_mask_display(
values: List[float | None], decimals: int, lower_is_better: bool
) -> List[bool]:
"""
Tie-aware bolding mask based on *displayed* precision.
For losses, lower is better (min). For metrics where higher is better, set lower_is_better=False.
"""
def disp(v: float | None) -> float | None:
if v is None or not (v == v):
return None
# use string → float to match display rounding exactly
return float(f"{v:.{decimals}f}")
rounded = [disp(v) for v in values]
finite = [v for v in rounded if v is not None]
if not finite:
return [False] * len(values)
target = min(finite) if lower_is_better else max(finite)
return [(v is not None and v == target) for v in rounded]
# ----------------------------
# Core
# ----------------------------
def build_losses_table_from_df(
df: pl.DataFrame, label_field: str
) -> Tuple[str, float | None]:
"""
Build a LaTeX table showing Overall loss (LeNet, Efficient) and Anomaly loss (LeNet, Efficient)
with one row per latent dimension. Returns (latex_table_string, max_std_overall).
"""
# Basic validation
required_cols = {"scores", "network", "latent_dim"}
missing = required_cols - set(df.columns)
if missing:
raise ValueError(f"Missing required columns in AE dataframe: {missing}")
if label_field not in df.columns:
raise ValueError(f"Expected '{label_field}' column in AE dataframe.")
# Canonicalize nets, compute per-row overall/anomaly losses
rows: List[dict] = []
for row in df.iter_rows(named=True):
net = canonicalize_network(row["network"])
if WANTED_NETS and net not in WANTED_NETS:
continue
dim = int(row["latent_dim"])
batch_size = extract_batch_size(row.get("config_json"))
scores = np.asarray(row["scores"] or [], dtype=float)
labels = row.get(label_field)
labels = np.asarray(labels, dtype=int) if labels is not None else None
overall_loss = calculate_batch_mean_loss(scores, batch_size)
anomaly_loss = np.nan
if labels is not None and labels.size == scores.size:
anomaly_scores = scores[labels == -1]
if anomaly_scores.size > 0:
anomaly_loss = calculate_batch_mean_loss(anomaly_scores, batch_size)
rows.append(
{
"net": net,
"latent_dim": dim,
"overall": overall_loss,
"anomaly": anomaly_loss,
}
)
if not rows:
raise ValueError(
"No rows available after filtering; check WANTED_NETS or input data."
)
df2 = pl.DataFrame(rows)
# Aggregate across folds per (net, latent_dim)
agg = df2.group_by(["net", "latent_dim"]).agg(
pl.col("overall").mean().alias("overall_mean"),
pl.col("overall").std().alias("overall_std"),
pl.col("anomaly").mean().alias("anomaly_mean"),
pl.col("anomaly").std().alias("anomaly_std"),
)
# Collect union of dims across both nets
dims = sorted(set(agg.get_column("latent_dim").to_list()))
# Build lookup
keymap: Dict[Tuple[str, int], Cell] = {}
keymap_anom: Dict[Tuple[str, int], Cell] = {}
max_std: float | None = None
def push_std(v: float | None):
nonlocal max_std
if v is None or not (v == v):
return
if max_std is None or v > max_std:
max_std = v
for r in agg.iter_rows(named=True):
k = (r["net"], int(r["latent_dim"]))
keymap[k] = Cell(r.get("overall_mean"), r.get("overall_std"))
keymap_anom[k] = Cell(r.get("anomaly_mean"), r.get("anomaly_std"))
push_std(r.get("overall_std"))
push_std(r.get("anomaly_std"))
# Ensure nets order consistent
nets_order = ["LeNet", "Efficient"]
nets_present = [n for n in nets_order if any(k[0] == n for k in keymap.keys())]
if not nets_present:
nets_present = sorted({k[0] for k in keymap.keys()})
# Build LaTeX table
header_left = [r"LeNet", r"Efficient"]
header_right = [r"LeNet", r"Efficient"]
lines: List[str] = []
lines.append(r"\begin{table}[t]")
lines.append(r"\centering")
lines.append(r"\setlength{\tabcolsep}{4pt}")
lines.append(r"\renewcommand{\arraystretch}{1.2}")
# vertical bar between the two groups
lines.append(r"\begin{tabularx}{\textwidth}{c*{2}{Y}|*{2}{Y}}")
lines.append(r"\toprule")
lines.append(
r" & \multicolumn{2}{c}{Overall loss} & \multicolumn{2}{c}{Anomaly loss} \\"
)
lines.append(r"\cmidrule(lr){2-3} \cmidrule(lr){4-5}")
lines.append(
r"Latent Dim. & "
+ " & ".join(header_left)
+ " & "
+ " & ".join(header_right)
+ r" \\"
)
lines.append(r"\midrule")
for d in dims:
# Gather values in order: Overall (LeNet, Efficient), Anomaly (LeNet, Efficient)
overall_vals = [keymap.get((n, d), Cell(None, None)).mean for n in nets_present]
anomaly_vals = [
keymap_anom.get((n, d), Cell(None, None)).mean for n in nets_present
]
overall_strs = [_fmt(v) for v in overall_vals]
anomaly_strs = [_fmt(v) for v in anomaly_vals]
if BOLD_BEST:
mask_overall = _bold_mask_display(overall_vals, DECIMALS, LOWER_IS_BETTER)
mask_anom = _bold_mask_display(anomaly_vals, DECIMALS, LOWER_IS_BETTER)
overall_strs = [
(r"\textbf{" + s + "}") if (m and s != "--") else s
for s, m in zip(overall_strs, mask_overall)
]
anomaly_strs = [
(r"\textbf{" + s + "}") if (m and s != "--") else s
for s, m in zip(anomaly_strs, mask_anom)
]
lines.append(
f"{d} & "
+ " & ".join(overall_strs)
+ " & "
+ " & ".join(anomaly_strs)
+ r" \\"
)
lines.append(r"\bottomrule")
lines.append(r"\end{tabularx}")
max_std_str = "n/a" if max_std is None else f"{max_std:.{DECIMALS}f}"
lines.append(
rf"\caption{{Autoencoder pre-training MSE losses (test split) across latent dimensions. "
rf"Left: overall loss; Right: anomaly-only loss. "
rf"Cells show means across folds (no $\pm$std). "
rf"Maximum observed standard deviation across all cells (not shown): {max_std_str}.}}"
)
lines.append(r"\end{table}")
return "\n".join(lines), max_std
# ----------------------------
# Entry
# ----------------------------
def main():
df = load_pretraining_results_dataframe(ROOT, allow_cache=True)
# Build LaTeX table
tex, max_std = build_losses_table_from_df(df, LABEL_FIELD)
# Output dirs
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
ts_dir = OUTPUT_DIR / "archive" / datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ts_dir.mkdir(parents=True, exist_ok=True)
out_name = "ae_pretraining_losses_table.tex"
out_path = ts_dir / out_name
out_path.write_text(tex, encoding="utf-8")
# Save a copy of this script
script_path = Path(__file__)
try:
shutil.copy2(script_path, ts_dir / script_path.name)
except Exception:
pass
# Mirror latest
latest = OUTPUT_DIR / "latest"
latest.mkdir(parents=True, exist_ok=True)
# Clear
for f in latest.iterdir():
if f.is_file():
f.unlink()
# Copy
for f in ts_dir.iterdir():
if f.is_file():
shutil.copy2(f, latest / f.name)
print(f"Saved table to: {ts_dir}")
print(f"Also updated: {latest}")
print(f" - {out_name}")
if __name__ == "__main__":
main()
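To see why the table reports batch means rather than a plain mean, here is a standalone illustration of the averaging performed by calculate_batch_mean_loss (arbitrary numbers, not thesis data): with a partial final batch, the mean of per-batch means differs from the mean over all samples.

import numpy as np

scores = np.array([1.0, 2.0, 3.0, 4.0, 10.0])
batch_size = 2
# Per-batch means are [1.5, 3.5, 10.0]; calculate_batch_mean_loss averages these.
per_batch = [scores[i:i + batch_size].mean() for i in range(0, len(scores), batch_size)]
print(np.mean(per_batch))  # 5.0
print(scores.mean())       # 4.0, the plain mean over all samples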

View File

@@ -0,0 +1,273 @@
#!/usr/bin/env python3
from __future__ import annotations
import shutil
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import polars as pl
from matplotlib.ticker import MaxNLocator
# =========================
# Config
# =========================
ROOT = Path("/home/fedex/mt/results/copy")
OUTPUT_DIR = Path("/home/fedex/mt/plots/results_ap_over_latent")
# Labeling regimes (shown as separate subplots)
SEMI_LABELING_REGIMES: list[tuple[int, int]] = [(0, 0), (50, 10), (500, 100)]
# Evaluations: separate figure per eval
EVALS: list[str] = ["exp_based", "manual_based"]
# X-axis (latent dims)
LATENT_DIMS: list[int] = [32, 64, 128, 256, 512, 768, 1024]
# Visual style
FIGSIZE = (8, 8) # one tall figure with 3 compact subplots
MARKERSIZE = 7
SCATTER_ALPHA = 0.95
LINEWIDTH = 2.0
TREND_LINEWIDTH = 2.2
BAND_ALPHA = 0.18
# Toggle: show ±1 std bands (k-fold variability)
SHOW_STD_BANDS = True # <<< set to False to hide the bands
# Colors for the two DeepSAD backbones
COLOR_LENET = "#1f77b4" # blue
COLOR_EFFICIENT = "#ff7f0e" # orange
# =========================
# Loader
# =========================
from load_results import load_results_dataframe
# =========================
# Helpers
# =========================
def _with_net_label(df: pl.DataFrame) -> pl.DataFrame:
return df.with_columns(
pl.when(
pl.col("network").cast(pl.Utf8).str.to_lowercase().str.contains("lenet")
)
.then(pl.lit("LeNet"))
.when(
pl.col("network").cast(pl.Utf8).str.to_lowercase().str.contains("efficient")
)
.then(pl.lit("Efficient"))
.otherwise(pl.col("network").cast(pl.Utf8))
.alias("net_label")
)
def _filter_deepsad(df: pl.DataFrame) -> pl.DataFrame:
return df.filter(
(pl.col("model") == "deepsad")
& (pl.col("eval").is_in(EVALS))
& (pl.col("latent_dim").is_in(LATENT_DIMS))
& (pl.col("net_label").is_in(["LeNet", "Efficient"]))
).select(
"eval",
"net_label",
"latent_dim",
"semi_normals",
"semi_anomalous",
"fold",
"ap",
)
@dataclass(frozen=True)
class Agg:
mean: float
std: float
def aggregate_ap(df: pl.DataFrame) -> Dict[Tuple[str, str, int, int, int], Agg]:
out: Dict[Tuple[str, str, int, int, int], Agg] = {}
gb = (
df.group_by(
["eval", "net_label", "latent_dim", "semi_normals", "semi_anomalous"]
)
.agg(pl.col("ap").mean().alias("mean"), pl.col("ap").std().alias("std"))
.to_dicts()
)
for row in gb:
key = (
str(row["eval"]),
str(row["net_label"]),
int(row["latent_dim"]),
int(row["semi_normals"]),
int(row["semi_anomalous"]),
)
m = float(row["mean"]) if row["mean"] == row["mean"] else np.nan
s = float(row["std"]) if row["std"] == row["std"] else np.nan
out[key] = Agg(mean=m, std=s)
return out
def _lin_trend(xs: List[int], ys: List[float]) -> Tuple[np.ndarray, np.ndarray]:
if len(xs) < 2:
return np.array(xs, dtype=float), np.array(ys, dtype=float)
x = np.array(xs, dtype=float)
y = np.array(ys, dtype=float)
a, b = np.polyfit(x, y, 1)
x_fit = np.linspace(x.min(), x.max(), 200)
y_fit = a * x_fit + b
return x_fit, y_fit
def _dynamic_ylim(all_vals: List[float], all_errs: List[float]) -> Tuple[float, float]:
vals = np.array(all_vals, dtype=float)
errs = np.array(all_errs, dtype=float) if SHOW_STD_BANDS else np.zeros_like(vals)
valid = np.isfinite(vals)
if not np.any(valid):
return (0.0, 1.0)
v, e = vals[valid], errs[valid]
lo = np.min(v - e)
hi = np.max(v + e)
span = max(1e-3, hi - lo)
pad = 0.08 * span
y0 = max(0.0, lo - pad)
y1 = min(1.0, hi + pad)
if (y1 - y0) < 0.08:
mid = 0.5 * (y0 + y1)
y0 = max(0.0, mid - 0.04)
y1 = min(1.0, mid + 0.04)
return (float(y0), float(y1))
def _get_dim_mapping(dims: list[int]) -> dict[int, int]:
"""Map actual dimensions to evenly spaced positions (0, 1, 2, ...)"""
return {dim: i for i, dim in enumerate(dims)}
def plot_eval(ev: str, agg: Dict[Tuple[str, str, int, int, int], Agg], outdir: Path):
fig, axes = plt.subplots(
len(SEMI_LABELING_REGIMES),
1,
figsize=FIGSIZE,
constrained_layout=True,
sharex=True,
)
if len(SEMI_LABELING_REGIMES) == 1:
axes = [axes]
# Create dimension mapping
dim_mapping = _get_dim_mapping(LATENT_DIMS)
for ax, regime in zip(axes, SEMI_LABELING_REGIMES):
semi_n, semi_a = regime
data = {}
for net in ["LeNet", "Efficient"]:
xs, ys, es = [], [], []
for dim in LATENT_DIMS:
key = (ev, net, dim, semi_n, semi_a)
if key in agg:
xs.append(
dim_mapping[dim]
) # Use mapped position instead of actual dim
ys.append(agg[key].mean)
es.append(agg[key].std)
data[net] = (xs, ys, es)
for net, color in [("LeNet", COLOR_LENET), ("Efficient", COLOR_EFFICIENT)]:
xs, ys, es = data[net]
if not xs:
continue
# Set evenly spaced ticks with actual dimension labels
ax.set_xticks(list(dim_mapping.values()))
ax.set_xticklabels(LATENT_DIMS)
ax.yaxis.set_major_locator(MaxNLocator(nbins=5))
ax.scatter(
xs, ys, s=35, color=color, alpha=SCATTER_ALPHA, label=f"{net} (points)"
)
x_fit, y_fit = _lin_trend(xs, ys) # Now using mapped positions
ax.plot(
x_fit,
y_fit,
color=color,
linewidth=TREND_LINEWIDTH,
label=f"{net} (trend)",
)
if SHOW_STD_BANDS and es and np.any(np.isfinite(es)):
ylo = np.clip(np.array(ys) - np.array(es), 0.0, 1.0)
yhi = np.clip(np.array(ys) + np.array(es), 0.0, 1.0)
ax.fill_between(
xs, ylo, yhi, color=color, alpha=BAND_ALPHA, linewidth=0
)
all_vals, all_errs = [], []
for net in ["LeNet", "Efficient"]:
_, ys, es = data[net]
all_vals.extend(ys)
all_errs.extend(es)
y0, y1 = _dynamic_ylim(all_vals, all_errs)
ax.set_ylim(y0, y1)
ax.set_title(f"Labeling regime {semi_n}/{semi_a}", fontsize=11)
ax.grid(True, alpha=0.35)
axes[-1].set_xlabel("Latent dimension")
for ax in axes:
ax.set_ylabel("AP")
handles, labels = axes[0].get_legend_handles_labels()
fig.legend(handles, labels, ncol=2, loc="upper center", bbox_to_anchor=(0.75, 0.97))
fig.suptitle(f"AP vs. Latent Dimensionality — {ev.replace('_', ' ')}", y=1.05)
fname = f"ap_trends_{ev}.png"
fig.savefig(outdir / fname, dpi=150)
plt.close(fig)
def plot_all(agg: Dict[Tuple[str, str, int, int, int], Agg], outdir: Path):
outdir.mkdir(parents=True, exist_ok=True)
for ev in EVALS:
plot_eval(ev, agg, outdir)
def main():
df = load_results_dataframe(ROOT, allow_cache=True)
df = _with_net_label(df)
df = _filter_deepsad(df)
agg = aggregate_ap(df)
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
archive_dir = OUTPUT_DIR / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
ts_dir = archive_dir / datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ts_dir.mkdir(parents=True, exist_ok=True)
plot_all(agg, ts_dir)
try:
script_path = Path(__file__)
shutil.copy2(script_path, ts_dir / script_path.name)
except Exception:
pass
latest = OUTPUT_DIR / "latest"
latest.mkdir(parents=True, exist_ok=True)
for f in latest.iterdir():
if f.is_file():
f.unlink()
for f in ts_dir.iterdir():
if f.is_file():
shutil.copy2(f, latest / f.name)
print(f"Saved plots to: {ts_dir}")
print(f"Also updated: {latest}")
if __name__ == "__main__":
main()
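The even-spacing trick in _get_dim_mapping above amounts to plotting points at consecutive integer positions and labeling the ticks with the true latent dimensions, so the widely spread dims (32 to 1024) do not bunch up on a linear axis. A minimal sketch:

dims = [32, 64, 128, 256, 512, 768, 1024]
positions = {d: i for i, d in enumerate(dims)}  # 32 -> 0, 64 -> 1, ..., 1024 -> 6
# ax.set_xticks(list(positions.values())); ax.set_xticklabels(dims)
print(positions[512])  # 4: the point for latent dim 512 is drawn at x = 4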

View File

@@ -0,0 +1,260 @@
#!/usr/bin/env python3
from __future__ import annotations
import shutil
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import polars as pl
from matplotlib.ticker import MaxNLocator
# =========================
# Config
# =========================
ROOT = Path("/home/fedex/mt/results/copy")
OUTPUT_DIR = Path("/home/fedex/mt/plots/results_ap_over_semi")
# Labeling regimes (shown as separate subplots)
SEMI_LABELING_REGIMES: list[tuple[int, int]] = [(0, 0), (50, 10), (500, 100)]
# Evaluations: separate figure per eval
EVALS: list[str] = ["exp_based", "manual_based"]
# X-axis (latent dims)
LATENT_DIMS: list[int] = [32, 64, 128, 256, 512, 768, 1024]
LATENT_DIM: list[int] = [32, 64, 128, 256, 512, 768, 1024]
# Visual style
FIGSIZE = (8, 8) # one tall figure with 3 compact subplots
MARKERSIZE = 7
SCATTER_ALPHA = 0.95
LINEWIDTH = 2.0
TREND_LINEWIDTH = 2.2
BAND_ALPHA = 0.18
# Toggle: show ±1 std bands (k-fold variability)
SHOW_STD_BANDS = True # <<< set to False to hide the bands
# Colors for the two DeepSAD backbones
COLOR_LENET = "#1f77b4" # blue
COLOR_EFFICIENT = "#ff7f0e" # orange
# =========================
# Loader
# =========================
from load_results import load_results_dataframe
# =========================
# Helpers
# =========================
def _with_net_label(df: pl.DataFrame) -> pl.DataFrame:
return df.with_columns(
pl.when(
pl.col("network").cast(pl.Utf8).str.to_lowercase().str.contains("lenet")
)
.then(pl.lit("LeNet"))
.when(
pl.col("network").cast(pl.Utf8).str.to_lowercase().str.contains("efficient")
)
.then(pl.lit("Efficient"))
.otherwise(pl.col("network").cast(pl.Utf8))
.alias("net_label")
)
def _filter_deepsad(df: pl.DataFrame) -> pl.DataFrame:
return df.filter(
(pl.col("model") == "deepsad")
& (pl.col("eval").is_in(EVALS))
& (pl.col("latent_dim").is_in(LATENT_DIMS))
& (pl.col("net_label").is_in(["LeNet", "Efficient"]))
).select(
"eval",
"net_label",
"latent_dim",
"semi_normals",
"semi_anomalous",
"fold",
"ap",
)
@dataclass(frozen=True)
class Agg:
mean: float
std: float
def aggregate_ap(df: pl.DataFrame) -> Dict[Tuple[str, str, int, int, int], Agg]:
out: Dict[Tuple[str, str, int, int, int], Agg] = {}
gb = (
df.group_by(
["eval", "net_label", "latent_dim", "semi_normals", "semi_anomalous"]
)
.agg(pl.col("ap").mean().alias("mean"), pl.col("ap").std().alias("std"))
.to_dicts()
)
for row in gb:
key = (
str(row["eval"]),
str(row["net_label"]),
int(row["latent_dim"]),
int(row["semi_normals"]),
int(row["semi_anomalous"]),
)
m = float(row["mean"]) if row["mean"] == row["mean"] else np.nan
s = float(row["std"]) if row["std"] == row["std"] else np.nan
out[key] = Agg(mean=m, std=s)
return out
def _lin_trend(xs: List[int], ys: List[float]) -> Tuple[np.ndarray, np.ndarray]:
if len(xs) < 2:
return np.array(xs, dtype=float), np.array(ys, dtype=float)
x = np.array(xs, dtype=float)
y = np.array(ys, dtype=float)
a, b = np.polyfit(x, y, 1)
x_fit = np.linspace(x.min(), x.max(), 200)
y_fit = a * x_fit + b
return x_fit, y_fit
def _dynamic_ylim(all_vals: List[float], all_errs: List[float]) -> Tuple[float, float]:
vals = np.array(all_vals, dtype=float)
errs = np.array(all_errs, dtype=float) if SHOW_STD_BANDS else np.zeros_like(vals)
valid = np.isfinite(vals)
if not np.any(valid):
return (0.0, 1.0)
v, e = vals[valid], errs[valid]
lo = np.min(v - e)
hi = np.max(v + e)
span = max(1e-3, hi - lo)
pad = 0.08 * span
y0 = max(0.0, lo - pad)
y1 = min(1.0, hi + pad)
if (y1 - y0) < 0.08:
mid = 0.5 * (y0 + y1)
y0 = max(0.0, mid - 0.04)
y1 = min(1.0, mid + 0.04)
return (float(y0), float(y1))
def plot_eval(ev: str, agg: Dict[Tuple[str, str, int, int, int], Agg], outdir: Path):
fig, axes = plt.subplots(
len(SEMI_LABELING_REGIMES),
1,
figsize=FIGSIZE,
constrained_layout=True,
sharex=True,
)
if len(SEMI_LABELING_REGIMES) == 1:
axes = [axes]
for ax, regime in zip(axes, SEMI_LABELING_REGIMES):
semi_n, semi_a = regime
data = {}
for net in ["LeNet", "Efficient"]:
xs, ys, es = [], [], []
for dim in LATENT_DIMS:
key = (ev, net, dim, semi_n, semi_a)
if key in agg:
xs.append(dim)
ys.append(agg[key].mean)
es.append(agg[key].std)
data[net] = (xs, ys, es)
for net, color in [("LeNet", COLOR_LENET), ("Efficient", COLOR_EFFICIENT)]:
xs, ys, es = data[net]
if not xs:
continue
ax.set_xticks(LATENT_DIMS)
ax.yaxis.set_major_locator(MaxNLocator(nbins=5)) # e.g., always 5 ticks
ax.scatter(
xs, ys, s=35, color=color, alpha=SCATTER_ALPHA, label=f"{net} (points)"
)
x_fit, y_fit = _lin_trend(xs, ys)
ax.plot(
x_fit,
y_fit,
color=color,
linewidth=TREND_LINEWIDTH,
label=f"{net} (trend)",
)
if SHOW_STD_BANDS and es and np.any(np.isfinite(es)):
ylo = np.clip(np.array(ys) - np.array(es), 0.0, 1.0)
yhi = np.clip(np.array(ys) + np.array(es), 0.0, 1.0)
ax.fill_between(
xs, ylo, yhi, color=color, alpha=BAND_ALPHA, linewidth=0
)
all_vals, all_errs = [], []
for net in ["LeNet", "Efficient"]:
_, ys, es = data[net]
all_vals.extend(ys)
all_errs.extend(es)
y0, y1 = _dynamic_ylim(all_vals, all_errs)
ax.set_ylim(y0, y1)
ax.set_title(f"Labeling regime {semi_n}/{semi_a}", fontsize=11)
ax.grid(True, alpha=0.35)
axes[-1].set_xlabel("Latent dimension")
for ax in axes:
ax.set_ylabel("AP")
handles, labels = axes[0].get_legend_handles_labels()
fig.legend(handles, labels, ncol=2, loc="upper center", bbox_to_anchor=(0.75, 0.97))
fig.suptitle(f"AP vs. Latent Dimensionality — {ev.replace('_', ' ')}", y=1.05)
fname = f"ap_trends_{ev}.png"
fig.savefig(outdir / fname, dpi=150)
plt.close(fig)
def plot_all(agg: Dict[Tuple[str, str, int, int, int], Agg], outdir: Path):
outdir.mkdir(parents=True, exist_ok=True)
for ev in EVALS:
plot_eval(ev, agg, outdir)
def main():
df = load_results_dataframe(ROOT, allow_cache=True)
df = _with_net_label(df)
df = _filter_deepsad(df)
agg = aggregate_ap(df)
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
archive_dir = OUTPUT_DIR / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
ts_dir = archive_dir / datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ts_dir.mkdir(parents=True, exist_ok=True)
plot_all(agg, ts_dir)
try:
script_path = Path(__file__)
shutil.copy2(script_path, ts_dir / script_path.name)
except Exception:
pass
latest = OUTPUT_DIR / "latest"
latest.mkdir(parents=True, exist_ok=True)
for f in latest.iterdir():
if f.is_file():
f.unlink()
for f in ts_dir.iterdir():
if f.is_file():
shutil.copy2(f, latest / f.name)
print(f"Saved plots to: {ts_dir}")
print(f"Also updated: {latest}")
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,304 @@
import json
import pickle
import shutil
from datetime import datetime
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
# =========================
# User-configurable params
# =========================
# Single experiment to plot (stem of the .bag file, e.g. "3_smoke_human_walking_2023-01-23")
EXPERIMENT_NAME = "3_smoke_human_walking_2023-01-23"
# Directory that contains {EXPERIMENT_NAME}_{method}_scores.npy for methods in {"deepsad","ocsvm","isoforest"}
# Adjust this to where you save your per-method scores.
methods_scores_path = Path(
"/home/fedex/mt/projects/thesis-kowalczyk-jan/Deep-SAD-PyTorch/infer/DeepSAD/test/inference"
)
# Root data path containing .bag files used to build the cached stats
all_data_path = Path("/home/fedex/mt/data/subter")
# Output base directory (timestamped subfolder will be created here, then archived and copied to "latest/")
output_path = Path("/home/fedex/mt/plots/results_inference_timeline")
# Cache (stats + labels) directory — same as your original script
cache_path = output_path
# Assumed LiDAR frame resolution to convert counts -> percent (unchanged from original)
data_resolution = 32 * 2048
# Frames per second for x-axis time
FPS = 10.0
# Whether to try to align score sign so that higher = more degraded.
# If manual labels exist for this experiment, alignment uses anomaly window mean vs. outside.
ALIGN_SCORE_DIRECTION = True
# =========================
# Setup output folders
# =========================
datetime_folder_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
latest_folder_path = output_path / "latest"
archive_folder_path = output_path / "archive"
output_datetime_path = output_path / datetime_folder_name
output_path.mkdir(exist_ok=True, parents=True)
output_datetime_path.mkdir(exist_ok=True, parents=True)
latest_folder_path.mkdir(exist_ok=True, parents=True)
archive_folder_path.mkdir(exist_ok=True, parents=True)
# =========================
# Discover experiments to reconstruct indices consistent with caches
# =========================
normal_experiment_paths, anomaly_experiment_paths = [], []
if not all_data_path.exists():
raise FileNotFoundError(f"all_data_path does not exist: {all_data_path}")
for bag_file_path in all_data_path.iterdir():
if bag_file_path.suffix != ".bag":
continue
if "smoke" in bag_file_path.name:
anomaly_experiment_paths.append(bag_file_path)
else:
normal_experiment_paths.append(bag_file_path)
# Sort by filesize to match original ordering used when caches were generated
normal_experiment_paths = sorted(
normal_experiment_paths, key=lambda p: p.stat().st_size
)
anomaly_experiment_paths = sorted(
anomaly_experiment_paths, key=lambda p: p.stat().st_size
)
# Find the path for the requested experiment
exp_path = None
exp_is_anomaly = None
for p in anomaly_experiment_paths:
if p.stem == EXPERIMENT_NAME:
exp_path = p
exp_is_anomaly = True
break
if exp_path is None:
for p in normal_experiment_paths:
if p.stem == EXPERIMENT_NAME:
exp_path = p
exp_is_anomaly = False
break
if exp_path is None:
raise FileNotFoundError(
f"Experiment '{EXPERIMENT_NAME}' not found as a .bag in {all_data_path}"
)
# Get the index within the appropriate list
if exp_is_anomaly:
exp_index = anomaly_experiment_paths.index(exp_path)
else:
exp_index = normal_experiment_paths.index(exp_path)
# =========================
# Load cached statistical data
# =========================
missing_points_cache = Path(cache_path / "missing_points.pkl")
near_sensor_cache = Path(cache_path / "particles_near_sensor_counts_500.pkl")
if not missing_points_cache.exists():
raise FileNotFoundError(f"Missing points cache not found: {missing_points_cache}")
if not near_sensor_cache.exists():
raise FileNotFoundError(f"Near-sensor cache not found: {near_sensor_cache}")
with open(missing_points_cache, "rb") as f:
missing_points_normal, missing_points_anomaly = pickle.load(f)
with open(near_sensor_cache, "rb") as f:
near_sensor_normal, near_sensor_anomaly = pickle.load(f)
if exp_is_anomaly:
missing_points_series = np.asarray(missing_points_anomaly[exp_index], dtype=float)
near_sensor_series = np.asarray(near_sensor_anomaly[exp_index], dtype=float)
else:
missing_points_series = np.asarray(missing_points_normal[exp_index], dtype=float)
near_sensor_series = np.asarray(near_sensor_normal[exp_index], dtype=float)
# Convert counts to percentages of total points
missing_points_pct = (missing_points_series / data_resolution) * 100.0
near_sensor_pct = (near_sensor_series / data_resolution) * 100.0
# =========================
# Load manual anomaly frame borders (optional; used for sign alignment + vertical markers)
# =========================
manually_labeled_anomaly_frames = {}
labels_json_path = cache_path / "manually_labeled_anomaly_frames.json"
if labels_json_path.exists():
with open(labels_json_path, "r") as frame_borders_file:
manually_labeled_anomaly_frames_json = json.load(frame_borders_file)
for file in manually_labeled_anomaly_frames_json.get("files", []):
manually_labeled_anomaly_frames[file["filename"]] = (
file.get("semi_target_begin_frame", None),
file.get("semi_target_end_frame", None),
)
# The JSON uses .npy filenames (as in the original script). Build this experiment's key.
exp_npy_filename = exp_path.with_suffix(".npy").name
anomaly_window = manually_labeled_anomaly_frames.get(exp_npy_filename, (None, None))
# =========================
# Load method scores and z-score normalize per method
# =========================
def zscore_1d(x: np.ndarray, eps=1e-12):
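"""Standardize a 1D array to zero mean and unit variance; returns all zeros if the std is (near) zero."""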
x = np.asarray(x, dtype=float)
mu = np.mean(x)
sigma = np.std(x, ddof=0)
if sigma < eps:
return np.zeros_like(x)
return (x - mu) / sigma
def maybe_align_direction(z: np.ndarray, window):
"""Flip sign so that the anomaly window mean is higher than the outside mean, if labels exist."""
start, end = window
if start is None or end is None:
return z # no labels → leave as-is
start = int(max(0, start))
end = int(min(len(z), end))
if end <= start or end > len(z):
return z
inside_mean = float(np.mean(z[start:end]))
# outside: everything except [start:end]; handle edge cases
if start == 0 and end == len(z):
return z
outside_parts = []
if start > 0:
outside_parts.append(z[:start])
if end < len(z):
outside_parts.append(z[end:])
if not outside_parts:
return z
outside_mean = float(np.mean(np.concatenate(outside_parts)))
return z if inside_mean >= outside_mean else -z
methods = ["deepsad", "ocsvm", "isoforest"]
method_scores = {}
method_zscores = {}
if not methods_scores_path.exists():
raise FileNotFoundError(
f"Methods scores path does not exist: {methods_scores_path}"
)
for m in methods:
file_path = methods_scores_path / f"{EXPERIMENT_NAME}_{m}_scores.npy"
if not file_path.exists():
raise FileNotFoundError(f"Missing scores file for method '{m}': {file_path}")
s = np.load(file_path)
s = np.asarray(s, dtype=float).reshape(-1)
# If needed, truncate to match the stats length (lengths should match if everything was generated consistently)
n = min(len(s), len(missing_points_pct))
if len(s) != len(missing_points_pct):
# Align by truncation to the shortest length
s = s[:n]
# Also truncate stats to match
missing_points_pct = missing_points_pct[:n]
near_sensor_pct = near_sensor_pct[:n]
z = zscore_1d(s)
if ALIGN_SCORE_DIRECTION:
z = maybe_align_direction(z, anomaly_window)
method_scores[m] = s
method_zscores[m] = z
# Common time axis in seconds
num_frames = len(missing_points_pct)
t = np.arange(num_frames) / FPS
# =========================
# Plot 1: Missing points (%) vs. method z-scores
# =========================
fig1, axz1 = plt.subplots(figsize=(14, 6), constrained_layout=True)
axy1 = axz1.twinx()
# plot z-scores
for m in methods:
axz1.plot(t, method_zscores[m], label=f"{m} (z)", alpha=0.9)
# plot missing points (%)
axy1.plot(t, missing_points_pct, linestyle="--", alpha=0.7, label="Missing points (%)")
# vertical markers for anomaly window if available
start, end = anomaly_window
if start is not None and end is not None and 0 <= start < end <= num_frames:
axz1.axvline(x=start / FPS, linestyle=":", alpha=0.6)
axz1.axvline(x=end / FPS, linestyle=":", alpha=0.6)
axz1.set_xlabel("Time (s)")
axz1.set_ylabel("Anomaly score (z-score, ↑ = more degraded)")
axy1.set_ylabel("Missing points (%)")
axz1.set_title(f"{EXPERIMENT_NAME}\nDegradation vs. Missing Points")
# Build a combined legend
lines1, labels1 = axz1.get_legend_handles_labels()
lines2, labels2 = axy1.get_legend_handles_labels()
axz1.legend(lines1 + lines2, labels1 + labels2, loc="upper right")
axz1.grid(True, alpha=0.3)
fig1.savefig(
output_datetime_path / f"{EXPERIMENT_NAME}_zscores_vs_missing_points.png", dpi=150
)
plt.close(fig1)
# =========================
# Plot 2: Near-sensor (%) vs. method z-scores
# =========================
fig2, axz2 = plt.subplots(figsize=(14, 6), constrained_layout=True)
axy2 = axz2.twinx()
for m in methods:
axz2.plot(t, method_zscores[m], label=f"{m} (z)", alpha=0.9)
axy2.plot(t, near_sensor_pct, linestyle="--", alpha=0.7, label="Near-sensor <0.5m (%)")
start, end = anomaly_window
if start is not None and end is not None and 0 <= start < end <= num_frames:
axz2.axvline(x=start / FPS, linestyle=":", alpha=0.6)
axz2.axvline(x=end / FPS, linestyle=":", alpha=0.6)
axz2.set_xlabel("Time (s)")
axz2.set_ylabel("Anomaly score (z-score, ↑ = more degraded)")
axy2.set_ylabel("Near-sensor points (%)")
axz2.set_title(f"{EXPERIMENT_NAME}\nDegradation vs. Near-Sensor Points (<0.5 m)")
lines1, labels1 = axz2.get_legend_handles_labels()
lines2, labels2 = axy2.get_legend_handles_labels()
axz2.legend(lines1 + lines2, labels1 + labels2, loc="upper right")
axz2.grid(True, alpha=0.3)
fig2.savefig(
output_datetime_path / f"{EXPERIMENT_NAME}_zscores_vs_near_sensor.png", dpi=150
)
plt.close(fig2)
# =========================
# Preserve latest/, archive/, copy script
# =========================
# delete current latest folder
shutil.rmtree(latest_folder_path, ignore_errors=True)
# create new latest folder
latest_folder_path.mkdir(exist_ok=True, parents=True)
# copy contents of output folder to the latest folder
for file in output_datetime_path.iterdir():
shutil.copy2(file, latest_folder_path)
# copy this python script to preserve the code used
shutil.copy2(__file__, output_datetime_path)
shutil.copy2(__file__, latest_folder_path)
# move output date folder to archive
shutil.move(output_datetime_path, archive_folder_path)
print("Done. Plots saved and archived.")

View File

@@ -0,0 +1,459 @@
import json
import pickle
import shutil
from datetime import datetime
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import polars as pl
# =====================================
# User-configurable params
# =====================================
# Root directory that contains per-run outputs (your loader will scan this)
INFERENCE_ROOT = Path("/home/fedex/mt/results/inference/copy")
# Path that holds cached stats (same as before)
CACHE_PATH = Path("/home/fedex/mt/plots/data_anomalies_timeline")
# Root data path containing .bag files to rebuild ordering (for stats mapping)
ALL_DATA_PATH = Path("/home/fedex/mt/data/subter")
# Output base directory (timestamped subfolder will be created here, then archived and copied to "latest/")
OUTPUT_PATH = Path("/home/fedex/mt/plots/results_inference_timeline_smoothed")
# Frames per second for x-axis time
FPS = 10.0
# ---- Smoothing: EMA only ----
EMA_ALPHA = 0.1  # model scores; valid range (0, 1], smaller = smoother
STATS_EMA_ALPHA = 0.1 # stats (absolute %); tweak independently if desired
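# EMA recurrence used below: y[i] = alpha * x[i] + (1 - alpha) * y[i - 1]; alpha = 0.1 weights the newest frame at 10%.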
# Whether to z-score per-curve for the model methods (recommended)
Z_SCORE_MODELS = True
# If some model's series is longer/shorter than others in a group, align to min length
ALIGN_TO_MIN_LENGTH = True
# Whether to try to align model score sign so that higher = more degraded using manual window
ALIGN_SCORE_DIRECTION = True
# LiDAR points per frame (for stats -> percent)
DATA_RESOLUTION = 32 * 2048
# =====================================
# Setup output folders
# =====================================
datetime_folder_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
latest_folder_path = OUTPUT_PATH / "latest"
archive_folder_path = OUTPUT_PATH / "archive"
output_datetime_path = OUTPUT_PATH / datetime_folder_name
OUTPUT_PATH.mkdir(exist_ok=True, parents=True)
archive_folder_path.mkdir(exist_ok=True, parents=True)
latest_folder_path.mkdir(exist_ok=True, parents=True)
output_datetime_path.mkdir(exist_ok=True, parents=True)
# =====================================
# Load Polars DataFrame via your helper
# =====================================
from load_results import load_inference_results_dataframe
df: pl.DataFrame = load_inference_results_dataframe(INFERENCE_ROOT)
# sanity
expected_cols = {
"experiment",
"network",
"latent_dim",
"semi_normals",
"semi_anomalous",
"model",
"scores",
"folder",
"config_json",
}
missing_cols = expected_cols - set(df.columns)
if missing_cols:
raise KeyError(f"DataFrame missing required columns: {sorted(missing_cols)}")
# =====================================
# Rebuild experiment → stats mapping (like your original)
# =====================================
def rebuild_experiment_index():
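"""Scan ALL_DATA_PATH for .bag files, split them into normal and "smoke" runs, sort each list by file size (to match the cache ordering), and map stem -> (is_anomaly, index, path)."""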
normals, anomalies = [], []
if not ALL_DATA_PATH.exists():
return [], [], {}
for bag in ALL_DATA_PATH.iterdir():
if bag.suffix != ".bag":
continue
if "smoke" in bag.name:
anomalies.append(bag)
else:
normals.append(bag)
normals = sorted(normals, key=lambda p: p.stat().st_size)
anomalies = sorted(anomalies, key=lambda p: p.stat().st_size)
mapping = {}
for i, p in enumerate(normals):
mapping[p.stem] = (False, i, p)
for i, p in enumerate(anomalies):
mapping[p.stem] = (True, i, p)
return normals, anomalies, mapping
normal_paths, anomaly_paths, exp_map = rebuild_experiment_index()
# Load cached statistical data (+ manual labels)
missing_points_cache = CACHE_PATH / "missing_points.pkl"
near_sensor_cache = CACHE_PATH / "particles_near_sensor_counts_500.pkl"
labels_json_path = CACHE_PATH / "manually_labeled_anomaly_frames.json"
missing_points_normal = missing_points_anomaly = None
near_sensor_normal = near_sensor_anomaly = None
if missing_points_cache.exists():
with open(missing_points_cache, "rb") as f:
missing_points_normal, missing_points_anomaly = pickle.load(f)
if near_sensor_cache.exists():
with open(near_sensor_cache, "rb") as f:
near_sensor_normal, near_sensor_anomaly = pickle.load(f)
manual_windows = {}
if labels_json_path.exists():
with open(labels_json_path, "r") as f:
labeled_json = json.load(f)
for file in labeled_json.get("files", []):
manual_windows[file["filename"]] = (
file.get("semi_target_begin_frame"),
file.get("semi_target_end_frame"),
)
def get_stats_for_experiment(exp_name: str):
"""
Returns:
missing_pct (np.ndarray) | None,
near_pct (np.ndarray) | None,
anomaly_window (tuple(start,end)) | (None,None)
"""
if exp_name not in exp_map:
return None, None, (None, None)
is_anomaly, idx, path = exp_map[exp_name]
missing = None
near = None
if missing_points_normal is not None and missing_points_anomaly is not None:
series = (
missing_points_anomaly[idx] if is_anomaly else missing_points_normal[idx]
)
missing = (np.asarray(series, dtype=float) / DATA_RESOLUTION) * 100.0
if near_sensor_normal is not None and near_sensor_anomaly is not None:
series = near_sensor_anomaly[idx] if is_anomaly else near_sensor_normal[idx]
near = (np.asarray(series, dtype=float) / DATA_RESOLUTION) * 100.0
npy_key = path.with_suffix(".npy").name
window = manual_windows.get(npy_key, (None, None))
return missing, near, window
# =====================================
# Helpers
# =====================================
def to_np(a):
"""Convert a Polars list cell to a 1D NumPy array of float."""
if a is None:
return None
return np.asarray(a, dtype=float).ravel()
def zscore_1d(x, eps=1e-12):
if x is None or len(x) == 0:
return x
mu = float(np.mean(x))
sigma = float(np.std(x, ddof=0))
return np.zeros_like(x) if sigma < eps else (x - mu) / sigma
def ema(x, alpha):
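"""Exponential moving average with smoothing factor alpha in (0, 1]; smaller alpha = smoother output."""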
if x is None or len(x) == 0:
return x
y = np.empty_like(x, dtype=float)
y[0] = x[0]
for i in range(1, len(x)):
y[i] = alpha * x[i] + (1 - alpha) * y[i - 1]
return y
def apply_ema_models(x):
return ema(x, EMA_ALPHA)
def apply_ema_stats(x):
return ema(x, STATS_EMA_ALPHA)
def align_lengths(series_dict):
"""Truncate all series to the shortest available length."""
valid_lengths = [
len(v) for v in series_dict.values() if v is not None and len(v) > 0
]
if not valid_lengths:
return series_dict
min_len = min(valid_lengths)
return {k: (v[:min_len] if v is not None else None) for k, v in series_dict.items()}
def maybe_align_direction(z: np.ndarray, window):
"""Flip sign so that the anomaly window mean is higher than the outside mean, if labels exist."""
if z is None:
return z
start, end = window
if start is None or end is None:
return z
start = int(max(0, start))
end = int(min(len(z), end))
if end <= start or end > len(z):
return z
inside_mean = float(np.mean(z[start:end]))
if start == 0 and end == len(z):
return z
outside_parts = []
if start > 0:
outside_parts.append(z[:start])
if end < len(z):
outside_parts.append(z[end:])
if not outside_parts:
return z
outside_mean = float(np.mean(np.concatenate(outside_parts)))
return z if inside_mean >= outside_mean else -z
def safe_title(s: str) -> str:
return s.replace("_", " ")
# =====================================
# Model selection per group (network names updated)
# =====================================
group_cols = ["experiment", "latent_dim", "semi_normals", "semi_anomalous"]
def pick_rows(gdf: pl.DataFrame):
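"""For each display label, pick the first row of this group matching the (network, model) pair, or None if absent."""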
sel = {}
sel["DeepSAD (LeNet)"] = gdf.filter(
(pl.col("network") == "subter_LeNet") & (pl.col("model") == "deepsad")
)
sel["DeepSAD (efficient)"] = gdf.filter(
(pl.col("network") == "subter_efficient") & (pl.col("model") == "deepsad")
)
sel["OCSVM (LeNet)"] = gdf.filter(
(pl.col("network") == "subter_LeNet") & (pl.col("model") == "ocsvm")
)
sel["IsoForest (LeNet)"] = gdf.filter(
(pl.col("network") == "subter_LeNet") & (pl.col("model") == "isoforest")
)
chosen = {}
for k, dfk in sel.items():
chosen[k] = dfk.row(0) if dfk.height > 0 else None
return chosen
# =====================================
# Iterate groups and plot
# =====================================
plots_made = 0
for keys, g in df.group_by(group_cols, maintain_order=True):
experiment, latent_dim, semi_normals, semi_anomalous = keys
chosen = pick_rows(g)
# Extract series for models
curves_raw = {}
for label, row in chosen.items():
if row is None:
curves_raw[label] = None
continue
row_dict = {c: row[i] for i, c in enumerate(df.columns)}
scores = to_np(row_dict["scores"])
curves_raw[label] = scores
# If nothing to plot, skip group
if all(v is None or len(v) == 0 for v in curves_raw.values()):
continue
# Stats for this experiment (absolute %; no z-scoring)
missing_pct, near_pct, anomaly_window = get_stats_for_experiment(experiment)
# Optionally align lengths among model curves
curves = curves_raw.copy()
if ALIGN_TO_MIN_LENGTH:
curves = align_lengths(curves)
# Prepare processed model curves: z-score (if enabled) + EMA smoothing
proc = {}
for k, v in curves.items():
if v is None:
continue
x = zscore_1d(v) if Z_SCORE_MODELS else v.astype(float)
if ALIGN_SCORE_DIRECTION and anomaly_window != (None, None):
x = maybe_align_direction(x, anomaly_window)
x = apply_ema_models(x)
proc[k] = x
if not proc:
continue
# Establish time axis for model curves
any_len = len(next(iter(proc.values())))
t_models = np.arange(any_len) / FPS
# =========== Plot A: Scores-only (models z-scored; stats not shown) ===========
figA, axA = plt.subplots(figsize=(14, 6), constrained_layout=True)
for label, y in proc.items():
if y is not None:
axA.plot(t_models, y, label=label)
axA.set_xlabel("Time (s)")
axA.set_ylabel("Model anomaly score" + (" (z-score)" if Z_SCORE_MODELS else ""))
titleA = (
f"{safe_title(experiment)} | latent_dim={latent_dim}, "
f"semi_normals={semi_normals}, semi_anomalous={semi_anomalous}\n"
f"Smoothing: EMA(alpha={EMA_ALPHA})"
)
axA.set_title(titleA)
axA.grid(True, alpha=0.3)
axA.legend(loc="upper right")
fnameA = (
f"{experiment}_ld{latent_dim}_sn{semi_normals}_sa{semi_anomalous}"
f"_scores_EMA-{EMA_ALPHA}{'_z' if Z_SCORE_MODELS else ''}.png"
)
figA.savefig(output_datetime_path / fnameA, dpi=150)
plt.close(figA)
# =========== Plot B: Models (z-scored) + Missing Points (%) absolute ===========
if missing_pct is not None and len(missing_pct) > 0:
mp = missing_pct
if ALIGN_TO_MIN_LENGTH:
mp = mp[:any_len]
mp_s = apply_ema_stats(mp)
t_stats = np.arange(len(mp_s)) / FPS
figB, axB = plt.subplots(figsize=(14, 6), constrained_layout=True)
axBy = axB.twinx()
for label, y in proc.items():
if y is not None:
axB.plot(t_models, y, label=label)
axBy.plot(t_stats, mp_s, linestyle="--", label="Missing points (%)")
if anomaly_window != (None, None):
start, end = anomaly_window
if isinstance(start, int) and isinstance(end, int) and 0 <= start < end:
axB.axvline(start / FPS, linestyle=":", alpha=0.6)
axB.axvline(end / FPS, linestyle=":", alpha=0.6)
axB.set_xlabel("Time (s)")
axB.set_ylabel("Model anomaly score" + (" (z-score)" if Z_SCORE_MODELS else ""))
axBy.set_ylabel("Missing points (%)")
titleB = (
f"{safe_title(experiment)} | latent_dim={latent_dim}, "
f"semi_normals={semi_normals}, semi_anomalous={semi_anomalous}\n"
f"Models: EMA({EMA_ALPHA}) | Stats: EMA({STATS_EMA_ALPHA}) — + Missing points (absolute %)"
)
axB.set_title(titleB)
axB.grid(True, alpha=0.3)
lines1, labels1 = axB.get_legend_handles_labels()
lines2, labels2 = axBy.get_legend_handles_labels()
axB.legend(lines1 + lines2, labels1 + labels2, loc="upper right")
fnameB = (
f"{experiment}_ld{latent_dim}_sn{semi_normals}_sa{semi_anomalous}"
f"_scores_plus_missing_EMA-{EMA_ALPHA}_stats-{STATS_EMA_ALPHA}"
f"{'_z' if Z_SCORE_MODELS else ''}.png"
)
figB.savefig(output_datetime_path / fnameB, dpi=150)
plt.close(figB)
# =========== Plot C: Models (z-scored) + Near-sensor Points (%) absolute ===========
if near_pct is not None and len(near_pct) > 0:
ns = near_pct
if ALIGN_TO_MIN_LENGTH:
ns = ns[:any_len]
ns_s = apply_ema_stats(ns)
t_stats = np.arange(len(ns_s)) / FPS
figC, axC = plt.subplots(figsize=(14, 6), constrained_layout=True)
axCy = axC.twinx()
for label, y in proc.items():
if y is not None:
axC.plot(t_models, y, label=label)
axCy.plot(t_stats, ns_s, linestyle="--", label="Near-sensor <0.5m (%)")
if anomaly_window != (None, None):
start, end = anomaly_window
if isinstance(start, int) and isinstance(end, int) and 0 <= start < end:
axC.axvline(start / FPS, linestyle=":", alpha=0.6)
axC.axvline(end / FPS, linestyle=":", alpha=0.6)
axC.set_xlabel("Time (s)")
axC.set_ylabel("Model anomaly score" + (" (z-score)" if Z_SCORE_MODELS else ""))
axCy.set_ylabel("Near-sensor points (%)")
titleC = (
f"{safe_title(experiment)} | latent_dim={latent_dim}, "
f"semi_normals={semi_normals}, semi_anomalous={semi_anomalous}\n"
f"Models: EMA({EMA_ALPHA}) | Stats: EMA({STATS_EMA_ALPHA}) — + Near-sensor <0.5m (absolute %)"
)
axC.set_title(titleC)
axC.grid(True, alpha=0.3)
lines1, labels1 = axC.get_legend_handles_labels()
lines2, labels2 = axCy.get_legend_handles_labels()
axC.legend(lines1 + lines2, labels1 + labels2, loc="upper right")
fnameC = (
f"{experiment}_ld{latent_dim}_sn{semi_normals}_sa{semi_anomalous}"
f"_scores_plus_nearsensor_EMA-{EMA_ALPHA}_stats-{STATS_EMA_ALPHA}"
f"{'_z' if Z_SCORE_MODELS else ''}.png"
)
figC.savefig(output_datetime_path / fnameC, dpi=150)
plt.close(figC)
plots_made += 1
# =====================================
# Preserve latest/, archive/, copy script
# =====================================
# delete current latest folder
shutil.rmtree(latest_folder_path, ignore_errors=True)
# create new latest folder
latest_folder_path.mkdir(exist_ok=True, parents=True)
# copy contents of output folder to the latest folder
for file in output_datetime_path.iterdir():
shutil.copy2(file, latest_folder_path)
# copy this python script to preserve the code used
try:
shutil.copy2(__file__, output_datetime_path)
shutil.copy2(__file__, latest_folder_path)
except Exception:
# If running interactively, fall back to saving the config snapshot
(output_datetime_path / "run_config.json").write_text(
json.dumps(
{
"INFERENCE_ROOT": str(INFERENCE_ROOT),
"CACHE_PATH": str(CACHE_PATH),
"ALL_DATA_PATH": str(ALL_DATA_PATH),
"FPS": FPS,
"EMA_ALPHA": EMA_ALPHA,
"STATS_EMA_ALPHA": STATS_EMA_ALPHA,
"Z_SCORE_MODELS": Z_SCORE_MODELS,
"ALIGN_TO_MIN_LENGTH": ALIGN_TO_MIN_LENGTH,
"ALIGN_SCORE_DIRECTION": ALIGN_SCORE_DIRECTION,
"timestamp": datetime_folder_name,
},
indent=2,
)
)
# move output date folder to archive
shutil.move(output_datetime_path, archive_folder_path)
print(f"Done. Plotted {plots_made} groups. Archived under: {archive_folder_path}")

View File

@@ -0,0 +1,631 @@
#!/usr/bin/env python3
# results_inference_timelines_exp_compare.py
import json
import pickle
import re
import shutil
from datetime import datetime
from pathlib import Path
from typing import Dict, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import polars as pl
from load_results import load_inference_results_dataframe
from matplotlib.lines import Line2D
# =====================================
# User-configurable params
# =====================================
# Root directory that contains per-run outputs (your loader will scan this)
INFERENCE_ROOT = Path("/home/fedex/mt/results/inference/copy")
# Cached stats + manual labels (same location as your earlier scripts)
CACHE_PATH = Path("/home/fedex/mt/plots/results_inference_exp_compare")
# .bag directory (used only to rebuild experiment order for mapping stats)
ALL_DATA_PATH = Path("/home/fedex/mt/data/subter")
# Output base directory (timestamped subfolder will be created here, archived, and copied to latest/)
OUTPUT_PATH = Path("/home/fedex/mt/plots/results_inference_exp_compare")
# Two experiments to compare (exact strings as they appear in the DataFrame's `experiment` column)
EXPERIMENT_CLEAN = "2_static_no_artifacts_illuminated_2023-01-23-001"
EXPERIMENT_DEGRADED = "3_smoke_human_walking_2023-01-23"
# Shared model configuration for BOTH experiments
LATENT_DIM = 32
SEMI_NORMALS = 0
SEMI_ANOMALOUS = 0
# Comparison y-axis mode for methods: "baseline_z" or "baseline_tailprob"
Y_MODE = "baseline_z"
# Progress axis resolution (number of bins from 0% to 100%)
PROGRESS_BINS = 100
# Frames per second for building time axes before progress-binning (informational only)
FPS = 10.0
# ---- EMA smoothing only ----
# Methods (scores) EMA alpha
EMA_ALPHA_METHODS = 0.1 # (0,1], smaller = smoother
# Stats (absolute %) EMA alpha
EMA_ALPHA_STATS = 0.1 # (0,1], smaller = smoother
# LiDAR points per frame (for stats -> percent)
DATA_RESOLUTION = 32 * 2048
# Copy this script into outputs for provenance (best-effort if not running as a file)
COPY_SELF = True
# =====================================
# Setup output folders
# =====================================
datetime_folder_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
latest_folder_path = OUTPUT_PATH / "latest"
archive_folder_path = OUTPUT_PATH / "archive"
output_datetime_path = OUTPUT_PATH / datetime_folder_name
OUTPUT_PATH.mkdir(exist_ok=True, parents=True)
archive_folder_path.mkdir(exist_ok=True, parents=True)
latest_folder_path.mkdir(exist_ok=True, parents=True)
output_datetime_path.mkdir(exist_ok=True, parents=True)
# =====================================
# Load Polars DataFrame via your helper
# =====================================
df: pl.DataFrame = load_inference_results_dataframe(INFERENCE_ROOT)
required_cols = {
"experiment",
"network",
"latent_dim",
"semi_normals",
"semi_anomalous",
"model",
"scores",
"folder",
"config_json",
}
missing = required_cols - set(df.columns)
if missing:
raise KeyError(f"DataFrame missing required columns: {sorted(missing)}")
# =====================================
# Rebuild experiment → stats mapping (like your original)
# =====================================
def rebuild_experiment_index():
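"""Rebuild the stats ordering: scan ALL_DATA_PATH for .bag files, split into normal and "smoke" runs, sort by file size, and map stem -> (is_anomaly, index, path)."""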
normals, anomalies = [], []
if not ALL_DATA_PATH.exists():
return [], [], {}
for bag in ALL_DATA_PATH.iterdir():
if bag.suffix != ".bag":
continue
if "smoke" in bag.name:
anomalies.append(bag)
else:
normals.append(bag)
normals = sorted(normals, key=lambda p: p.stat().st_size)
anomalies = sorted(anomalies, key=lambda p: p.stat().st_size)
mapping = {}
for i, p in enumerate(normals):
mapping[p.stem] = (False, i, p)
for i, p in enumerate(anomalies):
mapping[p.stem] = (True, i, p)
return normals, anomalies, mapping
normal_paths, anomaly_paths, exp_map = rebuild_experiment_index()
# Load cached statistical data and manual labels
missing_points_cache = CACHE_PATH / "missing_points.pkl"
near_sensor_cache = CACHE_PATH / "particles_near_sensor_counts_500.pkl"
labels_json_path = CACHE_PATH / "manually_labeled_anomaly_frames.json"
missing_points_normal = missing_points_anomaly = None
near_sensor_normal = near_sensor_anomaly = None
if missing_points_cache.exists():
with open(missing_points_cache, "rb") as f:
missing_points_normal, missing_points_anomaly = pickle.load(f)
if near_sensor_cache.exists():
with open(near_sensor_cache, "rb") as f:
near_sensor_normal, near_sensor_anomaly = pickle.load(f)
manual_windows = {}
if labels_json_path.exists():
with open(labels_json_path, "r") as f:
labeled_json = json.load(f)
for file in labeled_json.get("files", []):
manual_windows[file["filename"]] = (
file.get("semi_target_begin_frame"),
file.get("semi_target_end_frame"),
)
# =====================================
# Helpers
# =====================================
def ema(x: np.ndarray, alpha: float) -> np.ndarray:
if x is None or len(x) == 0:
return x
y = np.empty_like(x, dtype=float)
y[0] = x[0]
for i in range(1, len(x)):
y[i] = alpha * x[i] + (1 - alpha) * y[i - 1]
return y
def to_np_list(list_cell) -> Optional[np.ndarray]:
if list_cell is None:
return None
return np.asarray(list_cell, dtype=float).ravel()
def normalize_exp_name(name: str) -> str:
# strip trailing run suffix like -001, -002 if present
return re.sub(r"-\d{3}$", "", name)
def map_experiment_to_stats_stem(exp_name: str) -> Optional[str]:
"""Try exact match, then prefix match with / without -### suffix stripped."""
if exp_name in exp_map:
return exp_name
base = normalize_exp_name(exp_name)
if base in exp_map:
return base
for stem in exp_map.keys():
if stem.startswith(exp_name) or stem.startswith(base):
return stem
return None
def get_stats_for_experiment(
exp_name: str,
) -> Tuple[
Optional[np.ndarray], Optional[np.ndarray], Tuple[Optional[int], Optional[int]]
]:
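"""Return (missing_pct, near_pct, anomaly_window) for exp_name via fuzzy stem matching; entries are None / (None, None) when unavailable."""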
key = map_experiment_to_stats_stem(exp_name)
if key is None:
return None, None, (None, None)
is_anomaly, idx, path = exp_map[key]
missing = near = None
if missing_points_normal is not None and missing_points_anomaly is not None:
series = (
missing_points_anomaly[idx] if is_anomaly else missing_points_normal[idx]
)
missing = (np.asarray(series, dtype=float) / DATA_RESOLUTION) * 100.0
if near_sensor_normal is not None and near_sensor_anomaly is not None:
series = near_sensor_anomaly[idx] if is_anomaly else near_sensor_normal[idx]
near = (np.asarray(series, dtype=float) / DATA_RESOLUTION) * 100.0
npy_key = path.with_suffix(".npy").name
window = manual_windows.get(npy_key, (None, None))
return missing, near, window
def _bin_to_progress(x: np.ndarray, bins: int = PROGRESS_BINS) -> np.ndarray:
"""Average x into fixed #bins across its length (progress-normalized timeline)."""
if x is None or len(x) == 0:
return x
n = len(x)
edges = np.linspace(0, n, bins + 1, dtype=int)
out = np.empty(bins, dtype=float)
for i in range(bins):
a, b = edges[i], edges[i + 1]
if b <= a:
out[i] = out[i - 1] if i > 0 else x[0]
else:
out[i] = float(np.mean(x[a:b]))
return out
def _ecdf(x: np.ndarray):
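"""Return the empirical CDF of x as a callable F(t) = P(X <= t)."""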
xs = np.sort(np.asarray(x, dtype=float))
n = len(xs)
def F(t):
return float(np.searchsorted(xs, t, side="right")) / n
return F
def baseline_transform(clean: np.ndarray, other: np.ndarray, mode: str):
"""Transform using stats from clean only."""
assert mode in ("baseline_z", "baseline_tailprob")
if clean is None or len(clean) == 0:
return clean, other, "raw"
if mode == "baseline_z":
mu = float(np.mean(clean))
sd = float(np.std(clean, ddof=0))
if sd < 1e-12:
zc = clean - mu
zo = other - mu if other is not None else None
else:
zc = (clean - mu) / sd
zo = (other - mu) / sd if other is not None else None
return zc, zo, "Anomaly score (σ above clean)"
else:
F = _ecdf(clean)
tp_clean = np.array([1.0 - F(v) for v in clean], dtype=float)
tp_other = (
np.array([1.0 - F(v) for v in other], dtype=float)
if other is not None
else None
)
return tp_clean, tp_other, "Tail probability vs clean (1 - F_clean)"
def pick_method_series(gdf: pl.DataFrame, label: str) -> Optional[np.ndarray]:
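"""Return the raw scores array for the given method label within this group slice, or None if no matching row exists."""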
if label == "DeepSAD LeNet":
sel = gdf.filter(
(pl.col("network") == "subter_LeNet") & (pl.col("model") == "deepsad")
)
elif label == "DeepSAD Efficient":
sel = gdf.filter(
(pl.col("network") == "subter_efficient") & (pl.col("model") == "deepsad")
)
elif label == "OCSVM":
sel = gdf.filter(
(pl.col("network") == "subter_LeNet") & (pl.col("model") == "ocsvm")
)
elif label == "Isolation Forest":
sel = gdf.filter(
(pl.col("network") == "subter_LeNet") & (pl.col("model") == "isoforest")
)
else:
sel = pl.DataFrame()
if sel.height == 0:
return None
row = sel.row(0)
row_dict = {c: row[i] for i, c in enumerate(sel.columns)}
return to_np_list(row_dict["scores"])
def group_slice(
df: pl.DataFrame,
experiment: str,
latent_dim: int,
semi_normals: int,
semi_anomalous: int,
) -> pl.DataFrame:
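"""Filter df down to a single experiment and (latent_dim, semi_normals, semi_anomalous) configuration."""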
return df.filter(
(pl.col("experiment") == experiment)
& (pl.col("latent_dim") == latent_dim)
& (pl.col("semi_normals") == semi_normals)
& (pl.col("semi_anomalous") == semi_anomalous)
)
def compare_two_experiments_progress(
df: pl.DataFrame,
experiment_clean: str,
experiment_degraded: str,
latent_dim: int,
semi_normals: int,
semi_anomalous: int,
y_mode: str = "baseline_z",
include_stats: bool = True,
):
methods = [
"DeepSAD LeNet",
"DeepSAD Efficient",
"OCSVM",
"Isolation Forest",
]
g_clean = group_slice(
df, experiment_clean, latent_dim, semi_normals, semi_anomalous
)
g_deg = group_slice(
df, experiment_degraded, latent_dim, semi_normals, semi_anomalous
)
if g_clean.is_empty() or g_deg.is_empty():
print(
f"[WARN] Missing one of the experiment groups: clean({g_clean.height}), degraded({g_deg.height}). Skipping."
)
return 0
# Stats (% absolute, EMA smoothed later)
mp_clean, ns_clean, _ = get_stats_for_experiment(experiment_clean)
mp_deg, ns_deg, _ = get_stats_for_experiment(experiment_degraded)
# Build baseline-anchored, progress-binned curves per method
curves_clean: Dict[str, np.ndarray] = {}
curves_deg: Dict[str, np.ndarray] = {}
y_label = "Anomaly"
for label in methods:
s_clean = pick_method_series(g_clean, label)
s_deg = pick_method_series(g_deg, label)
if s_clean is None or s_deg is None:
continue
# Smooth raw with EMA for stability before fitting baseline
s_clean_sm = ema(s_clean.astype(float), EMA_ALPHA_METHODS)
s_deg_sm = ema(s_deg.astype(float), EMA_ALPHA_METHODS)
t_clean, t_deg, y_label = baseline_transform(s_clean_sm, s_deg_sm, y_mode)
# Progress-bin both
curves_clean[label] = _bin_to_progress(t_clean, PROGRESS_BINS)
curves_deg[label] = _bin_to_progress(t_deg, PROGRESS_BINS)
if not curves_clean:
print("[WARN] No method curves available for comparison in this config.")
return 0
x = np.linspace(0, 100, PROGRESS_BINS)
# Prep stats: absolute %, EMA, progress-binned
def prep_stat_pair(a, b):
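"""EMA-smooth a clean/degraded stat pair and progress-bin both; returns (None, None) if either series is missing or empty."""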
if a is None or len(a) == 0 or b is None or len(b) == 0:
return None, None
a_s = ema(a.astype(float), EMA_ALPHA_STATS)
b_s = ema(b.astype(float), EMA_ALPHA_STATS)
return _bin_to_progress(a_s, PROGRESS_BINS), _bin_to_progress(
b_s, PROGRESS_BINS
)
mp_c, mp_d = prep_stat_pair(mp_clean, mp_deg)
ns_c, ns_d = prep_stat_pair(ns_clean, ns_deg)
# Colors & styles
COLOR_METHOD = "#d62728" # vibrant red
COLOR_MISSING = "#9ecae1" # pale blue
COLOR_NEAR = "#a1d99b" # pale green
LS_CLEAN = "--" # dashed for normal/clean
LS_DEG = "-" # solid for anomalous/degraded
LW_METHOD = 1.8
LW_METHOD_CLEAN = 1.2
LW_STATS = 1.6
ALPHA_STATS = 0.95
# Build the 4x1 subplot column (one panel per method)
fig, axes = plt.subplots(
4, 1, figsize=(12, 16), constrained_layout=True, sharex=False
)
axes = axes.ravel()
method_to_axidx = {
"DeepSAD LeNet": 0,
"DeepSAD Efficient": 1,
"OCSVM": 2,
"Isolation Forest": 3,
}
stats_available = (
mp_c is not None and mp_d is not None and ns_c is not None and ns_d is not None
)
if not stats_available:
print("[WARN] One or both stats missing. Subplots will include methods only.")
letters = ["a", "b", "c", "d"]
for label, axidx in method_to_axidx.items():
ax = axes[axidx]
yc = curves_clean.get(label)
yd = curves_deg.get(label)
if yc is None or yd is None:
ax.text(
0.5, 0.5, "No data", ha="center", va="center", transform=ax.transAxes
)
ax.set_title(f"({letters[axidx]}) {label}")
ax.grid(True, alpha=0.3)
continue
# Left axis: method score (z or tailprob)
ax.plot(
x,
yd,
linestyle=LS_DEG,
color=COLOR_METHOD,
linewidth=LW_METHOD,
label=f"{label} — degraded",
)
ax.plot(
x,
yc,
linestyle=LS_CLEAN,
color=COLOR_METHOD,
linewidth=LW_METHOD_CLEAN,
label=f"{label} — clean",
)
ax.set_ylabel(y_label)
ax.set_title(f"({letters[axidx]}) {label}")
ax.grid(True, alpha=0.3)
# Right axis #1 (closest to plot): Missing points (%)
axy_miss = ax.twinx()
if mp_c is not None and mp_d is not None:
axy_miss.plot(
x,
mp_d,
linestyle=LS_DEG,
color=COLOR_MISSING,
alpha=ALPHA_STATS,
linewidth=LW_STATS,
label="Missing points — degraded (%)",
)
axy_miss.plot(
x,
mp_c,
linestyle=LS_CLEAN,
color=COLOR_MISSING,
alpha=ALPHA_STATS,
linewidth=LW_STATS,
label="Missing points — clean (%)",
)
axy_miss.set_ylabel("Missing points (%)")
axy_miss.tick_params(axis="y") # , colors=COLOR_MISSING)
# axy_miss.spines["right"].set_edgecolor(COLOR_MISSING)
# Right axis #2 (slightly offset): Near-sensor points (%)
axy_near = ax.twinx()
# push this spine outward so it doesn't overlap the first right axis
axy_near.spines["right"].set_position(("axes", 1.08))
# make patch invisible so only spine shows
axy_near.set_frame_on(True)
axy_near.patch.set_visible(False)
if ns_c is not None and ns_d is not None:
axy_near.plot(
x,
ns_d,
linestyle=LS_DEG,
color=COLOR_NEAR,
alpha=ALPHA_STATS,
linewidth=LW_STATS,
label="Near-sensor — degraded (%)",
)
axy_near.plot(
x,
ns_c,
linestyle=LS_CLEAN,
color=COLOR_NEAR,
alpha=ALPHA_STATS,
linewidth=LW_STATS,
label="Near-sensor — clean (%)",
)
axy_near.set_ylabel("Near-sensor points (%)")
axy_near.tick_params(axis="y") # , colors=COLOR_NEAR)
# axy_near.spines["right"].set_edgecolor(COLOR_NEAR)
# Compose legend: show *method name* explicitly, plus the two stats
handles = [
Line2D(
[0],
[0],
color=COLOR_METHOD,
lw=LW_METHOD,
ls=LS_DEG,
label=f"{label} — degraded",
),
Line2D(
[0],
[0],
color=COLOR_METHOD,
lw=LW_METHOD_CLEAN,
ls=LS_CLEAN,
label=f"{label} — clean",
),
Line2D(
[0],
[0],
color=COLOR_MISSING,
lw=LW_STATS,
ls=LS_DEG,
label="Missing points — degraded",
),
Line2D(
[0],
[0],
color=COLOR_MISSING,
lw=LW_STATS,
ls=LS_CLEAN,
label="Missing points — clean",
),
Line2D(
[0],
[0],
color=COLOR_NEAR,
lw=LW_STATS,
ls=LS_DEG,
label="Near-sensor — degraded",
),
Line2D(
[0],
[0],
color=COLOR_NEAR,
lw=LW_STATS,
ls=LS_CLEAN,
label="Near-sensor — clean",
),
]
ax.legend(handles=handles, loc="upper left", fontsize=9, framealpha=0.9)
# Shared labels / super-title
for ax in axes:
ax.set_xlabel("Progress through experiment (%)")
# fig.suptitle(
# f"AD Method vs Stats Inference — progress-normalized\n"
# f"Transform: z-score normalized to non-degraded experiment | EMA(α={EMA_ALPHA_METHODS})",
# fontsize=14,
# )
fig.tight_layout(rect=[0, 0, 1, 0.99])
out_name = (
f"4up_{EXPERIMENT_CLEAN}_vs_{EXPERIMENT_DEGRADED}"
f"_ld{latent_dim}_sn{semi_normals}_sa{semi_anomalous}_{y_mode}_methods_vs_stats.png"
)
fig.savefig(output_datetime_path / out_name, dpi=150)
plt.close(fig)
return 1
# =====================================
# Run comparison & save
# =====================================
plots_made = compare_two_experiments_progress(
df=df,
experiment_clean=EXPERIMENT_CLEAN,
experiment_degraded=EXPERIMENT_DEGRADED,
latent_dim=LATENT_DIM,
semi_normals=SEMI_NORMALS,
semi_anomalous=SEMI_ANOMALOUS,
y_mode=Y_MODE,
include_stats=True,
)
# =====================================
# Preserve latest/, archive/, copy script
# =====================================
# delete current latest folder
shutil.rmtree(latest_folder_path, ignore_errors=True)
# create new latest folder
latest_folder_path.mkdir(exist_ok=True, parents=True)
# copy contents of output folder to the latest folder
for file in output_datetime_path.iterdir():
shutil.copy2(file, latest_folder_path)
# copy this python script to preserve the code used (best effort)
if COPY_SELF:
try:
shutil.copy2(__file__, output_datetime_path)
shutil.copy2(__file__, latest_folder_path)
except Exception:
(output_datetime_path / "run_config.json").write_text(
json.dumps(
{
"INFERENCE_ROOT": str(INFERENCE_ROOT),
"CACHE_PATH": str(CACHE_PATH),
"ALL_DATA_PATH": str(ALL_DATA_PATH),
"EXPERIMENT_CLEAN": EXPERIMENT_CLEAN,
"EXPERIMENT_DEGRADED": EXPERIMENT_DEGRADED,
"LATENT_DIM": LATENT_DIM,
"SEMI_NORMALS": SEMI_NORMALS,
"SEMI_ANOMALOUS": SEMI_ANOMALOUS,
"Y_MODE": Y_MODE,
"PROGRESS_BINS": PROGRESS_BINS,
"FPS": FPS,
"EMA_ALPHA_METHODS": EMA_ALPHA_METHODS,
"EMA_ALPHA_STATS": EMA_ALPHA_STATS,
"DATA_RESOLUTION": DATA_RESOLUTION,
"timestamp": datetime_folder_name,
},
indent=2,
)
)
# move output date folder to archive
shutil.move(output_datetime_path, archive_folder_path)
print(f"Done. Wrote {plots_made} figure(s). Archived under: {archive_folder_path}")

View File

@@ -7,10 +7,10 @@ from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import polars as pl
from matplotlib.lines import Line2D
# CHANGE THIS IMPORT IF YOUR LOADER MODULE IS NAMED DIFFERENTLY
from plot_scripts.load_results import load_results_dataframe
from load_results import load_results_dataframe
from matplotlib.lines import Line2D
# ----------------------------
# Config
@@ -26,6 +26,10 @@ SEMI_ANOMALOUS = 10
# Which evaluation columns to plot
EVALS = ["exp_based", "manual_based"]
EVALS_LABELS = {
"exp_based": "Experiment-Label-Based",
"manual_based": "Manually-Labeled",
}
# Latent dimensions to show as 7 subplots
LATENT_DIMS = [32, 64, 128, 256, 512, 768, 1024]
@@ -157,7 +161,7 @@ def _ensure_dim_axes(fig_title: str):
fig, axes = plt.subplots(
nrows=4, ncols=2, figsize=(12, 16), constrained_layout=True
)
fig.suptitle(fig_title, fontsize=14)
# fig.suptitle(fig_title, fontsize=14)
axes = axes.ravel()
return fig, axes
@@ -188,7 +192,7 @@ def plot_grid_from_df(
Create a 2x4 grid of subplots, one per latent dim; 8th panel holds legend.
kind: 'roc' or 'prc'
"""
fig_title = f"{kind.upper()}{eval_type} (semi = {semi_normals}/{semi_anomalous})"
fig_title = f"{kind.upper()}{EVALS_LABELS[eval_type]} (Semi-Labeling Regime = {semi_normals}/{semi_anomalous})"
fig, axes = _ensure_dim_axes(fig_title)
# plotting order & colors
@@ -209,11 +213,13 @@ def plot_grid_from_df(
legend_labels = []
have_legend = False
letters = ["a", "b", "c", "d", "e", "f", "g", "h"]
for i, dim in enumerate(LATENT_DIMS):
if i >= 7:
break # last slot reserved for legend
ax = axes[i]
ax.set_title(f"latent_dim = {dim}")
ax.set_title(f"({letters[i]}) Latent Dim. = {dim}")
ax.grid(True, alpha=0.3)
if kind == "roc":

View File

@@ -0,0 +1,505 @@
from __future__ import annotations
import shutil
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
import numpy as np
import polars as pl
# CHANGE THIS IMPORT IF YOUR LOADER MODULE IS NAMED DIFFERENTLY
from load_results import load_results_dataframe
# ----------------------------
# Config
# ----------------------------
ROOT = Path("/home/fedex/mt/results/copy") # experiments root you pass to the loader
OUTPUT_DIR = Path("/home/fedex/mt/plots/results_latent_space_tables")
# Semi-labeling regimes (semi_normals, semi_anomalous) in display order
SEMI_LABELING_REGIMES: list[tuple[int, int]] = [(0, 0), (50, 10), (500, 100)]
# Both evals are shown side-by-side in one table
EVALS_BOTH: tuple[str, str] = ("exp_based", "manual_based")
# Row order (latent dims)
LATENT_DIMS: list[int] = [32, 64, 128, 256, 512, 768, 1024]
# Column order (method shown to the user)
# We split DeepSAD into the two network backbones, like your plots.
METHOD_COLUMNS = [
("deepsad", "LeNet"), # DeepSAD (LeNet)
("deepsad", "Efficient"), # DeepSAD (Efficient)
("isoforest", "Efficient"), # IsolationForest (Efficient baseline)
("ocsvm", "Efficient"), # OC-SVM (Efficient baseline)
]
# Formatting
DECIMALS = 3 # cells look like 1.000 or 0.928 (3 decimals)
# ----------------------------
# Helpers
# ----------------------------
def _fmt_mean_std(mean: float | None, std: float | None) -> str:
"""Format mean ± std with 3 decimals (leading zero), or '--' if missing."""
if mean is None or not (mean == mean): # NaN check
return "--"
if std is None or not (std == std):
return f"{mean:.3f}"
return f"{mean:.3f}$\\,\\pm\\,{std:.3f}$"
def _with_net_label(df: pl.DataFrame) -> pl.DataFrame:
"""Add a canonical 'net_label' column like the plotting script (LeNet/Efficient/fallback)."""
return df.with_columns(
pl.when(
pl.col("network").cast(pl.Utf8).str.to_lowercase().str.contains("lenet")
)
.then(pl.lit("LeNet"))
.when(
pl.col("network").cast(pl.Utf8).str.to_lowercase().str.contains("efficient")
)
.then(pl.lit("Efficient"))
.otherwise(pl.col("network").cast(pl.Utf8))
.alias("net_label")
)
def _filter_base(df: pl.DataFrame) -> pl.DataFrame:
"""Restrict to valid dims/models and needed columns (no eval/regime filtering here)."""
return df.filter(
(pl.col("latent_dim").is_in(LATENT_DIMS))
& (pl.col("model").is_in(["deepsad", "isoforest", "ocsvm"]))
& (pl.col("eval").is_in(list(EVALS_BOTH)))
).select(
"model",
"net_label",
"latent_dim",
"fold",
"ap",
"eval",
"semi_normals",
"semi_anomalous",
)
@dataclass(frozen=True)
class Cell:
mean: float | None
std: float | None
def _compute_cells(df: pl.DataFrame) -> dict[tuple[str, int, str, str, int, int], Cell]:
"""
Compute per-(eval, latent_dim, model, net_label, semi_normals, semi_anomalous)
mean/std for AP across folds.
"""
if df.is_empty():
return {}
# For baselines (isoforest/ocsvm) constrain to Efficient backbone
df = df.filter(
pl.when(pl.col("model").is_in(["isoforest", "ocsvm"]))
.then(pl.col("net_label") == "Efficient")
.otherwise(True)
)
agg = (
df.group_by(
[
"eval",
"latent_dim",
"model",
"net_label",
"semi_normals",
"semi_anomalous",
]
)
.agg(pl.col("ap").mean().alias("mean_ap"), pl.col("ap").std().alias("std_ap"))
.to_dicts()
)
out: dict[tuple[str, int, str, str, int, int], Cell] = {}
for row in agg:
key = (
str(row["eval"]),
int(row["latent_dim"]),
str(row["model"]),
str(row["net_label"]),
int(row["semi_normals"]),
int(row["semi_anomalous"]),
)
out[key] = Cell(mean=row.get("mean_ap"), std=row.get("std_ap"))
return out
def method_label(model: str, net_label: str) -> str:
"""Map (model, net_label) to the four method names used in headers/caption."""
if model == "deepsad" and net_label == "LeNet":
return "DeepSAD (LeNet)"
if model == "deepsad" and net_label == "Efficient":
return "DeepSAD (Efficient)"
if model == "isoforest":
return "IsoForest"
if model == "ocsvm":
return "OC-SVM"
# ignore anything else (e.g., other backbones)
return ""
def per_method_median_std_from_cells(
cells: dict[tuple[str, int, str, str, int, int], Cell],
) -> dict[str, float]:
"""Compute the median std across all cells, per method."""
stds_by_method: dict[str, list[float]] = {
"DeepSAD (LeNet)": [],
"DeepSAD (Efficient)": [],
"IsoForest": [],
"OC-SVM": [],
}
for key, cell in cells.items():
(ev, dim, model, net, semi_n, semi_a) = key
name = method_label(model, net)
if name and (cell.std is not None) and (cell.std == cell.std): # not NaN
stds_by_method[name].append(cell.std)
return {
name: float(np.median(vals)) if vals else float("nan")
for name, vals in stds_by_method.items()
}
def per_method_max_std_from_cells(
cells: dict[tuple[str, int, str, str, int, int], Cell],
) -> tuple[dict[str, float], dict[str, tuple]]:
"""
Scan the aggregated 'cells' and return:
- max_std_by_method: dict {"DeepSAD (LeNet)": 0.037, ...}
- argmax_key_by_method: which cell (eval, dim, model, net, semi_n, semi_a) produced that max
Only considers the four methods shown in the table.
"""
max_std_by_method: dict[str, float] = {
"DeepSAD (LeNet)": float("nan"),
"DeepSAD (Efficient)": float("nan"),
"IsoForest": float("nan"),
"OC-SVM": float("nan"),
}
argmax_key_by_method: dict[str, tuple] = {}
for key, cell in cells.items():
(ev, dim, model, net, semi_n, semi_a) = key
name = method_label(model, net)
if name == "" or cell.std is None or not (cell.std == cell.std): # empty/NaN
continue
cur = max_std_by_method.get(name, float("nan"))
if (cur != cur) or (cell.std > cur): # handle NaN initial
max_std_by_method[name] = cell.std
argmax_key_by_method[name] = key
# Replace remaining NaNs with 0.0 for nice formatting
for k, v in list(max_std_by_method.items()):
if not (v == v): # NaN
max_std_by_method[k] = 0.0
return max_std_by_method, argmax_key_by_method
def _fmt_val(val: float | None) -> str:
"""
Format value as:
- '--' if None/NaN
- '1.0' if exactly 1 (within 1e-9)
- '.xx' otherwise (2 decimals, no leading 0)
"""
if val is None or not (val == val): # None or NaN
return "--"
if abs(val - 1.0) < 1e-9:
return "1.0"
return f"{val:.2f}".lstrip("0")
def _fmt_mean(mean: float | None) -> str:
return "--" if (mean is None or not (mean == mean)) else f"{mean:.{DECIMALS}f}"
def _bold_best_mask_display(values: list[float | None], decimals: int) -> list[bool]:
"""
Bolding mask based on *displayed* precision. Any entries that round (via f-string)
to the maximum at 'decimals' places are bolded (ties bolded).
"""
def disp(v: float | None) -> float | None:
if v is None or not (v == v):
return None
return float(f"{v:.{decimals}f}")
rounded = [disp(v) for v in values]
finite = [v for v in rounded if v is not None]
if not finite:
return [False] * len(values)
maxv = max(finite)
return [(v is not None and v == maxv) for v in rounded]
def _build_exp_based_table(
cells: dict[tuple[str, int, str, str, int, int], Cell],
*,
semi_labeling_regimes: list[tuple[int, int]],
) -> str:
"""
Build LaTeX table with mean ± std values for experiment-based evaluation only.
"""
header_cols = [
r"\rotheader{DeepSAD\\(LeNet)}",
r"\rotheader{DeepSAD\\(Efficient)}",
r"\rotheader{IsoForest}",
r"\rotheader{OC-SVM}",
]
lines: list[str] = []
lines.append(r"\begin{table}[t]")
lines.append(r"\centering")
lines.append(r"\setlength{\tabcolsep}{4pt}")
lines.append(r"\renewcommand{\arraystretch}{1.2}")
lines.append(r"\begin{tabularx}{\textwidth}{c*{4}{Y}}")
lines.append(r"\toprule")
lines.append(r"Latent Dim. & " + " & ".join(header_cols) + r" \\")
lines.append(r"\midrule")
for idx, (semi_n, semi_a) in enumerate(semi_labeling_regimes):
# regime label row
lines.append(
rf"\multicolumn{{5}}{{l}}{{\textbf{{Labeling regime: }}\(\mathbf{{{semi_n}/{semi_a}}}\)}} \\"
)
lines.append(r"\addlinespace[2pt]")
for dim in LATENT_DIMS:
row_vals = []
for model, net in METHOD_COLUMNS:
key = ("exp_based", dim, model, net, semi_n, semi_a)
cell = cells.get(key, Cell(None, None))
row_vals.append(_fmt_mean_std(cell.mean, cell.std))
lines.append(f"{dim} & " + " & ".join(row_vals) + r" \\")
if idx < len(semi_labeling_regimes) - 1:
lines.append(r"\midrule")
lines.append(r"\bottomrule")
lines.append(r"\end{tabularx}")
lines.append(
r"\caption{AP means $\pm$ std across 5 folds for experiment-based evaluation only, grouped by labeling regime.}"
)
lines.append(r"\end{table}")
return "\n".join(lines)
def _build_single_table(
cells: dict[tuple[str, int, str, str, int, int], Cell],
*,
semi_labeling_regimes: list[tuple[int, int]],
) -> tuple[str, float | None]:
"""
Build the LaTeX table string with grouped headers and regime blocks.
Returns (latex, max_std_overall).
"""
# Rotated header labels (90° slanted)
header_cols = [
r"\rotheader{DeepSAD\\(LeNet)}",
r"\rotheader{DeepSAD\\(Efficient)}",
r"\rotheader{IsoForest}",
r"\rotheader{OC-SVM}",
]
# Track max std across all cells
max_std: float | None = None
def push_std(std_val: float | None):
nonlocal max_std
if std_val is None or not (std_val == std_val):
return
if max_std is None or std_val > max_std:
max_std = std_val
lines: list[str] = []
# Table preamble / structure
lines.append(r"\begin{table}[t]")
lines.append(r"\centering")
lines.append(r"\setlength{\tabcolsep}{4pt}")
lines.append(r"\renewcommand{\arraystretch}{1.2}")
# Vertical rule between the two groups for data/header rows:
lines.append(r"\begin{tabularx}{\textwidth}{c*{4}{Y}|*{4}{Y}}")
lines.append(r"\toprule")
lines.append(
r" & \multicolumn{4}{c}{Experiment-based eval.} & \multicolumn{4}{c}{Handlabeled eval.} \\"
)
lines.append(r"\cmidrule(lr){2-5} \cmidrule(lr){6-9}")
lines.append(
r"Latent Dim. & "
+ " & ".join(header_cols)
+ " & "
+ " & ".join(header_cols)
+ r" \\"
)
lines.append(r"\midrule")
# Iterate regimes and rows
for idx, (semi_n, semi_a) in enumerate(semi_labeling_regimes):
# Regime label row (multicolumn suppresses the vertical bar in this row)
lines.append(
rf"\multicolumn{{9}}{{l}}{{\textbf{{Labeling regime: }}\(\mathbf{{{semi_n}/{semi_a}}}\) "
rf"\textit{{(normal/anomalous samples labeled)}}}} \\"
)
lines.append(r"\addlinespace[2pt]")
for dim in LATENT_DIMS:
# Values in order: left group (exp_based) 4 cols, right group (manual_based) 4 cols
means_left: list[float | None] = []
means_right: list[float | None] = []
cell_strs_left: list[str] = []
cell_strs_right: list[str] = []
# Left group: exp_based
eval_type = EVALS_BOTH[0]
for model, net in METHOD_COLUMNS:
key = (eval_type, dim, model, net, semi_n, semi_a)
cell = cells.get(key, Cell(None, None))
means_left.append(cell.mean)
cell_strs_left.append(_fmt_mean(cell.mean))
# mean_str = _fmt_val(cell.mean)
# std_str = _fmt_val(cell.std)
# if mean_str == "--":
# cell_strs_left.append("--")
# else:
# cell_strs_left.append(f"{mean_str} $\\textpm$ {std_str}")
push_std(cell.std)
# Right group: manual_based
eval_type = EVALS_BOTH[1]
for model, net in METHOD_COLUMNS:
key = (eval_type, dim, model, net, semi_n, semi_a)
cell = cells.get(key, Cell(None, None))
means_right.append(cell.mean)
cell_strs_right.append(_fmt_mean(cell.mean))
# mean_str = _fmt_val(cell.mean)
# std_str = _fmt_val(cell.std)
# if mean_str == "--":
# cell_strs_right.append("--")
# else:
# cell_strs_right.append(f"{mean_str} $\\textpm$ {std_str}")
push_std(cell.std)
# Bolding per group based on displayed precision
mask_left = _bold_best_mask_display(means_left, DECIMALS)
mask_right = _bold_best_mask_display(means_right, DECIMALS)
pretty_left = [
(r"\textbf{" + s + "}") if (do_bold and s != "--") else s
for s, do_bold in zip(cell_strs_left, mask_left)
]
pretty_right = [
(r"\textbf{" + s + "}") if (do_bold and s != "--") else s
for s, do_bold in zip(cell_strs_right, mask_right)
]
# Join with the vertical bar between groups automatically handled by column spec
lines.append(
f"{dim} & "
+ " & ".join(pretty_left)
+ " & "
+ " & ".join(pretty_right)
+ r" \\"
)
# Separator between regime blocks (but not after the last one)
if idx < len(semi_labeling_regimes) - 1:
lines.append(r"\midrule")
lines.append(r"\bottomrule")
lines.append(r"\end{tabularx}")
# Compute per-method max std across everything included in the table
# max_std_by_method, argmax_key = per_method_max_std_from_cells(cells)
median_std_by_method = per_method_median_std_from_cells(cells)
# Print the per-method median std (helps verify the caption values)
for name, v in median_std_by_method.items():
print(f"[median-std] {name}: {v:.3f}")
cap_parts = []
for name in ["DeepSAD (LeNet)", "DeepSAD (Efficient)", "IsoForest", "OC-SVM"]:
v = median_std_by_method.get(name, 0.0)
cap_parts.append(f"{name} {v:.3f}")
cap_str = "; ".join(cap_parts)
lines.append(
rf"\caption{{AP means across 5 folds for both evaluations, grouped by labeling regime. "
rf"Maximum observed standard deviation per method (not shown in table): {cap_str}.}}"
)
lines.append(r"\end{table}")
return "\n".join(lines), max_std
def main():
# Load full results DF (cache behavior handled by your loader)
df = load_results_dataframe(ROOT, allow_cache=True)
df = _with_net_label(df)
df = _filter_base(df)
# Prepare output dirs
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
archive_dir = OUTPUT_DIR / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
ts_dir = archive_dir / datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ts_dir.mkdir(parents=True, exist_ok=True)
# Pre-compute aggregated cells (mean/std) for all evals/regimes
cells = _compute_cells(df)
# Build the single big table
tex, max_std = _build_single_table(
cells, semi_labeling_regimes=SEMI_LABELING_REGIMES
)
out_name = "ap_table_all_evals_all_regimes.tex"
out_path = ts_dir / out_name
out_path.write_text(tex, encoding="utf-8")
# Build experiment-based table with mean ± std
tex_exp = _build_exp_based_table(cells, semi_labeling_regimes=SEMI_LABELING_REGIMES)
out_name_exp = "ap_table_exp_based_mean_std.tex"
out_path_exp = ts_dir / out_name_exp
out_path_exp.write_text(tex_exp, encoding="utf-8")
# Copy this script to preserve the code used for the outputs
script_path = Path(__file__)
shutil.copy2(script_path, ts_dir / script_path.name)
# Mirror latest
latest = OUTPUT_DIR / "latest"
latest.mkdir(exist_ok=True, parents=True)
for f in latest.iterdir():
if f.is_file():
f.unlink()
for f in ts_dir.iterdir():
if f.is_file():
shutil.copy2(f, latest / f.name)
print(f"Saved table to: {ts_dir}")
print(f"Also updated: {latest}")
print(f" - {out_name}")
if __name__ == "__main__":
main()

View File

@@ -8,11 +8,11 @@ from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import polars as pl
from matplotlib.lines import Line2D
from scipy.stats import sem, t
# CHANGE THIS IMPORT IF YOUR LOADER MODULE NAME IS DIFFERENT
from plot_scripts.load_results import load_results_dataframe
from load_results import load_results_dataframe
from matplotlib.lines import Line2D
from scipy.stats import sem, t
# ---------------------------------
# Config
@@ -23,6 +23,10 @@ OUTPUT_DIR = Path("/home/fedex/mt/plots/results_semi_labels_comparison")
LATENT_DIMS = [32, 64, 128, 256, 512, 768, 1024]
SEMI_REGIMES = [(0, 0), (50, 10), (500, 100)]
EVALS = ["exp_based", "manual_based"]
EVALS_LABELS = {
"exp_based": "Experiment-Based Labels",
"manual_based": "Manually-Labeled",
}
# Interp grids
ROC_GRID = np.linspace(0.0, 1.0, 200)
@@ -30,6 +34,10 @@ PRC_GRID = np.linspace(0.0, 1.0, 200)
# Baselines are duplicated across nets; use Efficient-only to avoid repetition
BASELINE_NET = "Efficient"
BASELINE_LABELS = {
"isoforest": "Isolation Forest",
"ocsvm": "One-Class SVM",
}
# Colors/styles
COLOR_BASELINES = {
@@ -147,12 +155,8 @@ def _select_rows(
return df.filter(pl.all_horizontal(exprs))
def _auc_list(sub: pl.DataFrame) -> list[float]:
return [x for x in sub.select("auc").to_series().to_list() if x is not None]
def _ap_list(sub: pl.DataFrame) -> list[float]:
return [x for x in sub.select("ap").to_series().to_list() if x is not None]
def _auc_list(sub: pl.DataFrame, kind: str) -> list[float]:
return [x for x in sub.select(f"{kind}_auc").to_series().to_list() if x is not None]
def _plot_panel(
@@ -165,7 +169,7 @@ def _plot_panel(
kind: str,
):
"""
Plot one panel: DeepSAD (net_for_deepsad) with 3 regimes + baselines (from Efficient).
Plot one panel: DeepSAD (net_for_deepsad) with 3 regimes + Baselines (from Efficient).
Legend entries include mean±CI of AUC/AP.
"""
ax.grid(True, alpha=0.3)
@@ -200,9 +204,9 @@ def _plot_panel(
continue
# Metric for legend
metric_vals = _auc_list(sub_b) if kind == "roc" else _ap_list(sub_b)
metric_vals = _auc_list(sub_b, kind)
m, ci = mean_ci(metric_vals)
lab = f"{model} ({'AUC' if kind == 'roc' else 'AP'}={m:.3f}±{ci:.3f})"
lab = f"{BASELINE_LABELS[model]}\n(AUC={m:.3f}±{ci:.3f})"
color = COLOR_BASELINES[model]
h = ax.plot(grid, mean_y, lw=2, color=color, label=lab)[0]
@@ -230,9 +234,9 @@ def _plot_panel(
if np.all(np.isnan(mean_y)):
continue
metric_vals = _auc_list(sub_d) if kind == "roc" else _ap_list(sub_d)
metric_vals = _auc_list(sub_d, kind)
m, ci = mean_ci(metric_vals)
lab = f"DeepSAD {net_for_deepsad} semi {sn}/{sa} ({'AUC' if kind == 'roc' else 'AP'}={m:.3f}±{ci:.3f})"
lab = f"DeepSAD {net_for_deepsad}{sn}/{sa}\n(AUC={m:.3f}±{ci:.3f})"
color = COLOR_REGIMES[regime]
ls = LINESTYLES[regime]
@@ -246,7 +250,7 @@ def _plot_panel(
ax.plot([0, 1], [0, 1], "k--", alpha=0.6, label="Chance")
# Legend
ax.legend(loc="lower right", fontsize=9, frameon=True)
ax.legend(loc="upper right", fontsize=9, frameon=True)
def make_figures_for_dim(
@@ -254,9 +258,11 @@ def make_figures_for_dim(
):
# ROC: 2×1
fig_roc, axes = plt.subplots(
nrows=1, ncols=2, figsize=(14, 5), constrained_layout=True
nrows=2, ncols=1, figsize=(7, 10), constrained_layout=True
)
fig_roc.suptitle(f"ROC — {eval_type} — latent_dim={latent_dim}", fontsize=14)
# fig_roc.suptitle(
# f"ROC — {EVALS_LABELS[eval_type]} — Latent Dim.={latent_dim}", fontsize=14
# )
_plot_panel(
axes[0],
@@ -266,7 +272,7 @@ def make_figures_for_dim(
latent_dim=latent_dim,
kind="roc",
)
axes[0].set_title("DeepSAD (LeNet) + baselines")
axes[0].set_title("(a) DeepSAD (LeNet) + Baselines")
_plot_panel(
axes[1],
@@ -276,7 +282,7 @@ def make_figures_for_dim(
latent_dim=latent_dim,
kind="roc",
)
axes[1].set_title("DeepSAD (Efficient) + baselines")
axes[1].set_title("(b) DeepSAD (Efficient) + Baselines")
out_roc = out_dir / f"roc_{latent_dim}_{eval_type}.png"
fig_roc.savefig(out_roc, dpi=150, bbox_inches="tight")
@@ -284,9 +290,11 @@ def make_figures_for_dim(
# PRC: 2×1
fig_prc, axes = plt.subplots(
nrows=1, ncols=2, figsize=(14, 5), constrained_layout=True
nrows=2, ncols=1, figsize=(7, 10), constrained_layout=True
)
fig_prc.suptitle(f"PRC — {eval_type} — latent_dim={latent_dim}", fontsize=14)
# fig_prc.suptitle(
# f"PRC — {EVALS_LABELS[eval_type]} — Latent Dim.={latent_dim}", fontsize=14
# )
_plot_panel(
axes[0],
@@ -296,7 +304,7 @@ def make_figures_for_dim(
latent_dim=latent_dim,
kind="prc",
)
axes[0].set_title("DeepSAD (LeNet) + baselines")
axes[0].set_title("(a)")
_plot_panel(
axes[1],
@@ -306,7 +314,7 @@ def make_figures_for_dim(
latent_dim=latent_dim,
kind="prc",
)
axes[1].set_title("DeepSAD (Efficient) + baselines")
axes[1].set_title("(b)")
out_prc = out_dir / f"prc_{latent_dim}_{eval_type}.png"
fig_prc.savefig(out_prc, dpi=150, bbox_inches="tight")

View File

@@ -6,6 +6,8 @@ readme = "README.md"
requires-python = ">=3.11.9"
dependencies = [
"pandas>=2.3.2",
"pointcloudset>=0.11.0",
"polars>=1.33.0",
"pyarrow>=21.0.0",
"tabulate>=0.9.0",
]

1901
tools/uv.lock generated

File diff suppressed because it is too large