zhiqing0205 committed
Commit 83a82e0 · Parent: 3de7bf6

Add scripts, results, and visualization archive

results/few_shot_results.md ADDED
@@ -0,0 +1,17 @@
+ # All Categories - Few-shot Protocol Results
+
+ **Last Updated:** 2025-08-18 03:35:23
+
+ **Protocol:** Few-shot
+ **Available Categories:** breakfast_box, juice_bottle, pushpins, screw_bag, splicing_connectors
+
+ ## Summary Table
+
+ | Category | K-shots | F1-Max (Image) | AUROC (Image) | F1-Max (Logical) | AUROC (Logical) | F1-Max (Structural) | AUROC (Structural) |
+ |----------|---------|----------------|---------------|------------------|-----------------|---------------------|-------------------|
+ | breakfast_box | 4 | 89.91 | 94.36 | 92.41 | 97.08 | 85.03 | 91.85 |
+ | juice_bottle | 4 | 88.16 | 84.26 | 83.54 | 85.46 | 77.31 | 82.46 |
+ | pushpins | 4 | 81.36 | 82.89 | 78.82 | 84.28 | 70.81 | 81.32 |
+ | screw_bag | 4 | 82.95 | 81.29 | 77.14 | 79.72 | 78.21 | 83.91 |
+ | splicing_connectors | 4 | 84.70 | 88.60 | 84.49 | 89.21 | 82.80 | 87.82 |
+ | **Average** | 4 | 85.42 | 86.28 | 83.28 | 87.15 | 78.83 | 85.47 |
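
The **Average** row is the unweighted mean of the five category rows, rounded to two decimals. A minimal check of that arithmetic, with the numbers copied from the few-shot table above (the same procedure applies to the full-data table below):

```python
# Re-derive the few-shot Average row column by column.
# Column order: F1-Max/AUROC for Image, Logical, Structural.
rows = [
    [89.91, 94.36, 92.41, 97.08, 85.03, 91.85],  # breakfast_box
    [88.16, 84.26, 83.54, 85.46, 77.31, 82.46],  # juice_bottle
    [81.36, 82.89, 78.82, 84.28, 70.81, 81.32],  # pushpins
    [82.95, 81.29, 77.14, 79.72, 78.21, 83.91],  # screw_bag
    [84.70, 88.60, 84.49, 89.21, 82.80, 87.82],  # splicing_connectors
]
print([round(sum(col) / len(col), 2) for col in zip(*rows)])
# → [85.42, 86.28, 83.28, 87.15, 78.83, 85.47], matching the Average row
```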
results/full_data_results.md ADDED
@@ -0,0 +1,17 @@
+ # All Categories - Full-data Protocol Results
+
+ **Last Updated:** 2025-08-18 15:14:57
+
+ **Protocol:** Full-data
+ **Available Categories:** breakfast_box, juice_bottle, pushpins, screw_bag, splicing_connectors
+
+ ## Summary Table
+
+ | Category | K-shots | F1-Max (Image) | AUROC (Image) | F1-Max (Logical) | AUROC (Logical) | F1-Max (Structural) | AUROC (Structural) |
+ |----------|---------|----------------|---------------|------------------|-----------------|---------------------|-------------------|
+ | breakfast_box | 4 | 91.95 | 95.69 | 90.45 | 97.14 | 90.48 | 94.35 |
+ | juice_bottle | 4 | 93.94 | 95.23 | 91.91 | 94.80 | 94.12 | 95.88 |
+ | pushpins | 4 | 81.34 | 83.95 | 79.53 | 84.68 | 73.58 | 83.14 |
+ | screw_bag | 4 | 85.22 | 83.20 | 77.85 | 80.20 | 86.03 | 88.22 |
+ | splicing_connectors | 4 | 91.26 | 93.48 | 86.57 | 90.34 | 93.57 | 97.47 |
+ | **Average** | 4 | 88.74 | 90.31 | 85.26 | 89.43 | 87.56 | 91.81 |
scripts/compute_averages.py ADDED
@@ -0,0 +1,163 @@
+ #!/usr/bin/env python3
+ """
+ Compute Average Metrics Script for LogSAD Results
+
+ This script automatically detects results MD files in the results/ directory,
+ calculates average metrics across all categories, and appends an average row
+ to each file if one is not already present.
+ """
+
+ import glob
+
+
+ def has_average_metrics(file_path):
+     """Check whether the file already contains an average metrics row."""
+     try:
+         with open(file_path, 'r', encoding='utf-8') as f:
+             content = f.read()
+         return "| **Average**" in content or "| Average" in content
+     except Exception as e:
+         print(f"Error reading {file_path}: {e}")
+         return False
+
+
+ def parse_results_table(file_path):
+     """Parse the results table from an MD file and extract per-category metrics."""
+     try:
+         with open(file_path, 'r', encoding='utf-8') as f:
+             content = f.read()
+
+         categories_data = []
+
+         # A valid data row has nine '|' separators: empty edges, the
+         # category, K-shots, and the six metric columns.
+         for line in content.split('\n'):
+             if line.count('|') < 9:
+                 continue
+             parts = [p.strip() for p in line.split('|')]
+             # Skip the header, separator rows, and any existing average row.
+             if (parts[1] in ('Category', '', 'Average') or
+                     set(parts[1]) == {'-'} or
+                     parts[1].startswith('**Average')):
+                 continue
+             try:
+                 categories_data.append({
+                     'category': parts[1],
+                     'f1_image': float(parts[3]),
+                     'auroc_image': float(parts[4]),
+                     'f1_logical': float(parts[5]),
+                     'auroc_logical': float(parts[6]),
+                     'f1_structural': float(parts[7]),
+                     'auroc_structural': float(parts[8]),
+                 })
+             except (ValueError, IndexError):
+                 continue
+
+         return categories_data
+     except Exception as e:
+         print(f"Error parsing {file_path}: {e}")
+         return []
+
+
+ def calculate_averages(categories_data):
+     """Calculate average metrics across all categories."""
+     if not categories_data:
+         return None
+
+     n = len(categories_data)
+     return {
+         'f1_image': sum(c['f1_image'] for c in categories_data) / n,
+         'auroc_image': sum(c['auroc_image'] for c in categories_data) / n,
+         'f1_logical': sum(c['f1_logical'] for c in categories_data) / n,
+         'auroc_logical': sum(c['auroc_logical'] for c in categories_data) / n,
+         'f1_structural': sum(c['f1_structural'] for c in categories_data) / n,
+         'auroc_structural': sum(c['auroc_structural'] for c in categories_data) / n,
+     }
+
+
+ def append_averages_to_file(file_path, averages):
+     """Append average metrics as the last row of the existing table."""
+     try:
+         with open(file_path, 'r', encoding='utf-8') as f:
+             lines = f.read().split('\n')
+
+         # The K-shots column is hardcoded to 4, matching the generated rows.
+         average_row = (
+             f"| **Average** | 4 "
+             f"| {averages['f1_image']:.2f} | {averages['auroc_image']:.2f} "
+             f"| {averages['f1_logical']:.2f} | {averages['auroc_logical']:.2f} "
+             f"| {averages['f1_structural']:.2f} | {averages['auroc_structural']:.2f} |"
+         )
+
+         new_lines = []
+         for i, line in enumerate(lines):
+             new_lines.append(line)
+             # Insert the average row right after the last table row: the
+             # first table-formatted line whose successor is not one.
+             if line.count('|') >= 9:
+                 if (i + 1 >= len(lines) or
+                         lines[i + 1].strip() == '' or
+                         lines[i + 1].count('|') < 9):
+                     new_lines.append(average_row)
+
+         with open(file_path, 'w', encoding='utf-8') as f:
+             f.write('\n'.join(new_lines))
+
+         print(f"✓ Added average metrics row to {file_path}")
+         return True
+     except Exception as e:
+         print(f"✗ Error appending to {file_path}: {e}")
+         return False
+
+
+ def process_results_file(file_path):
+     """Process a single results file."""
+     print(f"Processing: {file_path}")
+
+     if has_average_metrics(file_path):
+         print("  → Average metrics already exist, skipping")
+         return
+
+     categories_data = parse_results_table(file_path)
+     if not categories_data:
+         print("  → No valid data found, skipping")
+         return
+
+     print(f"  → Found {len(categories_data)} categories")
+
+     averages = calculate_averages(categories_data)
+     if not averages:
+         print("  → Failed to calculate averages, skipping")
+         return
+
+     append_averages_to_file(file_path, averages)
+
+
+ def main():
+     """Process all results files."""
+     print("LogSAD Average Metrics Computation")
+     print("=" * 50)
+
+     results_pattern = "results/*_results.md"
+     results_files = glob.glob(results_pattern)
+
+     if not results_files:
+         print("No results files found matching pattern:", results_pattern)
+         return
+
+     print(f"Found {len(results_files)} results file(s):")
+     for file_path in results_files:
+         print(f"  - {file_path}")
+
+     print("\nProcessing files...")
+     print("-" * 30)
+
+     for file_path in results_files:
+         process_results_file(file_path)
+         print()
+
+     print("Average metrics computation completed!")
+
+
+ if __name__ == "__main__":
+     main()
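
The functions above can also be used for a dry run that prints the averages without rewriting any file. A minimal sketch, assuming it is executed from the repository root (the `sys.path` tweak is only needed because the script lives under `scripts/`):

```python
# Hypothetical dry run of compute_averages.py: parse one results file
# and print the column means without modifying the file on disk.
import sys
sys.path.insert(0, "scripts")

from compute_averages import calculate_averages, parse_results_table

rows = parse_results_table("results/few_shot_results.md")
print(f"parsed {len(rows)} categories")
for metric, value in calculate_averages(rows).items():
    print(f"{metric}: {value:.2f}")
```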
scripts/run_few_shot.sh ADDED
@@ -0,0 +1,49 @@
+ #!/bin/bash
+
+ # Few-shot Protocol Evaluation Script for LogSAD
+ # This script runs evaluation for all categories in the MVTec LOCO dataset
+
+ # Dataset path
+ DATASET_PATH="/root/autodl-tmp/datasets/mvtec_loco_anomaly_detection"
+
+ # Categories to evaluate
+ CATEGORIES=("breakfast_box" "juice_bottle" "pushpins" "screw_bag" "splicing_connectors")
+
+ echo "Starting Few-shot Protocol Evaluation..."
+ echo "Dataset path: $DATASET_PATH"
+ echo "Categories: ${CATEGORIES[*]}"
+ echo "=================================="
+
+ # Main loop to iterate through all categories
+ for i in "${!CATEGORIES[@]}"; do
+     category="${CATEGORIES[$i]}"
+     current=$((i + 1))
+     total=${#CATEGORIES[@]}
+
+     echo "[$current/$total] Evaluating category: $category"
+     echo "Command: python evaluation.py --module_path model_ensemble_few_shot --category $category --dataset_path $DATASET_PATH --viz"
+
+     python evaluation.py --module_path model_ensemble_few_shot --category "$category" --dataset_path "$DATASET_PATH" --viz
+
+     if [ $? -eq 0 ]; then
+         echo "✓ Successfully completed evaluation for $category"
+     else
+         echo "✗ Failed to evaluate $category"
+         exit 1
+     fi
+
+     echo "=================================="
+ done
+
+ echo "All categories evaluation completed!"
+ echo ""
+ echo "Results saved in results/ directory:"
+ echo "  - Combined results: results/few_shot_results.md"
+
+ echo "=================================="
+ echo "Computing average metrics across all categories..."
+
+ # Run the average computation script
+ python scripts/compute_averages.py
+
+ echo "✓ Average metrics computation completed"
scripts/run_full_data.sh ADDED
@@ -0,0 +1,65 @@
+ #!/bin/bash
+
+ # Full-data Protocol Evaluation Script for LogSAD
+ # This script first computes a coreset and then runs evaluation for all categories in the MVTec LOCO dataset
+
+ # Dataset path
+ DATASET_PATH="/root/autodl-tmp/datasets/mvtec_loco_anomaly_detection"
+
+ # Categories to evaluate
+ CATEGORIES=("breakfast_box" "juice_bottle" "pushpins" "screw_bag" "splicing_connectors")
+
+ echo "Starting Full-data Protocol Evaluation..."
+ echo "Dataset path: $DATASET_PATH"
+ echo "Categories: ${CATEGORIES[*]}"
+ echo "=================================="
+
+ # Main loop to iterate through all categories
+ for i in "${!CATEGORIES[@]}"; do
+     category="${CATEGORIES[$i]}"
+     current=$((i + 1))
+     total=${#CATEGORIES[@]}
+
+     echo "[$current/$total] Processing category: $category"
+
+     # Step 1: Compute coreset for full-data scenarios
+     echo "Step 1: Computing coreset for $category"
+     echo "Command: python compute_coreset.py --module_path model_ensemble --category $category --dataset_path $DATASET_PATH"
+
+     python compute_coreset.py --module_path model_ensemble --category "$category" --dataset_path "$DATASET_PATH"
+
+     if [ $? -eq 0 ]; then
+         echo "✓ Successfully computed coreset for $category"
+     else
+         echo "✗ Failed to compute coreset for $category"
+         exit 1
+     fi
+
+     # Step 2: Run evaluation for full-data protocol
+     echo "Step 2: Running evaluation for $category"
+     echo "Command: python evaluation.py --module_path model_ensemble --category $category --dataset_path $DATASET_PATH --viz"
+
+     python evaluation.py --module_path model_ensemble --category "$category" --dataset_path "$DATASET_PATH" --viz
+
+     if [ $? -eq 0 ]; then
+         echo "✓ Successfully completed evaluation for $category"
+     else
+         echo "✗ Failed to evaluate $category"
+         exit 1
+     fi
+
+     echo "=================================="
+ done
+
+ echo "All categories processing completed!"
+ echo ""
+ echo "Results saved in results/ directory:"
+ echo "  - Combined results: results/full_data_results.md"
+
+ echo "=================================="
+ echo "Computing average metrics across all categories..."
+
+ # Run the average computation script
+ python scripts/compute_averages.py
+
+ echo "✓ Average metrics computation completed"
scripts/setup_environment.sh ADDED
@@ -0,0 +1,42 @@
+ #!/bin/bash
+
+ # LogSAD Environment Setup Script
+ # This script creates and configures the complete environment for LogSAD
+ # Run this script to set up all dependencies and requirements
+
+ set -e  # Exit on any error
+
+ echo "LogSAD Environment Setup"
+ echo "======================="
+ echo "Setting up conda environment and dependencies..."
+
+ # Create conda environment
+ echo "Creating conda environment 'logsad' with Python 3.10..."
+ conda create -n logsad python=3.10 -y
+
+ # Activate environment ('conda activate' requires the shell hook inside scripts)
+ echo "Activating environment..."
+ eval "$(conda shell.bash hook)"
+ conda activate logsad
+
+ # Install PyTorch with CUDA support
+ echo "Installing PyTorch with CUDA 12.1 support..."
+ pip install torch==2.1.2+cu121 torchvision -f https://mirrors.aliyun.com/pytorch-wheels/cu121/
+
+ # Install project requirements
+ echo "Installing project requirements..."
+ pip install -r requirements.txt
+
+ # Install specific numpy version for compatibility
+ echo "Installing compatible numpy version..."
+ pip install numpy==1.23.1
+
+ echo ""
+ echo "✓ Environment setup completed successfully!"
+ echo ""
+ echo "To activate the environment, run:"
+ echo "  conda activate logsad"
+ echo ""
+ echo "To run evaluations, use:"
+ echo "  bash scripts/run_few_shot.sh   # For few-shot protocol"
+ echo "  bash scripts/run_full_data.sh  # For full-data protocol"
visualization.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e4f5793a0c62e806d2e9e9067717b5ccb4e366af2a746e665f99f9ec564de99
+ size 838311086