@@ -12,6 +12,7 @@ import matplotlib.pyplot as plt
 import matplotlib.ticker as mtick
+


 # The datastructures for this file are as follows
 # models_dict: Dictionary - {model_id: model}
 # predictions: DataArray - (data_id, model_id, prediction_value) - Prediction value has coords ['negative_prediction', 'positive_prediction', 'negative_actual', 'positive_actual']
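
For orientation, a minimal sketch of the predictions DataArray described in the comments above; the sizes and model names are invented for illustration, and only the dimension and coordinate names come from this file:

import numpy as np
import xarray as xr

toy_predictions = xr.DataArray(
    np.zeros((3, 2, 4)),  # 3 images, 2 models, 4 prediction_value entries
    dims=('data_id', 'model_id', 'prediction_value'),
    coords={
        'data_id': range(3),
        'model_id': ['model_a', 'model_b'],
        'prediction_value': [
            'negative_prediction',
            'positive_prediction',
            'negative_actual',
            'positive_actual',
        ],
    },
)
# e.g. toy_predictions.loc[{'data_id': 0, 'model_id': 'model_a'}] is that model's row for image 0
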
@@ -78,7 +79,7 @@ def get_ensemble_predictions(models, dataset, device, id_offset=0):
         zeros,
         dims=('data_id', 'model_id', 'prediction_value'),
         coords={
-            'data_id': range(len(dataset)),
+            'data_id': range(id_offset, len(dataset) + id_offset),
             'model_id': list(models.keys()),
             'prediction_value': [
                 'negative_prediction',
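
The new id_offset start value simply shifts the data_id coordinate, so predictions for a second dataset can be appended without colliding IDs. A hypothetical illustration (dataset sizes invented):

ids_a = list(range(0, 100))    # id_offset=0 with a 100-item dataset  -> data_id 0..99
ids_b = list(range(100, 150))  # id_offset=100 with a 50-item dataset -> data_id 100..149
assert not set(ids_a) & set(ids_b)
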
@@ -160,7 +161,7 @@ def compute_ensemble_statistics(predictions: xr.DataArray):


 # Compute the thresholded predictions given an array of predictions
 def compute_thresholded_predictions(input_stats: xr.DataArray):
-    quantiles = np.linspace(0.05, 0.95, 19) * 100
+    quantiles = np.linspace(0.00, 1.00, 21) * 100
     metrics = ['accuracy', 'f1']
     statistics = ['stdev', 'entropy', 'confidence']
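
Concretely, the new grid keeps the 5% step but extends the old 5%-95% range to include both extremes:

import numpy as np

old_quantiles = np.linspace(0.05, 0.95, 19) * 100  # [5, 10, ..., 95]
new_quantiles = np.linspace(0.00, 1.00, 21) * 100  # [0, 5, ..., 100]
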
@@ -218,6 +219,12 @@ def compute_metric(arr, metric):
         return met.F1(
             arr.loc[{'statistic': 'predicted'}], arr.loc[{'statistic': 'actual'}]
         )
+    elif metric == 'ece':
+        true_labels = arr.loc[{'statistic': 'actual'}].values
+        predicted_labels = arr.loc[{'statistic': 'predicted'}].values
+        confidences = arr.loc[{'statistic': 'confidence'}].values
+
+        return calculate_ece_stats(confidences, predicted_labels, true_labels)

     else:
         raise ValueError('Invalid metric: ' + metric)
@@ -373,7 +380,9 @@ def compute_individual_statistics(predictions: xr.DataArray):
         },
     )

-    for data_id in predictions.data_id:
+    for data_id in tqdm(
+        predictions.data_id, total=len(predictions.data_id), unit='images'
+    ):
         for model_id in predictions.model_id:
             data = predictions.loc[{'data_id': data_id, 'model_id': model_id}]
             mean = data[0:2]
@@ -416,7 +425,9 @@ def compute_individual_thresholds(input_stats: xr.DataArray):
         },
     )

-    for model_id in input_stats.model_id:
+    for model_id in tqdm(
+        input_stats.model_id, total=len(input_stats.model_id), unit='models'
+    ):
         for statistic in statistics:
             # First, we must compute the quantiles for the statistic
             quantile_values = np.percentile(
@@ -550,8 +561,9 @@ def graph_all_individual_thresholded_predictions(

 # Calculate statistics of subsets of models for sensitivity analysis
 def calculate_subset_statistics(predictions: xr.DataArray):
-    # Calculate subsets for 1-50 models
-    subsets = range(1, len(predictions.model_id) + 1)
+    # Calculate subsets for 1-49 models
+    subsets = range(1, len(predictions.model_id))
+
     zeros = np.zeros(
         (len(predictions.data_id), len(subsets), 7)
     )  # Include stdev, but for 1 model set to NaN
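
Note the effect of dropping the "+ 1": with an assumed 50-model ensemble, the largest subset evaluated is now 49 models rather than the full ensemble.

n_models = 50                  # assumed ensemble size, for illustration only
max(range(1, n_models + 1))    # old behaviour: 50, includes the full ensemble
max(range(1, n_models))        # new behaviour: 49
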
@@ -574,7 +586,9 @@ def calculate_subset_statistics(predictions: xr.DataArray):
         },
     )

-    for data_id in predictions.data_id:
+    for data_id in tqdm(
+        predictions.data_id, total=len(predictions.data_id), unit='images'
+    ):
         for subset in subsets:
             data = predictions.sel(
                 data_id=data_id, model_id=predictions.model_id[:subset]
@@ -603,22 +617,24 @@ def calculate_subset_statistics(predictions: xr.DataArray):
 # Calculate Accuracy, F1 and ECE for subset stats - sensitivity analysis
 def calculate_sensitivity_analysis(subset_stats: xr.DataArray):
     subsets = subset_stats.model_count
-    stats = ['accuracy', 'f1']
+    stats = ['accuracy', 'f1', 'ece']

     zeros = np.zeros((len(subsets), len(stats)))

     sens_analysis = xr.DataArray(
         zeros,
         dims=('model_count', 'statistic'),
-        coords={'model_count': subsets, 'statistic': ['accuracy', 'f1']},
+        coords={'model_count': subsets, 'statistic': stats},
     )

-    for subset in subsets:
+    for subset in tqdm(subsets, total=len(subsets), unit='model subsets'):
+
         data = subset_stats.sel(model_count=subset)
-        acc = compute_metric(data, 'accuracy')
-        f1 = compute_metric(data, 'f1')
+        acc = compute_metric(data, 'accuracy').item()
+        f1 = compute_metric(data, 'f1').item()
+        ece = compute_metric(data, 'ece').item()

-        sens_analysis.loc[{'model_count': subset}] = [acc, f1]
+        sens_analysis.loc[{'model_count': subset.item()}] = [acc, f1, ece]

     return sens_analysis

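
The added .item() calls unwrap what compute_metric returns (presumably a 0-d or single-element array-like) into plain Python floats before the triple is written into sens_analysis. A minimal illustration with an invented value:

import xarray as xr

metric_value = xr.DataArray(0.93)  # shape of a typical scalar metric result
metric_value.item()                # -> 0.93, a plain float safe to put in the [acc, f1, ece] list
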
@@ -648,18 +664,12 @@ def calculate_overall_stats(ensemble_statistics: xr.DataArray):


 # https://towardsdatascience.com/expected-calibration-error-ece-a-step-by-step-visual-explanation-with-python-code-c3e9aa12937d
-def calculate_ece_stats(statistics, bins=10):
+def calculate_ece_stats(confidences, predicted_labels, true_labels, bins=10):
     bin_boundaries = np.linspace(0, 1, bins + 1)
     bin_lowers = bin_boundaries[:-1]
     bin_uppers = bin_boundaries[1:]

-    confidences = ((statistics.sel(statistic='mean').values) - 0.5) * 2
-    accuracies = statistics.sel(statistic='correct').values
-
     ece = np.zeros(1)
-    bin_accuracies = xr.DataArray(
-        np.zeros(bins), dims=('lower_bound'), coords={'lower_bound': bin_lowers}
-    )

     for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
         in_bin = np.logical_and(
@@ -668,16 +678,13 @@ def calculate_ece_stats(statistics, bins=10):
         prob_in_bin = in_bin.mean()

         if prob_in_bin.item() > 0:
-            accuracy_in_bin = accuracies[in_bin].mean()
+            accuracy_in_bin = (predicted_labels[in_bin] == true_labels[in_bin]).mean()

-            bin_accuracies.loc[{'lower_bound': bin_lower}]
             avg_confidence_in_bin = confidences[in_bin].mean()
-            ece += np.abs(avg_confidence_in_bin - accuracy_in_bin) * prob_in_bin

-    bin_accuracies.attrs['ece'] = ece
-    bin_accuracies.attrs['bin_number'] = bins
+            ece += np.abs(avg_confidence_in_bin - accuracy_in_bin) * prob_in_bin

-    return bin_accuracies
+    return ece


 def plot_ece_graph(ece_stats, title, xlabel, ylabel, save_path):
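
A small usage sketch of the rewritten helper; the arrays are invented for illustration and assume binary 0/1 labels, with confidences holding the probability the model assigned to its predicted label:

import numpy as np

confidences = np.array([0.95, 0.85, 0.70, 0.60])
predicted_labels = np.array([1, 0, 1, 1])
true_labels = np.array([1, 0, 0, 1])

ece = calculate_ece_stats(confidences, predicted_labels, true_labels, bins=10)
print(ece.item())  # 0.0 would mean per-bin confidence exactly matches per-bin accuracy
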
@@ -788,6 +795,7 @@ def main():
     print('Individual Thresholded Predictions Graphed')

     # Compute subset statistics and graph
+    print('Computing Sensitivity Analysis...')
     subset_stats = calculate_subset_statistics(predictions)
     sens_analysis = calculate_sensitivity_analysis(subset_stats)
     graph_sensitivity_analysis(
@@ -798,6 +806,14 @@ def main():
         '# of Models',
         'Accuracy',
     )
+    graph_sensitivity_analysis(
+        sens_analysis,
+        'ece',
+        f'{V4_PATH}/sens_analysis_ece.png',
+        'Sensitivity Analysis of ECE vs. # of Models',
+        '# of Models',
+        'ECE',
+    )
     print(sens_analysis.sel(statistic='accuracy'))
     print(calculate_overall_stats(ensemble_statistics))