rand.py
# coding=utf-8
import numpy as np
import scipy.sparse as sparse
# Evaluation code courtesy of Juan Nunez-Iglesias, taken from
# https://github.com/janelia-flyem/gala/blob/master/gala/evaluate.py
def adapted_rand(seg, gt, all_stats=False):
    r"""Compute the Adapted Rand error as defined by the SNEMI3D contest [1].

    The formula is 1 minus the maximal F-score of the Rand index
    (excluding the zero component of the original labels). Adapted
    from the SNEMI3D MATLAB script, hence the somewhat unusual style.

    Parameters
    ----------
    seg : np.ndarray
        The segmentation to score, where each value is the label at that point.
    gt : np.ndarray, same shape as seg
        The ground truth to score against, where each value is a label.
    all_stats : boolean, optional
        Whether to also return precision and recall as a 3-tuple with rand_error.

    Returns
    -------
    are : float
        The adapted Rand error; equal to $1 - \frac{2pr}{p + r}$,
        where $p$ and $r$ are the precision and recall described below.
    prec : float, optional
        The adapted Rand precision. (Only returned when `all_stats` is ``True``.)
    rec : float, optional
        The adapted Rand recall. (Only returned when `all_stats` is ``True``.)

    References
    ----------
    [1]: http://brainiac2.mit.edu/SNEMI3D/evaluation
    """
    # segA is the ground truth, segB is the query segmentation.
    segA = np.ravel(gt)
    segB = np.ravel(seg)
    n = segA.size

    n_labels_A = np.amax(segA) + 1
    n_labels_B = np.amax(segB) + 1

    # Joint contingency table p_ij: entry (i, j) counts the pixels with
    # ground-truth label i and predicted label j.
    ones_data = np.ones(n)
    p_ij = sparse.csr_matrix((ones_data, (segA[:], segB[:])),
                             shape=(n_labels_A, n_labels_B))

    # Drop the zero (background) row of the ground truth; the zero column of
    # the prediction is kept separately and treated specially below.
    a = p_ij[1:n_labels_A, :]
    b = p_ij[1:n_labels_A, 1:n_labels_B]
    c = p_ij[1:n_labels_A, 0].todense()
    d = b.multiply(b)

    a_i = np.array(a.sum(1))
    b_i = np.array(b.sum(0))

    sumA = np.sum(a_i * a_i)
    sumB = np.sum(b_i * b_i) + (np.sum(c) / n)
    sumAB = np.sum(d) + (np.sum(c) / n)

    precision = sumAB / sumB
    recall = sumAB / sumA

    fScore = 2.0 * precision * recall / (precision + recall)
    are = 1.0 - fScore

    if all_stats:
        return (are, precision, recall)
    else:
        return are
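

# A minimal usage sketch, not part of the original file: the label arrays and
# shapes below are illustrative assumptions only. It shows how adapted_rand
# compares a candidate segmentation against a ground truth whose label 0 is
# treated as background and excluded from the score.
if __name__ == '__main__':
    # Two small 2-D label images with integer labels.
    gt = np.array([[1, 1, 2, 2],
                   [1, 1, 2, 2]])
    seg = np.array([[1, 1, 1, 2],
                    [1, 1, 2, 2]])

    are, prec, rec = adapted_rand(seg, gt, all_stats=True)
    print('adapted Rand error: %.4f (precision %.4f, recall %.4f)'
          % (are, prec, rec))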