Python source code examples: sklearn.metrics.auc()
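sklearn.metrics.auc(x, y) computes the area under an arbitrary curve from its x and y coordinates using the trapezoidal rule; in practice it is almost always fed the output of roc_curve or precision_recall_curve, as the collected examples below show. As a minimal, self-contained sketch of that pattern (the toy labels and scores here are illustrative only and not taken from any of the projects below):

import numpy as np
from sklearn.metrics import auc, roc_curve, precision_recall_curve

# Toy binary labels and classifier scores (illustrative only).
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

# ROC AUC: pass the false positive rate as x and the true positive rate as y.
fpr, tpr, _ = roc_curve(y_true, y_score)
print("ROC AUC:", auc(fpr, tpr))          # 0.75 for this toy data

# Precision-recall AUC: pass recall as x and precision as y.
precision, recall, _ = precision_recall_curve(y_true, y_score)
print("PR AUC:", auc(recall, precision))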
Example 1
def fit_model(self, data, cross_val_data, cross_val_labels):
    eval_metrics = []
    for i in range(self.n_ensemble):
        train_sm = np.concatenate(cross_val_data[:i] +
                                  cross_val_data[(i + 1):])
        test_sm = cross_val_data[i]
        train_labels = np.concatenate(cross_val_labels[:i] +
                                      cross_val_labels[(i + 1):])
        test_labels = cross_val_labels[i]
        fp_train = get_fp(train_sm)
        fp_test = get_fp(test_sm)
        self.model[i].fit(fp_train, train_labels.ravel())
        predicted = self.model[i].predict(fp_test)
        if self.model_type == 'classifier':
            fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)
            eval_metrics.append(metrics.auc(fpr, tpr))
            metrics_type = 'AUC'
        elif self.model_type == 'regressor':
            r2 = metrics.r2_score(test_labels, predicted)
            eval_metrics.append(r2)
            metrics_type = 'R^2 score'
    return eval_metrics, metrics_type
Example 2
def compute_roc(y_true, y_pred, plot=False):
    """
    Compute the ROC curve and its AUC.
    :param y_true: ground truth
    :param y_pred: predictions
    :param plot: if True, plot the ROC curve
    :return: fpr, tpr, auc_score
    """
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()
    return fpr, tpr, auc_score
Example 3
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    Compute the ROC curve from negative- and positive-class score arrays.
    :param probs_neg: scores for the negative class
    :param probs_pos: scores for the positive class
    :param plot: if True, plot the ROC curve
    :return: fpr, tpr, auc_score
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()
    return fpr, tpr, auc_score
Example 4
def compute_auc(y_true, y_pred, label_index):
    """Compute Area Under the Curve (AUC) metric.

    Args:
        y_true: true class
        y_pred: probabilities for a class
        label_index:
            label_index == 1 => laughter (class1) vs. others (class0)
            label_index == 2 => filler (class1) vs. others (class0)
    Returns:
        auc_val: AUC metric accuracy
    """
    # Binarize the labels: label_index becomes the positive class.
    for i in range(y_true.shape[0]):
        y_true[i] = 0 if y_true[i] != label_index else 1
    y_true = np.reshape(y_true, (-1,))
    y_pred = np.reshape(y_pred[:, label_index], (-1,))
    try:
        fpr, tpr, _ = roc_curve(y_true, y_pred, pos_label=1)
    except UndefinedMetricWarning:
        pass
    auc_val = auc(fpr, tpr)
    return auc_val
Example 5
def roc(self, data, model, tt, name):
    scores = self.get_predictions_loss(data, model, tt)[0]
    labels = [prot["label"][:, 2] for prot in data[tt]]
    fprs = []
    tprs = []
    roc_aucs = []
    for s, l in zip(scores, labels):
        fpr, tpr, _ = roc_curve(l, s)
        roc_auc = auc(fpr, tpr)
        fprs.append(fpr)
        tprs.append(tpr)
        roc_aucs.append(roc_auc)
    auc_prot_med = np.median(roc_aucs)
    auc_prot_ave = np.mean(roc_aucs)
    printt("{} average protein auc: {:0.3f}".format(name, auc_prot_ave))
    printt("{} median protein auc: {:0.3f}".format(name, auc_prot_med))
    return ["auc_prot_ave_" + tt, "auc_prot_med_" + tt], [auc_prot_ave, auc_prot_med]
Example 6
def get_all_metrics(model, eval_data, eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))
    score = model.evaluate(eval_data, eval_labels, verbose=0)
    print("Test accuracy: " + str(score[1]))
    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))
    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))
    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))
    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))
    return auc_, score[1], precision, recall, f1, average_precision, fpr, tpr
Example 7
def get_all_metrics_(eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))
    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))
    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))
    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))
    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))
    return auc_, precision, recall, f1, average_precision, fpr, tpr
Example 8
def compute_roc_auc_scores(y, y_pred):
    """Compute the ROC AUC score, falling back to 0.5 when it is undefined.

    Parameters
    ----------
    y: array-like
        True binary labels.
    y_pred: array-like
        Predicted scores.
    """
    try:
        score = roc_auc_score(y, y_pred)
    except ValueError:
        warnings.warn("ROC AUC score calculation failed.")
        score = 0.5
    return score
Example 9
def accuracy(y_true, y_pred):
    # Compute the confusion matrix
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))
    for i in range(len(y_true)):
        y[i] = np.argmax(y_true[i, :])
        y_[i] = np.argmax(y_pred[i, :])
    cnf_mat = confusion_matrix(y, y_)

    # Acc = 1.0*(cnf_mat[1][1]+cnf_mat[0][0])/len(y_true)
    # Sens = 1.0*cnf_mat[1][1]/(cnf_mat[1][1]+cnf_mat[1][0])
    # Spec = 1.0*cnf_mat[0][0]/(cnf_mat[0][0]+cnf_mat[0][1])

    # # Plot the ROC curve
    # fpr, tpr, thresholds = roc_curve(y_true[:,0], y_pred[:,0])
    # Auc = auc(fpr, tpr)

    # Compute multi-class evaluation metrics
    Sens = recall_score(y, y_, average='macro')
    Prec = precision_score(y, y_, average='macro')
    F1 = f1_score(y, y_, average='weighted')
    Support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
    return Sens, Prec, F1, cnf_mat
Example 10
def compute_eer(loss_file, reverse, smoothing):
    if not os.path.isdir(loss_file):
        loss_file_list = [loss_file]
    else:
        loss_file_list = os.listdir(loss_file)
        loss_file_list = [os.path.join(loss_file, sub_loss_file) for sub_loss_file in loss_file_list]

    optimal_results = RecordResult(auc=np.inf)
    for sub_loss_file in loss_file_list:
        dataset, scores, labels = get_scores_labels(sub_loss_file, reverse, smoothing)
        fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
        eer = cal_eer(fpr, tpr)

        results = RecordResult(fpr, tpr, eer, dataset, sub_loss_file)

        if optimal_results > results:
            optimal_results = results

        if os.path.isdir(loss_file):
            print(results)

    print('##### optimal result and model EER = {}'.format(optimal_results))
    return optimal_results
Example 11
def __call__(self, pos_triples, neg_triples=None):
    triples = pos_triples + neg_triples
    labels = [1 for _ in range(len(pos_triples))] + [0 for _ in range(len(neg_triples))]

    Xr, Xe = [], []
    for (s_idx, p_idx, o_idx), label in zip(triples, labels):
        Xr += [[p_idx]]
        Xe += [[s_idx, o_idx]]

    ascores = self.scoring_function([Xr, Xe])
    ays = np.array(labels)

    if self.rescale_predictions:
        diffs = np.diff(np.sort(ascores))
        min_diff = min(abs(diffs[np.nonzero(diffs)]))
        if min_diff < 1e-8:
            ascores = (ascores * (1e-7 / min_diff)).astype(np.float64)

    aucroc_value = metrics.roc_auc_score(ays, ascores)
    precision, recall, thresholds = metrics.precision_recall_curve(ays, ascores, pos_label=1)
    aucpr_value = metrics.auc(recall, precision)

    return aucroc_value, aucpr_value
Example 12
def test_auc():
    # Test Area Under Curve (AUC) computation
    x = [0, 1]
    y = [0, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    x = [1, 0]
    y = [0, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    x = [1, 0, 0]
    y = [0, 1, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    x = [0, 1]
    y = [1, 1]
    assert_array_almost_equal(auc(x, y), 1)
    x = [0, 0.5, 1]
    y = [0, 0.5, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
Example 13
def test_auc_gold_labels_behaviour(self, device: str):
    # Check that it works with different pos_label
    auc = Auc(positive_label=4)

    predictions = torch.randn(8, device=device)
    labels = torch.randint(3, 5, (8,), dtype=torch.long, device=device)
    # We make sure that the positive label is always present.
    labels[0] = 4

    auc(predictions, labels)
    computed_auc_value = auc.get_metric(reset=True)

    false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
        labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
    )
    real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
    assert_allclose(real_auc_value, computed_auc_value)

    # Check that it errs on getting more than 2 labels.
    with pytest.raises(ConfigurationError) as _:
        labels = torch.tensor([3, 4, 5, 6, 7, 8, 9, 10], device=device)
        auc(predictions, labels)
Example 14
def compute_aupr(all_targets, all_predictions):
    aupr_array = []
    for i in range(all_targets.shape[1]):
        try:
            precision, recall, thresholds = metrics.precision_recall_curve(all_targets[:, i], all_predictions[:, i], pos_label=1)
            # Note: the `reorder` keyword was deprecated and later removed from sklearn.metrics.auc.
            auPR = metrics.auc(recall, precision, reorder=True)
            if not math.isnan(auPR):
                aupr_array.append(numpy.nan_to_num(auPR))
        except:
            pass
    aupr_array = numpy.array(aupr_array)
    mean_aupr = numpy.mean(aupr_array)
    median_aupr = numpy.median(aupr_array)
    var_aupr = numpy.var(aupr_array)
    return mean_aupr, median_aupr, var_aupr, aupr_array
Example 15
def _plot_macro_roc(fpr, tpr, n, lw, fmt, ax):
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n)]))
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])
    mean_tpr /= n
    fpr_macro = all_fpr
    tpr_macro = mean_tpr
    auc_macro = auc(fpr_macro, tpr_macro)
    label = 'ROC curve: macro (AUC = {auc:{fmt}})'.format(auc=auc_macro, fmt=fmt)
    ax.plot(fpr_macro,
            tpr_macro,
            label=label,
            color='navy',
            ls=':',
            lw=lw)
Example 16
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)

    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
Example 17
def get_pr_auc(pr_results, args, plot=False, plot_name=""):
    xys = [(0.0, 1.0, 0.)]
    labels = []
    for tau, result in pr_results.items():
        xys += [(pr_results[tau]["recall"], pr_results[tau]["prec"], tau)]
        labels += [tau]
    xys.sort(key=lambda x: x[0])
    xs = [i[0] for i in xys]
    ys = [i[1] for i in xys]
    # print("pr")
    # for i in sorted(xys, key=lambda x: x[-1]): print(i)
    # print("recall", xs)
    # print("precis", ys)
    _auc = auc(xs, ys)
    if plot:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        ax.plot(xs, ys, 'go-',)
        for label, x, y in zip(labels, xs, ys):
            ax.annotate(
                label,
                xy=(x, y), xytext=(-20, 20),
                textcoords='offset points', ha='right', va='bottom',
                bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
        path = os.path.join(args.log_dir, "pr-{}.svg".format(plot_name))
        print("Storing PR plot in", path)
        fig.savefig(path)
        plt.close(fig)
    return _auc
Example 18
def get_roc_auc(pr_results, args, plot=False, plot_name=""):
    xys = [(0.0, 0.0, 0.0)]
    labels = []
    for tau, result in pr_results.items():
        xys += [(pr_results[tau]["fpr"], pr_results[tau]["tpr"], tau)]
        labels += [tau]
    xys.sort(key=lambda x: x[0])
    xs = [i[0] for i in xys]
    ys = [i[1] for i in xys]
    # print("roc")
    # for i in sorted(xys, key=lambda x: x[-1]): print(i)
    # print("fpr", xs)
    # print("tpr", ys)
    _auc = auc(xs, ys)
    if plot:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        ax.plot(xs, ys, 'go-',)
        for label, x, y in zip(labels, xs, ys):
            ax.annotate(
                label,
                xy=(x, y), xytext=(-20, 20),
                textcoords='offset points', ha='right', va='bottom',
                bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
        path = os.path.join(args.log_dir, "roc-{}.svg".format(plot_name))
        print("Storing ROC plot in", path)
        fig.savefig(path)
        plt.close(fig)
    return _auc
Example 19
def print_metrics(test_word_arrayLabel, result_type):
    true_positives = 0
    false_negatives = 0
    false_positives = 0
    true_negatives = 0
    num_examples = len(test_word_arrayLabel)
    for example_num in range(0, num_examples):
        predicted_label = result_type[example_num]
        if test_word_arrayLabel[example_num] == 1:
            if predicted_label == 1:
                true_positives += 1
            elif predicted_label == 2:
                false_negatives += 1
        elif test_word_arrayLabel[example_num] == 2:
            if predicted_label == 1:
                false_positives += 1
            elif predicted_label == 2:
                true_negatives += 1
    TPR = true_positives / (true_positives + false_negatives)
    FPR = false_positives / (true_negatives + false_positives)
    return TPR, FPR

# def plotROCCurve(ROC_value):
#     fpr = dict()
#     tpr = dict()
#     roc_auc = dict()
#     for i in range(2):
#         fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
#         roc_auc[i] = auc(fpr[i], tpr[i])
#
#     # Compute micro-average ROC curve and ROC area
#     fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
#     roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
Example 20
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    def _binary_average_precision(y_true, y_score, sample_weight=None):
        precision, recall, thresholds = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)

    return _average_binary_score(_binary_average_precision, y_true, y_score,
                                 average, sample_weight=sample_weight)
Example 21
def roc(labels, scores, saveto=None):
    """Compute ROC curve and ROC area for each class"""
    fpr = dict()
    tpr = dict()
    roc_auc = dict()

    labels = labels.cpu()
    scores = scores.cpu()

    # True/False Positive Rates.
    fpr, tpr, _ = roc_curve(labels, scores)
    roc_auc = auc(fpr, tpr)

    # Equal Error Rate
    eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)

    if saveto:
        plt.figure()
        lw = 2
        plt.plot(fpr, tpr, color='darkorange', lw=lw, label='(AUC = %0.2f, EER = %0.2f)' % (roc_auc, eer))
        plt.plot([eer], [1 - eer], marker='o', markersize=5, color="navy")
        plt.plot([0, 1], [1, 0], color='navy', lw=1, linestyle=':')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic')
        plt.legend(loc="lower right")
        plt.savefig(os.path.join(saveto, "ROC.pdf"))
        plt.close()

    return roc_auc
Example 22
def compute_roc(probs_neg, probs_pos):
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    return fpr, tpr, auc_score
Example 23
def calc_metrics(testy, scores):
    precision, recall, _ = precision_recall_curve(testy, scores)
    roc_auc = roc_auc_score(testy, scores)
    prc_auc = auc(recall, precision)
    return roc_auc, prc_auc
Example 24
def fit_model(self, data):
    eval_metrics = []
    if self.feature_type == 'fingerprints':
        fps = get_fp(data.smiles)
    elif self.feature_type == 'descriptors':
        fps, _, _ = get_desc(data.smiles, self.calc)
    if self.model_type == 'classifier':
        cross_val_data, cross_val_labels = \
            cross_validation_split(fps, data.binary_labels)
    elif self.model_type == 'regressor':
        cross_val_data, cross_val_labels = \
            cross_validation_split(fps, data.property)
    for i in range(self.n_ensemble):
        train_sm = np.concatenate(cross_val_data[:i] + cross_val_data[(i + 1):])
        test_sm = cross_val_data[i]
        train_labels = np.concatenate(cross_val_labels[:i] +
                                      cross_val_labels[(i + 1):])
        test_labels = cross_val_labels[i]
        if self.feature_type == 'descriptors':
            train_sm, desc_mean = normalize_desc(train_sm)
            self.desc_mean[i] = desc_mean
            test_sm, _ = normalize_desc(test_sm, desc_mean)
        self.model[i].fit(train_sm, train_labels.ravel())
        predicted = self.model[i].predict(test_sm)
        if self.model_type == 'classifier':
            fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)
            eval_metrics.append(metrics.auc(fpr, tpr))
            metrics_type = 'AUC'
        elif self.model_type == 'regressor':
            r2 = metrics.r2_score(test_labels, predicted)
            eval_metrics.append(r2)
            metrics_type = 'R^2 score'
    return eval_metrics, metrics_type
Example 25
def get_metrics(predictions, targets):
    # Calculate metrics
    # Accuracy
    acc = np.mean(np.equal(np.argmax(predictions, 1), np.argmax(targets, 1)))
    # Confusion matrix
    conf = confusion_matrix(np.argmax(targets, 1), np.argmax(predictions, 1))
    # Class weighted accuracy
    wacc = conf.diagonal() / conf.sum(axis=1)
    # AUC
    fpr = {}
    tpr = {}
    roc_auc = np.zeros([numClasses])
    for i in range(numClasses):
        fpr[i], tpr[i], _ = roc_curve(targets[:, i], predictions[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # F1 Score
    f1 = f1_score(np.argmax(predictions, 1), np.argmax(targets, 1), average='weighted')
    # Print
    print("Accuracy:", acc)
    print("F1-Score:", f1)
    print("WACC:", wacc)
    print("Mean WACC:", np.mean(wacc))
    print("AUC:", roc_auc)
    print("Mean AUC:", np.mean(roc_auc))
    return acc, f1, wacc, roc_auc

# For an actual evaluation, evaluate each CV split independently and show results both for each CV set and for all of them together
Example 26
def evalEnsemble(currComb, eval_auc=False):
    currWacc = np.zeros([cvSize])
    currAUC = np.zeros([cvSize])
    for i in range(cvSize):
        if evaluate_method == 'vote':
            pred_argmax = np.argmax(accum_preds[i][currComb, :, :], 2)
            pred_eval = np.zeros([pred_argmax.shape[1], numClasses])
            for j in range(pred_eval.shape[0]):
                pred_eval[j, :] = np.bincount(pred_argmax[:, j], minlength=numClasses)
        else:
            pred_eval = np.mean(accum_preds[i][currComb, :, :], 0)
        # Confusion matrix
        conf = confusion_matrix(np.argmax(final_targets[i], 1), np.argmax(pred_eval, 1))
        # Class weighted accuracy
        currWacc[i] = np.mean(conf.diagonal() / conf.sum(axis=1))
        if eval_auc:
            currAUC_ = np.zeros([numClasses])
            for j in range(numClasses):
                fpr, tpr, _ = roc_curve(final_targets[i][:, j], pred_eval[:, j])
                currAUC_[j] = auc(fpr, tpr)
            currAUC[i] = np.mean(currAUC_)
    if eval_auc:
        currAUCstd = np.std(currAUC)
        currAUC = np.mean(currAUC)
    else:
        currAUCstd = currAUC
    currWaccStd = np.std(currWacc)
    currWacc = np.mean(currWacc)
    if eval_auc:
        return currWacc, currWaccStd, currAUC, currAUCstd
    else:
        return currWacc
Example 27
def prc_auc_score(y, y_pred):
    """Compute area under precision-recall curve"""
    if y.shape != y_pred.shape:
        y = _ensure_one_hot(y)
    assert y_pred.shape == y.shape
    assert y_pred.shape[1] == 2
    precision, recall, _ = precision_recall_curve(y[:, 1], y_pred[:, 1])
    return auc(recall, precision)
Example 28
def testLearnInLocalCluster(self, *_):
    from mars.learn.neighbors import NearestNeighbors
    from sklearn.neighbors import NearestNeighbors as SkNearestNeighbors
    from mars.learn.metrics import roc_curve, auc
    from sklearn.metrics import roc_curve as sklearn_roc_curve, auc as sklearn_auc

    with new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M') as cluster:
        rs = np.random.RandomState(0)
        raw_X = rs.rand(10, 5)
        raw_Y = rs.rand(8, 5)

        X = mt.tensor(raw_X, chunk_size=7)
        Y = mt.tensor(raw_Y, chunk_size=(5, 3))
        nn = NearestNeighbors(n_neighbors=3)
        nn.fit(X)
        ret = nn.kneighbors(Y, session=cluster.session)

        snn = SkNearestNeighbors(n_neighbors=3)
        snn.fit(raw_X)
        expected = snn.kneighbors(raw_Y)

        result = [r.fetch() for r in ret]
        np.testing.assert_almost_equal(result[0], expected[0])
        np.testing.assert_almost_equal(result[1], expected[1])

        rs = np.random.RandomState(0)
        raw = pd.DataFrame({'a': rs.randint(0, 10, (10,)),
                            'b': rs.rand(10)})
        df = md.DataFrame(raw)
        y = df['a'].to_tensor().astype('int')
        pred = df['b'].to_tensor().astype('float')

        fpr, tpr, thresholds = roc_curve(y, pred, pos_label=2)
        m = auc(fpr, tpr)

        sk_fpr, sk_tpr, sk_threshod = sklearn_roc_curve(raw['a'].to_numpy().astype('int'),
                                                        raw['b'].to_numpy().astype('float'),
                                                        pos_label=2)
        expect_m = sklearn_auc(sk_fpr, sk_tpr)
        self.assertAlmostEqual(m.fetch(), expect_m)
Example 29
def plot_ROC(y_true, y_preds, num_classes, labels, micro=True, macro=True):
    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(num_classes):
        fpr[i], tpr[i], _ = metrics.roc_curve(y_true[:, i], y_preds[:, i])
        roc_auc[i] = metrics.auc(fpr[i], tpr[i])

    if micro:
        # Compute micro-average ROC curve and ROC area
        fpr["micro"], tpr["micro"], _ = metrics.roc_curve(y_true.ravel(), y_preds.ravel())
        roc_auc["micro"] = metrics.auc(fpr["micro"], tpr["micro"])

    if macro:
        # Compute macro-average ROC curve and ROC area
        # First aggregate all false positive rates
        all_fpr = np.unique(np.concatenate([fpr[i] for i in range(num_classes)]))
        # Then interpolate all ROC curves at these points
        mean_tpr = np.zeros_like(all_fpr)
        for i in range(num_classes):
            mean_tpr += interp(all_fpr, fpr[i], tpr[i])
        # Finally average it and compute AUC
        mean_tpr /= num_classes
        fpr["macro"] = all_fpr
        tpr["macro"] = mean_tpr
        roc_auc["macro"] = metrics.auc(fpr["macro"], tpr["macro"])

    for i in range(num_classes):
        plot_class_ROC(fpr, tpr, roc_auc, i, labels)
    plot_multi_ROC(fpr, tpr, roc_auc, num_classes, labels, micro, macro)
Example 30
def prc_auc_score(y, y_pred):
    """Compute area under precision-recall curve"""
    assert y_pred.shape == y.shape
    assert y_pred.shape[1] == 2
    precision, recall, _ = precision_recall_curve(y[:, 1], y_pred[:, 1])
    return auc(recall, precision)