Python source code examples: sklearn.metrics.zero_one_loss()
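zero_one_loss(y_true, y_pred) returns the fraction of misclassified samples (i.e. 1 - accuracy); with normalize=False it returns the number of misclassifications instead. A minimal sketch of both call forms, using toy arrays that are purely illustrative and not taken from the examples below:

import numpy as np
from sklearn.metrics import zero_one_loss

y_true = np.array([0, 1, 2, 2])
y_pred = np.array([0, 1, 1, 2])

# fraction of misclassified samples: 1 wrong out of 4 -> 0.25
print(zero_one_loss(y_true, y_pred))

# raw count of misclassified samples -> 1
print(zero_one_loss(y_true, y_pred, normalize=False))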
Example 1
def multi_class_classification(data_X, data_Y):
    '''
    calculate multi-class classification and return related evaluation metrics
    '''
    # assumed imports: from sklearn import svm, metrics; cross_val_predict from
    # sklearn.model_selection (sklearn.cross_validation in older releases)
    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split(data_X, data_Y, test_size=0.4, random_state=0)
    clf = svc.fit(data_X, data_Y)  # svm
    # array = svc.coef_
    # print array
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print "accuracy", metrics.accuracy_score(data_Y, predicted)
    print "f1 score macro", metrics.f1_score(data_Y, predicted, average='macro')
    print "f1 score micro", metrics.f1_score(data_Y, predicted, average='micro')
    print "precision score", metrics.precision_score(data_Y, predicted, average='macro')
    print "recall score", metrics.recall_score(data_Y, predicted, average='macro')
    print "hamming_loss", metrics.hamming_loss(data_Y, predicted)
    print "classification_report", metrics.classification_report(data_Y, predicted)
    print "jaccard_similarity_score", metrics.jaccard_similarity_score(data_Y, predicted)
    # print "log_loss", metrics.log_loss(data_Y, predicted)
    print "zero_one_loss", metrics.zero_one_loss(data_Y, predicted)
    # print "AUC&ROC", metrics.roc_auc_score(data_Y, predicted)
    # print "matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted)
Example 2
def evaluation_analysis(true_label, predicted):
    '''
    return all metrics results
    '''
    print "accuracy", metrics.accuracy_score(true_label, predicted)
    print "f1 score macro", metrics.f1_score(true_label, predicted, average='macro')
    print "f1 score micro", metrics.f1_score(true_label, predicted, average='micro')
    print "precision score", metrics.precision_score(true_label, predicted, average='macro')
    print "recall score", metrics.recall_score(true_label, predicted, average='macro')
    print "hamming_loss", metrics.hamming_loss(true_label, predicted)
    print "classification_report", metrics.classification_report(true_label, predicted)
    print "jaccard_similarity_score", metrics.jaccard_similarity_score(true_label, predicted)
    print "log_loss", metrics.log_loss(true_label, predicted)
    print "zero_one_loss", metrics.zero_one_loss(true_label, predicted)
    print "AUC&ROC", metrics.roc_auc_score(true_label, predicted)
    print "matthews_corrcoef", metrics.matthews_corrcoef(true_label, predicted)
Example 3
def test_mdr_custom_score():
    """Ensure that the MDR 'score' function outputs the right custom score passed in from the user"""
    features = np.array([[2, 0],
                         [0, 0],
                         [0, 1],
                         [0, 0],
                         [0, 0],
                         [0, 0],
                         [0, 1],
                         [0, 0],
                         [0, 0],
                         [0, 1],
                         [0, 0],
                         [0, 0],
                         [0, 0],
                         [1, 1],
                         [1, 1]])
    classes = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    mdr = MDRClassifier()
    mdr.fit(features, classes)
    assert mdr.score(features=features, class_labels=classes, scoring_function=accuracy_score) == 12. / 15
    assert mdr.score(features=features, class_labels=classes, scoring_function=zero_one_loss) == 1 - 12. / 15
    assert mdr.score(features=features, class_labels=classes, scoring_function=zero_one_loss, normalize=False) == 15 - 12
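The last assertion relies on normalize=False, under which zero_one_loss returns the number of misclassified samples (here 15 - 12 = 3) rather than the misclassified fraction.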
Example 4
def test_multilabel_zero_one_loss_subset():
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    assert_equal(zero_one_loss(y1, y2), 0.5)
    assert_equal(zero_one_loss(y1, y1), 0)
    assert_equal(zero_one_loss(y2, y2), 0)
    assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
    assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
    assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
    assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
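With label-indicator (multilabel) input, zero_one_loss counts a sample as correct only when its entire label row matches exactly, which is why this test calls it the subset zero-one loss: a single wrong label in a row makes that whole sample count as an error.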
Example 5
def plot_logistic():
    np.random.seed(12345)
    fig, axes = plt.subplots(4, 4)
    for i, ax in enumerate(axes.flatten()):
        n_in = 1
        n_ex = 150
        X_train, y_train, X_test, y_test = random_classification_problem(
            n_ex, n_classes=2, n_in=n_in, seed=i
        )
        LR = LogisticRegression(penalty="l2", gamma=0.2, fit_intercept=True)
        LR.fit(X_train, y_train, lr=0.1, tol=1e-7, max_iter=1e7)
        y_pred = (LR.predict(X_test) >= 0.5) * 1.0
        loss = zero_one_loss(y_test, y_pred) * 100.0

        LR_sk = LogisticRegression_sk(
            penalty="l2", tol=0.0001, C=0.8, fit_intercept=True, random_state=i
        )
        LR_sk.fit(X_train, y_train)
        y_pred_sk = (LR_sk.predict(X_test) >= 0.5) * 1.0
        loss_sk = zero_one_loss(y_test, y_pred_sk) * 100.0

        xmin = min(X_test) - 0.1 * (max(X_test) - min(X_test))
        xmax = max(X_test) + 0.1 * (max(X_test) - min(X_test))
        X_plot = np.linspace(xmin, xmax, 100)
        y_plot = LR.predict(X_plot)
        y_plot_sk = LR_sk.predict_proba(X_plot.reshape(-1, 1))[:, 1]

        ax.scatter(X_test[y_pred == 0], y_test[y_pred == 0], alpha=0.5)
        ax.scatter(X_test[y_pred == 1], y_test[y_pred == 1], alpha=0.5)
        ax.plot(X_plot, y_plot, label="mine", alpha=0.75)
        ax.plot(X_plot, y_plot_sk, label="sklearn", alpha=0.75)
        ax.legend()
        ax.set_title("Loss mine: {:.2f} Loss sklearn: {:.2f}".format(loss, loss_sk))
        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])

    plt.tight_layout()
    plt.savefig("plot_logistic.png", dpi=300)
    plt.close("all")
Example 6
def test_gentleboost_musk_fitting():
    c = GentleBoostClassifier(
        base_estimator=DecisionTreeRegressor(max_depth=1),
        n_estimators=30,
        learning_rate=1.0
    )
    data = MUSK1()
    c.fit(data.data, np.sign(data.labels))
    assert_array_less(c.estimator_errors_, 0.5)
    assert zero_one_loss(np.sign(data.labels), c.predict(data.data)) < 0.1
Example 7
def test_gentleboost_hastie_fitting():
    c = GentleBoostClassifier(
        base_estimator=DecisionTreeRegressor(max_depth=1),
        n_estimators=30,
        learning_rate=1.0
    )
    data = Hastie_10_2()
    c.fit(data.data, np.sign(data.labels))
    assert_array_less(c.estimator_errors_, 0.5)
    assert zero_one_loss(np.sign(data.labels), c.predict(data.data)) < 0.2
Example 8
def test_milboost_musk_fitting_lse():
    c = MILBoostClassifier(
        base_estimator=DecisionTreeClassifier(max_depth=1),
        softmax=LogSumExponential(5.0),
        n_estimators=30,
        learning_rate=1.0
    )
    data = MUSK1()
    c.fit(data.data, data.labels)
    assert_array_less(c.estimator_errors_, 0.5)
    assert zero_one_loss(np.sign(data.labels), c.predict(data.data)) < 0.30
Example 9
def test_milboost_hastie_fitting():
    c = MILBoostClassifier(
        base_estimator=DecisionTreeClassifier(max_depth=1),
        softmax=LogSumExponential(5.0),
        n_estimators=30,
        learning_rate=1.0
    )
    data = Hastie_10_2()
    c.fit(data.data, data.labels)
    assert_array_less(c.estimator_errors_, 0.5)
    assert zero_one_loss(np.sign(data.labels), c.predict(data.data)) < 0.40
Example 10
def test_logitboost_musk_fitting():
    c = LogitBoostClassifier(
        base_estimator=DecisionTreeRegressor(max_depth=1),
        n_estimators=30,
        learning_rate=1.0
    )
    data = MUSK1()
    c.fit(data.data, np.sign(data.labels))
    assert_array_less(c.estimator_errors_, 0.6)
    assert zero_one_loss(np.sign(data.labels), c.predict(data.data)) < 0.05
Example 11
def fit(self, data, target):
    # assumed imports: numpy, math, DecisionTreeClassifier from sklearn.tree,
    # zero_one_loss from sklearn.metrics
    no_of_stages = self.no_of_stages
    decision_stump = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=1, max_features=1)
    # No. of samples
    m = data.shape[0]
    weight = numpy.ones(m)
    weight = numpy.float32(weight) / m
    Alpha = numpy.zeros(no_of_stages)
    classifiers = []
    for i in range(no_of_stages):
        decision_stump = decision_stump.fit(data, target, sample_weight=weight)
        classifiers.append(decision_stump)
        pred = decision_stump.predict(data)
        error = zero_one_loss(target, pred, normalize=True, sample_weight=weight)
        if error > 0.5:
            print 'error value is greater than 0.5!'
        beta = error / (1 - error)
        if beta != 0:
            # down-weight correctly classified samples, then renormalize
            weight[pred == target] = weight[pred == target] * beta
            weight = weight / weight.sum()
        print weight
        # beta_mat = (pred==target)*beta
        # beta_mat[beta_mat==0] = 1
        # weight = numpy.multiply(weight, beta_mat)
        if beta > 0:
            alpha = math.log(1 / beta)
        else:
            alpha = 10000  # make alpha extremely large if the decision stump is totally correct
        Alpha[i] = alpha
    self.Alpha = Alpha
    self.classifiers = classifiers
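In this boosting loop, zero_one_loss is called with sample_weight, so it returns the weighted misclassification rate of the current stump; that weighted error drives both the beta = error / (1 - error) reweighting step and the stage weight alpha.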
Example 12
def test_zero_one_loss(self):
    result = self.df.metrics.zero_one_loss()
    expected = metrics.zero_one_loss(self.target, self.pred)
    self.assertEqual(result, expected)
Example 13
def subset_01_loss(y_true, y_pred):
    return zero_one_loss(y_true, y_pred)