Python Source Code Examples: sklearn.ensemble.GradientBoostingClassifier()
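All snippets below use scikit-learn's sklearn.ensemble.GradientBoostingClassifier. As a minimal, self-contained sketch of the basic fit/predict workflow (synthetic data and illustrative hyperparameters, not tuned values):

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split

# Synthetic binary classification data, split into train/test sets.
X, y = make_classification(n_samples=500, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Illustrative hyperparameters; the defaults also work.
clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
                                 max_depth=3, random_state=0)
clf.fit(X_train, y_train)

print(clf.score(X_test, y_test))      # mean accuracy on the test set
print(clf.predict_proba(X_test[:5]))  # class probabilities for a few samples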
Example 1
def search_cv(x_train, y_train, x_test, y_test, model=GradientBoostingClassifier(n_estimators=30)):
    # grid search to find the best parameters
    parameters = {'kernel': ('linear', 'rbf'), 'C': [1, 2, 4], 'gamma': [0.125, 0.25, 0.5, 1, 2, 4]}
    clf = GridSearchCV(model, param_grid=parameters)
    grid_search = clf.fit(x_train, y_train)
    # score the results
    print("Best score: %0.3f" % grid_search.best_score_)
    print(grid_search.best_estimator_)
    # best params
    print('best params:', clf.best_params_)
    print('-----grid search end------------')
    print('on all train set')
    scores = cross_val_score(grid_search.best_estimator_, x_train, y_train, cv=3, scoring='accuracy')
    print(scores.mean(), scores)
    print('on test set')
    scores = cross_val_score(grid_search.best_estimator_, x_test, y_test, cv=3, scoring='accuracy')
    print(scores.mean(), scores)
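Note that the grid above uses SVC-style hyperparameters (kernel, C, gamma), so it only makes sense when an SVC is passed as model. A minimal sketch of a grid that matches GradientBoostingClassifier itself (the parameter values are illustrative, not tuned):

from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV

# Illustrative hyperparameter grid for GradientBoostingClassifier.
gbc_parameters = {
    'n_estimators': [50, 100, 200],
    'learning_rate': [0.05, 0.1, 0.2],
    'max_depth': [2, 3, 4],
}
gbc_search = GridSearchCV(GradientBoostingClassifier(random_state=0),
                          param_grid=gbc_parameters, cv=3, scoring='accuracy')
# gbc_search.fit(x_train, y_train) would then expose best_params_ and best_estimator_
# exactly as in the search_cv function above.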
Example 2
def Train(data, modelcount, censhu, yanzhgdata):
    model = GradientBoostingClassifier(loss='deviance', n_estimators=modelcount, max_depth=censhu,
                                       learning_rate=0.1, max_features='sqrt')
    model.fit(data[:, :-1], data[:, -1])
    # predictions on the training data
    train_out = model.predict(data[:, :-1])
    # compute the MSE
    train_mse = fmse(data[:, -1], train_out)[0]
    # predictions on the validation data
    add_yan = model.predict(yanzhgdata[:, :-1])
    # compute the F1 metric
    add_mse = fmse(yanzhgdata[:, -1], add_yan)[0]
    print(train_mse, add_mse)
    return train_mse, add_mse

# Function that determines the final combination
Example 3
def recspre(estrs, predata, datadict, zhe):
    mo, ze = estrs.split('-')
    model = GradientBoostingClassifier(loss='deviance', n_estimators=int(mo), max_depth=int(ze), learning_rate=0.1)
    model.fit(datadict[zhe]['train'][:, :-1], datadict[zhe]['train'][:, -1])
    # predict
    yucede = model.predict(predata[:, :-1])
    # compute the confusion matrix
    print(ConfuseMatrix(predata[:, -1], yucede))
    return fmse(predata[:, -1], yucede)

# main function
Example 4
def check_classification_synthetic(presort, loss):
    # Test GradientBoostingClassifier on synthetic dataset used by
    # Hastie et al. in ESLII Example 12.7.
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]

    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
                                      max_depth=1, loss=loss,
                                      learning_rate=1.0, random_state=0)
    gbrt.fit(X_train, y_train)
    error_rate = (1.0 - gbrt.score(X_test, y_test))
    assert_less(error_rate, 0.09)

    gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
                                      max_depth=1, loss=loss,
                                      learning_rate=1.0, subsample=0.5,
                                      random_state=0,
                                      presort=presort)
    gbrt.fit(X_train, y_train)
    error_rate = (1.0 - gbrt.score(X_test, y_test))
    assert_less(error_rate, 0.08)
Example 5
def test_probability_log():
    # Predict probabilities.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    assert_raises(ValueError, clf.predict_proba, T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    # check if probabilities are in [0, 1].
    y_proba = clf.predict_proba(T)
    assert np.all(y_proba >= 0.0)
    assert np.all(y_proba <= 1.0)

    # derive predictions from probabilities
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
Example 6
def test_check_inputs_predict_stages():
    # check that predict_stages throws an error if the type of X is not
    # supported
    x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    x_sparse_csc = csc_matrix(x)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(x, y)
    score = np.zeros((y.shape)).reshape(-1, 1)
    assert_raise_message(ValueError,
                         "When X is a sparse matrix, a CSR format is expected",
                         predict_stages, clf.estimators_, x_sparse_csc,
                         clf.learning_rate, score)
    x_fortran = np.asfortranarray(x)
    assert_raise_message(ValueError,
                         "X should be C-ordered np.ndarray",
                         predict_stages, clf.estimators_, x_fortran,
                         clf.learning_rate, score)
Example 7
def test_serialization():
    # Check model serialization.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
    clf = None
    clf = pickle.loads(serialized_clf)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
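Outside of a test harness, a common way to persist fitted scikit-learn models is joblib. A minimal sketch (the model.joblib file name is an arbitrary choice):

import joblib
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(random_state=0)
clf = GradientBoostingClassifier(random_state=0).fit(X, y)

joblib.dump(clf, 'model.joblib')         # write the fitted estimator to disk
restored = joblib.load('model.joblib')   # load it back
assert (restored.predict(X) == clf.predict(X)).all()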
Example 8
def test_verbose_output():
    # Check verbose=1 does not cause error.
    from io import StringIO
    import sys
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=1, subsample=0.8)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout

    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # with OOB
    true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
        'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
    assert_equal(true_header, header)

    n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for iterations 1-10, then one every 10 iterations
    # for iterations 20-100 (9 more lines)
    assert_equal(10 + 9, n_lines)
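Besides the verbose log, the per-iteration training loss is exposed programmatically after fitting. A minimal sketch using the train_score_ attribute and staged_predict (synthetic data, illustrative settings):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=300, random_state=0)
clf = GradientBoostingClassifier(n_estimators=50, random_state=0).fit(X, y)

print(clf.train_score_[:5])  # training loss at the first five boosting stages

# staged_predict yields predictions after each stage, useful for learning curves.
staged_acc = [np.mean(pred == y) for pred in clf.staged_predict(X)]
print(staged_acc[-1])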
Example 9
def test_more_verbose_output():
    # Check verbose=2 does not cause error.
    from io import StringIO
    import sys
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=2)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout

    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # no OOB
    true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
        'Iter', 'Train Loss', 'Remaining Time')
    assert_equal(true_header, header)

    n_lines = sum(1 for l in verbose_output.readlines())
    # 100 lines for n_estimators==100
    assert_equal(100, n_lines)
Example 10
def test_zero_estimator_clf():
    # Test if init='zero' works for classification.
    X = iris.data
    y = np.array(iris.target)

    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)
    assert_greater(est.score(X, y), 0.96)

    # binary clf
    mask = y != 0
    y[mask] = 1
    y[~mask] = 0
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)
    assert_greater(est.score(X, y), 0.96)

    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, X, y)
Example 11
def test_probability_exponential():
    # Predict probabilities.
    clf = GradientBoostingClassifier(loss='exponential',
                                     n_estimators=100, random_state=1)

    assert_raises(ValueError, clf.predict_proba, T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    # check if probabilities are in [0, 1].
    y_proba = clf.predict_proba(T)
    assert np.all(y_proba >= 0.0)
    assert np.all(y_proba <= 1.0)

    score = clf.decision_function(T).ravel()
    assert_array_almost_equal(y_proba[:, 1], expit(2 * score))

    # derive predictions from probabilities
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
Example 12
def test_early_stopping_n_classes():
    # when doing early stopping (_, _, y_train, _ = train_test_split(X, y))
    # there might be classes in y that are missing in y_train. As the init
    # estimator will be trained on y_train, we need to raise an error if this
    # happens.
    X = [[1]] * 10
    y = [0, 0] + [1] * 8  # only 2 negative-class samples out of 10

    gb = GradientBoostingClassifier(n_iter_no_change=5, random_state=0,
                                    validation_fraction=8)
    with pytest.raises(
            ValueError,
            match='The training data after the early stopping split'):
        gb.fit(X, y)

    # No error if we let the training data be big enough
    gb = GradientBoostingClassifier(n_iter_no_change=5, random_state=0,
                                    validation_fraction=4)
    gb.fit(X, y)
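For reference, a minimal sketch of how early stopping is typically used with GradientBoostingClassifier (the n_iter_no_change and validation_fraction values are illustrative); after fitting, n_estimators_ reports how many boosting stages were actually built:

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=1000, random_state=0)

# Hold out 10% of the training data internally and stop once the validation
# score has not improved for 10 consecutive iterations.
clf = GradientBoostingClassifier(n_estimators=500, n_iter_no_change=10,
                                 validation_fraction=0.1, random_state=0)
clf.fit(X, y)
print(clf.n_estimators_)  # number of stages actually fitted (<= 500)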
Example 13
def test_recursion_decision_function(target_feature):
    # Make sure the recursion method (implicitly uses decision_function) has
    # the same result as using brute method with
    # response_method=decision_function
    X, y = make_classification(n_classes=2, n_clusters_per_class=1,
                               random_state=1)
    assert np.mean(y) == .5  # make sure the init estimator predicts 0 anyway

    est = GradientBoostingClassifier(random_state=0, loss='deviance')
    est.fit(X, y)

    preds_1, _ = partial_dependence(est, X, [target_feature],
                                    response_method='decision_function',
                                    method='recursion')
    preds_2, _ = partial_dependence(est, X, [target_feature],
                                    response_method='decision_function',
                                    method='brute')

    assert_allclose(preds_1, preds_2, atol=1e-7)
Example 14
def test_warning_recursion_non_constant_init():
    # make sure that passing a non-constant init parameter to a GBDT and using
    # recursion method yields a warning.
    gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0)
    gbc.fit(X, y)

    with pytest.warns(
            UserWarning,
            match='Using recursion method with a non-constant init predictor'):
        partial_dependence(gbc, X, [0], method='recursion')

    with pytest.warns(
            UserWarning,
            match='Using recursion method with a non-constant init predictor'):
        partial_dependence(gbc, X, [0], method='recursion')
Example 15
def setUpClass(self):
    """
    Set up the unit test by loading the dataset and training a model.
    """
    from sklearn.datasets import load_boston
    import numpy as np

    scikit_data = load_boston()
    scikit_model = GradientBoostingClassifier(random_state=1)
    t = scikit_data.target
    target = np.digitize(t, np.histogram(t)[1]) - 1

    scikit_model.fit(scikit_data.data, target)
    self.target = target

    # Save the data and the model
    self.scikit_data = scikit_data
    self.scikit_model = scikit_model
Example 16
def main():
    # prepare data
    trainingSet = []
    testSet = []
    accuracy = 0.0
    split = 0.25
    loadDataset('../Dataset/phishing.data', split, trainingSet, testSet)
    print('Train set: ' + repr(len(trainingSet)))
    print('Test set: ' + repr(len(testSet)))

    trainData = np.array(trainingSet)[:, 0:np.array(trainingSet).shape[1] - 1]
    columns = trainData.shape[1]
    X = np.array(trainData)
    y = np.array(trainingSet)[:, columns]

    clf = GradientBoostingClassifier()
    clf.fit(X, y)

    testData = np.array(testSet)[:, 0:np.array(trainingSet).shape[1] - 1]
    X_test = np.array(testData)
    y_test = np.array(testSet)[:, columns]
    accuracy = clf.score(X_test, y_test)
    accuracy *= 100
    print("Accuracy %:", accuracy)
Example 17
def learn(x, y, test_x):
    # set sample weight
    weight_list = []
    for j in range(len(y)):
        if y[j] == "0":
            weight_list.append(variables.weight_0_gdbt_b)
        if y[j] == "1000":
            weight_list.append(variables.weight_1000_gdbt_b)
        if y[j] == "1500":
            weight_list.append(variables.weight_1500_gdbt_b)
        if y[j] == "2000":
            weight_list.append(variables.weight_2000_gdbt_b)

    clf = GradientBoostingClassifier(loss='deviance', n_estimators=variables.n_estimators_gdbt_b,
                                     learning_rate=variables.learning_rate_gdbt_b,
                                     max_depth=variables.max_depth_gdbt_b, random_state=0,
                                     min_samples_split=variables.min_samples_split_gdbt_b,
                                     min_samples_leaf=variables.min_samples_leaf_gdbt_b,
                                     subsample=variables.subsample_gdbt_b,
                                     ).fit(x, y, weight_list)
    prediction_list = clf.predict(test_x)
    return prediction_list
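When per-class weights are not hand-tuned constants as above, scikit-learn can derive balanced sample weights directly. A minimal sketch using sklearn.utils.class_weight.compute_sample_weight (synthetic imbalanced data for illustration):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.utils.class_weight import compute_sample_weight

# Imbalanced synthetic data (roughly 90% / 10%).
X, y = make_classification(n_samples=1000, weights=[0.9, 0.1], random_state=0)

# 'balanced' weights samples inversely proportional to class frequency.
sample_weight = compute_sample_weight('balanced', y)

clf = GradientBoostingClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
print(np.unique(sample_weight))  # one weight value per class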
Example 18
def gbc_gp_predict(train_x, train_y, test_x):
    feature_indexs = getTopFeatures(train_x, train_y)
    sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list,
                           features.feature_pair_plus_list, features.feature_pair_mul_list,
                           features.feature_pair_divide_list[:20])
    sub_x_Test = get_data(test_x, feature_indexs[:16], features.feature_pair_sub_list,
                          features.feature_pair_plus_list, features.feature_pair_mul_list,
                          features.feature_pair_divide_list[:20])
    labels = toLabels(train_y)
    gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=9)
    gbc.fit(sub_x_Train, labels)
    pred_probs = gbc.predict_proba(sub_x_Test)[:, 1]
    ind_test = np.where(pred_probs > 0.55)[0]
    gp_preds_part = gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test[ind_test])
    gp_preds = np.zeros(len(test_x))
    gp_preds[ind_test] = gp_preds_part
    return gp_preds

# invoke the function gbc_svr_predict_part
Example 19
def env_5(request):
    return Environment(
        train_dataset=get_breast_cancer_data(),
        results_path=assets_dir,
        target_column="diagnosis",
        metrics=["roc_auc_score"],
        cv_type=StratifiedKFold,
        cv_params=dict(n_splits=3, shuffle=True, random_state=32),
        experiment_recorders=request.param,
    )

##################################################
# Experiment Fixtures
##################################################
#################### GradientBoostingClassifier Experiments ####################
Example 20
def __init__(self, options):
    self.handle_options(options)
    params = options.get('params', {})
    out_params = convert_params(
        params,
        strs=['loss', 'max_features'],
        floats=['learning_rate', 'min_weight_fraction_leaf'],
        ints=['n_estimators', 'max_depth', 'min_samples_split',
              'min_samples_leaf', 'max_leaf_nodes', 'random_state'],
    )

    valid_loss = ['deviance', 'exponential']
    if 'loss' in out_params:
        if out_params['loss'] not in valid_loss:
            msg = "loss must be one of: {}".format(', '.join(valid_loss))
            raise RuntimeError(msg)

    if 'max_features' in out_params:
        out_params['max_features'] = handle_max_features(out_params['max_features'])

    if 'max_leaf_nodes' in out_params and 'max_depth' in out_params:
        messages.warn('max_depth ignored when max_leaf_nodes is set')

    self.estimator = _GradientBoostingClassifier(**out_params)
Example 21
def test_register_model(self, iris_dataset):
    pytest.importorskip('sklearn')
    from sasctl import register_model
    from sklearn.ensemble import GradientBoostingClassifier

    TARGET = 'Species'
    X = iris_dataset.drop(TARGET, axis=1)
    y = iris_dataset[TARGET]

    model = GradientBoostingClassifier()
    model.fit(X, y)

    model = register_model(model, self.MODEL_NAME, self.PROJECT_NAME, input=X, force=True)
    assert model.name == self.MODEL_NAME
    assert model.projectName == self.PROJECT_NAME
    assert model.function.lower() == 'classification'
    assert model.algorithm.lower() == 'gradient boosting'
    assert model.tool.lower().startswith('python')
Example 22
def gbtfunc(dep):
    m = gbt(max_depth=dep, random_state=0)
    m.fit(traindata, trainlabel)
    predtrain = m.predict(traindata)
    predtest = m.predict_proba(testdata)
    # print predtest.shape, predtest[1,:]
    return np.sum(predtrain == trainlabel) / float(traindata.shape[0]), \
           np.mean((predtest[:, 1] > 0.5).astype(int) == testlabel), predtest  # / float(testdata.shape[0]),

# trainacc, testacc, predtest = gbtfunc(3)
# print trainacc, testacc
# np.save('pixradiustest.npy', predtest[:,1])
Example 23
def define_clfs_params(self):
    '''
    Defines all relevant parameters and classes for classifier objects.
    Edit these if you wish to change parameters.
    '''
    # These are the classifiers
    self.clfs = {
        'RF': RandomForestClassifier(n_estimators=50, n_jobs=-1),
        'ET': ExtraTreesClassifier(n_estimators=10, n_jobs=-1, criterion='entropy'),
        'AB': AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=200),
        'LR': LogisticRegression(penalty='l1', C=1e5),
        'SVM': svm.SVC(kernel='linear', probability=True, random_state=0),
        'GB': GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=10),
        'NB': GaussianNB(),
        'DT': DecisionTreeClassifier(),
        'SGD': SGDClassifier(loss='log', penalty='l2'),
        'KNN': KNeighborsClassifier(n_neighbors=3)
    }
    # These are the parameters which will be run through
    self.params = {
        'RF': {'n_estimators': [1, 10, 100, 1000], 'max_depth': [10, 15, 20, 30, 40, 50, 60, 70, 100], 'max_features': ['sqrt', 'log2'], 'min_samples_split': [2, 5, 10], 'random_state': [1]},
        'LR': {'penalty': ['l1', 'l2'], 'C': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10], 'random_state': [1]},
        'SGD': {'loss': ['log'], 'penalty': ['l2', 'l1', 'elasticnet'], 'random_state': [1]},
        'ET': {'n_estimators': [1, 10, 100, 1000], 'criterion': ['gini', 'entropy'], 'max_depth': [1, 3, 5, 10, 15], 'max_features': ['sqrt', 'log2'], 'min_samples_split': [2, 5, 10], 'random_state': [1]},
        'AB': {'algorithm': ['SAMME', 'SAMME.R'], 'n_estimators': [1, 10, 100, 1000], 'random_state': [1]},
        'GB': {'n_estimators': [1, 10, 100, 1000], 'learning_rate': [0.001, 0.01, 0.05, 0.1, 0.5], 'subsample': [0.1, 0.5, 1.0], 'max_depth': [1, 3, 5, 10, 20, 50, 100], 'random_state': [1]},
        'NB': {},
        'DT': {'criterion': ['gini', 'entropy'], 'max_depth': [1, 2, 15, 20, 30, 40, 50], 'max_features': ['sqrt', 'log2'], 'min_samples_split': [2, 5, 10], 'random_state': [1]},
        'SVM': {'C': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10], 'kernel': ['linear'], 'random_state': [1]},
        'KNN': {'n_neighbors': [1, 5, 10, 25, 50, 100], 'weights': ['uniform', 'distance'], 'algorithm': ['auto', 'ball_tree', 'kd_tree']}
    }
Example 24
def GBDT_First(self, data, max_depth=5, n_estimators=320):
    model = GradientBoostingClassifier(loss='deviance', n_estimators=n_estimators, max_depth=max_depth,
                                       learning_rate=0.1, max_features='sqrt')
    model.fit(data['train'][:, :-1], data['train'][:, -1])
    # Store the results for the validation set and the prediction set
    # predictions on the training set
    xul = model.predict(data['train'][:, :-1])
    # predictions on the validation set
    yanre = model.predict(data['test'][:, :-1])
    # predictions on the prediction set
    prer = model.predict(data['predict'][:, :-1])
    # After each fold, compute the error on the training, validation and prediction data
    xx = self.F1(xul, data['train'][:, -1])
    yy = self.F1(yanre, data['test'][:, -1])
    pp = self.F1(prer, data['predict'][:, -1])
    # start combining
    self.yanzhneg_pr.append(yanre)
    self.yanzhneg_real = data['test'][:, -1]
    self.predi.append(prer)
    self.preal = data['predict'][:, -1]
    # store the errors
    self.error_dict['GBDT'] = [xx, yy, pp]
    return print('GBDT in layer 1 finished')

# LightGBM
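The F1 values above come from the class's own self.F1 helper. For reference, the equivalent scikit-learn metric is a one-liner (average='macro' shown as an illustrative choice for multi-class labels):

import numpy as np
from sklearn.metrics import f1_score

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 1])
print(f1_score(y_true, y_pred))                   # binary F1
print(f1_score(y_true, y_pred, average='macro'))  # macro-averaged, also valid for multi-class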
Example 25
def fitGradientBoosting(data):
    '''
    Build a gradient boosting classifier
    '''
    # create the classifier object
    gradBoost = en.GradientBoostingClassifier(
        min_samples_split=100, n_estimators=500)

    # fit the data
    return gradBoost.fit(data[0], data[1])

# the file name of the dataset
Example 26
def test_partial_dependence_classifier():
    # Test partial dependence for classifier
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)

    pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)

    # only 4 grid points instead of 5 because only 4 unique X[:,0] vals
    assert pdp.shape == (1, 4)
    assert axes[0].shape[0] == 4

    # now with our own grid
    X_ = np.asarray(X)
    grid = np.unique(X_[:, 0])
    pdp_2, axes = partial_dependence(clf, [0], grid=grid)

    assert axes is None
    assert_array_equal(pdp, pdp_2)

    # with trivial (no-op) sample weights
    clf.fit(X, y, sample_weight=np.ones(len(y)))
    pdp_w, axes_w = partial_dependence(clf, [0], X=X, grid_resolution=5)
    assert pdp_w.shape == (1, 4)
    assert axes_w[0].shape[0] == 4
    assert_allclose(pdp_w, pdp)

    # with non-trivial sample weights
    clf.fit(X, y, sample_weight=sample_weight)
    pdp_w2, axes_w2 = partial_dependence(clf, [0], X=X, grid_resolution=5)
    assert pdp_w2.shape == (1, 4)
    assert axes_w2[0].shape[0] == 4
    assert np.all(np.abs(pdp_w2 - pdp_w) / np.abs(pdp_w) > 0.1)
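Examples 26-30 use the legacy sklearn.ensemble.partial_dependence API, which later scikit-learn releases removed. A minimal sketch of the replacement in sklearn.inspection (the exact return keys vary slightly across versions, e.g. 'values' vs. 'grid_values'):

from sklearn.datasets import load_iris
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.inspection import partial_dependence

iris = load_iris()
clf = GradientBoostingClassifier(n_estimators=10, random_state=1).fit(iris.data, iris.target)

# Partial dependence of the prediction on feature 0, averaged over the data.
result = partial_dependence(clf, iris.data, features=[0], grid_resolution=5)
print(result['average'].shape)  # typically one row per class for a multi-class model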
Example 27
def test_partial_dependence_multiclass():
    # Test partial dependence for multi-class classifier
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)

    grid_resolution = 25
    n_classes = clf.n_classes_
    pdp, axes = partial_dependence(
        clf, [0], X=iris.data, grid_resolution=grid_resolution)

    assert pdp.shape == (n_classes, grid_resolution)
    assert len(axes) == 1
    assert axes[0].shape[0] == grid_resolution
Example 28
def test_partial_dependecy_input():
    # Test input validation of partial dependence.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)

    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=None, X=None)

    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=[0, 1], X=X)

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, partial_dependence,
                  {}, [0], X=X)

    # Gradient boosting estimator must be fit
    assert_raises(ValueError, partial_dependence,
                  GradientBoostingClassifier(), [0], X=X)

    assert_raises(ValueError, partial_dependence, clf, [-1], X=X)

    assert_raises(ValueError, partial_dependence, clf, [100], X=X)

    # wrong ndim for grid
    grid = np.random.rand(10, 2, 1)
    assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
Example 29
def test_plot_partial_dependence_input(pyplot):
    # Test partial dependence plot function input checks.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)

    # not fitted yet
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [0])

    clf.fit(X, y)

    assert_raises(ValueError, plot_partial_dependence,
                  clf, np.array(X)[:, :0], [0])

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, plot_partial_dependence,
                  {}, X, [0])

    # must be larger than -1
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [-1])

    # too large feature value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [100])

    # str feature but no feature_names
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, ['foobar'])

    # not valid features value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [{'foo': 'bar'}])
Example 30
def test_plot_partial_dependence_multiclass(pyplot):
    # Test partial dependence plot function on multi-class input.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)

    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label=0,
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # now with symbol labels
    target = iris.target_names[iris.target]
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, target)

    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label='setosa',
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # label not in gbrt.classes_
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1], label='foobar',
                  grid_resolution=grid_resolution)

    # label not provided
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1],
                  grid_resolution=grid_resolution)
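On newer scikit-learn releases, where the legacy plot_partial_dependence helper for gradient boosting no longer exists, the sklearn.inspection equivalent can be used instead. A minimal sketch (PartialDependenceDisplay.from_estimator requires scikit-learn >= 1.0 and matplotlib):

from sklearn.datasets import load_iris
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.inspection import PartialDependenceDisplay

iris = load_iris()
clf = GradientBoostingClassifier(n_estimators=10, random_state=1).fit(iris.data, iris.target)

# Plot partial dependence of features 0 and 1 for class 0; in a multi-class
# setting the target class must be specified explicitly.
display = PartialDependenceDisplay.from_estimator(
    clf, iris.data, features=[0, 1], target=0, grid_resolution=25)
display.figure_.suptitle("Partial dependence for class 0")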