# Regression toy data
x = [[3,6],[6,9],[9,12],[12,15],[15,18],[18,21],[21,23]]
# y = [[3],[6],[9],[12],[15],[18],[21]]
y = [3,6,9,12,15,18,21]
x1 = [[2,4],[4,6],[6,8],[8,10],[10,12]]
# y1 = [[2],[4],[6],[8],[10]]
y1 = [2,4,6,8,10]
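# In both toy sets the target equals the first feature of each sample (y[i] == x[i][0]),
# so the regressors below only have to recover a simple linear relationship.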
# Classification data: the iris dataset
from sklearn import datasets
from sklearn.model_selection import train_test_split
iris = datasets.load_iris()
# print(iris)
iris_x = iris.data
iris_y = iris.target
# print(iris_x)
x_train, x_test, y_train, y_test = train_test_split(iris_x, iris_y, test_size=0.3)  # split the dataset: 70% train / 30% test
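# iris has 150 samples with 4 features and 3 classes; each ensemble below can be tried
# by uncommenting its block.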
# AdaBoost (adaptive boosting)
# # Classification
# from sklearn.ensemble import AdaBoostClassifier
# adaboost = AdaBoostClassifier()
# adaboost.fit(x_train, y_train)
# print(adaboost.score(x_test, y_test))
# print(adaboost.predict(x_test))
# # Regression
# from sklearn.ensemble import AdaBoostRegressor
# adaboost = AdaBoostRegressor()
# adaboost.fit(x, y)
# print(adaboost.score(x1, y1))
# print(adaboost.predict(x1))
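# A minimal sketch (the parameter names come from scikit-learn's AdaBoostClassifier; the
# values here are illustrative assumptions, not tuned settings): AdaBoost is usually
# controlled through the number of weak learners and the learning rate.
# from sklearn.ensemble import AdaBoostClassifier
# adaboost = AdaBoostClassifier(n_estimators=100, learning_rate=0.5)  # more learners, smaller updates
# adaboost.fit(x_train, y_train)
# print(adaboost.score(x_test, y_test))  # accuracy on the held-out 30%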
# GBDT (gradient-boosted decision trees)
# # Classification
# from sklearn.ensemble import GradientBoostingClassifier
# grad = GradientBoostingClassifier()
# grad.fit(x_train, y_train)
# print(grad.score(x_test, y_test))
# print(grad.predict(x_test))
# # Regression
# from sklearn.ensemble import GradientBoostingRegressor
# grad = GradientBoostingRegressor()
# grad.fit(x, y)
# print(grad.score(x1, y1))
# print(grad.predict(x1))
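# A minimal sketch (parameter names are from scikit-learn's GradientBoostingRegressor; the
# values are illustrative assumptions): gradient boosting trades the number of trees
# against the learning rate, with shallow trees as the weak learners.
# from sklearn.ensemble import GradientBoostingRegressor
# grad = GradientBoostingRegressor(n_estimators=200, learning_rate=0.1, max_depth=2)
# grad.fit(x, y)
# print(grad.score(x1, y1))   # R^2 on the second toy set
# print(grad.predict(x1))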
# Bagging: random forest
# # Classification
# from sklearn.ensemble import RandomForestClassifier
# bagging = RandomForestClassifier()
# bagging.fit(x_train, y_train)
# print(bagging.score(x_test, y_test))
# print(bagging.predict(x_test))
# # Regression
# from sklearn.ensemble import RandomForestRegressor
# bagging = RandomForestRegressor()
# bagging.fit(x, y)
# print(bagging.score(x1, y1))
# print(bagging.predict(x1))
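# A minimal sketch (oob_score and feature_importances_ are scikit-learn RandomForestClassifier
# features; the n_estimators value is an illustrative assumption): because a random forest is
# built by bagging, the out-of-bag samples give a validation score without a separate split.
# from sklearn.ensemble import RandomForestClassifier
# bagging = RandomForestClassifier(n_estimators=200, oob_score=True)
# bagging.fit(x_train, y_train)
# print(bagging.oob_score_)            # out-of-bag estimate of accuracy
# print(bagging.feature_importances_)  # per-feature importance averaged over the trees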