OpenCV python sklearn随机超参数搜索的实现
本文介绍了 OpenCV / Python / sklearn 随机超参数搜索的实现,分享给大家,具体如下:
"""
Hyperparameter search on the California housing (house-price) dataset,
using sklearn's RandomizedSearchCV around a Keras regression model.
"""
import os
import sys

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
from scipy.stats import reciprocal
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
# NOTE(review): this is a private TF 2.0/2.1 module path ("cannot use the
# plain python package", per the original note). The public equivalent is
# `from tensorflow import keras` — confirm against the installed TF version.
from tensorflow_core.python.keras.api._v2 import keras

# Suppress TensorFlow INFO/WARNING console noise.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# This script targets the TF 2.x API.
assert tf.__version__.startswith('2.')

# 0. Print the versions of the imported modules.
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, sklearn, pd, tf, keras:
    print("%s version: %s" % (module.__name__, module.__version__))
# Display the learning curves of a training run.
def plot_learning_curves(his):
    """Plot the loss curves recorded during training.

    Args:
        his: the ``History`` object returned by ``model.fit`` — its
            ``history`` dict (one list of floats per metric) is plotted.
    """
    pd.DataFrame(his.history).plot(figsize=(8, 5))
    plt.grid(True)
    # Losses on this dataset stay well below 1, so clamp the y-axis.
    plt.gca().set_ylim(0, 1)
    plt.show()
# 1. Load the California housing dataset.
housing = fetch_california_housing()
print(housing.DESCR)
print(housing.data.shape)
print(housing.target.shape)

# 2. Split into train / validation / test sets
#    (fixed random_state values keep the splits reproducible).
x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state=7)
x_train, x_valid, y_train, y_valid = train_test_split(
    x_train_all, y_train_all, random_state=11)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)

# 3. Standardize the features.
# BUG FIX: fit the scaler on the training split only, then apply that same
# transform to the validation and test splits. The original called
# fit_transform on every split, which re-estimated mean/variance per split —
# a data-leakage bug that normalizes the three splits inconsistently.
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)
# Build the Keras model (factory consumed by KerasRegressor below).
def build_model(hidden_layers=1,   # number of hidden layers — a search target
                layer_size=30,
                learning_rate=3e-3):
    """Build and compile a fully-connected Keras regression model.

    Args:
        hidden_layers: total number of hidden Dense layers.
        layer_size: number of units in each hidden layer.
        learning_rate: learning rate for the SGD optimizer.

    Returns:
        A compiled ``keras.models.Sequential`` model (MSE loss).
    """
    model = keras.models.Sequential()
    # First hidden layer also declares the input shape.
    # NOTE(review): reads the module-level x_train — build_model must be
    # called after the data-preparation section has run.
    model.add(keras.layers.Dense(layer_size, activation="relu",
                                 input_shape=x_train.shape[1:]))
    # Remaining hidden layers (the first one above already counts).
    for _ in range(hidden_layers - 1):
        model.add(keras.layers.Dense(layer_size,
                                     activation="relu"))
    # Single linear output unit for regression.
    model.add(keras.layers.Dense(1))
    # `lr` is the TF 2.0/2.1-era argument name (later renamed learning_rate).
    optimizer = keras.optimizers.SGD(lr=learning_rate)
    model.compile(loss="mse", optimizer=optimizer)
    return model
def main():
    """Run a randomized hyperparameter search over ``build_model``.

    Trains a baseline model, searches hidden_layers / layer_size /
    learning_rate with RandomizedSearchCV, evaluates the best estimator
    on the test set, and plots the baseline training history.
    """
    # RandomizedSearchCV workflow:
    # 1. Wrap the Keras factory as an sklearn-compatible regressor.
    sk_learn_model = keras.wrappers.scikit_learn.KerasRegressor(build_model)
    callbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-2)]
    # Baseline fit with default hyperparameters; its history is plotted below.
    history = sk_learn_model.fit(x_train_scaled, y_train, epochs=100,
                                 validation_data=(x_valid_scaled, y_valid),
                                 callbacks=callbacks)

    # 2. Define the hyperparameter distributions.
    # reciprocal pdf: f(x) = 1 / (x * log(b/a)), for a <= x <= b — samples
    # learning rates log-uniformly.
    param_distribution = {
        "hidden_layers": [1, 2, 3, 4],
        "layer_size": np.arange(1, 100),
        "learning_rate": reciprocal(1e-4, 1e-2),
    }

    # 3. Run the randomized search.
    # cross-validation: the training data is split into cv folds; cv-1 folds
    # train and the remaining fold validates, rotating over all folds.
    random_search_cv = RandomizedSearchCV(sk_learn_model, param_distribution,
                                          n_iter=10,
                                          cv=3,
                                          n_jobs=1)
    random_search_cv.fit(x_train_scaled, y_train, epochs=100,
                         validation_data=(x_valid_scaled, y_valid),
                         callbacks=callbacks)

    # 4. Report the best hyperparameters and evaluate on the test set.
    print(random_search_cv.best_params_)
    print(random_search_cv.best_score_)
    print(random_search_cv.best_estimator_)
    model = random_search_cv.best_estimator_.model
    print(model.evaluate(x_test_scaled, y_test))

    # 5. Plot the training history of the baseline fit.
    plot_learning_curves(history)


if __name__ == '__main__':
    main()
以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持毛票票。
声明:本文内容来源于网络,版权归原作者所有,内容由互联网用户自发贡献自行上传,本网站不拥有所有权,未作人工编辑处理,也不承担相关法律责任。如果您发现有涉嫌版权的内容,欢迎发送邮件至:czq8825#qq.com(发邮件时,请将#更换为@)进行举报,并提供相关证据,一经查实,本站将立刻删除涉嫌侵权内容。