
Implementing random hyperparameter search with sklearn in Python


This article shows how to run a random hyperparameter search over a Keras model using sklearn's RandomizedSearchCV. The full example follows:

"""房價預測數據集 使用sklearn執行超參數搜索"""import matplotlib as mplimport matplotlib.pyplot as pltimport numpy as npimport sklearnimport pandas as pdimport osimport sysimport tensorflow as tffrom tensorflow_core.python.keras.api._v2 import keras # 不能使用 pythonfrom sklearn.preprocessing import StandardScalerfrom sklearn.datasets import fetch_california_housingfrom sklearn.model_selection import train_test_split, RandomizedSearchCVfrom scipy.stats import reciprocalos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'assert tf.__version__.startswith('2.')# 0.打印導入模塊的版本print(tf.__version__)print(sys.version_info)for module in mpl, np, sklearn, pd, tf, keras:  print("%s version:%s" % (module.__name__, module.__version__))# 顯示學習曲線def plot_learning_curves(his):  pd.DataFrame(his.history).plot(figsize=(8, 5))  plt.grid(True)  plt.gca().set_ylim(0, 1)  plt.show()# 1.加載數據集 california 房價housing = fetch_california_housing()print(housing.DESCR)print(housing.data.shape)print(housing.target.shape)# 2.拆分數據集 訓練集 驗證集 測試集x_train_all, x_test, y_train_all, y_test = train_test_split(  housing.data, housing.target, random_state=7)x_train, x_valid, y_train, y_valid = train_test_split(  x_train_all, y_train_all, random_state=11)print(x_train.shape, y_train.shape)print(x_valid.shape, y_valid.shape)print(x_test.shape, y_test.shape)# 3.數據集歸一化scaler = StandardScaler()x_train_scaled = scaler.fit_transform(x_train)x_valid_scaled = scaler.fit_transform(x_valid)x_test_scaled = scaler.fit_transform(x_test)# 創建keras模型def build_model(hidden_layers=1, # 中間層的參數        layer_size=30,        learning_rate=3e-3):  # 創建網絡層  model = keras.models.Sequential()  model.add(keras.layers.Dense(layer_size, activation="relu",                 input_shape=x_train.shape[1:])) # 隱藏層設置  for _ in range(hidden_layers - 1):    model.add(keras.layers.Dense(layer_size,                   activation="relu"))  model.add(keras.layers.Dense(1))  # 優化器學習率  optimizer = keras.optimizers.SGD(lr=learning_rate)  model.compile(loss="mse", optimizer=optimizer)  return modeldef main():  # RandomizedSearchCV  # 1.轉化為sklearn的model  sk_learn_model = keras.wrappers.scikit_learn.KerasRegressor(build_model)  callbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-2)]  history = sk_learn_model.fit(x_train_scaled, y_train, epochs=100,                 validation_data=(x_valid_scaled, y_valid),                 callbacks=callbacks)  # 2.定義超參數集合  # f(x) = 1/(x*log(b/a)) a <= x <= b  param_distribution = {    "hidden_layers": [1, 2, 3, 4],    "layer_size": np.arange(1, 100),    "learning_rate": reciprocal(1e-4, 1e-2),  }  # 3.執行超搜索參數  # cross_validation:訓練集分成n份, n-1訓練, 最后一份驗證.  random_search_cv = RandomizedSearchCV(sk_learn_model, param_distribution,                     n_iter=10,                     cv=3,                     n_jobs=1)  random_search_cv.fit(x_train_scaled, y_train, epochs=100,             validation_data=(x_valid_scaled, y_valid),             callbacks=callbacks)  # 4.顯示超參數  print(random_search_cv.best_params_)  print(random_search_cv.best_score_)  print(random_search_cv.best_estimator_)  model = random_search_cv.best_estimator_.model  print(model.evaluate(x_test_scaled, y_test))  # 5.打印模型訓練過程  plot_learning_curves(history)if __name__ == '__main__':  main()