Scikit-learn GaussianProcessRegressor for 2D input (integer variables) and 1D output gives "lbfgs failed to converge"

Problem description

I am trying to build a GPR model using scikit-learn's GaussianProcessRegressor. My input X consists of two parameters, and my output y is a single quantity. Because the two parameters in X are integer variables, my sampling applies a rounding function. I actually have several outputs y, but this example uses only one of them. The MWE is as follows:

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import autograd.numpy as anp
from pymoo.operators.sampling.latin_hypercube_sampling import LatinHypercubeSampling
matplotlib.rcParams['figure.figsize'] = (12, 8)

from itertools import product
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn import preprocessing
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

U = [120.367741, 109.150247,  63.437321, 124.011068, 150.951319,
       119.642251, 128.394923,  83.953329, 129.253303,  44.344121]
Cost = [4666.562171, 9777.233392, 3371.281041, 3930.101425, 8859.018802,
       3739.25197 , 3279.530241, 5908.360626, 7019.216883, 4825.159301]

dPt  = [ 26.255972, 132.865377,  16.481485,  14.087791, 113.172855,
        18.707665,   7.803764,  58.79273 ,  66.628132,  48.24053 ]

dPa = [1257.549166,  987.326252,  417.604213, 1423.536753, 1820.50891 ,
       1300.117073, 1921.781696,  667.033892, 1999.196279,   68.579325]

massHX = [507.425268, 407.752448, 413.189147, 447.621576, 333.450891,
       385.099534, 318.953026, 471.180606, 440.372643, 369.695553]

#generated arrays
ntr, np_a, nr, nfins = [], [], [], []
ntr_bounds = [31,41]
nr_bounds  = [22,28]
np_bounds = [2,6]
nfins_bounds = [200,500]
population_size = 10
number_of_variables = 4
number_of_objectives = 5
xsample = LatinHypercubeSampling(smooth=False, iterations=40)
xpymoo = xsample._sample(population_size, number_of_variables)


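# Map the unit-interval LHS samples onto each variable's range and round,
# since ntr, nr, np_a and nfins are integer design variables.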
for i in xpymoo[:,0]:
    ntr = np.append(ntr, round(i*(np.max(ntr_bounds) - np.min(ntr_bounds))+np.min(ntr_bounds)))

for i in xpymoo[:,1]:
    nr = np.append(nr, round(i*(np.max(nr_bounds) - np.min(nr_bounds))+np.min(nr_bounds)))
    
for i in xpymoo[:,2]:
    np_a = np.append(np_a, round(i*(np.max(np_bounds) - np.min(np_bounds))+np.min(np_bounds)))

for i in xpymoo[:,3]:
    nfins = np.append(nfins, round(i*(np.max(nfins_bounds)-np.min(nfins_bounds))+np.min(nfins_bounds)))

xarray = anp.column_stack([ntr, nr, np_a, nfins])

xarray1 = anp.column_stack([ntr, nr])
xarray2 = anp.column_stack([nr,np_a])
xarray3 = anp.column_stack([ntr, np_a]) 
xarray4 = anp.column_stack([ntr, nfins])
xarray5 = anp.column_stack([nr, nfins])
xarray6 = anp.column_stack([np_a, nfins])

xpossible = [xarray1, xarray2]#, xarray3, xarray4, xarray5, xarray6]
Farray = anp.column_stack([U])#,Cost,dPt,dPa,massHX])

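# Fit one GP per (input pair, objective) combination and plot the
# predicted response surface over a 100x100 grid.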
for i in range(len(xpossible)):
    for j in range(len(Farray[0,:])):
        gridpoints = 100
        x = np.linspace(xpossible[i][:,0].min(), xpossible[i][:,0].max(), gridpoints)
        y = np.linspace(xpossible[i][:,1].min(), xpossible[i][:,1].max(), gridpoints)

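        # Anisotropic RBF kernel: one length scale per input dimension,
        # initialised at 12 with shared bounds (1e-2, 1e2).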
        kernel = RBF([12,12], (1e-2, 1e2))
        gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=1000)

        gp.fit(xpossible[i], Farray[:,j])
        xy = np.array(list(product(x, y)))
        
        y_pred, MSE = gp.predict(xy, return_std=True)
        print("GPML kernel is: %s" % gp.kernel)
        print("GPML NLML is: %s" % gp.log_marginal_likelihood_value_)
        X0p, X1p = xy[:,0].reshape(gridpoints, gridpoints), xy[:,1].reshape(gridpoints, gridpoints)
        Zp = np.reshape(y_pred,(gridpoints, gridpoints))
        
        fig = plt.figure(figsize=(12,8), tight_layout=True)
        ax = fig.add_subplot(111, projection='3d')            
        
        surf = ax.plot_surface(X0p, X1p, Zp, 
                               rstride=1, cstride=1, 
                               cmap='jet', linewidth=0, 
                               antialiased=False, label='RSM')
        cset = ax.contour(X0p, X1p, Zp, zdir='z', offset=-1, cmap=cm.rainbow_r )
        plt.show()

The corresponding output is as follows:

GPML kernel is: RBF(length_scale=[12, 12])
GPML NLML is: -47642.34687128109
C:\Users\aus1n19\Anaconda3\lib\site-packages\sklearn\gaussian_process\_gpr.py:506: ConvergenceWarning: lbfgs failed to converge (status=2):
ABNORMAL_TERMINATION_IN_LNSRCH.

Increase the number of iterations (max_iter) or scale the data as shown in:
    https://scikit-learn.org/stable/modules/preprocessing.html
  _check_optimize_result("lbfgs", opt_res)
GPML kernel is: RBF(length_scale=[12, 12])
GPML NLML is: -28412645460642.207

Now, I have tried running this with the help of this1 and this2; neither of them normalizes the data between 0 and 1, which I think may be the reason for the incorrect plots and for such a high NLML. Any suggestions on how to get lbfgs to converge here? Or is there something I am missing?

Tags: python, machine-learning, scikit-learn, gaussian-process

Solution
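The warning itself points at the likely fix: the inputs are on different raw scales and the output U is O(10^2), so the L-BFGS-B line search over the kernel hyperparameters fails, and the restarts keep hitting the same bad region. Below is a minimal sketch of one way to address this; scaling X with MinMaxScaler, setting normalize_y=True, and the smaller restart count are suggested starting points, not a verified fix for this exact dataset:

import numpy as np
from itertools import product
from sklearn.preprocessing import MinMaxScaler
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

def fit_scaled_gpr(X, y, gridpoints=100):
    """Fit a GPR on min-max scaled 2D inputs and predict on a grid."""
    scaler = MinMaxScaler()                 # maps each column of X into [0, 1]
    Xs = scaler.fit_transform(X)

    # On [0, 1] inputs, a unit initial length scale with wide bounds is a
    # reasonable starting point for the anisotropic RBF.
    kernel = RBF(length_scale=[1.0, 1.0], length_scale_bounds=(1e-2, 1e2))
    gp = GaussianProcessRegressor(kernel=kernel,
                                  normalize_y=True,         # centre/scale y internally
                                  n_restarts_optimizer=20)  # 1000 restarts is overkill
    gp.fit(Xs, y)

    # Build the prediction grid in the original units, then apply the
    # same scaling before predicting.
    x0 = np.linspace(X[:, 0].min(), X[:, 0].max(), gridpoints)
    x1 = np.linspace(X[:, 1].min(), X[:, 1].max(), gridpoints)
    grid = np.array(list(product(x0, x1)))
    y_pred, y_std = gp.predict(scaler.transform(grid), return_std=True)
    return gp, grid, y_pred, y_std

# e.g. with the arrays from the question:
# gp, xy, y_pred, MSE = fit_scaled_gpr(xarray1, np.asarray(U))

With the inputs in [0, 1], the optimized length scales (read from gp.kernel_, not gp.kernel, which is the unfitted prior) usually stay inside their bounds and the ABNORMAL_TERMINATION_IN_LNSRCH warning goes away; the log marginal likelihood also becomes comparable across input pairs because the raw data scale no longer dominates it.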

