Python polynomial regression with gradient descent

Problem description

I am trying to implement polynomial regression with gradient descent. The function I want to fit is:

f(x) = (1/3)x^3 - 2x^2 + 2x + 2

The code I am using is:

import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
from sklearn.preprocessing import PolynomialFeatures
np.random.seed(seed=42)


def create_data():
    x = PolynomialFeatures(degree=5).fit_transform(np.linspace(-10,10,100).reshape(100,-1))
    l = lambda x_i: (1/3)*x_i**3-2*x_i**2+2*x_i+2
    data = l(x[:,1])
    noise = np.random.normal(0,0.1,size=np.shape(data))
    y = data+noise
    y= y.reshape(100,1)
    return {'x':x,'y':y}

def plot_function(x,y):
    fig = plt.figure(figsize=(10,10))
    plt.plot(x[:,1],[(1/3)*x_i**3-2*x_i**2+2*x_i+2 for x_i in x[:,1]],c='lightgreen',linewidth=3,zorder=0)
    plt.scatter(x[:,1],y)
    plt.show()


def w_update(y,x,batch,w_old,eta):
    derivative = np.sum([(y[i]-np.dot(w_old.T,x[i,:]))*x[i,:] for i in range(np.shape(x)[0])])
    print(derivative)
    return w_old+eta*(1/batch)*derivative



# initialize variables
w = np.random.normal(size=(6,1))
data = create_data()
x = data['x']
y = data['y']
plot_function(x,y)



# Update w
w_s = []
Error = []
for i in range(500):
    error = (1/2)*np.sum([(y[i]-np.dot(w.T,x[i,:]))**2 for i in range(len(x))])
    Error.append(error)
    w_prime = w_update(y,x,np.shape(x)[0],w,0.001)
    w = w_prime
    w_s.append(w)
# Plot the predicted function
plt.plot(x[:,1],np.dot(x,w))
plt.show()

# Plot the error
fig3 = plt.figure()
plt.scatter(range(len(Error[10:])),Error[10:])
plt.show()

But as a result I get something strange which is completely out of bounds... I have also tried changing the number of iterations as well as the parameter theta, but it did not help. I think I made a mistake when updating w.

Tags: python, machine-learning, linear-regression

Solution


I found the solution. The problem was indeed in the part where I compute the weights. Specifically in:

np.sum([(y[d]-np.dot(w_old.T,x[d,:]))*x[d,:] for d in range(np.shape(x)[0])])

should instead be:

np.sum([-(y[d]-np.dot(w.T.copy(),x[d,:]))*x[d,:].reshape(np.shape(w)) for d in range(len(x))],axis=0)
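For reference (this is just the standard least-squares derivation, not part of the original post), the leading minus sign comes from differentiating the squared-error cost that the script already uses:

E(w) = \frac{1}{2}\sum_{d}\left(y_d - w^{\top}x_d\right)^{2},
\qquad
\frac{\partial E}{\partial w} = -\sum_{d}\left(y_d - w^{\top}x_d\right)x_d

which is why the cleaned-up code below keeps the minus inside the sum and updates the weights with w -= eta * gradient.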

We have to add np.sum(axis=0) to get the dimension we want --> the dimension has to be equal to the dimension of w. The numpy sum documentation says:

The default, axis=None, will sum all of the elements of the input array.
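To make that concrete (this small check is not part of the original post), summing a stack of 100 column vectors of the same shape as w, with and without the axis argument:

import numpy as np

# 100 per-sample gradient contributions, each a (7,1) column vector -> stacked shape (100,7,1)
contributions = np.ones((100, 7, 1))

print(np.sum(contributions).shape)          # ()     -> everything collapsed into a single scalar
print(np.sum(contributions, axis=0).shape)  # (7, 1) -> one summed column vector, same shape as w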

That default behaviour is not what we want here. Adding axis=0 sums over the first axis of our array, which has dimensions (100,7,1): the 100 elements of shape (7,1) are added up, and the resulting array has shape (7,1), which is exactly what we want. Implementing this and cleaning up the code yields:

import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
np.random.seed(seed=42)


def create_data():
    x = PolynomialFeatures(degree=6).fit_transform(np.linspace(-2,2,100).reshape(100,-1))
    x[:,1:] = MinMaxScaler(feature_range=(-2,2),copy=False).fit_transform(x[:,1:])
    l = lambda x_i: np.cos(0.8*np.pi*x_i)
    data = l(x[:,1])
    noise = np.random.normal(0,0.1,size=np.shape(data))
    y = data+noise
    y= y.reshape(100,1)
    # Normalize Data
    return {'x':x,'y':y}

def plot_function(x,y,w,Error,w_s):
    fig,ax = plt.subplots(nrows=1,ncols=2,figsize=(40,10))
    ax[0].plot(x[:,1],[np.cos(0.8*np.pi*x_i) for x_i in x[:,1]],c='lightgreen',linewidth=3,zorder=0)
    ax[0].scatter(x[:,1],y)
    ax[0].plot(x[:,1],np.dot(x,w))
    ax[0].set_title('Function')
    ax[1].scatter(range(iterations),Error)
    ax[1].set_title('Error')



    plt.show()



# initialize variables
data = create_data()
x = data['x']
y = data['y']
w = np.random.normal(size=(np.shape(x)[1],1))
eta = 0.1
iterations = 10000
batch = 10



def stochastic_gradient_descent(x,y,w,eta):
    derivative = -(y-np.dot(w.T,x))*x.reshape(np.shape(w))
    return eta*derivative


def batch_gradient_descent(x,y,w,eta):
    derivative = np.sum([-(y[d]-np.dot(w.T.copy(),x[d,:]))*x[d,:].reshape(np.shape(w)) for d in range(len(x))],axis=0)
    return eta*(1/len(x))*derivative


def mini_batch_gradient_descent(x,y,w,eta,batch):
    # Accumulate the gradient over a randomly sampled mini-batch, then average it
    gradient_sum = np.zeros(shape=np.shape(w))
    for b in range(batch):
        choice = np.random.choice(list(range(len(x))))
        gradient_sum += -(y[choice]-np.dot(w.T,x[choice,:]))*x[choice,:].reshape(np.shape(w))
    # return outside the loop so the whole mini-batch contributes to the step
    return eta*(1/batch)*gradient_sum

# Update w
w_s = []
Error = []
for i in range(iterations):
    # Calculate error
    error = (1/2)*np.sum([(y[i]-np.dot(w.T,x[i,:]))**2 for i in range(len(x))])
    Error.append(error)
    # Stochastic Gradient Descent
    """
    for d in range(len(x)):
        w-= stochastic_gradient_descent(x[d,:],y[d],w,eta)
        w_s.append(w.copy())
    """
    # Minibatch Gradient Descent
    """
    w-= mini_batch_gradient_descent(x,y,w,eta,batch)
    """

    # Batch Gradient Descent

    w -= batch_gradient_descent(x,y,w,eta)




# Show the predicted weights
print(w)

# Plot the predicted function and the Error
plot_function(x,y,w,Error,w_s)

As a result we get a figure with two panels: the fitted function plotted against the data ('Function') and the error over the iterations ('Error').

This can certainly be improved further by tuning eta and the number of iterations, as well as by switching to stochastic or mini-batch gradient descent or to more sophisticated optimization algorithms.
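As one example of such an improvement (only a sketch, reusing batch_gradient_descent and the variables defined in the script above; the momentum coefficient of 0.9 is an assumed, untuned value), the plain update in the loop could be replaced by a momentum update:

# Gradient descent with momentum: accumulate a velocity from the past (already eta-scaled) steps
velocity = np.zeros_like(w)
momentum = 0.9  # assumed value, not tuned for this problem

for i in range(iterations):
    step = batch_gradient_descent(x, y, w, eta)  # already includes eta and the 1/len(x) averaging
    velocity = momentum * velocity + step
    w -= velocity

In practice this tends to speed up convergence for the same number of iterations, although eta usually needs to be re-tuned once momentum is added.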

