模型选择 欠拟合与过拟合

o-Sakurajimamai-o / 2023-07-29 / 原文

# 模型选择 欠拟合与过拟合

# 创建数据集
from mxnet import autograd
from mxnet import ndarray as nd
from mxnet import gluon

# Dataset sizes: 100 samples for training, 100 held out for evaluation.
num_train = 100
num_test = 100  # fixed typo: was `num_tset`

# Ground-truth generating function:
#   y = 1.2*x - 3.4*x^2 + 5.6*x^3 + 5.0 + noise
true_w = [1.2, -3.4, 5.6]
true_b = 5.0

# Generate the dataset.
# X: raw scalar feature, shape (num_train + num_test, 1)
# x: polynomial features [X, X^2, X^3], shape (num_train + num_test, 3)
X = nd.random_normal(shape=(num_train + num_test, 1))
x = nd.concat(X, nd.power(X, 2), nd.power(X, 3))
y = true_w[0] * x[:, 0] + true_w[1] * x[:, 1] + true_w[2] * x[:, 2] + true_b
y += .1 * nd.random_normal(shape=y.shape)  # additive Gaussian observation noise
y_train, y_test = y[:num_train], y[num_train:]
print('X:', X[:5], 'x:', x[:5], 'y:', y[:5])

# Set up plotting before defining the training and testing steps.
import matplotlib as mpl

# Render figures at a higher DPI so the loss curves are sharper.
mpl.rcParams['figure.dpi'] = 120
import matplotlib.pyplot as plt


def square_loss(y_hat, y):
    """Elementwise squared-error loss: (y_hat - y)^2 / 2.

    `y` is reshaped to `y_hat`'s shape first, so a column-vector target
    compares correctly against a flat prediction (and vice versa).
    """
    residual = y_hat - y.reshape(y_hat.shape)
    return residual * residual / 2


def test(net, x, y):
    """Evaluate `net` on (x, y): mean squared-error loss as a Python float."""
    loss = square_loss(net(x), y)
    return loss.mean().asscalar()


def train(x_train, x_test, y_train, y_test,
          learning_rate=0.01, epochs=100, batch_size=10):
    """Fit a linear-regression model with SGD and plot its loss curves.

    Parameters
    ----------
    x_train, y_train : training features / targets (mxnet NDArrays).
    x_test, y_test : held-out features / targets used only for evaluation.
    learning_rate : SGD step size (default 0.01; previously hard-coded).
    epochs : number of passes over the training data (default 100).
    batch_size : mini-batch size for the DataLoader (default 10).

    Returns
    -------
    tuple: ('learned weight', weight NDArray, 'learned bias', bias NDArray).

    Side effects: shows a matplotlib figure with per-epoch train/test loss.
    """
    # Model: a single Dense(1) output unit, i.e. plain linear regression
    # over whatever features the caller supplies.
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(1))
    net.initialize()  # initialize model parameters

    # Mini-batch iterator over the training split only.
    dataset_train = gluon.data.ArrayDataset(x_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True)

    # SGD optimizer and L2 loss ((y_hat - y)^2 / 2).
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': learning_rate})
    # Renamed from `square_loss` so it no longer shadows the module-level helper.
    loss_fn = gluon.loss.L2Loss()

    train_loss = []  # per-epoch loss on the training split
    test_loss = []   # per-epoch loss on the test split

    # Training loop.
    for e in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)            # forward pass
                loss = loss_fn(output, label)  # per-sample loss
            loss.backward()                    # backprop gradients
            trainer.step(batch_size)           # SGD parameter update

        # Record full-dataset losses at the end of each epoch.
        train_loss.append(loss_fn(net(x_train), y_train).mean().asscalar())
        test_loss.append(loss_fn(net(x_test), y_test).mean().asscalar())

    # Plot train vs. test loss curves.
    plt.plot(train_loss)
    plt.plot(test_loss)
    plt.legend(['train', 'test'])
    plt.show()

    # Return the learned parameters.
    return ('learned weight', net[0].weight.data(),
            'learned bias', net[0].bias.data())


# Experiment 1: cubic polynomial features -> model matches the data, normal fit.

train(x[:num_train, :], x[num_train:, :], y[:num_train], y[num_train:])

# Experiment 2: only the raw linear feature X -> underfitting.
# NOTE(review): the original comment blamed "not enough data", but the cause
# here is the model being too simple (linear) for the cubic target.

train(X[:num_train, :], X[num_train:, :], y[:num_train], y[num_train:])

# Experiment 3: cubic features but only 2 training samples -> overfitting
# (too little training data for the model's capacity).

train(x[0:2, :], x[num_train:, :], y[0:2], y[num_train:])