Deep Learning --> Linear Regression Models

o-Sakurajimamai-o / 2023-07-28

# Linear regression

# Create the dataset
from mxnet import ndarray as nd
from mxnet import autograd as ad

num_input = 2
num_examples = 1000

true_w = [2, -3.4]
true_b = 4.2

x = nd.random_normal(shape=(num_examples, num_input))
y = true_w[0] * x[:, 0] + true_w[1] * x[:, 1] + true_b
y += .01 * nd.random_normal(shape=y.shape)
print(x[0:10], y[0:10])
# Read the data
import random

batch_size = 10


def data_iter():
    idx = list(range(num_examples))  # indices of all examples
    random.shuffle(idx)  # shuffle so minibatches are drawn in random order
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        yield nd.take(x, j), nd.take(y, j)  # yield one minibatch of features and labels, consumed below as data, label


for data, label in data_iter():
    print(data, label)
# Check how many batches one full pass takes
n = 0
for data, label in data_iter():
    n = n + 1
print(n)  # 100 batches: 1000 examples, 10 per batch

# Initialize model parameters
w = nd.random_normal(shape=(num_input, 1))
b = nd.zeros((1,))
params = [w, b]
# w and b will be updated repeatedly as the model trains
for param in params:
    param.attach_grad()  # allocate a gradient buffer so autograd can write param.grad


# Define the model
def net(x):
    return nd.dot(x, w) + b


print(net(data))


# Loss function: measures the gap between predictions and true values
# Squared error is the usual choice
def square_loss(yhat, y):
    return (yhat - y.reshape(yhat.shape)) ** 2  # reshape y to yhat's shape so a row/column mismatch cannot silently broadcast into a matrix
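# A quick self-contained check (illustrative only, not part of the original post) of why the
# reshape matters: subtracting a (3,) vector from a (3, 1) column broadcasts to a (3, 3) matrix.
yhat_demo = nd.array([[1.0], [2.0], [3.0]])   # shaped like net(x): one column
y_demo = nd.array([1.0, 2.0, 3.0])            # shaped like the labels: one row
print(((yhat_demo - y_demo) ** 2).shape)                          # (3, 3) -- silently wrong
print(((yhat_demo - y_demo.reshape(yhat_demo.shape)) ** 2).shape) # (3, 1) -- what we want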


# Optimization: sgd performs (minibatch) stochastic gradient descent; the goal is to drive the loss as low as possible
def sgd(params, lr):
    for param in params:
        param[:] = param - lr * param.grad  # in-place update keeps the NDArray and its attached gradient buffer
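# Tiny self-contained illustration (not from the original post) of a single SGD step on
# f(p) = p^2, whose gradient is 2p; one step with lr = 0.1 moves p from 4.0 to 3.2.
p = nd.array([4.0])
p.attach_grad()
with ad.record():
    f = p * p
f.backward()
sgd([p], 0.1)
print(p)  # [3.2] = 4 - 0.1 * 8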


# Train the model
epochs = 5  # number of full passes over the data
learning_rate = 0.001
for e in range(epochs):
    total_loss = 0
    for data, label in data_iter():
        with ad.record():
            output = net(data)
            loss = square_loss(output, label)

        loss.backward()
        sgd(params, learning_rate)  # take one gradient step
        total_loss += nd.sum(loss).asscalar()  # accumulate the batch loss; asscalar() converts the NDArray to a Python float

    print("Epoch %d,average loss: %f" % (e, total_loss / num_examples))#打印出来每一步的平均误差

print(true_w, w)  # compare the learned w with the true w
print(true_b, b)  # compare the learned b with the true b
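# Illustrative sanity check (not in the original post): predict on a fresh input with the
# learned parameters and compare with the true linear function.
x_new = nd.array([[1.0, 2.0]])
print(net(x_new))                                  # learned model's prediction
print(true_w[0] * 1.0 + true_w[1] * 2.0 + true_b)  # true value: 2 - 6.8 + 4.2 = -0.6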

A simpler version (using gluon):

from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import gluon

num_input = 2
num_examples = 1000

true_w = [2, -3.4]
true_b = 4.2

x = nd.random_normal(shape=(num_examples, num_input))
y = true_w[0] * x[:, 0] + true_w[1] * x[:, 1] + true_b
y += 0.01 * nd.random_normal(shape=y.shape)

# Read the data

batch_size = 10
dataset = gluon.data.ArrayDataset(x, y)
data_iter = gluon.data.DataLoader(dataset, batch_size, shuffle=True)

for data, label in data_iter:
    print(data, label)
    break

# Define the model
net = gluon.nn.Sequential()  # Sequential is a container that chains layers; it starts out empty
net.add(gluon.nn.Dense(1))
print("net:", net)

# Initialize the model
net.initialize()
# Loss function
square_loss = gluon.loss.L2Loss()
# Optimizer
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
# Training
epochs = 5
for e in range(epochs):
    total_loss = 0
    for data, label in data_iter:
        with autograd.record():
            output = net(data)
            loss = square_loss(output, label)
        loss.backward()
        trainer.step(batch_size)  # step(batch_size) normalizes the accumulated gradient by the batch size
        total_loss += nd.sum(loss).asscalar()
    print("Epoch %d,average loss: %f" % (e, total_loss / num_examples))

dense = net[0]  # the single Dense layer inside the Sequential container
print(true_w, dense.weight.data())  # compare the learned weight with the true w
print(true_b, dense.bias.data())  # compare the learned bias with the true b
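# Illustrative sanity check (not in the original post): the trained gluon model should give
# roughly the same prediction as the true linear function for a fresh input.
x_new = nd.array([[1.0, 2.0]])
print(net(x_new))                                  # should be close to -0.6
print(true_w[0] * 1.0 + true_w[1] * 2.0 + true_b)  # 2*1 - 3.4*2 + 4.2 = -0.6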

Image recognition algorithm (softmax regression on Fashion-MNIST):

 

# 多类->线性回归
from mxnet import gluon
from mxnet import ndarray as nd
import matplotlib.pyplot as plt


def transform(data, label):
    return data.astype('float32') / 255, label.astype('float32')  # scale pixels to [0, 1] and cast labels to float32


mnist_train = gluon.data.vision.FashionMNIST(train=True, transform=transform)
mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)

data, label = mnist_train[0]
print("example shape:", data.shape, 'label', label)  # 3d,长宽高


# Display a row of images
def show_images(images):
    n = images.shape[0]
    _, figs = plt.subplots(1, n, figsize=(15, 15))
    for i in range(n):
        figs[i].imshow(images[i].reshape((28, 28)).asnumpy())
        figs[i].axes.get_xaxis().set_visible(False)
        figs[i].axes.get_yaxis().set_visible(False)
    plt.show()


def get_text_label(label):
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in label]


data, label = mnist_train[0:9]
show_images(data)
print(get_text_label(label))

# Read the data
batch_size = 256
train_data = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)
test_data = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)

# Initialize the model
# As in the linear regression example, each sample is represented as a vector. Each input is an image of 28x28 pixels,
# so the input vector has length 28 x 28 = 784, one element per pixel. Since the images fall into 10 classes,
# the single-layer network has 10 outputs, so the softmax regression weight and bias are 784 x 10 and 1 x 10 matrices respectively.
num_input = 784
num_output = 10

w = nd.random_normal(shape=(num_input, num_output))
b = nd.random_normal(shape=num_output)

params = [w, b]
for param in params:
    param.attach_grad()
# Define the model

from mxnet import nd


# Implement the softmax operation
# First, a note on reducing a multi-dimensional NDArray along an axis:
# given a matrix X, we can sum only over the same column (axis=0) or the same row (axis=1),
# keeping both dimensions in the result (keepdims=True).
# Here the rows of X are samples and the columns are outputs. To express each sample's predicted
# probability for every class, softmax first exponentiates each element with exp, then sums each row
# of the exp matrix, and finally divides every row by its sum. Each row of the result is therefore
# non-negative and sums to 1, i.e. a valid probability distribution over the output classes.
def softmax(x):  # turn scores into probabilities: non-negative and summing to 1
    exp = nd.exp(x)  # element-wise exponential (makes every entry positive)
    partition = exp.sum(axis=1, keepdims=True)  # row sums, used to normalize each row so it sums to 1
    return exp / partition
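# Note (not from the original post): this naive softmax can overflow, since nd.exp of a large
# score becomes inf. A common, mathematically equivalent fix is to subtract each row's maximum
# before exponentiating; a minimal sketch of that variant:
def softmax_stable(x):
    shifted = x - x.max(axis=1, keepdims=True)  # largest entry in each row becomes 0
    exp = nd.exp(shifted)
    return exp / exp.sum(axis=1, keepdims=True)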


x = nd.random_normal(shape=(2, 5))
x_prob = softmax(x)

print(x)
print(x_prob)
print(x_prob.sum(axis=1))


def net(x):
    return softmax(nd.dot(x.reshape((-1, num_input)), w) + b)  # flatten each image to a 784-vector, apply y = xW + b, then softmax


# Cross-entropy loss
def cross_entropy(yhat, y):
    return -nd.pick(nd.log(yhat), y)  # pick each sample's predicted probability of its true class and take the negative log
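# Small illustrative example (not from the original post) of what nd.pick does: for each row
# of yhat it selects the entry indexed by the corresponding label (along the last axis).
yhat_demo = nd.array([[0.1, 0.7, 0.2], [0.8, 0.1, 0.1]])
labels_demo = nd.array([1, 0])
print(nd.pick(yhat_demo, labels_demo))        # [0.7, 0.8]
print(cross_entropy(yhat_demo, labels_demo))  # [-log 0.7, -log 0.8] ~= [0.36, 0.22]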


# Compute accuracy
def accuracy(output, label):
    return nd.mean(output.argmax(axis=1) == label).asscalar()  # take the class with the highest probability and check it against the label


# argmax picks the index of the largest entry; mean averages the 0/1 comparisons over the batch


def evaluate_accuracy(data_iterator, net):
    acc = 0.
    for data, label in data_iterator:
        output = net(data)  # forward pass
        acc += accuracy(output, label)  # accumulate per-batch accuracy
    return acc / len(data_iterator)  # average over all batches


print(evaluate_accuracy(test_data, net))  # about 0.1 before training: random guessing over 10 classes

# Train the model

import sys

sys.path.append('..')
from mxnet import autograd


# Optimizer
def sgd(params, lr):
    for param in params:
        param[:] = param - lr * param.grad


learning_rate = .1

for epoch in range(10):
    train_acc = 0.
    train_loss = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = cross_entropy(output, label)
        loss.backward()
        sgd(params, learning_rate / batch_size)  # divide lr by batch_size because loss.backward() sums gradients over the batch
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)
    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc / len(train_data)))

# Finally, classify a few test images

data, label = mnist_test[0:9]
show_images(data)
print('true labels')
print(get_text_label(label))

predicted_labels = net(data).argmax(axis=1)
print('predicted labels')
print(get_text_label(predicted_labels.asnumpy()))

 

# Multilayer perceptron

# Get the data
import d2lzh
from mxnet import autograd

batch_size = 256
train_data, test_data = d2lzh.load_data_fashion_mnist(batch_size)

# Define model parameters
from mxnet import ndarray as nd

num_input = 28 * 28
num_output = 10

num_hidden = 256  # size of the extra hidden layer inserted between input and output
weight_scale = .01

w1 = nd.random_normal(shape=(num_input, num_hidden), scale=weight_scale)
b1 = nd.zeros(num_hidden)

w2 = nd.random_normal(shape=(num_hidden, num_output), scale=weight_scale)
b2 = nd.zeros(num_output)

params = [w1, b1, w2, b2]

for param in params:
    param.attach_grad()


# Activation function

def relu(x):  # return x where x > 0, otherwise 0
    return nd.maximum(x, 0)


# Define the model

def net(x):
    x = x.reshape((-1, num_input))
    h1 = relu(nd.dot(x, w1) + b1)
    output = nd.dot(h1, w2) + b2
    return output
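# Illustrative shape check (not from the original post): a batch of 4 dummy images flows
# through the hidden layer as (4, 256) and out of the output layer as (4, 10) class scores.
print(net(nd.ones((4, 28, 28, 1))).shape)  # (4, 10)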


# Softmax and cross-entropy loss

from mxnet import gluon

softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
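# Quick self-contained check (illustrative, not from the original post): gluon's loss fuses
# softmax and cross-entropy, so net() can output raw scores (logits) rather than probabilities.
logits_demo = nd.array([[2.0, 0.5, -1.0]])
label_demo = nd.array([0])
manual = -nd.log(nd.softmax(logits_demo))[0, 0]  # -log of the true class's softmax probability
print(manual, softmax_cross_entropy(logits_demo, label_demo))  # the two values should match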


def accuracy(y_hat, y):
    return (y_hat.argmax(axis=1) == y.astype('float32')).mean().asscalar()


# Training

learning_rate = .5

for epoch in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)

        loss.backward()
        d2lzh.sgd(params, learning_rate, batch_size)

        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)

    test_acc = d2lzh.evaluate_accuracy(test_data, net)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc / len(train_data)))