AttributeError: ‘NoneType‘ object has no attribute ‘data‘

这篇具有很好参考价值的文章主要介绍了AttributeError: ‘NoneType‘ object has no attribute ‘data‘。希望对大家有所帮助。如果存在错误或未考虑完全的地方,请大家不吝赐教,您也可以点击"举报违法"按钮提交疑问。

在深度学习训练网络过程中,我们常遇到如下的问题:属性错误(其中非类型的对象没有属性'data'),解决的方法主要是查看网络构造是否出现问题。

废话不多说,实践出真知。举个轻量级神经网络训练的例子,源代码包含三部分:网络构造、数据预处理加载以及网络训练。(使用的训练数据为ide可直接下载数据,需要的码友可以直接复现)

  1. 网络构造

import torch
import torch.nn as nn

use_cuda = torch.cuda.is_available()


class dw_conv(nn.Module):
    # 深度卷积
    def __init__(self, in_channels, out_channels, stride):
        super(dw_conv, self).__init__()
        self.dw_conv_3 = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, groups=in_channels, bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.dw_conv_3(x)
        out = self.bn(out)
        out = self.relu(out)


class point_conv(nn.Module):
    def __init__(self, in_channels, out_channels):
        # 点卷积
        super(point_conv, self).__init__()
        self.conv_1x1 = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=1
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        out = self.conv_1x1(x)
        out = self.bn(out)
        out = self.relu(out)
        return out


class My_Mobilenet(nn.Module):
    def __init__(self, num_classes):
        super(My_Mobilenet, self).__init__()
        self.num_classes = num_classes
        # if large_img:
        #     self.features = nn.Sequential(
        #         nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=2),
        #         # nn.BatchNorm2d(32),
        #         nn.ReLU(inplace=True),
        #         dw_conv(32, 32, 1),
        #         point_conv(32, 64),
        #         dw_conv(64, 64, 2),
        #         point_conv(64, 128),
        #         dw_conv(128, 128, 1),
        #         point_conv(128, 128),
        #         dw_conv(128, 128, 2),
        #         point_conv(128, 256),
        #         dw_conv(256, 256, 1),
        #         point_conv(256, 256),
        #         dw_conv(256, 256, 2),
        #         point_conv(256, 512),
        #
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #         dw_conv(512, 512, 1),
        #         point_conv(512, 512),
        #
        #         dw_conv(512, 512, 2),
        #         point_conv(512, 1024),
        #         dw_conv(1024, 1024, 2),
        #         point_conv(1024, 1024),
        #         nn.AvgPool2d(7),
        #         )
        # else:
        self.features = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            dw_conv(32, 32, 1),
            point_conv(32, 64),
            dw_conv(64, 64, 1),
            point_conv(64, 128),
            dw_conv(128, 128, 1),
            point_conv(128, 128),
            dw_conv(128, 128, 1),
            point_conv(128, 256),
            dw_conv(256, 256, 1),
            point_conv(256, 256),
            dw_conv(256, 256, 1),
            point_conv(256, 512),

            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),

            dw_conv(512, 512, 1),
            point_conv(512, 1024),
            dw_conv(1024, 1024, 1),
            point_conv(1024, 1024),
            nn.AvgPool2d(4),
        )
        self.fc = nn.Linear(1024, self.num_classes)

    def forward(self, x):
        out = self.features(x)
        out = out.view(-1, 1024)
        out = self.fc(out)

def mobilenet(num_classes):
    """
Model has been designed to work on either ImageNet or CIFAR-10
    :param num_classes:1000 for ImageNet, 10 for CIFAR-10
    :param large_img:True for ImageNet, False for CIFAR-10
    :param kwargs:
    :return:model
    """
    model = My_Mobilenet(num_classes)
    if use_cuda:
        model = model.cuda()
    return model


# from torchsummary import summary
# model = mobilenet(10, False)
# print(summary(model, (-1, 224, 224, -1)))
  1. 数据预处理加载

import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import accuracy_score
from mobilenet_v1 import mobilenet

#  数据预处理 transforms
#  数据加载 datasets

#  transforms the dataset
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # size大小可设置为 (32, 32)
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

valid_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

#  load the dataset
valid_size = 0.1
train_dataset = datasets.CIFAR10(root='cifar10', train=True, download=False, transform=train_transform)
# 当根目录没有'cifar10'数据集时,train=True可以直接从网上下载该数据集并保留在预先设置的根目录中
valid_dataset = datasets.CIFAR10(root='cifar10', train=True, download=False, transform=valid_transform)

# 进行随机采样,根据需求可有可无
num_train = len(train_dataset)
# print(num_train)
indices = list(range(num_train))
split = int(valid_size * num_train)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

train_loader = DataLoader(dataset=train_dataset, batch_size=16, sampler=train_sampler)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=16, sampler=valid_sampler)

test_dataset = datasets.CIFAR10(root='cifar10', train=False, download=False, transform=valid_transform)
test_loader = DataLoader(dataset=test_dataset, batch_size=16, shuffle=False)


# print('len(train_loader):{}\tlen(valid_loader):{}\tlen(test_loader):{}'.format(
#     len(train_loader), len(valid_loader), len(test_loader)))
#
# print(train_loader.dataset)
  1. 网络训练

"""
模型评估从以下三个部分来设计:损失函数设计、模型训练、模型验证测试
"""
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import accuracy_score
from mobilenet_v1 import mobilenet
from data_process_load import train_loader, valid_loader, test_loader
# import cifar10


use_cuda = torch.cuda.is_available()

model = mobilenet(num_classes=10)  # 先使用cifar10这个小数据集
optimizer = optim.Adam(model.parameters(), lr=0.01)
scheduler = StepLR(optimizer=optimizer, step_size=10, gamma=0.5)
criterion = nn.CrossEntropyLoss()


#  定义训练函数
def train(epoch):
    model.train()  # 表明所有的参数都在训练,需要更新,而不再固定
    for batch_idx, (datasets, target) in enumerate(train_loader):
        if use_cuda:
            datasets, target = datasets.cuda(), target.cuda()
        # data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(datasets)
        correct = 0
        # print(output)
        pred = output.data.max(1, keepdim=True)[1]
        # print(target.data.view_as(pred))
        # print(target, target.data)
        correct += pred.eq(target.data.view_as(pred)).sum()  # .eq()统计相同的个数
        loss = criterion(output, target)
        loss.backward()
        accuracy = 100.0 * correct / len(output)
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss:{:.6f}, Accuracy:{:.2f}'.format(
                epoch, batch_idx * len(datasets), len(train_loader.dataset), 100.0 * batch_idx / len(train_loader), loss.item(), accuracy
            ))
    scheduler.step()


#   定义验证函数
def validate(epoch):
    model.eval()
    valid_loss = 0
    correct = 0
    for data, target in valid_loader:
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # data, target = Variable(data), Variable(target)
        output = model(data)
        valid_loss += F.cross_entropy(output, target, size_average=False).item()
        pred = output.data.max(1, keepdim=True)[1]
        correct +=pred.eq(target.data.view_as(pred)).sum()  # 正确类别分类个数

    valid_loss /= len(valid_loader)
    accuracy = 100. * correct / len(valid_loader)
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{}(:.2f)%\n'.format(
        valid_loss, correct, len(valid_loader), 100.0 * correct / len(valid_loader)
    ))
    return valid_loss, accuracy


def test(epoch):
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, validate(True)), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(output, target, size_average=False).item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss += len(test_loader)
    print('\nTest set: Average loss:{.4f}, Accuracy:{}/{}(:.2f)%\n'.format(
        test_loss, correct, len(test_loader), 100.0 * correct / len(test_loader)
    ))


for epoch in range(50):
    train(epoch)
    loss, accuracy = validate(epoch)

将会出现一下报错情况。

AttributeError: ‘NoneType‘ object has no attribute ‘data‘

通过debug发现是网络中定义的类函数forward没有返回值再下方添加return out即可解决问题。文章来源地址https://www.toymoban.com/news/detail-510375.html

到了这里,关于AttributeError: ‘NoneType‘ object has no attribute ‘data‘的文章就介绍完了。如果您还想了解更多内容,请在右上角搜索TOY模板网以前的文章或继续浏览下面的相关文章,希望大家以后多多支持TOY模板网!

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处: 如若内容造成侵权/违法违规/事实不符,请点击违法举报进行投诉反馈,一经查实,立即删除!

领支付宝红包 赞助服务器费用

相关文章

觉得文章有用就打赏一下文章作者

支付宝扫一扫打赏

博客赞助

微信扫一扫打赏

请作者喝杯咖啡吧~博客赞助

支付宝扫一扫领取红包,优惠每天领

二维码1

领取红包

二维码2

领红包