Segmentation API
https://github.com/qubvel/segmentation_models.pytorch
The models themselves don't need to be written by hand; we only need to call the library.
As an example, let's reuse the complete UNet brain-tumor segmentation code from the earlier post:
https://blog.csdn.net/qq_45845375/article/details/135588237
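As a quick illustration of what "just call it" means, here is a minimal sketch (my addition, assuming a recent segmentation_models_pytorch release installed via pip install segmentation-models-pytorch): every decoder architecture in the library is built through the same constructor-style interface, so swapping models is a one-line change.

import segmentation_models_pytorch as smp

# same arguments, different architectures, all shipped by smp
unet    = smp.Unet(encoder_name="resnet34", encoder_weights="imagenet", in_channels=3, classes=2)
unetpp  = smp.UnetPlusPlus(encoder_name="resnet34", encoder_weights="imagenet", in_channels=3, classes=2)
deeplab = smp.DeepLabV3Plus(encoder_name="resnet34", encoder_weights="imagenet", in_channels=3, classes=2)
fpn     = smp.FPN(encoder_name="mobilenet_v2", encoder_weights="imagenet", in_channels=3, classes=2)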
train2.py
import torch
import torch as t   # the script uses both the full name and the short alias
import torch.nn as nn
import numpy as np
from tqdm import tqdm  # progress bar
import segmentation_models_pytorch as smp
from dataset import *  # expected to provide BrainMRIdataset, DataLoader, the train/test file lists and the transforms
device = t.device("cuda") if t.cuda.is_available() else t.device("cpu")
train_data=BrainMRIdataset(train_img,train_label,train_transformer)
test_data=BrainMRIdataset(test_img,test_label,test_transformer)
dl_train=DataLoader(train_data,batch_size=4,shuffle=True)
dl_test=DataLoader(test_data,batch_size=4,shuffle=True)
model = smp.Unet(
    encoder_name="resnet34",      # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
    encoder_weights="imagenet",   # use `imagenet` pre-trained weights for encoder initialization
    in_channels=3,                # model input channels (1 for gray-scale images, 3 for RGB, etc.)
    classes=2,                    # model output channels (number of classes in your dataset)
)
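
# Note (assumption, not from the original post): pretrained encoders were trained with
# ImageNet normalization, so inputs should normally be preprocessed to match. smp exposes
# the matching transform via
#     preprocess_input = smp.encoders.get_preprocessing_fn("resnet34", pretrained="imagenet")
# Whether it is needed here depends on what train_transformer in dataset.py already does.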
# run one batch through the model once, as a quick sanity check
img,label=next(iter(dl_train))
model=model.to('cuda')
img=img.to('cuda')
pred=model(img)
label=label.to('cuda')
loss_fn=nn.CrossEntropyLoss()  # cross-entropy loss
loss_fn(pred,label)
optimizer=torch.optim.Adam(model.parameters(),lr=0.0001)
def train_epoch(epoch, model, trainloader, testloader):
    correct = 0
    total = 0
    running_loss = 0
    epoch_iou = []  # intersection over union (IoU)

    net = model.train()
    for x, y in tqdm(trainloader):
        x, y = x.to('cuda'), y.to('cuda')
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            y_pred = torch.argmax(y_pred, dim=1)
            correct += (y_pred == y).sum().item()
            total += y.size(0)
            running_loss += loss.item()
            intersection = torch.logical_and(y, y_pred)
            union = torch.logical_or(y, y_pred)
            batch_iou = torch.sum(intersection) / torch.sum(union)
            epoch_iou.append(batch_iou.item())

    epoch_loss = running_loss / len(trainloader.dataset)
    epoch_acc = correct / (total * 256 * 256)

    test_correct = 0
    test_total = 0
    test_running_loss = 0
    epoch_test_iou = []
    # the Results2/weights/unet_weight directory must already exist
    t.save(net.state_dict(), './Results2/weights/unet_weight/{}.pth'.format(epoch))

    model.eval()
    with torch.no_grad():
        for x, y in tqdm(testloader):
            x, y = x.to('cuda'), y.to('cuda')
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            y_pred = torch.argmax(y_pred, dim=1)
            test_correct += (y_pred == y).sum().item()
            test_total += y.size(0)
            test_running_loss += loss.item()
            intersection = torch.logical_and(y, y_pred)  # overlap between prediction and ground truth
            union = torch.logical_or(y, y_pred)          # union of prediction and ground truth
            batch_iou = torch.sum(intersection) / torch.sum(union)
            epoch_test_iou.append(batch_iou.item())

    epoch_test_loss = test_running_loss / len(testloader.dataset)
    epoch_test_acc = test_correct / (test_total * 256 * 256)  # correctly predicted pixels divided by total pixels

    print('epoch: ', epoch,
          'loss: ', round(epoch_loss, 3),
          'accuracy:', round(epoch_acc, 3),
          'IOU:', round(np.mean(epoch_iou), 3),
          'test_loss: ', round(epoch_test_loss, 3),
          'test_accuracy:', round(epoch_test_acc, 3),
          'test_iou:', round(np.mean(epoch_test_iou), 3)
          )
    return epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc
if __name__ == "__main__":
    epochs = 5
    for epoch in range(epochs):
        train_epoch(epoch,
                    model,
                    dl_train,
                    dl_test)
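A side note (my addition, not part of the original script): segmentation_models_pytorch releases from roughly 0.3 onward also ship metric helpers under smp.metrics, so the hand-written logical_and/logical_or IoU inside the loops above could instead be computed with the library, along these lines:

# drop-in replacement for the manual IoU inside the loops above, assuming smp.metrics is available;
# y_pred is already argmax-ed to integer class indices and y is the integer label map
tp, fp, fn, tn = smp.metrics.get_stats(y_pred, y, mode="multiclass", num_classes=2)
batch_iou = smp.metrics.iou_score(tp, fp, fn, tn, reduction="micro")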
test2.py
import torch
import torch as t   # the script uses both the full name and the short alias
import torch.nn as nn
import segmentation_models_pytorch as smp
from dataset import *  # expected to provide BrainMRIdataset, DataLoader, the train/test file lists and the transforms
import matplotlib.pyplot as plt
device = t.device("cuda") if t.cuda.is_available() else t.device("cpu")
train_data=BrainMRIdataset(train_img,train_label,train_transformer)
test_data=BrainMRIdataset(test_img,test_label,test_transformer)
dl_train=DataLoader(train_data,batch_size=4,shuffle=True)
dl_test=DataLoader(test_data,batch_size=4,shuffle=True)
model = smp.Unet(
    encoder_name="resnet34",      # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
    encoder_weights="imagenet",   # use `imagenet` pre-trained weights for encoder initialization
    in_channels=3,                # model input channels (1 for gray-scale images, 3 for RGB, etc.)
    classes=2,                    # model output channels (number of classes in your dataset)
)
img,label=next(iter(dl_train))
model=model.to('cuda')
img=img.to('cuda')
pred=model(img)
label=label.to('cuda')
loss_fn=nn.CrossEntropyLoss()
loss_fn(pred,label)
optimizer=torch.optim.Adam(model.parameters(),lr=0.0001)
def test():
    image, mask = next(iter(dl_test))
    image = image.to('cuda')
    net = model.eval()
    net.to(device)
    net.load_state_dict(t.load("./Results2/weights/unet_weight/4.pth"))  # load the weights saved by train2.py
    pred_mask = model(image)
    mask = torch.squeeze(mask)
    pred_mask = pred_mask.cpu()
    num = 4
    plt.figure(figsize=(10, 10))
    for i in range(num):
        plt.subplot(num, 4, i*num+1)
        plt.imshow(image[i].permute(1, 2, 0).cpu().numpy())   # input image
        plt.subplot(num, 4, i*num+2)
        plt.imshow(mask[i].cpu().numpy(), cmap='gray')        # ground-truth label
        plt.subplot(num, 4, i*num+3)
        plt.imshow(torch.argmax(pred_mask[i].permute(1, 2, 0), axis=-1).detach().numpy(), cmap='gray')  # prediction
    plt.show()

if __name__ == "__main__":
    test()
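If you want a single number for a saved checkpoint rather than only pictures, a small evaluation loop over the whole test set could be added to test2.py. This is a sketch of my own, reusing only the objects already defined above (model, dl_test, t, and the checkpoint path):

def evaluate(weight_path="./Results2/weights/unet_weight/4.pth"):
    # load the checkpoint saved by train2.py and report the mean IoU over the test set
    model.load_state_dict(t.load(weight_path))
    model.eval()
    ious = []
    with torch.no_grad():
        for x, y in dl_test:
            x, y = x.to('cuda'), y.to('cuda')
            y_pred = torch.argmax(model(x), dim=1)
            intersection = torch.logical_and(y, y_pred)
            union = torch.logical_or(y, y_pred)
            ious.append((torch.sum(intersection) / torch.sum(union)).item())
    print('test mean IoU:', round(sum(ious) / len(ious), 3))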
That's all for this introduction to the segmentation API.