Two ways to implement focal loss in PyTorch


import torch
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable

'''Two ways to implement focal loss in PyTorch (discussed here in the context
of a segmentation task). Class imbalance is taken into account when computing
the loss; assume there are 6 classes in total, including the background class.
'''

def compute_class_weights(histogram):
    # Inverse-log frequency weighting: the rarer a class, the larger its weight.
    classweights = np.ones(6, dtype=np.float32)
    normhist = histogram / np.sum(histogram)
    for i in range(6):
        classweights[i] = 1 / (np.log(1.10 + normhist[i]))
    return classweights
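
For intuition, here is a small standalone check of compute_class_weights (the histogram values are invented for illustration), assuming the imports above:

hist = np.array([9000, 400, 300, 200, 80, 20], dtype=np.float32)
print(compute_class_weights(hist))
# -> roughly [1.4, 7.6, 8.2, 8.8, 9.8, 10.3]: the dominant class 0 gets
#    the smallest weight, the rarest class 5 the largest.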

def focal_loss_my(input, target):
    '''
    :param input: shape [batch_size, num_classes, h, w]; the raw convolutional
        output, before any activation function has been applied
    :param target: shape [batch_size, h, w]
    :return: scalar focal loss
    '''
    n, c, h, w = input.size()
    target = target.long()
    input = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    target = target.contiguous().view(-1)

    # Count how many pixels in this batch belong to each of the 6 classes.
    number_0 = torch.sum(target == 0).item()
    number_1 = torch.sum(target == 1).item()
    number_2 = torch.sum(target == 2).item()
    number_3 = torch.sum(target == 3).item()
    number_4 = torch.sum(target == 4).item()
    number_5 = torch.sum(target == 5).item()

    frequency = torch.tensor((number_0, number_1, number_2, number_3,
                              number_4, number_5), dtype=torch.float32)
    frequency = frequency.numpy()
    classweights = compute_class_weights(frequency)
    # Per-class weights computed from the current ground-truth labels.
    # weights = torch.from_numpy(classweights).float().cuda()
    weights = torch.from_numpy(classweights).float()

    focal_frequency = F.nll_loss(F.softmax(input, dim=1), target, reduction='none')
    '''As explained in the previous post,
    F.nll_loss(torch.log(F.softmax(inputs, dim=1)), target) is functionally
    equivalent to F.cross_entropy. Internally, F.nll_loss one-hot encodes the
    target into a tensor with the same shape as the input, multiplies it
    element-wise with its first argument, and thereby picks out log(p_gt),
    the log-probability of the correct class for each sample, with a minus
    sign. With the log removed here, focal_frequency holds -p_gt for each
    sample, shape [num_samples].
    '''
    focal_frequency += 1.0  # shape [num_samples]; now holds 1 - p_gt
    focal_frequency = torch.pow(focal_frequency, 2)  # (1 - p_gt)^gamma with gamma = 2
    focal_frequency = focal_frequency.repeat(c, 1)
    # After repeat, focal_frequency has shape [num_classes, num_samples].
    focal_frequency = focal_frequency.transpose(1, 0)  # [num_samples, num_classes]
    loss = F.nll_loss(focal_frequency * (torch.log(F.softmax(input, dim=1))),
                      target, weight=None, reduction='mean')
    return loss
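
The two facts the comment above relies on are easy to verify in isolation. A minimal sanity check (not part of the original post; it assumes the imports at the top):

x = torch.randn(4, 6)          # fake logits for 4 samples, 6 classes
t = torch.randint(0, 6, (4,))  # fake labels
# 1) F.nll_loss(log(softmax)) matches F.cross_entropy:
assert torch.allclose(F.nll_loss(torch.log(F.softmax(x, dim=1)), t),
                      F.cross_entropy(x, t))
# 2) F.nll_loss on the raw softmax just picks out -p_gt per sample:
neg_p = F.nll_loss(F.softmax(x, dim=1), t, reduction='none')
assert torch.allclose(neg_p, -F.softmax(x, dim=1)[torch.arange(4), t])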

def focal_loss_zhihu(input, target):
    '''Alternative implementation following a solution shared on Zhihu.
    :param input: shape [batch_size, num_classes, h, w]
    :param target: shape [batch_size, h, w]
    :return: scalar focal loss
    '''
    n, c, h, w = input.size()
    target = target.long()
    inputs = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    target = target.contiguous().view(-1)

    n = inputs.size(0)
    c = inputs.size(1)

    number_0 = torch.sum(target == 0).item()
    number_1 = torch.sum(target == 1).item()
    number_2 = torch.sum(target == 2).item()
    number_3 = torch.sum(target == 3).item()
    number_4 = torch.sum(target == 4).item()
    number_5 = torch.sum(target == 5).item()

    frequency = torch.tensor((number_0, number_1, number_2, number_3,
                              number_4, number_5), dtype=torch.float32)
    frequency = frequency.numpy()
    classweights = compute_class_weights(frequency)
    weights = torch.from_numpy(classweights).float()
    # This line is very important: it gathers the per-class weight for each
    # sample, producing a per-sample weight vector of shape [num_samples].
    weights = weights[target.view(-1)]

    gamma = 2
    p = F.softmax(inputs, dim=1)  # shape [num_samples, num_classes]
    class_mask = inputs.data.new(n, c).fill_(0)
    class_mask = Variable(class_mask)  # Variable is a no-op wrapper in PyTorch >= 0.4
    ids = target.view(-1, 1)
    class_mask.scatter_(1, ids.data, 1.)  # one-hot encoding, shape [num_samples, num_classes]

    probs = (p * class_mask).sum(1).view(-1, 1)  # p_gt, shape [num_samples, 1]
    log_p = probs.log()
    print('in calculating batch_loss', weights.shape, probs.shape, log_p.shape)
    # batch_loss = -weights * (torch.pow((1 - probs), gamma)) * log_p
    batch_loss = -(torch.pow((1 - probs), gamma)) * log_p
    print(batch_loss.shape)
    loss = batch_loss.mean()
    return loss
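
For reference, the same per-sample focal term can be written more compactly with log_softmax and gather, which also avoids the potential underflow of computing log(softmax(x)) in two steps. This is a sketch of an alternative, not code from the original post:

def focal_loss_stable(input, target, gamma=2.0):
    # input: [n, c, h, w] raw logits; target: [n, h, w] class indices
    n, c, h, w = input.size()
    logits = input.permute(0, 2, 3, 1).reshape(-1, c)
    target = target.long().reshape(-1)
    log_p = F.log_softmax(logits, dim=1)                        # [num_samples, c]
    log_p_gt = log_p.gather(1, target.view(-1, 1)).squeeze(1)   # log(p_gt) per sample
    p_gt = log_p_gt.exp()
    return (-((1 - p_gt) ** gamma) * log_p_gt).mean()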

if __name__ == '__main__':
    pred = torch.rand((2, 6, 5, 5))
    y = torch.from_numpy(np.random.randint(0, 6, (2, 5, 5)))
    loss1 = focal_loss_my(pred, y)
    loss2 = focal_loss_zhihu(pred, y)
    print('loss1', loss1)
    print('loss2', loss2)

    '''Sample output:
    in calculating batch_loss torch.Size([50]) torch.Size([50, 1]) torch.Size([50, 1])
    torch.Size([50, 1])
    loss1 tensor(1.3166)
    loss2 tensor(1.3166)
    '''
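
Note that both functions compute the class weights but never apply them to the final loss: focal_loss_my passes weight=None to F.nll_loss, and in focal_loss_zhihu the weighted batch_loss line is commented out. That is why the two printed losses agree exactly; uncommenting the weighted line (and passing weight=weights in focal_loss_my) would reintroduce the class balancing.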
