커피 한잔 사주세요😄
*메모:
BCEWithLogitsLoss()는 Sigmoid 계층과 BCE Loss를 하나로 결합한 손실 함수입니다. 0개 이상의 요소로 구성된 0차원 이상의 입력 텐서(logit)와 같은 형태의 타깃 텐서를 받아, Sigmoid를 적용한 뒤 BCE Loss로 계산한 값(float)들을 반환합니다(reduction에 따라 스칼라 또는 요소별 텐서).
*메모:
import torch
from torch import nn

# Demo of nn.BCEWithLogitsLoss: sigmoid + binary cross-entropy in one op.
# Per-element loss (w = `weight`, p = `pos_weight`, x = logit, y = target):
#   loss_i = -w_i * (p_i * y_i * log(sigmoid(x_i)) + (1 - y_i) * log(1 - sigmoid(x_i)))
# NOTE(review): the targets below lie outside [0, 1]; this is deliberate here
# to probe the raw formula, not a recommended training setup.
tensor1 = torch.tensor([8., -3., 0., 1., 5., -2.])
tensor2 = torch.tensor([-3., 7., 4., -2., -9., 6.])

# First element worked out by hand (x=8, y=-3, w=p=1):
#   -1 * (1*(-3)*log(1/(1+exp(-8))) + (1-(-3))*log(1 - 1/(1+exp(-8)))) = 32.0003
# All six element losses:
#   32.0003 + 21.0486 + 0.6931 + 3.3133 + 50.0067 + 12.1269 = 119.1890
# Default reduction is 'mean': 119.1890 / 6 = 19.8648
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)  # tensor(19.8648)
bcelogits                                 # BCEWithLogitsLoss()
print(bcelogits.weight)                   # None
bcelogits.reduction                       # 'mean'

# Spelling out the defaults changes nothing.
bcelogits = nn.BCEWithLogitsLoss(weight=None, reduction='mean', pos_weight=None)
bcelogits(input=tensor1, target=tensor2)  # tensor(19.8648)

bcelogits = nn.BCEWithLogitsLoss(reduction='sum')
bcelogits(input=tensor1, target=tensor2)  # tensor(119.1890)

bcelogits = nn.BCEWithLogitsLoss(reduction='none')
bcelogits(input=tensor1, target=tensor2)
# tensor([32.0003, 21.0486,  0.6931,  3.3133, 50.0067, 12.1269])

# `weight` rescales every element's loss; `pos_weight` scales only the
# positive (y * log(sigmoid(x))) term.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([0., 1., 2., 3., 4., 5.]))
bcelogits(input=tensor1, target=tensor2)  # tensor(48.8394)

bcelogits = nn.BCEWithLogitsLoss(
    pos_weight=torch.tensor([0., 1., 2., 3., 4., 5.])
)
bcelogits(input=tensor1, target=tensor2)  # tensor(28.5957)

# 0-D (scalar) weights broadcast over all elements.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor(0.))
bcelogits(input=tensor1, target=tensor2)  # tensor(0.)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(0.))
bcelogits(input=tensor1, target=tensor2)  # tensor(13.8338)

# Integer and boolean weight tensors behave like their float equivalents.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([0, 1, 2, 3, 4, 5]))
bcelogits(input=tensor1, target=tensor2)  # tensor(48.8394)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([0, 1, 2, 3, 4, 5]))
bcelogits(input=tensor1, target=tensor2)  # tensor(28.5957)

bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor(0))
bcelogits(input=tensor1, target=tensor2)  # tensor(0.)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(0))
bcelogits(input=tensor1, target=tensor2)  # tensor(13.8338)

bcelogits = nn.BCEWithLogitsLoss(
    weight=torch.tensor([True, False, True, False, True, False])
)
bcelogits(input=tensor1, target=tensor2)  # tensor(13.7834)

# A length-1 weight broadcasts across the whole input.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([False]))
bcelogits(input=tensor1, target=tensor2)  # tensor(0.)

# Higher-rank inputs: the mean is taken over ALL elements, so 2-D and 3-D
# reshapes of the same six values give the same result as the 1-D case.
tensor1 = torch.tensor([[8., -3., 0.], [1., 5., -2.]])
tensor2 = torch.tensor([[-3., 7., 4.], [-2., -9., 6.]])
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)  # tensor(19.8648)

tensor1 = torch.tensor([[[8.], [-3.], [0.]], [[1.], [5.], [-2.]]])
tensor2 = torch.tensor([[[-3.], [7.], [4.]], [[-2.], [-9.], [6.]]])
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)  # tensor(19.8648)

# Empty tensors: 'mean' divides 0 by 0 -> nan; 'sum' of nothing -> 0.
tensor1 = torch.tensor([])
tensor2 = torch.tensor([])
bcelogits = nn.BCEWithLogitsLoss(reduction='mean')
bcelogits(input=tensor1, target=tensor2)  # tensor(nan)
bcelogits = nn.BCEWithLogitsLoss(reduction='sum')
bcelogits(input=tensor1, target=tensor2)  # tensor(0.)
위 내용은 PyTorch의 BCEWithLogitsLoss 손실 함수에 대한 상세 설명입니다.