請我喝杯咖啡☕
*備忘錄:
BCEWithLogitsLoss() 會先對輸入套用 Sigmoid,再計算 BCE Loss;它可接受零個或多個元素的 0D 或更高維浮點數張量(logits 與目標值),並回傳對應的損失張量,如下所示:
*備忘錄:
"""Demonstration of torch.nn.BCEWithLogitsLoss.

Walks through the default reduction, 'sum'/'none' reductions, the
``weight`` and ``pos_weight`` arguments (float, int, bool, scalar and
1-element tensors), higher-rank inputs, and empty-tensor edge cases.
Expected outputs are noted inline.
"""
import torch
from torch import nn

tensor1 = torch.tensor([8., -3., 0., 1., 5., -2.])   # logits (raw scores)
tensor2 = torch.tensor([-3., 7., 4., -2., -9., 6.])  # targets

# Per-element loss (w = weight, p = pos_weight, x = input, y = target):
#   l = -w * (p * y * log(1/(1+exp(-x))) + (1-y) * log(1 - 1/(1+exp(-x))))
# First element (x=8, y=-3, w=1, p=1):
#   -1 * ((-3)*log(1/(1+exp(-8))) + (1-(-3))*log(1 - 1/(1+exp(-8)))) = 32.0003
# All six elements (matches the reduction='none' output below):
#   32.0003 + 21.0486 + 0.6931 + 3.3133 + 50.0067 + 12.1269 = 119.1890
# Default 'mean' reduction: 119.1890 / 6 = 19.8648
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)  # tensor(19.8648)

# Default attributes of the loss module.
bcelogits                 # BCEWithLogitsLoss()
print(bcelogits.weight)   # None
bcelogits.reduction       # 'mean'

# Spelling out the defaults gives the same result.
bcelogits = nn.BCEWithLogitsLoss(weight=None, reduction='mean', pos_weight=None)
bcelogits(input=tensor1, target=tensor2)  # tensor(19.8648)

# reduction='sum' returns the total of the per-element losses.
bcelogits = nn.BCEWithLogitsLoss(reduction='sum')
bcelogits(input=tensor1, target=tensor2)  # tensor(119.1890)

# reduction='none' returns one loss value per element.
bcelogits = nn.BCEWithLogitsLoss(reduction='none')
bcelogits(input=tensor1, target=tensor2)
# tensor([32.0003, 21.0486, 0.6931, 3.3133, 50.0067, 12.1269])

# `weight` rescales each element's whole loss term.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([0., 1., 2., 3., 4., 5.]))
bcelogits(input=tensor1, target=tensor2)  # tensor(48.8394)

# `pos_weight` rescales only the positive (y * log(sigmoid(x))) term.
bcelogits = nn.BCEWithLogitsLoss(
    pos_weight=torch.tensor([0., 1., 2., 3., 4., 5.])
)
bcelogits(input=tensor1, target=tensor2)  # tensor(28.5957)

# Scalar weights broadcast across all elements.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor(0.))
bcelogits(input=tensor1, target=tensor2)  # tensor(0.)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(0.))
bcelogits(input=tensor1, target=tensor2)  # tensor(13.8338)

# Integer weight tensors behave the same as their float equivalents.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([0, 1, 2, 3, 4, 5]))
bcelogits(input=tensor1, target=tensor2)  # tensor(48.8394)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([0, 1, 2, 3, 4, 5]))
bcelogits(input=tensor1, target=tensor2)  # tensor(28.5957)

bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor(0))
bcelogits(input=tensor1, target=tensor2)  # tensor(0.)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(0))
bcelogits(input=tensor1, target=tensor2)  # tensor(13.8338)

# Boolean weight tensors are treated as 0/1 weights.
bcelogits = nn.BCEWithLogitsLoss(
    weight=torch.tensor([True, False, True, False, True, False])
)
bcelogits(input=tensor1, target=tensor2)  # tensor(13.7834)

# A 1-element weight broadcasts over the whole input.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([False]))
bcelogits(input=tensor1, target=tensor2)  # tensor(0.)

# 2D input/target: same six values, same mean.
tensor1 = torch.tensor([[8., -3., 0.], [1., 5., -2.]])
tensor2 = torch.tensor([[-3., 7., 4.], [-2., -9., 6.]])
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)  # tensor(19.8648)

# 3D input/target: rank does not matter, only the element values.
tensor1 = torch.tensor([[[8.], [-3.], [0.]], [[1.], [5.], [-2.]]])
tensor2 = torch.tensor([[[-3.], [7.], [4.]], [[-2.], [-9.], [6.]]])
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)  # tensor(19.8648)

# Empty tensors: 'mean' divides 0 by 0 and yields nan; 'sum' yields 0.
tensor1 = torch.tensor([])
tensor2 = torch.tensor([])
bcelogits = nn.BCEWithLogitsLoss(reduction='mean')
bcelogits(input=tensor1, target=tensor2)  # tensor(nan)

bcelogits = nn.BCEWithLogitsLoss(reduction='sum')
bcelogits(input=tensor1, target=tensor2)  # tensor(0.)
以上是PyTorch 中的 BCEWithLogitsLoss的詳細內容。更多資訊請關注PHP中文網其他相關文章!