Super Kai (Kazuya Ito)

add in PyTorch

add() can do addition with two tensors of zero or more dimensions (each holding zero or more elements), with two scalars, or with a tensor of zero or more dimensions and a scalar, as shown below:

*Memos:

  • add() can be used with torch or a tensor.
  • The 1st argument (input) with torch (Type: tensor or scalar of int, float, complex or bool) or using a tensor (Type: tensor of int, float, complex or bool) (Required).
  • The 2nd argument with torch or the 1st argument with a tensor is other (Required-Type: tensor or scalar of int, float, complex or bool).
  • The 3rd argument with torch or the 2nd argument with a tensor is alpha (Optional-Default: 1-Type: tensor or scalar of int, float, complex or bool). *other is multiplied by alpha (input or a tensor + other x alpha).
  • There is an out argument with torch (Optional-Default: None-Type: tensor): *Memos:
    • out= must be used (see the sketch after the examples below).
    • My post explains out argument.
import torch

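# With int (int64) tensors and int scalars: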
tensor1 = torch.tensor([9, 7, 6])
tensor2 = torch.tensor([[4, -4, 3], [-2, 5, -5]])

torch.add(input=tensor1, other=tensor2)
tensor1.add(other=tensor2)
torch.add(input=tensor1, other=tensor2, alpha=1)
torch.add(input=tensor1, other=tensor2, alpha=torch.tensor(1))
# tensor([[13, 3, 9], [7, 12, 1]])

torch.add(input=tensor1, other=tensor2, alpha=0)
torch.add(input=tensor1, other=tensor2, alpha=torch.tensor(0))
# tensor([[9, 7, 6], [9, 7, 6]])

torch.add(input=tensor1, other=tensor2, alpha=2)
torch.add(input=tensor1, other=tensor2, alpha=torch.tensor(2))
# tensor([[17, -1, 12], [5, 17, -4]])

torch.add(input=tensor1, other=tensor2, alpha=-1)
torch.add(input=tensor1, other=tensor2, alpha=torch.tensor(-1))
# tensor([[5, 11, 3], [11, 2, 11]])

torch.add(input=tensor1, other=tensor2, alpha=-2)
torch.add(input=tensor1, other=tensor2, alpha=torch.tensor(-2))
# tensor([[1, 15, 0], [13, -3, 16]])

torch.add(input=9, other=tensor2)
torch.add(input=9, other=tensor2, alpha=1)
torch.add(input=9, other=tensor2, alpha=torch.tensor(1))
# tensor([[13, 5, 12], [7, 14, 4]])

torch.add(input=tensor1, other=4)
torch.add(input=tensor1, other=4, alpha=1)
torch.add(input=tensor1, other=4, alpha=torch.tensor(1))
# tensor([13, 11, 10])

torch.add(input=9, other=4)
torch.add(input=9, other=4, alpha=1)
torch.add(input=9, other=4, alpha=torch.tensor(1))
# tensor(13)

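# With float (float32) tensors and float scalars: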
tensor1 = torch.tensor([9., 7., 6.])
tensor2 = torch.tensor([[4., -4., 3.], [-2., 5., -5.]])

torch.add(input=tensor1, other=tensor2)
torch.add(input=tensor1, other=tensor2, alpha=1.)
torch.add(input=tensor1, other=tensor2, alpha=torch.tensor(1.))
# tensor([[13., 3., 9.], [7., 12., 1.]])

torch.add(input=9., other=tensor2)
torch.add(input=9., other=tensor2, alpha=1.)
torch.add(input=9., other=tensor2, alpha=torch.tensor(1.))
# tensor([[13., 5., 12.], [7., 14., 4.]])

torch.add(input=tensor1, other=4.)
torch.add(input=tensor1, other=4., alpha=1.)
torch.add(input=tensor1, other=4., alpha=torch.tensor(1.))
# tensor([13., 11., 10.])

torch.add(input=9., other=4.)
torch.add(input=9., other=4., alpha=1.)
torch.add(input=9., other=4., alpha=torch.tensor(1.))
# tensor(13.)

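# With complex (complex64) tensors and complex scalars: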
tensor1 = torch.tensor([9.+0.j, 7.+0.j, 6.+0.j])
tensor2 = torch.tensor([[4.+0.j, -4.+0.j, 3.+0.j],
                        [-2.+0.j, 5.+0.j, -5.+0.j]])
torch.add(input=tensor1, other=tensor2)
torch.add(input=tensor1, other=tensor2, alpha=1.+0.j)
torch.add(input=tensor1, other=tensor2, alpha=torch.tensor(1.+0.j))
# tensor([[13.+0.j, 3.+0.j, 9.+0.j],
#         [7.+0.j, 12.+0.j, 1.+0.j]])

torch.add(input=9.+0.j, other=tensor2)
torch.add(input=9.+0.j, other=tensor2, alpha=1.+0.j)
torch.add(input=9.+0.j, other=tensor2, alpha=torch.tensor(1.+0.j))
# tensor([[13.+0.j, 5.+0.j, 12.+0.j],
#         [7.+0.j, 14.+0.j, 4.+0.j]])

torch.add(input=tensor1, other=4.+0.j)
torch.add(input=tensor1, other=4.+0.j, alpha=1.+0.j)
torch.add(input=tensor1, other=4.+0.j, alpha=torch.tensor(1.+0.j))
# tensor([13.+0.j, 11.+0.j, 10.+0.j])

torch.add(input=9.+0.j, other=4.+0.j)
torch.add(input=9.+0.j, other=4.+0.j, alpha=1.+0.j)
torch.add(input=9.+0.j, other=4.+0.j, alpha=torch.tensor(1.+0.j))
# tensor(13.+0.j)

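# With bool tensors and bool scalars (addition behaves like logical OR, as the outputs below show):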
tensor1 = torch.tensor([True, False, True])
tensor2 = torch.tensor([[False, True, False], [True, False, True]])

torch.add(input=tensor1, other=tensor2)
torch.add(input=tensor1, other=tensor2, alpha=True)
torch.add(input=tensor1, other=tensor2, alpha=torch.tensor(True))
# tensor([[True, True, True], [True, False, True]])

torch.add(input=True, other=tensor2)
torch.add(input=True, other=tensor2, alpha=True)
torch.add(input=True, other=tensor2, alpha=torch.tensor(True))
# tensor([[True, True, True], [True, True, True]])

torch.add(input=tensor1, other=False)
torch.add(input=tensor1, other=False, alpha=True)
torch.add(input=tensor1, other=False, alpha=torch.tensor(True))
# tensor([True, False, True])

torch.add(input=True, other=False)
torch.add(input=True, other=False, alpha=True)
torch.add(input=True, other=False, alpha=torch.tensor(True))
# tensor(True)
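The out argument from the memos above is not shown in the examples, so here is a minimal sketch of it. It assumes a preallocated tensor named result (the name is just for illustration); add() writes the result into it instead of allocating a new tensor:

import torch

tensor1 = torch.tensor([9, 7, 6])
tensor2 = torch.tensor([[4, -4, 3], [-2, 5, -5]])

# Preallocate a tensor with the broadcast shape (2, 3) and a matching dtype,
# then let add() write its result into it via out=.
result = torch.empty(2, 3, dtype=torch.int64)

torch.add(input=tensor1, other=tensor2, out=result)
result
# tensor([[13, 3, 9], [7, 12, 1]])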