
Optim

Classes:

  • Lion

    PyTorch implementation of the Lion optimizer from https://github.com/google/automl/blob/master/lion/lion_pytorch.py

Lion

Bases: Optimizer

PyTorch implementation of the Lion optimizer from https://github.com/google/automl/blob/master/lion/lion_pytorch.py
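
In compact form, the update implemented in step (see the source below) is, writing theta for a parameter tensor, g for its current gradient, m for the momentum buffer exp_avg, eta for lr and lambda for weight_decay:

$$\theta \leftarrow (1 - \eta\lambda)\,\theta$$

$$u = \beta_1 m + (1 - \beta_1)\,g, \qquad \theta \leftarrow \theta - \eta\,\operatorname{sign}(u)$$

$$m \leftarrow \beta_2 m + (1 - \beta_2)\,g$$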

Methods:

  • __init__

    Initialize the hyperparameters.

  • step

    Performs a single optimization step.

Source code in tapeagents/finetune/optim.py
class Lion(Optimizer):
    r"""PyTorch implementation of the Lion optimizer from https://github.com/google/automl/blob/master/lion/lion_pytorch.py"""

    def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0):
        """Initialize the hyperparameters.

        Args:
          params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
          lr (float, optional): learning rate (default: 1e-4)
          betas (Tuple[float, float], optional): coefficients used to interpolate
            the momentum buffer and the gradient for the sign update (beta1) and
            to update the momentum running average (beta2) (default: (0.9, 0.99))
          weight_decay (float, optional): weight decay coefficient (default: 0)
        """

        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
          closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

        Returns:
          (tensor): the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue

                # Perform decoupled weight decay (applied directly to the weights)
                p.data.mul_(1 - group["lr"] * group["weight_decay"])

                grad = p.grad
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p)

                exp_avg = state["exp_avg"]
                beta1, beta2 = group["betas"]

                # Weight update
                update = exp_avg * beta1 + grad * (1 - beta1)
                p.add_(torch.sign(update), alpha=-group["lr"])
                # Update the momentum running average (EMA of the gradients)
                exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)

        return loss
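
A minimal usage sketch (the model and data below are placeholders, and the import path simply follows the file location shown above):

import torch
from tapeagents.finetune.optim import Lion

model = torch.nn.Linear(16, 1)  # any torch.nn.Module
optimizer = Lion(model.parameters(), lr=1e-4, weight_decay=0.01)

x, y = torch.randn(8, 16), torch.randn(8, 1)
loss = torch.nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()  # applies the sign-based update and refreshes the momentum buffer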

__init__(params, lr=0.0001, betas=(0.9, 0.99), weight_decay=0.0)

Initialize the hyperparameters.

Parameters:

  • params (iterable) –

    iterable of parameters to optimize or dicts defining parameter groups (see the sketch after this list)

  • lr (float, default: 0.0001 ) –

    learning rate (default: 1e-4)

  • betas (Tuple[float, float], default: (0.9, 0.99) ) –

    coefficients used to interpolate the momentum buffer and the gradient for the sign update (beta1) and to update the momentum running average (beta2) (default: (0.9, 0.99))

  • weight_decay (float, default: 0.0 ) –

    weight decay coefficient (default: 0)
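
Because Lion delegates parameter-group handling to the torch.optim.Optimizer base class, per-group overrides work the usual way. A small sketch (the two-layer model is illustrative only):

import torch
from tapeagents.finetune.optim import Lion

model = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.Linear(32, 1))
optimizer = Lion(
    [
        {"params": model[0].parameters(), "lr": 3e-5},  # group-specific learning rate
        {"params": model[1].parameters()},              # falls back to the defaults below
    ],
    lr=1e-4,
    betas=(0.9, 0.99),
    weight_decay=0.1,
)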

Source code in tapeagents/finetune/optim.py
def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0):
    """Initialize the hyperparameters.

    Args:
      params (iterable): iterable of parameters to optimize or dicts defining
        parameter groups
      lr (float, optional): learning rate (default: 1e-4)
      betas (Tuple[float, float], optional): coefficients used to interpolate
        the momentum buffer and the gradient for the sign update (beta1) and
        to update the momentum running average (beta2) (default: (0.9, 0.99))
      weight_decay (float, optional): weight decay coefficient (default: 0)
    """

    if not 0.0 <= lr:
        raise ValueError("Invalid learning rate: {}".format(lr))
    if not 0.0 <= betas[0] < 1.0:
        raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
    if not 0.0 <= betas[1] < 1.0:
        raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
    defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay)
    super().__init__(params, defaults)

step(closure=None)

Performs a single optimization step.

Parameters:

  • closure (callable, default: None ) –

    A closure that reevaluates the model and returns the loss (see the sketch below).

Returns:

  • tensor

    the loss.
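
A sketch of the closure form, mirroring the usual torch.optim convention; step re-runs the closure under torch.enable_grad() and returns whatever the closure returns (model, data and hyperparameters below are placeholders):

import torch
from tapeagents.finetune.optim import Lion

model = torch.nn.Linear(16, 1)
optimizer = Lion(model.parameters(), lr=1e-4)
x, y = torch.randn(8, 16), torch.randn(8, 1)

def closure():
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

loss = optimizer.step(closure)  # the closure's loss is returned by step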

Source code in tapeagents/finetune/optim.py
@torch.no_grad()
def step(self, closure=None):
    """Performs a single optimization step.

    Args:
      closure (callable, optional): A closure that reevaluates the model
        and returns the loss.

    Returns:
      (tensor): the loss.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()

    for group in self.param_groups:
        for p in group["params"]:
            if p.grad is None:
                continue

            # Perform decoupled weight decay (applied directly to the weights)
            p.data.mul_(1 - group["lr"] * group["weight_decay"])

            grad = p.grad
            state = self.state[p]
            # State initialization
            if len(state) == 0:
                # Exponential moving average of gradient values
                state["exp_avg"] = torch.zeros_like(p)

            exp_avg = state["exp_avg"]
            beta1, beta2 = group["betas"]

            # Weight update
            update = exp_avg * beta1 + grad * (1 - beta1)
            p.add_(torch.sign(update), alpha=-group["lr"])
            # Update the momentum running average (EMA of the gradients)
            exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)

    return loss