Adagrad
Adagrad optimizer
Follows Duchi et al., "Adaptive Subgradient Methods for Online Learning and Stochastic Optimization".
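As a rough, library-independent illustration of the rule this optimizer follows (a sketch in plain Swift, not this structure's implementation): each parameter keeps a running sum of its squared gradients, and its effective learning rate is the learning rate divided by the square root of that sum, with epsilon added to the divisor. All names below are illustrative.

// Minimal Adagrad sketch on plain Double arrays; not the library's implementation.
struct AdagradSketch: Codable {
    var parameters: [Double]
    var sumOfSquaredGradients: [Double]
    var learningRate: Double
    var epsilon: Double

    init(parameters: [Double], learningRate: Double, epsilon: Double = 1e-8) {
        self.parameters = parameters
        self.sumOfSquaredGradients = Array(repeating: 0, count: parameters.count)
        self.learningRate = learningRate
        self.epsilon = epsilon
    }

    // Clears the accumulated squared gradients (what "resetting the state" amounts to for Adagrad).
    mutating func reset() {
        sumOfSquaredGradients = Array(repeating: 0, count: parameters.count)
    }

    // Applies one Adagrad step for the given gradients.
    mutating func update(along gradients: [Double]) {
        for i in parameters.indices {
            sumOfSquaredGradients[i] += gradients[i] * gradients[i]
            let adaptedLearningRate = learningRate / (sumOfSquaredGradients[i].squareRoot() + epsilon)
            parameters[i] -= adaptedLearningRate * gradients[i]
        }
    }
}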
                  
                  
ParamTensor
Tensor type used for the parameters of the optimized layer and for the gradients passed to the optimizer.
Declaration
Swift
public typealias ParamTensor = Tensor<Layer.Parameter, Layer.Device>
                  
                  
model
Model to optimize
Declaration
Swift
public private(set) var model: Layer { get }
                  
                  
learningRate
Learning rate scaling factor
Declaration
Swift
public var learningRate: ParamTensor
                  
                  
epsilon
Normalization scalar added to divisors
Declaration
Swift
public var epsilon: ParamTensor
                  
                  
init(model:learningRate:epsilon:)
Creates an Adagrad optimizer.
Follows Duchi et al., "Adaptive Subgradient Methods for Online Learning and Stochastic Optimization".
Declaration
Swift
public init(model: Layer, learningRate: ParamTensor, epsilon: ParamTensor = 1e-8)
Parameters
model: Model to optimize
learningRate: Learning rate scaling factor
epsilon: Normalization scalar added to divisors
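For a concrete feel of the three parameters, the sketch type from the top of this page can be constructed with the same argument roles (illustrative only; this is not the initializer documented above):

// Illustrative use of AdagradSketch, mirroring this initializer's parameters.
var sketch = AdagradSketch(parameters: [0.5, -1.2, 2.0], learningRate: 0.1)   // epsilon defaults to 1e-8
sketch.update(along: [0.1, 0.0, -0.3])
print(sketch.parameters)

// Overriding the default epsilon:
var tunedSketch = AdagradSketch(parameters: [0.5, -1.2, 2.0], learningRate: 0.1, epsilon: 1e-6)
tunedSketch.update(along: [0.1, 0.0, -0.3])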
                  
                  
reset()
Resets the state of the optimizer
Declaration
Swift
public mutating func reset()
                  
                  
update(along:)
Updates the parameters of the model along the given gradients using the Adagrad update rule.
Declaration
Swift
public mutating func update(along gradients: [ParamTensor])
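To picture repeated calls (again using the library-independent sketch from above, not this structure): every call adds the squared gradients to the accumulator, so the effective step size for a parameter shrinks over the course of training even when the gradient stays the same.

var state = AdagradSketch(parameters: [1.0], learningRate: 0.5)
for _ in 0..<3 {
    state.update(along: [2.0])      // constant gradient, yet each step is smaller than the last
    print(state.parameters[0])
}
state.reset()                       // drops the accumulated squared gradients, restoring the full step size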
                  
                  
init(from:)
Creates a new optimizer by decoding its state from the given decoder.
Declaration
Swift
public init(from decoder: Decoder) throws
                  
                  
encode(to:)
Encodes the state of the optimizer into the given encoder.
Declaration
Swift
public func encode(to encoder: Encoder) throws
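These two requirements give the optimizer Codable conformance, so its accumulated state can be serialized and restored, e.g. for checkpointing. Below is a self-contained round trip using the sketch type from above and Foundation's JSON coders; the exact requirements for encoding the real structure (such as whether the wrapped layer must itself be Codable) are not stated on this page.

import Foundation

// Round-tripping the sketch's state through JSON, analogous to encode(to:) / init(from:).
var checkpointed = AdagradSketch(parameters: [1.0, 2.0], learningRate: 0.1)
checkpointed.update(along: [0.5, -0.5])

do {
    let data = try JSONEncoder().encode(checkpointed)
    let restored = try JSONDecoder().decode(AdagradSketch.self, from: data)
    print(restored.sumOfSquaredGradients)   // the accumulated state survives the round trip
} catch {
    print("Checkpointing sketch failed: \(error)")
}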