
This is an unfinished preview.

This comes from the tutoring that I do; it is a preview for study groups.

from fastai2.vision.all import *
path=untar_data(URLs.MNIST_TINY)
Path('/home/fast/.fastai/data').ls()
(#42) [Path('/home/fast/.fastai/data/imagenette-160.tgz'),Path('/home/fast/.fastai/data/imagenette-160'),Path('/home/fast/.fastai/data/mnist.pkl.gz'),Path('/home/fast/.fastai/data/imagenette.tgz'),Path('/home/fast/.fastai/data/imagenette'),Path('/home/fast/.fastai/data/danbooru2018'),Path('/home/fast/.fastai/data/horse2zebra'),Path('/home/fast/.fastai/data/Selfie-dataset'),Path('/home/fast/.fastai/data/oxford-iiit-pet'),Path('/home/fast/.fastai/data/planet_tiny')...]
db=DataBlock((ImageBlock, CategoryBlock), get_items=get_image_files, splitter=GrandparentSplitter(),
                   get_y=parent_label,batch_tfms=aug_transforms(do_flip=False))
dls=db.dataloaders(path,bs=16)
dls.show_batch()

Optimizers

SGD

This is the basic setup; we will repeat it every time we change the code, to reset all the variables involved.

x,labels = dls.one_batch()
x,labels=x.cpu(),labels.cpu()
m = nn.Sequential(nn.Conv2d(3,32,7,3,3),nn.Flatten(),nn.Linear(3200,2)) #very basic pytorch model: only linear operations and no activation, so not a deep model
l = nn.CrossEntropyLoss()
lr=0.1
opt=SGD(m.parameters(),lr)
pred=m(x)
loss=l(pred,labels)
loss.backward()
class OurSGD:
    def __init__(self,params,lr):
        self.params,self.lr=params,lr
    def step(self):
        updated_params=[]
        for p in self.params:
            updated_params.append(p.add(-self.lr*p.grad)) #important part!!!
        return updated_params

The important part above is p.add(-self.lr * p.grad); this is the essence of SGD. Notice that we are returning the updated parameters in a list. The actual implementation does not do this; instead, everything is updated in place. Otherwise this is effectively the same as the Fastai source code.
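
To see the update rule in isolation, here is a minimal standalone sketch (toy tensor and loss, values assumed purely for illustration) showing that p.add(-lr*p.grad) is just the parameter minus the learning rate times its gradient:

p_toy = torch.tensor([1.0, 2.0], requires_grad=True)
loss_toy = (p_toy ** 2).sum()                 # simple quadratic loss, so the gradient is 2*p
loss_toy.backward()
print(p_toy.grad)                             # tensor([2., 4.])
print(p_toy.detach().add(-0.1 * p_toy.grad))  # tensor([0.8000, 1.6000]) = p - lr*grad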

our_sgd=OurSGD(m.parameters(),lr)
our_parameters=our_sgd.step()

Q1. Please update the list on the left so that it is equal to our_parameters. Pay attention to how SGD is implemented.

parameters_equal([p for p in m.parameters()],our_parameters)
False
False
False
False

Remember, running Fastai's optimizer will update the weights, so after running it you will have to rerun the setup above to attempt the problem again.

opt.step()

Here we compare against Fastai's implementation, just to make sure we are getting the same values. We use allclose here because, later on, slight variations will appear as more math (and therefore floating-point error) is introduced.
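
As a quick illustration of why exact equality is too strict (a toy example with an assumed amount of error, not values from this notebook):

a = torch.tensor([1.0])
b = a + 1e-6                 # a tiny bit of floating-point error
print(torch.equal(a, b))     # False: not bit-for-bit identical
print(a.allclose(b))         # True: equal within allclose's default tolerances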

def parameters_equal(mps,ops):
    for mp,op in zip((mps),ops):
        print(mp.allclose(op))
parameters_equal(m.parameters(),our_parameters)
True
True
True
True
SGD with Momentum
x,labels = dls.one_batch()
x,labels=x.cpu(),labels.cpu()
m = nn.Sequential(nn.Conv2d(3,32,7,3,3),nn.Flatten(),nn.Linear(3200,2))
l = nn.CrossEntropyLoss()
lr=0.1
mom=0.9
opt=SGD(m.parameters(),lr,mom)
pred=m(x)
loss=l(pred,torch.zeros([pred.size()[0]],dtype=torch.long))
loss.backward()
class OurSGDwithMomentum:
    def __init__(self,params,lr,mom):
        self.params,self.lr=list(params),lr
        self.mom=mom ##added
        self.avg_grad=[torch.zeros_like(p) for p in self.params] #notice how avg_grad starts at 0. 
    def step(self):
        updated_params=[]
        for i,p in enumerate(self.params):
            updated_params.append(p.add(-self.lr*self.mom_grad(i,p.grad)))
        return updated_params
    #avg_grad is weighted average using momentum
    def mom_grad(self,i,grad):
        self.avg_grad[i]=self.mom*self.avg_grad[i]+grad #this is the important part
        return self.avg_grad[i]

Above we add momentum. It is important to realize that momentum is an exponentially weighted average, not the "mean." This kind of weighted average comes up a fair amount in machine learning, so it is good to get the concept down now. Momentum is one of the more important hyperparameters after learning rate and weight decay (coming up next).
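
A toy numeric sketch of that weighted average (made-up gradient values), compared with the plain mean:

mom_toy = 0.9
grads_toy = [1.0, 1.0, 1.0]      # pretend the gradient is 1.0 on three consecutive steps
avg = 0.0
for g in grads_toy:
    avg = mom_toy * avg + g      # same update as mom_grad above
    print(avg)                   # 1.0, then 1.9, then about 2.71 -- it keeps growing, while the mean stays at 1.0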

our_sgd=OurSGDwithMomentum(m.parameters(),lr,mom)
our_parameters=our_sgd.step()

Q2. Make this one work as in Q1. Pay attention to how momentum works in the code above.

parameters_equal([p-lr*(mom+p.grad) for p in m.parameters()],our_parameters)
False
False
False
False
opt.step()
parameters_equal(m.parameters(),our_parameters)
True
True
True
True

Now for step #2! We do a second step because momentum has "state", and we need to make sure that state carries over correctly.
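
A minimal sketch of what that state does (toy gradients, values assumed):

mom_toy, lr_toy = 0.9, 0.1
g1, g2 = 2.0, 1.0                 # pretend gradients from step one and step two
avg = mom_toy * 0.0 + g1          # step one: the average starts from zero
print(-lr_toy * avg)              # -0.2
avg = mom_toy * avg + g2          # step two: the stored average carries over
print(-lr_toy * avg)              # about -0.28, not -0.1, because of the leftover momentum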

Q3. Because of state changes, update the code here to get the correct answer below.

our_answer=[p-lr*(our_sgd.avg_grad[i]+p.grad) for i,p in enumerate(m.parameters())] #loops through avg_grad now
our_parameters=our_sgd.step()
parameters_equal(our_answer,our_parameters) #this is testing your solution.
False
False
False
False
opt.step()
parameters_equal(m.parameters(),our_parameters)
True
True
True
True
SGD with Weight Decay

For SGD, weight decay and L2 regularization are effectively the same: one acts on the weights, the other on the gradients. This is not true for more complicated optimizers.
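
A quick numeric check (toy parameter and gradient, values assumed) that the two are the same update for plain SGD:

lr_toy, wd_toy = 0.1, 0.01
p_toy, g_toy = 2.0, 0.5
l2_version = p_toy - lr_toy * (g_toy + wd_toy * p_toy)       # wd folded into the gradient
wd_version = p_toy * (1 - lr_toy * wd_toy) - lr_toy * g_toy  # wd applied to the weights
print(l2_version, wd_version)                                # both 1.948, up to floating-point rounding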

x,labels = dls.one_batch()
x,labels=x.cpu(),labels.cpu()
m = nn.Sequential(nn.Conv2d(3,32,7,3,3),nn.Flatten(),nn.Linear(3200,2))
l = nn.CrossEntropyLoss()
lr=0.1
mom=0.9
wd=0.01
opt=SGD(m.parameters(),lr,mom,wd)
pred=m(x)
loss=l(pred,torch.zeros([pred.size()[0]],dtype=torch.long))
loss.backward()
class Momentum:
    def __init__(self,params,lr,mom):
        self.mom=mom
        self.params=params
        self.avg_grads=[torch.zeros_like(p) for p in self.params] #avg_grad is weighted average using momentum
    def __call__(self,**kwargs):
        self.avg_grads = [ self.mom*avg_grad+p.grad for p,avg_grad in zip(self.params,self.avg_grads) ]
        return {'avg_grads': self.avg_grads,**kwargs}
class Weight_Decay:
    def __init__(self,params,lr,wd):
        self.lr=lr
        self.wd=wd
        self.params=params
    def __call__(self,**kwargs):
        return {**kwargs,'params':[p*(1-self.lr*self.wd) for p in self.params]} #same as params-lr*wd*params, important part!!!!
class OurSGD:
    def __init__(self,params,lr,mom,wd):
        self.params,self.lr=list(params),lr
        self.mom=Momentum(self.params,self.lr,mom)
        self.wd=Weight_Decay(self.params,self.lr,wd)
    def step(self):
        updated_params=[]
        self.params=self.wd()['params']
        avg_grads=self.mom()['avg_grads']
        for i,p in enumerate(self.params):
            updated_params.append(p.add(-self.lr*avg_grads[i])) 
        return updated_params

Okay, things have gotten more complicated. We now split Momentum and Weight Decay out into two separate callables; these are optimizer callbacks in fastai. [p*(1-self.lr*self.wd) for p in self.params] is the important bit, as is understanding the order in which the math is applied.
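
To make that order concrete, here is the same arithmetic on a single toy parameter (values assumed), in the order the step method applies it:

lr_toy, wd_toy, mom_toy = 0.1, 0.01, 0.9
p_toy, g_toy, avg_toy = 2.0, 0.5, 0.0
p_decayed = p_toy * (1 - lr_toy * wd_toy)   # 1. weight decay shrinks the parameter
avg_toy = mom_toy * avg_toy + g_toy         # 2. momentum updates the running average
p_new = p_decayed - lr_toy * avg_toy        # 3. the SGD step uses the decayed parameter
print(p_new)                                # about 1.948 on this first step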

our_sgd=OurSGD(m.parameters(),lr,mom,wd)
def momentum(mom,avg_grad,p):
    return mom*avg_grad+p.grad
def weight_decay(wd):
    return wd
our_answers=[p-lr*momentum(mom,our_sgd.mom.avg_grads[i],p)-lr*weight_decay(wd) for i,p in enumerate(m.parameters())]
our_parameters=our_sgd.step()

Q4. Make this one true by editing the weight_decay function above.

parameters_equal(our_answers,our_parameters)
False
False
False
False
opt.step()
parameters_equal(m.parameters(),our_parameters)
True
True
True
True

Step two...

opt.zero_grad()
pred=m(x)
loss=l(pred,torch.zeros([pred.size()[0]],dtype=torch.long))
loss.backward()
our_parameters=our_sgd.step()
opt.step()
parameters_equal(m.parameters(),our_parameters)
True
True
True
True
SGD with l2 reg

L2 regularization and weight decay have very similar effects, so there is no reason to use both.

x = torch.randn([10,3,32,32])
m = nn.Sequential(nn.Conv2d(3,32,7,3,3),nn.Flatten(),nn.Linear(3872,2))
l = nn.CrossEntropyLoss()
lr=0.1
mom=0.9
wd=0.01
opt=SGD(m.parameters(),lr,mom,wd,decouple_wd= False)
pred=m(x)
loss=l(pred,torch.zeros([pred.size()[0]],dtype=torch.long))
loss.backward()

We are doing a bit of refactoring here to remove the momentum- and weight-decay-specific logic from the optimizer. We also split off the SGD-specific step while we are at it.
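
The OurOptimizer class below chains its callbacks by threading a single state dict through each one. Here is a stripped-down sketch of that pattern with two made-up callbacks that have nothing to do with optimizers:

def double_cb(x=None, **kwargs):
    return {**kwargs, 'x': x * 2}
def add_one_cb(x=None, **kwargs):
    return {**kwargs, 'x': x + 1}
state = {'x': 3, 'lr': 0.1}
for cb in [double_cb, add_one_cb]:
    state = cb(**state)          # each callback reads the state and returns an updated copy
print(state)                     # {'lr': 0.1, 'x': 7}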

class Momentum:
    def __init__(self,params=None,lr=0.0001,mom=0.9,**kwargs):
        self.mom=mom
        self.params=params
        self.avg_grads=[torch.zeros_like(p) for p in self.params] #avg_grad is weighted average using momentum
    def __call__(self,params=None,**kwargs):
        params = self.params if params is None else params
        self.avg_grads = [ self.mom*avg_grad+p.grad for p,avg_grad in zip(params,self.avg_grads) ]
        return {**kwargs,'params':params,'avg_grads': self.avg_grads}
class Weight_Decay:
    def __init__(self,params=None,lr=0.0001,wd=0.01,decouple=True,**kwargs):
        self.lr=lr
        self.wd=wd
        self.params=params
        self.decouple=decouple
    def __call__(self,**kwargs):
        params = self._do_wd() if self.decouple else self._do_l2_reg()
        return {**kwargs,'params':params}
    def _do_wd(self,**kwargs):
        params=[p*(1-self.lr*self.wd) for p in self.params]
        for p,mp in zip(params,self.params):
            p.grad=mp.grad
        return params #same as params-lr*wd*params
    #this one is pretty ugly 
    def _do_l2_reg(self,**kwargs):
        params=[deepcopy(p) for p in self.params]
        for p,mp in zip(params,self.params):
            p.grad=mp.grad + self.wd* mp
        return params
class OurSGD:
    hypers=[Weight_Decay,Momentum]
    def __init__(self,params,lr,**kwargs):
        self.lr=lr
        self.params=params
    def __call__(self,params=None,avg_grads=None,**kwargs):
        return {**kwargs,'params':[ p.add(-self.lr*avg) for p,avg in zip(params,avg_grads) ]}
class OurOptimizer:
    def __init__(self,params,lr,opt,**kwargs):
        self.state={'params':list(params),'lr':lr}
        self.cbs=[cls(**self.state,**kwargs) for cls in [*opt.hypers,opt]]
    def step(self):
        state=self.state
        for cb in self.cbs:
            state=cb(**state)
        return state['params']
our_opt=OurOptimizer(m.parameters(),lr,OurSGD,decouple=False)
our_parameters=our_opt.step()
opt.step()
parameters_equal(m.parameters(),our_parameters)
True
True
True
True
opt.zero_grad()
pred=m(x)
loss=l(pred,torch.zeros([pred.size()[0]],dtype=torch.long))
loss.backward()
our_parameters=our_opt.step()
opt.step()
parameters_equal(m.parameters(),our_parameters)
True
True
True
True

Done with my refactoring. If you look closely, there is an issue: lots of for loops going over the same data. In fastai, each function (momentum/weight_decay/sgd) works on a single parameter at a time, and the looping is encapsulated in a single for loop, instead of my approach of passing all the parameters to a function that does the looping itself. I just got tired of refactoring at this point and decided to keep what I had; lots of refactoring happened outside of this notebook. A rough sketch of the per-parameter style follows.
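
This is my own illustration of that per-parameter style, not fastai's actual code, and the callback names here are made up:

def average_grad_cb(p, state, mom=0.9, **kwargs):
    avg = state.get('avg_grad', torch.zeros_like(p))
    state['avg_grad'] = mom * avg + p.grad        # running average kept per parameter
    return state
def sgd_step_cb(p, state, lr=0.1, **kwargs):
    p.data.add_(-lr * state['avg_grad'])          # update in place, like the real optimizers
    return state
# toy parameters with fake gradients, just to exercise the single loop
toy_params = [torch.ones(2, requires_grad=True), torch.ones(3, requires_grad=True)]
for p in toy_params: p.grad = torch.full_like(p, 0.5)
toy_states = [{} for _ in toy_params]
for p, state in zip(toy_params, toy_states):      # one loop over parameters...
    for cb in [average_grad_cb, sgd_step_cb]:     # ...each callback only ever sees a single parameter
        state = cb(p, state)
print(toy_params[0])                              # tensor([0.9500, 0.9500], requires_grad=True)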

RMSProp

x = torch.randn([10,3,32,32])
m = nn.Sequential(nn.Conv2d(3,32,7,3,3),nn.Flatten(),nn.Linear(3872,2))
l = nn.CrossEntropyLoss()
lr=0.1
mom=0.9
wd=0.01
sqr_mom=0.95
opt=RMSProp(m.parameters(),lr,sqr_mom,mom,wd)
pred=m(x)
loss=l(pred,torch.zeros([pred.size()[0]],dtype=torch.long))
loss.backward()
class Momentum:
    def __init__(self,params=None,lr=0.0001,mom=0.9,**kwargs):
        self.mom=mom
        self.params=params
        self.avg_grads=[torch.zeros_like(p) for p in self.params] #avg_grad is weighted average using momentum
    def __call__(self,params=None,**kwargs):
        params = self.params if params is None else params
        self.avg_grads = [ self.mom*avg_grad+p.grad for p,avg_grad in zip(params,self.avg_grads) ]
        return {**kwargs,'params':params,'avg_grads': self.avg_grads}
class Weight_Decay:
    def __init__(self,params=None,lr=0.0001,wd=0.01,decouple=True,**kwargs):
        self.lr=lr
        self.wd=wd
        self.params=params
        self.decouple=decouple
    def __call__(self,**kwargs):
        params = self._do_wd() if self.decouple else self._do_l2_reg()
        return {**kwargs,'params':params}
    def _do_wd(self,**kwargs):
        params=[p*(1-self.lr*self.wd) for p in self.params]
        for p,mp in zip(params,self.params):
            p.grad=mp.grad
        return params #same as params-lr*wd*params
    #this one is pretty ugly 
    def _do_l2_reg(self,**kwargs):
        params=[deepcopy(p) for p in self.params]
        for p,mp in zip(params,self.params):
            p.grad=mp.grad + self.wd* mp
        return params
class OurSGD:
    hypers=[Weight_Decay,Momentum]
    def __init__(self,params,lr,**kwargs):
        self.lr=lr
        self.params=params
    def __call__(self,params=None,avg_grads=None,**kwargs):
        return {**kwargs,'params':[ p.add(-self.lr*avg) for p,avg in zip(params,avg_grads) ]}
class OurOptimizer:
    def __init__(self,params,lr,opt,**kwargs):
        self.state={'params':list(params),'lr':lr}
        self.cbs=[cls(**self.state,**kwargs) for cls in [*opt.hypers,opt]]
    def step(self):
        state=self.state
        for cb in self.cbs:
            state=cb(**state)
        return state['params']
class Learning_Rate_Decay:
    def __init__(self, params=None,sqr_mom=0.99,**kwargs):
        self.sqr_mom=sqr_mom
        self.sqr_avgs=[torch.zeros_like(p) for p in params]
    def __call__(self, params=None, dampening=True, **kwargs):
        damp = 1-self.sqr_mom if dampening else 1.
        self.sqr_avgs = [sqr_avg * self.sqr_mom + damp * p.grad.data ** 2 for p,sqr_avg in zip(params,self.sqr_avgs)]
        return { **kwargs,'params':params,'sqr_avgs':self.sqr_avgs}
class OurRMSProp:
    hypers=[Weight_Decay,Momentum,Learning_Rate_Decay]
    def __init__(self,lr,params,**kwargs):
        self.lr=lr
        self.params=params
    def __call__(self,params=None,avg_grads=None,eps=1e-08,sqr_avgs=None,**kwargs):
        return {**kwargs,'params':[ p.add(-self.lr*avg/(sqr_avg**(0.5)+eps)) for p,avg,sqr_avg in zip(params,avg_grads,sqr_avgs) ]}
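
Relative to SGD, the new piece is Learning_Rate_Decay: the step gets divided by the square root of a running average of squared gradients (plus eps), so the effective step size adapts to each parameter's gradient scale. A toy sketch of the first step, ignoring momentum (values assumed):

sqr_mom_toy, lr_toy, eps_toy = 0.95, 0.1, 1e-8
big_g, small_g = 4.0, 0.1                      # one parameter sees large gradients, one small
sqr_big = (1 - sqr_mom_toy) * big_g ** 2       # dampened running average, starting from zero
sqr_small = (1 - sqr_mom_toy) * small_g ** 2
print(lr_toy * big_g / (sqr_big ** 0.5 + eps_toy))     # ~0.447
print(lr_toy * small_g / (sqr_small ** 0.5 + eps_toy)) # ~0.447 -- the step sizes end up comparable
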
our_opt=OurOptimizer(m.parameters(),lr,OurRMSProp,sqr_mom=0.95)
our_parameters=our_opt.step()
opt.step()
parameters_equal(m.parameters(),our_parameters)
True
True
True
True
opt.zero_grad()
pred=m(x)
loss=l(pred,torch.zeros([pred.size()[0]],dtype=torch.long))
loss.backward()
our_parameters=our_opt.step()
opt.step()
parameters_equal(m.parameters(),our_parameters)
True
True
True
True

Adam

x = torch.randn([10,3,32,32])
m = nn.Sequential(nn.Conv2d(3,32,7,3,3),nn.Flatten(),nn.Linear(3872,2))
l = nn.CrossEntropyLoss()
lr=0.1
mom=0.9
wd=0.01
eps=1e-05
sqr_mom=0.95
opt=Adam(m.parameters(),lr,mom,sqr_mom,eps,wd)
pred=m(x)
loss=l(pred,torch.zeros([pred.size()[0]],dtype=torch.long))
loss.backward()
class Weight_Decay:
    def __init__(self,params=None,lr=0.0001,wd=0.01,decouple=True,**kwargs):
        self.lr=lr
        self.wd=wd
        self.params=params
        self.decouple=decouple
    def __call__(self,**kwargs):
        params = self._do_wd() if self.decouple else self._do_l2_reg()
        return {**kwargs,'params':params}
    def _do_wd(self,**kwargs):
        params=[p*(1-self.lr*self.wd) for p in self.params]
        for p,mp in zip(params,self.params):
            p.grad=mp.grad
        return params #same as params-lr*wd*params
    #this one is pretty ugly 
    def _do_l2_reg(self,**kwargs):
        params=[deepcopy(p) for p in self.params]
        for p,mp in zip(params,self.params):
            p.grad=mp.grad + self.wd* mp
        return params
class OurSGD:
    hypers=[Weight_Decay,Momentum]
    def __init__(self,params,lr,**kwargs):
        self.lr=lr
        self.params=params
    def __call__(self,params=None,avg_grads=None,**kwargs):
        return {**kwargs,'params':[ p.add(-self.lr*avg) for p,avg in zip(params,avg_grads) ]}
class OurOptimizer:
    def __init__(self,params,lr,opt,**kwargs):
        self.state={'params':list(params),'lr':lr}
        self.cbs=[cls(**self.state,**kwargs) for cls in [*opt.hypers,opt]]
    def step(self):
        state=self.state
        for cb in self.cbs:
            state=cb(**state)
        return state['params']
class Learning_Rate_Decay:
    def __init__(self, params=None,sqr_mom=0.99,**kwargs):
        self.sqr_mom=sqr_mom
        self.sqr_avgs=[torch.zeros_like(p) for p in params]
    def __call__(self, params=None, dampening=True, **kwargs):
        damp = 1-self.sqr_mom if dampening else 1.
        self.sqr_avgs = [sqr_avg * self.sqr_mom + damp * p.grad.data ** 2 for p,sqr_avg in zip(params,self.sqr_avgs)]
        return { **kwargs,'params':params,'sqr_avgs':self.sqr_avgs}
class OurRMSProp:
    hypers=[Weight_Decay,Momentum,Learning_Rate_Decay]
    def __init__(self,lr,params,**kwargs):
        self.lr=lr
        self.params=params
    def __call__(self,params=None,avg_grads=None,eps=1e-08,sqr_avgs=None,**kwargs):
        return {**kwargs,'params':[ p.add(-self.lr*avg/(sqr_avg**(0.5)+eps)) for p,avg,sqr_avg in zip(params,avg_grads,sqr_avgs) ]}
class Step:
    def __init__(self,**kwargs):
        self.step=0
    def __call__(self,**kwargs):
        self.step+=1
        return {'step':self.step,**kwargs}
class Momentum:
    def __init__(self,params=None,lr=0.0001,mom=0.9,**kwargs):
        self.mom=mom
        self.params=params
        self.avg_grads=[torch.zeros_like(p) for p in self.params] #avg_grad is weighted average using momentum
    def __call__(self,params=None,**kwargs):
        params = self.params if params is None else params
        self.avg_grads = [ self.mom*avg_grad+(1-self.mom)*p.grad for p,avg_grad in zip(params,self.avg_grads) ]
        return {**kwargs,'params':params,'avg_grads': self.avg_grads}
class OurAdam:
    hypers=[Weight_Decay,Momentum,Learning_Rate_Decay,Step]
    def __init__(self,lr,params,mom=0.9,sqr_mom=0.99,eps=1e-08,**kwargs):
        self.lr=lr
        self.params=params
        self.mom=mom
        self.sqr_mom=sqr_mom
        self.eps=eps
    def __call__(self,step=1,params=None,avg_grads=None,sqr_avgs=None,**kwargs): #eps=1e-08
        sqr_avgs=[sqr_avg/(1 - self.sqr_mom**step) for sqr_avg in sqr_avgs] #bias correction for the squared-gradient average
        avg_grads = [avg_grad / (1 - self.mom**step) for avg_grad in avg_grads] #bias correction for the gradient average
        return {**kwargs,'params':[ p.addcdiv( -self.lr ,grad_avg,(sqr_avg.sqrt() + self.eps )) for p,grad_avg,sqr_avg in zip(params,avg_grads,sqr_avgs) ]}
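
Adam adds two new pieces above: dampening in the momentum average (the (1-self.mom)*p.grad term) and the bias correction in OurAdam, which divides by 1 - mom**step and 1 - sqr_mom**step because both averages start at zero. A toy sketch of step one (values assumed):

mom_toy, sqr_mom_toy, g_toy = 0.9, 0.95, 0.5
avg_toy = (1 - mom_toy) * g_toy            # dampened average after one step: 0.05
sqr_toy = (1 - sqr_mom_toy) * g_toy ** 2   # dampened squared average after one step: 0.0125
print(avg_toy / (1 - mom_toy ** 1))        # 0.5  -- back on the scale of the gradient itself
print(sqr_toy / (1 - sqr_mom_toy ** 1))    # 0.25 -- back on the scale of the squared gradient
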
our_opt=OurOptimizer(m.parameters(),lr,OurAdam,eps=eps,sqr_mom=0.95)
our_parameters=our_opt.step()
opt.step()
#show the parameter values that are close, but not bit-for-bit equal
def parameters_equal_show(mps,ops):
    for mp,op in zip((mps),ops):
        print(mp.masked_select((mp-op).abs()>1e-08),op.masked_select((mp-op).abs()>1e-08))
        break
parameters_equal_show(m.parameters(),our_parameters)
tensor([ 0.1133,  0.1766,  0.2099,  0.1881,  0.1587,  0.2367,  0.2189,  0.2101,
         0.1876, -0.2224, -0.2393, -0.1429,  0.2033,  0.1898, -0.1359, -0.1989,
        -0.2205,  0.1676, -0.2199, -0.1297, -0.1014, -0.1833, -0.1241, -0.1455,
        -0.2361,  0.0979, -0.1395, -0.1275,  0.1032,  0.1991, -0.1265, -0.2069,
        -0.2465,  0.2102, -0.0968, -0.2100,  0.1213, -0.1896,  0.1769, -0.2089,
        -0.1518,  0.1592,  0.2491,  0.2233, -0.2382, -0.1474,  0.1746, -0.2236,
         0.2342, -0.1417,  0.1879, -0.1650, -0.2419, -0.1232, -0.2413, -0.2258,
         0.1095, -0.1980, -0.1882, -0.1661, -0.1769,  0.2334, -0.1878,  0.1639,
        -0.2143,  0.1531, -0.1679,  0.2016, -0.1644,  0.1493,  0.2032, -0.1417,
        -0.1873,  0.1333, -0.1954,  0.1659, -0.1852, -0.1532, -0.1191, -0.2032,
         0.2201, -0.1217,  0.1954, -0.1297,  0.2288, -0.1301, -0.2084, -0.2102,
        -0.1571, -0.1803, -0.1516,  0.2013, -0.2338, -0.2348,  0.2308, -0.1777,
        -0.1932,  0.2142, -0.1990,  0.1751, -0.1063,  0.1684,  0.1321, -0.2281,
         0.2266,  0.2006, -0.1572, -0.1618,  0.1587, -0.1836, -0.1399,  0.1631,
         0.2076, -0.1600,  0.1584,  0.1980, -0.1637, -0.2027,  0.1153, -0.1639,
         0.2354,  0.2395,  0.1963,  0.1393,  0.1422, -0.2111,  0.1376, -0.2402,
        -0.1623,  0.2126, -0.2116,  0.1634,  0.2348, -0.1085,  0.1771,  0.1734,
         0.2279, -0.0948,  0.2020,  0.1733,  0.1934, -0.2217, -0.1422, -0.2375,
         0.1274,  0.0943,  0.1806, -0.2292, -0.2484,  0.0879, -0.0870,  0.2374,
        -0.2179,  0.1361, -0.2142,  0.1657,  0.2259,  0.1768, -0.1184,  0.1224,
         0.1537,  0.2078, -0.1869,  0.1412, -0.1900,  0.2006,  0.1538, -0.2469,
         0.1525,  0.1741, -0.1754, -0.1602,  0.1738, -0.1929, -0.2245, -0.1532,
         0.2110, -0.1917, -0.2124, -0.1408,  0.2142,  0.1549,  0.2338,  0.1441,
         0.1377, -0.0856,  0.1355,  0.1945,  0.1407, -0.1251, -0.1614, -0.1672,
        -0.1015, -0.2143, -0.2052, -0.1670, -0.1607, -0.1440,  0.1313, -0.2089,
        -0.1839,  0.1929, -0.1760, -0.1672,  0.1439,  0.2235,  0.1692, -0.2194,
         0.2251,  0.2327,  0.1336,  0.2163,  0.1336,  0.1259, -0.1727,  0.2150,
         0.1043, -0.2463, -0.1123, -0.2426, -0.1792, -0.2002,  0.1751,  0.2148,
         0.1448, -0.1380, -0.1563, -0.1871,  0.1831, -0.2145,  0.1607, -0.1265,
        -0.1911,  0.1194, -0.2165,  0.1499, -0.2241,  0.2112, -0.2148,  0.2091,
        -0.1458, -0.1751, -0.1461, -0.0967, -0.2017,  0.2188, -0.2264, -0.1651,
         0.1476,  0.1527,  0.1455,  0.1243, -0.1918,  0.2038,  0.1374, -0.1014,
         0.1878,  0.1480,  0.2019, -0.2465,  0.1896, -0.0874,  0.1408,  0.1890,
        -0.1063, -0.1180,  0.2152, -0.2209, -0.0888, -0.1289, -0.0962, -0.1288,
        -0.1128, -0.2351, -0.1592,  0.2114, -0.1509,  0.1655,  0.1508, -0.2424,
         0.1167, -0.1288,  0.1192,  0.2172,  0.2097,  0.1360,  0.1987,  0.1679,
        -0.2080,  0.1977,  0.1119, -0.2058,  0.2321, -0.2175,  0.1355, -0.1258,
         0.2299, -0.1452,  0.1001,  0.2152,  0.1421,  0.1147,  0.2330, -0.1812,
        -0.2164, -0.1955, -0.2077,  0.0908, -0.1649,  0.1881, -0.1824,  0.2457,
        -0.2432,  0.1121, -0.1637, -0.2450,  0.1257, -0.1627,  0.1748,  0.2427,
        -0.1599, -0.1931, -0.1066,  0.1651, -0.1312, -0.1043, -0.2015, -0.1110,
         0.1249,  0.1858,  0.2090,  0.1888, -0.1954, -0.2169,  0.2114,  0.1973,
        -0.2127, -0.2306,  0.2417, -0.2018,  0.1643, -0.1919,  0.1861, -0.2181,
         0.1240,  0.1651, -0.1849,  0.1898,  0.2095, -0.1603,  0.2033,  0.1576,
         0.2191, -0.2073,  0.1760, -0.2221,  0.2012, -0.2427, -0.2261, -0.2418,
         0.0866, -0.1986,  0.1042,  0.1729, -0.2225,  0.2179, -0.2128, -0.2064,
         0.0927, -0.1463, -0.1697, -0.1840, -0.1575, -0.1765, -0.1417, -0.2115,
         0.2077,  0.1541, -0.1145,  0.2130, -0.2225, -0.1029, -0.1301,  0.2200,
        -0.1071, -0.2340,  0.1184, -0.1121,  0.2498, -0.1454, -0.1599, -0.1192,
         0.2128, -0.2047, -0.1026, -0.1550,  0.1381,  0.1884, -0.1259,  0.1918,
        -0.1030, -0.1590, -0.2230,  0.2229, -0.2175,  0.1678, -0.2143, -0.1747,
        -0.1552,  0.1536,  0.1499,  0.1595, -0.1967, -0.1533, -0.2196,  0.1193,
        -0.1477,  0.2348,  0.2465, -0.2066,  0.1739, -0.1321,  0.2126,  0.1777,
        -0.1872,  0.1361, -0.2462,  0.1694,  0.1474,  0.1672,  0.1734, -0.1947,
         0.1702, -0.1690,  0.1698, -0.1295,  0.1401, -0.1864, -0.1566,  0.1362,
         0.1980,  0.1747, -0.2286, -0.1648,  0.2033, -0.1179,  0.1884,  0.1921,
        -0.2183,  0.2279, -0.2139, -0.0851,  0.2477,  0.1419, -0.1451, -0.1677,
         0.2460,  0.2305,  0.1868, -0.2343, -0.1505,  0.2287,  0.1314,  0.2475,
        -0.1446, -0.1734,  0.1849,  0.1168, -0.2334, -0.2158, -0.2291, -0.2386,
         0.1709, -0.2310, -0.1924, -0.2474, -0.1943, -0.2114, -0.2292,  0.1596,
        -0.2397,  0.2005,  0.1538, -0.2183, -0.2213,  0.1287,  0.1810,  0.1724,
         0.1620, -0.1071,  0.1933,  0.2109, -0.2471, -0.1865,  0.2191,  0.1049,
         0.1630,  0.1005, -0.1664,  0.1153,  0.1687, -0.0914, -0.2499,  0.1455,
        -0.1582, -0.2422, -0.1850,  0.1949,  0.2278,  0.1610, -0.1282,  0.2283,
         0.2352,  0.1533, -0.2019, -0.1028,  0.1885,  0.1379,  0.1058,  0.1140,
        -0.2087, -0.1397, -0.1373,  0.2404,  0.2000, -0.1603,  0.1448, -0.2044,
         0.1402,  0.2451,  0.1484,  0.1715,  0.2371, -0.1006,  0.1071, -0.1647,
         0.1361, -0.1388, -0.1953, -0.2061,  0.1237, -0.2190, -0.2424,  0.2041,
         0.2076, -0.2180, -0.1903, -0.1864,  0.1390,  0.2429,  0.1950,  0.2327,
        -0.1784, -0.2185,  0.1452,  0.1724,  0.0899, -0.0891, -0.1403,  0.1413,
         0.2000,  0.1347, -0.2395, -0.1211,  0.1891, -0.1858,  0.2475,  0.2388,
        -0.1659, -0.2133,  0.2159,  0.2232,  0.1672,  0.1641, -0.2145,  0.1735,
        -0.1986,  0.1812,  0.1669, -0.1667, -0.1812, -0.1491, -0.1656,  0.2409,
        -0.1922,  0.1408, -0.1686,  0.2273, -0.1791, -0.1835, -0.1541, -0.1754,
         0.2418,  0.2302,  0.2209, -0.1581, -0.1870,  0.1369,  0.2049,  0.2351,
         0.2441,  0.2287,  0.2472, -0.1304,  0.2357,  0.1048, -0.2482,  0.1144,
         0.1021, -0.1148,  0.1297,  0.1271,  0.1215,  0.1378],
       grad_fn=<MaskedSelectBackward>) tensor([ 0.1133,  0.1766,  0.2099,  0.1881,  0.1587,  0.2367,  0.2189,  0.2101,
         0.1876, -0.2224, -0.2393, -0.1429,  0.2033,  0.1898, -0.1359, -0.1989,
        -0.2205,  0.1676, -0.2199, -0.1297, -0.1014, -0.1833, -0.1241, -0.1455,
        -0.2361,  0.0979, -0.1395, -0.1275,  0.1032,  0.1991, -0.1265, -0.2069,
        -0.2465,  0.2102, -0.0968, -0.2100,  0.1213, -0.1896,  0.1769, -0.2089,
        -0.1518,  0.1592,  0.2491,  0.2233, -0.2382, -0.1474,  0.1746, -0.2236,
         0.2342, -0.1417,  0.1879, -0.1650, -0.2419, -0.1232, -0.2413, -0.2258,
         0.1095, -0.1980, -0.1882, -0.1661, -0.1769,  0.2334, -0.1878,  0.1639,
        -0.2143,  0.1531, -0.1679,  0.2016, -0.1644,  0.1493,  0.2032, -0.1417,
        -0.1873,  0.1333, -0.1954,  0.1659, -0.1852, -0.1532, -0.1191, -0.2032,
         0.2201, -0.1217,  0.1954, -0.1297,  0.2288, -0.1301, -0.2084, -0.2102,
        -0.1571, -0.1803, -0.1516,  0.2013, -0.2338, -0.2348,  0.2308, -0.1777,
        -0.1932,  0.2142, -0.1990,  0.1751, -0.1063,  0.1684,  0.1321, -0.2281,
         0.2266,  0.2006, -0.1572, -0.1618,  0.1587, -0.1836, -0.1399,  0.1631,
         0.2076, -0.1600,  0.1584,  0.1980, -0.1637, -0.2027,  0.1153, -0.1639,
         0.2354,  0.2395,  0.1963,  0.1393,  0.1422, -0.2111,  0.1376, -0.2402,
        -0.1623,  0.2126, -0.2116,  0.1634,  0.2348, -0.1085,  0.1771,  0.1734,
         0.2279, -0.0948,  0.2020,  0.1733,  0.1934, -0.2217, -0.1422, -0.2375,
         0.1274,  0.0943,  0.1806, -0.2292, -0.2484,  0.0879, -0.0870,  0.2374,
        -0.2179,  0.1361, -0.2142,  0.1657,  0.2259,  0.1768, -0.1184,  0.1224,
         0.1537,  0.2078, -0.1869,  0.1412, -0.1900,  0.2006,  0.1538, -0.2469,
         0.1525,  0.1741, -0.1754, -0.1602,  0.1738, -0.1929, -0.2245, -0.1532,
         0.2110, -0.1917, -0.2124, -0.1408,  0.2142,  0.1549,  0.2338,  0.1441,
         0.1377, -0.0856,  0.1355,  0.1945,  0.1407, -0.1251, -0.1614, -0.1672,
        -0.1015, -0.2143, -0.2052, -0.1670, -0.1607, -0.1440,  0.1313, -0.2089,
        -0.1839,  0.1929, -0.1760, -0.1672,  0.1439,  0.2235,  0.1692, -0.2194,
         0.2251,  0.2327,  0.1336,  0.2163,  0.1336,  0.1259, -0.1727,  0.2150,
         0.1043, -0.2463, -0.1123, -0.2426, -0.1792, -0.2002,  0.1751,  0.2148,
         0.1448, -0.1380, -0.1563, -0.1871,  0.1831, -0.2145,  0.1607, -0.1265,
        -0.1911,  0.1194, -0.2165,  0.1499, -0.2241,  0.2112, -0.2148,  0.2091,
        -0.1458, -0.1751, -0.1461, -0.0967, -0.2017,  0.2188, -0.2264, -0.1651,
         0.1476,  0.1527,  0.1455,  0.1243, -0.1918,  0.2038,  0.1374, -0.1014,
         0.1878,  0.1480,  0.2019, -0.2465,  0.1896, -0.0874,  0.1408,  0.1890,
        -0.1063, -0.1180,  0.2152, -0.2209, -0.0888, -0.1289, -0.0962, -0.1288,
        -0.1128, -0.2351, -0.1592,  0.2114, -0.1509,  0.1655,  0.1508, -0.2424,
         0.1167, -0.1288,  0.1192,  0.2172,  0.2097,  0.1360,  0.1987,  0.1679,
        -0.2080,  0.1977,  0.1119, -0.2058,  0.2321, -0.2175,  0.1355, -0.1258,
         0.2299, -0.1452,  0.1001,  0.2152,  0.1421,  0.1147,  0.2330, -0.1812,
        -0.2164, -0.1955, -0.2077,  0.0908, -0.1649,  0.1881, -0.1824,  0.2457,
        -0.2432,  0.1121, -0.1637, -0.2450,  0.1257, -0.1627,  0.1748,  0.2427,
        -0.1599, -0.1931, -0.1066,  0.1651, -0.1312, -0.1043, -0.2015, -0.1110,
         0.1249,  0.1858,  0.2090,  0.1888, -0.1954, -0.2169,  0.2114,  0.1973,
        -0.2127, -0.2306,  0.2417, -0.2018,  0.1643, -0.1919,  0.1861, -0.2181,
         0.1240,  0.1651, -0.1849,  0.1898,  0.2095, -0.1603,  0.2033,  0.1576,
         0.2191, -0.2073,  0.1760, -0.2221,  0.2012, -0.2427, -0.2261, -0.2418,
         0.0866, -0.1986,  0.1042,  0.1729, -0.2225,  0.2179, -0.2128, -0.2064,
         0.0927, -0.1463, -0.1697, -0.1840, -0.1575, -0.1765, -0.1417, -0.2115,
         0.2077,  0.1541, -0.1145,  0.2130, -0.2225, -0.1029, -0.1301,  0.2200,
        -0.1071, -0.2340,  0.1184, -0.1121,  0.2498, -0.1454, -0.1599, -0.1192,
         0.2128, -0.2047, -0.1026, -0.1550,  0.1381,  0.1884, -0.1259,  0.1918,
        -0.1030, -0.1590, -0.2230,  0.2229, -0.2175,  0.1678, -0.2143, -0.1747,
        -0.1552,  0.1536,  0.1499,  0.1595, -0.1967, -0.1533, -0.2196,  0.1193,
        -0.1477,  0.2348,  0.2465, -0.2066,  0.1739, -0.1321,  0.2126,  0.1777,
        -0.1872,  0.1361, -0.2462,  0.1694,  0.1474,  0.1672,  0.1734, -0.1947,
         0.1702, -0.1690,  0.1698, -0.1295,  0.1401, -0.1864, -0.1566,  0.1362,
         0.1980,  0.1747, -0.2286, -0.1648,  0.2033, -0.1179,  0.1884,  0.1921,
        -0.2183,  0.2279, -0.2139, -0.0851,  0.2477,  0.1419, -0.1451, -0.1677,
         0.2460,  0.2305,  0.1868, -0.2343, -0.1505,  0.2287,  0.1314,  0.2475,
        -0.1446, -0.1734,  0.1849,  0.1168, -0.2334, -0.2158, -0.2291, -0.2386,
         0.1709, -0.2310, -0.1924, -0.2474, -0.1943, -0.2114, -0.2292,  0.1596,
        -0.2397,  0.2005,  0.1538, -0.2183, -0.2213,  0.1287,  0.1810,  0.1724,
         0.1620, -0.1071,  0.1933,  0.2109, -0.2471, -0.1865,  0.2191,  0.1049,
         0.1630,  0.1005, -0.1664,  0.1153,  0.1687, -0.0914, -0.2499,  0.1455,
        -0.1582, -0.2422, -0.1850,  0.1949,  0.2278,  0.1610, -0.1282,  0.2283,
         0.2352,  0.1533, -0.2019, -0.1028,  0.1885,  0.1379,  0.1058,  0.1140,
        -0.2087, -0.1397, -0.1373,  0.2404,  0.2000, -0.1603,  0.1448, -0.2044,
         0.1402,  0.2451,  0.1484,  0.1715,  0.2371, -0.1006,  0.1071, -0.1647,
         0.1361, -0.1388, -0.1953, -0.2061,  0.1237, -0.2190, -0.2424,  0.2041,
         0.2076, -0.2180, -0.1903, -0.1864,  0.1390,  0.2429,  0.1950,  0.2327,
        -0.1784, -0.2185,  0.1452,  0.1724,  0.0899, -0.0891, -0.1403,  0.1413,
         0.2000,  0.1347, -0.2395, -0.1211,  0.1891, -0.1858,  0.2475,  0.2388,
        -0.1659, -0.2133,  0.2159,  0.2232,  0.1672,  0.1641, -0.2145,  0.1735,
        -0.1986,  0.1812,  0.1669, -0.1667, -0.1812, -0.1491, -0.1656,  0.2409,
        -0.1922,  0.1408, -0.1686,  0.2273, -0.1791, -0.1835, -0.1541, -0.1754,
         0.2418,  0.2302,  0.2209, -0.1581, -0.1870,  0.1369,  0.2049,  0.2351,
         0.2441,  0.2287,  0.2472, -0.1304,  0.2357,  0.1048, -0.2482,  0.1144,
         0.1021, -0.1148,  0.1297,  0.1271,  0.1215,  0.1378],
       grad_fn=<MaskedSelectBackward>)
parameters_equal(m.parameters(),our_parameters)
True
True
True
True
opt.zero_grad()
pred=m(x)
loss=l(pred,torch.zeros([pred.size()[0]],dtype=torch.long))
loss.backward()
our_parameters=our_opt.step()
opt.step()
parameters_equal(m.parameters(),our_parameters)
True
True
True
True
opt.hypers
(#1) [{'wd': 0.01, 'sqr_mom': 0.95, 'lr': 0.1, 'mom': 0.9, 'eps': 1e-05}]