from fastai.vision.all import *
# Download/locate the Imagenette-320 dataset and open one training image as a
# numpy array (rows, cols, channels).
# NOTE(review): reconstructed from a scrambled notebook export — assignment
# targets had been displaced to the ends of lines.
path = untar_data(URLs.IMAGENETTE_320)
img = array(Image.open((path/'train').ls()[0].ls()[0]))
show_image(img), img.shape
# -> (<AxesSubplot:>, (320, 463, 3))
# Schedule: question time, this notebook, how to read a research paper, presentation (less formal)
# Demo: draw 8 distinct indices from range(14) (sampling without replacement).
np.random.choice(np.arange(14), 8, replace=False)
# -> e.g. array([2, 5, 1, 0, 9, 8, 6, 7])
img.shape
# -> (320, 463, 3)
# Half-size dimensions; integer division floors the odd width (463 -> 231).
img.shape[0]//2, img.shape[1]//2
# -> (160, 231)
# Top-left quadrant crop: keep the first half of the rows and columns.
ex = img[:img.shape[0]//2, :img.shape[1]//2]
show_image(ex)
# -> <AxesSubplot:>
# Center crop: trim a quarter of the height/width from each side.
img.shape[0]//4, -img.shape[0]//4
# -> (80, -80)
ex = img[img.shape[0]//4:-img.shape[0]//4,
         img.shape[1]//4:-img.shape[1]//4]
show_image(ex)
# -> <AxesSubplot:>
ex.shape, img.shape
# -> ((160, 232, 3), (320, 463, 3))
show_image(img)
# -> <AxesSubplot:>
# Counting down: range with a negative step excludes the stop value (0).
list(range(10, 0, -1))
# -> [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
show_image(img), img.shape
# -> (<AxesSubplot:>, (320, 463, 3))
# 2x downsample by strided slicing. `:-1:2` drops the final row/column so
# both offset grids end up the same shape when a dimension is odd (463 -> 231).
show_image(img[:-1:2, :-1:2]), img[:-1:2, :-1:2].shape
# -> (<AxesSubplot:>, (160, 231, 3))
show_image(img[1::2, 1::2]), img[1::2, 1::2].shape
# -> (<AxesSubplot:>, (160, 231, 3))
# Averaging the two offset grids. Using `img[::2,::2]` instead keeps 232
# columns and fails to broadcast:
#   ValueError: operands could not be broadcast together with
#   shapes (160,232,3) (160,231,3)
img[1::2, 1::2].shape
# -> (160, 231, 3)
ex = img[:-1:2, :-1:2]//2 + img[1::2, 1::2]//2
show_image(ex), ex.shape
# -> (<AxesSubplot:>, (160, 231, 3))
# Grayscale conversion: a weighted average over the channel axis, written as a
# matmul of a (3,) weight vector with img[..., None] of shape (H, W, 3, 1).
array([1/3, 1/3, 1/3])
# -> array([0.33333333, 0.33333333, 0.33333333])
(array([1/3, 1/3, 1/3]) @ img[..., None]).shape
# -> (320, 463, 1)
ex1 = (np.array([1/2, 1/2, 1/2]) @ img[..., None]).clip(0, 255)
ex2 = (array([1/3, 1/3, 1/3]) @ img[..., None]).clip(0, 255)
ex1.max(), ex2.max()
# transcript showed (765, 255.0) — NOTE(review): 765 = 3*255 suggests this
# output came from a stale cell run with unit weights; verify by re-running.
show_image(ex1, cmap='gray'), show_image(ex2, cmap='gray')
# -> (<AxesSubplot:>, <AxesSubplot:>)
# Normalize with ImageNet statistics; helper decodes a normalized batch back
# to displayable [0,1] images.
norm_tfm = Normalize.from_stats(*imagenet_stats, cuda=False)
def show_norm(img):
    """Decode a normalized batch and display it on a 3-row grid."""
    show_images((norm_tfm.decode(img).clamp(0, 1)), nrows=3)
# HWC uint8 -> normalized float batch of shape (1, C, H, W).
norm_img = norm_tfm(TensorImage(img.transpose(2, 0, 1)).float()[None]/255)
noise = torch.randn_like(norm_img)
# 12 mixing levels in [0, 1], shaped (12, 1, 1, 1) to broadcast over (1, C, H, W).
As = torch.linspace(0, 1, 12)[..., None, None, None]; As.squeeze()
# -> tensor([0.0000, 0.0909, 0.1818, 0.2727, 0.3636, 0.4545, 0.5455, 0.6364,
#            0.7273, 0.8182, 0.9091, 1.0000])
# Explore signal/noise mixing weights for a diffusion-style corruption:
# sqrt(a)*image + noise-weight*noise, comparing two candidate noise weights.
(As)**.5 * norm_img
(1 - As**.5).squeeze()
# -> tensor([1.0000, 0.6985, 0.5736, 0.4778, 0.3970, 0.3258, 0.2615, 0.2023,
#            0.1472, 0.0955, 0.0465, 0.0000])
((1 - As)**.5).squeeze()
# -> tensor([1.0000, 0.9535, 0.9045, 0.8528, 0.7977, 0.7385, 0.6742, 0.6030,
#            0.5222, 0.4264, 0.3015, 0.0000])
# sqrt(1-a) keeps the mix variance-preserving; (1 - sqrt(a)) does not.
show_norm((As)**.5 * norm_img + (1 - As)**.5 * noise)
show_norm((As)**.5 * norm_img + (1 - As**.5) * noise)
# Inspect the schedule tensor and visualize the image term and the noise term
# of the mix separately.
As.squeeze(), As.shape
# -> (tensor([0.0000, 0.0909, 0.1818, 0.2727, 0.3636, 0.4545, 0.5455, 0.6364,
#             0.7273, 0.8182, 0.9091, 1.0000]),
#     torch.Size([12, 1, 1, 1]))
norm_img.shape
# -> torch.Size([1, 3, 320, 463])
show_norm((As)**.5 * norm_img)
# Noise weights 1 - sqrt(a), still shaped (12, 1, 1, 1) for broadcasting:
1 - (As)**.5
# -> tensor([[[[1.0000]]], [[[0.6985]]], [[[0.5736]]], [[[0.4778]]],
#            [[[0.3970]]], [[[0.3258]]], [[[0.2615]]], [[[0.2023]]],
#            [[[0.1472]]], [[[0.0955]]], [[[0.0465]]], [[[0.0000]]]])
show_norm((1 - As)**.5 * noise)