diff --git a/docs/callback.cutmixup.html b/docs/callback.cutmixup.html
index 91dba53..a854339 100644
--- a/docs/callback.cutmixup.html
+++ b/docs/callback.cutmixup.html
@@ -102,7 +102,7 @@ class MixUp

-class CutMix[source]
-CutMix(alpha:float=1.0, uniform:bool=True, interp_label:bool | None=None) :: MixHandlerX
+class CutMix[source]
+CutMix(alpha:float=1.0, uniform:bool=True, interp_label:bool | None=None) :: MixHandlerX

 Implementation of https://arxiv.org/abs/1905.04899. Supports MultiLoss
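For context, attaching `CutMix` to a fastai `Learner` is a one-liner. A minimal sketch, assuming the fastai and fastxtend vision imports and an existing `dls` (names are illustrative, not from this patch):

```python
from fastai.vision.all import *
from fastxtend.vision.all import *

# alpha and uniform shown at their defaults; per the _uniform_cutmix/_multi_cutmix
# split in the code below, uniform=True cuts one patch size for the whole batch,
# while uniform=False samples a patch per item
learn = Learner(dls, resnet34(num_classes=dls.c), cbs=CutMix(alpha=1.0, uniform=True))
learn.fit_one_cycle(1, 3e-3)
```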
@@ -163,7 +163,7 @@ class CutMix

-class CutMixUp[source]
-CutMixUp(mix_alpha:float=0.4, cut_alpha:float=1.0, mixup_ratio:Number=1, cutmix_ratio:Number=1, cutmix_uniform:bool=True, element:bool=True, interp_label:bool | None=None) :: MixUp
+class CutMixUp[source]
+CutMixUp(mix_alpha:float=0.4, cut_alpha:float=1.0, mixup_ratio:Number=1, cutmix_ratio:Number=1, cutmix_uniform:bool=True, element:bool=True, interp_label:bool | None=None) :: MixUp

 Combo implementation of https://arxiv.org/abs/1710.09412 and https://arxiv.org/abs/1905.04899

 Supports element-wise application of MixUp and CutMix on a batch.
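As exercised in the notebook's test cells later in this patch, `CutMixUp` drops in the same way. A minimal sketch, assuming an existing `dls`, with the ratios at their defaults:

```python
from fastai.vision.all import *
from fastxtend.vision.all import *

# With element=True (the default) each batch item is assigned MixUp or CutMix,
# sampled in proportion to mixup_ratio:cutmix_ratio (1:1 here); element=False
# applies a single choice to the entire batch instead.
learn = Learner(dls, resnet34(num_classes=dls.c),
                cbs=CutMixUp(mix_alpha=0.4, cut_alpha=1.0, mixup_ratio=1, cutmix_ratio=1))
learn.fit_one_cycle(1, 3e-3)
```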
@@ -249,7 +249,7 @@ class CutMixUp

-class CutMixUpAugment[source]
-CutMixUpAugment(mix_alpha:float=0.4, cut_alpha:float=1.0, mixup_ratio:Number=1, cutmix_ratio:Number=1, augment_ratio:Number=1, augment_finetune:Number | None=None, cutmix_uniform:bool=True, cutmixup_augs:listified[Transform | Callable[..., Transform]] | None=None, element:bool=True, interp_label:bool | None=None) :: MixUp
+class CutMixUpAugment[source]
+CutMixUpAugment(mix_alpha:float=0.4, cut_alpha:float=1.0, mixup_ratio:Number=1, cutmix_ratio:Number=1, augment_ratio:Number=1, augment_finetune:Number | None=None, cutmix_uniform:bool=True, cutmixup_augs:listified[Transform | Callable[..., Transform]] | None=None, element:bool=True, interp_label:bool | None=None) :: MixUp

 Combo implementation of https://arxiv.org/abs/1710.09412 and https://arxiv.org/abs/1905.04899 plus Augmentation.

 Supports element-wise application of MixUp, CutMix, and Augmentation on a batch.
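`CutMixUpAugment` additionally takes over the batch augmentations: per the `before_batch` changes below, items are routed through the callback's internal `_aug_pipe`/`_cutmixaugs_pipe` rather than running every batch transform on every item. A minimal sketch mirroring the notebook's test cells (assumes an existing `dls`):

```python
from fastai.vision.all import *
from fastxtend.vision.all import *

# cutmixup_augs run on the MixUp/CutMix items in place of the original
# augmentations; augment_ratio weights plain augmentation against MixUp/CutMix.
learn = Learner(dls, resnet34(num_classes=dls.c),
                cbs=CutMixUpAugment(cutmix_uniform=True, cutmixup_augs=aug_transforms()))
learn.fit_one_cycle(1, 3e-3)
```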
diff --git a/fastxtend/__init__.py b/fastxtend/__init__.py
index 3dc1f76..9b36b86 100644
--- a/fastxtend/__init__.py
+++ b/fastxtend/__init__.py
@@ -1 +1 @@
-__version__ = "0.1.0"
+__version__ = "0.0.10"
diff --git a/fastxtend/callback/cutmixup.py b/fastxtend/callback/cutmixup.py
index 2fd02e0..1b72b0f 100644
--- a/fastxtend/callback/cutmixup.py
+++ b/fastxtend/callback/cutmixup.py
@@ -9,7 +9,7 @@
 # Cell
 #nbdev_comment from __future__ import annotations
 
-from torch.distributions import Bernoulli, Categorical
+from torch.distributions import Categorical
 from torch.distributions.beta import Beta
 
 from fastcore.transform import Pipeline, Transform
@@ -44,7 +44,9 @@ def before_batch(self):
         self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
 
     def _mixup(self, bs):
-        lam = self.distrib.sample((bs,)).squeeze().to(self.x.device)
+        lam = self.distrib.sample((bs,)).to(self.x.device)
+        if len(lam.shape) > 1:
+            lam = lam.squeeze()
         lam = torch.stack([lam, 1-lam], 1)
         return lam.max(1)[0]
@@ -77,8 +79,8 @@ def before_batch(self):
 
     def _uniform_cutmix(self, xb, xb1, H, W):
         "Add uniform patches and blend labels from another random item in batch"
-        self.lam = self.distrib.sample((1,)).to(self.x.device)
-        x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)
+        lam = self.distrib.sample((1,)).to(self.x.device)
+        x1, y1, x2, y2 = self.rand_bbox(W, H, lam)
         xb[..., y1:y2, x1:x2] = xb1[..., y1:y2, x1:x2]
         lam = (1 - ((x2-x1)*(y2-y1))/float(W*H))
         return xb, lam
@@ -149,17 +151,19 @@ def before_batch(self):
         xb1, self.yb1 = xb[shuffle], self.yb1[shuffle]
 
         # Apply MixUp
-        self.distrib = self.mix_distrib
-        self.lam[aug_type==0] = MixUp._mixup(self, xb[aug_type==0].shape[0])
-        xb[aug_type==0] = torch.lerp(xb1[aug_type==0], xb[aug_type==0], weight=unsqueeze(self.lam[aug_type==0], n=3))
+        if (aug_type==0).sum() > 0:
+            self.distrib = self.mix_distrib
+            self.lam[aug_type==0] = MixUp._mixup(self, xb[aug_type==0].shape[0])
+            xb[aug_type==0] = torch.lerp(xb1[aug_type==0], xb[aug_type==0], weight=unsqueeze(self.lam[aug_type==0], n=3))
 
         # Apply CutMix
         bs, _, H, W = xb[aug_type==1].size()
-        self.distrib = self.cut_distrib
-        if self.cutmix_uniform:
-            xb[aug_type==1], self.lam[aug_type==1] = CutMix._uniform_cutmix(self, xb[aug_type==1], xb1[aug_type==1], H, W)
-        else:
-            xb[aug_type==1], self.lam[aug_type==1] = CutMix._multi_cutmix(self, xb[aug_type==1], xb1[aug_type==1], H, W, bs)
+        if bs > 0:
+            self.distrib = self.cut_distrib
+            if self.cutmix_uniform:
+                xb[aug_type==1], self.lam[aug_type==1] = CutMix._uniform_cutmix(self, xb[aug_type==1], xb1[aug_type==1], H, W)
+            else:
+                xb[aug_type==1], self.lam[aug_type==1] = CutMix._multi_cutmix(self, xb[aug_type==1], xb1[aug_type==1], H, W, bs)
 
         self.learn.xb = (xb,)
         if not self.stack_y:
@@ -175,6 +179,10 @@ def before_batch(self):
             self.distrib = self.cut_distrib
             CutMix.before_batch(self)
 
+# Internal Cell
+def _do_cutmixaug(t:Tensor):
+    return t.sum().item() > 0
+
 # Cell
 class CutMixUpAugment(MixUp, CutMix):
     """
@@ -263,8 +271,10 @@ def before_batch(self):
         bs, C, H, W = xb.size()
         self.lam = torch.zeros(bs, device=xb.device)
         aug_type = self.categorical.sample((bs,))
-        shuffle = torch.randperm(xb[aug_type<2].shape[0]).to(xb.device)
-        self.yb1[aug_type<2] = self.yb1[aug_type<2][shuffle]
+        do_mix, do_cut, do_aug = _do_cutmixaug(aug_type==0), _do_cutmixaug(aug_type==1), _do_cutmixaug(aug_type==2)
+        if do_mix or do_cut:
+            shuffle = torch.randperm(xb[aug_type<2].shape[0]).to(xb.device)
+            self.yb1[aug_type<2] = self.yb1[aug_type<2][shuffle]
 
         # Apply IntToFloat to all samples
         xb = self._inttofloat_pipe(xb)
@@ -273,33 +283,40 @@
         xb2 = torch.zeros([bs, C, self._size[0], self._size[1]], dtype=xb.dtype, device=xb.device) if self._size is not None else torch.zeros_like(xb)
 
         # Apply MixUp/CutMix Augmentations to MixUp and CutMix samples
-        if self._docutmixaug:
-            xb2[aug_type<2] = self._cutmixaugs_pipe(xb[aug_type<2])
-        else:
-            xb2[aug_type<2] = xb[aug_type<2]
+        if do_mix or do_cut:
+            if self._docutmixaug:
+                xb2[aug_type<2] = self._cutmixaugs_pipe(xb[aug_type<2])
+            else:
+                xb2[aug_type<2] = xb[aug_type<2]
 
         # Original Augmentations
-        xb2[aug_type==2] = self._aug_pipe(xb[aug_type==2])
+        if do_aug:
+            xb2[aug_type==2] = self._aug_pipe(xb[aug_type==2])
 
         # Possibly Resized xb and shuffled xb1
         xb = xb2
-        xb1 = xb[aug_type<2][shuffle]
+        if do_mix or do_cut:
+            xb1 = xb[aug_type<2][shuffle]
 
         # Apply MixUp
-        self.distrib = self.mix_distrib
-        self.lam[aug_type==0] = MixUp._mixup(self, xb[aug_type==0].shape[0])
-        xb[aug_type==0] = torch.lerp(xb1[aug_type[aug_type<2]==0], xb[aug_type==0], weight=unsqueeze(self.lam[aug_type==0], n=3))
+        if do_mix:
+            self.distrib = self.mix_distrib
+            self.lam[aug_type==0] = MixUp._mixup(self, xb[aug_type==0].shape[0])
+            xb[aug_type==0] = torch.lerp(xb1[aug_type[aug_type<2]==0], xb[aug_type==0], weight=unsqueeze(self.lam[aug_type==0], n=3))
 
         # Apply CutMix
-        bs, _, H, W = xb[aug_type==1].size()
-        self.distrib = self.cut_distrib
-        if self.cutmix_uniform:
-            xb[aug_type==1], self.lam[aug_type==1] = CutMix._uniform_cutmix(self, xb[aug_type==1], xb1[aug_type[aug_type<2]==1], H, W)
-        else:
-            xb[aug_type==1], self.lam[aug_type==1] = CutMix._multi_cutmix(self, xb[aug_type==1], xb1[aug_type[aug_type<2]==1], H, W, bs)
+        if do_cut:
+            bs, _, H, W = xb[aug_type==1].size()
+            self.distrib = self.cut_distrib
+            if self.cutmix_uniform:
+                xb[aug_type==1], lam = CutMix._uniform_cutmix(self, xb[aug_type==1], xb1[aug_type[aug_type<2]==1], H, W)
+                self.lam[aug_type==1] = lam.expand(bs)
+            else:
+                xb[aug_type==1], self.lam[aug_type==1] = CutMix._multi_cutmix(self, xb[aug_type==1], xb1[aug_type[aug_type<2]==1], H, W, bs)
 
         # Normalize MixUp and CutMix
-        xb[aug_type<2] = self._norm_pipe(xb[aug_type<2])
+        if do_mix or do_cut:
+            xb[aug_type<2] = self._norm_pipe(xb[aug_type<2])
 
         self.learn.xb = (xb,)
         if not self.stack_y:
diff --git a/nbs/callback.cutmixup.ipynb b/nbs/callback.cutmixup.ipynb
index 2e7267f..df481b5 100644
--- a/nbs/callback.cutmixup.ipynb
+++ b/nbs/callback.cutmixup.ipynb
@@ -26,7 +26,7 @@
     "#|export\n",
     "from __future__ import annotations\n",
     "\n",
-    "from torch.distributions import Bernoulli, Categorical\n",
+    "from torch.distributions import Categorical\n",
     "from torch.distributions.beta import Beta\n",
     "\n",
     "from fastcore.transform import Pipeline, Transform\n",
@@ -85,7 +85,9 @@
    "        self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))\n",
    "\n",
    "    def _mixup(self, bs):\n",
-   "        lam = self.distrib.sample((bs,)).squeeze().to(self.x.device)\n",
+   "        lam = self.distrib.sample((bs,)).to(self.x.device)\n",
+   "        if len(lam.shape) > 1:\n",
+   "            lam = lam.squeeze()\n",
    "        lam = torch.stack([lam, 1-lam], 1)\n",
    "        return lam.max(1)[0]"
@@ -132,8 +134,8 @@
    "\n",
    "    def _uniform_cutmix(self, xb, xb1, H, W):\n",
    "        \"Add uniform patches and blend labels from another random item in batch\"\n",
-   "        self.lam = self.distrib.sample((1,)).to(self.x.device)\n",
-   "        x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)\n",
+   "        lam = self.distrib.sample((1,)).to(self.x.device)\n",
+   "        x1, y1, x2, y2 = self.rand_bbox(W, H, lam)\n",
    "        xb[..., y1:y2, x1:x2] = xb1[..., y1:y2, x1:x2]\n",
    "        lam = (1 - ((x2-x1)*(y2-y1))/float(W*H))\n",
    "        return xb, lam\n",
@@ -218,17 +220,19 @@
    "        xb1, self.yb1 = xb[shuffle], self.yb1[shuffle]\n",
    "\n",
    "        # Apply MixUp\n",
-   "        self.distrib = self.mix_distrib\n",
-   "        self.lam[aug_type==0] = MixUp._mixup(self, xb[aug_type==0].shape[0])\n",
-   "        xb[aug_type==0] = torch.lerp(xb1[aug_type==0], xb[aug_type==0], weight=unsqueeze(self.lam[aug_type==0], n=3))\n",
+   "        if (aug_type==0).sum() > 0:\n",
+   "            self.distrib = self.mix_distrib\n",
+   "            self.lam[aug_type==0] = MixUp._mixup(self, xb[aug_type==0].shape[0])\n",
+   "            xb[aug_type==0] = torch.lerp(xb1[aug_type==0], xb[aug_type==0], weight=unsqueeze(self.lam[aug_type==0], n=3))\n",
    "\n",
    "        # Apply CutMix\n",
    "        bs, _, H, W = xb[aug_type==1].size()\n",
-   "        self.distrib = self.cut_distrib\n",
-   "        if self.cutmix_uniform: \n",
-   "            xb[aug_type==1], self.lam[aug_type==1] = CutMix._uniform_cutmix(self, xb[aug_type==1], xb1[aug_type==1], H, W)\n",
-   "        else: \n",
-   "            xb[aug_type==1], self.lam[aug_type==1] = CutMix._multi_cutmix(self, xb[aug_type==1], xb1[aug_type==1], H, W, bs)\n",
+   "        if bs > 0:\n",
+   "            self.distrib = self.cut_distrib\n",
+   "            if self.cutmix_uniform: \n",
+   "                xb[aug_type==1], self.lam[aug_type==1] = CutMix._uniform_cutmix(self, xb[aug_type==1], xb1[aug_type==1], H, W)\n",
+   "            else: \n",
+   "                xb[aug_type==1], self.lam[aug_type==1] = CutMix._multi_cutmix(self, xb[aug_type==1], xb1[aug_type==1], H, W, bs)\n",
    "\n",
    "        self.learn.xb = (xb,)\n",
    "        if not self.stack_y:\n",
@@ -252,6 +256,17 @@
     "## CutMixUpAugment -"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#|exporti\n",
+    "def _do_cutmixaug(t:Tensor):\n",
+    "    return t.sum().item() > 0"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -346,8 +361,10 @@
    "        bs, C, H, W = xb.size()\n",
    "        self.lam = torch.zeros(bs, device=xb.device)\n",
    "        aug_type = self.categorical.sample((bs,))\n",
-   "        shuffle = torch.randperm(xb[aug_type<2].shape[0]).to(xb.device)\n",
-   "        self.yb1[aug_type<2] = self.yb1[aug_type<2][shuffle]\n",
+   "        do_mix, do_cut, do_aug = _do_cutmixaug(aug_type==0), _do_cutmixaug(aug_type==1), _do_cutmixaug(aug_type==2)\n",
+   "        if do_mix or do_cut:\n",
+   "            shuffle = torch.randperm(xb[aug_type<2].shape[0]).to(xb.device)\n",
+   "            self.yb1[aug_type<2] = self.yb1[aug_type<2][shuffle]\n",
    "        \n",
    "        # Apply IntToFloat to all samples \n",
    "        xb = self._inttofloat_pipe(xb)\n",
@@ -356,33 +373,40 @@
    "        xb2 = torch.zeros([bs, C, self._size[0], self._size[1]], dtype=xb.dtype, device=xb.device) if self._size is not None else torch.zeros_like(xb)\n",
    "\n",
    "        # Apply MixUp/CutMix Augmentations to MixUp and CutMix samples\n",
-   "        if self._docutmixaug:\n",
-   "            xb2[aug_type<2] = self._cutmixaugs_pipe(xb[aug_type<2])\n",
-   "        else:\n",
-   "            xb2[aug_type<2] = xb[aug_type<2]\n",
+   "        if do_mix or do_cut:\n",
+   "            if self._docutmixaug:\n",
+   "                xb2[aug_type<2] = self._cutmixaugs_pipe(xb[aug_type<2])\n",
+   "            else:\n",
+   "                xb2[aug_type<2] = xb[aug_type<2]\n",
    "\n",
    "        # Original Augmentations\n",
-   "        xb2[aug_type==2] = self._aug_pipe(xb[aug_type==2])\n",
+   "        if do_aug: \n",
+   "            xb2[aug_type==2] = self._aug_pipe(xb[aug_type==2])\n",
    "\n",
    "        # Possibly Resized xb and shuffled xb1\n",
    "        xb = xb2\n",
-   "        xb1 = xb[aug_type<2][shuffle]\n",
+   "        if do_mix or do_cut:\n",
+   "            xb1 = xb[aug_type<2][shuffle]\n",
    "\n",
    "        # Apply MixUp\n",
-   "        self.distrib = self.mix_distrib\n",
-   "        self.lam[aug_type==0] = MixUp._mixup(self, xb[aug_type==0].shape[0])\n",
-   "        xb[aug_type==0] = torch.lerp(xb1[aug_type[aug_type<2]==0], xb[aug_type==0], weight=unsqueeze(self.lam[aug_type==0], n=3))\n",
+   "        if do_mix:\n",
+   "            self.distrib = self.mix_distrib\n",
+   "            self.lam[aug_type==0] = MixUp._mixup(self, xb[aug_type==0].shape[0])\n",
+   "            xb[aug_type==0] = torch.lerp(xb1[aug_type[aug_type<2]==0], xb[aug_type==0], weight=unsqueeze(self.lam[aug_type==0], n=3))\n",
    "\n",
    "        # Apply CutMix\n",
-   "        bs, _, H, W = xb[aug_type==1].size()\n",
-   "        self.distrib = self.cut_distrib\n",
-   "        if self.cutmix_uniform: \n",
-   "            xb[aug_type==1], self.lam[aug_type==1] = CutMix._uniform_cutmix(self, xb[aug_type==1], xb1[aug_type[aug_type<2]==1], H, W)\n",
-   "        else: \n",
-   "            xb[aug_type==1], self.lam[aug_type==1] = CutMix._multi_cutmix(self, xb[aug_type==1], xb1[aug_type[aug_type<2]==1], H, W, bs)\n",
+   "        if do_cut:\n",
+   "            bs, _, H, W = xb[aug_type==1].size()\n",
+   "            self.distrib = self.cut_distrib\n",
+   "            if self.cutmix_uniform: \n",
+   "                xb[aug_type==1], lam = CutMix._uniform_cutmix(self, xb[aug_type==1], xb1[aug_type[aug_type<2]==1], H, W)\n",
+   "                self.lam[aug_type==1] = lam.expand(bs)\n",
+   "            else: \n",
+   "                xb[aug_type==1], self.lam[aug_type==1] = CutMix._multi_cutmix(self, xb[aug_type==1], xb1[aug_type[aug_type<2]==1], H, W, bs)\n",
    "        \n",
    "        # Normalize MixUp and CutMix\n",
-   "        xb[aug_type<2] = self._norm_pipe(xb[aug_type<2]) \n",
+   "        if do_mix or do_cut:\n",
+   "            xb[aug_type<2] = self._norm_pipe(xb[aug_type<2]) \n",
    "\n",
    "        self.learn.xb = (xb,)\n",
    "        if not self.stack_y:\n",
@@ -494,7 +518,7 @@
    "    learn.remove_cbs([Recorder, ProgressCallback])\n",
    "    learn.epoch,learn.training = 0,True\n",
    "    learn.dl = dls.train\n",
-   "    b = dls.element()\n",
+   "    b = dls.one_batch()\n",
    "    learn._split(b)\n",
    "    learn('before_train')\n",
    "    learn('before_batch')\n",
@@ -1122,9 +1146,9 @@
      "    <tr>\n",
      "      <td>0</td>\n",
-     "      <td>2.040588</td>\n",
-     "      <td>1.628519</td>\n",
-     "      <td>00:22</td>\n",
+     "      <td>2.050730</td>\n",
+     "      <td>1.694408</td>\n",
+     "      <td>00:17</td>\n",
      "    </tr>\n",
      "  </tbody>\n",
      "</table>"
@@ -1149,7 +1173,7 @@
    "                       get_items=get_image_files, get_y=parent_label,\n",
    "                       item_tfms=Resize(128),\n",
    "                       batch_tfms=[*aug_transforms(), Normalize.from_stats(*imagenet_stats)])\n",
-   "    dls = dblock.dataloaders(imagenette, bs=128, num_workers=num_cpus(), pin_memory=True)\n",
+   "    dls = dblock.dataloaders(imagenette, bs=128, num_workers=num_cpus(), pin_memory=True)\n",
    "\n",
    "    learn = Learner(dls, resnet34(num_classes=dls.c), cbs=CutMixUp(element=False)).to_channelslast()\n",
    "    learn.fit_one_cycle(1, 3e-3)\n",
@@ -1176,9 +1200,63 @@
      "    <tr>\n",
      "      <td>0</td>\n",
-     "      <td>2.052253</td>\n",
-     "      <td>1.683779</td>\n",
-     "      <td>00:17</td>\n",
+     "      <td>2.041265</td>\n",
+     "      <td>1.650648</td>\n",
+     "      <td>00:14</td>\n",
+     "    </tr>\n",
+     "  </tbody>\n",
+     "</table>"
+    ],
+    "text/plain": [
+     "<IPython.core.display.HTML object>"
+    ]
+   },
+   "metadata": {},
+   "output_type": "display_data"
+  }
+ ],
+ "source": [
+  "#|hide\n",
+  "#|slow\n",
+  "#|cuda\n",
+  "imagenette = untar_data(URLs.IMAGENETTE_160)\n",
+  "\n",
+  "with less_random():\n",
+  "    dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),\n",
+  "                       splitter=GrandparentSplitter(valid_name='val'),\n",
+  "                       get_items=get_image_files, get_y=parent_label,\n",
+  "                       item_tfms=Resize(128),\n",
+  "                       batch_tfms=[*aug_transforms(), Normalize.from_stats(*imagenet_stats)])\n",
+  "    dls = dblock.dataloaders(imagenette, bs=128, num_workers=num_cpus(), pin_memory=True)\n",
+  "\n",
+  "    learn = Learner(dls, resnet34(num_classes=dls.c), cbs=CutMixUp(cutmix_uniform=True)).to_channelslast()\n",
+  "    learn.fit_one_cycle(1, 3e-3)\n",
+  "    free_gpu_memory(learn, dls)"
+ ]
+},
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+  {
+   "data": {
+    "text/html": [
+     "<table border=\"1\" class=\"dataframe\">\n",
+     "  <thead>\n",
+     "    <tr style=\"text-align: left;\">\n",
+     "      <th>epoch</th>\n",
+     "      <th>train_loss</th>\n",
+     "      <th>valid_loss</th>\n",
+     "      <th>time</th>\n",
+     "    </tr>\n",
+     "  </thead>\n",
+     "  <tbody>\n",
+     "    <tr>\n",
+     "      <td>0</td>\n",
+     "      <td>2.042219</td>\n",
+     "      <td>1.635136</td>\n",
+     "      <td>00:22</td>\n",
+     "    </tr>\n",
+     "  </tbody>\n",
+     "</table>"
@@ -1203,7 +1281,7 @@
    "                       get_items=get_image_files, get_y=parent_label,\n",
    "                       item_tfms=Resize(128),\n",
    "                       batch_tfms=[*aug_transforms(), Normalize.from_stats(*imagenet_stats)])\n",
-   "    dls = dblock.dataloaders(imagenette, bs=128, num_workers=num_cpus(), pin_memory=True)\n",
+   "    dls = dblock.dataloaders(imagenette, bs=128, num_workers=num_cpus(), pin_memory=True)\n",
    "\n",
    "    learn = Learner(dls, resnet34(num_classes=dls.c), cbs=CutMixUp(cutmix_uniform=False)).to_channelslast()\n",
    "    learn.fit_one_cycle(1, 3e-3)\n",
@@ -1230,8 +1308,8 @@
      "  <tbody>\n",
      "    <tr>\n",
      "      <td>0</td>\n",
-     "      <td>2.277530</td>\n",
-     "      <td>1.957705</td>\n",
+     "      <td>2.275790</td>\n",
+     "      <td>1.934458</td>\n",
      "      <td>00:15</td>\n",
      "    </tr>\n",
@@ -1257,10 +1335,10 @@
    "                       get_items=get_image_files, get_y=parent_label,\n",
    "                       item_tfms=Resize(128),\n",
    "                       batch_tfms=[*aug_transforms(), Normalize.from_stats(*imagenet_stats)])\n",
-   "    dls = dblock.dataloaders(imagenette, bs=128, num_workers=num_cpus(), pin_memory=True)\n",
+   "    dls = dblock.dataloaders(imagenette, bs=128, num_workers=num_cpus(), pin_memory=True)\n",
    "\n",
    "    learn = Learner(dls, resnet34(num_classes=dls.c), \n",
-   "                    cbs=CutMixUpAugment(cutmix_uniform=False, cutmixup_augs=aug_transforms())).to_channelslast()\n",
+   "                    cbs=CutMixUpAugment(cutmix_uniform=True, cutmixup_augs=aug_transforms())).to_channelslast()\n",
    "    learn.fit_one_cycle(1, 3e-3)\n",
    "    free_gpu_memory(learn, dls)"
@@ -1285,9 +1363,9 @@
      "    <tr>\n",
      "      <td>0</td>\n",
-     "      <td>1.956891</td>\n",
-     "      <td>1.655091</td>\n",
-     "      <td>00:13</td>\n",
+     "      <td>1.937060</td>\n",
+     "      <td>1.594086</td>\n",
+     "      <td>00:17</td>\n",
      "    </tr>\n",
@@ -1312,7 +1390,7 @@
    "                       get_items=get_image_files, get_y=parent_label,\n",
    "                       item_tfms=Resize(128),\n",
    "                       batch_tfms=[*aug_transforms(), Normalize.from_stats(*imagenet_stats)])\n",
-   "    dls = dblock.dataloaders(imagenette, bs=128, num_workers=num_cpus(), pin_memory=True)\n",
+   "    dls = dblock.dataloaders(imagenette, bs=128, num_workers=num_cpus(), pin_memory=True)\n",
    "\n",
    "    learn = Learner(dls, resnet34(num_classes=dls.c), cbs=CutMixUpAugment(cutmix_uniform=False, element=False)).to_channelslast()\n",
    "    learn.fit_one_cycle(1, 3e-3)\n",
diff --git a/settings.ini b/settings.ini
index dfbd0a7..918ecd6 100644
--- a/settings.ini
+++ b/settings.ini
@@ -8,7 +8,7 @@ author = Benjamin Warner
 author_email = me@benjaminwarner.dev
 copyright = Benjamin Warner
 branch = main
-version = 0.1.0
+version = 0.0.10
 min_python = 3.7
 audience = Developers
 language = English