From 65c658b314c86f5eae1598278c34c05dd79a3a20 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Thu, 20 May 2021 03:01:42 +0900 Subject: [PATCH] Minor doc fixes (#301) * Fix colab link * Fix installation link * Capitalise pytorch * Add newlines for better visualisation in docs * Reduce dup installation * Fix typo * Fix typo * Formatting * Formatting * Fix trainer URL * Formatting Co-authored-by: thomas chaton --- docs/source/_templates/theme_variables.jinja | 8 +++---- docs/source/general/training.rst | 2 +- docs/source/quickstart.rst | 25 ++++++-------------- docs/source/reference/flash_to_pl.rst | 2 +- flash/core/data/data_pipeline.py | 3 +-- flash/core/data/process.py | 14 +++++------ flash/core/model.py | 6 ++--- flash/core/trainer.py | 20 ++++++++-------- flash_examples/generic_task.py | 13 ++++++++-- flash_notebooks/custom_task_tutorial.ipynb | 4 ++-- flash_notebooks/tabular_classification.ipynb | 2 +- 11 files changed, 48 insertions(+), 51 deletions(-) diff --git a/docs/source/_templates/theme_variables.jinja b/docs/source/_templates/theme_variables.jinja index 3ddf60d85d..073a47a64c 100644 --- a/docs/source/_templates/theme_variables.jinja +++ b/docs/source/_templates/theme_variables.jinja @@ -1,8 +1,8 @@ {%- set external_urls = { - 'github': 'https://github.com/PytorchLightning/lightning-flash', - 'github_issues': 'https://github.com/PytorchLightning/lightning-flash/issues', - 'contributing': 'https://github.com/PytorchLightning/pytorch-lightning/blob/master/CONTRIBUTING.md', - 'governance': 'https://github.com/PytorchLightning/pytorch-lightning/blob/master/governance.md', + 'github': 'https://github.com/PyTorchLightning/lightning-flash', + 'github_issues': 'https://github.com/PyTorchLightning/lightning-flash/issues', + 'contributing': 'https://github.com/PyTorchLightning/pytorch-lightning/blob/master/CONTRIBUTING.md', + 'governance': 'https://github.com/PyTorchLightning/pytorch-lightning/blob/master/governance.md', 'docs': 'https://lightning-flash.readthedocs.io', 'twitter': 'https://twitter.com/PyTorchLightnin', 'discuss': 'https://pytorch-lightning.slack.com', diff --git a/docs/source/general/training.rst b/docs/source/general/training.rst index 1ded187ff2..8edf3c8afd 100644 --- a/docs/source/general/training.rst +++ b/docs/source/general/training.rst @@ -58,7 +58,7 @@ Flash tasks supports many advanced training functionalities out-of-the-box, such # Train on TPUs flash.Trainer(tpu_cores=8) -You can add to the flash Trainer any argument from the Lightning trainer! Learn more about the Lightning Trainer `here `_. +You can add to the flash Trainer any argument from the Lightning trainer! Learn more about the Lightning Trainer `here `_. Trainer API diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst index 17cf780248..9f08f7ed4e 100644 --- a/docs/source/quickstart.rst +++ b/docs/source/quickstart.rst @@ -6,7 +6,7 @@ Quick Start Flash is a high-level deep learning framework for fast prototyping, baselining, finetuning and solving deep learning problems. It features a set of tasks for you to use for inference and finetuning out of the box, and an easy to implement API to customize every step of the process for full flexibility. -Flash is built for beginners with a simple API that requires very little deep learning background, and for data scientists, kagglers, applied ML practitioners and deep learning researchers that want a quick way to get a deep learning baseline with advnaced features `Pytorch Lightning `_ offers. 
+Flash is built for beginners with a simple API that requires very little deep learning background, and for data scientists, Kagglers, applied ML practitioners and deep learning researchers that want a quick way to get a deep learning baseline with advanced features `PyTorch Lightning `_ offers. Why Flash? @@ -21,13 +21,13 @@ If you are just getting started with deep learning, Flash offers common deep lea Easy to scale ^^^^^^^^^^^^^ -Flash is built on top of `Pytorch Lightning `_, +Flash is built on top of `PyTorch Lightning `_, a powerful deep learning research framework for training models at scale. With the power of Lightning, you can train your flash tasks on any hardware: CPUs, GPUs or TPUs without any code changes. Easy to upskill ^^^^^^^^^^^^^^^ -If you want create more complex and custmoized models, you can refactor any part of flash with PyTorch or `Pytorch Lightning +If you want to create more complex and customized models, you can refactor any part of flash with PyTorch or `PyTorch Lightning `_ components to get all the flexibility you need. Lightning is just organized PyTorch with the unnecessary engineering details abstracted away. @@ -42,31 +42,20 @@ For Deep learning research Quickest way to a baseline ^^^^^^^^^^^^^^^^^^^^^^^^^^ -`Pytorch Lightning `_ is designed to abstract away unnecessary boilerplate, while enabling maximal flexibility. In order to provide full flexibility, solving very common deep learning problems such as classification in Lightning still requires some boilerplate. It can still take quite some time to get a baseline model running on a new dataset or out of domain task. We created Flash to answer our users need for a super quick way to baseline for Lightning using proven backbones for common data patterns. Flash aims to be the easiest starting point for your research- start with a Flash Task to benchmark against, and override any part of flash with Lightning or PyTorch components on your way to SOTA research. +`PyTorch Lightning `_ is designed to abstract away unnecessary boilerplate, while enabling maximal flexibility. In order to provide full flexibility, solving very common deep learning problems such as classification in Lightning still requires some boilerplate. It can still take quite some time to get a baseline model running on a new dataset or out of domain task. We created Flash to answer our users need for a super quick way to baseline for Lightning using proven backbones for common data patterns. Flash aims to be the easiest starting point for your research- start with a Flash Task to benchmark against, and override any part of flash with Lightning or PyTorch components on your way to SOTA research. Flexibility where you want it ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Flash tasks are essentialy LightningModules, and the Flash Trainer is a thin wrapper for the Lightning Trainer. You can use your own LightningModule instead of the Flash task, the Lightning Trainer instead of the flash trainer, etc. Flash helps you focus even more only on your research, and less on anything else. +Flash tasks are essentially LightningModules, and the Flash Trainer is a thin wrapper for the Lightning Trainer. You can use your own LightningModule instead of the Flash task, the Lightning Trainer instead of the flash trainer, etc. Flash helps you focus even more only on your research, and less on anything else. 
Standard best practices ^^^^^^^^^^^^^^^^^^^^^^^ -Flash tasks implement the standard best practices for a variety of diffrent models and domains, to save you time digging through different implementations. Flash abstracts even more details than lightning, allowing deep learning experts to share their tips and tricks for solving scoped deep learning problems. +Flash tasks implement the standard best practices for a variety of different models and domains, to save you time digging through different implementations. Flash abstracts even more details than Lightning, allowing deep learning experts to share their tips and tricks for solving scoped deep learning problems. .. tip:: Read :doc:`here ` to understand when to use Flash vs Lightning. ----- - -Install -======= - -You can install flash using pip or conda: - -.. code-block:: bash - - pip install lightning-flash -U - ------ Tasks @@ -156,7 +145,7 @@ When you have enough data, you're likely better off training from scratch instea A few Built-in Tasks ==================== -- :doc:`Generic Flash Task ` +- :doc:`General Task ` - :doc:`ImageClassification ` - :doc:`ImageEmbedder ` - :doc:`TextClassification ` diff --git a/docs/source/reference/flash_to_pl.rst b/docs/source/reference/flash_to_pl.rst index 6f6051ce80..39dd990600 100644 --- a/docs/source/reference/flash_to_pl.rst +++ b/docs/source/reference/flash_to_pl.rst @@ -2,7 +2,7 @@ From Flash to Lightning ####################### -Flash is built on top of `Pytorch Lightning +Flash is built on top of `PyTorch Lightning `_ to abstract away the unnecessary boilerplate for: - Data science diff --git a/flash/core/data/data_pipeline.py b/flash/core/data/data_pipeline.py index 0446ba308a..0a4bb6d540 100644 --- a/flash/core/data/data_pipeline.py +++ b/flash/core/data/data_pipeline.py @@ -68,7 +68,7 @@ def __repr__(self) -> str: class DataPipeline: """ DataPipeline holds the engineering logic to connect - :class:`~flash.core.data.process.Preprocess` and/or ``PostProcess`` objects to + :class:`~flash.core.data.process.Preprocess` and/or :class:`~flash.core.data.process.PostProcess` objects to the ``DataModule``, Flash ``Task`` and ``Trainer``. Example:: @@ -84,7 +84,6 @@ class CustomPostprocess(Postprocess): # And it can attached to both the datamodule and model. datamodule.data_pipeline = custom_data_pipeline - model.data_pipeline = custom_data_pipeline """ diff --git a/flash/core/data/process.py b/flash/core/data/process.py index f98723c7b7..4b0b4d6858 100644 --- a/flash/core/data/process.py +++ b/flash/core/data/process.py @@ -121,10 +121,10 @@ class Preprocess(BasePreprocess, Properties, Module): as it will impact performances. Data processing can be configured by overriding hooks or through transforms. The preprocess transforms are given as - a mapping from hook names to callables. Default transforms can be configured by overriding the `default_transforms` - or `{train,val,test,predict}_default_transforms` methods. These can then be overridden by the user with the - `{train,val,test,predict}_transform` arguments to the ``Preprocess``. All of the hooks can be used in the transform - mappings. + a mapping from hook names to callables. Default transforms can be configured by overriding the + ``default_transforms`` or ``{train,val,test,predict}_default_transforms`` methods. These can then be overridden by + the user with the ``{train,val,test,predict}_transform`` arguments to the ``Preprocess``. All of the hooks can be + used in the transform mappings. 
Example:: @@ -144,7 +144,7 @@ def train_default_transforms() -> Mapping[str, Callable]: } When overriding hooks for particular stages, you can prefix with ``train``, ``val``, ``test`` or ``predict``. For - example, you can achieve the same as the above example by implementing ```train_pre_tensor_transform`` and + example, you can achieve the same as the above example by implementing ``train_pre_tensor_transform`` and ``train_to_tensor_transform``. Example:: @@ -160,8 +160,8 @@ def to_tensor_transform(self, sample: PIL.Image) -> torch.Tensor: def collate(self, samples: List[torch.Tensor]) -> torch.Tensor: return torch.utils.data._utils.collate.default_collate(samples) - Each hook is aware of the Trainer ``running stage`` through booleans. These are useful for adapting functionality - for a stage without duplicating code. + Each hook is aware of the Trainer running stage through booleans. These are useful for adapting functionality for a + stage without duplicating code. Example:: diff --git a/flash/core/model.py b/flash/core/model.py index d0a735c89e..08960bbbff 100644 --- a/flash/core/model.py +++ b/flash/core/model.py @@ -82,9 +82,9 @@ class Task(LightningModule): Args: model: Model to use for the task. loss_fn: Loss function for training - optimizer: Optimizer to use for training, defaults to `torch.optim.Adam`. + optimizer: Optimizer to use for training, defaults to :class:`torch.optim.Adam`. metrics: Metrics to compute for training and evaluation. - learning_rate: Learning rate to use for training, defaults to `5e-5`. + learning_rate: Learning rate to use for training, defaults to ``5e-5``. preprocess: :class:`~flash.core.data.process.Preprocess` to use as the default for this task. postprocess: :class:`~flash.core.data.process.Postprocess` to use as the default for this task. """ @@ -286,7 +286,7 @@ def build_data_pipeline( objects. These will be overridden in the following resolution order (lowest priority first): - Lightning ``Datamodule``, either attached to the :class:`.Trainer` or to the :class:`.Task`. - - :class:`.Task` defaults given to ``.Task.__init__``. + - :class:`.Task` defaults given to :meth:`.Task.__init__`. - :class:`.Task` manual overrides by setting :py:attr:`~data_pipeline`. - :class:`.DataPipeline` passed to this method. diff --git a/flash/core/trainer.py b/flash/core/trainer.py index e9ab86d5d5..5b5eccfe45 100644 --- a/flash/core/trainer.py +++ b/flash/core/trainer.py @@ -73,7 +73,7 @@ def fit( datamodule: Optional[LightningDataModule] = None, ): r""" - Runs the full optimization routine. Same as pytorch_lightning.Trainer().fit() + Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit` Args: datamodule: A instance of :class:`LightningDataModule`. @@ -101,7 +101,7 @@ def finetune( ): r""" - Runs the full optimization routine. Same as pytorch_lightning.Trainer().fit(), but unfreezes layers + Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`, but unfreezes layers of the backbone throughout training layers of the backbone throughout training. Args: @@ -109,21 +109,21 @@ def finetune( model: Model to fit. - train_dataloader: A Pytorch DataLoader with training samples. If the model has + train_dataloader: A PyTorch DataLoader with training samples. If the model has a predefined train_dataloader method this will be skipped. - val_dataloaders: Either a single Pytorch Dataloader or a list of them, specifying validation samples. 
+ val_dataloaders: Either a single PyTorch Dataloader or a list of them, specifying validation samples. If the model has a predefined val_dataloaders method this will be skipped strategy: Should either be a string or a finetuning callback subclassing - ``pytorch_lightning.callbacks.BaseFinetuning``. + :class:`pytorch_lightning.callbacks.BaseFinetuning`. - Currently, default strategies can be enabled with these strings: - - ``no_freeze``, - - ``freeze``, - - ``freeze_unfreeze``, - - ``unfreeze_milestones`` + Default strategies can be enabled with these strings: + - ``"no_freeze"``, + - ``"freeze"``, + - ``"freeze_unfreeze"``, + - ``"unfreeze_milestones"``. """ self._resolve_callbacks(model, strategy) return super().fit(model, train_dataloader, val_dataloaders, datamodule) diff --git a/flash_examples/generic_task.py b/flash_examples/generic_task.py index 7b903d46ae..01ebe64aca 100644 --- a/flash_examples/generic_task.py +++ b/flash_examples/generic_task.py @@ -35,13 +35,22 @@ ) # 3. Load a dataset -dataset = datasets.MNIST(os.path.join(_PATH_ROOT, 'data'), download=False, transform=transforms.ToTensor()) +dataset = datasets.MNIST( + os.path.join(_PATH_ROOT, 'data'), + download=False, + transform=transforms.ToTensor(), +) # 4. Split the data randomly train, val, test = random_split(dataset, [50000, 5000, 5000]) # type: ignore # 5. Create the model -classifier = ClassificationTask(model, loss_fn=nn.functional.cross_entropy, optimizer=optim.Adam, learning_rate=10e-3) +classifier = ClassificationTask( + model, + loss_fn=nn.functional.cross_entropy, + optimizer=optim.Adam, + learning_rate=10e-3, +) # 6. Create the trainer trainer = pl.Trainer( diff --git a/flash_notebooks/custom_task_tutorial.ipynb b/flash_notebooks/custom_task_tutorial.ipynb index d55680695e..048036dcca 100644 --- a/flash_notebooks/custom_task_tutorial.ipynb +++ b/flash_notebooks/custom_task_tutorial.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "\n", + "\n", " \"Open\n", "" ] @@ -30,7 +30,7 @@ "outputs": [], "source": [ "%%capture\n", - "! pip install git+https://github.com/PyTorchLightning/pytorch-flash.git\n", + "! pip install git+https://github.com/PyTorchLightning/lightning-flash.git\n", "! pip install git+https://github.com/PyTorchLightning/pytorch-lightning.git" ] }, diff --git a/flash_notebooks/tabular_classification.ipynb b/flash_notebooks/tabular_classification.ipynb index 0c51b3d121..3369d03b45 100644 --- a/flash_notebooks/tabular_classification.ipynb +++ b/flash_notebooks/tabular_classification.ipynb @@ -40,7 +40,7 @@ "outputs": [], "source": [ "# %%capture\n", - "! pip install git+https://github.com/PyTorchLightning/pytorch-flash.git" + "! pip install git+https://github.com/PyTorchLightning/lightning-flash.git" ] }, {
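
For readers following the ``Trainer.finetune`` docstring updated above, here is a minimal usage sketch. It is not part of the patch: ``flash.vision.ImageClassifier``, ``ImageClassificationData.from_folders`` and the ``data/train`` / ``data/val`` folder paths are assumptions about the Flash release this patch targets (keyword names such as ``val_folder`` may differ between versions); only the ``strategy`` strings and the fact that ``flash.Trainer`` accepts any Lightning Trainer argument come from the docstrings themselves.

    # Sketch only -- namespaces, keyword names and data paths below are assumptions,
    # not part of the patch above.
    import flash
    from flash.vision import ImageClassificationData, ImageClassifier

    # Hypothetical image folders laid out as one sub-folder per class.
    datamodule = ImageClassificationData.from_folders(
        train_folder="data/train",
        val_folder="data/val",
    )

    model = ImageClassifier(num_classes=2, backbone="resnet18")

    # flash.Trainer is a thin wrapper around the Lightning Trainer, so any Lightning
    # Trainer argument (gpus, tpu_cores, max_epochs, ...) can be passed here.
    trainer = flash.Trainer(max_epochs=3)

    # strategy accepts "no_freeze", "freeze", "freeze_unfreeze", "unfreeze_milestones",
    # or a pytorch_lightning.callbacks.BaseFinetuning subclass.
    trainer.finetune(model, datamodule=datamodule, strategy="freeze")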