1+ .. NOTE: This file is auto-generated from examples/data/torchvision/index.rst
2+ .. This is done so this file can be easily viewed from the GitHub UI.
3+ .. **DO NOT EDIT**
4+
15 Torchvision
26===========
37
@@ -7,8 +11,8 @@ Torchvision
711Make sure to read the following sections of the documentation before using this
812example:
913
10- * :ref:`pytorch_setup`
11- * :ref:`001 - Single GPU Job`
14+ * `examples/frameworks/pytorch_setup <https://github.com/mila-iqia/mila-docs/tree/master/docs/examples/frameworks/pytorch_setup>`_
15+ * `examples/distributed/single_gpu <https://github.com/mila-iqia/mila-docs/tree/master/docs/examples/distributed/single_gpu>`_
1216
1317The full source code for this example is available on `the mila-docs GitHub
1418repository.
@@ -19,7 +23,7 @@ repository.
1923
2024.. code:: diff
2125
22- # distributed/001_single_gpu/job.sh -> data/torchvision/job.sh
26+ # distributed/single_gpu/job.sh -> data/torchvision/job.sh
2327 #!/bin/bash
2428 #SBATCH --gpus-per-task=rtx8000:1
2529 #SBATCH --cpus-per-task=4
@@ -84,7 +88,7 @@ repository.
8488
8589.. code:: diff
8690
87- # distributed/001_single_gpu/main.py -> data/torchvision/main.py
91+ # distributed/single_gpu/main.py -> data/torchvision/main.py
8892 -"""Single-GPU training example."""
8993 +"""Torchvision training example."""
9094 import logging
@@ -198,7 +202,8 @@ repository.
198202 logger.debug(f"Accuracy: {accuracy.item():.2%}")
199203 logger.debug(f"Average Loss: {loss.item()}")
200204
201- # Advance the progress bar one step, and update the "postfix" () the progress bar. (nicer than just)
205+ - # Advance the progress bar one step and update the progress bar text.
206+ + # Advance the progress bar one step, and update the "postfix" () the progress bar. (nicer than just)
202207 progress_bar.update(1)
203208 progress_bar.set_postfix(loss=loss.item(), accuracy=accuracy.item())
204209 progress_bar.close()
@@ -243,7 +248,8 @@ repository.
243248 - """Returns the training, validation, and test splits for CIFAR10.
244249 + """Returns the training, validation, and test splits for iNat.
245250
246- NOTE: We don't use image transforms here for simplicity.
251+ - NOTE: We don't use image transforms here for simplicity.
252+ + NOTE: We use the same image transforms here for train/val/test just to keep things simple.
247253 Having different transformations for train and validation would complicate things a bit.
248254 Later examples will show how to do the train/val/test split properly when using transforms.
249255 """
@@ -308,13 +314,11 @@ repository.
308314 from torchvision.datasets import INaturalist
309315
310316
311- def link_file(src: str, dest: str):
312- Path(src).symlink_to(dest)
317+ def link_file(src: Path, dest: Path) -> None:
318+ src.symlink_to(dest)
313319
314320
315- def link_files(src: str, dest: str, workers=4):
316- src = Path(src)
317- dest = Path(dest)
321+ def link_files(src: Path, dest: Path, workers: int = 4) -> None:
318322 os.makedirs(dest, exist_ok=True)
319323 with Pool(processes=workers) as pool:
320324 for path, dnames, fnames in os.walk(str(src)):
0 commit comments