Commit

Merge branch 'master' into docstring
Prathamesh010 authored Oct 8, 2024
2 parents fe78a14 + 820b20f commit ce3277a
Showing 57 changed files with 915 additions and 196 deletions.
44 changes: 21 additions & 23 deletions README.md

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions docs/source/getting_started/lightly_at_a_glance.rst
@@ -34,6 +34,7 @@ Let's now load an image dataset and create a PyTorch dataloader.
.. code-block:: python
import torch
import lightly.data as data
# Create a dataset from your image folder.
dataset = data.LightlyDataset(
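The hunk above is truncated at the dataset constructor. For context, the documented snippet continues roughly as follows; this is a minimal sketch of the pattern the docs describe, and the folder path, transform, and dataloader settings are illustrative assumptions rather than part of this commit:

import torch
import torchvision.transforms as T
import lightly.data as data

# Create a dataset from your image folder.
# ToTensor is an assumption here: the default collate function needs
# tensors, not PIL images, in order to batch samples.
dataset = data.LightlyDataset(
    input_dir="path/to/folder",
    transform=T.ToTensor(),
)

# Wrap the dataset in a standard PyTorch dataloader.
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=128,  # illustrative; pick what fits your hardware
    shuffle=True,
)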
2 changes: 1 addition & 1 deletion docs/source/tutorials/structure_your_input.rst
@@ -6,7 +6,7 @@ Tutorial 1: Structure Your Input
 The modern-day open-source ecosystem has changed a lot over the years, and there are now
 many viable options for data pipelining. The `torchvision.data <https://pytorch.org/vision/main/datasets.html>`_ submodule provides a robust implementation for most use cases,
 and the `Hugging Face Hub <https://hf.co>`_ has emerged as a growing collection of datasets that span a variety of domains and tasks.
-It you want to use your own data, the ability to quickly create datasets and dataloaders is of prime importance.
+If you want to use your own data, the ability to quickly create datasets and dataloaders is of prime importance.
 
 In this tutorial, we will provide a brief overview of the `LightlyDataset <https://docs.lightly.ai/self-supervised-learning/lightly.data.html#lightly.data.dataset.LightlyDataset>`_
 and go through examples of using datasets from various open-source libraries such as PyTorch and
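Since the tutorial's point is quick dataset creation, a related pattern is worth showing here: wrapping an existing PyTorch-style dataset for use with Lightly. A minimal sketch, assuming the `LightlyDataset.from_torch_dataset` helper from the Lightly docs; the CIFAR-10 dataset and the transform choice are illustrative:

import torchvision
from lightly.data import LightlyDataset
from lightly.transforms import SimCLRTransform

# Wrap a torchvision dataset; the wrapped dataset yields the
# (sample, target, filename) tuples expected by Lightly's tooling.
base = torchvision.datasets.CIFAR10("datasets/cifar10", download=True)
dataset = LightlyDataset.from_torch_dataset(base, transform=SimCLRTransform())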
34 changes: 28 additions & 6 deletions examples/notebooks/pytorch/aim.ipynb
@@ -129,16 +129,38 @@
"cell_type": "code",
"execution_count": null,
"id": "8",
"metadata": {},
"metadata": {
"lines_to_next_cell": 2
},
"outputs": [],
"source": [
"transform = AIMTransform()\n",
"# we ignore object detection annotations by setting target_transform to return 0\n",
"# we ignore object detection annotations by setting target_transform to return 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9",
"metadata": {},
"outputs": [],
"source": [
"def target_transform(t):\n",
" return 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "10",
"metadata": {},
"outputs": [],
"source": [
"dataset = torchvision.datasets.VOCDetection(\n",
" \"datasets/pascal_voc\",\n",
" download=True,\n",
" transform=transform,\n",
" target_transform=lambda t: 0,\n",
" target_transform=target_transform,\n",
")\n",
"# or create a dataset from a folder containing images or videos:\n",
"# dataset = LightlyDataset(\"path/to/folder\")"
@@ -147,7 +169,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "9",
+  "id": "11",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -163,7 +185,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "10",
+  "id": "12",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -174,7 +196,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "11",
+  "id": "13",
   "metadata": {},
   "outputs": [],
   "source": [
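The recurring change in this and the following notebooks replaces the inline `lambda t: 0` with a module-level `target_transform` function. A plausible motivation, which is an assumption on my part since the diff does not state it, is picklability: a named top-level function can be pickled, so the dataset keeps working with multi-process `DataLoader` workers and with notebook-to-script export, whereas a lambda cannot. A minimal sketch of the pattern:

import torch
import torchvision


def target_transform(t):
    # Ignore the object detection annotations and return a dummy label.
    return 0


dataset = torchvision.datasets.VOCDetection(
    "datasets/pascal_voc",
    download=True,
    target_transform=target_transform,
)

# Unlike a lambda, the named function can be pickled, so num_workers > 0
# also works on platforms that spawn worker processes (Windows, macOS).
dataloader = torch.utils.data.DataLoader(dataset, batch_size=10, num_workers=2)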
34 changes: 27 additions & 7 deletions examples/notebooks/pytorch/dino.ipynb
@@ -139,13 +139,33 @@
"metadata": {},
"outputs": [],
"source": [
"transform = DINOTransform()\n",
"transform = DINOTransform()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "11",
"metadata": {},
"outputs": [],
"source": [
"# we ignore object detection annotations by setting target_transform to return 0\n",
"def target_transform(t):\n",
" return 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "12",
"metadata": {},
"outputs": [],
"source": [
"dataset = torchvision.datasets.VOCDetection(\n",
" \"datasets/pascal_voc\",\n",
" download=True,\n",
" transform=transform,\n",
" target_transform=lambda t: 0,\n",
" target_transform=target_transform,\n",
")\n",
"# or create a dataset from a folder containing images or videos:\n",
"# dataset = LightlyDataset(\"path/to/folder\")"
@@ -154,7 +174,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "11",
+  "id": "13",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -170,7 +190,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "12",
+  "id": "14",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -185,7 +205,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "13",
+  "id": "15",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -195,7 +215,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "14",
+  "id": "16",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -205,7 +225,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "15",
+  "id": "17",
   "metadata": {},
   "outputs": [],
   "source": [
38 changes: 29 additions & 9 deletions examples/notebooks/pytorch/ijepa.ipynb
@@ -128,21 +128,41 @@
"transform = IJEPATransform()"
]
},
{
"cell_type": "markdown",
"id": "8",
"metadata": {
"lines_to_next_cell": 2
},
"source": [
"we ignore object detection annotations by setting target_transform to return 0\n",
"or create a dataset from a folder containing images or videos:\n",
"dataset = LightlyDataset(\"path/to/folder\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8",
"id": "9",
"metadata": {},
"outputs": [],
"source": [
"def target_transform(t):\n",
" return 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "10",
"metadata": {},
"outputs": [],
"source": [
"# we ignore object detection annotations by setting target_transform to return 0\n",
"# or create a dataset from a folder containing images or videos:\n",
"# dataset = LightlyDataset(\"path/to/folder\")\n",
"dataset = torchvision.datasets.VOCDetection(\n",
" \"datasets/pascal_voc\",\n",
" download=True,\n",
" transform=transform,\n",
" target_transform=lambda t: 0,\n",
" target_transform=target_transform,\n",
")\n",
"data_loader = torch.utils.data.DataLoader(\n",
" dataset, collate_fn=collator, batch_size=10, persistent_workers=False\n",
@@ -152,7 +172,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "9",
+  "id": "11",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -169,7 +189,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "10",
+  "id": "12",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -181,7 +201,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "11",
+  "id": "13",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -194,7 +214,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "12",
+  "id": "14",
   "metadata": {},
   "outputs": [],
   "source": [
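A note on the markdown cell and the `lines_to_next_cell` metadata appearing in these hunks: both are characteristic of notebooks kept in sync with percent-format Python scripts via jupytext, where each blank-line-separated block maps to its own cell. The tooling is an assumption on my part; the diff itself does not name it. A sketch of how the new cell layout would correspond to a percent-format script:

# %%
import torchvision
from lightly.transforms.ijepa_transform import IJEPATransform

transform = IJEPATransform()


# %%
def target_transform(t):
    return 0


# %%
dataset = torchvision.datasets.VOCDetection(
    "datasets/pascal_voc",
    download=True,
    transform=transform,
    target_transform=target_transform,
)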
34 changes: 28 additions & 6 deletions examples/notebooks/pytorch/mae.ipynb
@@ -141,16 +141,38 @@
"cell_type": "code",
"execution_count": null,
"id": "7",
"metadata": {},
"metadata": {
"lines_to_next_cell": 2
},
"outputs": [],
"source": [
"transform = MAETransform()\n",
"# we ignore object detection annotations by setting target_transform to return 0\n",
"# we ignore object detection annotations by setting target_transform to return 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8",
"metadata": {},
"outputs": [],
"source": [
"def target_transform(t):\n",
" return 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9",
"metadata": {},
"outputs": [],
"source": [
"dataset = torchvision.datasets.VOCDetection(\n",
" \"datasets/pascal_voc\",\n",
" download=True,\n",
" transform=transform,\n",
" target_transform=lambda t: 0,\n",
" target_transform=target_transform,\n",
")\n",
"# or create a dataset from a folder containing images or videos:\n",
"# dataset = LightlyDataset(\"path/to/folder\")"
@@ -159,7 +181,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "8",
+  "id": "10",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -175,7 +197,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "9",
+  "id": "11",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -186,7 +208,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "10",
+  "id": "12",
   "metadata": {},
   "outputs": [],
   "source": [
36 changes: 29 additions & 7 deletions examples/notebooks/pytorch/msn.ipynb
@@ -134,16 +134,38 @@
"cell_type": "code",
"execution_count": null,
"id": "8",
"metadata": {},
"metadata": {
"lines_to_next_cell": 2
},
"outputs": [],
"source": [
"transform = MSNTransform()\n",
"# we ignore object detection annotations by setting target_transform to return 0\n",
"# we ignore object detection annotations by setting target_transform to return 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9",
"metadata": {},
"outputs": [],
"source": [
"def target_transform(t):\n",
" return 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "10",
"metadata": {},
"outputs": [],
"source": [
"dataset = torchvision.datasets.VOCDetection(\n",
" \"datasets/pascal_voc\",\n",
" download=True,\n",
" transform=transform,\n",
" target_transform=lambda t: 0,\n",
" target_transform=target_transform,\n",
")\n",
"# or create a dataset from a folder containing images or videos:\n",
"# dataset = LightlyDataset(\"path/to/folder\")"
@@ -152,7 +174,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "9",
+  "id": "11",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -168,7 +190,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "10",
+  "id": "12",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -178,7 +200,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "11",
+  "id": "13",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -193,7 +215,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "12",
+  "id": "14",
   "metadata": {},
   "outputs": [],
   "source": [
