diff --git a/doc/source/_toc.yml b/doc/source/_toc.yml index e33c577ca..038bdebb6 100644 --- a/doc/source/_toc.yml +++ b/doc/source/_toc.yml @@ -184,6 +184,7 @@ parts: sections: - file: ray-air/examples/analyze_tuning_results title: "Analyze hyperparameter tuning results" + - file: ray-air/examples/torch_image_example - file: ray-air/examples/torch_incremental_learning - file: ray-air/examples/rl_serving_example - file: ray-air/examples/upload_to_comet_ml diff --git a/doc/source/ray-air/examples/torch_image_example.ipynb b/doc/source/ray-air/examples/torch_image_example.ipynb new file mode 100644 index 000000000..8b7739941 --- /dev/null +++ b/doc/source/ray-air/examples/torch_image_example.ipynb @@ -0,0 +1,733 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c656b46d", + "metadata": {}, + "source": [
+ "# Training a Torch Classifier\n", + "\n",
+ "This tutorial demonstrates how to train an image classifier using the [Ray AI Runtime](air) (AIR).\n", + "\n",
+ "You should be familiar with [PyTorch](https://pytorch.org/) before starting the tutorial. If you need a refresher, read PyTorch's [training a classifier](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html) tutorial.\n", + "\n",
+ "## Before you begin\n", + "\n",
+ "* Install the [Ray AI Runtime](air). You'll need Ray 1.13 or later to run this example." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "20a51fae", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install 'ray[air]'" + ] + }, + { + "cell_type": "markdown", + "id": "4d6a1fbd", + "metadata": {}, + "source": [ + "* Install `requests`, `torch`, and `torchvision`." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "2860f0d8", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install requests torch torchvision" + ] + }, + { + "cell_type": "markdown", + "id": "b2e47e6b", + "metadata": {}, + "source": [
+ "## Load and normalize CIFAR-10\n", + "\n",
+ "We'll train our classifier on a popular image dataset called [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html).\n", + "\n",
+ "First, let's load CIFAR-10 into a Ray Dataset." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "39170e60", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [
+ "2022-05-26 14:49:27,034\tINFO services.py:1477 -- View the Ray dashboard at \u001b[1m\u001b[32mhttp://127.0.0.1:8265\u001b[39m\u001b[22m\n",
+ "2022-05-26 14:49:29,044\tWARNING read_api.py:253 -- The number of blocks in this dataset (1) limits its parallelism to 1 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of dataset blocks.\n",
+ "\u001b[2m\u001b[36m(_prepare_read pid=13653)\u001b[0m 2022-05-26 14:49:29,041\tWARNING torch_datasource.py:55 -- `SimpleTorchDatasource` doesn't support parallel reads. The `parallelism` argument will be ignored.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [
+ "\u001b[2m\u001b[36m(_execute_read_task pid=13653)\u001b[0m Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [
+ "2022-05-26 14:49:46,308\tWARNING read_api.py:253 -- The number of blocks in this dataset (1) limits its parallelism to 1 concurrent tasks. This is much less than the number of available CPU slots in the cluster.
Use `.repartition(n)` to increase the number of dataset blocks.\n",
+ "\u001b[2m\u001b[36m(_prepare_read pid=13653)\u001b[0m 2022-05-26 14:49:46,305\tWARNING torch_datasource.py:55 -- `SimpleTorchDatasource` doesn't support parallel reads. The `parallelism` argument will be ignored.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [
+ "\u001b[2m\u001b[36m(_execute_read_task pid=13653)\u001b[0m Files already downloaded and verified\n" + ] + } + ], + "source": [
+ "import ray\n",
+ "from ray.data.datasource import SimpleTorchDatasource\n",
+ "import torchvision\n",
+ "import torchvision.transforms as transforms\n",
+ "\n",
+ "transform = transforms.Compose(\n",
+ "    [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n",
+ ")\n",
+ "\n",
+ "def train_dataset_factory():\n",
+ "    return torchvision.datasets.CIFAR10(root=\"./data\", download=True, train=True, transform=transform)\n",
+ "\n",
+ "def test_dataset_factory():\n",
+ "    return torchvision.datasets.CIFAR10(root=\"./data\", download=True, train=False, transform=transform)\n",
+ "\n",
+ "train_dataset: ray.data.Dataset = ray.data.read_datasource(SimpleTorchDatasource(), dataset_factory=train_dataset_factory)\n",
+ "test_dataset: ray.data.Dataset = ray.data.read_datasource(SimpleTorchDatasource(), dataset_factory=test_dataset_factory)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "aedafe59", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset(num_blocks=1, num_rows=50000, schema=)" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train_dataset" + ] + }, + { + "cell_type": "markdown", + "id": "cbdade1a", + "metadata": {}, + "source": [
+ "Note that {py:class}`SimpleTorchDatasource` loads all data into memory, so you shouldn't use it with larger datasets.\n", + "\n",
+ "Next, let's represent our data using pandas dataframes instead of tuples. This lets us call methods like {py:meth}`Dataset.to_torch` later in the tutorial.\n",
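+ "\n",
+ "By the way, the read warnings above note that a single-block dataset limits parallelism. As a rough sketch, using the `.repartition(n)` call the warning itself suggests, you could split the data into more blocks before transforming it:\n",
+ "\n",
+ "```{code-block} python\n",
+ "# Optional: split the single block into several so that map_batches\n",
+ "# (and later training) can run in parallel across CPU slots.\n",
+ "train_dataset = train_dataset.repartition(8)\n",
+ "test_dataset = test_dataset.repartition(8)\n",
+ "```\n",
+ "\n",
+ "This tutorial keeps the single-block datasets so the recorded outputs match."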
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8f3bc4fc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [
+ "[dataset]: Run `pip install tqdm` to enable progress reporting.\n",
+ "\u001b[2m\u001b[36m(_map_block_nosplit pid=13653)\u001b[0m Files already downloaded and verified\n",
+ "\u001b[2m\u001b[36m(_map_block_nosplit pid=13653)\u001b[0m Files already downloaded and verified\n" + ] + } + ], + "source": [
+ "from typing import Tuple\n",
+ "import pandas as pd\n",
+ "from ray.data.extensions import TensorArray\n",
+ "import torch\n",
+ "\n",
+ "\n",
+ "def convert_batch_to_pandas(batch: Tuple[torch.Tensor, int]) -> pd.DataFrame:\n",
+ "    images = [TensorArray(image.numpy()) for image, _ in batch]\n",
+ "    labels = [label for _, label in batch]\n",
+ "\n",
+ "    df = pd.DataFrame({\"image\": images, \"label\": labels})\n",
+ "\n",
+ "    return df\n",
+ "\n",
+ "\n",
+ "train_dataset = train_dataset.map_batches(convert_batch_to_pandas)\n",
+ "test_dataset = test_dataset.map_batches(convert_batch_to_pandas)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "4aa50f2e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset(num_blocks=1, num_rows=50000, schema={image: object, label: int64})" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train_dataset" + ] + }, + { + "cell_type": "markdown", + "id": "454bd960", + "metadata": {}, + "source": [
+ "## Train a convolutional neural network\n", + "\n",
+ "Now that we've created our datasets, let's define the training logic." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "19046672", + "metadata": {}, + "outputs": [], + "source": [
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.nn.functional as F\n",
+ "\n",
+ "\n",
+ "class Net(nn.Module):\n",
+ "    def __init__(self):\n",
+ "        super().__init__()\n",
+ "        self.conv1 = nn.Conv2d(3, 6, 5)\n",
+ "        self.pool = nn.MaxPool2d(2, 2)\n",
+ "        self.conv2 = nn.Conv2d(6, 16, 5)\n",
+ "        self.fc1 = nn.Linear(16 * 5 * 5, 120)\n",
+ "        self.fc2 = nn.Linear(120, 84)\n",
+ "        self.fc3 = nn.Linear(84, 10)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        x = self.pool(F.relu(self.conv1(x)))\n",
+ "        x = self.pool(F.relu(self.conv2(x)))\n",
+ "        x = torch.flatten(x, 1)  # flatten all dimensions except batch\n",
+ "        x = F.relu(self.fc1(x))\n",
+ "        x = F.relu(self.fc2(x))\n",
+ "        x = self.fc3(x)\n",
+ "        return x" + ] + }, + { + "cell_type": "markdown", + "id": "c8e84b09", + "metadata": {}, + "source": [
+ "We define our training logic in a function called `train_loop_per_worker`.\n", + "\n",
+ "`train_loop_per_worker` contains regular PyTorch code with a few notable exceptions:\n",
+ "* We wrap our model with {py:func}`train.torch.prepare_model`.\n",
+ "* We call {py:func}`train.get_dataset_shard` and {py:meth}`Dataset.to_torch` to convert a subset of our training data to a Torch dataset.\n",
+ "* We save model state using {py:func}`train.save_checkpoint`. Because `prepare_model` wraps the network in `DistributedDataParallel`, we save `model.module.state_dict()` rather than `model.state_dict()`.\n",
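+ "\n",
+ "For intuition about the data format: the shard iterator yields `(inputs, labels)` tensor pairs, and because we pass `unsqueeze_feature_tensors=False` and `unsqueeze_label_tensor=False`, the tensors keep their natural shapes. A small, illustrative sanity check you could drop into `train_loop_per_worker` below (a sketch, not part of the tutorial's code):\n",
+ "\n",
+ "```{code-block} python\n",
+ "# With batch_size=2, the first batch from the shard should look like:\n",
+ "inputs, labels = next(iter(train_dataset_shard))\n",
+ "assert inputs.shape == (2, 3, 32, 32)  # (batch, channels, height, width)\n",
+ "assert labels.shape == (2,)            # one integer label per image\n",
+ "```"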
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "3e212725", + "metadata": {}, + "outputs": [], + "source": [
+ "from ray import train\n",
+ "import torch.optim as optim\n",
+ "\n",
+ "\n",
+ "def train_loop_per_worker(config):\n",
+ "    model = train.torch.prepare_model(Net())\n",
+ "\n",
+ "    criterion = nn.CrossEntropyLoss()\n",
+ "    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n",
+ "\n",
+ "    train_dataset_shard: torch.utils.data.Dataset = train.get_dataset_shard(\"train\").to_torch(\n",
+ "        feature_columns=[\"image\"],\n",
+ "        label_column=\"label\",\n",
+ "        batch_size=config[\"batch_size\"],\n",
+ "        unsqueeze_feature_tensors=False,\n",
+ "        unsqueeze_label_tensor=False\n",
+ "    )\n",
+ "\n",
+ "    for epoch in range(2):\n",
+ "        running_loss = 0.0\n",
+ "        for i, data in enumerate(train_dataset_shard):\n",
+ "            # get the inputs; data is a list of [inputs, labels]\n",
+ "            inputs, labels = data\n",
+ "\n",
+ "            # zero the parameter gradients\n",
+ "            optimizer.zero_grad()\n",
+ "\n",
+ "            # forward + backward + optimize\n",
+ "            outputs = model(inputs)\n",
+ "            loss = criterion(outputs, labels)\n",
+ "            loss.backward()\n",
+ "            optimizer.step()\n",
+ "\n",
+ "            # print statistics\n",
+ "            running_loss += loss.item()\n",
+ "            if i % 2000 == 1999:  # print every 2000 mini-batches\n",
+ "                print(f\"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}\")\n",
+ "                running_loss = 0.0\n",
+ "\n",
+ "    train.save_checkpoint(model=model.module.state_dict())" + ] + }, + { + "cell_type": "markdown", + "id": "5f55484e", + "metadata": {}, + "source": [ + "Finally, we can train our model. This should take a few minutes to run." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "46c48f35", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "== Status ==
Current time: 2022-05-26 14:52:09 (running for 00:02:01.90)
Memory usage on this node: 16.6/64.0 GiB
Using FIFO scheduling algorithm.
Resources requested: 0/16 CPUs, 0/0 GPUs, 0.0/44.98 GiB heap, 0.0/2.0 GiB objects
Result logdir: /Users/balaji/ray_results/TorchTrainer_2022-05-26_14-50-07
Number of trials: 1/1 (1 TERMINATED)
Trial name                  status      loc
TorchTrainer_cf234_00000    TERMINATED  127.0.0.1:13741
" + ], + "text/plain": [ + "<IPython.core.display.HTML object>" + ], + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m 2022-05-26 14:50:12,654\tINFO torch.py:346 -- Setting up process group for: env:// [rank=1, world_size=2]\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [W ProcessGroupGloo.cpp:715] Warning: Unable to resolve hostname to a (local) address. Using the loopback address as fallback. Manually set the network interface to bind to with GLOO_SOCKET_IFNAME. (function operator())\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m 2022-05-26 14:50:12,652\tINFO torch.py:346 -- Setting up process group for: env:// [rank=0, world_size=2]\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [W ProcessGroupGloo.cpp:715] Warning: Unable to resolve hostname to a (local) address. Using the loopback address as fallback. Manually set the network interface to bind to with GLOO_SOCKET_IFNAME. (function operator())\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m 2022-05-26 14:50:16,045\tINFO torch.py:98 -- Moving model to device: cpu\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m 2022-05-26 14:50:16,045\tINFO torch.py:132 -- Wrapping provided model in DDP.\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m 2022-05-26 14:50:16,045\tINFO torch.py:98 -- Moving model to device: cpu\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m 2022-05-26 14:50:16,045\tINFO torch.py:132 -- Wrapping provided model in DDP.\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m /Users/balaji/GitHub/ray/python/ray/ml/utils/torch_utils.py:64: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /Users/distiller/project/pytorch/torch/csrc/utils/tensor_numpy.cpp:178.)\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m return torch.as_tensor(vals, dtype=dtype)\n",
+ "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m /Users/balaji/GitHub/ray/python/ray/ml/utils/torch_utils.py:64: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program.
(Triggered internally at /Users/distiller/project/pytorch/torch/csrc/utils/tensor_numpy.cpp:178.)\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m return torch.as_tensor(vals, dtype=dtype)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [1, 2000] loss: 2.208\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [1, 2000] loss: 2.198\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [1, 4000] loss: 1.906\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [1, 4000] loss: 1.876\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [1, 6000] loss: 1.718\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [1, 6000] loss: 1.736\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [1, 8000] loss: 1.641\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [1, 8000] loss: 1.658\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [1, 10000] loss: 1.586\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [1, 10000] loss: 1.547\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [1, 12000] loss: 1.488\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [1, 12000] loss: 1.494\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [2, 2000] loss: 1.417\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [2, 2000] loss: 1.452\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [2, 4000] loss: 1.413\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [2, 4000] loss: 1.409\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [2, 6000] loss: 1.397\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [2, 6000] loss: 1.372\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [2, 8000] loss: 1.361\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [2, 8000] loss: 1.382\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [2, 10000] loss: 1.339\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [2, 10000] loss: 1.309\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13750)\u001b[0m [2, 12000] loss: 1.276\n", + "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13749)\u001b[0m [2, 12000] loss: 1.285\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2022-05-26 14:52:09,873\tERROR checkpoint_manager.py:189 -- Result dict has no key: training_iteration. checkpoint_score_attr must be set to a key of the result dict. Valid keys are ['trial_id', 'experiment_id', 'date', 'timestamp', 'pid', 'hostname', 'node_ip', 'config', 'done']\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trial TorchTrainer_cf234_00000 completed. 
Last result: \n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [
+ "2022-05-26 14:52:09,986\tINFO tune.py:752 -- Total run time: 122.04 seconds (121.90 seconds for the tuning loop).\n" + ] + } + ], + "source": [
+ "from ray.ml.train.integrations.torch import TorchTrainer\n",
+ "\n",
+ "trainer = TorchTrainer(\n",
+ "    train_loop_per_worker=train_loop_per_worker,\n",
+ "    train_loop_config={\"batch_size\": 2},\n",
+ "    datasets={\"train\": train_dataset},\n",
+ "    scaling_config={\"num_workers\": 2}\n",
+ ")\n",
+ "result = trainer.fit()\n",
+ "latest_checkpoint = result.checkpoint" + ] + }, + { + "cell_type": "markdown", + "id": "b0e5c7d2", + "metadata": {}, + "source": [
+ "To scale your training script, create a [Ray Cluster](deployment-guide) and increase the number of workers. If your cluster contains GPUs, add `\"use_gpu\": True` to your scaling config.\n", + "\n",
+ "```{code-block} python\n",
+ "scaling_config={\"num_workers\": 8, \"use_gpu\": True}\n",
+ "```\n", + "\n",
+ "## Test the network on the test data\n", + "\n",
+ "Let's see how our model performs.\n", + "\n",
+ "To classify images in the test dataset, we'll need to create a {py:class}`Predictor`.\n", + "\n",
+ "{py:class}`Predictors` load data from checkpoints and efficiently perform inference. In contrast to {py:class}`TorchPredictor`, which performs inference on a single batch, {py:class}`BatchPredictor` performs inference on an entire dataset. Because we want to classify all of the images in the test dataset, we'll use a {py:class}`BatchPredictor`." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "751b0b2a", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [
+ "\u001b[2m\u001b[36m(BlockWorker pid=13962)\u001b[0m /Users/balaji/GitHub/ray/python/ray/ml/utils/torch_utils.py:64: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /Users/distiller/project/pytorch/torch/csrc/utils/tensor_numpy.cpp:178.)\n",
+ "\u001b[2m\u001b[36m(BlockWorker pid=13962)\u001b[0m return torch.as_tensor(vals, dtype=dtype)\n" + ] + } + ], + "source": [
+ "from ray.ml.predictors.integrations.torch import TorchPredictor\n",
+ "from ray.ml.batch_predictor import BatchPredictor\n",
+ "\n",
+ "batch_predictor = BatchPredictor.from_checkpoint(\n",
+ "    checkpoint=latest_checkpoint,\n",
+ "    predictor_cls=TorchPredictor,\n",
+ "    model=Net(),\n",
+ ")\n",
+ "\n",
+ "outputs: ray.data.Dataset = batch_predictor.predict(\n",
+ "    data=test_dataset, feature_columns=[\"image\"], unsqueeze=False\n",
+ ")" + ] + }, + { + "cell_type": "markdown", + "id": "202a6ef3", + "metadata": {}, + "source": [
+ "Our model outputs a list of energies for each class. To classify an image, we\n",
+ "choose the class that has the highest energy."
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "1e0681db", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'prediction': 3}\n" + ] + } + ], + "source": [
+ "import numpy as np\n",
+ "\n",
+ "def convert_logits_to_classes(df):\n",
+ "    best_class = df[\"predictions\"].map(lambda x: x.argmax())\n",
+ "    df[\"prediction\"] = best_class\n",
+ "    return df[[\"prediction\"]]\n",
+ "\n",
+ "predictions = outputs.map_batches(\n",
+ "    convert_logits_to_classes, batch_format=\"pandas\"\n",
+ ")\n",
+ "\n",
+ "predictions.show(1)" + ] + }, + { + "cell_type": "markdown", + "id": "549018b3", + "metadata": {}, + "source": [
+ "Now that we've classified all of the images, let's figure out which images were\n",
+ "classified correctly. The ``predictions`` dataset contains predicted labels and\n",
+ "the ``test_dataset`` contains the true labels. To determine whether an image\n",
+ "was classified correctly, we join the two datasets and check if the predicted\n",
+ "labels are the same as the actual labels." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "2d356a73", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'prediction': 3, 'label': 3, 'correct': True}\n" + ] + } + ], + "source": [
+ "def calculate_prediction_scores(df):\n",
+ "    df[\"correct\"] = df[\"prediction\"] == df[\"label\"]\n",
+ "    return df[[\"prediction\", \"label\", \"correct\"]]\n",
+ "\n",
+ "scores = test_dataset.zip(predictions).map_batches(calculate_prediction_scores)\n",
+ "\n",
+ "scores.show(1)" + ] + }, + { + "cell_type": "markdown", + "id": "44ff5a1d", + "metadata": {}, + "source": [
+ "To compute our test accuracy, we'll count how many images the model classified\n",
+ "correctly and divide that number by the total number of test images." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "4d6171e9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0.5531" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "scores.sum(on=\"correct\") / scores.count()" + ] + }, + { + "cell_type": "markdown", + "id": "ea902889", + "metadata": {}, + "source": [
+ "## Deploy the network and make a prediction\n", + "\n",
+ "Our model seems to perform decently, so let's deploy the model to an\n",
+ "endpoint. This'll allow us to make predictions over the Internet.\n",
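+ "\n",
+ "When you're finished with the example, you can also tear the deployment down again and free its resources; a minimal sketch, assuming Serve's standard shutdown API:\n",
+ "\n",
+ "```{code-block} python\n",
+ "# Run this once you're done making predictions.\n",
+ "serve.shutdown()\n",
+ "```"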
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "cc4dd783", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [
+ "\u001b[2m\u001b[36m(ServeController pid=13967)\u001b[0m INFO 2022-05-26 14:52:14,630 controller 13967 checkpoint_path.py:17 - Using RayInternalKVStore for controller checkpoint and recovery.\n",
+ "\u001b[2m\u001b[36m(ServeController pid=13967)\u001b[0m INFO 2022-05-26 14:52:14,633 controller 13967 http_state.py:112 - Starting HTTP proxy with name 'SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-node:127.0.0.1-0' on node 'node:127.0.0.1-0' listening on '127.0.0.1:8000'\n",
+ "\u001b[2m\u001b[36m(HTTPProxyActor pid=13969)\u001b[0m INFO: Started server process [13969]\n",
+ "\u001b[2m\u001b[36m(ServeController pid=13967)\u001b[0m INFO 2022-05-26 14:52:16,241 controller 13967 deployment_state.py:1218 - Adding 1 replicas to deployment 'my-deployment'.\n" + ] + } + ], + "source": [
+ "from ray import serve\n",
+ "from ray.serve.model_wrappers import ModelWrapperDeployment\n",
+ "\n",
+ "serve.start(detached=True)\n",
+ "deployment = ModelWrapperDeployment.options(name=\"my-deployment\")\n",
+ "deployment.deploy(TorchPredictor, latest_checkpoint, batching_params=False, model=Net())" + ] + }, + { + "cell_type": "markdown", + "id": "0944ddbd", + "metadata": {}, + "source": [ + "Let's classify a test image." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "19f43c62", + "metadata": {}, + "outputs": [], + "source": [
+ "batch = test_dataset.take(1)\n",
+ "array = np.expand_dims(np.array(batch[0][\"image\"]), axis=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "1576f7cc", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(1, 3, 32, 32)" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "array.shape" + ] + }, + { + "cell_type": "markdown", + "id": "f63e995d", + "metadata": {}, + "source": [
+ "You can perform inference against a deployed model by posting a dictionary with an `\"array\"` key. To learn more about the default input schema, read the {py:class}`NdArray` documentation.\n",
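+ "\n",
+ "Once the `requests` call below returns, you can recover a class label from the served model's energies the same way as in batch prediction. A short sketch, assuming the `{'predictions': {'0': [...]}}` response shape shown below:\n",
+ "\n",
+ "```{code-block} python\n",
+ "import numpy as np\n",
+ "\n",
+ "# One energy per class; argmax recovers the predicted class\n",
+ "# (3 for the first test image, matching the batch prediction above).\n",
+ "energies = response.json()[\"predictions\"][\"0\"]\n",
+ "predicted_class = int(np.argmax(energies))\n",
+ "```"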
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "6b5d452f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'predictions': {'0': [-1.1721627712249756,\n", + " -1.2344744205474854,\n", + " -0.0395149365067482,\n", + " 2.5982346534729004,\n", + " -0.7517635822296143,\n", + " 1.6971060037612915,\n", + " -0.27467942237854004,\n", + " -0.8857517242431641,\n", + " 1.4102720022201538,\n", + " -1.8619050979614258]}}" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[2m\u001b[36m(HTTPProxyActor pid=13969)\u001b[0m INFO 2022-05-26 14:52:18,593 http_proxy 127.0.0.1 http_proxy.py:315 - POST /my-deployment 307 4.9ms\n", + "\u001b[2m\u001b[36m(HTTPProxyActor pid=13969)\u001b[0m INFO 2022-05-26 14:52:18,616 http_proxy 127.0.0.1 http_proxy.py:315 - POST /my-deployment 200 20.6ms\n", + "\u001b[2m\u001b[36m(my-deployment pid=13971)\u001b[0m INFO 2022-05-26 14:52:18,591 my-deployment my-deployment#HdSekn replica.py:478 - HANDLE __call__ OK 0.3ms\n", + "\u001b[2m\u001b[36m(my-deployment pid=13971)\u001b[0m INFO 2022-05-26 14:52:18,615 my-deployment my-deployment#HdSekn replica.py:478 - HANDLE __call__ OK 17.5ms\n" + ] + } + ], + "source": [ + "import requests\n", + "\n", + "payload = {\"array\": array.tolist()}\n", + "response = requests.post(deployment.url, json=payload)\n", + "response.json()" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "63e5f6eec01b85d783aa3a898bfbc5e143aba32dbede6a46dbb3b17d5dbd2e6a" + }, + "kernelspec": { + "display_name": "Python 3.8.12 ('.venv': venv)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}