
Updates the landing page to match the format and content of Tune's. The main content changes are some shorter quickstarts and sharpened-up messaging in our "Why choose Serve?" section. I also moved all of the `doc_code` into one directory and added a Bazel target that should run all of the examples added there. Split into a separate PR: https://github.com/ray-project/ray/pull/24736.
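The Bazel target itself isn't shown on this page. As a rough illustration only (not the target actually added in this PR), a per-file smoke test over the consolidated `doc_code` directory might look something like the Starlark sketch below; the `team:serve` tag and the test size are illustrative placeholders.

```python
# Hypothetical BUILD sketch for the doc_code directory -- not the actual target
# from this PR. Assumes each example is a self-contained script that raises on
# failure, so running it as a plain py_test is a sufficient smoke test.
[
    py_test(
        name = file[:-len(".py")],
        size = "small",
        srcs = [file],
        tags = ["team:serve"],  # illustrative tag, not necessarily what CI expects
    )
    for file in glob(["*.py"])
]
```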
The quickstart itself is a short, self-contained Python file (22 lines, 555 B):
import requests

from ray import serve


# 1: Define a Ray Serve deployment.
@serve.deployment(route_prefix="/")
class MyModelDeployment:
    def __init__(self, msg: str):
        # Initialize model state: could be very large neural net weights.
        self._msg = msg

    def __call__(self, request):
        return {"result": self._msg}


# 2: Deploy the model.
serve.start()
MyModelDeployment.deploy(msg="Hello world!")

# 3: Query the deployment and print the result.
print(requests.get("http://localhost:8000/").json())
# {'result': 'Hello world!'}
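As a possible follow-up to the HTTP query above, the deployment can also be called in-process through a Serve handle. This is only a sketch against the 1.x-style deployment API used in the quickstart; `get_handle` and the ObjectRef returned by `handle.remote` may differ in later Ray releases.

```python
import ray

# Sketch: query the deployment via a ServeHandle instead of HTTP,
# assuming the same 1.x-style Serve deployment API as the quickstart above.
handle = MyModelDeployment.get_handle()

# __call__ ignores its argument, so passing None here is fine.
ref = handle.remote(None)
print(ray.get(ref))  # {'result': 'Hello world!'}
```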