import requests
from transformers import pipeline

from ray import serve


# 1: Wrap the pretrained sentiment analysis model in a Serve deployment.
@serve.deployment(route_prefix="/")
class SentimentAnalysisDeployment:
    def __init__(self):
        self._model = pipeline("sentiment-analysis")

    def __call__(self, request):
        return self._model(request.query_params["text"])[0]


# 2: Deploy the deployment.
serve.run(SentimentAnalysisDeployment.bind())

# 3: Query the deployment and print the result.
print(
    requests.get(
        "http://localhost:8000/", params={"text": "Ray Serve is great!"}
    ).json()
)
# {'label': 'POSITIVE', 'score': 0.9998476505279541}
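
# One possible way to try this example locally, assuming `ray[serve]`,
# `transformers`, and `requests` are available in the environment
# (the filename below is hypothetical):
#
#   pip install "ray[serve]" transformers requests
#   python serve_quickstart.py
#
# Running the script starts Serve on a local Ray instance, exposes the
# deployment at http://localhost:8000/, and issues the HTTP query shown
# in step 3 before the process exits.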