
Commit 8164562

Added pytorch_yolov5_local_model_inference sample code

1 parent 0865ac9 commit 8164562

File tree

4 files changed, +92 -0 lines changed

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
import os.path
import torch
import json


def model_fn(model_dir):
    # Load the custom YOLOv5 weights shipped in the model archive via torch.hub.
    model_path = os.path.join(model_dir, 'yolov5s.pt')
    print(f'model_fn - model_path: {model_path}')
    model = torch.hub.load('ultralytics/yolov5', 'custom', path=model_path)
    return model


def input_fn(serialized_input_data, content_type):
    # Deserialize the request body; only JSON payloads (an image URL) are supported.
    if content_type == 'application/json':
        print(f'input_fn - serialized_input_data: {serialized_input_data}')
        input_data = json.loads(serialized_input_data)
        return input_data
    else:
        raise Exception('Requested unsupported ContentType in Accept: ' + content_type)


def predict_fn(input_data, model):
    # Run detection on the image URL and return the detections as a JSON-serialized DataFrame.
    print(f'predict_fn - input_data: {input_data}')
    imgs = [input_data]
    results = model(imgs)
    df = results.pandas().xyxy[0]
    return df.to_json(orient="split")
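
For a quick sanity check of these handlers outside of SageMaker, they can be chained by hand. A minimal sketch, assuming the module above is importable as inference and that the yolov5s.pt weights sit in a local ./model directory (both the import name and the path are assumptions for illustration, not part of this commit):

import json

from inference import model_fn, input_fn, predict_fn

# Assumed local layout: ./model/yolov5s.pt
model = model_fn('./model')
payload = json.dumps('https://ultralytics.com/images/zidane.jpg')
image_url = input_fn(payload, 'application/json')   # JSON body -> image URL
detections = predict_fn(image_url, model)           # JSON string, orient="split"
print(json.loads(detections)['columns'])            # xmin, ymin, xmax, ymax, confidence, class, name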
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
seaborn
Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
# This is a sample Python program for deploying a YOLOv5 pre-trained model to a SageMaker endpoint.
# Inference is done with a URL of the image, which is the HTTP payload sent to the SageMaker endpoint.
# This implementation will work on your *local computer*.
#
# This example is based on: https://github.com/aws/amazon-sagemaker-examples/blob/master/frameworks/tensorflow/get_started_mnist_deploy.ipynb
#
# Prerequisites:
# 1. Install the required Python packages:
#    `pip install -r requirements.txt`
# 2. Docker Desktop installed and running on your computer:
#    `docker ps`
# 3. You should have AWS credentials configured on your local machine
#    in order to be able to pull the Docker image from ECR.
###############################################################################################
from sagemaker.deserializers import JSONDeserializer
from sagemaker.local import LocalSession
from sagemaker.pytorch import PyTorchModel
from sagemaker.serializers import JSONSerializer

DUMMY_IAM_ROLE = 'arn:aws:iam::111111111111:role/service-role/AmazonSageMaker-ExecutionRole-20200101T000001'


def main():
    # Run everything in SageMaker local mode; local_code avoids uploading the source to S3.
    session = LocalSession()
    session.config = {'local': {'local_code': True}}

    role = DUMMY_IAM_ROLE
    model_dir = 's3://aws-ml-blog/artifacts/pytorch-yolov5-local-model-inference/model.tar.gz'

    model = PyTorchModel(
        entry_point='inference.py',
        source_dir='./code',
        role=role,
        model_data=model_dir,
        framework_version='1.8',
        py_version='py3'
    )

    print('Deploying endpoint in local mode')
    print('Note: if launching for the first time in local mode, container image download might take a few minutes to complete.')
    predictor = model.deploy(
        initial_instance_count=1,
        instance_type='local',
    )

    print('Endpoint deployed in local mode')

    predictor.serializer = JSONSerializer()
    predictor.deserializer = JSONDeserializer()
    predictions = predictor.predict("https://ultralytics.com/images/zidane.jpg")
    print("predictions: {}".format(predictions))

    print('About to delete the endpoint')
    predictor.delete_endpoint()


if __name__ == "__main__":
    main()
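
Because predict_fn serializes the detections with pandas' orient="split" layout, the value returned by predictor.predict(...) can be turned back into a DataFrame on the client side. A minimal sketch; whether the response arrives as a raw JSON string or an already-parsed object depends on the serving stack, so both cases are handled here as an assumption:

import json
from io import StringIO

import pandas as pd

# 'predictions' is the value returned by predictor.predict(...) in main() above.
if isinstance(predictions, str):
    df = pd.read_json(StringIO(predictions), orient='split')
else:
    df = pd.DataFrame(predictions['data'], columns=predictions['columns'], index=predictions['index'])
print(df[['name', 'confidence']])   # detected class names and confidence scores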
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
numpy
pandas
sagemaker>=2.0.0,<3.0.0
sagemaker[local]
