@@ -70,11 +70,12 @@ def hello():
70
70
###############################################################################
71
71
# We will make slight changes to the above snippet, so that it suits our API
72
72
# definition. First, we will rename the method to ``predict``. We will update
73
- # the endpoint path to ``/predict`` and make it so that it also accepts POST
73
+ # the endpoint path to ``/predict``. Since the image files will be sent via
74
+ # HTTP POST requests, we will update it so that it accepts only POST
74
75
# requests:
75
76
76
77
77
- @app.route('/predict', methods=['GET', 'POST'])
78
+ @app.route('/predict', methods=['POST'])
78
79
def predict():
79
80
return 'Hello World!'
80
81
@@ -86,7 +87,7 @@ def predict():
86
87
from flask import Flask, jsonify
87
88
app = Flask(__name__)
88
89
89
- @app.route('/predict', methods=['GET', 'POST'])
90
+ @app.route('/predict', methods=['POST'])
90
91
def predict():
91
92
return jsonify({'class_id': 'IMAGE_NET_XXX', 'class_name': 'Cat'})
92
93
@@ -144,15 +145,15 @@ def transform_image(image_bytes):
144
145
#
145
146
# Now will use a pretrained DenseNet 121 model to predict the image class. We
146
147
# will use one from ``torchvision`` library, load the model and get an
147
- # inference. Instead of using a pretrained model, you can also load your own
148
- # model, to learn more about it check this
149
- # :doc:`tutorial </beginner/saving_loading_models>`.
148
+ # inference. While we'll be using a pretrained model in this example, you can
149
+ # use this same approach for your own models. See more about loading your
150
+ # models in this :doc:`tutorial </beginner/saving_loading_models>`.
150
151
151
152
from torchvision import models
152
153
153
- # make sure to pass `pretrained` as `True` to use the pretrained weights
154
+ # Make sure to pass `pretrained` as `True` to use the pretrained weights:
154
155
model = models.densenet121(pretrained=True)
155
- # since we are using our model only for inference, switch to `eval` mode
156
+ # Since we are using our model only for inference, switch to `eval` mode:
156
157
model.eval()
157
158
158
159
@@ -165,11 +166,13 @@ def get_prediction(image_bytes):
165
166
166
167
######################################################################
167
168
# The tensor ``y_hat`` will contain the index of the predicted class id.
168
- # However we need a human readable class name. For that we need a class id
169
+ # However, we need a human readable class name. For that we need a class id
169
170
# to name mapping. Download
170
171
# `this file <https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json>`_
171
- # and place it in current directory as file ``imagenet_class_index.json``. We
172
- # will load this JSON file and get the class name of the predicted index.
172
+ # and place it in current directory as file ``imagenet_class_index.json``.
173
+ # This file contains the mapping of ImageNet class id to ImageNet class
174
+ # name. We will load this JSON file and get the class name of the
175
+ # predicted index.
173
176
174
177
import json
175
178
@@ -195,7 +198,7 @@ def get_prediction(image_bytes):
195
198
print(get_prediction(image_bytes=image_bytes))
196
199
197
200
######################################################################
198
- # and you should get a response like this
201
+ # You should get a response like this:
199
202
200
203
['n02124075', 'Egyptian_cat']
201
204
@@ -204,12 +207,12 @@ def get_prediction(image_bytes):
204
207
# readable name.
205
208
#
206
209
# .. Note ::
207
- # Did you notice why ``model`` variable is not part of ``get_prediction``
208
- # method? Or why it is a global variable? Loading a model can be an
209
- # expensive operation, memory, CPU wise . If we loaded the model in the
210
- # ``get_prediction`` method, then it will get unnecessarily loaded every
210
+ # Did you notice that the ``model`` variable is not part of the ``get_prediction``
211
+ # method? Or why is ``model`` a global variable? Loading a model can be an
212
+ # expensive operation in terms of memory and compute. If we loaded the model in the
213
+ # ``get_prediction`` method, then it would get unnecessarily loaded every
211
214
# time the method is called. Since, we are building a web server, there
212
- # could be thousands of requests per second and we cannot keep loading the
215
+ # could be thousands of requests per second, and we cannot keep loading the
213
216
# model every time. So, we keep the model loaded in memory once. In
214
217
# production systems it is a good idea to load the model first and then
215
218
# start serving the requests.
@@ -219,9 +222,8 @@ def get_prediction(image_bytes):
219
222
# ---------------------------------------
220
223
#
221
224
# In this final part we will add our model to our Flask API server. Since
222
- # our API server supposed to take an image file, we will update our ``predict``
223
- # method to read files from the requests. We will also restrict this method
224
- # to POST requests for the sake of simplicity
225
+ # our API server is supposed to take an image file, we will update our ``predict``
226
+ # method to read files from the requests:
225
227
226
228
from flask import request
227
229
@@ -295,13 +297,13 @@ def predict():
295
297
296
298
#######################################################################
297
299
# We can use a command line tool like curl or Postman to send requests to
298
- # this webserver
300
+ # this webserver:
299
301
#
300
302
# ::
301
303
#
302
304
# $ curl -X POST -F file=@cat_pic.jpeg http://localhost:5000/predict
303
305
#
304
- # And you will get a response in the form
306
+ # You will get a response in the form:
305
307
#
306
308
# ::
307
309
#
@@ -313,24 +315,26 @@ def predict():
313
315
# Next steps
314
316
# --------------
315
317
#
316
- # The server we wrote is quite triavial and is not ready for production. So,
317
- # here are some things you can do to make it better
318
+ # The server we wrote is quite trivial and may not do everything
319
+ # you need for your production application. So, here are some things you
320
+ # can do to make it better:
318
321
#
319
322
# - The endpoint ``/predict`` assumes that there will always be an image file
320
323
# in the request. This may not hold true for all requests. Our user may
321
324
# send image with a different parameter or send no images at all.
322
325
#
323
- # - User may send non-image type files too. Since we are not handling errors,
324
- # this will break our server. Loading the image within an exception block
325
- # is a good idea.
326
+ # - The user may send non-image type files too. Since we are not handling
327
+ # errors, this will break our server. Loading the image within an
328
+ # exception block is a good idea.
326
329
#
327
- # - There is a possibility that the image sent by user may not get recoginzed
328
- # by our model. Above code should be updated to handle this case.
330
+ # - Even though the model can recognize a large number of classes of images,
331
+ # it may not be able to recognize all images. Enhance the implementation
332
+ # to handle cases when the model does not recognize anything in the image.
329
333
#
330
- # - We run the Flask server in a development mode, which not suitable for
331
- # deploying in production. You may check `this tutorial <http://flask.pocoo.org/docs/1.0/tutorial/deploy/>`_
334
+ # - We run the Flask server in development mode, which is not suitable for
335
+ # deploying in production. You can check out `this tutorial <http://flask.pocoo.org/docs/1.0/tutorial/deploy/>`_
332
336
# for deploying a Flask server in production.
333
337
#
334
- # - You can also add a welcome page with a form which takes the image and
335
- # displays the prediction. Check the `demo <https://pytorch-imagenet.herokuapp.com/>`_
336
- # of the same or its `source code <https://github.com/avinassh/pytorch-flask-api-heroku>`_.
338
+ # - You can also add a UI by creating a page with a form which takes the image and
339
+ # displays the prediction. Check out the `demo <https://pytorch-imagenet.herokuapp.com/>`_
340
+ # of a similar project and its `source code <https://github.com/avinassh/pytorch-flask-api-heroku>`_.
0 commit comments