From 23cb10abc148c6004e5dde015555cf6038d6e49f Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Fri, 27 Oct 2023 22:23:46 +0100 Subject: [PATCH 01/18] start work on video inference bindings --- roboflow/models/video.py | 193 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 roboflow/models/video.py diff --git a/roboflow/models/video.py b/roboflow/models/video.py new file mode 100644 index 00000000..75c0fb15 --- /dev/null +++ b/roboflow/models/video.py @@ -0,0 +1,193 @@ +import json +from urllib.parse import urljoin +import magic + +import requests +import time + +from roboflow.config import API_URL +from roboflow.models.inference import InferenceModel + +VALID_VIDEO_EXTENSIONS = [".mp4"] + + +def is_mp4(filename): + mime = magic.Magic(mime=True) + file_type = mime.from_file(filename) + return file_type == "video/mp4" + + +def is_valid_video(filename): + return is_mp4(filename) + + +class VideoInferenceModel(InferenceModel): + """ + Run inference on an object detection model hosted on Roboflow or served through Roboflow Inference. + """ + + def __init__( + self, + api_key, + ): + """ + Create a ObjectDetectionModel object through which you can run inference. + + Args: + api_key (str): Your API key (obtained via your workspace API settings page). + """ + self.__api_key = api_key + + def predict( + self, + video_path, + ) -> str: + """ + Infers detections based on image from specified model and image path. + + Args: + image_path (str): path to the image you'd like to perform prediction on + hosted (bool): whether the image you're providing is hosted on Roboflow + format (str): The format of the output. + + Returns: + PredictionGroup Object + + Example: + >>> import roboflow + + >>> rf = roboflow.Roboflow(api_key="") + + >>> project = rf.workspace().project("PROJECT_ID") + + >>> model = project.version("1").model + + >>> prediction = model.predict("video.mp4") + """ + + url = urljoin(API_URL, "/video_upload_signed_url/?api_key=", self.__api_key) + + if not is_valid_video(video_path): + raise Exception("Video path is not valid") + + payload = json.dumps( + { + "file_name": video_path, + } + ) + + headers = {"Content-Type": "application/json"} + + response = requests.request("POST", url, headers=headers, data=payload) + + signed_url = response.json()["signed_url"] + + print("Uploaded video to signed url: " + signed_url) + + url = urljoin(API_URL, "/videoinfer/?api_key=", self.__api_key) + + payload = json.dumps( + { + "input_url": signed_url, + "infer_fps": 5, + "models": [ + { + "model_id": self.dataset_id, + "model_version": self.version, + "inference_type": "object-detection", + } + ], + } + ) + + response = requests.request("POST", url, headers=headers, data=payload) + + job_id = response.json()["job_id"] + + self.job_id = job_id + + return job_id + + def poll_for_results(self, job_id: str = None) -> dict: + """ + Polls the Roboflow API to check if video inference is complete. 
+ + Returns: + Inference results as a dict + + Example: + >>> import roboflow + + >>> rf = roboflow.Roboflow(api_key="") + + >>> project = rf.workspace().project("PROJECT_ID") + + >>> model = project.version("1").model + + >>> prediction = model.predict("video.mp4") + + >>> results = model.poll_for_results() + """ + + if job_id is None: + job_id = self.job_id + + url = urljoin( + API_URL, "/videoinfer/?api_key=", self.__api_key, "&job_id=", self.job_id + ) + + response = requests.get(url, headers={"Content-Type": "application/json"}) + + data = response.json() + + if data["success"] != 0 or data["status_info"] != "success": + print("Job not complete yet. Check back in a minute.") + return {} + + output_signed_url = data["output_signed_url"] + + inference_data = requests.get( + output_signed_url, headers={"Content-Type": "application/json"} + ) + + # frame_offset and model name are top-level keys + return inference_data.json() + + def poll_until_results(self, job_id) -> dict: + """ + Polls the Roboflow API to check if video inference is complete. + + When inference is complete, the results are returned. + + Returns: + Inference results as a dict + + Example: + >>> import roboflow + + >>> rf = roboflow.Roboflow(api_key="") + + >>> project = rf.workspace().project("PROJECT_ID") + + >>> model = project.version("1").model + + >>> prediction = model.predict("video.mp4") + + >>> results = model.poll_until_results() + """ + if job_id is None: + job_id = self.job_id + + attempts = 0 + + while True: + response = self.poll_for_response() + + time.sleep(60) + + print(f"({attempts * 60}s): Checking for inference results") + + attempts += 1 + + if response != {}: + return response From beb4735ed678d964db5d199c9135da856b9079a1 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Fri, 27 Oct 2023 22:31:01 +0100 Subject: [PATCH 02/18] update predict() response --- roboflow/models/video.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/roboflow/models/video.py b/roboflow/models/video.py index 75c0fb15..2aac8290 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -4,6 +4,7 @@ import requests import time +from typing import List from roboflow.config import API_URL from roboflow.models.inference import InferenceModel @@ -31,7 +32,7 @@ def __init__( api_key, ): """ - Create a ObjectDetectionModel object through which you can run inference. + Create a VideoDetectionModel object through which you can run inference on videos. Args: api_key (str): Your API key (obtained via your workspace API settings page). @@ -41,7 +42,7 @@ def __init__( def predict( self, video_path, - ) -> str: + ) -> List[str, str]: """ Infers detections based on image from specified model and image path. 
@@ -106,7 +107,7 @@ def predict( self.job_id = job_id - return job_id + return job_id, signed_url def poll_for_results(self, job_id: str = None) -> dict: """ From 00220cc9f216ee22bd9206b734cd2544a75a60b4 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Fri, 27 Oct 2023 22:39:52 +0100 Subject: [PATCH 03/18] add additional video model support, add new validation flags --- roboflow/models/video.py | 71 ++++++++++++++++++++++++++++++---------- 1 file changed, 53 insertions(+), 18 deletions(-) diff --git a/roboflow/models/video.py b/roboflow/models/video.py index 2aac8290..cdff859c 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -1,15 +1,29 @@ import json +import time +from typing import List from urllib.parse import urljoin -import magic +import magic import requests -import time -from typing import List +import os from roboflow.config import API_URL from roboflow.models.inference import InferenceModel -VALID_VIDEO_EXTENSIONS = [".mp4"] +MAXIMUM_VIDEO_SIZE = 1024 * 1024 * 1024 * 5 # 5GB + +SUPPORTED_ADDITIONAL_MODELS = { + "clip": { + "model_id": "clip", + "model_version": "1", + "inference_type": "clip-embed-image" + }, + "gaze": { + "model_id": "gaze", + "model_version": "1", + "inference_type": "gaze-detection" + } +} def is_mp4(filename): @@ -19,8 +33,15 @@ def is_mp4(filename): def is_valid_video(filename): - return is_mp4(filename) - + # check file size + if os.path.getsize(filename) > MAXIMUM_VIDEO_SIZE: + return False + + # check file type + if not is_mp4(filename): + return False + + return True class VideoInferenceModel(InferenceModel): """ @@ -41,18 +62,20 @@ def __init__( def predict( self, - video_path, + video_path: str, + fps: int = 5, + additional_models: list = None, ) -> List[str, str]: """ Infers detections based on image from specified model and image path. Args: image_path (str): path to the image you'd like to perform prediction on - hosted (bool): whether the image you're providing is hosted on Roboflow - format (str): The format of the output. 
+ video_path (str): path to the video you'd like to perform prediction on + fps (int): frames per second to run inference Returns: - PredictionGroup Object + A list of the signed url and job id Example: >>> import roboflow @@ -63,11 +86,18 @@ def predict( >>> model = project.version("1").model - >>> prediction = model.predict("video.mp4") + >>> prediction = model.predict("video.mp4", fps=5) """ url = urljoin(API_URL, "/video_upload_signed_url/?api_key=", self.__api_key) + if fps > 5: + raise Exception("FPS must be less than or equal to 5.") + + for model in additional_models: + if model not in SUPPORTED_ADDITIONAL_MODELS: + raise Exception(f"Model {model} is not supported for video inference.") + if not is_valid_video(video_path): raise Exception("Video path is not valid") @@ -87,17 +117,22 @@ def predict( url = urljoin(API_URL, "/videoinfer/?api_key=", self.__api_key) + models = [ + { + "model_id": self.dataset_id, + "model_version": self.version, + "inference_type": "object-detection", + } + ] + + for model in additional_models: + models.append(SUPPORTED_ADDITIONAL_MODELS[model]) + payload = json.dumps( { "input_url": signed_url, "infer_fps": 5, - "models": [ - { - "model_id": self.dataset_id, - "model_version": self.version, - "inference_type": "object-detection", - } - ], + "models": models } ) From 1cca73ee46c02674ea6f36a4a3be83dbfcfbed7e Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Fri, 27 Oct 2023 22:52:17 +0100 Subject: [PATCH 04/18] add new mime types to check --- roboflow/models/video.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/roboflow/models/video.py b/roboflow/models/video.py index cdff859c..c79d400d 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -12,6 +12,8 @@ MAXIMUM_VIDEO_SIZE = 1024 * 1024 * 1024 * 5 # 5GB +SUPPORTED_ROBOFLOW_MODELS = ["object-detection", "classification", "instance-segmentation"] + SUPPORTED_ADDITIONAL_MODELS = { "clip": { "model_id": "clip", @@ -26,10 +28,10 @@ } -def is_mp4(filename): +def is_valid_mime(filename): mime = magic.Magic(mime=True) file_type = mime.from_file(filename) - return file_type == "video/mp4" + return file_type in ["video/mp4", "video/avi", "video/webm"] def is_valid_video(filename): @@ -38,7 +40,7 @@ def is_valid_video(filename): return False # check file type - if not is_mp4(filename): + if not is_valid_mime(filename): return False return True @@ -63,6 +65,7 @@ def __init__( def predict( self, video_path: str, + inference_type: str, fps: int = 5, additional_models: list = None, ) -> List[str, str]: @@ -70,8 +73,8 @@ def predict( Infers detections based on image from specified model and image path. 
Args: - image_path (str): path to the image you'd like to perform prediction on video_path (str): path to the video you'd like to perform prediction on + inference_type (str): type of the model to run fps (int): frames per second to run inference Returns: @@ -97,6 +100,9 @@ def predict( for model in additional_models: if model not in SUPPORTED_ADDITIONAL_MODELS: raise Exception(f"Model {model} is not supported for video inference.") + + if inference_type not in SUPPORTED_ROBOFLOW_MODELS: + raise Exception(f"Model {inference_type} is not supported for video inference.") if not is_valid_video(video_path): raise Exception("Video path is not valid") @@ -121,7 +127,7 @@ def predict( { "model_id": self.dataset_id, "model_version": self.version, - "inference_type": "object-detection", + "inference_type": self.inference_type, } ] From c91fee95baf4e84fa2cf1deeadccab2e59088a94 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Fri, 27 Oct 2023 22:52:39 +0100 Subject: [PATCH 05/18] remove file size check --- roboflow/models/video.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/roboflow/models/video.py b/roboflow/models/video.py index c79d400d..e7271b17 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -5,13 +5,10 @@ import magic import requests -import os from roboflow.config import API_URL from roboflow.models.inference import InferenceModel -MAXIMUM_VIDEO_SIZE = 1024 * 1024 * 1024 * 5 # 5GB - SUPPORTED_ROBOFLOW_MODELS = ["object-detection", "classification", "instance-segmentation"] SUPPORTED_ADDITIONAL_MODELS = { @@ -35,10 +32,6 @@ def is_valid_mime(filename): def is_valid_video(filename): - # check file size - if os.path.getsize(filename) > MAXIMUM_VIDEO_SIZE: - return False - # check file type if not is_valid_mime(filename): return False From 915eddb3bd1ad82428751dfdb61320583d913779 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Fri, 27 Oct 2023 22:54:17 +0100 Subject: [PATCH 06/18] add python-magic requirement, update docstring --- requirements.txt | 3 ++- roboflow/models/video.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 178e17c3..640632b8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,4 +16,5 @@ supervision urllib3>=1.26.6 tqdm>=4.41.0 PyYAML>=5.3.1 -requests_toolbelt \ No newline at end of file +requests_toolbelt +python-magic \ No newline at end of file diff --git a/roboflow/models/video.py b/roboflow/models/video.py index e7271b17..5b568ee3 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -82,7 +82,7 @@ def predict( >>> model = project.version("1").model - >>> prediction = model.predict("video.mp4", fps=5) + >>> prediction = model.predict("video.mp4", fps=5, inference_type="object-detection" """ url = urljoin(API_URL, "/video_upload_signed_url/?api_key=", self.__api_key) From e6b627e7f78c8e701e0fbf1af3cf2399785f4392 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Mon, 30 Oct 2023 19:10:52 +0000 Subject: [PATCH 07/18] work on video inference --- roboflow/models/inference.py | 227 ++++++++++++++++++++++++++++ roboflow/models/object_detection.py | 5 +- roboflow/models/video.py | 2 +- 3 files changed, 231 insertions(+), 3 deletions(-) diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index 2c22104b..eaf2c384 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -3,11 +3,48 @@ import requests from PIL import Image +import time +import json +from typing import List +# import magic +from urllib.parse 
import urljoin from requests_toolbelt.multipart.encoder import MultipartEncoder from roboflow.util.image_utils import validate_image_path from roboflow.util.prediction import PredictionGroup +from roboflow.config import API_URL + +API_URL = "https://api.roboflow.one" + +SUPPORTED_ROBOFLOW_MODELS = ["batch-video"] + +SUPPORTED_ADDITIONAL_MODELS = { + "clip": { + "model_id": "clip", + "model_version": "1", + "inference_type": "clip-embed-image" + }, + "gaze": { + "model_id": "gaze", + "model_version": "1", + "inference_type": "gaze-detection" + } +} + + +def is_valid_mime(filename): + mime = magic.Magic(mime=True) + file_type = mime.from_file(filename) + return file_type in ["video/mp4", "video/avi", "video/webm"] + + +def is_valid_video(filename): + # # check file type + # if not is_valid_mime(filename): + # return False + + return True class InferenceModel: def __init__( @@ -25,6 +62,7 @@ def __init__( api_key (str): private roboflow api key version_id (str): the ID of the dataset version to use for inference """ + self.__api_key = api_key self.id = version_id @@ -111,3 +149,192 @@ def predict(self, image_path, prediction_type=None, **kwargs): image_dims=image_dims, colors=self.colors, ) + + + def predict_video( + self, + video_path: str, + fps: int = 5, + additional_models: list = [], + prediction_type: str = "batch-video", + ) -> List[str]: + """ + Infers detections based on image from specified model and image path. + + Args: + video_path (str): path to the video you'd like to perform prediction on + prediction_type (str): type of the model to run + fps (int): frames per second to run inference + + Returns: + A list of the signed url and job id + + Example: + >>> import roboflow + + >>> rf = roboflow.Roboflow(api_key="") + + >>> project = rf.workspace().project("PROJECT_ID") + + >>> model = project.version("1").model + + >>> prediction = model.predict("video.mp4", fps=5, inference_type="object-detection") + """ + + url = urljoin(API_URL, "/video_upload_signed_url?api_key=" + self.__api_key) + + if fps > 5: + raise Exception("FPS must be less than or equal to 5.") + + for model in additional_models: + if model not in SUPPORTED_ADDITIONAL_MODELS: + raise Exception(f"Model {model} is not supported for video inference.") + + if prediction_type not in SUPPORTED_ROBOFLOW_MODELS: + raise Exception(f"{prediction_type} is not supported for video inference.") + + if not is_valid_video(video_path): + raise Exception("Video path is not valid") + + payload = json.dumps( + { + "file_name": video_path, + } + ) + + headers = {"Content-Type": "application/json"} + + try: + response = requests.request("POST", url, headers=headers, data=payload) + except Exception as e: + raise Exception(f"Error uploading video: {e}") + + signed_url = response.json()["signed_url"] + + print("Uploaded video to signed url: " + signed_url) + + url = urljoin(API_URL, "/videoinfer/?api_key=" + self.__api_key) + + # check if ObjectDetectionModel, ClassificationModel, or InstanceSegmentationModel + model_class = self.__class__.__name__ + + if model_class == "ObjectDetectionModel": + self.type = "object-detection" + elif model_class == "ClassificationModel": + self.type = "classification" + elif model_class == "InstanceSegmentationModel": + self.type = "instance-segmentation" + else: + raise Exception("Model type not supported for video inference.") + + models = [ + { + "model_id": self.dataset_id, + "model_version": self.version, + "inference_type": self.type, + } + ] + + for model in additional_models: + 
models.append(SUPPORTED_ADDITIONAL_MODELS[model]) + + payload = json.dumps( + { + "input_url": signed_url, + "infer_fps": 5, + "models": models + } + ) + + response = requests.request("POST", url, headers=headers, data=payload) + + job_id = response.json()["job_id"] + + self.job_id = job_id + + return job_id, signed_url + + def poll_for_video_results(self, job_id: str = None) -> dict: + """ + Polls the Roboflow API to check if video inference is complete. + + Returns: + Inference results as a dict + + Example: + >>> import roboflow + + >>> rf = roboflow.Roboflow(api_key="") + + >>> project = rf.workspace().project("PROJECT_ID") + + >>> model = project.version("1").model + + >>> prediction = model.predict("video.mp4") + + >>> results = model.poll_for_video_results() + """ + + if job_id is None: + job_id = self.job_id + + url = urljoin( + API_URL, "/videoinfer/?api_key=" + self.__api_key + "&job_id=" + self.job_id + ) + + response = requests.get(url, headers={"Content-Type": "application/json"}) + + data = response.json() + + # for model in infer_models: + # if model["infer_success"] == 1: + if data.get("output_signed_url") is None: + return {} + + output_signed_url = data["output_signed_url"] + + inference_data = requests.get( + output_signed_url, headers={"Content-Type": "application/json"} + ) + + # frame_offset and model name are top-level keys + return inference_data.json() + + def poll_until_video_results(self, job_id) -> dict: + """ + Polls the Roboflow API to check if video inference is complete. + + When inference is complete, the results are returned. + + Returns: + Inference results as a dict + + Example: + >>> import roboflow + + >>> rf = roboflow.Roboflow(api_key="") + + >>> project = rf.workspace().project("PROJECT_ID") + + >>> model = project.version("1").model + + >>> prediction = model.predict("video.mp4") + + >>> results = model.poll_until_results() + """ + if job_id is None: + job_id = self.job_id + + attempts = 0 + + while True: + print(f"({attempts * 60}s): Checking for inference results") + + response = self.poll_for_video_results() + + time.sleep(60) + + attempts += 1 + + if response != {}: + return response \ No newline at end of file diff --git a/roboflow/models/object_detection.py b/roboflow/models/object_detection.py index 26c05b23..9cf49b2c 100644 --- a/roboflow/models/object_detection.py +++ b/roboflow/models/object_detection.py @@ -17,9 +17,9 @@ from roboflow.util.image_utils import check_image_url from roboflow.util.prediction import PredictionGroup from roboflow.util.versions import print_warn_for_wrong_dependencies_versions +from roboflow.models.inference import InferenceModel - -class ObjectDetectionModel: +class ObjectDetectionModel(InferenceModel): """ Run inference on an object detection model hosted on Roboflow or served through Roboflow Inference. 
""" @@ -67,6 +67,7 @@ def __init__( """ # Instantiate different API URL parameters # To be moved to predict + super(ObjectDetectionModel, self).__init__(api_key, id) self.__api_key = api_key self.id = id self.name = name diff --git a/roboflow/models/video.py b/roboflow/models/video.py index 5b568ee3..563fc1cc 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -82,7 +82,7 @@ def predict( >>> model = project.version("1").model - >>> prediction = model.predict("video.mp4", fps=5, inference_type="object-detection" + >>> prediction = model.predict("video.mp4", fps=5, inference_type="object-detection") """ url = urljoin(API_URL, "/video_upload_signed_url/?api_key=", self.__api_key) From f5f06ec94f58138ec7b8d1101bf3bc13054731a7 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Tue, 31 Oct 2023 17:33:35 +0000 Subject: [PATCH 08/18] add video upload logic --- roboflow/models/inference.py | 89 +++++++++++++++++------------------- 1 file changed, 41 insertions(+), 48 deletions(-) diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index eaf2c384..7731290e 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -6,6 +6,7 @@ import time import json from typing import List + # import magic from urllib.parse import urljoin from requests_toolbelt.multipart.encoder import MultipartEncoder @@ -15,37 +16,22 @@ from roboflow.config import API_URL -API_URL = "https://api.roboflow.one" - SUPPORTED_ROBOFLOW_MODELS = ["batch-video"] SUPPORTED_ADDITIONAL_MODELS = { "clip": { "model_id": "clip", "model_version": "1", - "inference_type": "clip-embed-image" + "inference_type": "clip-embed-image", }, "gaze": { "model_id": "gaze", "model_version": "1", - "inference_type": "gaze-detection" - } + "inference_type": "gaze-detection", + }, } -def is_valid_mime(filename): - mime = magic.Magic(mime=True) - file_type = mime.from_file(filename) - return file_type in ["video/mp4", "video/avi", "video/webm"] - - -def is_valid_video(filename): - # # check file type - # if not is_valid_mime(filename): - # return False - - return True - class InferenceModel: def __init__( self, @@ -150,7 +136,6 @@ def predict(self, image_path, prediction_type=None, **kwargs): colors=self.colors, ) - def predict_video( self, video_path: str, @@ -185,16 +170,25 @@ def predict_video( if fps > 5: raise Exception("FPS must be less than or equal to 5.") - + for model in additional_models: if model not in SUPPORTED_ADDITIONAL_MODELS: raise Exception(f"Model {model} is not supported for video inference.") - + if prediction_type not in SUPPORTED_ROBOFLOW_MODELS: raise Exception(f"{prediction_type} is not supported for video inference.") - if not is_valid_video(video_path): - raise Exception("Video path is not valid") + # check if ObjectDetectionModel, ClassificationModel, or InstanceSegmentationModel + model_class = self.__class__.__name__ + + if model_class == "ObjectDetectionModel": + self.type = "object-detection" + elif model_class == "ClassificationModel": + self.type = "classification" + elif model_class == "InstanceSegmentationModel": + self.type = "instance-segmentation" + else: + raise Exception("Model type not supported for video inference.") payload = json.dumps( { @@ -202,30 +196,33 @@ def predict_video( } ) - headers = {"Content-Type": "application/json"} + if not video_path.startswith(("http://", "https://")): + headers = {"Content-Type": "application/json"} - try: - response = requests.request("POST", url, headers=headers, data=payload) - except Exception as e: - raise 
Exception(f"Error uploading video: {e}") - - signed_url = response.json()["signed_url"] + try: + response = requests.request("POST", url, headers=headers, data=payload) + except Exception as e: + raise Exception(f"Error uploading video: {e}") - print("Uploaded video to signed url: " + signed_url) + signed_url = response.json()["signed_url"] - url = urljoin(API_URL, "/videoinfer/?api_key=" + self.__api_key) + # make a POST request to the signed URL + headers = {"Content-Type": "application/octet-stream"} - # check if ObjectDetectionModel, ClassificationModel, or InstanceSegmentationModel - model_class = self.__class__.__name__ + try: + with open(video_path, "rb") as f: + video_data = f.read() + except Exception as e: + raise Exception(f"Error reading video: {e}") - if model_class == "ObjectDetectionModel": - self.type = "object-detection" - elif model_class == "ClassificationModel": - self.type = "classification" - elif model_class == "InstanceSegmentationModel": - self.type = "instance-segmentation" + try: + requests.put(signed_url, data=video_data, headers=headers) + except Exception as e: + raise Exception(f"There was an error uploading the video: {e}") else: - raise Exception("Model type not supported for video inference.") + signed_url = video_path + + url = urljoin(API_URL, "/videoinfer/?api_key=" + self.__api_key) models = [ { @@ -239,11 +236,7 @@ def predict_video( models.append(SUPPORTED_ADDITIONAL_MODELS[model]) payload = json.dumps( - { - "input_url": signed_url, - "infer_fps": 5, - "models": models - } + {"input_url": signed_url, "infer_fps": 5, "models": models} ) response = requests.request("POST", url, headers=headers, data=payload) @@ -290,7 +283,7 @@ def poll_for_video_results(self, job_id: str = None) -> dict: # if model["infer_success"] == 1: if data.get("output_signed_url") is None: return {} - + output_signed_url = data["output_signed_url"] inference_data = requests.get( @@ -337,4 +330,4 @@ def poll_until_video_results(self, job_id) -> dict: attempts += 1 if response != {}: - return response \ No newline at end of file + return response From 7e1a93d4e78a4a6024733bcbe4e9546f53ff534e Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Tue, 31 Oct 2023 17:35:22 +0000 Subject: [PATCH 09/18] update status check --- roboflow/models/inference.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index 7731290e..b1c157b2 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -279,9 +279,7 @@ def poll_for_video_results(self, job_id: str = None) -> dict: data = response.json() - # for model in infer_models: - # if model["infer_success"] == 1: - if data.get("output_signed_url") is None: + if data.get("status") != 0: return {} output_signed_url = data["output_signed_url"] From 4550254395ac8ac2b2014b08ad6a8b1d0889832d Mon Sep 17 00:00:00 2001 From: Sachin Agarwal Date: Tue, 31 Oct 2023 17:46:24 +0000 Subject: [PATCH 10/18] Minor tweaks --- roboflow/models/inference.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index b1c157b2..e9f14a25 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -239,8 +239,8 @@ def predict_video( {"input_url": signed_url, "infer_fps": 5, "models": models} ) - response = requests.request("POST", url, headers=headers, data=payload) - + response = requests.request("POST", url, headers = {"Content-Type": "application/json"}, data=payload) + # 
check if error happens {'error': 'An unexpected error occurred while queueing the video job at 2023-10-31T17:41:58.076Z'} job_id = response.json()["job_id"] self.job_id = job_id From 96db4b33bca69f88b982b4cafb5973e37765cc01 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Tue, 31 Oct 2023 18:05:55 +0000 Subject: [PATCH 11/18] add headers reset --- roboflow/models/inference.py | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index e9f14a25..3d41c139 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -203,6 +203,9 @@ def predict_video( response = requests.request("POST", url, headers=headers, data=payload) except Exception as e: raise Exception(f"Error uploading video: {e}") + + if not response.ok: + raise Exception(f"Error uploading video: {response.text}") signed_url = response.json()["signed_url"] @@ -216,9 +219,12 @@ def predict_video( raise Exception(f"Error reading video: {e}") try: - requests.put(signed_url, data=video_data, headers=headers) + result = requests.put(signed_url, data=video_data, headers=headers) except Exception as e: raise Exception(f"There was an error uploading the video: {e}") + + if not result.ok: + raise Exception(f"There was an error uploading the video: {result.text}") else: signed_url = video_path @@ -239,8 +245,16 @@ def predict_video( {"input_url": signed_url, "infer_fps": 5, "models": models} ) - response = requests.request("POST", url, headers = {"Content-Type": "application/json"}, data=payload) - # check if error happens {'error': 'An unexpected error occurred while queueing the video job at 2023-10-31T17:41:58.076Z'} + headers = {"Content-Type": "application/json"} + + try: + response = requests.request("POST", url, headers=headers, data=payload) + except Exception as e: + raise Exception(f"Error starting video inference: {e}") + + if not response.ok: + raise Exception(f"Error starting video inference: {response.text}") + job_id = response.json()["job_id"] self.job_id = job_id @@ -275,7 +289,13 @@ def poll_for_video_results(self, job_id: str = None) -> dict: API_URL, "/videoinfer/?api_key=" + self.__api_key + "&job_id=" + self.job_id ) - response = requests.get(url, headers={"Content-Type": "application/json"}) + try: + response = requests.get(url, headers={"Content-Type": "application/json"}) + except Exception as e: + raise Exception(f"Error getting video inference results: {e}") + + if not response.ok: + raise Exception(f"Error getting video inference results: {response.text}") data = response.json() From dd9dcfc0fefa34f6613375aa03cebd0c7c01163a Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Tue, 31 Oct 2023 19:25:20 +0000 Subject: [PATCH 12/18] add CLIPModel and GazeModel classes, allow use in video inference --- roboflow/__init__.py | 1 + roboflow/models/__init__.py | 2 + roboflow/models/clip.py | 16 +++++++ roboflow/models/gaze.py | 16 +++++++ roboflow/models/inference.py | 55 +++++++++++++++--------- roboflow/models/instance_segmentation.py | 3 +- roboflow/models/object_detection.py | 6 ++- roboflow/models/semantic_segmentation.py | 3 +- roboflow/models/video.py | 29 +++++++------ 9 files changed, 93 insertions(+), 38 deletions(-) create mode 100644 roboflow/models/clip.py create mode 100644 roboflow/models/gaze.py diff --git a/roboflow/__init__.py b/roboflow/__init__.py index 165c561f..cafa9de2 100644 --- a/roboflow/__init__.py +++ b/roboflow/__init__.py @@ -11,6 +11,7 @@ from roboflow.core.project 
import Project from roboflow.core.workspace import Workspace from roboflow.util.general import write_line +from roboflow.models import CLIPModel, GazeModel __version__ = "1.1.7" diff --git a/roboflow/models/__init__.py b/roboflow/models/__init__.py index e69de29b..634783d9 100644 --- a/roboflow/models/__init__.py +++ b/roboflow/models/__init__.py @@ -0,0 +1,2 @@ +from .clip import CLIPModel +from .gaze import GazeModel diff --git a/roboflow/models/clip.py b/roboflow/models/clip.py new file mode 100644 index 00000000..56c39c6a --- /dev/null +++ b/roboflow/models/clip.py @@ -0,0 +1,16 @@ +from .inference import InferenceModel + + +class CLIPModel(InferenceModel): + """ + Run inference on CLIP, hosted on Roboflow. + """ + + def __init__(self, api_key: str): + """ + Initialize a CLIP model. + + Args: + api_key: Your Roboflow API key. + """ + super().__init__(api_key=api_key) diff --git a/roboflow/models/gaze.py b/roboflow/models/gaze.py new file mode 100644 index 00000000..e9fb7dfc --- /dev/null +++ b/roboflow/models/gaze.py @@ -0,0 +1,16 @@ +from .inference import InferenceModel + + +class GazeModel(InferenceModel): + """ + Run inference on a gaze detection model, hosted on Roboflow. + """ + + def __init__(self, api_key: str): + """ + Initialize a CLIP model. + + Args: + api_key: Your Roboflow API key. + """ + super().__init__(api_key=api_key) diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index 3d41c139..bd243b97 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -1,21 +1,19 @@ import io -import urllib - -import requests -from PIL import Image -import time import json +import time +import urllib from typing import List - # import magic from urllib.parse import urljoin + +import requests +from PIL import Image from requests_toolbelt.multipart.encoder import MultipartEncoder +from roboflow.config import API_URL from roboflow.util.image_utils import validate_image_path from roboflow.util.prediction import PredictionGroup -from roboflow.config import API_URL - SUPPORTED_ROBOFLOW_MODELS = ["batch-video"] SUPPORTED_ADDITIONAL_MODELS = { @@ -178,7 +176,6 @@ def predict_video( if prediction_type not in SUPPORTED_ROBOFLOW_MODELS: raise Exception(f"{prediction_type} is not supported for video inference.") - # check if ObjectDetectionModel, ClassificationModel, or InstanceSegmentationModel model_class = self.__class__.__name__ if model_class == "ObjectDetectionModel": @@ -187,6 +184,10 @@ def predict_video( self.type = "classification" elif model_class == "InstanceSegmentationModel": self.type = "instance-segmentation" + elif model_class == "GazeModel": + self.type = "gaze-detection" + elif model_class == "CLIP": + self.type = "clip-embed-image" else: raise Exception("Model type not supported for video inference.") @@ -203,7 +204,7 @@ def predict_video( response = requests.request("POST", url, headers=headers, data=payload) except Exception as e: raise Exception(f"Error uploading video: {e}") - + if not response.ok: raise Exception(f"Error uploading video: {response.text}") @@ -222,21 +223,33 @@ def predict_video( result = requests.put(signed_url, data=video_data, headers=headers) except Exception as e: raise Exception(f"There was an error uploading the video: {e}") - + if not result.ok: - raise Exception(f"There was an error uploading the video: {result.text}") + raise Exception( + f"There was an error uploading the video: {result.text}" + ) else: signed_url = video_path url = urljoin(API_URL, "/videoinfer/?api_key=" + self.__api_key) - models = [ 
- { - "model_id": self.dataset_id, - "model_version": self.version, - "inference_type": self.type, - } - ] + if model_class in ("CLIPModel", "GazeModel"): + if model_class == "CLIPModel": + model = "clip" + else: + model = "gaze" + + models = [ + { + "model_id": SUPPORTED_ADDITIONAL_MODELS[model]["model_id"], + "model_version": SUPPORTED_ADDITIONAL_MODELS[model][ + "model_version" + ], + "inference_type": SUPPORTED_ADDITIONAL_MODELS[model][ + "inference_type" + ], + } + ] for model in additional_models: models.append(SUPPORTED_ADDITIONAL_MODELS[model]) @@ -251,7 +264,7 @@ def predict_video( response = requests.request("POST", url, headers=headers, data=payload) except Exception as e: raise Exception(f"Error starting video inference: {e}") - + if not response.ok: raise Exception(f"Error starting video inference: {response.text}") @@ -293,7 +306,7 @@ def poll_for_video_results(self, job_id: str = None) -> dict: response = requests.get(url, headers={"Content-Type": "application/json"}) except Exception as e: raise Exception(f"Error getting video inference results: {e}") - + if not response.ok: raise Exception(f"Error getting video inference results: {response.text}") diff --git a/roboflow/models/instance_segmentation.py b/roboflow/models/instance_segmentation.py index 807a4cde..07d6b6a7 100644 --- a/roboflow/models/instance_segmentation.py +++ b/roboflow/models/instance_segmentation.py @@ -1,4 +1,5 @@ -from roboflow.config import INSTANCE_SEGMENTATION_MODEL, INSTANCE_SEGMENTATION_URL +from roboflow.config import (INSTANCE_SEGMENTATION_MODEL, + INSTANCE_SEGMENTATION_URL) from roboflow.models.inference import InferenceModel diff --git a/roboflow/models/object_detection.py b/roboflow/models/object_detection.py index 9cf49b2c..8870822a 100644 --- a/roboflow/models/object_detection.py +++ b/roboflow/models/object_detection.py @@ -13,11 +13,13 @@ import tqdm from PIL import Image -from roboflow.config import API_URL, OBJECT_DETECTION_MODEL, OBJECT_DETECTION_URL +from roboflow.config import (API_URL, OBJECT_DETECTION_MODEL, + OBJECT_DETECTION_URL) +from roboflow.models.inference import InferenceModel from roboflow.util.image_utils import check_image_url from roboflow.util.prediction import PredictionGroup from roboflow.util.versions import print_warn_for_wrong_dependencies_versions -from roboflow.models.inference import InferenceModel + class ObjectDetectionModel(InferenceModel): """ diff --git a/roboflow/models/semantic_segmentation.py b/roboflow/models/semantic_segmentation.py index 47b20c70..7c5eb069 100644 --- a/roboflow/models/semantic_segmentation.py +++ b/roboflow/models/semantic_segmentation.py @@ -1,4 +1,5 @@ -from roboflow.config import SEMANTIC_SEGMENTATION_MODEL, SEMANTIC_SEGMENTATION_URL +from roboflow.config import (SEMANTIC_SEGMENTATION_MODEL, + SEMANTIC_SEGMENTATION_URL) from roboflow.models.inference import InferenceModel diff --git a/roboflow/models/video.py b/roboflow/models/video.py index 563fc1cc..4a2f679f 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -9,19 +9,23 @@ from roboflow.config import API_URL from roboflow.models.inference import InferenceModel -SUPPORTED_ROBOFLOW_MODELS = ["object-detection", "classification", "instance-segmentation"] +SUPPORTED_ROBOFLOW_MODELS = [ + "object-detection", + "classification", + "instance-segmentation", +] SUPPORTED_ADDITIONAL_MODELS = { "clip": { "model_id": "clip", "model_version": "1", - "inference_type": "clip-embed-image" + "inference_type": "clip-embed-image", }, "gaze": { "model_id": "gaze", 
"model_version": "1", - "inference_type": "gaze-detection" - } + "inference_type": "gaze-detection", + }, } @@ -35,9 +39,10 @@ def is_valid_video(filename): # check file type if not is_valid_mime(filename): return False - + return True + class VideoInferenceModel(InferenceModel): """ Run inference on an object detection model hosted on Roboflow or served through Roboflow Inference. @@ -89,13 +94,15 @@ def predict( if fps > 5: raise Exception("FPS must be less than or equal to 5.") - + for model in additional_models: if model not in SUPPORTED_ADDITIONAL_MODELS: raise Exception(f"Model {model} is not supported for video inference.") - + if inference_type not in SUPPORTED_ROBOFLOW_MODELS: - raise Exception(f"Model {inference_type} is not supported for video inference.") + raise Exception( + f"Model {inference_type} is not supported for video inference." + ) if not is_valid_video(video_path): raise Exception("Video path is not valid") @@ -128,11 +135,7 @@ def predict( models.append(SUPPORTED_ADDITIONAL_MODELS[model]) payload = json.dumps( - { - "input_url": signed_url, - "infer_fps": 5, - "models": models - } + {"input_url": signed_url, "infer_fps": 5, "models": models} ) response = requests.request("POST", url, headers=headers, data=payload) From 8c22cdf5d1eef45c4fa951b6a90311b9b65bd58e Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Tue, 31 Oct 2023 19:26:52 +0000 Subject: [PATCH 13/18] run black --- roboflow/models/inference.py | 1 + roboflow/models/instance_segmentation.py | 3 +-- roboflow/models/object_detection.py | 3 +-- roboflow/models/semantic_segmentation.py | 3 +-- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index bd243b97..b78d958f 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -3,6 +3,7 @@ import time import urllib from typing import List + # import magic from urllib.parse import urljoin diff --git a/roboflow/models/instance_segmentation.py b/roboflow/models/instance_segmentation.py index 07d6b6a7..807a4cde 100644 --- a/roboflow/models/instance_segmentation.py +++ b/roboflow/models/instance_segmentation.py @@ -1,5 +1,4 @@ -from roboflow.config import (INSTANCE_SEGMENTATION_MODEL, - INSTANCE_SEGMENTATION_URL) +from roboflow.config import INSTANCE_SEGMENTATION_MODEL, INSTANCE_SEGMENTATION_URL from roboflow.models.inference import InferenceModel diff --git a/roboflow/models/object_detection.py b/roboflow/models/object_detection.py index 8870822a..654beef0 100644 --- a/roboflow/models/object_detection.py +++ b/roboflow/models/object_detection.py @@ -13,8 +13,7 @@ import tqdm from PIL import Image -from roboflow.config import (API_URL, OBJECT_DETECTION_MODEL, - OBJECT_DETECTION_URL) +from roboflow.config import API_URL, OBJECT_DETECTION_MODEL, OBJECT_DETECTION_URL from roboflow.models.inference import InferenceModel from roboflow.util.image_utils import check_image_url from roboflow.util.prediction import PredictionGroup diff --git a/roboflow/models/semantic_segmentation.py b/roboflow/models/semantic_segmentation.py index 7c5eb069..47b20c70 100644 --- a/roboflow/models/semantic_segmentation.py +++ b/roboflow/models/semantic_segmentation.py @@ -1,5 +1,4 @@ -from roboflow.config import (SEMANTIC_SEGMENTATION_MODEL, - SEMANTIC_SEGMENTATION_URL) +from roboflow.config import SEMANTIC_SEGMENTATION_MODEL, SEMANTIC_SEGMENTATION_URL from roboflow.models.inference import InferenceModel From 5187587d161d6f4a9eb1406de0bf16a5f3b25aa4 Mon Sep 17 00:00:00 2001 From: James Gallagher 
Date: Wed, 1 Nov 2023 12:39:54 +0000 Subject: [PATCH 14/18] get base file name for signed url creation --- roboflow/models/inference.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index b78d958f..94a9aa40 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -1,10 +1,10 @@ import io +import os import json import time import urllib from typing import List -# import magic from urllib.parse import urljoin import requests @@ -194,7 +194,7 @@ def predict_video( payload = json.dumps( { - "file_name": video_path, + "file_name": os.path.basename(video_path), } ) From 7af4637654777841eb531ed3f353a6b22e35cb3c Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Wed, 1 Nov 2023 19:16:55 +0000 Subject: [PATCH 15/18] fix clip, allow access to signed url expires --- roboflow/models/clip.py | 2 +- roboflow/models/inference.py | 21 ++++++++++++++------- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/roboflow/models/clip.py b/roboflow/models/clip.py index 56c39c6a..a5cfe256 100644 --- a/roboflow/models/clip.py +++ b/roboflow/models/clip.py @@ -13,4 +13,4 @@ def __init__(self, api_key: str): Args: api_key: Your Roboflow API key. """ - super().__init__(api_key=api_key) + super().__init__(api_key=api_key, version_id="BASE_MODEL") diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index 94a9aa40..fc5c9a7f 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -51,10 +51,11 @@ def __init__( self.__api_key = api_key self.id = version_id - version_info = self.id.rsplit("/") - self.dataset_id = version_info[1] - self.version = version_info[2] - self.colors = {} if colors is None else colors + if version_id != "BASE_MODEL": + version_info = self.id.rsplit("/") + self.dataset_id = version_info[1] + self.version = version_info[2] + self.colors = {} if colors is None else colors def __get_image_params(self, image_path): """ @@ -162,9 +163,11 @@ def predict_video( >>> model = project.version("1").model - >>> prediction = model.predict("video.mp4", fps=5, inference_type="object-detection") + >>> job_id, signed_url, signed_url_expires = model.predict_video("video.mp4", fps=5, inference_type="object-detection") """ + signed_url_expires = None + url = urljoin(API_URL, "/video_upload_signed_url?api_key=" + self.__api_key) if fps > 5: @@ -187,7 +190,7 @@ def predict_video( self.type = "instance-segmentation" elif model_class == "GazeModel": self.type = "gaze-detection" - elif model_class == "CLIP": + elif model_class == "CLIPModel": self.type = "clip-embed-image" else: raise Exception("Model type not supported for video inference.") @@ -211,6 +214,10 @@ def predict_video( signed_url = response.json()["signed_url"] + signed_url_expires = ( + signed_url.split("&X-Goog-Expires")[1].split("&")[0].strip("=") + ) + # make a POST request to the signed URL headers = {"Content-Type": "application/octet-stream"} @@ -273,7 +280,7 @@ def predict_video( self.job_id = job_id - return job_id, signed_url + return job_id, signed_url, signed_url_expires def poll_for_video_results(self, job_id: str = None) -> dict: """ From 27cbecc5d76647f04f7b4e558bf1075125c55cb0 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Fri, 3 Nov 2023 10:53:36 +0000 Subject: [PATCH 16/18] respond to feedback, run black and isort --- roboflow/__init__.py | 2 +- roboflow/models/inference.py | 3 +-- roboflow/models/video.py | 45 ++++++++++++++++++++++-------------- 3 files changed, 30 
insertions(+), 20 deletions(-) diff --git a/roboflow/__init__.py b/roboflow/__init__.py index cafa9de2..c7ad68ab 100644 --- a/roboflow/__init__.py +++ b/roboflow/__init__.py @@ -10,8 +10,8 @@ from roboflow.config import API_URL, APP_URL, DEMO_KEYS, load_roboflow_api_key from roboflow.core.project import Project from roboflow.core.workspace import Workspace -from roboflow.util.general import write_line from roboflow.models import CLIPModel, GazeModel +from roboflow.util.general import write_line __version__ = "1.1.7" diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py index fc5c9a7f..64fce042 100644 --- a/roboflow/models/inference.py +++ b/roboflow/models/inference.py @@ -1,10 +1,9 @@ import io -import os import json +import os import time import urllib from typing import List - from urllib.parse import urljoin import requests diff --git a/roboflow/models/video.py b/roboflow/models/video.py index 4a2f679f..191a593e 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -92,8 +92,10 @@ def predict( url = urljoin(API_URL, "/video_upload_signed_url/?api_key=", self.__api_key) - if fps > 5: - raise Exception("FPS must be less than or equal to 5.") + # get video frame rate + + if fps > 30: + raise Exception("FPS must be less than or equal to 30.") for model in additional_models: if model not in SUPPORTED_ADDITIONAL_MODELS: @@ -174,22 +176,31 @@ def poll_for_results(self, job_id: str = None) -> dict: API_URL, "/videoinfer/?api_key=", self.__api_key, "&job_id=", self.job_id ) - response = requests.get(url, headers={"Content-Type": "application/json"}) + try: + response = requests.get(url, headers={"Content-Type": "application/json"}) + except Exception as e: + print(e) + raise Exception("Error polling for results.") - data = response.json() + if not response.ok: + raise Exception("Error polling for results.") - if data["success"] != 0 or data["status_info"] != "success": - print("Job not complete yet. Check back in a minute.") - return {} + data = response.json() - output_signed_url = data["output_signed_url"] + if data["success"] == 0: + output_signed_url = data["output_signed_url"] - inference_data = requests.get( - output_signed_url, headers={"Content-Type": "application/json"} - ) + inference_data = requests.get( + output_signed_url, headers={"Content-Type": "application/json"} + ) - # frame_offset and model name are top-level keys - return inference_data.json() + # frame_offset and model name are top-level keys + return inference_data.json() + elif data["success"] == 1: + print("Job not complete yet. 
Check back in a minute.") + return {} + else: + raise Exception("Job failed.") def poll_until_results(self, job_id) -> dict: """ @@ -221,11 +232,11 @@ def poll_until_results(self, job_id) -> dict: while True: response = self.poll_for_response() - time.sleep(60) - - print(f"({attempts * 60}s): Checking for inference results") - attempts += 1 if response != {}: return response + + print(f"({attempts * 60}s): Checking for inference results") + + time.sleep(60) From 5dd4ee91aaf0215279b77d54160c9362f0d0c487 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Fri, 3 Nov 2023 11:03:47 +0000 Subject: [PATCH 17/18] respond to feedback, run black and isort --- roboflow/models/video.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/roboflow/models/video.py b/roboflow/models/video.py index 191a593e..092d6c28 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -92,14 +92,12 @@ def predict( url = urljoin(API_URL, "/video_upload_signed_url/?api_key=", self.__api_key) - # get video frame rate - if fps > 30: raise Exception("FPS must be less than or equal to 30.") for model in additional_models: if model not in SUPPORTED_ADDITIONAL_MODELS: - raise Exception(f"Model {model} is not supported for video inference.") + raise Exception(f"Model {model} is no t supported for video inference.") if inference_type not in SUPPORTED_ROBOFLOW_MODELS: raise Exception( @@ -181,7 +179,7 @@ def poll_for_results(self, job_id: str = None) -> dict: except Exception as e: print(e) raise Exception("Error polling for results.") - + if not response.ok: raise Exception("Error polling for results.") From aa250cca7972ddccdf4bec1771009d08b5fada62 Mon Sep 17 00:00:00 2001 From: James Gallagher Date: Fri, 3 Nov 2023 13:32:55 +0000 Subject: [PATCH 18/18] run black --- roboflow/models/video.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roboflow/models/video.py b/roboflow/models/video.py index 092d6c28..b17ac361 100644 --- a/roboflow/models/video.py +++ b/roboflow/models/video.py @@ -179,7 +179,7 @@ def poll_for_results(self, job_id: str = None) -> dict: except Exception as e: print(e) raise Exception("Error polling for results.") - + if not response.ok: raise Exception("Error polling for results.")