diff --git a/inference/core/entities/requests/gaze.py b/inference/core/entities/requests/gaze.py index 009738f8c..003a33533 100644 --- a/inference/core/entities/requests/gaze.py +++ b/inference/core/entities/requests/gaze.py @@ -22,7 +22,7 @@ class GazeDetectionInferenceRequest(BaseRequest): gaze_version_id: Optional[str] = Field( default=GAZE_VERSION_ID, - examples=["l2cs"], + examples=["L2CS"], description="The version ID of Gaze to be used for this request. Must be one of l2cs.", ) diff --git a/inference/core/env.py b/inference/core/env.py index 8f1964ba9..9d7cd9d74 100644 --- a/inference/core/env.py +++ b/inference/core/env.py @@ -73,7 +73,7 @@ CLIP_MODEL_ID = f"clip/{CLIP_VERSION_ID}" # Gaze version ID, default is "L2CS" -GAZE_VERSION_ID = os.getenv("GAZE_VERSION_ID", "l2cs") +GAZE_VERSION_ID = os.getenv("GAZE_VERSION_ID", "L2CS") # Gaze model ID GAZE_MODEL_ID = f"gaze/{GAZE_VERSION_ID}" diff --git a/tests/workflows/integration_tests/execution/conftest.py b/tests/workflows/integration_tests/execution/conftest.py index 440f1217a..fb1d07e01 100644 --- a/tests/workflows/integration_tests/execution/conftest.py +++ b/tests/workflows/integration_tests/execution/conftest.py @@ -96,4 +96,3 @@ def bool_env(val): @pytest.fixture(scope="function") def face_image() -> np.ndarray: return cv2.imread(os.path.join(ASSETS_DIR, "face.jpeg")) - diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_gaze.py b/tests/workflows/integration_tests/execution/test_workflow_with_gaze.py index e526a39a1..44a92593b 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_gaze.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_gaze.py @@ -1,6 +1,7 @@ +import sys + import numpy as np import pytest -import sys from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS from inference.core.managers.base import ModelManager @@ -14,7 +15,11 @@ "version": "1.0", "inputs": [ {"type": "WorkflowImage", "name": "image"}, - {"type": "WorkflowParameter", "name": "do_run_face_detection", "default_value": True}, + { + "type": "WorkflowParameter", + "name": "do_run_face_detection", + "default_value": True, + }, ], "steps": [ { @@ -76,7 +81,9 @@ workflow_definition=GAZE_DETECTION_WORKFLOW, workflow_name_in_app="gaze-detection", ) -@pytest.mark.skip(reason="Test not supported on Python 3.12+, skipping due to dependencies conflict when building CI") +@pytest.mark.skip( + reason="Test not supported on Python 3.12+, skipping due to dependencies conflict when building CI" +) def test_gaze_workflow_with_face_detection( model_manager: ModelManager, face_image: np.ndarray, @@ -109,21 +116,24 @@ def test_gaze_workflow_with_face_detection( "pitch_degrees", "visualization", }, "Expected all outputs to be registered" - + # Check face predictions assert len(result[0]["face_predictions"]) > 0, "Expected at least one face detected" assert result[0]["face_predictions"].data["prediction_type"][0] == "facial-landmark" - + # Check angles assert len(result[0]["yaw_degrees"]) == len(result[0]["face_predictions"]) assert len(result[0]["pitch_degrees"]) == len(result[0]["face_predictions"]) - + # Check visualization assert not np.array_equal( face_image, result[0]["visualization"].numpy_image ), "Expected visualization to modify the image" -@pytest.mark.skip(reason="Test not supported on Python 3.12+, skipping due to dependencies conflict when building CI") + +@pytest.mark.skip( + reason="Test not supported on Python 3.12+, skipping due to dependencies conflict when building CI" +) def test_gaze_workflow_batch_processing( model_manager: ModelManager, face_image: np.ndarray, @@ -151,6 +161,8 @@ def test_gaze_workflow_batch_processing( # then assert len(result) == 2, "Expected results for both images" # Results should be identical since we used the same image - assert result[0]["face_predictions"].box_area == result[1]["face_predictions"].box_area + assert ( + result[0]["face_predictions"].box_area == result[1]["face_predictions"].box_area + ) assert result[0]["yaw_degrees"] == result[1]["yaw_degrees"] - assert result[0]["pitch_degrees"] == result[1]["pitch_degrees"] \ No newline at end of file + assert result[0]["pitch_degrees"] == result[1]["pitch_degrees"] diff --git a/tests/workflows/unit_tests/core_steps/models/foundation/test_gaze.py b/tests/workflows/unit_tests/core_steps/models/foundation/test_gaze.py index 32dec1aa7..4bcbe5fa1 100644 --- a/tests/workflows/unit_tests/core_steps/models/foundation/test_gaze.py +++ b/tests/workflows/unit_tests/core_steps/models/foundation/test_gaze.py @@ -1,10 +1,9 @@ -from unittest.mock import MagicMock, patch -from pydantic import BaseModel from typing import List +from unittest.mock import MagicMock, patch import numpy as np import pytest -from pydantic import ValidationError +from pydantic import BaseModel, ValidationError from inference.core.workflows.core_steps.common.entities import StepExecutionMode from inference.core.workflows.core_steps.models.foundation.gaze.v1 import ( @@ -44,7 +43,7 @@ def mock_model_manager(): "landmarks": [ {"x": 120, "y": 120}, {"x": 130, "y": 120}, - ] + ], }, yaw=0.5, # ~28.6 degrees pitch=-0.2, # ~-11.5 degrees @@ -131,7 +130,7 @@ def test_run_locally(mock_model_manager, mock_workflow_image_data): "yaw_degrees", "pitch_degrees", } - + # Check angles are converted to degrees correctly assert len(result[0]["yaw_degrees"]) == 1 assert len(result[0]["pitch_degrees"]) == 1 diff --git a/tests/workflows/unit_tests/core_steps/sampling/test_identify_changes.py b/tests/workflows/unit_tests/core_steps/sampling/test_identify_changes.py index f2ec6cbc0..ee48149d3 100644 --- a/tests/workflows/unit_tests/core_steps/sampling/test_identify_changes.py +++ b/tests/workflows/unit_tests/core_steps/sampling/test_identify_changes.py @@ -9,36 +9,34 @@ "threshold_percentile": 0.2, "warmup": 3, "smoothing_factor": 0.1, - "window_size": 10 + "window_size": 10, } + def get_perturbed_value(initial_value: np.ndarray, perturbation: float) -> np.ndarray: # randomly fluctuate by +- rand in perturbation in dimensions - return initial_value + np.random.uniform(-perturbation, perturbation, size=len(initial_value)) + return initial_value + np.random.uniform( + -perturbation, perturbation, size=len(initial_value) + ) + def test_identify_changes() -> None: # given identify_changes_block = IdentifyChangesBlockV1() - + # 5 random floats between -1 and 1 initial_value = np.random.uniform(-1, 1, size=5) initial_value_normalized = np.array(initial_value) / np.linalg.norm(initial_value) # warm up - result = identify_changes_block.run( - **default_inputs, - embedding = initial_value - ) + result = identify_changes_block.run(**default_inputs, embedding=initial_value) assert result is not None assert not result.get("is_outlier") assert result.get("warming_up") for i in range(10): - result = identify_changes_block.run( - **default_inputs, - embedding = initial_value - ) + result = identify_changes_block.run(**default_inputs, embedding=initial_value) assert np.allclose(result.get("average"), initial_value_normalized) assert np.allclose(result.get("std"), [0, 0, 0, 0, 0]) @@ -48,14 +46,10 @@ def test_identify_changes() -> None: # add a bit of variance for i in range(10): result = identify_changes_block.run( - **default_inputs, - embedding = get_perturbed_value(initial_value, 1e-4) + **default_inputs, embedding=get_perturbed_value(initial_value, 1e-4) ) - result = identify_changes_block.run( - **default_inputs, - embedding = initial_value - ) + result = identify_changes_block.run(**default_inputs, embedding=initial_value) assert not result.get("is_outlier") # ensure that the average and std have changed @@ -64,11 +58,10 @@ def test_identify_changes() -> None: # make a large change result = identify_changes_block.run( - **default_inputs, - embedding = [0.5, 0.5, 0.5, 0.5, 0.5] + **default_inputs, embedding=[0.5, 0.5, 0.5, 0.5, 0.5] ) assert result.get("is_outlier") # average and std should not be zero anymore assert not np.allclose(result.get("average"), initial_value_normalized) - assert not np.all(result.get("std") == [0, 0, 0, 0, 0]) \ No newline at end of file + assert not np.all(result.get("std") == [0, 0, 0, 0, 0]) diff --git a/tests/workflows/unit_tests/core_steps/sampling/test_identify_outliers.py b/tests/workflows/unit_tests/core_steps/sampling/test_identify_outliers.py index 3b3ddd566..2e5e77f97 100644 --- a/tests/workflows/unit_tests/core_steps/sampling/test_identify_outliers.py +++ b/tests/workflows/unit_tests/core_steps/sampling/test_identify_outliers.py @@ -1,31 +1,30 @@ import numpy as np +import pytest from inference.core.workflows.core_steps.sampling.identify_outliers.v1 import ( IdentifyOutliersBlockV1, ) -default_inputs = { - "threshold_percentile": 0.05, - "warmup": 3, - "window_size": 32 -} +default_inputs = {"threshold_percentile": 0.05, "warmup": 3, "window_size": 32} + def get_perturbed_value(initial_value: np.ndarray, perturbation: float) -> np.ndarray: # randomly fluctuate by +- rand in perturbation in dimensions - return initial_value + np.random.uniform(-perturbation, perturbation, size=len(initial_value)) + return initial_value + np.random.uniform( + -perturbation, perturbation, size=len(initial_value) + ) + +@pytest.mark.skip(reason="Solve flakiness of the block: https://github.com/roboflow/inference/issues/901") def test_identify_outliers() -> None: # given identify_changes_block = IdentifyOutliersBlockV1() - + # 5 random floats between -1 and 1 initial_value = np.random.uniform(-1, 1, size=5) # warm up - result = identify_changes_block.run( - **default_inputs, - embedding = initial_value - ) + result = identify_changes_block.run(**default_inputs, embedding=initial_value) assert result is not None assert not result.get("is_outlier") @@ -34,8 +33,7 @@ def test_identify_outliers() -> None: # add a bit of variance for i in range(32): result = identify_changes_block.run( - **default_inputs, - embedding = get_perturbed_value(initial_value, 1e-6) + **default_inputs, embedding=get_perturbed_value(initial_value, 1e-6) ) assert not result.get("is_outlier") @@ -43,8 +41,7 @@ def test_identify_outliers() -> None: # make a large change result = identify_changes_block.run( - **default_inputs, - embedding = [0.5, 0.5, 0.5, 0.5, 0.5] + **default_inputs, embedding=[0.5, 0.5, 0.5, 0.5, 0.5] ) - assert result.get("is_outlier") \ No newline at end of file + assert result.get("is_outlier")