From 8ac54bf13824b2ac4d10558b02047d73336f6b39 Mon Sep 17 00:00:00 2001 From: Andrew Wason Date: Tue, 1 Oct 2024 18:53:37 -0400 Subject: [PATCH] key support. output file support. new tests. GH actions CI. --- .github/workflows/test.yml | 29 +- README.md | 18 +- llm_transformers.py | 76 +++-- pyproject.toml | 3 + tests/test_transformers.py | 588 +++++++++++++++++++++++++++++++++++++ uv.lock | 2 + 6 files changed, 687 insertions(+), 29 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 900e481..cbcbdf6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -10,18 +10,33 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.10", "3.12"] steps: - uses: actions/checkout@v4 + - name: Install uv + uses: astral-sh/setup-uv@v2 + with: + enable-cache: true + cache-dependency-glob: "uv.lock" - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - cache: pip - cache-dependency-path: pyproject.toml - name: Install dependencies - run: | - pip install -e '.[test]' + run: uv sync --all-extras --dev --python ${{ matrix.python-version }} --python-preference only-system + - name: Cache models + id: cache-models + uses: actions/cache@v4 + with: + path: ~/.cache/huggingface/hub/ + save-always: true + # Update cache every time since models may be added + # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache + key: models-${{ runner.os }}-${{ github.run_id }} + restore-keys: | + models-${{ runner.os }} - name: Run tests - run: | - python -m pytest + run: uv run pytest tests + + +#XXX lint \ No newline at end of file diff --git a/README.md b/README.md index d575942..7c254c7 100644 --- a/README.md +++ b/README.md @@ -16,8 +16,19 @@ llm install llm-transformers ## Usage XXX document `-o verbose True` -## Transformer tasks +Most models are freely accessible, but some require accepting a license agreement and using a Hugging Face [API token](https://huggingface.co/settings/tokens) that has been granted access to the model. +You can use `llm keys set huggingface`, set the `HF_TOKEN` env var, or pass the `--key` option to `llm`. + +```sh-session +$ llm -m transformers -o model meta-llama/Llama-3.2-1B "A dog has" +Error: You are trying to access a gated repo. +Make sure to have access to it at https://huggingface.co/meta-llama/Llama-3.2-1B. +$ llm --key hf_******************** -m transformers -o model meta-llama/Llama-3.2-1B "A dog has" +A dog has been named as the killer of a woman who was found dead in her home. +``` + +## Transformer Pipeline Tasks ### [audio-classification](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.AudioClassificationPipeline) @@ -92,6 +104,9 @@ Not supported. 
$ llm -m transformers -o task image-segmentation https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png /var/folders/b1/1j9kkk053txc5krqbh0lj5t00000gn/T/tmp0z8zvd8i.png (bird: 0.999439) /var/folders/b1/1j9kkk053txc5krqbh0lj5t00000gn/T/tmpik_7r5qn.png (bird: 0.998787) +$ llm -m transformers -o task image-segmentation -o output /tmp/segment.png https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png +/tmp/segment-00.png (bird: 0.999439) +/tmp/segment-01.png (bird: 0.998787) ``` ### [image-to-image](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.ImageToImagePipeline) @@ -213,6 +228,8 @@ Your question was: "What is the capital of France?" ```sh-session $ llm -m transformers -o kwargs '{"generate_kwargs": {"max_new_tokens": 100}}' -o model facebook/musicgen-small "techno music" /var/folders/b1/1j9kkk053txc5krqbh0lj5t00000gn/T/tmpoueh05y6.wav +$ llm -m transformers -o task text-to-audio "Hello world" +/var/folders/b1/1j9kkk053txc5krqbh0lj5t00000gn/T/tmpmpwhkd8p.wav ``` ### [token-classification](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.TokenClassificationPipeline) diff --git a/llm_transformers.py b/llm_transformers.py index fc4b29a..2c51eba 100644 --- a/llm_transformers.py +++ b/llm_transformers.py @@ -1,7 +1,10 @@ +# Copyright (C) 2024 Andrew Wason +# SPDX-License-Identifier: Apache-2.0 import csv import itertools import json import logging +import pathlib import re import tempfile import typing as ta @@ -10,10 +13,11 @@ import click import llm +import numpy import soundfile as sf import torch from PIL import Image -from pydantic import ConfigDict, Field, field_validator, model_validator +from pydantic import Field, field_validator, model_validator from transformers import pipeline from transformers.pipelines import Pipeline, check_task, get_supported_tasks from transformers.utils import get_available_devices @@ -31,10 +35,31 @@ def supported_tasks() -> ta.Iterator[str]: yield task -def save_image(image: Image.Image) -> str: - with tempfile.NamedTemporaryFile(suffix=".png", delete=False, delete_on_close=False) as f: - image.save(f, format="png") - return f.name +def save_image(image: Image.Image, output: pathlib.Path | None) -> str: + if output is None: + # delete=False: the caller consumes the file (delete_on_close would require Python 3.12) + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f: + image.save(f, format="png") + return f.name + else: + image.save(str(output)) + return str(output) + + +def save_audio(audio: numpy.ndarray, sample_rate: int, output: pathlib.Path | None) -> str: + def save(f: ta.BinaryIO) -> None: + # musicgen is shape (batch_size, num_channels, sequence_length) + # https://huggingface.co/docs/transformers/v4.45.1/en/model_doc/musicgen#unconditional-generation + # XXX check shape of other audio pipelines + sf.write(f, audio[0].T, sample_rate) + + if output is None: + with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f: + save(f) + return f.name + else: + with open(output, "wb") as f: + save(f) + return str(output) def handle_required_kwarg(kwargs: dict, options: llm.Options, name: str, format: str, task: str) -> None: @@ -92,6 +117,8 @@ def silence(verbose: bool | None = None): class Transformers(llm.Model): model_id = "transformers" + needs_key = "huggingface" # only some models need a key + key_env_var = "HF_TOKEN" pipe: Pipeline | None = None @@ -108,8 +135,12 @@ class Options(llm.Options): description="Additional context for transformer, often a file path or URL, required 
by some transformers.", default=None, ) + output: pathlib.Path | None = Field( + description="Output file path. Models that produce binary image/audio output save it to this file, or to a temporary file if not specified.", + default=None, + ) device: str | None = Field( - description="Device name. `llm transformers list-devices`.", default=None + description="Torch device name; see `llm transformers list-devices`.", default=None ) verbose: bool | None = Field( description="Logging is disabled by default, enable this to see transformers warnings.", @@ -216,24 +247,20 @@ def handle_inputs( return args, kwargs def handle_result( - self, task: str, result: ta.Any, response: llm.Response + self, task: str, result: ta.Any, prompt: llm.Prompt, response: llm.Response ) -> ta.Generator[str, None, None]: match task, result: case "image-to-image", Image.Image() as image: - path = save_image(image) + path = save_image(image, prompt.options.output) response.response_json = {task: {"output": path}} yield path case "automatic-speech-recognition", {"text": str(text)}: response.response_json = {task: result} yield text - case "text-to-audio", {"audio": audio, "sampling_rate": int(sampling_rate)}: - with tempfile.NamedTemporaryFile(suffix=".wav", delete=False, delete_on_close=False) as f: - # musicgen is shape (batch_size, num_channels, sequence_length) - # https://huggingface.co/docs/transformers/v4.45.1/en/model_doc/musicgen#unconditional-generation - # XXX check shape of other audio pipelines - sf.write(f, audio[0].T, sampling_rate) - response.response_json = {task: {"output": f.name}} - yield f.name + case "text-to-audio", {"audio": numpy.ndarray() as audio, "sampling_rate": int(sample_rate)}: + path = save_audio(audio, sample_rate, prompt.options.output) + response.response_json = {task: {"output": path}} + yield path case "object-detection", [ { "score": float(), @@ -245,8 +272,14 @@ yield json.dumps(result, indent=4) case "image-segmentation", [{"score": float(), "label": str(), "mask": Image.Image()}, *_]: responses = [] - for item in result: - path = save_image(item["mask"]) + if prompt.options.output: + out = prompt.options.output + output_template = str(out.with_name(f"{out.stem}-{{:02}}{out.suffix}")) + else: + output_template = None + for i, item in enumerate(result): + output = pathlib.Path(output_template.format(i)) if output_template else None + path = save_image(item["mask"], output) responses.append({"score": item["score"], "label": item["label"], "output": path}) response.response_json = {task: responses} yield "\n".join( @@ -272,8 +305,8 @@ ]: response.response_json = {task: result} yield "\n".join(f"{item['sequence']} (score={item['score']})" for item in result) - case "depth-estimation", {"predicted_depth": torch.Tensor(), "depth": Image.Image(depth)}: - path = save_image(depth) + case "depth-estimation", {"predicted_depth": torch.Tensor(), "depth": Image.Image() as depth}: + path = save_image(depth, prompt.options.output) response.response_json = {task: {"output": path}} yield path case "document-question-answering", [ @@ -361,6 +394,7 @@ def execute( if prompt.options.device is not None else None, framework="pt", + token=self.key, ) elif (prompt.options.task and self.pipe.task != prompt.options.task) or ( prompt.options.model and self.pipe.model.name_or_path != prompt.options.model @@ -375,4 +409,4 @@ result = self.pipe(*args, **kwargs) - yield from self.handle_result(normalized_task, result, response) + yield from 
self.handle_result(normalized_task, result, prompt, response) diff --git a/pyproject.toml b/pyproject.toml index ebad08e..b09aa1f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,5 @@ +# Copyright (C) 2024 Andrew Wason +# SPDX-License-Identifier: Apache-2.0 [project] name = "llm-transformers" version = "0.1" @@ -20,6 +22,7 @@ dependencies = [ "protobuf", "pandas", "av", + "numpy>=2.1.1", ] [project.urls] diff --git a/tests/test_transformers.py b/tests/test_transformers.py index 765a750..930bd13 100644 --- a/tests/test_transformers.py +++ b/tests/test_transformers.py @@ -1,4 +1,592 @@ +# Copyright (C) 2024 Andrew Wason +# SPDX-License-Identifier: Apache-2.0 +import json +import pathlib +import re +import sys +import tempfile +from contextlib import ExitStack, contextmanager + +import pytest +import soundfile as sf +from llm.cli import cli from llm.plugins import pm +from PIL import Image + + +def image_validator(*sizes: tuple[int, int]): + def validator(out: str) -> None: + paths = out.splitlines() + result = all(Image.open(path).size == size for size, path in zip(sizes, paths, strict=True)) + for path in paths: + pathlib.Path(path).unlink(missing_ok=True) + assert result + + return validator + + +def audio_validator(sample_rate: int): + def validator(out: str) -> None: + path = out.strip() + actual_sample_rate = sf.read(path)[1] + pathlib.Path(path).unlink(missing_ok=True) + assert actual_sample_rate == sample_rate + + return validator + + +def equals_validator(a, b): + assert a == b + + +def regex_validator(value, regex): + assert re.match(regex, value, re.MULTILINE) + + +def startswith_validator(out: str, start: str): + assert out.startswith(start) + + +def segment_validator(out: str) -> None: + lines = out.splitlines() + result = all( + line.split(" ", maxsplit=1)[1] == expected + for line, expected in zip(lines, ["(bird: 0.999439)", "(bird: 0.998787)"], strict=True) + ) + for line in lines: + pathlib.Path(line.split(" ", maxsplit=1)[0]).unlink(missing_ok=True) + assert result + + +@contextmanager +def prepare_table(): + with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as f: # delete_on_close requires Python 3.12 + f.write( + "Repository,Stars,Contributors,Programming language\n" + "Transformers,36542,651,Python\n" + "Datasets,4512,77,Python\n" + 'Tokenizers,3934,34,"Rust, Python and NodeJS"\n' + ) + f.close() + yield f.name + pathlib.Path(f.name).unlink(missing_ok=True) + + +testdata = { + "audio-classification": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "audio-classification", + "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac", + ], + lambda out: startswith_validator(out, "_unknown_ "), + ), + "automatic-speech-recognition": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "automatic-speech-recognition", + "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac", + ], + lambda out: equals_validator( + out, + ( + "HE HOPED THERE WOULD BE STEW FOR DINNER TURNIPS AND CARROTS AND BRUISED POTATOES AND FAT " + "MUTTON PIECES TO BE LADLED OUT IN THICK PEPPERED FLOWER FAT AND SAUCE\n" + ), + ), + ), + "depth-estimation": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "depth-estimation", + "http://images.cocodataset.org/val2017/000000039769.jpg", + ], + image_validator((640, 480)), + ), + "document-question-answering": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "document-question-answering", + "-o", + "context", + 
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", + "What is the invoice number?", + ], + lambda out: equals_validator(out, "us-001\n"), + ), + "fill-mask": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "fill-mask", + "My is about to explode", + ], + lambda out: equals_validator( + out, + ( + "My brain is about to explode (score=0.09140042215585709)\n" + "My heart is about to explode (score=0.07742168009281158)\n" + "My head is about to explode (score=0.05137857422232628)\n" + "My fridge is about to explode (score=0.029346412047743797)\n" + "My house is about to explode (score=0.02866862528026104)\n" + ), + ), + ), + "image-classification": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "image-classification", + "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ], + lambda out: equals_validator( + out, + ( + "macaw (0.9905233979225159)\n" + "African grey, African gray, Psittacus erithacus (0.005603480152785778)\n" + "toucan (0.001056905253790319)\n" + "sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita (0.0006811501225456595)\n" + "lorikeet (0.0006714339251630008)\n" + ), + ), + ), + "image-segmentation": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "image-segmentation", + "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ], + segment_validator, + ), + "image-to-image": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "image-to-image", + "http://images.cocodataset.org/val2017/000000039769.jpg", + ], + image_validator((1296, 976)), + ), + "image-to-text": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "image-to-text", + "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ], + lambda out: equals_validator(out, "two birds are standing next to each other \n"), + ), + "object-detection": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "object-detection", + "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ], + lambda out: equals_validator( + json.loads(out), + [ + { + "score": 0.9966394901275635, + "label": "bird", + "box": {"xmin": 69, "ymin": 171, "xmax": 396, "ymax": 507}, + }, + { + "score": 0.999381422996521, + "label": "bird", + "box": {"xmin": 398, "ymin": 105, "xmax": 767, "ymax": 507}, + }, + ], + ), + ), + "question-answering": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "question-answering", + "-o", + "context", + "My name is Wolfgang and I live in Berlin", + "Where do I live?", + ], + lambda out: equals_validator(out, "Berlin\n"), + ), + "summarization": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "summarization", + "-o", + "kwargs", + '{"min_length": 2, "max_length": 7}', + "An apple a day, keeps the doctor away", + ], + lambda out: equals_validator(out, " An apple a day\n"), + ), + "table-question-answering": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "table-question-answering", + "-o", + "context", + prepare_table, + "How many stars does the transformers repository have?", + ], + lambda out: equals_validator(out, "AVERAGE > 36542\n"), + ), + "text2text-generation": ( + [ + "llm", + "-m", + 
"transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "text2text-generation", + "question: What is 42 ? context: 42 is the answer to life, the universe and everything", + ], + lambda out: equals_validator(out, "the answer to life, the universe and everything\n"), + ), + "text-classification": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "text-classification", + "We are very happy to show you the 🤗 Transformers library", + ], + lambda out: equals_validator(out, "POSITIVE (0.9997681975364685)\n"), + ), + "text-generation": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "text-generation", + "I am going to elect", + ], + lambda out: startswith_validator(out, "I am going to elect"), + ), + "text-to-audio": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "kwargs", + '{"generate_kwargs": {"max_new_tokens": 100}}', + "-o", + "model", + "facebook/musicgen-small", + "techno music", + ], + audio_validator(32000), + ), + "token-classification": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "token-classification", + "My name is Sarah and I live in London", + ], + lambda out: equals_validator( + out, "Sarah (I-PER: 0.9982994198799133)\nLondon (I-LOC: 0.998397171497345)\n" + ), + ), + "translation_en_to_fr": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "translation_en_to_fr", + "How old are you?", + ], + lambda out: equals_validator(out, " quel âge êtes-vous?\n"), + ), + "video-classification": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "video-classification", + "https://huggingface.co/datasets/Xuehai/MMWorld/resolve/main/Amazing%20street%20dance%20performance%20from%20Futunity%20UK%20-%20Move%20It%202013/Amazing%20street%20dance%20performance%20from%20Futunity%20UK%20-%20Move%20It%202013.mp4", + ], + lambda out: equals_validator( + out, + ( + "dancing ballet (0.006608937866985798)\n" + "spinning poi (0.006111182738095522)\n" + "air drumming (0.005756791681051254)\n" + "singing (0.005747966933995485)\n" + "punching bag (0.00565463537350297)\n" + ), + ), + ), + "visual-question-answering": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o" "task", + "visual-question-answering", + "-o", + "context", + "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png", + "What is she wearing?", + ], + lambda out: regex_validator( + out, + ( + "hat \\(0.948026\\d+\\)\n" + "fedora \\(0.00863\\d+\\)\n" + "clothes \\(0.003124\\d+\\)\n" + "sun hat \\(0.002937\\d+\\)\n" + "nothing \\(0.002096\\d+\\)\n" + ), + ), + ), + "zero-shot-classification": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "zero-shot-classification", + "-o", + "context", + "urgent,not urgent,phone,tablet,computer", + "I have a problem with my iphone that needs to be resolved asap!!", + ], + lambda out: equals_validator( + out, + ( + "urgent (0.5036348700523376)\n" + "phone (0.4788002371788025)\n" + "computer (0.012600351125001907)\n" + "not urgent (0.0026557915844023228)\n" + "tablet (0.0023087668232619762)\n" + ), + ), + ), + "zero-shot-image-classification": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "zero-shot-image-classification", + "-o", + "context", + "black and white,photorealist,painting", + 
"https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ], + lambda out: equals_validator( + out, + ( + "black and white (0.9736384749412537)\n" + "photorealist (0.02141517587006092)\n" + "painting (0.004946451168507338)\n" + ), + ), + ), + "zero-shot-audio-classification": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "zero-shot-audio-classification", + "-o", + "context", + "Sound of a bird,Sound of a dog", + "https://huggingface.co/datasets/s3prl/Nonspeech/resolve/main/animal_sound/n52.wav", + ], + lambda out: equals_validator( + out, + ("Sound of a bird (0.9998763799667358)\n" "Sound of a dog (0.00012355657236184925)\n"), + ), + ), + "zero-shot-object-detection": ( + [ + "llm", + "-m", + "transformers", + "-o", + "verbose", + "true", + "-o", + "task", + "zero-shot-object-detection", + "-o", + "context", + "cat,couch", + "http://images.cocodataset.org/val2017/000000039769.jpg", + ], + lambda out: equals_validator( + json.loads(out), + [ + { + "score": 0.2868139445781708, + "label": "cat", + "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}, + }, + { + "score": 0.2537268102169037, + "label": "cat", + "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}, + }, + { + "score": 0.12082991003990173, + "label": "couch", + "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}, + }, + ], + ), + ), +} + + +@pytest.mark.parametrize("llm_args,validator", testdata.values(), ids=testdata.keys()) +def test_transformer(monkeypatch, capsys, llm_args, validator): + with ExitStack() as stack: + prepared_args = [stack.enter_context(arg()) if callable(arg) else arg for arg in llm_args] + monkeypatch.setattr(sys, "argv", prepared_args) + monkeypatch.setattr(sys.stdin, "isatty", lambda: True) # prevent llm from trying to read from stdin + monkeypatch.setattr(sys, "exit", lambda x=None: None) + cli() + captured = capsys.readouterr() + validator(captured.out) def test_plugin_is_installed(): diff --git a/uv.lock b/uv.lock index 3dac96a..51eb088 100644 --- a/uv.lock +++ b/uv.lock @@ -418,6 +418,7 @@ source = { editable = "." } dependencies = [ { name = "av" }, { name = "llm" }, + { name = "numpy" }, { name = "pandas" }, { name = "pillow" }, { name = "protobuf" }, @@ -438,6 +439,7 @@ dev = [ requires-dist = [ { name = "av" }, { name = "llm", specifier = ">=0.16" }, + { name = "numpy", specifier = ">=2.1.1" }, { name = "pandas" }, { name = "pillow" }, { name = "protobuf" },