From 1c0799d51333595653eac25ec60150b202e905c4 Mon Sep 17 00:00:00 2001
From: luv-bansal
Date: Tue, 21 Jan 2025 11:02:33 +0000
Subject: [PATCH] Bump config python_version to 3.11 and fix model.py for CPU

---
 .../llms/lmdeploy-llama-3_2-1b-instruct/config.yaml            | 2 +-
 models/model_upload/llms/openai-gpt4/config.yaml               | 2 +-
 models/model_upload/llms/vllm-mistral-7b-instruct/config.yaml  | 2 +-
 .../multimodal_models/vllm-miniCPM-2.6/config.yaml             | 2 +-
 models/model_upload/ocr/got-ocr2.0/1/model.py                  | 3 +--
 models/model_upload/ocr/got-ocr2.0/config.yaml                 | 2 +-
 .../model_upload/speech-recognition/openai-whisper/config.yaml | 2 +-
 models/model_upload/test-upload/mbart/1/model.py               | 3 +--
 8 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/models/model_upload/llms/lmdeploy-llama-3_2-1b-instruct/config.yaml b/models/model_upload/llms/lmdeploy-llama-3_2-1b-instruct/config.yaml
index abc33ea..40b691c 100644
--- a/models/model_upload/llms/lmdeploy-llama-3_2-1b-instruct/config.yaml
+++ b/models/model_upload/llms/lmdeploy-llama-3_2-1b-instruct/config.yaml
@@ -7,7 +7,7 @@ model:
   model_type_id: "text-to-text"
 
 build_info:
-  python_version: "3.10"
+  python_version: "3.11"
 
 inference_compute_info:
   cpu_limit: "2"
diff --git a/models/model_upload/llms/openai-gpt4/config.yaml b/models/model_upload/llms/openai-gpt4/config.yaml
index 52faa80..cd326bc 100644
--- a/models/model_upload/llms/openai-gpt4/config.yaml
+++ b/models/model_upload/llms/openai-gpt4/config.yaml
@@ -7,7 +7,7 @@ model:
   model_type_id: "text-to-text"
 
 build_info:
-  python_version: "3.10"
+  python_version: "3.11"
 
 inference_compute_info:
   cpu_limit: "1"
diff --git a/models/model_upload/llms/vllm-mistral-7b-instruct/config.yaml b/models/model_upload/llms/vllm-mistral-7b-instruct/config.yaml
index 83473f4..944b20d 100644
--- a/models/model_upload/llms/vllm-mistral-7b-instruct/config.yaml
+++ b/models/model_upload/llms/vllm-mistral-7b-instruct/config.yaml
@@ -7,7 +7,7 @@ model:
   model_type_id: "text-to-text"
 
 build_info:
-  python_version: "3.10"
+  python_version: "3.11"
 
 inference_compute_info:
   cpu_limit: "1"
diff --git a/models/model_upload/multimodal_models/vllm-miniCPM-2.6/config.yaml b/models/model_upload/multimodal_models/vllm-miniCPM-2.6/config.yaml
index a08872f..798b9ef 100644
--- a/models/model_upload/multimodal_models/vllm-miniCPM-2.6/config.yaml
+++ b/models/model_upload/multimodal_models/vllm-miniCPM-2.6/config.yaml
@@ -7,7 +7,7 @@ model:
   model_type_id: "multimodal-to-text"
 
 build_info:
-  python_version: "3.10"
+  python_version: "3.11"
 
 inference_compute_info:
   cpu_limit: "1"
diff --git a/models/model_upload/ocr/got-ocr2.0/1/model.py b/models/model_upload/ocr/got-ocr2.0/1/model.py
index 57aa89b..fdf0aa9 100644
--- a/models/model_upload/ocr/got-ocr2.0/1/model.py
+++ b/models/model_upload/ocr/got-ocr2.0/1/model.py
@@ -38,10 +38,9 @@ def load_model(self):
         checkpoint_path,
         trust_remote_code=True,
         use_safetensors=True,
-        device_map="cuda",
+        device_map=self.device,
         low_cpu_mem_usage=True,
         pad_token_id=self.tokenizer.eos_token_id)
-    self.model.eval().cuda()
     logger.info("Done loading Model checkpoints!")
 
   def predict(self, request: service_pb2.PostModelOutputsRequest
diff --git a/models/model_upload/ocr/got-ocr2.0/config.yaml b/models/model_upload/ocr/got-ocr2.0/config.yaml
index 93bb28f..2c190f9 100644
--- a/models/model_upload/ocr/got-ocr2.0/config.yaml
+++ b/models/model_upload/ocr/got-ocr2.0/config.yaml
@@ -7,7 +7,7 @@ model:
   model_type_id: "image-to-text"
 
 build_info:
-  python_version: "3.10"
+  python_version: "3.11"
 
 inference_compute_info:
   cpu_limit: "1"
diff --git a/models/model_upload/speech-recognition/openai-whisper/config.yaml b/models/model_upload/speech-recognition/openai-whisper/config.yaml
index 519ae3e..9db2ffe 100644
--- a/models/model_upload/speech-recognition/openai-whisper/config.yaml
+++ b/models/model_upload/speech-recognition/openai-whisper/config.yaml
@@ -7,7 +7,7 @@ model:
   model_type_id: "audio-to-text"
 
 build_info:
-  python_version: "3.10"
+  python_version: "3.11"
 
 inference_compute_info:
   cpu_limit: "1"
diff --git a/models/model_upload/test-upload/mbart/1/model.py b/models/model_upload/test-upload/mbart/1/model.py
index b1a5239..6e06aff 100644
--- a/models/model_upload/test-upload/mbart/1/model.py
+++ b/models/model_upload/test-upload/mbart/1/model.py
@@ -39,7 +39,7 @@ def load_model(self):
     # if checkpoints section is in config.yaml file then checkpoints will be downloaded at this path during model upload time.
     self.tokenizer = AutoTokenizer.from_pretrained(checkpoints)
     self.model = AutoModelForSeq2SeqLM.from_pretrained(
-        checkpoints, torch_dtype="auto", device_map="auto")
+        checkpoints, torch_dtype="auto", device_map=self.device)
 
   def predict(self, request: service_pb2.PostModelOutputsRequest
              ) -> Iterator[service_pb2.MultiOutputResponse]:
@@ -51,7 +51,6 @@ def predict(self, request: service_pb2.PostModelOutputsRequest
     raw_texts = []
     for t in texts:
       inputs = self.tokenizer.encode(t, return_tensors="pt").to(self.device)
-      # inputs = self.tokenizer.encode("Translate to English: Je t'aime.", return_tensors="pt").to(self.device)
      outputs = self.model.generate(inputs)
       print(self.tokenizer.decode(outputs[0]))
       raw_texts.append(self.tokenizer.decode(outputs[0]))
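Note for reviewers: both model.py hunks switch to device_map=self.device, so they assume a self.device attribute is resolved before load_model() runs; the diff does not show where that happens. The snippet below is a minimal, hypothetical sketch of that pattern (the class name and checkpoint path are placeholders, not quotes from either file). It also assumes PyTorch is importable and that the accelerate package is installed, which transformers requires when device_map is passed to from_pretrained.

    import torch
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    class DeviceAwareModel:
      """Hypothetical sketch; not a quote from either model.py in this patch."""

      def load_model(self):
        # Resolve the device once: prefer CUDA when a GPU is visible,
        # otherwise fall back to CPU so the same code runs on CPU-only hosts.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        checkpoints = "./checkpoints"  # placeholder; the real path comes from config.yaml
        self.tokenizer = AutoTokenizer.from_pretrained(checkpoints)
        # device_map accepts a plain device string ("cpu" or "cuda"), which is
        # exactly what the patched device_map=self.device arguments pass in.
        self.model = AutoModelForSeq2SeqLM.from_pretrained(
            checkpoints, torch_dtype="auto", device_map=self.device)

With this in place, the removed self.model.eval().cuda() call in got-ocr2.0 is no longer needed: from_pretrained already returns the model in eval mode and places it on the selected device.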